From 8348e1951d1daabc39ad52ee92f66b923129d083 Mon Sep 17 00:00:00 2001 From: eclipse-score-bot Date: Mon, 28 Apr 2025 15:58:03 +0200 Subject: [PATCH 001/231] Initial commit --- README.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 00000000..05a8a446 --- /dev/null +++ b/README.md @@ -0,0 +1,2 @@ +# docs-as-code +Contains docs-as-code tooling From f003bf53c7ef315a3c7800918fb792d8f75481c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Mon, 28 Apr 2025 17:43:17 +0200 Subject: [PATCH 002/231] rename and move folders (#2) --- {tooling/docs/_tooling => src}/README.md | 0 {tooling/docs/_tooling => src}/assets/css/score.css | 0 .../_tooling => src}/assets/css/score_design.css | 0 .../_tooling => src}/assets/css/score_needs.css | 0 {tooling/docs/_tooling => src}/docs.bzl | 0 {tooling/docs/_tooling => src}/dummy.py | 0 {tooling/docs/_tooling => src}/extensions/.gitkeep | 0 {tooling/docs/_tooling => src}/extensions/README.md | 0 .../_tooling => src}/extensions/decision_record.md | 0 .../extensions/score_draw_uml_funcs/__init__.py | 0 .../extensions/score_draw_uml_funcs/helpers.py | 0 .../extensions/score_header_service/README.md | 0 .../extensions/score_header_service/__init__.py | 0 .../score_header_service/header_service.py | 0 .../test/test_header_service.py | 0 .../test/test_header_service_integration.py | 0 .../extensions/score_layout/__init__.py | 0 .../extensions/score_layout/html_options.py | 0 .../extensions/score_layout/sphinx_options.py | 0 .../extensions/score_metamodel/README.md | 0 .../extensions/score_metamodel/__init__.py | 0 .../extensions/score_metamodel/checks/__init__.py | 0 .../score_metamodel/checks/attributes_format.py | 0 .../score_metamodel/checks/check_options.py | 0 .../score_metamodel/checks/graph_checks.py | 0 .../score_metamodel/checks/id_contains_feature.py | 0 .../extensions/score_metamodel/checks/standards.py | 0 
.../score_metamodel/checks/traceability.py | 0 .../extensions/score_metamodel/log.py | 0 .../score_metamodel/metamodel-schema.json | 0 .../extensions/score_metamodel/metamodel.yaml | 0 .../extensions/score_metamodel/tests/README.md | 0 .../extensions/score_metamodel/tests/__init__.py | 0 .../test_attributes_format_description.rst | 0 .../attributes/test_attributes_format_id_format.rst | 0 .../attributes/test_attributes_format_id_length.rst | 0 .../rst/attributes/test_attributes_format_title.rst | 0 .../extensions/score_metamodel/tests/rst/conf.py | 0 .../tests/rst/graph/test_metamodel_graph.rst | 0 .../test_id_contains_feature.rst | 0 .../tests/rst/options/test_options_extra_option.rst | 0 .../tests/rst/options/test_options_options.rst | 0 .../score_metamodel/tests/test_attributes_format.py | 0 .../score_metamodel/tests/test_check_options.py | 0 .../tests/test_id_contains_feature.py | 0 .../score_metamodel/tests/test_rules_file_based.py | 0 .../score_metamodel/tests/test_standards.py | 0 .../score_metamodel/tests/test_traceability.py | 0 .../_tooling => src}/extensions/score_plantuml.py | 0 .../extensions/score_source_code_linker/README.md | 0 .../extensions/score_source_code_linker/__init__.py | 0 .../collect_source_files.bzl | 0 .../score_source_code_linker/data_flow.png | Bin .../score_source_code_linker/parse_source_files.py | 0 .../tests/test_requirement_links.py | 0 .../tests/test_source_link.py | 0 {tooling/docs/_tooling => src}/incremental.py | 0 {tooling/docs/_tooling => src}/requirements.txt | 0 .../docs/_tooling => src}/requirements_lock.txt | 0 .../docs/_tooling => src}/templates/layout.html | 0 60 files changed, 0 insertions(+), 0 deletions(-) rename {tooling/docs/_tooling => src}/README.md (100%) rename {tooling/docs/_tooling => src}/assets/css/score.css (100%) rename {tooling/docs/_tooling => src}/assets/css/score_design.css (100%) rename {tooling/docs/_tooling => src}/assets/css/score_needs.css (100%) rename {tooling/docs/_tooling => src}/docs.bzl 
(100%) rename {tooling/docs/_tooling => src}/dummy.py (100%) rename {tooling/docs/_tooling => src}/extensions/.gitkeep (100%) rename {tooling/docs/_tooling => src}/extensions/README.md (100%) rename {tooling/docs/_tooling => src}/extensions/decision_record.md (100%) rename {tooling/docs/_tooling => src}/extensions/score_draw_uml_funcs/__init__.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_draw_uml_funcs/helpers.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_header_service/README.md (100%) rename {tooling/docs/_tooling => src}/extensions/score_header_service/__init__.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_header_service/header_service.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_header_service/test/test_header_service.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_header_service/test/test_header_service_integration.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_layout/__init__.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_layout/html_options.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_layout/sphinx_options.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/README.md (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/__init__.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/checks/__init__.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/checks/attributes_format.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/checks/check_options.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/checks/graph_checks.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/checks/id_contains_feature.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/checks/standards.py (100%) rename {tooling/docs/_tooling => 
src}/extensions/score_metamodel/checks/traceability.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/log.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/metamodel-schema.json (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/metamodel.yaml (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/README.md (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/__init__.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_description.rst (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_format.rst (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_length.rst (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_title.rst (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/rst/conf.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/rst/id_contains_feature/test_id_contains_feature.rst (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/rst/options/test_options_extra_option.rst (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/rst/options/test_options_options.rst (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/test_attributes_format.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/test_check_options.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/test_id_contains_feature.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/test_rules_file_based.py (100%) 
rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/test_standards.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_metamodel/tests/test_traceability.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_plantuml.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_source_code_linker/README.md (100%) rename {tooling/docs/_tooling => src}/extensions/score_source_code_linker/__init__.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_source_code_linker/collect_source_files.bzl (100%) rename {tooling/docs/_tooling => src}/extensions/score_source_code_linker/data_flow.png (100%) rename {tooling/docs/_tooling => src}/extensions/score_source_code_linker/parse_source_files.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_source_code_linker/tests/test_requirement_links.py (100%) rename {tooling/docs/_tooling => src}/extensions/score_source_code_linker/tests/test_source_link.py (100%) rename {tooling/docs/_tooling => src}/incremental.py (100%) rename {tooling/docs/_tooling => src}/requirements.txt (100%) rename {tooling/docs/_tooling => src}/requirements_lock.txt (100%) rename {tooling/docs/_tooling => src}/templates/layout.html (100%) diff --git a/tooling/docs/_tooling/README.md b/src/README.md similarity index 100% rename from tooling/docs/_tooling/README.md rename to src/README.md diff --git a/tooling/docs/_tooling/assets/css/score.css b/src/assets/css/score.css similarity index 100% rename from tooling/docs/_tooling/assets/css/score.css rename to src/assets/css/score.css diff --git a/tooling/docs/_tooling/assets/css/score_design.css b/src/assets/css/score_design.css similarity index 100% rename from tooling/docs/_tooling/assets/css/score_design.css rename to src/assets/css/score_design.css diff --git a/tooling/docs/_tooling/assets/css/score_needs.css b/src/assets/css/score_needs.css similarity index 100% rename from tooling/docs/_tooling/assets/css/score_needs.css rename to 
src/assets/css/score_needs.css diff --git a/tooling/docs/_tooling/docs.bzl b/src/docs.bzl similarity index 100% rename from tooling/docs/_tooling/docs.bzl rename to src/docs.bzl diff --git a/tooling/docs/_tooling/dummy.py b/src/dummy.py similarity index 100% rename from tooling/docs/_tooling/dummy.py rename to src/dummy.py diff --git a/tooling/docs/_tooling/extensions/.gitkeep b/src/extensions/.gitkeep similarity index 100% rename from tooling/docs/_tooling/extensions/.gitkeep rename to src/extensions/.gitkeep diff --git a/tooling/docs/_tooling/extensions/README.md b/src/extensions/README.md similarity index 100% rename from tooling/docs/_tooling/extensions/README.md rename to src/extensions/README.md diff --git a/tooling/docs/_tooling/extensions/decision_record.md b/src/extensions/decision_record.md similarity index 100% rename from tooling/docs/_tooling/extensions/decision_record.md rename to src/extensions/decision_record.md diff --git a/tooling/docs/_tooling/extensions/score_draw_uml_funcs/__init__.py b/src/extensions/score_draw_uml_funcs/__init__.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_draw_uml_funcs/__init__.py rename to src/extensions/score_draw_uml_funcs/__init__.py diff --git a/tooling/docs/_tooling/extensions/score_draw_uml_funcs/helpers.py b/src/extensions/score_draw_uml_funcs/helpers.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_draw_uml_funcs/helpers.py rename to src/extensions/score_draw_uml_funcs/helpers.py diff --git a/tooling/docs/_tooling/extensions/score_header_service/README.md b/src/extensions/score_header_service/README.md similarity index 100% rename from tooling/docs/_tooling/extensions/score_header_service/README.md rename to src/extensions/score_header_service/README.md diff --git a/tooling/docs/_tooling/extensions/score_header_service/__init__.py b/src/extensions/score_header_service/__init__.py similarity index 100% rename from 
tooling/docs/_tooling/extensions/score_header_service/__init__.py rename to src/extensions/score_header_service/__init__.py diff --git a/tooling/docs/_tooling/extensions/score_header_service/header_service.py b/src/extensions/score_header_service/header_service.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_header_service/header_service.py rename to src/extensions/score_header_service/header_service.py diff --git a/tooling/docs/_tooling/extensions/score_header_service/test/test_header_service.py b/src/extensions/score_header_service/test/test_header_service.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_header_service/test/test_header_service.py rename to src/extensions/score_header_service/test/test_header_service.py diff --git a/tooling/docs/_tooling/extensions/score_header_service/test/test_header_service_integration.py b/src/extensions/score_header_service/test/test_header_service_integration.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_header_service/test/test_header_service_integration.py rename to src/extensions/score_header_service/test/test_header_service_integration.py diff --git a/tooling/docs/_tooling/extensions/score_layout/__init__.py b/src/extensions/score_layout/__init__.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_layout/__init__.py rename to src/extensions/score_layout/__init__.py diff --git a/tooling/docs/_tooling/extensions/score_layout/html_options.py b/src/extensions/score_layout/html_options.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_layout/html_options.py rename to src/extensions/score_layout/html_options.py diff --git a/tooling/docs/_tooling/extensions/score_layout/sphinx_options.py b/src/extensions/score_layout/sphinx_options.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_layout/sphinx_options.py rename to src/extensions/score_layout/sphinx_options.py diff 
--git a/tooling/docs/_tooling/extensions/score_metamodel/README.md b/src/extensions/score_metamodel/README.md similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/README.md rename to src/extensions/score_metamodel/README.md diff --git a/tooling/docs/_tooling/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/__init__.py rename to src/extensions/score_metamodel/__init__.py diff --git a/tooling/docs/_tooling/extensions/score_metamodel/checks/__init__.py b/src/extensions/score_metamodel/checks/__init__.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/checks/__init__.py rename to src/extensions/score_metamodel/checks/__init__.py diff --git a/tooling/docs/_tooling/extensions/score_metamodel/checks/attributes_format.py b/src/extensions/score_metamodel/checks/attributes_format.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/checks/attributes_format.py rename to src/extensions/score_metamodel/checks/attributes_format.py diff --git a/tooling/docs/_tooling/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/checks/check_options.py rename to src/extensions/score_metamodel/checks/check_options.py diff --git a/tooling/docs/_tooling/extensions/score_metamodel/checks/graph_checks.py b/src/extensions/score_metamodel/checks/graph_checks.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/checks/graph_checks.py rename to src/extensions/score_metamodel/checks/graph_checks.py diff --git a/tooling/docs/_tooling/extensions/score_metamodel/checks/id_contains_feature.py b/src/extensions/score_metamodel/checks/id_contains_feature.py similarity index 100% rename from 
tooling/docs/_tooling/extensions/score_metamodel/checks/id_contains_feature.py rename to src/extensions/score_metamodel/checks/id_contains_feature.py diff --git a/tooling/docs/_tooling/extensions/score_metamodel/checks/standards.py b/src/extensions/score_metamodel/checks/standards.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/checks/standards.py rename to src/extensions/score_metamodel/checks/standards.py diff --git a/tooling/docs/_tooling/extensions/score_metamodel/checks/traceability.py b/src/extensions/score_metamodel/checks/traceability.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/checks/traceability.py rename to src/extensions/score_metamodel/checks/traceability.py diff --git a/tooling/docs/_tooling/extensions/score_metamodel/log.py b/src/extensions/score_metamodel/log.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/log.py rename to src/extensions/score_metamodel/log.py diff --git a/tooling/docs/_tooling/extensions/score_metamodel/metamodel-schema.json b/src/extensions/score_metamodel/metamodel-schema.json similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/metamodel-schema.json rename to src/extensions/score_metamodel/metamodel-schema.json diff --git a/tooling/docs/_tooling/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/metamodel.yaml rename to src/extensions/score_metamodel/metamodel.yaml diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/README.md b/src/extensions/score_metamodel/tests/README.md similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/tests/README.md rename to src/extensions/score_metamodel/tests/README.md diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/__init__.py 
b/src/extensions/score_metamodel/tests/__init__.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/tests/__init__.py rename to src/extensions/score_metamodel/tests/__init__.py diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_description.rst b/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_description.rst similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_description.rst rename to src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_description.rst diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_format.rst b/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_format.rst similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_format.rst rename to src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_format.rst diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_length.rst b/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_length.rst similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_length.rst rename to src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_length.rst diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_title.rst b/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_title.rst similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_title.rst rename to 
src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_title.rst diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/rst/conf.py b/src/extensions/score_metamodel/tests/rst/conf.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/tests/rst/conf.py rename to src/extensions/score_metamodel/tests/rst/conf.py diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst b/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst rename to src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/rst/id_contains_feature/test_id_contains_feature.rst b/src/extensions/score_metamodel/tests/rst/id_contains_feature/test_id_contains_feature.rst similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/tests/rst/id_contains_feature/test_id_contains_feature.rst rename to src/extensions/score_metamodel/tests/rst/id_contains_feature/test_id_contains_feature.rst diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/rst/options/test_options_extra_option.rst b/src/extensions/score_metamodel/tests/rst/options/test_options_extra_option.rst similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/tests/rst/options/test_options_extra_option.rst rename to src/extensions/score_metamodel/tests/rst/options/test_options_extra_option.rst diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/rst/options/test_options_options.rst b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/tests/rst/options/test_options_options.rst rename to 
src/extensions/score_metamodel/tests/rst/options/test_options_options.rst diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/test_attributes_format.py b/src/extensions/score_metamodel/tests/test_attributes_format.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/tests/test_attributes_format.py rename to src/extensions/score_metamodel/tests/test_attributes_format.py diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/test_check_options.py b/src/extensions/score_metamodel/tests/test_check_options.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/tests/test_check_options.py rename to src/extensions/score_metamodel/tests/test_check_options.py diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/test_id_contains_feature.py b/src/extensions/score_metamodel/tests/test_id_contains_feature.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/tests/test_id_contains_feature.py rename to src/extensions/score_metamodel/tests/test_id_contains_feature.py diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/test_rules_file_based.py b/src/extensions/score_metamodel/tests/test_rules_file_based.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/tests/test_rules_file_based.py rename to src/extensions/score_metamodel/tests/test_rules_file_based.py diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/test_standards.py b/src/extensions/score_metamodel/tests/test_standards.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_metamodel/tests/test_standards.py rename to src/extensions/score_metamodel/tests/test_standards.py diff --git a/tooling/docs/_tooling/extensions/score_metamodel/tests/test_traceability.py b/src/extensions/score_metamodel/tests/test_traceability.py similarity index 100% rename from 
tooling/docs/_tooling/extensions/score_metamodel/tests/test_traceability.py rename to src/extensions/score_metamodel/tests/test_traceability.py diff --git a/tooling/docs/_tooling/extensions/score_plantuml.py b/src/extensions/score_plantuml.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_plantuml.py rename to src/extensions/score_plantuml.py diff --git a/tooling/docs/_tooling/extensions/score_source_code_linker/README.md b/src/extensions/score_source_code_linker/README.md similarity index 100% rename from tooling/docs/_tooling/extensions/score_source_code_linker/README.md rename to src/extensions/score_source_code_linker/README.md diff --git a/tooling/docs/_tooling/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_source_code_linker/__init__.py rename to src/extensions/score_source_code_linker/__init__.py diff --git a/tooling/docs/_tooling/extensions/score_source_code_linker/collect_source_files.bzl b/src/extensions/score_source_code_linker/collect_source_files.bzl similarity index 100% rename from tooling/docs/_tooling/extensions/score_source_code_linker/collect_source_files.bzl rename to src/extensions/score_source_code_linker/collect_source_files.bzl diff --git a/tooling/docs/_tooling/extensions/score_source_code_linker/data_flow.png b/src/extensions/score_source_code_linker/data_flow.png similarity index 100% rename from tooling/docs/_tooling/extensions/score_source_code_linker/data_flow.png rename to src/extensions/score_source_code_linker/data_flow.png diff --git a/tooling/docs/_tooling/extensions/score_source_code_linker/parse_source_files.py b/src/extensions/score_source_code_linker/parse_source_files.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_source_code_linker/parse_source_files.py rename to src/extensions/score_source_code_linker/parse_source_files.py diff --git 
a/tooling/docs/_tooling/extensions/score_source_code_linker/tests/test_requirement_links.py b/src/extensions/score_source_code_linker/tests/test_requirement_links.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_source_code_linker/tests/test_requirement_links.py rename to src/extensions/score_source_code_linker/tests/test_requirement_links.py diff --git a/tooling/docs/_tooling/extensions/score_source_code_linker/tests/test_source_link.py b/src/extensions/score_source_code_linker/tests/test_source_link.py similarity index 100% rename from tooling/docs/_tooling/extensions/score_source_code_linker/tests/test_source_link.py rename to src/extensions/score_source_code_linker/tests/test_source_link.py diff --git a/tooling/docs/_tooling/incremental.py b/src/incremental.py similarity index 100% rename from tooling/docs/_tooling/incremental.py rename to src/incremental.py diff --git a/tooling/docs/_tooling/requirements.txt b/src/requirements.txt similarity index 100% rename from tooling/docs/_tooling/requirements.txt rename to src/requirements.txt diff --git a/tooling/docs/_tooling/requirements_lock.txt b/src/requirements_lock.txt similarity index 100% rename from tooling/docs/_tooling/requirements_lock.txt rename to src/requirements_lock.txt diff --git a/tooling/docs/_tooling/templates/layout.html b/src/templates/layout.html similarity index 100% rename from tooling/docs/_tooling/templates/layout.html rename to src/templates/layout.html From a54d4fb6c7e253dbc73c447295081dc8a85eb9e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Tue, 29 Apr 2025 12:13:22 +0200 Subject: [PATCH 003/231] Integrate functionality (#3) Moved docs.bzl to root Added missing dotfiles Copied score_metamodel from score and discarded old local changes Small clean up of several files Renaming of 'tooling' -> 'src' Formatted code --- .bazelrc | 12 ++ .bazelversion | 1 + .gitignore | 25 +++ .yamlfmt | 3 + BUILD | 32 ++++ LICENSE | 13 ++ 
MODULE.bazel | 95 +++++++++++ src/docs.bzl => docs.bzl | 101 ++++++----- process-docs/BUILD | 73 ++++++++ process-docs/conf.py | 77 +++++++++ process-docs/index.rst | 34 ++++ pyproject.toml | 7 + src/BUILD | 139 +++++++++++++++ src/README.md | 16 +- src/docs_assets_lib/__init__.py | 17 ++ src/extensions/BUILD | 88 ++++++++++ src/extensions/__init__.py | 0 .../score_draw_uml_funcs/__init__.py | 42 +++-- .../score_header_service/header_service.py | 2 +- src/extensions/score_layout/html_options.py | 2 +- src/extensions/score_metamodel/__init__.py | 4 +- .../checks/attributes_format.py | 6 +- .../score_metamodel/checks/check_options.py | 12 +- src/extensions/score_metamodel/metamodel.yaml | 16 +- .../score_metamodel/tests/__init__.py | 2 +- .../tests/test_attributes_format.py | 6 +- .../tests/test_check_options.py | 4 +- .../tests/test_rules_file_based.py | 4 +- .../score_metamodel/tests/test_standards.py | 8 +- .../score_source_code_linker/README.md | 2 +- .../score_source_code_linker/__init__.py | 126 +++++++++----- .../collect_source_files.bzl | 10 +- .../tests/test_source_link.py | 8 +- src/find_runfiles/__init__.py | 122 +++++++++++++ src/find_runfiles/test_find_runfiles.py | 94 ++++++++++ src/incremental.py | 161 ++++++++---------- src/requirements.txt | 3 - src/requirements_lock.txt | 115 +++---------- 38 files changed, 1137 insertions(+), 345 deletions(-) create mode 100644 .bazelrc create mode 100644 .bazelversion create mode 100644 .gitignore create mode 100644 .yamlfmt create mode 100644 BUILD create mode 100644 LICENSE create mode 100644 MODULE.bazel rename src/docs.bzl => docs.bzl (63%) create mode 100644 process-docs/BUILD create mode 100644 process-docs/conf.py create mode 100644 process-docs/index.rst create mode 100644 pyproject.toml create mode 100644 src/BUILD create mode 100644 src/docs_assets_lib/__init__.py create mode 100644 src/extensions/BUILD create mode 100644 src/extensions/__init__.py create mode 100644 src/find_runfiles/__init__.py create 
mode 100644 src/find_runfiles/test_find_runfiles.py diff --git a/.bazelrc b/.bazelrc new file mode 100644 index 00000000..85aed888 --- /dev/null +++ b/.bazelrc @@ -0,0 +1,12 @@ +build --java_language_version=17 +build --tool_java_language_version=17 +build --java_runtime_version=remotejdk_17 +build --tool_java_runtime_version=remotejdk_17 + +test --test_output=errors + +# stop legacy behavior of creating __init__.py files +build --incompatible_default_to_explicit_init_py + +common --registry=https://raw.githubusercontent.com/eclipse-score/bazel_registry/main/ +common --registry=https://bcr.bazel.build diff --git a/.bazelversion b/.bazelversion new file mode 100644 index 00000000..ba7f754d --- /dev/null +++ b/.bazelversion @@ -0,0 +1 @@ +7.4.0 diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..421cd8aa --- /dev/null +++ b/.gitignore @@ -0,0 +1,25 @@ +# Commonly used for local settings and secrets +.env + +# Bazel +bazel-* +MODULE.bazel.lock +user.bazelrc + +# Ruff +.ruff_cache + +# docs:incremental and docs:ide_support build artifacts +/_build* + +# Vale - editorial style guide +.vale.ini +styles/ + +# direnv - folder-specific bash configuration +.envrc + +# Python +.venv +__pycache__/ +/.coverage diff --git a/.yamlfmt b/.yamlfmt new file mode 100644 index 00000000..e9d24f96 --- /dev/null +++ b/.yamlfmt @@ -0,0 +1,3 @@ +formatter: + type: basic + retain_line_breaks: true diff --git a/BUILD b/BUILD new file mode 100644 index 00000000..16742e14 --- /dev/null +++ b/BUILD @@ -0,0 +1,32 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +load("@score_cr_checker//:cr_checker.bzl", "copyright_checker") + +copyright_checker( + name = "copyright", + srcs = [ + "process-docs", + "src", + "//:BUILD", + "//:MODULE.bazel", + ], + config = "@score_cr_checker//resources:config", + template = "@score_cr_checker//resources:templates", + visibility = ["//visibility:public"], +) + +exports_files([ + "MODULE.bazel", + "BUILD", +]) diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..8c69a8bc --- /dev/null +++ b/LICENSE @@ -0,0 +1,13 @@ +Copyright 2025 Contributors to the Eclipse Foundation + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/MODULE.bazel b/MODULE.bazel new file mode 100644 index 00000000..cbe4a5fd --- /dev/null +++ b/MODULE.bazel @@ -0,0 +1,95 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +module( + name = "docs-as-code", + version = "0.1.0", + compatibility_level = 0, +) + +############################################################################### +# +# Packaging dependencies +# +############################################################################### +bazel_dep(name = "rules_pkg", version = "1.0.1") + +############################################################################### +# +# Python version +# +############################################################################### +bazel_dep(name = "rules_python", version = "1.0.0") + +PYTHON_VERSION = "3.12" + +python = use_extension("@rules_python//python/extensions:python.bzl", "python") +python.toolchain( + configure_coverage_tool = True, + is_default = True, + python_version = PYTHON_VERSION, +) +use_repo(python) + +############################################################################### +# +# docs dependencies (Sphinx) +# +############################################################################### +pip = use_extension("@rules_python//python/extensions:pip.bzl", "pip") +pip.parse( + hub_name = "pip_process", + python_version = PYTHON_VERSION, + requirements_lock = "//src:requirements.txt", +) +use_repo(pip, "pip_process") + +# Additional Python rules provided by aspect, e.g. 
an improved version of +bazel_dep(name = "aspect_rules_py", version = "1.0.0") +bazel_dep(name = "buildifier_prebuilt", version = "7.3.1") + +############################################################################### +# +# Generic linting and formatting rules +# +############################################################################### +bazel_dep(name = "aspect_rules_lint", version = "1.3.1") + +# PlantUML for docs +bazel_dep(name = "rules_java", version = "8.6.3") + +http_jar = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_jar") + +http_jar( + name = "plantuml", + sha256 = "6f38f70455d08438979451c2257cd5d58647c6460094bb829bc2a12878d47331", + url = "https://github.com/plantuml/plantuml/releases/download/v1.2025.0/plantuml-1.2025.0.jar", +) + +# Bazel LSP +http_file = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file") + +http_file( + name = "starpls_prebuilt", + sha256 = "45692ecb9d94a19a15b1e7b240acdff5702f78cd22188dac41e1879cb8bdcdcf", + urls = ["https://github.com/withered-magic/starpls/releases/download/v0.1.21/starpls-linux-amd64"], +) + +# Provides, pytest & venv +bazel_dep(name = "score_python_basics", version = "0.3.0") + +# Checker rule for CopyRight checks/fixes +bazel_dep(name = "score_cr_checker", version = "0.2.0", dev_dependency = True) + +# Grab dash +bazel_dep(name = "score_dash_license_checker", version = "0.1.1", dev_dependency = True) diff --git a/src/docs.bzl b/docs.bzl similarity index 63% rename from src/docs.bzl rename to docs.bzl index f9321495..17ee5cfa 100644 --- a/src/docs.bzl +++ b/docs.bzl @@ -1,5 +1,5 @@ # ******************************************************************************* -# Copyright (c) 2024 Contributors to the Eclipse Foundation +# Copyright (c) 2025 Contributors to the Eclipse Foundation # # See the NOTICE file(s) distributed with this work for additional # information regarding copyright ownership. 
@@ -37,22 +37,19 @@ # # For user-facing documentation, refer to `/README.md`. -load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library", "py_venv") -load("@pip_sphinx//:requirements.bzl", "all_requirements", "requirement") -load("@rules_java//java:defs.bzl", "java_binary") -load("@rules_pkg//pkg:mappings.bzl", "pkg_files") -load("@rules_pkg//pkg:tar.bzl", "pkg_tar") -load("@rules_python//python:pip.bzl", "compile_pip_requirements") +load("@aspect_rules_py//py:defs.bzl", "py_binary") +load("@pip_process//:requirements.bzl", "all_requirements", "requirement") load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary", "sphinx_docs") -load("//docs:_tooling/extensions/score_source_code_linker/collect_source_files.bzl", "parse_source_files_for_needs_links") -load("//tools/testing/pytest:defs.bzl", "score_py_pytest") +load("@rules_python//sphinxdocs:sphinx_docs_library.bzl", "sphinx_docs_library") +load("@score_python_basics//:defs.bzl", "score_virtualenv") +load("//src/extensions:score_source_code_linker/collect_source_files.bzl", "parse_source_files_for_needs_links") sphinx_requirements = all_requirements + [ - "@rules_python//python/runfiles", - ":plantuml_for_python", + "//src:plantuml_for_python", + "//src/extensions:score_extensions", ] -def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_dir = "docs", build_dir_for_incremental = "_build"): +def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_dir = "docs", build_dir_for_incremental = "_build", docs_targets = []): """ Creates all targets related to documentation. By using this function, you'll get any and all updates for documentation targets in one place. 
@@ -61,30 +58,42 @@ def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_ """ # Parse source files for needs links + # This needs to be created to generate a target, otherwise it won't execute as dependency for other macros parse_source_files_for_needs_links( name = "score_source_code_parser", srcs_and_deps = source_files_to_scan_for_needs_links if source_files_to_scan_for_needs_links else [], ) - # Get the output of source_code_linker - # Does not work: - # rule_info = native.existing_rule(source_code_linker.name) - # source_code_links = rule_info[SourceCodeLinks].file.path - # Workaround: - source_code_links = "score_source_code_parser.json" - - # Run-time build of documentation, incl. incremental build support and non-IDE live preview. - _incremental(":score_source_code_parser", source_code_links, source_dir = source_dir, conf_dir = conf_dir, build_dir = build_dir_for_incremental) + # TODO: Explain what this does / how it works? + for target in docs_targets: + suffix = "_" + target["suffix"] if target["suffix"] else "" + external_needs_deps = target.get("target", []) + external_needs_def = target.get("external_needs_info", []) + _incremental( + incremental_name = "incremental" + suffix, + live_name = "live_preview" + suffix, + conf_dir = conf_dir, + source_dir = source_dir, + build_dir = build_dir_for_incremental, + external_needs_deps = external_needs_deps, + external_needs_def = external_needs_def, + ) + _docs( + name = "docs" + suffix, + format = "html", + external_needs_deps = external_needs_deps, + external_needs_def = external_needs_def, + ) # Virtual python environment for working on the documentation (esbonio). # incl. python support when working on conf.py and sphinx extensions. 
# creates :ide_support target for virtualenv _ide_support() - # creates :docs target for build time documentation - _docs() + # creates 'needs.json' build target + _docs(name = "docs_needs", format = "needs") -def _incremental(source_code_linker, source_code_links, source_dir = "docs", conf_dir = "docs", build_dir = "_build", extra_dependencies = list()): +def _incremental(incremental_name = "incremental", live_name = "live_preview", source_dir = "docs", conf_dir = "docs", build_dir = "_build", extra_dependencies = list(), external_needs_deps = list(), external_needs_def = None): """ A target for building docs incrementally at runtime, incl live preview. Args: @@ -97,44 +106,47 @@ def _incremental(source_code_linker, source_code_links, source_dir = "docs", con """ dependencies = sphinx_requirements + extra_dependencies - py_binary( - name = "incremental", - srcs = ["//docs:_tooling/incremental.py"], - data = [source_code_linker, "//docs:docs_assets"], + name = incremental_name, + srcs = ["//src:incremental.py"], deps = dependencies, + data = [":score_source_code_parser"] + external_needs_deps, env = { - "SOURCE_CODE_LINKS": source_code_links, "SOURCE_DIRECTORY": source_dir, "CONF_DIRECTORY": conf_dir, "BUILD_DIRECTORY": build_dir, + "EXTERNAL_NEEDS_INFO": json.encode(external_needs_def), "ACTION": "incremental", }, ) py_binary( - name = "live_preview", - srcs = ["//docs:_tooling/incremental.py"], - data = ["//docs:docs_assets"], + name = live_name, + srcs = ["//src:incremental.py"], deps = dependencies, + data = external_needs_deps, env = { "SOURCE_DIRECTORY": source_dir, "CONF_DIRECTORY": conf_dir, "BUILD_DIRECTORY": build_dir, + "EXTERNAL_NEEDS_INFO": json.encode(external_needs_def), "ACTION": "live_preview", }, ) def _ide_support(): - py_venv( + score_virtualenv( name = "ide_support", venv_name = ".venv_docs", - deps = sphinx_requirements, + reqs = sphinx_requirements, ) -def _docs(): +def _docs(name = "docs", format = "html", external_needs_deps = list(), 
external_needs_def = dict()): + ext_needs_arg = "--define=external_needs_source=" + json.encode(external_needs_def) + + #fail(ext_needs_arg) sphinx_docs( - name = "docs", + name = name, srcs = native.glob([ "**/*.png", "**/*.svg", @@ -143,29 +155,28 @@ def _docs(): "**/*.css", "**/*.puml", "**/*.need", - # Include the docs tooling itself + # Include the docs src itself # Note: we don't use py_library here to make it as close as possible to docs:incremental. "**/*.py", "**/*.yaml", "**/*.json", "**/*.csv", - ], exclude = ["**/tests/rst/**/*.rst"]), + ], exclude = ["**/tests/*"]), config = ":conf.py", extra_opts = [ "-W", "--keep-going", - # This is 'overwriting' the configuration parameter inside sphinx. As we only get this information during runtime - "--define=source_code_linker_file=$(location :score_source_code_parser)", - ], + ] + [ext_needs_arg], formats = [ - "html", + format, ], - sphinx = ":sphinx_build", + sphinx = "//src:sphinx_build", tags = [ "manual", ], tools = [ ":score_source_code_parser", - ":plantuml", - ], + "//src:plantuml", + ] + external_needs_deps, + visibility = ["//visibility:public"], ) diff --git a/process-docs/BUILD b/process-docs/BUILD new file mode 100644 index 00000000..282f9f38 --- /dev/null +++ b/process-docs/BUILD @@ -0,0 +1,73 @@ +# ******************************************************************************* +# Copyright (c) 2024 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +load("@aspect_rules_py//py:defs.bzl", "py_library") +load("//:docs.bzl", "docs") + +# Creates all documentation targets: +# - `docs:incremental` for building docs incrementally at runtime +# - `docs:live_preview` for live preview in the browser without an IDE +# - `docs:ide_support` for creating python virtualenv for IDE support +# - `docs:docs` for building documentation at build-time +docs( + conf_dir = "process-docs", + docs_targets = [ + { + "suffix": "", # local without external needs + }, + + # ╭───────────────────────────────────────╮ + # │ This is commented out until local │ + # │ multi-repo testing is implemented │ + # ╰───────────────────────────────────────╯ + + # { + # "suffix": "release", # The version imported from MODULE.bazel + # "target": ["@score_platform//docs:docs"], + # "external_needs_info": [ + # { + # "base_url": "https://eclipse-score.github.io/score/pr-980/", + # "json_path": "/score_platform~/docs/docs/_build/html/needs.json", + # "version": "0.1", + # }, + # ], + # }, + # { + # "suffix": "latest", # latest main branch documentation build + # "external_needs_info": [ + # { + # "base_url": "https://eclipse-score.github.io/score/main/", + # "json_url": "https://maximiliansoerenpollak.github.io/score/needs.json", + # "version": "0.1", + # }, + # ], + # }, + ], + source_dir = "process-docs", + source_files_to_scan_for_needs_links = [ + # Note: you can add filegroups, globs, or entire targets here. 
+ "//src/extensions:score_extensions", + ":score_extensions", + ":score_source_code_linker", + ], +) + +py_library( + name = "score_source_code_linker", + srcs = glob( + ["src/extensions/score_source_code_linker/**/*.py"], + exclude = ["src/extensions/score_source_code_linker/tests/*.py"], + ), + imports = ["src/extensions"], + visibility = ["//visibility:public"], +) diff --git a/process-docs/conf.py b/process-docs/conf.py new file mode 100644 index 00000000..e0267d70 --- /dev/null +++ b/process-docs/conf.py @@ -0,0 +1,77 @@ +# ******************************************************************************* +# Copyright (c) 2024 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +# Configuration file for the Sphinx documentation builder. 
+# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +import logging + + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +project = "docs-as-code" +author = "Score" +version = "0.1" + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +logger = logging.getLogger("process-docs") +logger.debug("Loading docs-as-code conf.py") + +extensions = [ + "sphinx_design", + "sphinx_needs", + "sphinxcontrib.plantuml", + "score_plantuml", + "score_metamodel", + "score_draw_uml_funcs", + "score_source_code_linker", + "score_layout", +] +logger.debug("After loading extensions") + +exclude_patterns = [ + # The following entries are not required when building the documentation + # via 'bazel build //docs:docs', as that command runs in a sandboxed environment. + # However, when building the documentation via 'sphinx-build' or esbonio, + # these entries are required to prevent the build from failing. + "bazel-*", + ".venv_docs", +] + +templates_path = ["templates"] + +# Enable numref +numfig = True + + +# -- sphinx-needs configuration -------------------------------------------- +# Setting the needs layouts +needs_global_options = {"collapse": True} +needs_string_links = { + "source_code_linker": { + "regex": r"(?P[^,]+)", + "link_url": "{{value}}", + "link_name": "Source Code Link", + "options": ["source_code_link"], + }, +} + +# TODO: Fixing this in all builds +html_static_path = ["../src/assets"] + +logger.debug("After loading S-CORE conf.py") diff --git a/process-docs/index.rst b/process-docs/index.rst new file mode 100644 index 00000000..e3c47208 --- /dev/null +++ b/process-docs/index.rst @@ -0,0 +1,34 @@ +.. 
+ # ******************************************************************************* + # Copyright (c) 2024 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + +Hello World +================= +This is a simple example of a documentation page using the `docs` tool. + +.. stkh_req:: TestTitle + :id: stkh_req__test_requirement + :status: valid + :safety: QM + :rationale: A simple requirement we need to enable a documentation build + :reqtype: Functional + + Some content to make sure we also can render this + + +.. .. std_req:: External Link Test Req +.. :id: std_req__iso26262__testing +.. :status: valid +.. :links: gd_req__dynamic_diagram +.. +.. This is some test content diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..7c055b77 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,7 @@ +# This file is at the root level, as it applies to all Python code, +# not only to docs or to tools. +[tool.pyright] +extends = "bazel-bin/process-docs/ide_support.runfiles/score_python_basics~/pyproject.toml" + +[tool.ruff] +extend = "bazel-bin/process-docs/ide_support.runfiles/score_python_basics~/pyproject.toml" diff --git a/src/BUILD b/src/BUILD new file mode 100644 index 00000000..2c0cc69f --- /dev/null +++ b/src/BUILD @@ -0,0 +1,139 @@ +# ******************************************************************************* +# Copyright (c) 2024 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +load("@aspect_rules_lint//format:defs.bzl", "format_multirun", "format_test") +load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") +load("@pip_process//:requirements.bzl", "all_requirements", "requirement") +load("@rules_pkg//pkg:mappings.bzl", "pkg_files") +load("@rules_pkg//pkg:tar.bzl", "pkg_tar") +load("@rules_python//python:pip.bzl", "compile_pip_requirements") +load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary") +load("@score_dash_license_checker//:dash.bzl", "dash_license_checker") + +sphinx_requirements = all_requirements + [ + ":plantuml_for_python", + "//src/extensions:score_extensions", +] + +sphinx_build_binary( + name = "sphinx_build", + visibility = ["//visibility:public"], + deps = sphinx_requirements, +) + +# These are only exported because they're passed as files to the //docs.bzl +# macros, and thus must be visible to other packages. They should only be +# referenced by the //docs.bzl macros. +exports_files( + [ + "requirements.txt", + "incremental.py", + ], + visibility = ["//visibility:public"], +) + +# In order to update the requirements, change the `requirements.txt` file and run: +# `bazel run //docs:requirements`. +# This will update the `requirements_lock.txt` file. +# To upgrade all dependencies to their latest versions, run: +# `bazel run //docs:requirements -- --upgrade`. 
+compile_pip_requirements( + name = "requirements", + srcs = [ + "requirements.in", + "@score_python_basics//:requirements.txt", + ], + requirements_txt = "requirements.txt", + tags = [ + "manual", + ], +) + +filegroup( + name = "html", + srcs = [":docs"], + output_group = "html", +) + +pkg_files( + name = "html_files", + srcs = [":html"], + strip_prefix = "html", + #renames={"html": ""}, +) + +pkg_tar( + name = "github-pages", + srcs = [":html_files"], +) + +java_binary( + name = "plantuml", + jvm_flags = ["-Djava.awt.headless=true"], + main_class = "net.sourceforge.plantuml.Run", + visibility = ["//visibility:public"], + runtime_deps = [ + "@plantuml//jar", + ], +) + +# This makes it possible for py_venv to depend on plantuml. +# Note: py_venv can only depend on py_library. +# TODO: This can be removed with the next +# upgrade of `aspect_rules_py` since the py_venv rule now supports a data field +py_library( + name = "plantuml_for_python", + srcs = ["dummy.py"], + data = [":plantuml"], + visibility = ["//visibility:public"], +) + +# Running this executes the `collect_source_files.bzl` aspect. +# Collects all source files from specified targets in 'deps', and makes them available for parsing for the source_code_linker + +# Needed for Dash tool to check python dependency licenses. 
+filegroup( + name = "requirements_lock", + srcs = [ + "requirements.txt", + ], + visibility = ["//visibility:public"], +) + +dash_license_checker( + src = ":requirements_lock", + file_type = "requirements", # let it auto-detect based on project_config + visibility = ["//visibility:public"], +) + +format_multirun( + name = "format.fix", + python = "@aspect_rules_lint//format:ruff", + starlark = "@buildifier_prebuilt//:buildifier", + visibility = [ + "//visibility:public", + ], + yaml = "@aspect_rules_lint//format:yamlfmt", +) + +format_test( + name = "format.check", + no_sandbox = True, + python = "@aspect_rules_lint//format:ruff", + starlark = "@buildifier_prebuilt//:buildifier", + visibility = [ + "//visibility:public", + ], + workspace = "//:MODULE.bazel", + yaml = "@aspect_rules_lint//format:yamlfmt", +) diff --git a/src/README.md b/src/README.md index 744a2ee7..554bcff6 100644 --- a/src/README.md +++ b/src/README.md @@ -1,6 +1,6 @@ # S-CORE Project Tooling Development Guide -*This document is meant for *developers* of the `_tooling` of docs in the score repository.* +*This document is meant for *developers* of doc-as-code.* It should be treated as a 'get-started' guide, giving you all needed information to get up and running. ## Quick Start @@ -11,7 +11,7 @@ It should be treated as a 'get-started' guide, giving you all needed information 1. Install Bazelisk (version manager for Bazel) 2. Create the Python virtual environment: ```bash - bazel run //docs:ide_support + bazel run //process-docs:ide_support ``` 3. 
Select `.venv_docs/bin/python` as the python interpreter inside your IDE *Note: This virtual environment does **not** have pip, therefore `pip install` is not available.* @@ -54,12 +54,12 @@ It should be treated as a 'get-started' guide, giving you all needed information -## Tooling Directory Architecture +## docs-as-code Directory Architecture ``` -docs/_tooling/ +process-docs/ # Local documentation to test functionality +src/ ├── assets/ # Documentation styling (CSS) -├── conf_extras/ # Sphinx configuration extensions ├── decision_records/ # Architecture Decision Records (ADRs) ├── extensions/ # Custom Sphinx extensions │ └── score_metamodel/ @@ -79,7 +79,7 @@ Find everything related to testing and how to add your on test suite [here](/too 2. Create a dedicated test directory 3. Include an appropriate README in markdown -> If you want to develop your own sphinx extension, check out the [extensions guide](/docs/_tooling/extensions/README.md) +> If you want to develop your own sphinx extension, check out the [extensions guide](/src/extensions/README.md) ## Best Practices @@ -113,6 +113,6 @@ Common issues and solutions: - Verify extension dependencies ## Additional Resources -- [Sphinx extension guide](/docs/_tooling/extensions/README.md) -- [S-CORE Metamodel Documentation](/docs/_tooling/extensions/score_metamodel/README.md) +- [Sphinx extension guide](/src/extensions/README.md) +- [S-CORE Metamodel Documentation](/src/extensions/score_metamodel/README.md) - [Pytest Integration Guide](/tools/testing/pytest/README.md) diff --git a/src/docs_assets_lib/__init__.py b/src/docs_assets_lib/__init__.py new file mode 100644 index 00000000..7d8f54d2 --- /dev/null +++ b/src/docs_assets_lib/__init__.py @@ -0,0 +1,17 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +from pathlib import Path + + +def get_path(): + return Path(__file__).parent diff --git a/src/extensions/BUILD b/src/extensions/BUILD new file mode 100644 index 00000000..703d9a7a --- /dev/null +++ b/src/extensions/BUILD @@ -0,0 +1,88 @@ +# ******************************************************************************* +# Copyright (c) 2024 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") +load("@pip_process//:requirements.bzl", "all_requirements") +load("@score_python_basics//:defs.bzl", "score_py_pytest", "score_virtualenv") + +score_virtualenv( + name = "process_venv", + reqs = all_requirements, + venv_name = ".venv_process", +) + +py_library( + name = "score_extensions", + srcs = glob( + ["**/*.py"], + exclude = ["**/tests/**/*"], + ), + data = glob([ + "score_layout/assets/**", + "score_metamodel/*.yaml", # Needed to remove 'resolving of symlink' in score_metamodel.__init__ + ]), + imports = ["."], + visibility = ["//visibility:public"], + deps = [ + "@rules_python//python/runfiles", + ], +) + +# Dedicated metamodel target only for pytest. 
+# It's required to define the imports for bazel pytest, so that python/pytest can +# import "from score_metamodel" without issues. +py_library( + name = "score_metamodel", + srcs = glob( + ["score_metamodel/**/*.py"], + exclude = ["**/tests/**/*"], + ), + imports = ["extensions"], + visibility = ["//visibility:public"], +) + +score_py_pytest( + name = "score_metamodel_test", + size = "small", + srcs = glob(["score_metamodel/tests/**/*.py"]), + visibility = ["//visibility:public"], + deps = [":score_metamodel"] + all_requirements, +) + +# ───────────────────────── Source code linker ──────────────────────── +# For more information see documentation at score_source_code_linker/README.md +py_library( + name = "score_source_code_linker", + srcs = glob( + ["score_source_code_linker/**/*.py"], + exclude = ["score_source_code_linker/tests/*.py"], + ), + imports = ["."], + visibility = ["//visibility:public"], +) + +score_py_pytest( + name = "score_source_code_linker_test", + size = "small", + srcs = glob(["score_source_code_linker/tests/**/*.py"]), + deps = [ + ":score_source_code_linker", + ] + all_requirements, +) + +# Needed to make the file parser executeable and findable for the source_code_linker aspect +py_binary( + name = "parsed_source_files_for_source_code_linker", + srcs = ["score_source_code_linker/parse_source_files.py"], + visibility = ["//visibility:public"], +) diff --git a/src/extensions/__init__.py b/src/extensions/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/extensions/score_draw_uml_funcs/__init__.py b/src/extensions/score_draw_uml_funcs/__init__.py index de779f12..d7c8774f 100644 --- a/src/extensions/score_draw_uml_funcs/__init__.py +++ b/src/extensions/score_draw_uml_funcs/__init__.py @@ -311,9 +311,11 @@ def add_logical_interfaces( logical_iface = logical_iface_tmp[0] proc_logical_interfaces[logical_iface] = iface structure_text += gen_interface_element(logical_iface, all_needs, True) - linkage_text += 
f"{gen_link_text(all_needs[iface], '-u->', - all_needs[logical_iface], - 'implements')}\n" + linkage_text += f"{ + gen_link_text( + all_needs[iface], '-u->', all_needs[logical_iface], 'implements' + ) + }\n" else: print(f"{iface}: Not connected to any virtual interface") return structure_text, linkage_text, proc_logical_interfaces @@ -335,17 +337,17 @@ def add_used_interfaces( retval = get_hierarchy_text(impl_comp_str[0], all_needs) structure_text += retval[2] + retval[0] + retval[1] + retval[3] structure_text += gen_interface_element(iface, all_needs, True) - linkage_text += f"{gen_link_text(impl_comp, '-u->', - all_needs[iface], - 'implements')} \n" + linkage_text += f"{ + gen_link_text(impl_comp, '-u->', all_needs[iface], 'implements') + } \n" else: print(f"{iface}: No implementing component defined") structure_text += gen_interface_element(iface, all_needs, True) for comp in comps: - linkage_text += f"{gen_link_text(all_needs[comp], - '-d[#green]->', - all_needs[iface], 'uses')} \n" + linkage_text += f"{ + gen_link_text(all_needs[comp], '-d[#green]->', all_needs[iface], 'uses') + } \n" return structure_text, linkage_text @@ -397,8 +399,9 @@ def _generate_structure_and_components( if comps: impl_comp[iface] = comps[0] if im := impl_comp.get(iface): - structure_text += f"{gen_struct_element('component', - all_needs[im])}\n" + structure_text += ( + f"{gen_struct_element('component', all_needs[im])}\n" + ) else: logger.info(f"Interface {iface} could not be found") return structure_text, impl_comp @@ -413,13 +416,16 @@ def _generate_links( link_text = "" for iface in interfacelist: if imcomp := impl_comp.get(iface): - link_text += f"{gen_link_text({'id': 'Feature_User'}, '-d->', - all_needs[iface], - 'use')} \n" - link_text += f"{gen_link_text(all_needs[imcomp], - '-u->', - all_needs[iface], - 'implements')} \n" + link_text += f"{ + gen_link_text( + {'id': 'Feature_User'}, '-d->', all_needs[iface], 'use' + ) + } \n" + link_text += f"{ + gen_link_text( + 
all_needs[imcomp], '-u->', all_needs[iface], 'implements' + ) + } \n" else: logger.info(f"{need}: Interface {iface} could not be found") return link_text diff --git a/src/extensions/score_header_service/header_service.py b/src/extensions/score_header_service/header_service.py index b8df38de..78d43440 100644 --- a/src/extensions/score_header_service/header_service.py +++ b/src/extensions/score_header_service/header_service.py @@ -32,7 +32,7 @@ from sphinx_needs.data import SphinxNeedsData from sphinx_needs.services.base import BaseService -# req-traceability: GD__automatic_document_header_generation +# re-qtraceability: GD__automatic_document_header_generation APPROVER_TEAMS = ["automotive-score-committers"] diff --git a/src/extensions/score_layout/html_options.py b/src/extensions/score_layout/html_options.py index a5bbd2d3..80e85407 100644 --- a/src/extensions/score_layout/html_options.py +++ b/src/extensions/score_layout/html_options.py @@ -44,7 +44,7 @@ def return_html_theme_options(app: Sphinx) -> dict[str, object]: html_theme = "pydata_sphinx_theme" # "alabaster" -html_static_path = ["_tooling/assets", "_assets"] +html_static_path = ["src/assets", "_assets"] html_css_files = [ "css/score.css", "css/score_needs.css", diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 0415d108..e9cb3e02 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -92,14 +92,14 @@ def is_check_enabled(check: local_check_function | graph_check_function): # graph of other needs. for need in needs_all_needs.values(): for check in enabled_local_checks: - logger.info(f"Running local check {check} for need {need['id']}") + logger.debug(f"Running local check {check} for need {need['id']}") check(app, need, log) # Graph-Based checks: These warnings require a graph of all other needs to # be checked. 
needs = list(needs_all_needs.values()) for check in [c for c in graph_checks if is_check_enabled(c)]: - logger.info(f"Running graph check {check} for all needs") + logger.debug(f"Running graph check {check} for all needs") check(app, needs, log) if log.has_warnings: diff --git a/src/extensions/score_metamodel/checks/attributes_format.py b/src/extensions/score_metamodel/checks/attributes_format.py index bdbb3672..d5d75efe 100644 --- a/src/extensions/score_metamodel/checks/attributes_format.py +++ b/src/extensions/score_metamodel/checks/attributes_format.py @@ -17,7 +17,7 @@ from score_metamodel import CheckLogger, local_check -# req-Id: gd_req__req__attr_uid +# req-#id: gd_req__req__attr_uid @local_check def check_id_format(app: Sphinx, need: NeedsInfoType, log: CheckLogger): """ @@ -64,7 +64,7 @@ def check_id_length(app: Sphinx, need: NeedsInfoType, log: CheckLogger): log.warning_for_option(need, "id", msg) -# req-Id: gd_req__requirements_attr_title +# req-#id: gd_req__requirements_attr_title @local_check def check_title(app: Sphinx, need: NeedsInfoType, log: CheckLogger): """ @@ -85,7 +85,7 @@ def check_title(app: Sphinx, need: NeedsInfoType, log: CheckLogger): break -# req-Id: gd_req__req__attr_desc_weak +# req-#id: gd_req__req__attr_desc_weak @local_check def check_description(app: Sphinx, need: NeedsInfoType, log: CheckLogger): """ diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index 845334cd..fa7b898b 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -76,12 +76,12 @@ def validate_fields( ) -# req-Id: gd_req__req__attr_type -# req-Id: gd_req__requirements_attr_security -# req-Id: gd_req__req__attr_safety -# req-Id: gd_req__req__attr_status -# req-Id: gd_req__req__attr_rationale -# req-Id: gd_req__req__attr_mandatory +# req-#id: gd_req__req__attr_type +# req-#id: gd_req__requirements_attr_security +# 
req-#id: gd_req__req__attr_safety +# req-#id: gd_req__req__attr_status +# req-#id: gd_req__req__attr_rationale +# req-#id: gd_req__req__attr_mandatory @local_check def check_options( app: Sphinx, diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index dd1edb67..7ec02645 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -158,9 +158,9 @@ needs_types: safety: "^(QM|ASIL_B|ASIL_D)$" realizes: "^wp__.+$" # The following 3 guidance requirements enforce the requirement structure and attributes: - # req-Id: gd_req__req__structure - # req-Id: gd_req__requirements_attr_description - # req-Id: gd_req__req__linkage + # req-#Id: gd_req__req__structure + # req-#Id: gd_req__requirements_attr_description + # req-#Id: gd_req__req__linkage # Requirements stkh_req: title: "Stakeholder Requirement" @@ -190,7 +190,7 @@ needs_types: safety: "^(QM|ASIL_B|ASIL_D)$" status: "^(valid|invalid)$" mandatory_links: - # req-Id: gd_req__req__linkage_fulfill + # req-#id: gd_req__req__linkage_fulfill satisfies: "^stkh_req__.*$" optional_options: codelink: "^.*$" @@ -530,10 +530,10 @@ needs_extra_links: # - condition: defines the condition that should be checked # - [and / or / xor / not] ############################################################## -# req-Id: gd_req__req__linkage_architecture -# req-Id: gd_req__req__linkage_safety +# req-#id: gd_req__req__linkage_architecture +# req-#id: gd_req__req__linkage_safety graph_checks: - # req-Id: gd_req__req__linkage_safety + # req-#id: gd_req__req__linkage_safety req_safety_linkage: needs: include: "comp_req, feat_req" @@ -552,7 +552,7 @@ graph_checks: condition: "status == valid" check: satisfies: "status == valid" - # req-Id: gd_req__req__linkage_architecture + # req-#id: gd_req__req__linkage_architecture arch_safety_linkage: needs: include: "comp_req, feat_req" diff --git a/src/extensions/score_metamodel/tests/__init__.py 
b/src/extensions/score_metamodel/tests/__init__.py index 9968e2bc..a915b2d1 100644 --- a/src/extensions/score_metamodel/tests/__init__.py +++ b/src/extensions/score_metamodel/tests/__init__.py @@ -16,7 +16,7 @@ import pytest from sphinx.util.logging import SphinxLoggerAdapter -from docs._tooling.extensions.score_metamodel import CheckLogger, NeedsInfoType +from src.extensions.score_metamodel import CheckLogger, NeedsInfoType def fake_check_logger(): diff --git a/src/extensions/score_metamodel/tests/test_attributes_format.py b/src/extensions/score_metamodel/tests/test_attributes_format.py index b15871ca..16ae228a 100644 --- a/src/extensions/score_metamodel/tests/test_attributes_format.py +++ b/src/extensions/score_metamodel/tests/test_attributes_format.py @@ -14,13 +14,13 @@ from sphinx.application import Sphinx -from docs._tooling.extensions.score_metamodel.checks.attributes_format import ( +from src.extensions.score_metamodel.checks.attributes_format import ( check_description, check_id_format, check_id_length, check_title, ) -from docs._tooling.extensions.score_metamodel.tests import fake_check_logger, need +from src.extensions.score_metamodel.tests import fake_check_logger, need class TestId: @@ -113,7 +113,7 @@ def test_check_id_length_negative(self): check_id_length(app, need_1, logger) logger.assert_warning( f"exceeds the maximum allowed length of 45 characters " - f"(current length: {len(need_1["id"])}).", + f"(current length: {len(need_1['id'])}).", expect_location=False, ) diff --git a/src/extensions/score_metamodel/tests/test_check_options.py b/src/extensions/score_metamodel/tests/test_check_options.py index 7db2cf62..24138aee 100644 --- a/src/extensions/score_metamodel/tests/test_check_options.py +++ b/src/extensions/score_metamodel/tests/test_check_options.py @@ -17,11 +17,11 @@ import pytest from sphinx.application import Sphinx -from docs._tooling.extensions.score_metamodel.checks.check_options import ( +from 
src.extensions.score_metamodel.checks.check_options import ( check_extra_options, check_options, ) -from docs._tooling.extensions.score_metamodel.tests import fake_check_logger, need +from src.extensions.score_metamodel.tests import fake_check_logger, need @pytest.mark.metadata( diff --git a/src/extensions/score_metamodel/tests/test_rules_file_based.py b/src/extensions/score_metamodel/tests/test_rules_file_based.py index 89aada86..38301747 100644 --- a/src/extensions/score_metamodel/tests/test_rules_file_based.py +++ b/src/extensions/score_metamodel/tests/test_rules_file_based.py @@ -20,14 +20,14 @@ import pytest from sphinx.testing.util import SphinxTestApp -from docs._tooling.extensions.score_metamodel import ( +from src.extensions.score_metamodel import ( graph_check_function, local_check_function, ) RST_DIR = Path(__file__).absolute().parent / "rst" DOCS_DIR = Path(__file__).absolute().parent.parent.parent.parent.parent -TOOLING_DIR_NAME = "_tooling" +TOOLING_DIR_NAME = "src" ### List of relative paths of all rst files in RST_DIR RST_FILES = [str(f.relative_to(RST_DIR)) for f in Path(RST_DIR).rglob("*.rst")] diff --git a/src/extensions/score_metamodel/tests/test_standards.py b/src/extensions/score_metamodel/tests/test_standards.py index 54315e46..6cb1438e 100644 --- a/src/extensions/score_metamodel/tests/test_standards.py +++ b/src/extensions/score_metamodel/tests/test_standards.py @@ -15,8 +15,8 @@ # from sphinx.application import Sphinx -from docs._tooling.extensions.score_metamodel.checks import standards -from docs._tooling.extensions.score_metamodel.tests import need # ,fake_check_logger +from src.extensions.score_metamodel.checks import standards +from src.extensions.score_metamodel.tests import need # ,fake_check_logger class TestStandards: @@ -632,9 +632,7 @@ def test_my_pie_workproducts_contained_in_exactly_one_workflow(self): 1, 1, ], "For function my_pie_workproducts_contained_in_exactly_one_workflow expected" - f"[1, 1, 1] but got { - results - }" + 
f"[1, 1, 1] but got {results}" def test_get_standards_needs(self): """ diff --git a/src/extensions/score_source_code_linker/README.md b/src/extensions/score_source_code_linker/README.md index a90ee4a2..12c72197 100644 --- a/src/extensions/score_source_code_linker/README.md +++ b/src/extensions/score_source_code_linker/README.md @@ -108,7 +108,7 @@ Tags are defined inside `parse_source_files.py` You can use them like this: ```python -# req-traceability: +# req-#traceability: def dummy_function(): pass ``` diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index 9e9d1710..7619680c 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -11,22 +11,35 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* import json +import os from copy import deepcopy +from pathlib import Path +from pprint import pprint +from score_source_code_linker.parse_source_files import GITHUB_BASE_URL from sphinx.application import Sphinx from sphinx.environment import BuildEnvironment from sphinx_needs.data import SphinxNeedsData from sphinx_needs.logging import get_logger -from score_source_code_linker.parse_source_files import GITHUB_BASE_URL - LOGGER = get_logger(__name__) +LOGGER.setLevel("DEBUG") def setup(app: Sphinx) -> dict[str, str | bool]: - app.add_config_value("source_code_linker_file", "", rebuild="env") - if app.config.source_code_linker_file: - LOGGER.info("Loading source code linker...", type="score_source_code_linker") + # Extension: score_source_code_linker + app.add_config_value("disable_source_code_linker", False, rebuild="env") + app.add_config_value("score_source_code_linker_file_overwrite", "", rebuild="env") + # TODO: can we detect live_preview & esbonio here? 
Until then we have a flag: + if app.config.disable_source_code_linker: + LOGGER.info( + "INFO: Disabled source code linker. Not loading extension.", + type="score_source_code_linker", + ) + else: + LOGGER.debug( + "INFO: Loading source code linker...", type="score_source_code_linker" + ) app.connect("env-updated", add_source_link) return { "version": "0.1", @@ -35,7 +48,7 @@ def setup(app: Sphinx) -> dict[str, str | bool]: } -# req-Id: gd_req__req__attr_impl +# re-qid: gd_req__req__attr_impl def add_source_link(app: Sphinx, env: BuildEnvironment) -> None: """ 'Main' function that facilitates the running of all other functions @@ -46,41 +59,72 @@ def add_source_link(app: Sphinx, env: BuildEnvironment) -> None: env: Buildenvironment, this is filled automatically app: Sphinx app application, this is filled automatically """ + Needs_Data = SphinxNeedsData(env) needs = Needs_Data.get_needs_mutable() needs_copy = deepcopy(needs) - json_paths = [app.config.source_code_linker_file] - for path in json_paths: - try: - with open(path) as f: - gh_json = json.load(f) - for id, link in gh_json.items(): - id = id.strip() - try: - # NOTE: Removing & adding the need is important to make sure - # the needs gets 're-evaluated'. - need = needs_copy[id] # NeedsInfoType - Needs_Data.remove_need(need["id"]) - # extra_options are only available at runtime - need["source_code_link"] = ",".join(link) # type: ignore - Needs_Data.add_need(need) - except KeyError: - # NOTE: manipulating link to remove git-hash, - # making the output file location more readable - files = [ - x.replace(GITHUB_BASE_URL, "").split("/", 1)[-1] for x in link - ] - LOGGER.warning( - f"Could not find {id} in the needs id's. " - f"Found in file(s): {files}", - type="score_source_code_linker", - ) - except Exception as e: - LOGGER.warning( - f"An unexpected error occurred while adding source_code_links to needs." 
- f"Error: {e}", - type="score_source_code_linker", - ) - LOGGER.warning( - f"Reading file: {path} right now", type="score_source_code_linker" - ) + # bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles/_main/process-docs/score_source_code_parser.json + # bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles/_main/tooling/extensions/score_source_code_linker/__init__.py + # bazel-out/k8-fastbuild/bin/process-docs/score_source_code_parser.json + # /home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles/score_source_code_parser.json + # + + ## -> build: + + # bazel-out/k8-opt-exec-ST-d57f47055a04/bin/tooling/sphinx_build.runfiles/_main/tooling/extensions/score_source_code_linker/__init__.py + + # Tried with build + # bazel-out/k8-fastbuild/bin/process-docs/_docs/_sources/process-docs/score_source_code_parser.json + + # SEARCHING: + # bazel-out/k8-opt-exec-ST-d57f47055a04/bin/process-docs/score_source_code_parser.json + p5 = Path(__file__).parents[5] + + # bazel-out/k8-opt-exec-ST-d57f47055a04/bin/tooling + # LOGGER.info("DEBUG: ============= CONF DIR===========") + # LOGGER.info(f"DEBUG: {Path(app.confdir).name}") + # LOGGER.info("DEBUG: =============================") + if str(p5).endswith("src"): + LOGGER.info("DEBUG: WE ARE IN THE IF") + path = str(p5.parent / Path(app.confdir).name / "score_source_code_parser.json") + else: + LOGGER.info("DEBUG: WE ARE IN THE ELSE") + path = str(p5 / "score_source_code_parser.json") + # LOGGER.info("DEBUG============= FILE PATH OF JSON (where we search)===========") + # LOGGER.info(f"DEBUG: {path}") + # LOGGER.info("DEBUG: =============================") + if app.config.score_source_code_linker_file_overwrite: + path = app.config.score_source_code_linker_file_overwrite + # json_paths = [str(Path(__file__).parent.parent.parent.parent.parent.parent/"score_source_code_parser.json")] + # json_paths = 
[app.config.source_code_linker_file] + + try: + with open(path) as f: + gh_json = json.load(f) + for id, link in gh_json.items(): + id = id.strip() + try: + # NOTE: Removing & adding the need is important to make sure + # the needs gets 're-evaluated'. + need = needs_copy[id] # NeedsInfoType + Needs_Data.remove_need(need["id"]) + need["source_code_link"] = ",".join(link) + Needs_Data.add_need(need) + except KeyError: + # NOTE: manipulating link to remove git-hash, + # making the output file location more readable + files = [x.replace(GITHUB_BASE_URL, "").split("/", 1)[-1] for x in link] + LOGGER.warning( + f"Could not find {id} in the needs id's. " + + f"Found in file(s): {files}", + type="score_source_code_linker", + ) + except Exception as e: + LOGGER.warning( + f"An unexpected error occurred while adding source_code_links to needs." + + f"Error: {e}", + type="score_source_code_linker", + ) + LOGGER.warning( + f"Reading file: {path} right now", type="score_source_code_linker" + ) diff --git a/src/extensions/score_source_code_linker/collect_source_files.bzl b/src/extensions/score_source_code_linker/collect_source_files.bzl index ee946b5c..5dff0503 100755 --- a/src/extensions/score_source_code_linker/collect_source_files.bzl +++ b/src/extensions/score_source_code_linker/collect_source_files.bzl @@ -130,7 +130,7 @@ parse_source_files_for_needs_links = rule( ), "_source_files_parser": attr.label( # TODO: rename to source_files_parser in next PR - default = Label("//docs:parsed_source_files_for_source_code_linker"), + default = Label(":parsed_source_files_for_source_code_linker"), executable = True, cfg = "exec", ), @@ -141,11 +141,3 @@ parse_source_files_for_needs_links = rule( ], doc = "Rule that collects and parses source files for linking documentation. 
(Internal)", ) - -# ----------------------------------------------------------------------------- -# Backwards compatibility -# ----------------------------------------------------------------------------- -# This should be removed once all references have been updated. -def collect_source_files_for_score_source_code_linker(deps, name): - print("DEPRECATED: Use `parse_source_files_for_needs_links` instead.") - parse_source_files_for_needs_links(srcs_and_deps = deps, name = name) diff --git a/src/extensions/score_source_code_linker/tests/test_source_link.py b/src/extensions/score_source_code_linker/tests/test_source_link.py index e95418eb..2b95d705 100644 --- a/src/extensions/score_source_code_linker/tests/test_source_link.py +++ b/src/extensions/score_source_code_linker/tests/test_source_link.py @@ -38,7 +38,9 @@ def _create_app( (src_dir / "conf.py").write_text(conf_content) (src_dir / "index.rst").write_text(rst_content) - (src_dir / "requierments.txt").write_text(json.dumps(requierments_text)) + (src_dir / "score_source_code_parser.json").write_text( + json.dumps(requierments_text) + ) return SphinxTestApp( freshenv=True, @@ -48,7 +50,9 @@ def _create_app( buildername="html", warningiserror=True, confoverrides={ - "source_code_linker_file": str(src_dir / "requierments.txt") + "score_source_code_linker_file_overwrite": str( + src_dir / "score_source_code_parser.json" + ) }, ) diff --git a/src/find_runfiles/__init__.py b/src/find_runfiles/__init__.py new file mode 100644 index 00000000..4a87da6b --- /dev/null +++ b/src/find_runfiles/__init__.py @@ -0,0 +1,122 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +import logging +import os +import sys +from pathlib import Path + +logger = logging.getLogger(__name__) +logger.setLevel(logging.DEBUG) + + +def _log_debug(message: str): + # TODO: why does logger not print anything? + logger.debug(message) + print(message) + + +def find_git_root(): + # TODO: is __file__ ever resolved into the bazel cache directories? + # Then this function will not work! + # TODO: use os.getenv("BUILD_WORKSPACE_DIRECTORY")? + git_root = Path(__file__).resolve() + while not (git_root / ".git").exists(): + git_root = git_root.parent + if git_root == Path("/"): + sys.exit( + "Could not find git root. Please run this script from the " + "root of the repository." + ) + return git_root + + +def get_runfiles_dir_impl( + cwd: Path, + conf_dir: Path, + env_runfiles: Path | None, + git_root: Path, +) -> Path: + """Functional (and therefore testable) logic to determine the runfiles directory.""" + + _log_debug( + "get_runfiles_dir_impl(\n" + f" {cwd=},\n" + f" {conf_dir=},\n" + f" {env_runfiles=},\n" + f" {git_root=}\n" + ")" + ) + + if env_runfiles: + # Runfiles are only available when running in Bazel. + # bazel build and bazel run are both supported. + # i.e. `bazel build //docs:docs` and `bazel run //docs:incremental`. + _log_debug("Using env[runfiles] to find the runfiles...") + + if env_runfiles.is_absolute(): + # In case of `bazel run` it will point to the global cache directory, which + # has a new hash every time. And it's not pretty. + # However `bazel-out` is a symlink to that same cache directory! 
+ parts = str(env_runfiles).split("/bazel-out/") + if len(parts) != 2: + # This will intentionally also fail if "bazel-out" appears multiple + # times in the path. Will be fixed on demand only. + sys.exit("Could not find bazel-out in runfiles path.") + runfiles_dir = git_root / Path("bazel-out") / parts[1] + _log_debug(f"Made runfiles dir pretty: {runfiles_dir}") + else: + runfiles_dir = git_root / env_runfiles + + else: + # The only way to land here is when running from within the virtual + # environment created by the `:ide_support` rule. + # i.e. esbonio or manual sphinx-build execution within the virtual + # environment. + _log_debug("Running outside bazel.") + + print(f"{git_root=}") + + # TODO: "process-docs" is in SOURCE_DIR!! + runfiles_dir = ( + Path(git_root) / "bazel-bin" / "process-docs" / "ide_support.runfiles" + ) + + return runfiles_dir + + +def get_runfiles_dir() -> Path: + """Runfiles directory relative to conf.py""" + + # FIXME CONF_DIRECTORY is our invention. When running from esbonio, this is not + # set. It seems to provide app.confdir instead... + conf_dir = os.getenv("CONF_DIRECTORY") + assert conf_dir + + env_runfiles = os.getenv("RUNFILES_DIR") + + runfiles = Path( + get_runfiles_dir_impl( + cwd=Path(os.getcwd()), + conf_dir=Path(conf_dir), + env_runfiles=Path(env_runfiles) if env_runfiles else None, + git_root=find_git_root(), + ) + ) + + if not runfiles.exists(): + sys.exit( + f"Could not find runfiles at {runfiles}. Have a look at " + "README.md for instructions on how to build docs." 
+ ) + + return runfiles diff --git a/src/find_runfiles/test_find_runfiles.py b/src/find_runfiles/test_find_runfiles.py new file mode 100644 index 00000000..3ed2fc9d --- /dev/null +++ b/src/find_runfiles/test_find_runfiles.py @@ -0,0 +1,94 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +from pathlib import Path + +# TODO: why is there an __init__.py file in tooling? +from src import find_runfiles + + +def get_runfiles_dir_impl( + cwd: str, conf_dir: str, env_runfiles: str | None, git_root: str +): + return str( + find_runfiles.get_runfiles_dir_impl( + cwd=Path(cwd), + conf_dir=Path(conf_dir), + env_runfiles=Path(env_runfiles) if env_runfiles else None, + git_root=Path(git_root), + ) + ) + + +def test_run_incremental(): + """bazel run //process-docs:incremental""" + # in incremental.py: + assert ( + get_runfiles_dir_impl( + cwd="/home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles/_main", + conf_dir="process-docs", + env_runfiles="/home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles", + git_root="/workspaces/process", + ) + == "/workspaces/process/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles" + ) + + # in conf.py: + assert ( + get_runfiles_dir_impl( + cwd="/workspaces/process/process-docs", + conf_dir="process-docs", + 
env_runfiles="/home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles", + git_root="/workspaces/process", + ) + == "/workspaces/process/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles" + ) + + +def test_build_incremental_and_exec_it(): + """bazel build //process-docs:incremental && bazel-bin/process-docs/incremental""" + assert ( + get_runfiles_dir_impl( + cwd="/workspaces/process/process-docs", + conf_dir="process-docs", + env_runfiles="bazel-bin/process-docs/incremental.runfiles", + git_root="/workspaces/process", + ) + == "/workspaces/process/bazel-bin/process-docs/incremental.runfiles" + ) + + +def test_esbonio_old(): + """Observed with esbonio 0.x""" + assert ( + get_runfiles_dir_impl( + cwd="/workspaces/process/process-docs", + conf_dir="process-docs", + env_runfiles=None, + git_root="/workspaces/process", + ) + == "/workspaces/process/bazel-bin/process-docs/ide_support.runfiles" + ) + + +def test3(): + # docs named differently, just to make sure nothing is hardcoded + # bazel run //other-docs:incremental + assert ( + get_runfiles_dir_impl( + cwd="/workspaces/process/other-docs", + conf_dir="other-docs", + env_runfiles="/home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/other-docs/incremental.runfiles", + git_root="/workspaces/process", + ) + == "/workspaces/process/bazel-out/k8-fastbuild/bin/other-docs/incremental.runfiles" + ) diff --git a/src/incremental.py b/src/incremental.py index 2f4738e4..1ac6cb17 100644 --- a/src/incremental.py +++ b/src/incremental.py @@ -13,117 +13,90 @@ import argparse import logging +import itertools import os import sys +import json from pathlib import Path +from typing import Any import debugpy -from python.runfiles import Runfiles # type: ignore + +from python.runfiles import Runfiles from sphinx.cmd.build import main as sphinx_main from 
sphinx_autobuild.__main__ import main as sphinx_autobuild_main logger = logging.getLogger(__name__) -parser = argparse.ArgumentParser() -parser.add_argument( - "-dp", "--debug_port", help="port to listen to debugging client", default=5678 -) -parser.add_argument("--debug", help="Enable Debugging via debugpy", action="store_true") -args = parser.parse_args() -if args.debug: - debugpy.listen(("0.0.0.0", args.debug_port)) - logger.info("Waiting for client to connect on port: " + str(args.debug_port)) - debugpy.wait_for_client() - pass +logger.debug("DEBUG: CWD: ", os.getcwd()) +logger.debug("DEBUG: SOURCE_DIRECTORY: ", os.getenv("SOURCE_DIRECTORY")) +logger.debug("DEBUG: RUNFILES_DIR: ", os.getenv("RUNFILES_DIR")) def get_env(name: str) -> str: val = os.environ.get(name, None) - logger.debug(f"Env: {name} = {val}") + logger.debug(f"DEBUG: Env: {name} = {val}") if val is None: raise ValueError(f"Environment variable {name} is not set") return val -def get_runfiles_dir() -> Path: - if r := Runfiles.Create(): - # Runfiles are only available when running in Bazel. - # bazel build and bazel run are both supported. - # i.e. `bazel build //docs:docs` and `bazel run //docs:incremental`. - logger.info("Using runfiles to determine plantuml path.") - - runfiles_dir = Path(r.EnvVars()["RUNFILES_DIR"]) - - if not runfiles_dir.exists(): - sys.exit( - f"Could not find runfiles at {runfiles_dir}. Have a look at " - "README.md for instructions on how to build docs." - ) - - else: - # The only way to land here is when running from within the virtual - # environment created by the `docs:ide_support` rule in the BUILD file. - # i.e. esbonio or manual sphinx-build execution within the virtual - # environment. - # We'll still use the plantuml binary from the bazel build. - # But we need to find it first. - logger.info("Running outside bazel.") - - git_root = Path(__file__).resolve().parents[3] - assert ( - git_root / ".git" - ).exists(), f"Could not find git root. 
Assumed path: {git_root}" - - runfiles_dir = git_root / "bazel-bin" / "docs" / "ide_support.runfiles" - if not runfiles_dir.exists(): - sys.exit( - f"Could not find ide_support.runfiles at {runfiles_dir}. " - "Have a look at README.md for instructions on how to build docs." - ) - - return runfiles_dir - - -# Registering a default value -source_code_linker_file = "" - -runfiles_dir = get_runfiles_dir() -# runfiles_dir points to a cache directory which has a new hash every time. -# Use the relative path that is available from workspace root. -relative_path = Path("bazel-out") / str(runfiles_dir).split("/bazel-out/", 1)[-1] - - -# Asset_dir is interpreted by sphinx. Paths are relative conf.py (conf_dir) -assets_dir_prefix = str(Path("..") / relative_path) + "/" + "_main/docs" - - -workspace = os.getenv("BUILD_WORKSPACE_DIRECTORY") -if workspace: - os.chdir(workspace) - - -base_arguments = [ - get_env("SOURCE_DIRECTORY"), - get_env("BUILD_DIRECTORY"), - "-W", # treat warning as errors - "--keep-going", # do not abort after one error - "-T", # show details in case of errors in extensions - "--jobs", - "auto", - "--conf-dir", - get_env("CONF_DIRECTORY"), -] - - -action = get_env("ACTION") -if action == "live_preview": - sphinx_autobuild_main(base_arguments) -else: - filename = get_env("SOURCE_CODE_LINKS") - source_code_linker_file = str(relative_path.parent) + "/" + filename - incremental_args = base_arguments + [ - # Overwriting config values - f"--define=source_code_linker_file={source_code_linker_file}", - f"--define=html_static_path={assets_dir_prefix}/_assets,{assets_dir_prefix}/_tooling/assets", +def transform_env_str_to_dict(external_needs_source: str) -> list[dict[str, str]]: + """ + Transforms the 'string' we get from 'docs.bzl' back into something we can parse easliy inside sphinx/python + !! HACK: This truly isn't great !! 
+ """ + l_dict = [] + x = [ + x.split(",") + for x in external_needs_source.replace("]", "") + .replace("[", "") + .replace("{", "") + .split("}") ] - sphinx_main(incremental_args) + for d in x: + b = [a.split(":", 1) for a in d if len(d) > 1] + l = {a[0]: a[1] for a in b} + if l: + l_dict.append(l) + return l_dict + + +if __name__ == "__main__": + # Add debuging functionality + parser = argparse.ArgumentParser() + parser.add_argument( + "-dp", "--debug_port", help="port to listen to debugging client", default=5678 + ) + parser.add_argument( + "--debug", help="Enable Debugging via debugpy", action="store_true" + ) + args = parser.parse_args() + if args.debug: + debugpy.listen(("0.0.0.0", args.debug_port)) + logger.info("Waiting for client to connect on port: " + str(args.debug_port)) + debugpy.wait_for_client() + + workspace = os.getenv("BUILD_WORKSPACE_DIRECTORY") + if workspace: + os.chdir(workspace) + + base_arguments = [ + get_env("SOURCE_DIRECTORY"), + get_env("BUILD_DIRECTORY"), + "-W", # treat warning as errors + "--keep-going", # do not abort after one error + "-T", # show details in case of errors in extensions + "--jobs", + "auto", + "--conf-dir", + get_env("CONF_DIRECTORY"), + f"--define=external_needs_source={json.dumps(transform_env_str_to_dict(get_env('EXTERNAL_NEEDS_INFO')))}", + ] + action = get_env("ACTION") + if action == "live_preview": + sphinx_autobuild_main( + base_arguments + ["--define=disable_source_code_linker=True"] + ) + else: + sphinx_main(base_arguments) diff --git a/src/requirements.txt b/src/requirements.txt index 3c34e3cc..c8d7046c 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -20,7 +20,4 @@ sphinx-needs[plotting] esbonio<1 # Although not required in all targets, we want pytest within ide_support to run tests from the IDE. 
-pytest -pytest-cov - debugpy diff --git a/src/requirements_lock.txt b/src/requirements_lock.txt index 868d76e1..6b5859b0 100644 --- a/src/requirements_lock.txt +++ b/src/requirements_lock.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile with Python 3.12 # by the following command: # -# bazel run //docs:requirements.update +# bazel run //tooling:requirements.update # --extra-index-url https://pypi.org/simple/ @@ -277,70 +277,6 @@ contourpy==1.3.1 \ --hash=sha256:efa874e87e4a647fd2e4f514d5e91c7d493697127beb95e77d2f7561f6905bd9 \ --hash=sha256:f611e628ef06670df83fce17805c344710ca5cde01edfdc72751311da8585375 # via matplotlib -coverage[toml]==7.6.10 \ - --hash=sha256:05fca8ba6a87aabdd2d30d0b6c838b50510b56cdcfc604d40760dae7153b73d9 \ - --hash=sha256:0aa9692b4fdd83a4647eeb7db46410ea1322b5ed94cd1715ef09d1d5922ba87f \ - --hash=sha256:0c807ca74d5a5e64427c8805de15b9ca140bba13572d6d74e262f46f50b13273 \ - --hash=sha256:0d7a2bf79378d8fb8afaa994f91bfd8215134f8631d27eba3e0e2c13546ce994 \ - --hash=sha256:0f460286cb94036455e703c66988851d970fdfd8acc2a1122ab7f4f904e4029e \ - --hash=sha256:204a8238afe787323a8b47d8be4df89772d5c1e4651b9ffa808552bdf20e1d50 \ - --hash=sha256:2396e8116db77789f819d2bc8a7e200232b7a282c66e0ae2d2cd84581a89757e \ - --hash=sha256:254f1a3b1eef5f7ed23ef265eaa89c65c8c5b6b257327c149db1ca9d4a35f25e \ - --hash=sha256:26bcf5c4df41cad1b19c84af71c22cbc9ea9a547fc973f1f2cc9a290002c8b3c \ - --hash=sha256:27c6e64726b307782fa5cbe531e7647aee385a29b2107cd87ba7c0105a5d3853 \ - --hash=sha256:299e91b274c5c9cdb64cbdf1b3e4a8fe538a7a86acdd08fae52301b28ba297f8 \ - --hash=sha256:2bcfa46d7709b5a7ffe089075799b902020b62e7ee56ebaed2f4bdac04c508d8 \ - --hash=sha256:2ccf240eb719789cedbb9fd1338055de2761088202a9a0b73032857e53f612fe \ - --hash=sha256:32ee6d8491fcfc82652a37109f69dee9a830e9379166cb73c16d8dc5c2915165 \ - --hash=sha256:3f7b444c42bbc533aaae6b5a2166fd1a797cdb5eb58ee51a92bee1eb94a1e1cb \ - --hash=sha256:457574f4599d2b00f7f637a0700a6422243b3565509457b2dbd3f50703e11f59 \ 
- --hash=sha256:489a01f94aa581dbd961f306e37d75d4ba16104bbfa2b0edb21d29b73be83609 \ - --hash=sha256:4bcc276261505d82f0ad426870c3b12cb177752834a633e737ec5ee79bbdff18 \ - --hash=sha256:4e0de1e902669dccbf80b0415fb6b43d27edca2fbd48c74da378923b05316098 \ - --hash=sha256:4e4630c26b6084c9b3cb53b15bd488f30ceb50b73c35c5ad7871b869cb7365fd \ - --hash=sha256:4eea95ef275de7abaef630c9b2c002ffbc01918b726a39f5a4353916ec72d2f3 \ - --hash=sha256:507a20fc863cae1d5720797761b42d2d87a04b3e5aeb682ef3b7332e90598f43 \ - --hash=sha256:54a5f0f43950a36312155dae55c505a76cd7f2b12d26abeebbe7a0b36dbc868d \ - --hash=sha256:55b201b97286cf61f5e76063f9e2a1d8d2972fc2fcfd2c1272530172fd28c359 \ - --hash=sha256:59af35558ba08b758aec4d56182b222976330ef8d2feacbb93964f576a7e7a90 \ - --hash=sha256:5c912978f7fbf47ef99cec50c4401340436d200d41d714c7a4766f377c5b7b78 \ - --hash=sha256:656c82b8a0ead8bba147de9a89bda95064874c91a3ed43a00e687f23cc19d53a \ - --hash=sha256:6713ba4b4ebc330f3def51df1d5d38fad60b66720948112f114968feb52d3f99 \ - --hash=sha256:675cefc4c06e3b4c876b85bfb7c59c5e2218167bbd4da5075cbe3b5790a28988 \ - --hash=sha256:6f93531882a5f68c28090f901b1d135de61b56331bba82028489bc51bdd818d2 \ - --hash=sha256:714f942b9c15c3a7a5fe6876ce30af831c2ad4ce902410b7466b662358c852c0 \ - --hash=sha256:79109c70cc0882e4d2d002fe69a24aa504dec0cc17169b3c7f41a1d341a73694 \ - --hash=sha256:7bbd8c8f1b115b892e34ba66a097b915d3871db7ce0e6b9901f462ff3a975377 \ - --hash=sha256:7ed2f37cfce1ce101e6dffdfd1c99e729dd2ffc291d02d3e2d0af8b53d13840d \ - --hash=sha256:7fb105327c8f8f0682e29843e2ff96af9dcbe5bab8eeb4b398c6a33a16d80a23 \ - --hash=sha256:89d76815a26197c858f53c7f6a656686ec392b25991f9e409bcef020cd532312 \ - --hash=sha256:9a7cfb50515f87f7ed30bc882f68812fd98bc2852957df69f3003d22a2aa0abf \ - --hash=sha256:9e1747bab246d6ff2c4f28b4d186b205adced9f7bd9dc362051cc37c4a0c7bd6 \ - --hash=sha256:9e80eba8801c386f72e0712a0453431259c45c3249f0009aff537a517b52942b \ - --hash=sha256:a01ec4af7dfeb96ff0078ad9a48810bb0cc8abcb0115180c6013a6b26237626c \ - 
--hash=sha256:a372c89c939d57abe09e08c0578c1d212e7a678135d53aa16eec4430adc5e690 \ - --hash=sha256:a3b204c11e2b2d883946fe1d97f89403aa1811df28ce0447439178cc7463448a \ - --hash=sha256:a534738b47b0de1995f85f582d983d94031dffb48ab86c95bdf88dc62212142f \ - --hash=sha256:a5e37dc41d57ceba70956fa2fc5b63c26dba863c946ace9705f8eca99daecdc4 \ - --hash=sha256:aa744da1820678b475e4ba3dfd994c321c5b13381d1041fe9c608620e6676e25 \ - --hash=sha256:ab32947f481f7e8c763fa2c92fd9f44eeb143e7610c4ca9ecd6a36adab4081bd \ - --hash=sha256:abb02e2f5a3187b2ac4cd46b8ced85a0858230b577ccb2c62c81482ca7d18852 \ - --hash=sha256:b330368cb99ef72fcd2dc3ed260adf67b31499584dc8a20225e85bfe6f6cfed0 \ - --hash=sha256:bc67deb76bc3717f22e765ab3e07ee9c7a5e26b9019ca19a3b063d9f4b874244 \ - --hash=sha256:c0b1818063dc9e9d838c09e3a473c1422f517889436dd980f5d721899e66f315 \ - --hash=sha256:c56e097019e72c373bae32d946ecf9858fda841e48d82df7e81c63ac25554078 \ - --hash=sha256:c7827a5bc7bdb197b9e066cdf650b2887597ad124dd99777332776f7b7c7d0d0 \ - --hash=sha256:ccc2b70a7ed475c68ceb548bf69cec1e27305c1c2606a5eb7c3afff56a1b3b27 \ - --hash=sha256:d37a84878285b903c0fe21ac8794c6dab58150e9359f1aaebbeddd6412d53132 \ - --hash=sha256:e2f0280519e42b0a17550072861e0bc8a80a0870de260f9796157d3fca2733c5 \ - --hash=sha256:e4ae5ac5e0d1e4edfc9b4b57b4cbecd5bc266a6915c500f358817a8496739247 \ - --hash=sha256:e67926f51821b8e9deb6426ff3164870976fe414d033ad90ea75e7ed0c2e5022 \ - --hash=sha256:e78b270eadb5702938c3dbe9367f878249b5ef9a2fcc5360ac7bff694310d17b \ - --hash=sha256:ea3c8f04b3e4af80e17bab607c386a830ffc2fb88a5484e1df756478cf70d1d3 \ - --hash=sha256:ec22b5e7fe7a0fa8509181c4aac1db48f3dd4d3a566131b313d1efc102892c18 \ - --hash=sha256:f4f620668dbc6f5e909a0946a877310fb3d57aea8198bde792aae369ee1c23b5 \ - --hash=sha256:fd34e7b3405f0cc7ab03d54a334c17a9e802897580d964bd8c2001f4b9fd488f - # via pytest-cov cryptography==44.0.2 \ --hash=sha256:04abd71114848aa25edb28e225ab5f268096f44cf0127f3d36975bdf1bdf3390 \ 
--hash=sha256:0529b1d5a0105dd3731fa65680b45ce49da4d8115ea76e9da77a875396727b41 \ @@ -409,7 +345,7 @@ debugpy==1.8.12 \ --hash=sha256:cbbd4149c4fc5e7d508ece083e78c17442ee13b0e69bfa6bd63003e486770f45 \ --hash=sha256:f30b03b0f27608a0b26c75f0bb8a880c752c0e0b01090551b9d87c7d783e2069 \ --hash=sha256:fdb3c6d342825ea10b90e43d7f20f01535a72b3a1997850c0c3cefa5c27a4a2c - # via -r docs/_tooling/requirements.txt + # via -r tooling/requirements.in deprecated==1.2.18 \ --hash=sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d \ --hash=sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec @@ -423,7 +359,7 @@ docutils==0.21.2 \ esbonio==0.16.5 \ --hash=sha256:04ba926e3603f7b1fde1abc690b47afd60749b64b1029b6bce8e1de0bb284921 \ --hash=sha256:acab2e16c6cf8f7232fb04e0d48514ce50566516b1f6fcf669ccf2f247e8b10f - # via -r docs/_tooling/requirements.txt + # via -r tooling/requirements.in fonttools==4.56.0 \ --hash=sha256:003548eadd674175510773f73fb2060bb46adb77c94854af3e0cc5bc70260049 \ --hash=sha256:0073b62c3438cf0058488c002ea90489e8801d3a7af5ce5f7c05c105bee815c3 \ @@ -490,10 +426,12 @@ imagesize==1.4.1 \ --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \ --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a # via sphinx -iniconfig==2.0.0 \ - --hash=sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3 \ - --hash=sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 - # via pytest +iniconfig==2.1.0 \ + --hash=sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7 \ + --hash=sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760 + # via + # -r /home/maxi/.cache/bazel/_bazel_maxi/b15c1004cf2362548ae969eb240791a7/external/score_python_basics~/requirements.txt + # pytest jinja2==3.1.5 \ --hash=sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb \ 
--hash=sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb @@ -754,6 +692,7 @@ packaging==24.2 \ --hash=sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759 \ --hash=sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f # via + # -r /home/maxi/.cache/bazel/_bazel_maxi/b15c1004cf2362548ae969eb240791a7/external/score_python_basics~/requirements.txt # matplotlib # pytest # sphinx @@ -837,7 +776,9 @@ platformdirs==4.3.6 \ pluggy==1.5.0 \ --hash=sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1 \ --hash=sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669 - # via pytest + # via + # -r /home/maxi/.cache/bazel/_bazel_maxi/b15c1004cf2362548ae969eb240791a7/external/score_python_basics~/requirements.txt + # pytest pycparser==2.22 \ --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ --hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc @@ -845,11 +786,11 @@ pycparser==2.22 \ pydata-sphinx-theme==0.16.1 \ --hash=sha256:225331e8ac4b32682c18fcac5a57a6f717c4e632cea5dd0e247b55155faeccde \ --hash=sha256:a08b7f0b7f70387219dc659bff0893a7554d5eb39b59d3b8ef37b8401b7642d7 - # via -r docs/_tooling/requirements.txt + # via -r tooling/requirements.in pygithub==2.6.1 \ --hash=sha256:6f2fa6d076ccae475f9fc392cc6cdbd54db985d4f69b8833a28397de75ed6ca3 \ --hash=sha256:b5c035392991cca63959e9453286b41b54d83bf2de2daa7d7ff7e4312cebf3bf - # via -r docs/_tooling/requirements.txt + # via -r tooling/requirements.in pygls==1.3.1 \ --hash=sha256:140edceefa0da0e9b3c533547c892a42a7d2fd9217ae848c330c53d266a55018 \ --hash=sha256:6e00f11efc56321bdeb6eac04f6d86131f654c7d49124344a9ebb968da3dd91e @@ -885,16 +826,10 @@ pyspellchecker==0.8.2 \ --hash=sha256:2b026be14a162ba810bdda8e5454c56e364f42d3b9e14aeff31706e5ebcdc78f \ --hash=sha256:4fee22e1859c5153c3bc3953ac3041bf07d4541520b7e01901e955062022290a # via esbonio -pytest==8.3.4 \ - 
--hash=sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6 \ - --hash=sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761 - # via - # -r docs/_tooling/requirements.txt - # pytest-cov -pytest-cov==6.0.0 \ - --hash=sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35 \ - --hash=sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0 - # via -r docs/_tooling/requirements.txt +pytest==8.3.5 \ + --hash=sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820 \ + --hash=sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845 + # via -r /home/maxi/.cache/bazel/_bazel_maxi/b15c1004cf2362548ae969eb240791a7/external/score_python_basics~/requirements.txt python-dateutil==2.9.0.post0 \ --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 @@ -1027,7 +962,7 @@ rpds-py==0.22.3 \ ruamel-yaml==0.18.10 \ --hash=sha256:20c86ab29ac2153f80a428e1254a8adf686d3383df04490514ca3b79a362db58 \ --hash=sha256:30f22513ab2301b3d2b577adc121c6471f28734d3d9728581245f1e76468b4f1 - # via -r docs/_tooling/requirements.txt + # via -r tooling/requirements.in ruamel-yaml-clib==0.2.12 \ --hash=sha256:040ae85536960525ea62868b642bdb0c2cc6021c9f9d507810c0c604e66f5a7b \ --hash=sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4 \ @@ -1096,7 +1031,7 @@ sphinx==8.1.3 \ --hash=sha256:09719015511837b76bf6e03e42eb7595ac8c2e41eeb9c29c5b755c6b677992a2 \ --hash=sha256:43c1911eecb0d3e161ad78611bc905d1ad0e523e4ddc202a58a821773dc4c927 # via - # -r docs/_tooling/requirements.txt + # -r tooling/requirements.in # esbonio # pydata-sphinx-theme # sphinx-autobuild @@ -1108,7 +1043,7 @@ sphinx==8.1.3 \ sphinx-autobuild==2024.10.3 \ --hash=sha256:158e16c36f9d633e613c9aaf81c19b0fc458ca78b112533b20dafcda430d60fa \ --hash=sha256:248150f8f333e825107b6d4b86113ab28fa51750e5f9ae63b59dc339be951fb1 - # via -r 
docs/_tooling/requirements.txt + # via -r tooling/requirements.in sphinx-data-viewer==0.1.5 \ --hash=sha256:a7d5e58613562bb745380bfe61ca8b69997998167fd6fa9aea55606c9a4b17e4 \ --hash=sha256:b74b1d304c505c464d07c7b225ed0d84ea02dcc88bc1c49cdad7c2275fbbdad4 @@ -1116,11 +1051,11 @@ sphinx-data-viewer==0.1.5 \ sphinx-design==0.6.1 \ --hash=sha256:b11f37db1a802a183d61b159d9a202314d4d2fe29c163437001324fe2f19549c \ --hash=sha256:b44eea3719386d04d765c1a8257caca2b3e6f8421d7b3a5e742c0fd45f84e632 - # via -r docs/_tooling/requirements.txt + # via -r tooling/requirements.in sphinx-needs[plotting]==4.2.0 \ --hash=sha256:f1ae86afb3d1d3f3c5d8cecffe740ae03f32a908212b4471866dff1a0738b252 \ --hash=sha256:f1f1f76adb30da787a472dff4b0da13b0e1a9c602e628501294cc9ae84d58357 - # via -r docs/_tooling/requirements.txt + # via -r tooling/requirements.in sphinxcontrib-applehelp==2.0.0 \ --hash=sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1 \ --hash=sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5 @@ -1143,7 +1078,7 @@ sphinxcontrib-jsmath==1.0.1 \ # via sphinx sphinxcontrib-plantuml==0.30 \ --hash=sha256:2a1266ca43bddf44640ae44107003df4490de2b3c3154a0d627cfb63e9a169bf - # via -r docs/_tooling/requirements.txt + # via -r tooling/requirements.in sphinxcontrib-qthelp==2.0.0 \ --hash=sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab \ --hash=sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb From 36a3f540d58512cc8c9cd0851788a4f91db8dfc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Tue, 29 Apr 2025 14:04:32 +0200 Subject: [PATCH 004/231] Updating & renaming requirements (#4) Ran update & updgrade on requirements. 
--- src/requirements.in | 23 + src/requirements.txt | 1384 ++++++++++++++++++++++++++++++++++++- src/requirements_lock.txt | 1336 ----------------------------------- 3 files changed, 1386 insertions(+), 1357 deletions(-) create mode 100644 src/requirements.in delete mode 100644 src/requirements_lock.txt diff --git a/src/requirements.in b/src/requirements.in new file mode 100644 index 00000000..c8d7046c --- /dev/null +++ b/src/requirements.in @@ -0,0 +1,23 @@ +--extra-index-url https://pypi.org/simple/ + +Sphinx + +# At least 4.2.0, as it fixes a bug in combination with esbonio live preview: +# https://github.com/useblocks/sphinx-needs/issues/1350 +sphinx-needs>=4.2.0 + +sphinxcontrib-plantuml +pydata-sphinx-theme +sphinx-design +sphinx-autobuild +ruamel.yaml +PyGithub +sphinx-needs[plotting] +# Until release of esbonio 1.x, we need to install it ourselves so the VS Code esbonio-extension +# can find it. +# esbonio >= 1 comes bundled with the esbonio-extension >= 1. +# esbonio<1 is required for the esbonio-extension <1 +esbonio<1 + +# Although not required in all targets, we want pytest within ide_support to run tests from the IDE. +debugpy diff --git a/src/requirements.txt b/src/requirements.txt index c8d7046c..93432373 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -1,23 +1,1365 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# bazel run //src:requirements.update +# --extra-index-url https://pypi.org/simple/ -Sphinx - -# At least 4.2.0, as it fixes a bug in combination with esbonio live preview: -# https://github.com/useblocks/sphinx-needs/issues/1350 -sphinx-needs>=4.2.0 - -sphinxcontrib-plantuml -pydata-sphinx-theme -sphinx-design -sphinx-autobuild -ruamel.yaml -PyGithub -sphinx-needs[plotting] -# Until release of esbonio 1.x, we need to install it ourselves so the VS Code esbonio-extension -# can find it. -# esbonio >= 1 comes bundled with the esbonio-extension >= 1. 
-# esbonio<1 is required for the esbonio-extension <1 -esbonio<1 - -# Although not required in all targets, we want pytest within ide_support to run tests from the IDE. -debugpy +accessible-pygments==0.0.5 \ + --hash=sha256:40918d3e6a2b619ad424cb91e556bd3bd8865443d9f22f1dcdf79e33c8046872 \ + --hash=sha256:88ae3211e68a1d0b011504b2ffc1691feafce124b845bd072ab6f9f66f34d4b7 + # via pydata-sphinx-theme +alabaster==1.0.0 \ + --hash=sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e \ + --hash=sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b + # via sphinx +anyio==4.9.0 \ + --hash=sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028 \ + --hash=sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c + # via + # starlette + # watchfiles +attrs==25.3.0 \ + --hash=sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3 \ + --hash=sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b + # via + # cattrs + # jsonschema + # lsprotocol + # referencing +babel==2.17.0 \ + --hash=sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d \ + --hash=sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2 + # via + # pydata-sphinx-theme + # sphinx +beautifulsoup4==4.13.4 \ + --hash=sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b \ + --hash=sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195 + # via pydata-sphinx-theme +cattrs==24.1.3 \ + --hash=sha256:981a6ef05875b5bb0c7fb68885546186d306f10f0f6718fe9b96c226e68821ff \ + --hash=sha256:adf957dddd26840f27ffbd060a6c4dd3b2192c5b7c2c0525ef1bd8131d8a83f5 + # via + # lsprotocol + # pygls +certifi==2025.4.26 \ + --hash=sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6 \ + --hash=sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3 + # via requests +cffi==1.17.1 \ + 
--hash=sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8 \ + --hash=sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2 \ + --hash=sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1 \ + --hash=sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15 \ + --hash=sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36 \ + --hash=sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824 \ + --hash=sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8 \ + --hash=sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36 \ + --hash=sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17 \ + --hash=sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf \ + --hash=sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc \ + --hash=sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3 \ + --hash=sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed \ + --hash=sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702 \ + --hash=sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1 \ + --hash=sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8 \ + --hash=sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903 \ + --hash=sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6 \ + --hash=sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d \ + --hash=sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b \ + --hash=sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e \ + --hash=sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be \ + --hash=sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c \ + --hash=sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683 \ + 
--hash=sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9 \ + --hash=sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c \ + --hash=sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8 \ + --hash=sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1 \ + --hash=sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4 \ + --hash=sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655 \ + --hash=sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67 \ + --hash=sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595 \ + --hash=sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0 \ + --hash=sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65 \ + --hash=sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41 \ + --hash=sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6 \ + --hash=sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401 \ + --hash=sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6 \ + --hash=sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3 \ + --hash=sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16 \ + --hash=sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93 \ + --hash=sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e \ + --hash=sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4 \ + --hash=sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964 \ + --hash=sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c \ + --hash=sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576 \ + --hash=sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0 \ + --hash=sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3 \ + 
--hash=sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662 \ + --hash=sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3 \ + --hash=sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff \ + --hash=sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5 \ + --hash=sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd \ + --hash=sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f \ + --hash=sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5 \ + --hash=sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14 \ + --hash=sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d \ + --hash=sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9 \ + --hash=sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7 \ + --hash=sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382 \ + --hash=sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a \ + --hash=sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e \ + --hash=sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a \ + --hash=sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4 \ + --hash=sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99 \ + --hash=sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87 \ + --hash=sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b + # via + # cryptography + # pynacl +charset-normalizer==3.4.1 \ + --hash=sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537 \ + --hash=sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa \ + --hash=sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a \ + --hash=sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294 \ + 
--hash=sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b \ + --hash=sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd \ + --hash=sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601 \ + --hash=sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd \ + --hash=sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4 \ + --hash=sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d \ + --hash=sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2 \ + --hash=sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313 \ + --hash=sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd \ + --hash=sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa \ + --hash=sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8 \ + --hash=sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1 \ + --hash=sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2 \ + --hash=sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496 \ + --hash=sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d \ + --hash=sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b \ + --hash=sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e \ + --hash=sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a \ + --hash=sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4 \ + --hash=sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca \ + --hash=sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78 \ + --hash=sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408 \ + --hash=sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5 \ + --hash=sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3 \ + 
--hash=sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f \ + --hash=sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a \ + --hash=sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765 \ + --hash=sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6 \ + --hash=sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146 \ + --hash=sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6 \ + --hash=sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9 \ + --hash=sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd \ + --hash=sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c \ + --hash=sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f \ + --hash=sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545 \ + --hash=sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176 \ + --hash=sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770 \ + --hash=sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824 \ + --hash=sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f \ + --hash=sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf \ + --hash=sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487 \ + --hash=sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d \ + --hash=sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd \ + --hash=sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b \ + --hash=sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534 \ + --hash=sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f \ + --hash=sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b \ + --hash=sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9 \ + 
--hash=sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd \ + --hash=sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125 \ + --hash=sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9 \ + --hash=sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de \ + --hash=sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11 \ + --hash=sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d \ + --hash=sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35 \ + --hash=sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f \ + --hash=sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda \ + --hash=sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7 \ + --hash=sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a \ + --hash=sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971 \ + --hash=sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8 \ + --hash=sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41 \ + --hash=sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d \ + --hash=sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f \ + --hash=sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757 \ + --hash=sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a \ + --hash=sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886 \ + --hash=sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77 \ + --hash=sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76 \ + --hash=sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247 \ + --hash=sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85 \ + --hash=sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb \ + 
--hash=sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7 \ + --hash=sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e \ + --hash=sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6 \ + --hash=sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037 \ + --hash=sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1 \ + --hash=sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e \ + --hash=sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807 \ + --hash=sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407 \ + --hash=sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c \ + --hash=sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12 \ + --hash=sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3 \ + --hash=sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089 \ + --hash=sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd \ + --hash=sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e \ + --hash=sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00 \ + --hash=sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616 + # via requests +click==8.1.8 \ + --hash=sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2 \ + --hash=sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a + # via uvicorn +colorama==0.4.6 \ + --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ + --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 + # via sphinx-autobuild +contourpy==1.3.2 \ + --hash=sha256:0475b1f6604896bc7c53bb070e355e9321e1bc0d381735421a2d2068ec56531f \ + --hash=sha256:106fab697af11456fcba3e352ad50effe493a90f893fca6c2ca5c033820cea92 \ + 
--hash=sha256:107ba8a6a7eec58bb475329e6d3b95deba9440667c4d62b9b6063942b61d7f16 \ + --hash=sha256:15ce6ab60957ca74cff444fe66d9045c1fd3e92c8936894ebd1f3eef2fff075f \ + --hash=sha256:1c48188778d4d2f3d48e4643fb15d8608b1d01e4b4d6b0548d9b336c28fc9b6f \ + --hash=sha256:3859783aefa2b8355697f16642695a5b9792e7a46ab86da1118a4a23a51a33d7 \ + --hash=sha256:3d80b2c0300583228ac98d0a927a1ba6a2ba6b8a742463c564f1d419ee5b211e \ + --hash=sha256:3f9e896f447c5c8618f1edb2bafa9a4030f22a575ec418ad70611450720b5b08 \ + --hash=sha256:434f0adf84911c924519d2b08fc10491dd282b20bdd3fa8f60fd816ea0b48841 \ + --hash=sha256:49b65a95d642d4efa8f64ba12558fcb83407e58a2dfba9d796d77b63ccfcaff5 \ + --hash=sha256:4caf2bcd2969402bf77edc4cb6034c7dd7c0803213b3523f111eb7460a51b8d2 \ + --hash=sha256:532fd26e715560721bb0d5fc7610fce279b3699b018600ab999d1be895b09415 \ + --hash=sha256:5ebac872ba09cb8f2131c46b8739a7ff71de28a24c869bcad554477eb089a878 \ + --hash=sha256:5f5964cdad279256c084b69c3f412b7801e15356b16efa9d78aa974041903da0 \ + --hash=sha256:65a887a6e8c4cd0897507d814b14c54a8c2e2aa4ac9f7686292f9769fcf9a6ab \ + --hash=sha256:6a37a2fb93d4df3fc4c0e363ea4d16f83195fc09c891bc8ce072b9d084853445 \ + --hash=sha256:70771a461aaeb335df14deb6c97439973d253ae70660ca085eec25241137ef43 \ + --hash=sha256:71e2bd4a1c4188f5c2b8d274da78faab884b59df20df63c34f74aa1813c4427c \ + --hash=sha256:745b57db7758f3ffc05a10254edd3182a2a83402a89c00957a8e8a22f5582823 \ + --hash=sha256:78e9253c3de756b3f6a5174d024c4835acd59eb3f8e2ca13e775dbffe1558f69 \ + --hash=sha256:82199cb78276249796419fe36b7386bd8d2cc3f28b3bc19fe2454fe2e26c4c15 \ + --hash=sha256:8b7fc0cd78ba2f4695fd0a6ad81a19e7e3ab825c31b577f384aa9d7817dc3bef \ + --hash=sha256:8c5acb8dddb0752bf252e01a3035b21443158910ac16a3b0d20e7fed7d534ce5 \ + --hash=sha256:8c942a01d9163e2e5cfb05cb66110121b8d07ad438a17f9e766317bcb62abf73 \ + --hash=sha256:8d2e74acbcba3bfdb6d9d8384cdc4f9260cae86ed9beee8bd5f54fee49a430b9 \ + --hash=sha256:90df94c89a91b7362e1142cbee7568f86514412ab8a2c0d0fca72d7e91b62912 \ + 
--hash=sha256:970e9173dbd7eba9b4e01aab19215a48ee5dd3f43cef736eebde064a171f89a5 \ + --hash=sha256:977e98a0e0480d3fe292246417239d2d45435904afd6d7332d8455981c408b85 \ + --hash=sha256:9be002b31c558d1ddf1b9b415b162c603405414bacd6932d031c5b5a8b757f0d \ + --hash=sha256:ad687a04bc802cbe8b9c399c07162a3c35e227e2daccf1668eb1f278cb698631 \ + --hash=sha256:b4f54d6a2defe9f257327b0f243612dd051cc43825587520b1bf74a31e2f6ef2 \ + --hash=sha256:b6945942715a034c671b7fc54f9588126b0b8bf23db2696e3ca8328f3ff0ab54 \ + --hash=sha256:b7cd50c38f500bbcc9b6a46643a40e0913673f869315d8e70de0438817cb7773 \ + --hash=sha256:ba38e3f9f330af820c4b27ceb4b9c7feee5fe0493ea53a8720f4792667465934 \ + --hash=sha256:c440093bbc8fc21c637c03bafcbef95ccd963bc6e0514ad887932c18ca2a759a \ + --hash=sha256:c49f73e61f1f774650a55d221803b101d966ca0c5a2d6d5e4320ec3997489441 \ + --hash=sha256:c66c4906cdbc50e9cba65978823e6e00b45682eb09adbb78c9775b74eb222422 \ + --hash=sha256:c6c4639a9c22230276b7bffb6a850dfc8258a2521305e1faefe804d006b2e532 \ + --hash=sha256:c85bb486e9be652314bb5b9e2e3b0d1b2e643d5eec4992c0fbe8ac71775da739 \ + --hash=sha256:cc829960f34ba36aad4302e78eabf3ef16a3a100863f0d4eeddf30e8a485a03b \ + --hash=sha256:cdd22595308f53ef2f891040ab2b93d79192513ffccbd7fe19be7aa773a5e09f \ + --hash=sha256:d0e589ae0d55204991450bb5c23f571c64fe43adaa53f93fc902a84c96f52fe1 \ + --hash=sha256:d14f12932a8d620e307f715857107b1d1845cc44fdb5da2bc8e850f5ceba9f87 \ + --hash=sha256:d32530b534e986374fc19eaa77fcb87e8a99e5431499949b828312bdcd20ac52 \ + --hash=sha256:d6658ccc7251a4433eebd89ed2672c2ed96fba367fd25ca9512aa92a4b46c4f1 \ + --hash=sha256:d91a3ccc7fea94ca0acab82ceb77f396d50a1f67412efe4c526f5d20264e6ecd \ + --hash=sha256:dc41ba0714aa2968d1f8674ec97504a8f7e334f48eeacebcaa6256213acb0989 \ + --hash=sha256:de39db2604ae755316cb5967728f4bea92685884b1e767b7c24e983ef5f771cb \ + --hash=sha256:de425af81b6cea33101ae95ece1f696af39446db9682a0b56daaa48cfc29f38f \ + --hash=sha256:ded1706ed0c1049224531b81128efbd5084598f18d8a2d9efae833edbd2b40ad \ + 
--hash=sha256:e1578f7eafce927b168752ed7e22646dad6cd9bca673c60bff55889fa236ebf9 \ + --hash=sha256:e259bced5549ac64410162adc973c5e2fb77f04df4a439d00b478e57a0e65512 \ + --hash=sha256:e298e7e70cf4eb179cc1077be1c725b5fd131ebc81181bf0c03525c8abc297fd \ + --hash=sha256:eab0f6db315fa4d70f1d8ab514e527f0366ec021ff853d7ed6a2d33605cf4b83 \ + --hash=sha256:f26b383144cf2d2c29f01a1e8170f50dacf0eac02d64139dcd709a8ac4eb3cfe \ + --hash=sha256:f939a054192ddc596e031e50bb13b657ce318cf13d264f095ce9db7dc6ae81c0 \ + --hash=sha256:fd93cc7f3139b6dd7aab2f26a90dde0aa9fc264dbf70f6740d498a70b860b82c + # via matplotlib +cryptography==44.0.2 \ + --hash=sha256:04abd71114848aa25edb28e225ab5f268096f44cf0127f3d36975bdf1bdf3390 \ + --hash=sha256:0529b1d5a0105dd3731fa65680b45ce49da4d8115ea76e9da77a875396727b41 \ + --hash=sha256:1bc312dfb7a6e5d66082c87c34c8a62176e684b6fe3d90fcfe1568de675e6688 \ + --hash=sha256:268e4e9b177c76d569e8a145a6939eca9a5fec658c932348598818acf31ae9a5 \ + --hash=sha256:29ecec49f3ba3f3849362854b7253a9f59799e3763b0c9d0826259a88efa02f1 \ + --hash=sha256:2bf7bf75f7df9715f810d1b038870309342bff3069c5bd8c6b96128cb158668d \ + --hash=sha256:3b721b8b4d948b218c88cb8c45a01793483821e709afe5f622861fc6182b20a7 \ + --hash=sha256:3c00b6b757b32ce0f62c574b78b939afab9eecaf597c4d624caca4f9e71e7843 \ + --hash=sha256:3dc62975e31617badc19a906481deacdeb80b4bb454394b4098e3f2525a488c5 \ + --hash=sha256:4973da6ca3db4405c54cd0b26d328be54c7747e89e284fcff166132eb7bccc9c \ + --hash=sha256:4e389622b6927d8133f314949a9812972711a111d577a5d1f4bee5e58736b80a \ + --hash=sha256:51e4de3af4ec3899d6d178a8c005226491c27c4ba84101bfb59c901e10ca9f79 \ + --hash=sha256:5f6f90b72d8ccadb9c6e311c775c8305381db88374c65fa1a68250aa8a9cb3a6 \ + --hash=sha256:6210c05941994290f3f7f175a4a57dbbb2afd9273657614c506d5976db061181 \ + --hash=sha256:6f101b1f780f7fc613d040ca4bdf835c6ef3b00e9bd7125a4255ec574c7916e4 \ + --hash=sha256:7bdcd82189759aba3816d1f729ce42ffded1ac304c151d0a8e89b9996ab863d5 \ + 
--hash=sha256:7ca25849404be2f8e4b3c59483d9d3c51298a22c1c61a0e84415104dacaf5562 \ + --hash=sha256:81276f0ea79a208d961c433a947029e1a15948966658cf6710bbabb60fcc2639 \ + --hash=sha256:8cadc6e3b5a1f144a039ea08a0bdb03a2a92e19c46be3285123d32029f40a922 \ + --hash=sha256:8e0ddd63e6bf1161800592c71ac794d3fb8001f2caebe0966e77c5234fa9efc3 \ + --hash=sha256:909c97ab43a9c0c0b0ada7a1281430e4e5ec0458e6d9244c0e821bbf152f061d \ + --hash=sha256:96e7a5e9d6e71f9f4fca8eebfd603f8e86c5225bb18eb621b2c1e50b290a9471 \ + --hash=sha256:9a1e657c0f4ea2a23304ee3f964db058c9e9e635cc7019c4aa21c330755ef6fd \ + --hash=sha256:9eb9d22b0a5d8fd9925a7764a054dca914000607dff201a24c791ff5c799e1fa \ + --hash=sha256:af4ff3e388f2fa7bff9f7f2b31b87d5651c45731d3e8cfa0944be43dff5cfbdb \ + --hash=sha256:b042d2a275c8cee83a4b7ae30c45a15e6a4baa65a179a0ec2d78ebb90e4f6699 \ + --hash=sha256:bc821e161ae88bfe8088d11bb39caf2916562e0a2dc7b6d56714a48b784ef0bb \ + --hash=sha256:c505d61b6176aaf982c5717ce04e87da5abc9a36a5b39ac03905c4aafe8de7aa \ + --hash=sha256:c63454aa261a0cf0c5b4718349629793e9e634993538db841165b3df74f37ec0 \ + --hash=sha256:c7362add18b416b69d58c910caa217f980c5ef39b23a38a0880dfd87bdf8cd23 \ + --hash=sha256:d03806036b4f89e3b13b6218fefea8d5312e450935b1a2d55f0524e2ed7c59d9 \ + --hash=sha256:d1b3031093a366ac767b3feb8bcddb596671b3aaff82d4050f984da0c248b615 \ + --hash=sha256:d1c3572526997b36f245a96a2b1713bf79ce99b271bbcf084beb6b9b075f29ea \ + --hash=sha256:efcfe97d1b3c79e486554efddeb8f6f53a4cdd4cf6086642784fa31fc384e1d7 \ + --hash=sha256:f514ef4cd14bb6fb484b4a60203e912cfcb64f2ab139e88c2274511514bf7308 + # via pyjwt +cycler==0.12.1 \ + --hash=sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30 \ + --hash=sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c + # via matplotlib +debugpy==1.8.14 \ + --hash=sha256:0f920c7f9af409d90f5fd26e313e119d908b0dd2952c2393cd3247a462331f15 \ + --hash=sha256:1b2ac8c13b2645e0b1eaf30e816404990fbdb168e193322be8f545e8c01644a9 \ + 
--hash=sha256:281d44d248a0e1791ad0eafdbbd2912ff0de9eec48022a5bfbc332957487ed3f \ + --hash=sha256:329a15d0660ee09fec6786acdb6e0443d595f64f5d096fc3e3ccf09a4259033f \ + --hash=sha256:3784ec6e8600c66cbdd4ca2726c72d8ca781e94bce2f396cc606d458146f8f4e \ + --hash=sha256:3d937d93ae4fa51cdc94d3e865f535f185d5f9748efb41d0d49e33bf3365bd79 \ + --hash=sha256:413512d35ff52c2fb0fd2d65e69f373ffd24f0ecb1fac514c04a668599c5ce7f \ + --hash=sha256:4c9156f7524a0d70b7a7e22b2e311d8ba76a15496fb00730e46dcdeedb9e1eea \ + --hash=sha256:5349b7c3735b766a281873fbe32ca9cca343d4cc11ba4a743f84cb854339ff35 \ + --hash=sha256:5aa56ef8538893e4502a7d79047fe39b1dae08d9ae257074c6464a7b290b806f \ + --hash=sha256:5cd9a579d553b6cb9759a7908a41988ee6280b961f24f63336835d9418216a20 \ + --hash=sha256:684eaf43c95a3ec39a96f1f5195a7ff3d4144e4a18d69bb66beeb1a6de605d6e \ + --hash=sha256:7118d462fe9724c887d355eef395fae68bc764fd862cdca94e70dcb9ade8a23d \ + --hash=sha256:7816acea4a46d7e4e50ad8d09d963a680ecc814ae31cdef3622eb05ccacf7b01 \ + --hash=sha256:7cd287184318416850aa8b60ac90105837bb1e59531898c07569d197d2ed5322 \ + --hash=sha256:8899c17920d089cfa23e6005ad9f22582fd86f144b23acb9feeda59e84405b84 \ + --hash=sha256:93fee753097e85623cab1c0e6a68c76308cd9f13ffdf44127e6fab4fbf024339 \ + --hash=sha256:b1528cfee6c1b1c698eb10b6b096c598738a8238822d218173d21c3086de8123 \ + --hash=sha256:b44985f97cc3dd9d52c42eb59ee9d7ee0c4e7ecd62bca704891f997de4cef23d \ + --hash=sha256:c442f20577b38cc7a9aafecffe1094f78f07fb8423c3dddb384e6b8f49fd2987 \ + --hash=sha256:c99295c76161ad8d507b413cd33422d7c542889fbb73035889420ac1fad354f2 \ + --hash=sha256:cf431c343a99384ac7eab2f763980724834f933a271e90496944195318c619e2 \ + --hash=sha256:d235e4fa78af2de4e5609073972700523e372cf5601742449970110d565ca28c \ + --hash=sha256:d5582bcbe42917bc6bbe5c12db1bffdf21f6bfc28d4554b738bf08d50dc0c8c3 \ + --hash=sha256:f117dedda6d969c5c9483e23f573b38f4e39412845c7bc487b6f2648df30fe84 \ + --hash=sha256:f6bb5c0dcf80ad5dbc7b7d6eac484e2af34bdacdf81df09b6a3e62792b722826 + # via -r 
src/requirements.in +deprecated==1.2.18 \ + --hash=sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d \ + --hash=sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec + # via pygithub +docutils==0.21.2 \ + --hash=sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f \ + --hash=sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2 + # via + # pydata-sphinx-theme + # sphinx +esbonio==0.16.5 \ + --hash=sha256:04ba926e3603f7b1fde1abc690b47afd60749b64b1029b6bce8e1de0bb284921 \ + --hash=sha256:acab2e16c6cf8f7232fb04e0d48514ce50566516b1f6fcf669ccf2f247e8b10f + # via -r src/requirements.in +fonttools==4.57.0 \ + --hash=sha256:03290e818782e7edb159474144fca11e36a8ed6663d1fcbd5268eb550594fd8e \ + --hash=sha256:0425c2e052a5f1516c94e5855dbda706ae5a768631e9fcc34e57d074d1b65b92 \ + --hash=sha256:05efceb2cb5f6ec92a4180fcb7a64aa8d3385fd49cfbbe459350229d1974f0b1 \ + --hash=sha256:17168a4670bbe3775f3f3f72d23ee786bd965395381dfbb70111e25e81505b9d \ + --hash=sha256:3122c604a675513c68bd24c6a8f9091f1c2376d18e8f5fe5a101746c81b3e98f \ + --hash=sha256:34687a5d21f1d688d7d8d416cb4c5b9c87fca8a1797ec0d74b9fdebfa55c09ab \ + --hash=sha256:3871349303bdec958360eedb619169a779956503ffb4543bb3e6211e09b647c4 \ + --hash=sha256:39acf68abdfc74e19de7485f8f7396fa4d2418efea239b7061d6ed6a2510c746 \ + --hash=sha256:3cf97236b192a50a4bf200dc5ba405aa78d4f537a2c6e4c624bb60466d5b03bd \ + --hash=sha256:408ce299696012d503b714778d89aa476f032414ae57e57b42e4b92363e0b8ef \ + --hash=sha256:44c26a311be2ac130f40a96769264809d3b0cb297518669db437d1cc82974888 \ + --hash=sha256:46370ac47a1e91895d40e9ad48effbe8e9d9db1a4b80888095bc00e7beaa042f \ + --hash=sha256:4dea5893b58d4637ffa925536462ba626f8a1b9ffbe2f5c272cdf2c6ebadb817 \ + --hash=sha256:51d8482e96b28fb28aa8e50b5706f3cee06de85cbe2dce80dbd1917ae22ec5a6 \ + --hash=sha256:541cb48191a19ceb1a2a4b90c1fcebd22a1ff7491010d3cf840dd3a68aebd654 \ + 
--hash=sha256:579ba873d7f2a96f78b2e11028f7472146ae181cae0e4d814a37a09e93d5c5cc \ + --hash=sha256:57e30241524879ea10cdf79c737037221f77cc126a8cdc8ff2c94d4a522504b9 \ + --hash=sha256:69ab81b66ebaa8d430ba56c7a5f9abe0183afefd3a2d6e483060343398b13fb1 \ + --hash=sha256:6e3e1ec10c29bae0ea826b61f265ec5c858c5ba2ce2e69a71a62f285cf8e4595 \ + --hash=sha256:727ece10e065be2f9dd239d15dd5d60a66e17eac11aea47d447f9f03fdbc42de \ + --hash=sha256:7339e6a3283e4b0ade99cade51e97cde3d54cd6d1c3744459e886b66d630c8b3 \ + --hash=sha256:767604f244dc17c68d3e2dbf98e038d11a18abc078f2d0f84b6c24571d9c0b13 \ + --hash=sha256:7a64edd3ff6a7f711a15bd70b4458611fb240176ec11ad8845ccbab4fe6745db \ + --hash=sha256:81aa97669cd726349eb7bd43ca540cf418b279ee3caba5e2e295fb4e8f841c02 \ + --hash=sha256:84c41ba992df5b8d680b89fd84c6a1f2aca2b9f1ae8a67400c8930cd4ea115f6 \ + --hash=sha256:84fd56c78d431606332a0627c16e2a63d243d0d8b05521257d77c6529abe14d8 \ + --hash=sha256:889e45e976c74abc7256d3064aa7c1295aa283c6bb19810b9f8b604dfe5c7f31 \ + --hash=sha256:8e2e12d0d862f43d51e5afb8b9751c77e6bec7d2dc00aad80641364e9df5b199 \ + --hash=sha256:967b65232e104f4b0f6370a62eb33089e00024f2ce143aecbf9755649421c683 \ + --hash=sha256:9d077f909f2343daf4495ba22bb0e23b62886e8ec7c109ee8234bdbd678cf344 \ + --hash=sha256:9d57b4e23ebbe985125d3f0cabbf286efa191ab60bbadb9326091050d88e8213 \ + --hash=sha256:a1968f2a2003c97c4ce6308dc2498d5fd4364ad309900930aa5a503c9851aec8 \ + --hash=sha256:a2a722c0e4bfd9966a11ff55c895c817158fcce1b2b6700205a376403b546ad9 \ + --hash=sha256:a97bb05eb24637714a04dee85bdf0ad1941df64fe3b802ee4ac1c284a5f97b7c \ + --hash=sha256:aff40f8ac6763d05c2c8f6d240c6dac4bb92640a86d9b0c3f3fff4404f34095c \ + --hash=sha256:babe8d1eb059a53e560e7bf29f8e8f4accc8b6cfb9b5fd10e485bde77e71ef41 \ + --hash=sha256:bbceffc80aa02d9e8b99f2a7491ed8c4a783b2fc4020119dc405ca14fb5c758c \ + --hash=sha256:c59375e85126b15a90fcba3443eaac58f3073ba091f02410eaa286da9ad80ed8 \ + --hash=sha256:ca2aed95855506b7ae94e8f1f6217b7673c929e4f4f1217bcaa236253055cb36 \ + 
--hash=sha256:cc066cb98b912f525ae901a24cd381a656f024f76203bc85f78fcc9e66ae5aec \ + --hash=sha256:cdef9a056c222d0479a1fdb721430f9efd68268014c54e8166133d2643cb05d9 \ + --hash=sha256:d07f1b64008e39fceae7aa99e38df8385d7d24a474a8c9872645c4397b674481 \ + --hash=sha256:d639397de852f2ccfb3134b152c741406752640a266d9c1365b0f23d7b88077f \ + --hash=sha256:dff02c5c8423a657c550b48231d0a48d7e2b2e131088e55983cfe74ccc2c7cc9 \ + --hash=sha256:e952c684274a7714b3160f57ec1d78309f955c6335c04433f07d36c5eb27b1f9 \ + --hash=sha256:ea1e9e43ca56b0c12440a7c689b1350066595bebcaa83baad05b8b2675129d98 \ + --hash=sha256:f022601f3ee9e1f6658ed6d184ce27fa5216cee5b82d279e0f0bde5deebece72 \ + --hash=sha256:f0e9618630edd1910ad4f07f60d77c184b2f572c8ee43305ea3265675cbbfe7e \ + --hash=sha256:f1d6bc9c23356908db712d282acb3eebd4ae5ec6d8b696aa40342b1d84f8e9e3 \ + --hash=sha256:f4376819c1c778d59e0a31db5dc6ede854e9edf28bbfa5b756604727f7f800ac + # via matplotlib +h11==0.16.0 \ + --hash=sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1 \ + --hash=sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86 + # via uvicorn +idna==3.10 \ + --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ + --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 + # via + # anyio + # requests +imagesize==1.4.1 \ + --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \ + --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a + # via sphinx +iniconfig==2.1.0 \ + --hash=sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7 \ + --hash=sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760 + # via + # -r /home/maxi/.cache/bazel/_bazel_maxi/b64a2544752b0743f97f94438562b33d/external/score_python_basics~/requirements.txt + # pytest +jinja2==3.1.6 \ + --hash=sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d \ + 
--hash=sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67 + # via sphinx +jsonschema==4.23.0 \ + --hash=sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4 \ + --hash=sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566 + # via sphinx-needs +jsonschema-specifications==2025.4.1 \ + --hash=sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af \ + --hash=sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608 + # via jsonschema +kiwisolver==1.4.8 \ + --hash=sha256:01c3d31902c7db5fb6182832713d3b4122ad9317c2c5877d0539227d96bb2e50 \ + --hash=sha256:034d2c891f76bd3edbdb3ea11140d8510dca675443da7304205a2eaa45d8334c \ + --hash=sha256:085940635c62697391baafaaeabdf3dd7a6c3643577dde337f4d66eba021b2b8 \ + --hash=sha256:08e77738ed7538f036cd1170cbed942ef749137b1311fa2bbe2a7fda2f6bf3cc \ + --hash=sha256:111793b232842991be367ed828076b03d96202c19221b5ebab421ce8bcad016f \ + --hash=sha256:11e1022b524bd48ae56c9b4f9296bce77e15a2e42a502cceba602f804b32bb79 \ + --hash=sha256:151dffc4865e5fe6dafce5480fab84f950d14566c480c08a53c663a0020504b6 \ + --hash=sha256:16523b40aab60426ffdebe33ac374457cf62863e330a90a0383639ce14bf44b2 \ + --hash=sha256:1732e065704b47c9afca7ffa272f845300a4eb959276bf6970dc07265e73b605 \ + --hash=sha256:1c8ceb754339793c24aee1c9fb2485b5b1f5bb1c2c214ff13368431e51fc9a09 \ + --hash=sha256:23454ff084b07ac54ca8be535f4174170c1094a4cff78fbae4f73a4bcc0d4dab \ + --hash=sha256:23d5f023bdc8c7e54eb65f03ca5d5bb25b601eac4d7f1a042888a1f45237987e \ + --hash=sha256:257af1622860e51b1a9d0ce387bf5c2c4f36a90594cb9514f55b074bcc787cfc \ + --hash=sha256:286b18e86682fd2217a48fc6be6b0f20c1d0ed10958d8dc53453ad58d7be0bf8 \ + --hash=sha256:291331973c64bb9cce50bbe871fb2e675c4331dab4f31abe89f175ad7679a4d7 \ + --hash=sha256:2f0121b07b356a22fb0414cec4666bbe36fd6d0d759db3d37228f496ed67c880 \ + --hash=sha256:3452046c37c7692bd52b0e752b87954ef86ee2224e624ef7ce6cb21e8c41cc1b \ + 
--hash=sha256:34d142fba9c464bc3bbfeff15c96eab0e7310343d6aefb62a79d51421fcc5f1b \ + --hash=sha256:369b75d40abedc1da2c1f4de13f3482cb99e3237b38726710f4a793432b1c5ff \ + --hash=sha256:36dbbfd34838500a31f52c9786990d00150860e46cd5041386f217101350f0d3 \ + --hash=sha256:370fd2df41660ed4e26b8c9d6bbcad668fbe2560462cba151a721d49e5b6628c \ + --hash=sha256:3a96c0e790ee875d65e340ab383700e2b4891677b7fcd30a699146f9384a2bb0 \ + --hash=sha256:3b9b4d2892fefc886f30301cdd80debd8bb01ecdf165a449eb6e78f79f0fabd6 \ + --hash=sha256:3cd3bc628b25f74aedc6d374d5babf0166a92ff1317f46267f12d2ed54bc1d30 \ + --hash=sha256:3ddc373e0eef45b59197de815b1b28ef89ae3955e7722cc9710fb91cd77b7f47 \ + --hash=sha256:4191ee8dfd0be1c3666ccbac178c5a05d5f8d689bbe3fc92f3c4abec817f8fe0 \ + --hash=sha256:54a62808ac74b5e55a04a408cda6156f986cefbcf0ada13572696b507cc92fa1 \ + --hash=sha256:577facaa411c10421314598b50413aa1ebcf5126f704f1e5d72d7e4e9f020d90 \ + --hash=sha256:641f2ddf9358c80faa22e22eb4c9f54bd3f0e442e038728f500e3b978d00aa7d \ + --hash=sha256:65ea09a5a3faadd59c2ce96dc7bf0f364986a315949dc6374f04396b0d60e09b \ + --hash=sha256:68269e60ee4929893aad82666821aaacbd455284124817af45c11e50a4b42e3c \ + --hash=sha256:69b5637c3f316cab1ec1c9a12b8c5f4750a4c4b71af9157645bf32830e39c03a \ + --hash=sha256:7506488470f41169b86d8c9aeff587293f530a23a23a49d6bc64dab66bedc71e \ + --hash=sha256:768cade2c2df13db52475bd28d3a3fac8c9eff04b0e9e2fda0f3760f20b3f7fc \ + --hash=sha256:77e6f57a20b9bd4e1e2cedda4d0b986ebd0216236f0106e55c28aea3d3d69b16 \ + --hash=sha256:782bb86f245ec18009890e7cb8d13a5ef54dcf2ebe18ed65f795e635a96a1c6a \ + --hash=sha256:7a3ad337add5148cf51ce0b55642dc551c0b9d6248458a757f98796ca7348712 \ + --hash=sha256:7cd2785b9391f2873ad46088ed7599a6a71e762e1ea33e87514b1a441ed1da1c \ + --hash=sha256:7e9a60b50fe8b2ec6f448fe8d81b07e40141bfced7f896309df271a0b92f80f3 \ + --hash=sha256:84a2f830d42707de1d191b9490ac186bf7997a9495d4e9072210a1296345f7dc \ + --hash=sha256:856b269c4d28a5c0d5e6c1955ec36ebfd1651ac00e1ce0afa3e28da95293b561 \ + 
--hash=sha256:858416b7fb777a53f0c59ca08190ce24e9abbd3cffa18886a5781b8e3e26f65d \ + --hash=sha256:87b287251ad6488e95b4f0b4a79a6d04d3ea35fde6340eb38fbd1ca9cd35bbbc \ + --hash=sha256:88c6f252f6816a73b1f8c904f7bbe02fd67c09a69f7cb8a0eecdbf5ce78e63db \ + --hash=sha256:893f5525bb92d3d735878ec00f781b2de998333659507d29ea4466208df37bed \ + --hash=sha256:89c107041f7b27844179ea9c85d6da275aa55ecf28413e87624d033cf1f6b751 \ + --hash=sha256:918139571133f366e8362fa4a297aeba86c7816b7ecf0bc79168080e2bd79957 \ + --hash=sha256:99cea8b9dd34ff80c521aef46a1dddb0dcc0283cf18bde6d756f1e6f31772165 \ + --hash=sha256:a17b7c4f5b2c51bb68ed379defd608a03954a1845dfed7cc0117f1cc8a9b7fd2 \ + --hash=sha256:a3c44cb68861de93f0c4a8175fbaa691f0aa22550c331fefef02b618a9dcb476 \ + --hash=sha256:a4d3601908c560bdf880f07d94f31d734afd1bb71e96585cace0e38ef44c6d84 \ + --hash=sha256:a5ce1e481a74b44dd5e92ff03ea0cb371ae7a0268318e202be06c8f04f4f1246 \ + --hash=sha256:a66f60f8d0c87ab7f59b6fb80e642ebb29fec354a4dfad687ca4092ae69d04f4 \ + --hash=sha256:b21dbe165081142b1232a240fc6383fd32cdd877ca6cc89eab93e5f5883e1c25 \ + --hash=sha256:b47a465040146981dc9db8647981b8cb96366fbc8d452b031e4f8fdffec3f26d \ + --hash=sha256:b5773efa2be9eb9fcf5415ea3ab70fc785d598729fd6057bea38d539ead28271 \ + --hash=sha256:b83dc6769ddbc57613280118fb4ce3cd08899cc3369f7d0e0fab518a7cf37fdb \ + --hash=sha256:bade438f86e21d91e0cf5dd7c0ed00cda0f77c8c1616bd83f9fc157fa6760d31 \ + --hash=sha256:bcb1ebc3547619c3b58a39e2448af089ea2ef44b37988caf432447374941574e \ + --hash=sha256:be4816dc51c8a471749d664161b434912eee82f2ea66bd7628bd14583a833e85 \ + --hash=sha256:c07b29089b7ba090b6f1a669f1411f27221c3662b3a1b7010e67b59bb5a6f10b \ + --hash=sha256:c2b9a96e0f326205af81a15718a9073328df1173a2619a68553decb7097fd5d7 \ + --hash=sha256:c5020c83e8553f770cb3b5fc13faac40f17e0b205bd237aebd21d53d733adb03 \ + --hash=sha256:c72941acb7b67138f35b879bbe85be0f6c6a70cab78fe3ef6db9c024d9223e5b \ + --hash=sha256:c8bf637892dc6e6aad2bc6d4d69d08764166e5e3f69d469e55427b6ac001b19d \ + 
--hash=sha256:cc978a80a0db3a66d25767b03688f1147a69e6237175c0f4ffffaaedf744055a \ + --hash=sha256:ce2cf1e5688edcb727fdf7cd1bbd0b6416758996826a8be1d958f91880d0809d \ + --hash=sha256:d47b28d1dfe0793d5e96bce90835e17edf9a499b53969b03c6c47ea5985844c3 \ + --hash=sha256:d47cfb2650f0e103d4bf68b0b5804c68da97272c84bb12850d877a95c056bd67 \ + --hash=sha256:d5536185fce131780ebd809f8e623bf4030ce1b161353166c49a3c74c287897f \ + --hash=sha256:d561d2d8883e0819445cfe58d7ddd673e4015c3c57261d7bdcd3710d0d14005c \ + --hash=sha256:d6af5e8815fd02997cb6ad9bbed0ee1e60014438ee1a5c2444c96f87b8843502 \ + --hash=sha256:d6d6bd87df62c27d4185de7c511c6248040afae67028a8a22012b010bc7ad062 \ + --hash=sha256:dace81d28c787956bfbfbbfd72fdcef014f37d9b48830829e488fdb32b49d954 \ + --hash=sha256:e063ef9f89885a1d68dd8b2e18f5ead48653176d10a0e324e3b0030e3a69adeb \ + --hash=sha256:e7a019419b7b510f0f7c9dceff8c5eae2392037eae483a7f9162625233802b0a \ + --hash=sha256:eaa973f1e05131de5ff3569bbba7f5fd07ea0595d3870ed4a526d486fe57fa1b \ + --hash=sha256:eb158fe28ca0c29f2260cca8c43005329ad58452c36f0edf298204de32a9a3ed \ + --hash=sha256:ed33ca2002a779a2e20eeb06aea7721b6e47f2d4b8a8ece979d8ba9e2a167e34 \ + --hash=sha256:fc2ace710ba7c1dfd1a3b42530b62b9ceed115f19a1656adefce7b1782a37794 + # via matplotlib +lsprotocol==2023.0.1 \ + --hash=sha256:c75223c9e4af2f24272b14c6375787438279369236cd568f596d4951052a60f2 \ + --hash=sha256:cc5c15130d2403c18b734304339e51242d3018a05c4f7d0f198ad6e0cd21861d + # via pygls +markupsafe==3.0.2 \ + --hash=sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4 \ + --hash=sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30 \ + --hash=sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0 \ + --hash=sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9 \ + --hash=sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396 \ + --hash=sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13 \ + 
--hash=sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028 \ + --hash=sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca \ + --hash=sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557 \ + --hash=sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832 \ + --hash=sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0 \ + --hash=sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b \ + --hash=sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579 \ + --hash=sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a \ + --hash=sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c \ + --hash=sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff \ + --hash=sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c \ + --hash=sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22 \ + --hash=sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094 \ + --hash=sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb \ + --hash=sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e \ + --hash=sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5 \ + --hash=sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a \ + --hash=sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d \ + --hash=sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a \ + --hash=sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b \ + --hash=sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8 \ + --hash=sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225 \ + --hash=sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c \ + --hash=sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144 \ + 
--hash=sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f \ + --hash=sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87 \ + --hash=sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d \ + --hash=sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93 \ + --hash=sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf \ + --hash=sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158 \ + --hash=sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84 \ + --hash=sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb \ + --hash=sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48 \ + --hash=sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171 \ + --hash=sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c \ + --hash=sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6 \ + --hash=sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd \ + --hash=sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d \ + --hash=sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1 \ + --hash=sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d \ + --hash=sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca \ + --hash=sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a \ + --hash=sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29 \ + --hash=sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe \ + --hash=sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798 \ + --hash=sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c \ + --hash=sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8 \ + --hash=sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f \ + 
--hash=sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f \ + --hash=sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a \ + --hash=sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178 \ + --hash=sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0 \ + --hash=sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79 \ + --hash=sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430 \ + --hash=sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50 + # via jinja2 +matplotlib==3.10.1 \ + --hash=sha256:01e63101ebb3014e6e9f80d9cf9ee361a8599ddca2c3e166c563628b39305dbb \ + --hash=sha256:02582304e352f40520727984a5a18f37e8187861f954fea9be7ef06569cf85b4 \ + --hash=sha256:057206ff2d6ab82ff3e94ebd94463d084760ca682ed5f150817b859372ec4401 \ + --hash=sha256:0721a3fd3d5756ed593220a8b86808a36c5031fce489adb5b31ee6dbb47dd5b2 \ + --hash=sha256:0f69dc9713e4ad2fb21a1c30e37bd445d496524257dfda40ff4a8efb3604ab5c \ + --hash=sha256:11b65088c6f3dae784bc72e8d039a2580186285f87448babb9ddb2ad0082993a \ + --hash=sha256:1985ad3d97f51307a2cbfc801a930f120def19ba22864182dacef55277102ba6 \ + --hash=sha256:19b06241ad89c3ae9469e07d77efa87041eac65d78df4fcf9cac318028009b01 \ + --hash=sha256:2589659ea30726284c6c91037216f64a506a9822f8e50592d48ac16a2f29e044 \ + --hash=sha256:35e87384ee9e488d8dd5a2dd7baf471178d38b90618d8ea147aced4ab59c9bea \ + --hash=sha256:3f06bad951eea6422ac4e8bdebcf3a70c59ea0a03338c5d2b109f57b64eb3972 \ + --hash=sha256:4c59af3e8aca75d7744b68e8e78a669e91ccbcf1ac35d0102a7b1b46883f1dd7 \ + --hash=sha256:4f0647b17b667ae745c13721602b540f7aadb2a32c5b96e924cd4fea5dcb90f1 \ + --hash=sha256:56c5d9fcd9879aa8040f196a235e2dcbdf7dd03ab5b07c0696f80bc6cf04bedd \ + --hash=sha256:5d45d3f5245be5b469843450617dcad9af75ca50568acf59997bed9311131a0b \ + --hash=sha256:648406f1899f9a818cef8c0231b44dcfc4ff36f167101c3fd1c9151f24220fdc \ + 
--hash=sha256:66e907a06e68cb6cfd652c193311d61a12b54f56809cafbed9736ce5ad92f107 \ + --hash=sha256:7e496c01441be4c7d5f96d4e40f7fca06e20dcb40e44c8daa2e740e1757ad9e6 \ + --hash=sha256:8e875b95ac59a7908978fe307ecdbdd9a26af7fa0f33f474a27fcf8c99f64a19 \ + --hash=sha256:8e8e25b1209161d20dfe93037c8a7f7ca796ec9aa326e6e4588d8c4a5dd1e473 \ + --hash=sha256:a144867dd6bf8ba8cb5fc81a158b645037e11b3e5cf8a50bd5f9917cb863adfe \ + --hash=sha256:a3dfb036f34873b46978f55e240cff7a239f6c4409eac62d8145bad3fc6ba5a3 \ + --hash=sha256:a97ff127f295817bc34517255c9db6e71de8eddaab7f837b7d341dee9f2f587f \ + --hash=sha256:aa3854b5f9473564ef40a41bc922be978fab217776e9ae1545c9b3a5cf2092a3 \ + --hash=sha256:bc411ebd5889a78dabbc457b3fa153203e22248bfa6eedc6797be5df0164dbf9 \ + --hash=sha256:c42eee41e1b60fd83ee3292ed83a97a5f2a8239b10c26715d8a6172226988d7b \ + --hash=sha256:c96f2c2f825d1257e437a1482c5a2cf4fee15db4261bd6fc0750f81ba2b4ba3d \ + --hash=sha256:cfd414bce89cc78a7e1d25202e979b3f1af799e416010a20ab2b5ebb3a02425c \ + --hash=sha256:d0673b4b8f131890eb3a1ad058d6e065fb3c6e71f160089b65f8515373394698 \ + --hash=sha256:d3809916157ba871bcdd33d3493acd7fe3037db5daa917ca6e77975a94cef779 \ + --hash=sha256:dc6ab14a7ab3b4d813b88ba957fc05c79493a037f54e246162033591e770de6f \ + --hash=sha256:e8d2d0e3881b129268585bf4765ad3ee73a4591d77b9a18c214ac7e3a79fb2ba \ + --hash=sha256:e9b4bb156abb8fa5e5b2b460196f7db7264fc6d62678c03457979e7d5254b7be \ + --hash=sha256:ff2ae14910be903f4a24afdbb6d7d3a6c44da210fc7d42790b87aeac92238a16 + # via sphinx-needs +numpy==2.2.5 \ + --hash=sha256:0255732338c4fdd00996c0421884ea8a3651eea555c3a56b84892b66f696eb70 \ + --hash=sha256:02f226baeefa68f7d579e213d0f3493496397d8f1cff5e2b222af274c86a552a \ + --hash=sha256:059b51b658f4414fff78c6d7b1b4e18283ab5fa56d270ff212d5ba0c561846f4 \ + --hash=sha256:0bcb1d057b7571334139129b7f941588f69ce7c4ed15a9d6162b2ea54ded700c \ + --hash=sha256:0cd48122a6b7eab8f06404805b1bd5856200e3ed6f8a1b9a194f9d9054631beb \ + 
--hash=sha256:19f4718c9012e3baea91a7dba661dcab2451cda2550678dc30d53acb91a7290f \ + --hash=sha256:1a161c2c79ab30fe4501d5a2bbfe8b162490757cf90b7f05be8b80bc02f7bb8e \ + --hash=sha256:1f4a922da1729f4c40932b2af4fe84909c7a6e167e6e99f71838ce3a29f3fe26 \ + --hash=sha256:261a1ef047751bb02f29dfe337230b5882b54521ca121fc7f62668133cb119c9 \ + --hash=sha256:262d23f383170f99cd9191a7c85b9a50970fe9069b2f8ab5d786eca8a675d60b \ + --hash=sha256:2ba321813a00e508d5421104464510cc962a6f791aa2fca1c97b1e65027da80d \ + --hash=sha256:2c1a1c6ccce4022383583a6ded7bbcda22fc635eb4eb1e0a053336425ed36dfa \ + --hash=sha256:352d330048c055ea6db701130abc48a21bec690a8d38f8284e00fab256dc1376 \ + --hash=sha256:369e0d4647c17c9363244f3468f2227d557a74b6781cb62ce57cf3ef5cc7c610 \ + --hash=sha256:36ab5b23915887543441efd0417e6a3baa08634308894316f446027611b53bf1 \ + --hash=sha256:37e32e985f03c06206582a7323ef926b4e78bdaa6915095ef08070471865b906 \ + --hash=sha256:3a801fef99668f309b88640e28d261991bfad9617c27beda4a3aec4f217ea073 \ + --hash=sha256:3d14b17b9be5f9c9301f43d2e2a4886a33b53f4e6fdf9ca2f4cc60aeeee76372 \ + --hash=sha256:422cc684f17bc963da5f59a31530b3936f57c95a29743056ef7a7903a5dbdf88 \ + --hash=sha256:4520caa3807c1ceb005d125a75e715567806fed67e315cea619d5ec6e75a4191 \ + --hash=sha256:47834cde750d3c9f4e52c6ca28a7361859fcaf52695c7dc3cc1a720b8922683e \ + --hash=sha256:47f9ed103af0bc63182609044b0490747e03bd20a67e391192dde119bf43d52f \ + --hash=sha256:498815b96f67dc347e03b719ef49c772589fb74b8ee9ea2c37feae915ad6ebda \ + --hash=sha256:54088a5a147ab71a8e7fdfd8c3601972751ded0739c6b696ad9cb0343e21ab73 \ + --hash=sha256:55f09e00d4dccd76b179c0f18a44f041e5332fd0e022886ba1c0bbf3ea4a18d0 \ + --hash=sha256:5a0ac90e46fdb5649ab6369d1ab6104bfe5854ab19b645bf5cda0127a13034ae \ + --hash=sha256:6411f744f7f20081b1b4e7112e0f4c9c5b08f94b9f086e6f0adf3645f85d3a4d \ + --hash=sha256:6413d48a9be53e183eb06495d8e3b006ef8f87c324af68241bbe7a39e8ff54c3 \ + --hash=sha256:7451f92eddf8503c9b8aa4fe6aa7e87fd51a29c2cfc5f7dbd72efde6c65acf57 \ + 
--hash=sha256:8b4c0773b6ada798f51f0f8e30c054d32304ccc6e9c5d93d46cb26f3d385ab19 \ + --hash=sha256:8dfa94b6a4374e7851bbb6f35e6ded2120b752b063e6acdd3157e4d2bb922eba \ + --hash=sha256:97c8425d4e26437e65e1d189d22dff4a079b747ff9c2788057bfb8114ce1e133 \ + --hash=sha256:9d75f338f5f79ee23548b03d801d28a505198297534f62416391857ea0479571 \ + --hash=sha256:9de6832228f617c9ef45d948ec1cd8949c482238d68b2477e6f642c33a7b0a54 \ + --hash=sha256:a4cbdef3ddf777423060c6f81b5694bad2dc9675f110c4b2a60dc0181543fac7 \ + --hash=sha256:a9c0d994680cd991b1cb772e8b297340085466a6fe964bc9d4e80f5e2f43c291 \ + --hash=sha256:aa70fdbdc3b169d69e8c59e65c07a1c9351ceb438e627f0fdcd471015cd956be \ + --hash=sha256:abe38cd8381245a7f49967a6010e77dbf3680bd3627c0fe4362dd693b404c7f8 \ + --hash=sha256:b13f04968b46ad705f7c8a80122a42ae8f620536ea38cf4bdd374302926424dd \ + --hash=sha256:b4ea7e1cff6784e58fe281ce7e7f05036b3e1c89c6f922a6bfbc0a7e8768adbe \ + --hash=sha256:b6f91524d31b34f4a5fee24f5bc16dcd1491b668798b6d85585d836c1e633a6a \ + --hash=sha256:c26843fd58f65da9491165072da2cccc372530681de481ef670dcc8e27cfb066 \ + --hash=sha256:c42365005c7a6c42436a54d28c43fe0e01ca11eb2ac3cefe796c25a5f98e5e9b \ + --hash=sha256:c8b82a55ef86a2d8e81b63da85e55f5537d2157165be1cb2ce7cfa57b6aef38b \ + --hash=sha256:ced69262a8278547e63409b2653b372bf4baff0870c57efa76c5703fd6543282 \ + --hash=sha256:d2e3bdadaba0e040d1e7ab39db73e0afe2c74ae277f5614dad53eadbecbbb169 \ + --hash=sha256:d403c84991b5ad291d3809bace5e85f4bbf44a04bdc9a88ed2bb1807b3360bb8 \ + --hash=sha256:d7543263084a85fbc09c704b515395398d31d6395518446237eac219eab9e55e \ + --hash=sha256:d8882a829fd779f0f43998e931c466802a77ca1ee0fe25a3abe50278616b1471 \ + --hash=sha256:e4f0b035d9d0ed519c813ee23e0a733db81ec37d2e9503afbb6e54ccfdee0fa7 \ + --hash=sha256:e8b025c351b9f0e8b5436cf28a07fa4ac0204d67b38f01433ac7f9b870fa38c6 \ + --hash=sha256:eb7fd5b184e5d277afa9ec0ad5e4eb562ecff541e7f60e69ee69c8d59e9aeaba \ + --hash=sha256:ec31367fd6a255dc8de4772bd1658c3e926d8e860a0b6e922b615e532d320ddc \ + 
--hash=sha256:ee461a4eaab4f165b68780a6a1af95fb23a29932be7569b9fab666c407969051 \ + --hash=sha256:f5045039100ed58fa817a6227a356240ea1b9a1bc141018864c306c1a16d4175 + # via + # contourpy + # matplotlib +packaging==24.2 \ + --hash=sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759 \ + --hash=sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f + # via + # -r /home/maxi/.cache/bazel/_bazel_maxi/b64a2544752b0743f97f94438562b33d/external/score_python_basics~/requirements.txt + # matplotlib + # pytest + # sphinx +pillow==11.2.1 \ + --hash=sha256:014ca0050c85003620526b0ac1ac53f56fc93af128f7546623cc8e31875ab928 \ + --hash=sha256:036e53f4170e270ddb8797d4c590e6dd14d28e15c7da375c18978045f7e6c37b \ + --hash=sha256:062b7a42d672c45a70fa1f8b43d1d38ff76b63421cbbe7f88146b39e8a558d91 \ + --hash=sha256:0c3e6d0f59171dfa2e25d7116217543310908dfa2770aa64b8f87605f8cacc97 \ + --hash=sha256:0c7b29dbd4281923a2bfe562acb734cee96bbb129e96e6972d315ed9f232bef4 \ + --hash=sha256:0f5c7eda47bf8e3c8a283762cab94e496ba977a420868cb819159980b6709193 \ + --hash=sha256:127bf6ac4a5b58b3d32fc8289656f77f80567d65660bc46f72c0d77e6600cc95 \ + --hash=sha256:14e33b28bf17c7a38eede290f77db7c664e4eb01f7869e37fa98a5aa95978941 \ + --hash=sha256:14f73f7c291279bd65fda51ee87affd7c1e097709f7fdd0188957a16c264601f \ + --hash=sha256:191955c55d8a712fab8934a42bfefbf99dd0b5875078240943f913bb66d46d9f \ + --hash=sha256:1d535df14716e7f8776b9e7fee118576d65572b4aad3ed639be9e4fa88a1cad3 \ + --hash=sha256:208653868d5c9ecc2b327f9b9ef34e0e42a4cdd172c2988fd81d62d2bc9bc044 \ + --hash=sha256:21e1470ac9e5739ff880c211fc3af01e3ae505859392bf65458c224d0bf283eb \ + --hash=sha256:225c832a13326e34f212d2072982bb1adb210e0cc0b153e688743018c94a2681 \ + --hash=sha256:25a5f306095c6780c52e6bbb6109624b95c5b18e40aab1c3041da3e9e0cd3e2d \ + --hash=sha256:2728567e249cdd939f6cc3d1f049595c66e4187f3c34078cbc0a7d21c47482d2 \ + --hash=sha256:2b490402c96f907a166615e9a5afacf2519e28295f157ec3a2bb9bd57de638cb \ + 
--hash=sha256:312c77b7f07ab2139924d2639860e084ec2a13e72af54d4f08ac843a5fc9c79d \ + --hash=sha256:31df6e2d3d8fc99f993fd253e97fae451a8db2e7207acf97859732273e108406 \ + --hash=sha256:35ca289f712ccfc699508c4658a1d14652e8033e9b69839edf83cbdd0ba39e70 \ + --hash=sha256:3692b68c87096ac6308296d96354eddd25f98740c9d2ab54e1549d6c8aea9d79 \ + --hash=sha256:36d6b82164c39ce5482f649b437382c0fb2395eabc1e2b1702a6deb8ad647d6e \ + --hash=sha256:39ad2e0f424394e3aebc40168845fee52df1394a4673a6ee512d840d14ab3013 \ + --hash=sha256:3e645b020f3209a0181a418bffe7b4a93171eef6c4ef6cc20980b30bebf17b7d \ + --hash=sha256:3fe735ced9a607fee4f481423a9c36701a39719252a9bb251679635f99d0f7d2 \ + --hash=sha256:4b835d89c08a6c2ee7781b8dd0a30209a8012b5f09c0a665b65b0eb3560b6f36 \ + --hash=sha256:4d375eb838755f2528ac8cbc926c3e31cc49ca4ad0cf79cff48b20e30634a4a7 \ + --hash=sha256:4eb92eca2711ef8be42fd3f67533765d9fd043b8c80db204f16c8ea62ee1a751 \ + --hash=sha256:5119225c622403afb4b44bad4c1ca6c1f98eed79db8d3bc6e4e160fc6339d66c \ + --hash=sha256:562d11134c97a62fe3af29581f083033179f7ff435f78392565a1ad2d1c2c45c \ + --hash=sha256:598174aef4589af795f66f9caab87ba4ff860ce08cd5bb447c6fc553ffee603c \ + --hash=sha256:63b5dff3a68f371ea06025a1a6966c9a1e1ee452fc8020c2cd0ea41b83e9037b \ + --hash=sha256:6ebce70c3f486acf7591a3d73431fa504a4e18a9b97ff27f5f47b7368e4b9dd1 \ + --hash=sha256:738db0e0941ca0376804d4de6a782c005245264edaa253ffce24e5a15cbdc7bd \ + --hash=sha256:7491cf8a79b8eb867d419648fff2f83cb0b3891c8b36da92cc7f1931d46108c8 \ + --hash=sha256:74ee3d7ecb3f3c05459ba95eed5efa28d6092d751ce9bf20e3e253a4e497e691 \ + --hash=sha256:750f96efe0597382660d8b53e90dd1dd44568a8edb51cb7f9d5d918b80d4de14 \ + --hash=sha256:78092232a4ab376a35d68c4e6d5e00dfd73454bd12b230420025fbe178ee3b0b \ + --hash=sha256:78afba22027b4accef10dbd5eed84425930ba41b3ea0a86fa8d20baaf19d807f \ + --hash=sha256:7bdb5e09068332578214cadd9c05e3d64d99e0e87591be22a324bdbc18925be0 \ + --hash=sha256:80f1df8dbe9572b4b7abdfa17eb5d78dd620b1d55d9e25f834efdbee872d3aed \ + 
--hash=sha256:85d27ea4c889342f7e35f6d56e7e1cb345632ad592e8c51b693d7b7556043ce0 \ + --hash=sha256:8b02d8f9cb83c52578a0b4beadba92e37d83a4ef11570a8688bbf43f4ca50909 \ + --hash=sha256:8ce2e8411c7aaef53e6bb29fe98f28cd4fbd9a1d9be2eeea434331aac0536b22 \ + --hash=sha256:8f4f3724c068be008c08257207210c138d5f3731af6c155a81c2b09a9eb3a788 \ + --hash=sha256:9622e3b6c1d8b551b6e6f21873bdcc55762b4b2126633014cea1803368a9aa16 \ + --hash=sha256:9b7b0d4fd2635f54ad82785d56bc0d94f147096493a79985d0ab57aedd563156 \ + --hash=sha256:9bc7ae48b8057a611e5fe9f853baa88093b9a76303937449397899385da06fad \ + --hash=sha256:9db98ab6565c69082ec9b0d4e40dd9f6181dab0dd236d26f7a50b8b9bfbd5076 \ + --hash=sha256:9ee66787e095127116d91dea2143db65c7bb1e232f617aa5957c0d9d2a3f23a7 \ + --hash=sha256:a0a6709b47019dff32e678bc12c63008311b82b9327613f534e496dacaefb71e \ + --hash=sha256:a64dd61998416367b7ef979b73d3a85853ba9bec4c2925f74e588879a58716b6 \ + --hash=sha256:aa442755e31c64037aa7c1cb186e0b369f8416c567381852c63444dd666fb772 \ + --hash=sha256:ad275964d52e2243430472fc5d2c2334b4fc3ff9c16cb0a19254e25efa03a155 \ + --hash=sha256:b0e130705d568e2f43a17bcbe74d90958e8a16263868a12c3e0d9c8162690830 \ + --hash=sha256:b10428b3416d4f9c61f94b494681280be7686bda15898a3a9e08eb66a6d92d67 \ + --hash=sha256:b2dbea1012ccb784a65349f57bbc93730b96e85b42e9bf7b01ef40443db720b4 \ + --hash=sha256:b4ba4be812c7a40280629e55ae0b14a0aafa150dd6451297562e1764808bbe61 \ + --hash=sha256:b93a07e76d13bff9444f1a029e0af2964e654bfc2e2c2d46bfd080df5ad5f3d8 \ + --hash=sha256:bf2c33d6791c598142f00c9c4c7d47f6476731c31081331664eb26d6ab583e01 \ + --hash=sha256:c27476257b2fdcd7872d54cfd119b3a9ce4610fb85c8e32b70b42e3680a29a1e \ + --hash=sha256:c8bd62331e5032bc396a93609982a9ab6b411c05078a52f5fe3cc59234a3abd1 \ + --hash=sha256:c97209e85b5be259994eb5b69ff50c5d20cca0f458ef9abd835e262d9d88b39d \ + --hash=sha256:cc1c3bc53befb6096b84165956e886b1729634a799e9d6329a0c512ab651e579 \ + --hash=sha256:cc5d875d56e49f112b6def6813c4e3d3036d269c008bf8aef72cd08d20ca6df6 \ + 
--hash=sha256:d189ba1bebfbc0c0e529159631ec72bb9e9bc041f01ec6d3233d6d82eb823bc1 \ + --hash=sha256:d4e5c5edee874dce4f653dbe59db7c73a600119fbea8d31f53423586ee2aafd7 \ + --hash=sha256:d57a75d53922fc20c165016a20d9c44f73305e67c351bbc60d1adaf662e74047 \ + --hash=sha256:da3104c57bbd72948d75f6a9389e6727d2ab6333c3617f0a89d72d4940aa0443 \ + --hash=sha256:dd6b20b93b3ccc9c1b597999209e4bc5cf2853f9ee66e3fc9a400a78733ffc9a \ + --hash=sha256:e0409af9f829f87a2dfb7e259f78f317a5351f2045158be321fd135973fff7bf \ + --hash=sha256:e0b55f27f584ed623221cfe995c912c61606be8513bfa0e07d2c674b4516d9dd \ + --hash=sha256:e616e7154c37669fc1dfc14584f11e284e05d1c650e1c0f972f281c4ccc53193 \ + --hash=sha256:e6def7eed9e7fa90fde255afaf08060dc4b343bbe524a8f69bdd2a2f0018f600 \ + --hash=sha256:ea926cfbc3957090becbcbbb65ad177161a2ff2ad578b5a6ec9bb1e1cd78753c \ + --hash=sha256:f0d3348c95b766f54b76116d53d4cb171b52992a1027e7ca50c81b43b9d9e363 \ + --hash=sha256:f6b0c664ccb879109ee3ca702a9272d877f4fcd21e5eb63c26422fd6e415365e \ + --hash=sha256:f781dcb0bc9929adc77bad571b8621ecb1e4cdef86e940fe2e5b5ee24fd33b35 \ + --hash=sha256:f91ebf30830a48c825590aede79376cb40f110b387c17ee9bd59932c961044f9 \ + --hash=sha256:fdec757fea0b793056419bca3e9932eb2b0ceec90ef4813ea4c1e072c389eb28 \ + --hash=sha256:fe15238d3798788d00716637b3d4e7bb6bde18b26e5d08335a96e88564a36b6b + # via matplotlib +platformdirs==4.3.7 \ + --hash=sha256:a03875334331946f13c549dbd8f4bac7a13a50a895a0eb1e8c6a8ace80d40a94 \ + --hash=sha256:eb437d586b6a0986388f0d6f74aa0cde27b48d0e3d66843640bfb6bdcdb6e351 + # via esbonio +pluggy==1.5.0 \ + --hash=sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1 \ + --hash=sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669 + # via + # -r /home/maxi/.cache/bazel/_bazel_maxi/b64a2544752b0743f97f94438562b33d/external/score_python_basics~/requirements.txt + # pytest +pycparser==2.22 \ + --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ + 
--hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc + # via cffi +pydata-sphinx-theme==0.16.1 \ + --hash=sha256:225331e8ac4b32682c18fcac5a57a6f717c4e632cea5dd0e247b55155faeccde \ + --hash=sha256:a08b7f0b7f70387219dc659bff0893a7554d5eb39b59d3b8ef37b8401b7642d7 + # via -r src/requirements.in +pygithub==2.6.1 \ + --hash=sha256:6f2fa6d076ccae475f9fc392cc6cdbd54db985d4f69b8833a28397de75ed6ca3 \ + --hash=sha256:b5c035392991cca63959e9453286b41b54d83bf2de2daa7d7ff7e4312cebf3bf + # via -r src/requirements.in +pygls==1.3.1 \ + --hash=sha256:140edceefa0da0e9b3c533547c892a42a7d2fd9217ae848c330c53d266a55018 \ + --hash=sha256:6e00f11efc56321bdeb6eac04f6d86131f654c7d49124344a9ebb968da3dd91e + # via esbonio +pygments==2.19.1 \ + --hash=sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f \ + --hash=sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c + # via + # accessible-pygments + # pydata-sphinx-theme + # sphinx +pyjwt[crypto]==2.10.1 \ + --hash=sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953 \ + --hash=sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb + # via pygithub +pynacl==1.5.0 \ + --hash=sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858 \ + --hash=sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d \ + --hash=sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93 \ + --hash=sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1 \ + --hash=sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92 \ + --hash=sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff \ + --hash=sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba \ + --hash=sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394 \ + --hash=sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b \ + 
--hash=sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543 + # via pygithub +pyparsing==3.2.3 \ + --hash=sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf \ + --hash=sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be + # via matplotlib +pyspellchecker==0.8.2 \ + --hash=sha256:2b026be14a162ba810bdda8e5454c56e364f42d3b9e14aeff31706e5ebcdc78f \ + --hash=sha256:4fee22e1859c5153c3bc3953ac3041bf07d4541520b7e01901e955062022290a + # via esbonio +pytest==8.3.5 \ + --hash=sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820 \ + --hash=sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845 + # via -r /home/maxi/.cache/bazel/_bazel_maxi/b64a2544752b0743f97f94438562b33d/external/score_python_basics~/requirements.txt +python-dateutil==2.9.0.post0 \ + --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ + --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 + # via matplotlib +referencing==0.36.2 \ + --hash=sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa \ + --hash=sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0 + # via + # jsonschema + # jsonschema-specifications +requests==2.32.3 \ + --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ + --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 + # via + # pygithub + # requests-file + # sphinx + # sphinx-needs +requests-file==2.1.0 \ + --hash=sha256:0f549a3f3b0699415ac04d167e9cb39bccfb730cb832b4d20be3d9867356e658 \ + --hash=sha256:cf270de5a4c5874e84599fc5778303d496c10ae5e870bfa378818f35d21bda5c + # via sphinx-needs +roman-numerals-py==3.1.0 \ + --hash=sha256:9da2ad2fb670bcf24e81070ceb3be72f6c11c440d73bd579fbeca1e9f330954c \ + --hash=sha256:be4bf804f083a4ce001b5eb7e3c0862479d10f94c936f6c4e5f250aa5ff5bd2d + # via sphinx +rpds-py==0.24.0 \ + 
--hash=sha256:0047638c3aa0dbcd0ab99ed1e549bbf0e142c9ecc173b6492868432d8989a046 \ + --hash=sha256:006f4342fe729a368c6df36578d7a348c7c716be1da0a1a0f86e3021f8e98724 \ + --hash=sha256:041f00419e1da7a03c46042453598479f45be3d787eb837af382bfc169c0db33 \ + --hash=sha256:04ecf5c1ff4d589987b4d9882872f80ba13da7d42427234fce8f22efb43133bc \ + --hash=sha256:04f2b712a2206e13800a8136b07aaedc23af3facab84918e7aa89e4be0260032 \ + --hash=sha256:0aeb3329c1721c43c58cae274d7d2ca85c1690d89485d9c63a006cb79a85771a \ + --hash=sha256:0e374c0ce0ca82e5b67cd61fb964077d40ec177dd2c4eda67dba130de09085c7 \ + --hash=sha256:0f00c16e089282ad68a3820fd0c831c35d3194b7cdc31d6e469511d9bffc535c \ + --hash=sha256:174e46569968ddbbeb8a806d9922f17cd2b524aa753b468f35b97ff9c19cb718 \ + --hash=sha256:1b221c2457d92a1fb3c97bee9095c874144d196f47c038462ae6e4a14436f7bc \ + --hash=sha256:208b3a70a98cf3710e97cabdc308a51cd4f28aa6e7bb11de3d56cd8b74bab98d \ + --hash=sha256:20f2712bd1cc26a3cc16c5a1bfee9ed1abc33d4cdf1aabd297fe0eb724df4272 \ + --hash=sha256:24795c099453e3721fda5d8ddd45f5dfcc8e5a547ce7b8e9da06fecc3832e26f \ + --hash=sha256:2a0f156e9509cee987283abd2296ec816225145a13ed0391df8f71bf1d789e2d \ + --hash=sha256:2b2356688e5d958c4d5cb964af865bea84db29971d3e563fb78e46e20fe1848b \ + --hash=sha256:2c13777ecdbbba2077670285dd1fe50828c8742f6a4119dbef6f83ea13ad10fb \ + --hash=sha256:2d3ee4615df36ab8eb16c2507b11e764dcc11fd350bbf4da16d09cda11fcedef \ + --hash=sha256:2d53747da70a4e4b17f559569d5f9506420966083a31c5fbd84e764461c4444b \ + --hash=sha256:32bab0a56eac685828e00cc2f5d1200c548f8bc11f2e44abf311d6b548ce2e45 \ + --hash=sha256:34d90ad8c045df9a4259c47d2e16a3f21fdb396665c94520dbfe8766e62187a4 \ + --hash=sha256:369d9c6d4c714e36d4a03957b4783217a3ccd1e222cdd67d464a3a479fc17796 \ + --hash=sha256:3a55fc10fdcbf1a4bd3c018eea422c52cf08700cf99c28b5cb10fe97ab77a0d3 \ + --hash=sha256:3d2d8e4508e15fc05b31285c4b00ddf2e0eb94259c2dc896771966a163122a0c \ + --hash=sha256:3fab5f4a2c64a8fb64fc13b3d139848817a64d467dd6ed60dcdd6b479e7febc9 \ + 
--hash=sha256:43dba99f00f1d37b2a0265a259592d05fcc8e7c19d140fe51c6e6f16faabeb1f \ + --hash=sha256:44d51febb7a114293ffd56c6cf4736cb31cd68c0fddd6aa303ed09ea5a48e029 \ + --hash=sha256:493fe54318bed7d124ce272fc36adbf59d46729659b2c792e87c3b95649cdee9 \ + --hash=sha256:4b28e5122829181de1898c2c97f81c0b3246d49f585f22743a1246420bb8d399 \ + --hash=sha256:4cd031e63bc5f05bdcda120646a0d32f6d729486d0067f09d79c8db5368f4586 \ + --hash=sha256:528927e63a70b4d5f3f5ccc1fa988a35456eb5d15f804d276709c33fc2f19bda \ + --hash=sha256:564c96b6076a98215af52f55efa90d8419cc2ef45d99e314fddefe816bc24f91 \ + --hash=sha256:5db385bacd0c43f24be92b60c857cf760b7f10d8234f4bd4be67b5b20a7c0b6b \ + --hash=sha256:5ef877fa3bbfb40b388a5ae1cb00636a624690dcb9a29a65267054c9ea86d88a \ + --hash=sha256:5f6e3cec44ba05ee5cbdebe92d052f69b63ae792e7d05f1020ac5e964394080c \ + --hash=sha256:5fc13b44de6419d1e7a7e592a4885b323fbc2f46e1f22151e3a8ed3b8b920405 \ + --hash=sha256:60748789e028d2a46fc1c70750454f83c6bdd0d05db50f5ae83e2db500b34da5 \ + --hash=sha256:60d9b630c8025b9458a9d114e3af579a2c54bd32df601c4581bd054e85258143 \ + --hash=sha256:619ca56a5468f933d940e1bf431c6f4e13bef8e688698b067ae68eb4f9b30e3a \ + --hash=sha256:630d3d8ea77eabd6cbcd2ea712e1c5cecb5b558d39547ac988351195db433f6c \ + --hash=sha256:63981feca3f110ed132fd217bf7768ee8ed738a55549883628ee3da75bb9cb78 \ + --hash=sha256:66420986c9afff67ef0c5d1e4cdc2d0e5262f53ad11e4f90e5e22448df485bf0 \ + --hash=sha256:675269d407a257b8c00a6b58205b72eec8231656506c56fd429d924ca00bb350 \ + --hash=sha256:6a4a535013aeeef13c5532f802708cecae8d66c282babb5cd916379b72110cf7 \ + --hash=sha256:6a727fd083009bc83eb83d6950f0c32b3c94c8b80a9b667c87f4bd1274ca30ba \ + --hash=sha256:6e1daf5bf6c2be39654beae83ee6b9a12347cb5aced9a29eecf12a2d25fff664 \ + --hash=sha256:6eea559077d29486c68218178ea946263b87f1c41ae7f996b1f30a983c476a5a \ + --hash=sha256:75a810b7664c17f24bf2ffd7f92416c00ec84b49bb68e6a0d93e542406336b56 \ + --hash=sha256:772cc1b2cd963e7e17e6cc55fe0371fb9c704d63e44cacec7b9b7f523b78919e \ + 
--hash=sha256:78884d155fd15d9f64f5d6124b486f3d3f7fd7cd71a78e9670a0f6f6ca06fb2d \ + --hash=sha256:79e8d804c2ccd618417e96720ad5cd076a86fa3f8cb310ea386a3e6229bae7d1 \ + --hash=sha256:7e80d375134ddb04231a53800503752093dbb65dad8dabacce2c84cccc78e964 \ + --hash=sha256:8097b3422d020ff1c44effc40ae58e67d93e60d540a65649d2cdaf9466030791 \ + --hash=sha256:8205ee14463248d3349131bb8099efe15cd3ce83b8ef3ace63c7e976998e7124 \ + --hash=sha256:8212ff58ac6dfde49946bea57474a386cca3f7706fc72c25b772b9ca4af6b79e \ + --hash=sha256:823e74ab6fbaa028ec89615ff6acb409e90ff45580c45920d4dfdddb069f2120 \ + --hash=sha256:84e0566f15cf4d769dade9b366b7b87c959be472c92dffb70462dd0844d7cbad \ + --hash=sha256:896c41007931217a343eff197c34513c154267636c8056fb409eafd494c3dcdc \ + --hash=sha256:8aa362811ccdc1f8dadcc916c6d47e554169ab79559319ae9fae7d7752d0d60c \ + --hash=sha256:8b3b397eefecec8e8e39fa65c630ef70a24b09141a6f9fc17b3c3a50bed6b50e \ + --hash=sha256:8ebc7e65ca4b111d928b669713865f021b7773350eeac4a31d3e70144297baba \ + --hash=sha256:9168764133fd919f8dcca2ead66de0105f4ef5659cbb4fa044f7014bed9a1797 \ + --hash=sha256:921ae54f9ecba3b6325df425cf72c074cd469dea843fb5743a26ca7fb2ccb149 \ + --hash=sha256:92558d37d872e808944c3c96d0423b8604879a3d1c86fdad508d7ed91ea547d5 \ + --hash=sha256:951cc481c0c395c4a08639a469d53b7d4afa252529a085418b82a6b43c45c240 \ + --hash=sha256:998c01b8e71cf051c28f5d6f1187abbdf5cf45fc0efce5da6c06447cba997034 \ + --hash=sha256:9abc80fe8c1f87218db116016de575a7998ab1629078c90840e8d11ab423ee25 \ + --hash=sha256:9be4f99bee42ac107870c61dfdb294d912bf81c3c6d45538aad7aecab468b6b7 \ + --hash=sha256:9c39438c55983d48f4bb3487734d040e22dad200dab22c41e331cee145e7a50d \ + --hash=sha256:9d7e8ce990ae17dda686f7e82fd41a055c668e13ddcf058e7fb5e9da20b57793 \ + --hash=sha256:9ea7f4174d2e4194289cb0c4e172d83e79a6404297ff95f2875cf9ac9bced8ba \ + --hash=sha256:a18fc371e900a21d7392517c6f60fe859e802547309e94313cd8181ad9db004d \ + --hash=sha256:a36b452abbf29f68527cf52e181fced56685731c86b52e852053e38d8b60bc8d \ + 
--hash=sha256:a5b66d1b201cc71bc3081bc2f1fc36b0c1f268b773e03bbc39066651b9e18391 \ + --hash=sha256:a824d2c7a703ba6daaca848f9c3d5cb93af0505be505de70e7e66829affd676e \ + --hash=sha256:a88c0d17d039333a41d9bf4616bd062f0bd7aa0edeb6cafe00a2fc2a804e944f \ + --hash=sha256:aa6800adc8204ce898c8a424303969b7aa6a5e4ad2789c13f8648739830323b7 \ + --hash=sha256:aad911555286884be1e427ef0dc0ba3929e6821cbeca2194b13dc415a462c7fd \ + --hash=sha256:afc6e35f344490faa8276b5f2f7cbf71f88bc2cda4328e00553bd451728c571f \ + --hash=sha256:b9a4df06c35465ef4d81799999bba810c68d29972bf1c31db61bfdb81dd9d5bb \ + --hash=sha256:bb2954155bb8f63bb19d56d80e5e5320b61d71084617ed89efedb861a684baea \ + --hash=sha256:bbc4362e06f950c62cad3d4abf1191021b2ffaf0b31ac230fbf0526453eee75e \ + --hash=sha256:c0145295ca415668420ad142ee42189f78d27af806fcf1f32a18e51d47dd2052 \ + --hash=sha256:c30ff468163a48535ee7e9bf21bd14c7a81147c0e58a36c1078289a8ca7af0bd \ + --hash=sha256:c347a20d79cedc0a7bd51c4d4b7dbc613ca4e65a756b5c3e57ec84bd43505b47 \ + --hash=sha256:c43583ea8517ed2e780a345dd9960896afc1327e8cf3ac8239c167530397440d \ + --hash=sha256:c61a2cb0085c8783906b2f8b1f16a7e65777823c7f4d0a6aaffe26dc0d358dd9 \ + --hash=sha256:c9ca89938dff18828a328af41ffdf3902405a19f4131c88e22e776a8e228c5a8 \ + --hash=sha256:cc31e13ce212e14a539d430428cd365e74f8b2d534f8bc22dd4c9c55b277b875 \ + --hash=sha256:cdabcd3beb2a6dca7027007473d8ef1c3b053347c76f685f5f060a00327b8b65 \ + --hash=sha256:cf86f72d705fc2ef776bb7dd9e5fbba79d7e1f3e258bf9377f8204ad0fc1c51e \ + --hash=sha256:d09dc82af2d3c17e7dd17120b202a79b578d79f2b5424bda209d9966efeed114 \ + --hash=sha256:d3aa13bdf38630da298f2e0d77aca967b200b8cc1473ea05248f6c5e9c9bdb44 \ + --hash=sha256:d69d003296df4840bd445a5d15fa5b6ff6ac40496f956a221c4d1f6f7b4bc4d9 \ + --hash=sha256:d6e109a454412ab82979c5b1b3aee0604eca4bbf9a02693bb9df027af2bfa91a \ + --hash=sha256:d8551e733626afec514b5d15befabea0dd70a343a9f23322860c4f16a9430205 \ + --hash=sha256:d8754d872a5dfc3c5bf9c0e059e8107451364a30d9fd50f1f1a85c4fb9481164 \ + 
--hash=sha256:d8f9a6e7fd5434817526815f09ea27f2746c4a51ee11bb3439065f5fc754db58 \ + --hash=sha256:dbcbb6db5582ea33ce46a5d20a5793134b5365110d84df4e30b9d37c6fd40ad3 \ + --hash=sha256:e0f3ef95795efcd3b2ec3fe0a5bcfb5dadf5e3996ea2117427e524d4fbf309c6 \ + --hash=sha256:e13ae74a8a3a0c2f22f450f773e35f893484fcfacb00bb4344a7e0f4f48e1f97 \ + --hash=sha256:e274f62cbd274359eff63e5c7e7274c913e8e09620f6a57aae66744b3df046d6 \ + --hash=sha256:e838bf2bb0b91ee67bf2b889a1a841e5ecac06dd7a2b1ef4e6151e2ce155c7ae \ + --hash=sha256:e8acd55bd5b071156bae57b555f5d33697998752673b9de554dd82f5b5352727 \ + --hash=sha256:e8e5ab32cf9eb3647450bc74eb201b27c185d3857276162c101c0f8c6374e098 \ + --hash=sha256:ebcb786b9ff30b994d5969213a8430cbb984cdd7ea9fd6df06663194bd3c450c \ + --hash=sha256:ebea2821cdb5f9fef44933617be76185b80150632736f3d76e54829ab4a3b4d1 \ + --hash=sha256:ed0ef550042a8dbcd657dfb284a8ee00f0ba269d3f2286b0493b15a5694f9fe8 \ + --hash=sha256:eda5c1e2a715a4cbbca2d6d304988460942551e4e5e3b7457b50943cd741626d \ + --hash=sha256:f5c0ed12926dec1dfe7d645333ea59cf93f4d07750986a586f511c0bc61fe103 \ + --hash=sha256:f6016bd950be4dcd047b7475fdf55fb1e1f59fc7403f387be0e8123e4a576d30 \ + --hash=sha256:f9e0057a509e096e47c87f753136c9b10d7a91842d8042c2ee6866899a717c0d \ + --hash=sha256:fc1c892b1ec1f8cbd5da8de287577b455e388d9c328ad592eabbdcb6fc93bee5 \ + --hash=sha256:fc2c1e1b00f88317d9de6b2c2b39b012ebbfe35fe5e7bef980fd2a91f6100a07 \ + --hash=sha256:fd822f019ccccd75c832deb7aa040bb02d70a92eb15a2f16c7987b7ad4ee8d83 + # via + # jsonschema + # referencing +ruamel-yaml==0.18.10 \ + --hash=sha256:20c86ab29ac2153f80a428e1254a8adf686d3383df04490514ca3b79a362db58 \ + --hash=sha256:30f22513ab2301b3d2b577adc121c6471f28734d3d9728581245f1e76468b4f1 + # via -r src/requirements.in +ruamel-yaml-clib==0.2.12 \ + --hash=sha256:040ae85536960525ea62868b642bdb0c2cc6021c9f9d507810c0c604e66f5a7b \ + --hash=sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4 \ + 
--hash=sha256:0b7e75b4965e1d4690e93021adfcecccbca7d61c7bddd8e22406ef2ff20d74ef \ + --hash=sha256:11f891336688faf5156a36293a9c362bdc7c88f03a8a027c2c1d8e0bcde998e5 \ + --hash=sha256:1492a6051dab8d912fc2adeef0e8c72216b24d57bd896ea607cb90bb0c4981d3 \ + --hash=sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632 \ + --hash=sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6 \ + --hash=sha256:2c59aa6170b990d8d2719323e628aaf36f3bfbc1c26279c0eeeb24d05d2d11c7 \ + --hash=sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680 \ + --hash=sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf \ + --hash=sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da \ + --hash=sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6 \ + --hash=sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a \ + --hash=sha256:4f6f3eac23941b32afccc23081e1f50612bdbe4e982012ef4f5797986828cd01 \ + --hash=sha256:5a0e060aace4c24dcaf71023bbd7d42674e3b230f7e7b97317baf1e953e5b519 \ + --hash=sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6 \ + --hash=sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f \ + --hash=sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd \ + --hash=sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2 \ + --hash=sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52 \ + --hash=sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd \ + --hash=sha256:943f32bc9dedb3abff9879edc134901df92cfce2c3d5c9348f172f62eb2d771d \ + --hash=sha256:95c3829bb364fdb8e0332c9931ecf57d9be3519241323c5274bd82f709cebc0c \ + --hash=sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6 \ + --hash=sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb \ + --hash=sha256:a52d48f4e7bf9005e8f0a89209bf9a73f7190ddf0489eee5eb51377385f59f2a \ + 
--hash=sha256:a606ef75a60ecf3d924613892cc603b154178ee25abb3055db5062da811fd969 \ + --hash=sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28 \ + --hash=sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d \ + --hash=sha256:bb43a269eb827806502c7c8efb7ae7e9e9d0573257a46e8e952f4d4caba4f31e \ + --hash=sha256:bc5f1e1c28e966d61d2519f2a3d451ba989f9ea0f2307de7bc45baa526de9e45 \ + --hash=sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4 \ + --hash=sha256:beffaed67936fbbeffd10966a4eb53c402fafd3d6833770516bf7314bc6ffa12 \ + --hash=sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31 \ + --hash=sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642 \ + --hash=sha256:d84318609196d6bd6da0edfa25cedfbabd8dbde5140a0a23af29ad4b8f91fb1e \ + --hash=sha256:d85252669dc32f98ebcd5d36768f5d4faeaeaa2d655ac0473be490ecdae3c285 \ + --hash=sha256:e143ada795c341b56de9418c58d028989093ee611aa27ffb9b7f609c00d813ed \ + --hash=sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1 \ + --hash=sha256:e2f1c3765db32be59d18ab3953f43ab62a761327aafc1594a2a1fbe038b8b8a7 \ + --hash=sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3 \ + --hash=sha256:e7e3736715fbf53e9be2a79eb4db68e4ed857017344d697e8b9749444ae57475 \ + --hash=sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5 \ + --hash=sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76 \ + --hash=sha256:fc4b630cd3fa2cf7fce38afa91d7cfe844a9f75d7f0f36393fa98815e911d987 \ + --hash=sha256:fd5415dded15c3822597455bc02bcd66e81ef8b7a48cb71a33628fc9fdde39df + # via ruamel-yaml +six==1.17.0 \ + --hash=sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274 \ + --hash=sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81 + # via python-dateutil +sniffio==1.3.1 \ + --hash=sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2 \ + 
--hash=sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc + # via anyio +snowballstemmer==2.2.0 \ + --hash=sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1 \ + --hash=sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a + # via sphinx +soupsieve==2.7 \ + --hash=sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4 \ + --hash=sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a + # via beautifulsoup4 +sphinx==8.2.3 \ + --hash=sha256:398ad29dee7f63a75888314e9424d40f52ce5a6a87ae88e7071e80af296ec348 \ + --hash=sha256:4405915165f13521d875a8c29c8970800a0141c14cc5416a38feca4ea5d9b9c3 + # via + # -r src/requirements.in + # esbonio + # pydata-sphinx-theme + # sphinx-autobuild + # sphinx-data-viewer + # sphinx-design + # sphinx-needs + # sphinxcontrib-jquery + # sphinxcontrib-plantuml +sphinx-autobuild==2024.10.3 \ + --hash=sha256:158e16c36f9d633e613c9aaf81c19b0fc458ca78b112533b20dafcda430d60fa \ + --hash=sha256:248150f8f333e825107b6d4b86113ab28fa51750e5f9ae63b59dc339be951fb1 + # via -r src/requirements.in +sphinx-data-viewer==0.1.5 \ + --hash=sha256:a7d5e58613562bb745380bfe61ca8b69997998167fd6fa9aea55606c9a4b17e4 \ + --hash=sha256:b74b1d304c505c464d07c7b225ed0d84ea02dcc88bc1c49cdad7c2275fbbdad4 + # via sphinx-needs +sphinx-design==0.6.1 \ + --hash=sha256:b11f37db1a802a183d61b159d9a202314d4d2fe29c163437001324fe2f19549c \ + --hash=sha256:b44eea3719386d04d765c1a8257caca2b3e6f8421d7b3a5e742c0fd45f84e632 + # via -r src/requirements.in +sphinx-needs[plotting]==5.1.0 \ + --hash=sha256:23a0ca1dfe733a0a58e884b59ce53a8b63a530f0ac87ae5ab0d40f05f853fbe7 \ + --hash=sha256:7adf3763478e91171146918d8af4a22aa0fc062a73856f1ebeb6822a62cbe215 + # via -r src/requirements.in +sphinxcontrib-applehelp==2.0.0 \ + --hash=sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1 \ + --hash=sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5 + # via sphinx 
+sphinxcontrib-devhelp==2.0.0 \ + --hash=sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad \ + --hash=sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2 + # via sphinx +sphinxcontrib-htmlhelp==2.1.0 \ + --hash=sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8 \ + --hash=sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9 + # via sphinx +sphinxcontrib-jquery==4.1 \ + --hash=sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a \ + --hash=sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae + # via sphinx-needs +sphinxcontrib-jsmath==1.0.1 \ + --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ + --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 + # via sphinx +sphinxcontrib-plantuml==0.30 \ + --hash=sha256:2a1266ca43bddf44640ae44107003df4490de2b3c3154a0d627cfb63e9a169bf + # via -r src/requirements.in +sphinxcontrib-qthelp==2.0.0 \ + --hash=sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab \ + --hash=sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb + # via sphinx +sphinxcontrib-serializinghtml==2.0.0 \ + --hash=sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331 \ + --hash=sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d + # via sphinx +starlette==0.46.2 \ + --hash=sha256:595633ce89f8ffa71a015caed34a5b2dc1c0cdb3f0f1fbd1e69339cf2abeec35 \ + --hash=sha256:7f7361f34eed179294600af672f565727419830b54b7b084efe44bb82d2fccd5 + # via sphinx-autobuild +typing-extensions==4.13.2 \ + --hash=sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c \ + --hash=sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef + # via + # anyio + # beautifulsoup4 + # pydata-sphinx-theme + # pygithub + # referencing +urllib3==2.4.0 \ + 
--hash=sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466 \ + --hash=sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813 + # via + # pygithub + # requests +uvicorn==0.34.2 \ + --hash=sha256:0e929828f6186353a80b58ea719861d2629d766293b6d19baf086ba31d4f3328 \ + --hash=sha256:deb49af569084536d269fe0a6d67e3754f104cf03aba7c11c40f01aadf33c403 + # via sphinx-autobuild +watchfiles==1.0.5 \ + --hash=sha256:0125f91f70e0732a9f8ee01e49515c35d38ba48db507a50c5bdcad9503af5827 \ + --hash=sha256:0a04059f4923ce4e856b4b4e5e783a70f49d9663d22a4c3b3298165996d1377f \ + --hash=sha256:0b289572c33a0deae62daa57e44a25b99b783e5f7aed81b314232b3d3c81a11d \ + --hash=sha256:10f6ae86d5cb647bf58f9f655fcf577f713915a5d69057a0371bc257e2553234 \ + --hash=sha256:13bb21f8ba3248386337c9fa51c528868e6c34a707f729ab041c846d52a0c69a \ + --hash=sha256:15ac96dd567ad6c71c71f7b2c658cb22b7734901546cd50a475128ab557593ca \ + --hash=sha256:18b3bd29954bc4abeeb4e9d9cf0b30227f0f206c86657674f544cb032296acd5 \ + --hash=sha256:1909e0a9cd95251b15bff4261de5dd7550885bd172e3536824bf1cf6b121e200 \ + --hash=sha256:1a2902ede862969077b97523987c38db28abbe09fb19866e711485d9fbf0d417 \ + --hash=sha256:1a7bac2bde1d661fb31f4d4e8e539e178774b76db3c2c17c4bb3e960a5de07a2 \ + --hash=sha256:237f9be419e977a0f8f6b2e7b0475ababe78ff1ab06822df95d914a945eac827 \ + --hash=sha256:266710eb6fddc1f5e51843c70e3bebfb0f5e77cf4f27129278c70554104d19ed \ + --hash=sha256:29c7fd632ccaf5517c16a5188e36f6612d6472ccf55382db6c7fe3fcccb7f59f \ + --hash=sha256:2b7a21715fb12274a71d335cff6c71fe7f676b293d322722fe708a9ec81d91f5 \ + --hash=sha256:2cfb371be97d4db374cba381b9f911dd35bb5f4c58faa7b8b7106c8853e5d225 \ + --hash=sha256:2cfcb3952350e95603f232a7a15f6c5f86c5375e46f0bd4ae70d43e3e063c13d \ + --hash=sha256:2f1fefb2e90e89959447bc0420fddd1e76f625784340d64a2f7d5983ef9ad246 \ + --hash=sha256:360a398c3a19672cf93527f7e8d8b60d8275119c5d900f2e184d32483117a705 \ + --hash=sha256:3e380c89983ce6e6fe2dd1e1921b9952fb4e6da882931abd1824c092ed495dec 
\ + --hash=sha256:4a8ec1e4e16e2d5bafc9ba82f7aaecfeec990ca7cd27e84fb6f191804ed2fcfc \ + --hash=sha256:4ab626da2fc1ac277bbf752446470b367f84b50295264d2d313e28dc4405d663 \ + --hash=sha256:4b6227351e11c57ae997d222e13f5b6f1f0700d84b8c52304e8675d33a808382 \ + --hash=sha256:554389562c29c2c182e3908b149095051f81d28c2fec79ad6c8997d7d63e0009 \ + --hash=sha256:5c40fe7dd9e5f81e0847b1ea64e1f5dd79dd61afbedb57759df06767ac719b40 \ + --hash=sha256:68b2dddba7a4e6151384e252a5632efcaa9bc5d1c4b567f3cb621306b2ca9f63 \ + --hash=sha256:7ee32c9a9bee4d0b7bd7cbeb53cb185cf0b622ac761efaa2eba84006c3b3a614 \ + --hash=sha256:830aa432ba5c491d52a15b51526c29e4a4b92bf4f92253787f9726fe01519487 \ + --hash=sha256:832ccc221927c860e7286c55c9b6ebcc0265d5e072f49c7f6456c7798d2b39aa \ + --hash=sha256:839ebd0df4a18c5b3c1b890145b5a3f5f64063c2a0d02b13c76d78fe5de34936 \ + --hash=sha256:852de68acd6212cd6d33edf21e6f9e56e5d98c6add46f48244bd479d97c967c6 \ + --hash=sha256:85fbb6102b3296926d0c62cfc9347f6237fb9400aecd0ba6bbda94cae15f2b3b \ + --hash=sha256:86c0df05b47a79d80351cd179893f2f9c1b1cae49d96e8b3290c7f4bd0ca0a92 \ + --hash=sha256:894342d61d355446d02cd3988a7326af344143eb33a2fd5d38482a92072d9563 \ + --hash=sha256:8c0db396e6003d99bb2d7232c957b5f0b5634bbd1b24e381a5afcc880f7373fb \ + --hash=sha256:8e637810586e6fe380c8bc1b3910accd7f1d3a9a7262c8a78d4c8fb3ba6a2b3d \ + --hash=sha256:9475b0093767e1475095f2aeb1d219fb9664081d403d1dff81342df8cd707034 \ + --hash=sha256:95cf944fcfc394c5f9de794ce581914900f82ff1f855326f25ebcf24d5397418 \ + --hash=sha256:974866e0db748ebf1eccab17862bc0f0303807ed9cda465d1324625b81293a18 \ + --hash=sha256:9848b21ae152fe79c10dd0197304ada8f7b586d3ebc3f27f43c506e5a52a863c \ + --hash=sha256:9f4571a783914feda92018ef3901dab8caf5b029325b5fe4558c074582815249 \ + --hash=sha256:a056c2f692d65bf1e99c41045e3bdcaea3cb9e6b5a53dcaf60a5f3bd95fc9763 \ + --hash=sha256:a0dbcb1c2d8f2ab6e0a81c6699b236932bd264d4cef1ac475858d16c403de74d \ + --hash=sha256:a16512051a822a416b0d477d5f8c0e67b67c1a20d9acecb0aafa3aa4d6e7d256 \ + 
--hash=sha256:a2014a2b18ad3ca53b1f6c23f8cd94a18ce930c1837bd891262c182640eb40a6 \ + --hash=sha256:a3904d88955fda461ea2531fcf6ef73584ca921415d5cfa44457a225f4a42bc1 \ + --hash=sha256:a74add8d7727e6404d5dc4dcd7fac65d4d82f95928bbee0cf5414c900e86773e \ + --hash=sha256:ab44e1580924d1ffd7b3938e02716d5ad190441965138b4aa1d1f31ea0877f04 \ + --hash=sha256:b551d4fb482fc57d852b4541f911ba28957d051c8776e79c3b4a51eb5e2a1b11 \ + --hash=sha256:b5eb568c2aa6018e26da9e6c86f3ec3fd958cee7f0311b35c2630fa4217d17f2 \ + --hash=sha256:b659576b950865fdad31fa491d31d37cf78b27113a7671d39f919828587b429b \ + --hash=sha256:b6e76ceb1dd18c8e29c73f47d41866972e891fc4cc7ba014f487def72c1cf096 \ + --hash=sha256:b7529b5dcc114679d43827d8c35a07c493ad6f083633d573d81c660abc5979e9 \ + --hash=sha256:b9dca99744991fc9850d18015c4f0438865414e50069670f5f7eee08340d8b40 \ + --hash=sha256:ba5552a1b07c8edbf197055bc9d518b8f0d98a1c6a73a293bc0726dce068ed01 \ + --hash=sha256:bfe0cbc787770e52a96c6fda6726ace75be7f840cb327e1b08d7d54eadc3bc85 \ + --hash=sha256:c0901429650652d3f0da90bad42bdafc1f9143ff3605633c455c999a2d786cac \ + --hash=sha256:cb1489f25b051a89fae574505cc26360c8e95e227a9500182a7fe0afcc500ce0 \ + --hash=sha256:cd47d063fbeabd4c6cae1d4bcaa38f0902f8dc5ed168072874ea11d0c7afc1ff \ + --hash=sha256:d363152c5e16b29d66cbde8fa614f9e313e6f94a8204eaab268db52231fe5358 \ + --hash=sha256:d5730f3aa35e646103b53389d5bc77edfbf578ab6dab2e005142b5b80a35ef25 \ + --hash=sha256:d6f9367b132078b2ceb8d066ff6c93a970a18c3029cea37bfd7b2d3dd2e5db8f \ + --hash=sha256:dfd6ae1c385ab481766b3c61c44aca2b3cd775f6f7c0fa93d979ddec853d29d5 \ + --hash=sha256:e0da39ff917af8b27a4bdc5a97ac577552a38aac0d260a859c1517ea3dc1a7c4 \ + --hash=sha256:ecf6cd9f83d7c023b1aba15d13f705ca7b7d38675c121f3cc4a6e25bd0857ee9 \ + --hash=sha256:ee0822ce1b8a14fe5a066f93edd20aada932acfe348bede8aa2149f1a4489512 \ + --hash=sha256:f2e55a9b162e06e3f862fb61e399fe9f05d908d019d87bf5b496a04ef18a970a \ + --hash=sha256:f436601594f15bf406518af922a89dcaab416568edb6f65c4e5bbbad1ea45c11 \ + 
--hash=sha256:f59b870db1f1ae5a9ac28245707d955c8721dd6565e7f411024fa374b5362d1d \ + --hash=sha256:fc533aa50664ebd6c628b2f30591956519462f5d27f951ed03d6c82b2dfd9965 \ + --hash=sha256:fe43139b2c0fdc4a14d4f8d5b5d967f7a2777fd3d38ecf5b1ec669b0d7e43c21 \ + --hash=sha256:fed1cd825158dcaae36acce7b2db33dcbfd12b30c34317a88b8ed80f0541cc57 + # via sphinx-autobuild +websockets==15.0.1 \ + --hash=sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2 \ + --hash=sha256:0a34631031a8f05657e8e90903e656959234f3a04552259458aac0b0f9ae6fd9 \ + --hash=sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5 \ + --hash=sha256:0c9e74d766f2818bb95f84c25be4dea09841ac0f734d1966f415e4edfc4ef1c3 \ + --hash=sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8 \ + --hash=sha256:0fdfe3e2a29e4db3659dbd5bbf04560cea53dd9610273917799f1cde46aa725e \ + --hash=sha256:1009ee0c7739c08a0cd59de430d6de452a55e42d6b522de7aa15e6f67db0b8e1 \ + --hash=sha256:1234d4ef35db82f5446dca8e35a7da7964d02c127b095e172e54397fb6a6c256 \ + --hash=sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85 \ + --hash=sha256:2034693ad3097d5355bfdacfffcbd3ef5694f9718ab7f29c29689a9eae841880 \ + --hash=sha256:21c1fa28a6a7e3cbdc171c694398b6df4744613ce9b36b1a498e816787e28123 \ + --hash=sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375 \ + --hash=sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065 \ + --hash=sha256:363c6f671b761efcb30608d24925a382497c12c506b51661883c3e22337265ed \ + --hash=sha256:39c1fec2c11dc8d89bba6b2bf1556af381611a173ac2b511cf7231622058af41 \ + --hash=sha256:3b1ac0d3e594bf121308112697cf4b32be538fb1444468fb0a6ae4feebc83411 \ + --hash=sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597 \ + --hash=sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f \ + --hash=sha256:3d00075aa65772e7ce9e990cab3ff1de702aa09be3940d1dc88d5abf1ab8a09c \ + 
--hash=sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3 \ + --hash=sha256:47819cea040f31d670cc8d324bb6435c6f133b8c7a19ec3d61634e62f8d8f9eb \ + --hash=sha256:47b099e1f4fbc95b701b6e85768e1fcdaf1630f3cbe4765fa216596f12310e2e \ + --hash=sha256:4a9fac8e469d04ce6c25bb2610dc535235bd4aa14996b4e6dbebf5e007eba5ee \ + --hash=sha256:4b826973a4a2ae47ba357e4e82fa44a463b8f168e1ca775ac64521442b19e87f \ + --hash=sha256:4c2529b320eb9e35af0fa3016c187dffb84a3ecc572bcee7c3ce302bfeba52bf \ + --hash=sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf \ + --hash=sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4 \ + --hash=sha256:5756779642579d902eed757b21b0164cd6fe338506a8083eb58af5c372e39d9a \ + --hash=sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665 \ + --hash=sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22 \ + --hash=sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675 \ + --hash=sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4 \ + --hash=sha256:5df592cd503496351d6dc14f7cdad49f268d8e618f80dce0cd5a36b93c3fc08d \ + --hash=sha256:5f4c04ead5aed67c8a1a20491d54cdfba5884507a48dd798ecaf13c74c4489f5 \ + --hash=sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65 \ + --hash=sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792 \ + --hash=sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57 \ + --hash=sha256:67f2b6de947f8c757db2db9c71527933ad0019737ec374a8a6be9a956786aaf9 \ + --hash=sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3 \ + --hash=sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151 \ + --hash=sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d \ + --hash=sha256:76d1f20b1c7a2fa82367e04982e708723ba0e7b8d43aa643d3dcd404d74f1475 \ + --hash=sha256:7f493881579c90fc262d9cdbaa05a6b54b3811c2f300766748db79f098db9940 \ + 
--hash=sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431 \ + --hash=sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee \ + --hash=sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413 \ + --hash=sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8 \ + --hash=sha256:a625e06551975f4b7ea7102bc43895b90742746797e2e14b70ed61c43a90f09b \ + --hash=sha256:abdc0c6c8c648b4805c5eacd131910d2a7f6455dfd3becab248ef108e89ab16a \ + --hash=sha256:ac017dd64572e5c3bd01939121e4d16cf30e5d7e110a119399cf3133b63ad054 \ + --hash=sha256:ac1e5c9054fe23226fb11e05a6e630837f074174c4c2f0fe442996112a6de4fb \ + --hash=sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205 \ + --hash=sha256:b359ed09954d7c18bbc1680f380c7301f92c60bf924171629c5db97febb12f04 \ + --hash=sha256:b7643a03db5c95c799b89b31c036d5f27eeb4d259c798e878d6937d71832b1e4 \ + --hash=sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa \ + --hash=sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9 \ + --hash=sha256:cad21560da69f4ce7658ca2cb83138fb4cf695a2ba3e475e0559e05991aa8122 \ + --hash=sha256:d08eb4c2b7d6c41da6ca0600c077e93f5adcfd979cd777d747e9ee624556da4b \ + --hash=sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905 \ + --hash=sha256:d591f8de75824cbb7acad4e05d2d710484f15f29d4a915092675ad3456f11770 \ + --hash=sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe \ + --hash=sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b \ + --hash=sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562 \ + --hash=sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561 \ + --hash=sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215 \ + --hash=sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931 \ + --hash=sha256:f29d80eb9a9263b8d109135351caf568cc3f80b9928bccde535c235de55c22d9 \ + 
--hash=sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f \ + --hash=sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7 + # via sphinx-autobuild +wrapt==1.17.2 \ + --hash=sha256:08e7ce672e35efa54c5024936e559469436f8b8096253404faeb54d2a878416f \ + --hash=sha256:0a6e821770cf99cc586d33833b2ff32faebdbe886bd6322395606cf55153246c \ + --hash=sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a \ + --hash=sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b \ + --hash=sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555 \ + --hash=sha256:1473400e5b2733e58b396a04eb7f35f541e1fb976d0c0724d0223dd607e0f74c \ + --hash=sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b \ + --hash=sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6 \ + --hash=sha256:1e1fe0e6ab7775fd842bc39e86f6dcfc4507ab0ffe206093e76d61cde37225c8 \ + --hash=sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662 \ + --hash=sha256:2696993ee1eebd20b8e4ee4356483c4cb696066ddc24bd70bcbb80fa56ff9061 \ + --hash=sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998 \ + --hash=sha256:36ccae62f64235cf8ddb682073a60519426fdd4725524ae38874adf72b5f2aeb \ + --hash=sha256:3cedbfa9c940fdad3e6e941db7138e26ce8aad38ab5fe9dcfadfed9db7a54e62 \ + --hash=sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984 \ + --hash=sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392 \ + --hash=sha256:4011d137b9955791f9084749cba9a367c68d50ab8d11d64c50ba1688c9b457f2 \ + --hash=sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306 \ + --hash=sha256:410a92fefd2e0e10d26210e1dfb4a876ddaf8439ef60d6434f21ef8d87efc5b7 \ + --hash=sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3 \ + --hash=sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9 \ + 
--hash=sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6 \ + --hash=sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192 \ + --hash=sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317 \ + --hash=sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f \ + --hash=sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda \ + --hash=sha256:582530701bff1dec6779efa00c516496968edd851fba224fbd86e46cc6b73563 \ + --hash=sha256:58455b79ec2661c3600e65c0a716955adc2410f7383755d537584b0de41b1d8a \ + --hash=sha256:58705da316756681ad3c9c73fd15499aa4d8c69f9fd38dc8a35e06c12468582f \ + --hash=sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d \ + --hash=sha256:5c803c401ea1c1c18de70a06a6f79fcc9c5acfc79133e9869e730ad7f8ad8ef9 \ + --hash=sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8 \ + --hash=sha256:612dff5db80beef9e649c6d803a8d50c409082f1fedc9dbcdfde2983b2025b82 \ + --hash=sha256:62c2caa1585c82b3f7a7ab56afef7b3602021d6da34fbc1cf234ff139fed3cd9 \ + --hash=sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845 \ + --hash=sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82 \ + --hash=sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125 \ + --hash=sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504 \ + --hash=sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b \ + --hash=sha256:80dd7db6a7cb57ffbc279c4394246414ec99537ae81ffd702443335a61dbf3a7 \ + --hash=sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc \ + --hash=sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6 \ + --hash=sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40 \ + --hash=sha256:91bd7d1773e64019f9288b7a5101f3ae50d3d8e6b1de7edee9c2ccc1d32f0c0a \ + --hash=sha256:95c658736ec15602da0ed73f312d410117723914a5c91a14ee4cdd72f1d790b3 \ + 
--hash=sha256:99039fa9e6306880572915728d7f6c24a86ec57b0a83f6b2491e1d8ab0235b9a \ + --hash=sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72 \ + --hash=sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681 \ + --hash=sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438 \ + --hash=sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae \ + --hash=sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2 \ + --hash=sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb \ + --hash=sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5 \ + --hash=sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a \ + --hash=sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3 \ + --hash=sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8 \ + --hash=sha256:b4e42a40a5e164cbfdb7b386c966a588b1047558a990981ace551ed7e12ca9c2 \ + --hash=sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22 \ + --hash=sha256:b60fb58b90c6d63779cb0c0c54eeb38941bae3ecf7a73c764c52c88c2dcb9d72 \ + --hash=sha256:b870b5df5b71d8c3359d21be8f0d6c485fa0ebdb6477dda51a1ea54a9b558061 \ + --hash=sha256:ba0f0eb61ef00ea10e00eb53a9129501f52385c44853dbd6c4ad3f403603083f \ + --hash=sha256:bb87745b2e6dc56361bfde481d5a378dc314b252a98d7dd19a651a3fa58f24a9 \ + --hash=sha256:bb90fb8bda722a1b9d48ac1e6c38f923ea757b3baf8ebd0c82e09c5c1a0e7a04 \ + --hash=sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98 \ + --hash=sha256:c86563182421896d73858e08e1db93afdd2b947a70064b813d515d66549e15f9 \ + --hash=sha256:c958bcfd59bacc2d0249dcfe575e71da54f9dcf4a8bdf89c4cb9a68a1170d73f \ + --hash=sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b \ + --hash=sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925 \ + --hash=sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6 \ + 
--hash=sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0 \ + --hash=sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9 \ + --hash=sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c \ + --hash=sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991 \ + --hash=sha256:ecc840861360ba9d176d413a5489b9a0aff6d6303d7e733e2c4623cfa26904a6 \ + --hash=sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000 \ + --hash=sha256:f393cda562f79828f38a819f4788641ac7c4085f30f1ce1a68672baa686482bb \ + --hash=sha256:f917c1180fdb8623c2b75a99192f4025e412597c50b2ac870f156de8fb101119 \ + --hash=sha256:fc78a84e2dfbc27afe4b2bd7c80c8db9bca75cc5b85df52bfe634596a1da846b \ + --hash=sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58 + # via deprecated diff --git a/src/requirements_lock.txt b/src/requirements_lock.txt deleted file mode 100644 index 6b5859b0..00000000 --- a/src/requirements_lock.txt +++ /dev/null @@ -1,1336 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.12 -# by the following command: -# -# bazel run //tooling:requirements.update -# ---extra-index-url https://pypi.org/simple/ - -accessible-pygments==0.0.5 \ - --hash=sha256:40918d3e6a2b619ad424cb91e556bd3bd8865443d9f22f1dcdf79e33c8046872 \ - --hash=sha256:88ae3211e68a1d0b011504b2ffc1691feafce124b845bd072ab6f9f66f34d4b7 - # via pydata-sphinx-theme -alabaster==1.0.0 \ - --hash=sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e \ - --hash=sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b - # via sphinx -anyio==4.8.0 \ - --hash=sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a \ - --hash=sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a - # via - # starlette - # watchfiles -attrs==24.3.0 \ - --hash=sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff \ - 
--hash=sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308 - # via - # cattrs - # jsonschema - # lsprotocol - # referencing -babel==2.16.0 \ - --hash=sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b \ - --hash=sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316 - # via - # pydata-sphinx-theme - # sphinx -beautifulsoup4==4.12.3 \ - --hash=sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051 \ - --hash=sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed - # via pydata-sphinx-theme -cattrs==24.1.2 \ - --hash=sha256:67c7495b760168d931a10233f979b28dc04daf853b30752246f4f8471c6d68d0 \ - --hash=sha256:8028cfe1ff5382df59dd36474a86e02d817b06eaf8af84555441bac915d2ef85 - # via - # lsprotocol - # pygls -certifi==2024.12.14 \ - --hash=sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56 \ - --hash=sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db - # via requests -cffi==1.17.1 \ - --hash=sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8 \ - --hash=sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2 \ - --hash=sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1 \ - --hash=sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15 \ - --hash=sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36 \ - --hash=sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824 \ - --hash=sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8 \ - --hash=sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36 \ - --hash=sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17 \ - --hash=sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf \ - --hash=sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc \ - 
--hash=sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3 \ - --hash=sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed \ - --hash=sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702 \ - --hash=sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1 \ - --hash=sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8 \ - --hash=sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903 \ - --hash=sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6 \ - --hash=sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d \ - --hash=sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b \ - --hash=sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e \ - --hash=sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be \ - --hash=sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c \ - --hash=sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683 \ - --hash=sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9 \ - --hash=sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c \ - --hash=sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8 \ - --hash=sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1 \ - --hash=sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4 \ - --hash=sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655 \ - --hash=sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67 \ - --hash=sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595 \ - --hash=sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0 \ - --hash=sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65 \ - --hash=sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41 \ - 
--hash=sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6 \ - --hash=sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401 \ - --hash=sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6 \ - --hash=sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3 \ - --hash=sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16 \ - --hash=sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93 \ - --hash=sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e \ - --hash=sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4 \ - --hash=sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964 \ - --hash=sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c \ - --hash=sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576 \ - --hash=sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0 \ - --hash=sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3 \ - --hash=sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662 \ - --hash=sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3 \ - --hash=sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff \ - --hash=sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5 \ - --hash=sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd \ - --hash=sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f \ - --hash=sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5 \ - --hash=sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14 \ - --hash=sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d \ - --hash=sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9 \ - --hash=sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7 \ - 
--hash=sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382 \ - --hash=sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a \ - --hash=sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e \ - --hash=sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a \ - --hash=sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4 \ - --hash=sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99 \ - --hash=sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87 \ - --hash=sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b - # via - # cryptography - # pynacl -charset-normalizer==3.4.1 \ - --hash=sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537 \ - --hash=sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa \ - --hash=sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a \ - --hash=sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294 \ - --hash=sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b \ - --hash=sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd \ - --hash=sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601 \ - --hash=sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd \ - --hash=sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4 \ - --hash=sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d \ - --hash=sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2 \ - --hash=sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313 \ - --hash=sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd \ - --hash=sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa \ - --hash=sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8 \ - 
--hash=sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1 \ - --hash=sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2 \ - --hash=sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496 \ - --hash=sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d \ - --hash=sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b \ - --hash=sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e \ - --hash=sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a \ - --hash=sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4 \ - --hash=sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca \ - --hash=sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78 \ - --hash=sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408 \ - --hash=sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5 \ - --hash=sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3 \ - --hash=sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f \ - --hash=sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a \ - --hash=sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765 \ - --hash=sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6 \ - --hash=sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146 \ - --hash=sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6 \ - --hash=sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9 \ - --hash=sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd \ - --hash=sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c \ - --hash=sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f \ - --hash=sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545 \ - 
--hash=sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176 \ - --hash=sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770 \ - --hash=sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824 \ - --hash=sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f \ - --hash=sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf \ - --hash=sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487 \ - --hash=sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d \ - --hash=sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd \ - --hash=sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b \ - --hash=sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534 \ - --hash=sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f \ - --hash=sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b \ - --hash=sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9 \ - --hash=sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd \ - --hash=sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125 \ - --hash=sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9 \ - --hash=sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de \ - --hash=sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11 \ - --hash=sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d \ - --hash=sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35 \ - --hash=sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f \ - --hash=sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda \ - --hash=sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7 \ - --hash=sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a \ - 
--hash=sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971 \ - --hash=sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8 \ - --hash=sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41 \ - --hash=sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d \ - --hash=sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f \ - --hash=sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757 \ - --hash=sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a \ - --hash=sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886 \ - --hash=sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77 \ - --hash=sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76 \ - --hash=sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247 \ - --hash=sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85 \ - --hash=sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb \ - --hash=sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7 \ - --hash=sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e \ - --hash=sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6 \ - --hash=sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037 \ - --hash=sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1 \ - --hash=sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e \ - --hash=sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807 \ - --hash=sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407 \ - --hash=sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c \ - --hash=sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12 \ - --hash=sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3 \ - 
--hash=sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089 \ - --hash=sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd \ - --hash=sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e \ - --hash=sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00 \ - --hash=sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616 - # via requests -click==8.1.8 \ - --hash=sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2 \ - --hash=sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a - # via uvicorn -colorama==0.4.6 \ - --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ - --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 - # via sphinx-autobuild -contourpy==1.3.1 \ - --hash=sha256:041b640d4ec01922083645a94bb3b2e777e6b626788f4095cf21abbe266413c1 \ - --hash=sha256:05e806338bfeaa006acbdeba0ad681a10be63b26e1b17317bfac3c5d98f36cda \ - --hash=sha256:08d9d449a61cf53033612cb368f3a1b26cd7835d9b8cd326647efe43bca7568d \ - --hash=sha256:0ffa84be8e0bd33410b17189f7164c3589c229ce5db85798076a3fa136d0e509 \ - --hash=sha256:113231fe3825ebf6f15eaa8bc1f5b0ddc19d42b733345eae0934cb291beb88b6 \ - --hash=sha256:14c102b0eab282427b662cb590f2e9340a9d91a1c297f48729431f2dcd16e14f \ - --hash=sha256:174e758c66bbc1c8576992cec9599ce8b6672b741b5d336b5c74e35ac382b18e \ - --hash=sha256:19c1555a6801c2f084c7ddc1c6e11f02eb6a6016ca1318dd5452ba3f613a1751 \ - --hash=sha256:19d40d37c1c3a4961b4619dd9d77b12124a453cc3d02bb31a07d58ef684d3d86 \ - --hash=sha256:1bf98051f1045b15c87868dbaea84f92408337d4f81d0e449ee41920ea121d3b \ - --hash=sha256:20914c8c973f41456337652a6eeca26d2148aa96dd7ac323b74516988bea89fc \ - --hash=sha256:287ccc248c9e0d0566934e7d606201abd74761b5703d804ff3df8935f523d546 \ - --hash=sha256:2ba94a401342fc0f8b948e57d977557fbf4d515f03c67682dd5c6191cb2d16ec \ - 
--hash=sha256:31c1b55c1f34f80557d3830d3dd93ba722ce7e33a0b472cba0ec3b6535684d8f \ - --hash=sha256:36987a15e8ace5f58d4d5da9dca82d498c2bbb28dff6e5d04fbfcc35a9cb3a82 \ - --hash=sha256:3a04ecd68acbd77fa2d39723ceca4c3197cb2969633836ced1bea14e219d077c \ - --hash=sha256:3e8b974d8db2c5610fb4e76307e265de0edb655ae8169e8b21f41807ccbeec4b \ - --hash=sha256:3ea9924d28fc5586bf0b42d15f590b10c224117e74409dd7a0be3b62b74a501c \ - --hash=sha256:4318af1c925fb9a4fb190559ef3eec206845f63e80fb603d47f2d6d67683901c \ - --hash=sha256:44a29502ca9c7b5ba389e620d44f2fbe792b1fb5734e8b931ad307071ec58c53 \ - --hash=sha256:47734d7073fb4590b4a40122b35917cd77be5722d80683b249dac1de266aac80 \ - --hash=sha256:4d76d5993a34ef3df5181ba3c92fabb93f1eaa5729504fb03423fcd9f3177242 \ - --hash=sha256:4dbbc03a40f916a8420e420d63e96a1258d3d1b58cbdfd8d1f07b49fcbd38e85 \ - --hash=sha256:500360b77259914f7805af7462e41f9cb7ca92ad38e9f94d6c8641b089338124 \ - --hash=sha256:523a8ee12edfa36f6d2a49407f705a6ef4c5098de4f498619787e272de93f2d5 \ - --hash=sha256:573abb30e0e05bf31ed067d2f82500ecfdaec15627a59d63ea2d95714790f5c2 \ - --hash=sha256:5b75aa69cb4d6f137b36f7eb2ace9280cfb60c55dc5f61c731fdf6f037f958a3 \ - --hash=sha256:61332c87493b00091423e747ea78200659dc09bdf7fd69edd5e98cef5d3e9a8d \ - --hash=sha256:805617228ba7e2cbbfb6c503858e626ab528ac2a32a04a2fe88ffaf6b02c32bc \ - --hash=sha256:841ad858cff65c2c04bf93875e384ccb82b654574a6d7f30453a04f04af71342 \ - --hash=sha256:89785bb2a1980c1bd87f0cb1517a71cde374776a5f150936b82580ae6ead44a1 \ - --hash=sha256:8eb96e79b9f3dcadbad2a3891672f81cdcab7f95b27f28f1c67d75f045b6b4f1 \ - --hash=sha256:974d8145f8ca354498005b5b981165b74a195abfae9a8129df3e56771961d595 \ - --hash=sha256:9ddeb796389dadcd884c7eb07bd14ef12408aaae358f0e2ae24114d797eede30 \ - --hash=sha256:a045f341a77b77e1c5de31e74e966537bba9f3c4099b35bf4c2e3939dd54cdab \ - --hash=sha256:a0cffcbede75c059f535725c1680dfb17b6ba8753f0c74b14e6a9c68c29d7ea3 \ - --hash=sha256:a761d9ccfc5e2ecd1bf05534eda382aa14c3e4f9205ba5b1684ecfe400716ef2 \ - 
--hash=sha256:a7895f46d47671fa7ceec40f31fae721da51ad34bdca0bee83e38870b1f47ffd \ - --hash=sha256:a9fa36448e6a3a1a9a2ba23c02012c43ed88905ec80163f2ffe2421c7192a5d7 \ - --hash=sha256:ab29962927945d89d9b293eabd0d59aea28d887d4f3be6c22deaefbb938a7277 \ - --hash=sha256:abbb49fb7dac584e5abc6636b7b2a7227111c4f771005853e7d25176daaf8453 \ - --hash=sha256:ac4578ac281983f63b400f7fe6c101bedc10651650eef012be1ccffcbacf3697 \ - --hash=sha256:adce39d67c0edf383647a3a007de0a45fd1b08dedaa5318404f1a73059c2512b \ - --hash=sha256:ade08d343436a94e633db932e7e8407fe7de8083967962b46bdfc1b0ced39454 \ - --hash=sha256:b2bdca22a27e35f16794cf585832e542123296b4687f9fd96822db6bae17bfc9 \ - --hash=sha256:b2f926efda994cdf3c8d3fdb40b9962f86edbc4457e739277b961eced3d0b4c1 \ - --hash=sha256:b457d6430833cee8e4b8e9b6f07aa1c161e5e0d52e118dc102c8f9bd7dd060d6 \ - --hash=sha256:c414fc1ed8ee1dbd5da626cf3710c6013d3d27456651d156711fa24f24bd1291 \ - --hash=sha256:cb76c1a154b83991a3cbbf0dfeb26ec2833ad56f95540b442c73950af2013750 \ - --hash=sha256:dfd97abd83335045a913e3bcc4a09c0ceadbe66580cf573fe961f4a825efa699 \ - --hash=sha256:e914a8cb05ce5c809dd0fe350cfbb4e881bde5e2a38dc04e3afe1b3e58bd158e \ - --hash=sha256:ece6df05e2c41bd46776fbc712e0996f7c94e0d0543af1656956d150c4ca7c81 \ - --hash=sha256:efa874e87e4a647fd2e4f514d5e91c7d493697127beb95e77d2f7561f6905bd9 \ - --hash=sha256:f611e628ef06670df83fce17805c344710ca5cde01edfdc72751311da8585375 - # via matplotlib -cryptography==44.0.2 \ - --hash=sha256:04abd71114848aa25edb28e225ab5f268096f44cf0127f3d36975bdf1bdf3390 \ - --hash=sha256:0529b1d5a0105dd3731fa65680b45ce49da4d8115ea76e9da77a875396727b41 \ - --hash=sha256:1bc312dfb7a6e5d66082c87c34c8a62176e684b6fe3d90fcfe1568de675e6688 \ - --hash=sha256:268e4e9b177c76d569e8a145a6939eca9a5fec658c932348598818acf31ae9a5 \ - --hash=sha256:29ecec49f3ba3f3849362854b7253a9f59799e3763b0c9d0826259a88efa02f1 \ - --hash=sha256:2bf7bf75f7df9715f810d1b038870309342bff3069c5bd8c6b96128cb158668d \ - 
--hash=sha256:3b721b8b4d948b218c88cb8c45a01793483821e709afe5f622861fc6182b20a7 \ - --hash=sha256:3c00b6b757b32ce0f62c574b78b939afab9eecaf597c4d624caca4f9e71e7843 \ - --hash=sha256:3dc62975e31617badc19a906481deacdeb80b4bb454394b4098e3f2525a488c5 \ - --hash=sha256:4973da6ca3db4405c54cd0b26d328be54c7747e89e284fcff166132eb7bccc9c \ - --hash=sha256:4e389622b6927d8133f314949a9812972711a111d577a5d1f4bee5e58736b80a \ - --hash=sha256:51e4de3af4ec3899d6d178a8c005226491c27c4ba84101bfb59c901e10ca9f79 \ - --hash=sha256:5f6f90b72d8ccadb9c6e311c775c8305381db88374c65fa1a68250aa8a9cb3a6 \ - --hash=sha256:6210c05941994290f3f7f175a4a57dbbb2afd9273657614c506d5976db061181 \ - --hash=sha256:6f101b1f780f7fc613d040ca4bdf835c6ef3b00e9bd7125a4255ec574c7916e4 \ - --hash=sha256:7bdcd82189759aba3816d1f729ce42ffded1ac304c151d0a8e89b9996ab863d5 \ - --hash=sha256:7ca25849404be2f8e4b3c59483d9d3c51298a22c1c61a0e84415104dacaf5562 \ - --hash=sha256:81276f0ea79a208d961c433a947029e1a15948966658cf6710bbabb60fcc2639 \ - --hash=sha256:8cadc6e3b5a1f144a039ea08a0bdb03a2a92e19c46be3285123d32029f40a922 \ - --hash=sha256:8e0ddd63e6bf1161800592c71ac794d3fb8001f2caebe0966e77c5234fa9efc3 \ - --hash=sha256:909c97ab43a9c0c0b0ada7a1281430e4e5ec0458e6d9244c0e821bbf152f061d \ - --hash=sha256:96e7a5e9d6e71f9f4fca8eebfd603f8e86c5225bb18eb621b2c1e50b290a9471 \ - --hash=sha256:9a1e657c0f4ea2a23304ee3f964db058c9e9e635cc7019c4aa21c330755ef6fd \ - --hash=sha256:9eb9d22b0a5d8fd9925a7764a054dca914000607dff201a24c791ff5c799e1fa \ - --hash=sha256:af4ff3e388f2fa7bff9f7f2b31b87d5651c45731d3e8cfa0944be43dff5cfbdb \ - --hash=sha256:b042d2a275c8cee83a4b7ae30c45a15e6a4baa65a179a0ec2d78ebb90e4f6699 \ - --hash=sha256:bc821e161ae88bfe8088d11bb39caf2916562e0a2dc7b6d56714a48b784ef0bb \ - --hash=sha256:c505d61b6176aaf982c5717ce04e87da5abc9a36a5b39ac03905c4aafe8de7aa \ - --hash=sha256:c63454aa261a0cf0c5b4718349629793e9e634993538db841165b3df74f37ec0 \ - --hash=sha256:c7362add18b416b69d58c910caa217f980c5ef39b23a38a0880dfd87bdf8cd23 \ - 
--hash=sha256:d03806036b4f89e3b13b6218fefea8d5312e450935b1a2d55f0524e2ed7c59d9 \ - --hash=sha256:d1b3031093a366ac767b3feb8bcddb596671b3aaff82d4050f984da0c248b615 \ - --hash=sha256:d1c3572526997b36f245a96a2b1713bf79ce99b271bbcf084beb6b9b075f29ea \ - --hash=sha256:efcfe97d1b3c79e486554efddeb8f6f53a4cdd4cf6086642784fa31fc384e1d7 \ - --hash=sha256:f514ef4cd14bb6fb484b4a60203e912cfcb64f2ab139e88c2274511514bf7308 - # via pyjwt -cycler==0.12.1 \ - --hash=sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30 \ - --hash=sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c - # via matplotlib -debugpy==1.8.12 \ - --hash=sha256:086b32e233e89a2740c1615c2f775c34ae951508b28b308681dbbb87bba97d06 \ - --hash=sha256:22a11c493c70413a01ed03f01c3c3a2fc4478fc6ee186e340487b2edcd6f4180 \ - --hash=sha256:274b6a2040349b5c9864e475284bce5bb062e63dce368a394b8cc865ae3b00c6 \ - --hash=sha256:2ae5df899732a6051b49ea2632a9ea67f929604fd2b036613a9f12bc3163b92d \ - --hash=sha256:36f4829839ef0afdfdd208bb54f4c3d0eea86106d719811681a8627ae2e53dd5 \ - --hash=sha256:39dfbb6fa09f12fae32639e3286112fc35ae976114f1f3d37375f3130a820969 \ - --hash=sha256:4703575b78dd697b294f8c65588dc86874ed787b7348c65da70cfc885efdf1e1 \ - --hash=sha256:4ad9a94d8f5c9b954e0e3b137cc64ef3f579d0df3c3698fe9c3734ee397e4abb \ - --hash=sha256:557cc55b51ab2f3371e238804ffc8510b6ef087673303890f57a24195d096e61 \ - --hash=sha256:5cc45235fefac57f52680902b7d197fb2f3650112379a6fa9aa1b1c1d3ed3f02 \ - --hash=sha256:646530b04f45c830ceae8e491ca1c9320a2d2f0efea3141487c82130aba70dce \ - --hash=sha256:696d8ae4dff4cbd06bf6b10d671e088b66669f110c7c4e18a44c43cf75ce966f \ - --hash=sha256:7e94b643b19e8feb5215fa508aee531387494bf668b2eca27fa769ea11d9f498 \ - --hash=sha256:88a77f422f31f170c4b7e9ca58eae2a6c8e04da54121900651dfa8e66c29901a \ - --hash=sha256:898fba72b81a654e74412a67c7e0a81e89723cfe2a3ea6fcd3feaa3395138ca9 \ - --hash=sha256:9649eced17a98ce816756ce50433b2dd85dfa7bc92ceb60579d68c053f98dff9 \ - 
--hash=sha256:9af40506a59450f1315168d47a970db1a65aaab5df3833ac389d2899a5d63b3f \ - --hash=sha256:a28ed481d530e3138553be60991d2d61103ce6da254e51547b79549675f539b7 \ - --hash=sha256:a2ba7ffe58efeae5b8fad1165357edfe01464f9aef25e814e891ec690e7dd82a \ - --hash=sha256:a4042edef80364239f5b7b5764e55fd3ffd40c32cf6753da9bda4ff0ac466018 \ - --hash=sha256:b0232cd42506d0c94f9328aaf0d1d0785f90f87ae72d9759df7e5051be039738 \ - --hash=sha256:b202f591204023b3ce62ff9a47baa555dc00bb092219abf5caf0e3718ac20e7c \ - --hash=sha256:b5c6c967d02fee30e157ab5227706f965d5c37679c687b1e7bbc5d9e7128bd41 \ - --hash=sha256:cbbd4149c4fc5e7d508ece083e78c17442ee13b0e69bfa6bd63003e486770f45 \ - --hash=sha256:f30b03b0f27608a0b26c75f0bb8a880c752c0e0b01090551b9d87c7d783e2069 \ - --hash=sha256:fdb3c6d342825ea10b90e43d7f20f01535a72b3a1997850c0c3cefa5c27a4a2c - # via -r tooling/requirements.in -deprecated==1.2.18 \ - --hash=sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d \ - --hash=sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec - # via pygithub -docutils==0.21.2 \ - --hash=sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f \ - --hash=sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2 - # via - # pydata-sphinx-theme - # sphinx -esbonio==0.16.5 \ - --hash=sha256:04ba926e3603f7b1fde1abc690b47afd60749b64b1029b6bce8e1de0bb284921 \ - --hash=sha256:acab2e16c6cf8f7232fb04e0d48514ce50566516b1f6fcf669ccf2f247e8b10f - # via -r tooling/requirements.in -fonttools==4.56.0 \ - --hash=sha256:003548eadd674175510773f73fb2060bb46adb77c94854af3e0cc5bc70260049 \ - --hash=sha256:0073b62c3438cf0058488c002ea90489e8801d3a7af5ce5f7c05c105bee815c3 \ - --hash=sha256:1088182f68c303b50ca4dc0c82d42083d176cba37af1937e1a976a31149d4d14 \ - --hash=sha256:133bedb9a5c6376ad43e6518b7e2cd2f866a05b1998f14842631d5feb36b5786 \ - --hash=sha256:14a3e3e6b211660db54ca1ef7006401e4a694e53ffd4553ab9bc87ead01d0f05 \ - 
--hash=sha256:17f39313b649037f6c800209984a11fc256a6137cbe5487091c6c7187cae4685 \ - --hash=sha256:193b86e9f769320bc98ffdb42accafb5d0c8c49bd62884f1c0702bc598b3f0a2 \ - --hash=sha256:2d351275f73ebdd81dd5b09a8b8dac7a30f29a279d41e1c1192aedf1b6dced40 \ - --hash=sha256:300c310bb725b2bdb4f5fc7e148e190bd69f01925c7ab437b9c0ca3e1c7cd9ba \ - --hash=sha256:331954d002dbf5e704c7f3756028e21db07097c19722569983ba4d74df014000 \ - --hash=sha256:38b947de71748bab150259ee05a775e8a0635891568e9fdb3cdd7d0e0004e62f \ - --hash=sha256:3cf4f8d2a30b454ac682e12c61831dcb174950c406011418e739de592bbf8f76 \ - --hash=sha256:3fd3fccb7b9adaaecfa79ad51b759f2123e1aba97f857936ce044d4f029abd71 \ - --hash=sha256:442ad4122468d0e47d83bc59d0e91b474593a8c813839e1872e47c7a0cb53b10 \ - --hash=sha256:47b5e4680002ae1756d3ae3b6114e20aaee6cc5c69d1e5911f5ffffd3ee46c6b \ - --hash=sha256:53f5e9767978a4daf46f28e09dbeb7d010319924ae622f7b56174b777258e5ba \ - --hash=sha256:62b4c6802fa28e14dba010e75190e0e6228513573f1eeae57b11aa1a39b7e5b1 \ - --hash=sha256:62cc1253827d1e500fde9dbe981219fea4eb000fd63402283472d38e7d8aa1c6 \ - --hash=sha256:654ac4583e2d7c62aebc6fc6a4c6736f078f50300e18aa105d87ce8925cfac31 \ - --hash=sha256:661a8995d11e6e4914a44ca7d52d1286e2d9b154f685a4d1f69add8418961563 \ - --hash=sha256:6c1d38642ca2dddc7ae992ef5d026e5061a84f10ff2b906be5680ab089f55bb8 \ - --hash=sha256:6e81c1cc80c1d8bf071356cc3e0e25071fbba1c75afc48d41b26048980b3c771 \ - --hash=sha256:705837eae384fe21cee5e5746fd4f4b2f06f87544fa60f60740007e0aa600311 \ - --hash=sha256:7ef04bc7827adb7532be3d14462390dd71287644516af3f1e67f1e6ff9c6d6df \ - --hash=sha256:86b2a1013ef7a64d2e94606632683f07712045ed86d937c11ef4dde97319c086 \ - --hash=sha256:8d1613abd5af2f93c05867b3a3759a56e8bf97eb79b1da76b2bc10892f96ff16 \ - --hash=sha256:965d0209e6dbdb9416100123b6709cb13f5232e2d52d17ed37f9df0cc31e2b35 \ - --hash=sha256:96a4271f63a615bcb902b9f56de00ea225d6896052c49f20d0c91e9f43529a29 \ - --hash=sha256:9d94449ad0a5f2a8bf5d2f8d71d65088aee48adbe45f3c5f8e00e3ad861ed81a \ - 
--hash=sha256:9da650cb29bc098b8cfd15ef09009c914b35c7986c8fa9f08b51108b7bc393b4 \ - --hash=sha256:a05d1f07eb0a7d755fbe01fee1fd255c3a4d3730130cf1bfefb682d18fd2fcea \ - --hash=sha256:a114d1567e1a1586b7e9e7fc2ff686ca542a82769a296cef131e4c4af51e58f4 \ - --hash=sha256:a1af375734018951c31c0737d04a9d5fd0a353a0253db5fbed2ccd44eac62d8c \ - --hash=sha256:b23d30a2c0b992fb1c4f8ac9bfde44b5586d23457759b6cf9a787f1a35179ee0 \ - --hash=sha256:bc871904a53a9d4d908673c6faa15689874af1c7c5ac403a8e12d967ebd0c0dc \ - --hash=sha256:bce60f9a977c9d3d51de475af3f3581d9b36952e1f8fc19a1f2254f1dda7ce9c \ - --hash=sha256:bd9825822e7bb243f285013e653f6741954d8147427aaa0324a862cdbf4cbf62 \ - --hash=sha256:ca7962e8e5fc047cc4e59389959843aafbf7445b6c08c20d883e60ced46370a5 \ - --hash=sha256:d0cb73ccf7f6d7ca8d0bc7ea8ac0a5b84969a41c56ac3ac3422a24df2680546f \ - --hash=sha256:d54a45d30251f1d729e69e5b675f9a08b7da413391a1227781e2a297fa37f6d2 \ - --hash=sha256:d6ca96d1b61a707ba01a43318c9c40aaf11a5a568d1e61146fafa6ab20890793 \ - --hash=sha256:d6f195c14c01bd057bc9b4f70756b510e009c83c5ea67b25ced3e2c38e6ee6e9 \ - --hash=sha256:e2cad98c94833465bcf28f51c248aaf07ca022efc6a3eba750ad9c1e0256d278 \ - --hash=sha256:e2e993e8db36306cc3f1734edc8ea67906c55f98683d6fd34c3fc5593fdbba4c \ - --hash=sha256:e9270505a19361e81eecdbc2c251ad1e1a9a9c2ad75fa022ccdee533f55535dc \ - --hash=sha256:f20e2c0dfab82983a90f3d00703ac0960412036153e5023eed2b4641d7d5e692 \ - --hash=sha256:f36a0868f47b7566237640c026c65a86d09a3d9ca5df1cd039e30a1da73098a0 \ - --hash=sha256:f59746f7953f69cc3290ce2f971ab01056e55ddd0fb8b792c31a8acd7fee2d28 \ - --hash=sha256:fa760e5fe8b50cbc2d71884a1eff2ed2b95a005f02dda2fa431560db0ddd927f \ - --hash=sha256:ffda9b8cd9cb8b301cae2602ec62375b59e2e2108a117746f12215145e3f786c - # via matplotlib -h11==0.14.0 \ - --hash=sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d \ - --hash=sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761 - # via uvicorn -idna==3.10 \ - 
--hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ - --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 - # via - # anyio - # requests -imagesize==1.4.1 \ - --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \ - --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a - # via sphinx -iniconfig==2.1.0 \ - --hash=sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7 \ - --hash=sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760 - # via - # -r /home/maxi/.cache/bazel/_bazel_maxi/b15c1004cf2362548ae969eb240791a7/external/score_python_basics~/requirements.txt - # pytest -jinja2==3.1.5 \ - --hash=sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb \ - --hash=sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb - # via sphinx -jsonschema==4.23.0 \ - --hash=sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4 \ - --hash=sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566 - # via sphinx-needs -jsonschema-specifications==2024.10.1 \ - --hash=sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272 \ - --hash=sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf - # via jsonschema -kiwisolver==1.4.8 \ - --hash=sha256:01c3d31902c7db5fb6182832713d3b4122ad9317c2c5877d0539227d96bb2e50 \ - --hash=sha256:034d2c891f76bd3edbdb3ea11140d8510dca675443da7304205a2eaa45d8334c \ - --hash=sha256:085940635c62697391baafaaeabdf3dd7a6c3643577dde337f4d66eba021b2b8 \ - --hash=sha256:08e77738ed7538f036cd1170cbed942ef749137b1311fa2bbe2a7fda2f6bf3cc \ - --hash=sha256:111793b232842991be367ed828076b03d96202c19221b5ebab421ce8bcad016f \ - --hash=sha256:11e1022b524bd48ae56c9b4f9296bce77e15a2e42a502cceba602f804b32bb79 \ - --hash=sha256:151dffc4865e5fe6dafce5480fab84f950d14566c480c08a53c663a0020504b6 \ - 
--hash=sha256:16523b40aab60426ffdebe33ac374457cf62863e330a90a0383639ce14bf44b2 \ - --hash=sha256:1732e065704b47c9afca7ffa272f845300a4eb959276bf6970dc07265e73b605 \ - --hash=sha256:1c8ceb754339793c24aee1c9fb2485b5b1f5bb1c2c214ff13368431e51fc9a09 \ - --hash=sha256:23454ff084b07ac54ca8be535f4174170c1094a4cff78fbae4f73a4bcc0d4dab \ - --hash=sha256:23d5f023bdc8c7e54eb65f03ca5d5bb25b601eac4d7f1a042888a1f45237987e \ - --hash=sha256:257af1622860e51b1a9d0ce387bf5c2c4f36a90594cb9514f55b074bcc787cfc \ - --hash=sha256:286b18e86682fd2217a48fc6be6b0f20c1d0ed10958d8dc53453ad58d7be0bf8 \ - --hash=sha256:291331973c64bb9cce50bbe871fb2e675c4331dab4f31abe89f175ad7679a4d7 \ - --hash=sha256:2f0121b07b356a22fb0414cec4666bbe36fd6d0d759db3d37228f496ed67c880 \ - --hash=sha256:3452046c37c7692bd52b0e752b87954ef86ee2224e624ef7ce6cb21e8c41cc1b \ - --hash=sha256:34d142fba9c464bc3bbfeff15c96eab0e7310343d6aefb62a79d51421fcc5f1b \ - --hash=sha256:369b75d40abedc1da2c1f4de13f3482cb99e3237b38726710f4a793432b1c5ff \ - --hash=sha256:36dbbfd34838500a31f52c9786990d00150860e46cd5041386f217101350f0d3 \ - --hash=sha256:370fd2df41660ed4e26b8c9d6bbcad668fbe2560462cba151a721d49e5b6628c \ - --hash=sha256:3a96c0e790ee875d65e340ab383700e2b4891677b7fcd30a699146f9384a2bb0 \ - --hash=sha256:3b9b4d2892fefc886f30301cdd80debd8bb01ecdf165a449eb6e78f79f0fabd6 \ - --hash=sha256:3cd3bc628b25f74aedc6d374d5babf0166a92ff1317f46267f12d2ed54bc1d30 \ - --hash=sha256:3ddc373e0eef45b59197de815b1b28ef89ae3955e7722cc9710fb91cd77b7f47 \ - --hash=sha256:4191ee8dfd0be1c3666ccbac178c5a05d5f8d689bbe3fc92f3c4abec817f8fe0 \ - --hash=sha256:54a62808ac74b5e55a04a408cda6156f986cefbcf0ada13572696b507cc92fa1 \ - --hash=sha256:577facaa411c10421314598b50413aa1ebcf5126f704f1e5d72d7e4e9f020d90 \ - --hash=sha256:641f2ddf9358c80faa22e22eb4c9f54bd3f0e442e038728f500e3b978d00aa7d \ - --hash=sha256:65ea09a5a3faadd59c2ce96dc7bf0f364986a315949dc6374f04396b0d60e09b \ - --hash=sha256:68269e60ee4929893aad82666821aaacbd455284124817af45c11e50a4b42e3c \ - 
--hash=sha256:69b5637c3f316cab1ec1c9a12b8c5f4750a4c4b71af9157645bf32830e39c03a \ - --hash=sha256:7506488470f41169b86d8c9aeff587293f530a23a23a49d6bc64dab66bedc71e \ - --hash=sha256:768cade2c2df13db52475bd28d3a3fac8c9eff04b0e9e2fda0f3760f20b3f7fc \ - --hash=sha256:77e6f57a20b9bd4e1e2cedda4d0b986ebd0216236f0106e55c28aea3d3d69b16 \ - --hash=sha256:782bb86f245ec18009890e7cb8d13a5ef54dcf2ebe18ed65f795e635a96a1c6a \ - --hash=sha256:7a3ad337add5148cf51ce0b55642dc551c0b9d6248458a757f98796ca7348712 \ - --hash=sha256:7cd2785b9391f2873ad46088ed7599a6a71e762e1ea33e87514b1a441ed1da1c \ - --hash=sha256:7e9a60b50fe8b2ec6f448fe8d81b07e40141bfced7f896309df271a0b92f80f3 \ - --hash=sha256:84a2f830d42707de1d191b9490ac186bf7997a9495d4e9072210a1296345f7dc \ - --hash=sha256:856b269c4d28a5c0d5e6c1955ec36ebfd1651ac00e1ce0afa3e28da95293b561 \ - --hash=sha256:858416b7fb777a53f0c59ca08190ce24e9abbd3cffa18886a5781b8e3e26f65d \ - --hash=sha256:87b287251ad6488e95b4f0b4a79a6d04d3ea35fde6340eb38fbd1ca9cd35bbbc \ - --hash=sha256:88c6f252f6816a73b1f8c904f7bbe02fd67c09a69f7cb8a0eecdbf5ce78e63db \ - --hash=sha256:893f5525bb92d3d735878ec00f781b2de998333659507d29ea4466208df37bed \ - --hash=sha256:89c107041f7b27844179ea9c85d6da275aa55ecf28413e87624d033cf1f6b751 \ - --hash=sha256:918139571133f366e8362fa4a297aeba86c7816b7ecf0bc79168080e2bd79957 \ - --hash=sha256:99cea8b9dd34ff80c521aef46a1dddb0dcc0283cf18bde6d756f1e6f31772165 \ - --hash=sha256:a17b7c4f5b2c51bb68ed379defd608a03954a1845dfed7cc0117f1cc8a9b7fd2 \ - --hash=sha256:a3c44cb68861de93f0c4a8175fbaa691f0aa22550c331fefef02b618a9dcb476 \ - --hash=sha256:a4d3601908c560bdf880f07d94f31d734afd1bb71e96585cace0e38ef44c6d84 \ - --hash=sha256:a5ce1e481a74b44dd5e92ff03ea0cb371ae7a0268318e202be06c8f04f4f1246 \ - --hash=sha256:a66f60f8d0c87ab7f59b6fb80e642ebb29fec354a4dfad687ca4092ae69d04f4 \ - --hash=sha256:b21dbe165081142b1232a240fc6383fd32cdd877ca6cc89eab93e5f5883e1c25 \ - --hash=sha256:b47a465040146981dc9db8647981b8cb96366fbc8d452b031e4f8fdffec3f26d \ - 
--hash=sha256:b5773efa2be9eb9fcf5415ea3ab70fc785d598729fd6057bea38d539ead28271 \ - --hash=sha256:b83dc6769ddbc57613280118fb4ce3cd08899cc3369f7d0e0fab518a7cf37fdb \ - --hash=sha256:bade438f86e21d91e0cf5dd7c0ed00cda0f77c8c1616bd83f9fc157fa6760d31 \ - --hash=sha256:bcb1ebc3547619c3b58a39e2448af089ea2ef44b37988caf432447374941574e \ - --hash=sha256:be4816dc51c8a471749d664161b434912eee82f2ea66bd7628bd14583a833e85 \ - --hash=sha256:c07b29089b7ba090b6f1a669f1411f27221c3662b3a1b7010e67b59bb5a6f10b \ - --hash=sha256:c2b9a96e0f326205af81a15718a9073328df1173a2619a68553decb7097fd5d7 \ - --hash=sha256:c5020c83e8553f770cb3b5fc13faac40f17e0b205bd237aebd21d53d733adb03 \ - --hash=sha256:c72941acb7b67138f35b879bbe85be0f6c6a70cab78fe3ef6db9c024d9223e5b \ - --hash=sha256:c8bf637892dc6e6aad2bc6d4d69d08764166e5e3f69d469e55427b6ac001b19d \ - --hash=sha256:cc978a80a0db3a66d25767b03688f1147a69e6237175c0f4ffffaaedf744055a \ - --hash=sha256:ce2cf1e5688edcb727fdf7cd1bbd0b6416758996826a8be1d958f91880d0809d \ - --hash=sha256:d47b28d1dfe0793d5e96bce90835e17edf9a499b53969b03c6c47ea5985844c3 \ - --hash=sha256:d47cfb2650f0e103d4bf68b0b5804c68da97272c84bb12850d877a95c056bd67 \ - --hash=sha256:d5536185fce131780ebd809f8e623bf4030ce1b161353166c49a3c74c287897f \ - --hash=sha256:d561d2d8883e0819445cfe58d7ddd673e4015c3c57261d7bdcd3710d0d14005c \ - --hash=sha256:d6af5e8815fd02997cb6ad9bbed0ee1e60014438ee1a5c2444c96f87b8843502 \ - --hash=sha256:d6d6bd87df62c27d4185de7c511c6248040afae67028a8a22012b010bc7ad062 \ - --hash=sha256:dace81d28c787956bfbfbbfd72fdcef014f37d9b48830829e488fdb32b49d954 \ - --hash=sha256:e063ef9f89885a1d68dd8b2e18f5ead48653176d10a0e324e3b0030e3a69adeb \ - --hash=sha256:e7a019419b7b510f0f7c9dceff8c5eae2392037eae483a7f9162625233802b0a \ - --hash=sha256:eaa973f1e05131de5ff3569bbba7f5fd07ea0595d3870ed4a526d486fe57fa1b \ - --hash=sha256:eb158fe28ca0c29f2260cca8c43005329ad58452c36f0edf298204de32a9a3ed \ - --hash=sha256:ed33ca2002a779a2e20eeb06aea7721b6e47f2d4b8a8ece979d8ba9e2a167e34 \ - 
--hash=sha256:fc2ace710ba7c1dfd1a3b42530b62b9ceed115f19a1656adefce7b1782a37794 - # via matplotlib -lsprotocol==2023.0.1 \ - --hash=sha256:c75223c9e4af2f24272b14c6375787438279369236cd568f596d4951052a60f2 \ - --hash=sha256:cc5c15130d2403c18b734304339e51242d3018a05c4f7d0f198ad6e0cd21861d - # via pygls -markupsafe==3.0.2 \ - --hash=sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4 \ - --hash=sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30 \ - --hash=sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0 \ - --hash=sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9 \ - --hash=sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396 \ - --hash=sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13 \ - --hash=sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028 \ - --hash=sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca \ - --hash=sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557 \ - --hash=sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832 \ - --hash=sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0 \ - --hash=sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b \ - --hash=sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579 \ - --hash=sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a \ - --hash=sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c \ - --hash=sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff \ - --hash=sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c \ - --hash=sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22 \ - --hash=sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094 \ - --hash=sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb \ - 
--hash=sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e \ - --hash=sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5 \ - --hash=sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a \ - --hash=sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d \ - --hash=sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a \ - --hash=sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b \ - --hash=sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8 \ - --hash=sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225 \ - --hash=sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c \ - --hash=sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144 \ - --hash=sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f \ - --hash=sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87 \ - --hash=sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d \ - --hash=sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93 \ - --hash=sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf \ - --hash=sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158 \ - --hash=sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84 \ - --hash=sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb \ - --hash=sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48 \ - --hash=sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171 \ - --hash=sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c \ - --hash=sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6 \ - --hash=sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd \ - --hash=sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d \ - 
--hash=sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1 \ - --hash=sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d \ - --hash=sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca \ - --hash=sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a \ - --hash=sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29 \ - --hash=sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe \ - --hash=sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798 \ - --hash=sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c \ - --hash=sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8 \ - --hash=sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f \ - --hash=sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f \ - --hash=sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a \ - --hash=sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178 \ - --hash=sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0 \ - --hash=sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79 \ - --hash=sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430 \ - --hash=sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50 - # via jinja2 -matplotlib==3.10.0 \ - --hash=sha256:01d2b19f13aeec2e759414d3bfe19ddfb16b13a1250add08d46d5ff6f9be83c6 \ - --hash=sha256:12eaf48463b472c3c0f8dbacdbf906e573013df81a0ab82f0616ea4b11281908 \ - --hash=sha256:2c5829a5a1dd5a71f0e31e6e8bb449bc0ee9dbfb05ad28fc0c6b55101b3a4be6 \ - --hash=sha256:2fbbabc82fde51391c4da5006f965e36d86d95f6ee83fb594b279564a4c5d0d2 \ - --hash=sha256:3547d153d70233a8496859097ef0312212e2689cdf8d7ed764441c77604095ae \ - --hash=sha256:359f87baedb1f836ce307f0e850d12bb5f1936f70d035561f90d41d305fdacea \ - 
--hash=sha256:3b427392354d10975c1d0f4ee18aa5844640b512d5311ef32efd4dd7db106ede \ - --hash=sha256:4659665bc7c9b58f8c00317c3c2a299f7f258eeae5a5d56b4c64226fca2f7c59 \ - --hash=sha256:4673ff67a36152c48ddeaf1135e74ce0d4bce1bbf836ae40ed39c29edf7e2765 \ - --hash=sha256:503feb23bd8c8acc75541548a1d709c059b7184cde26314896e10a9f14df5f12 \ - --hash=sha256:5439f4c5a3e2e8eab18e2f8c3ef929772fd5641876db71f08127eed95ab64683 \ - --hash=sha256:5cdbaf909887373c3e094b0318d7ff230b2ad9dcb64da7ade654182872ab2593 \ - --hash=sha256:5e6c6461e1fc63df30bf6f80f0b93f5b6784299f721bc28530477acd51bfc3d1 \ - --hash=sha256:5fd41b0ec7ee45cd960a8e71aea7c946a28a0b8a4dcee47d2856b2af051f334c \ - --hash=sha256:607b16c8a73943df110f99ee2e940b8a1cbf9714b65307c040d422558397dac5 \ - --hash=sha256:7e8632baebb058555ac0cde75db885c61f1212e47723d63921879806b40bec6a \ - --hash=sha256:81713dd0d103b379de4516b861d964b1d789a144103277769238c732229d7f03 \ - --hash=sha256:845d96568ec873be63f25fa80e9e7fae4be854a66a7e2f0c8ccc99e94a8bd4ef \ - --hash=sha256:95b710fea129c76d30be72c3b38f330269363fbc6e570a5dd43580487380b5ff \ - --hash=sha256:96f2886f5c1e466f21cc41b70c5a0cd47bfa0015eb2d5793c88ebce658600e25 \ - --hash=sha256:994c07b9d9fe8d25951e3202a68c17900679274dadfc1248738dcfa1bd40d7f3 \ - --hash=sha256:9ade1003376731a971e398cc4ef38bb83ee8caf0aee46ac6daa4b0506db1fd06 \ - --hash=sha256:9b0558bae37f154fffda54d779a592bc97ca8b4701f1c710055b609a3bac44c8 \ - --hash=sha256:a2a43cbefe22d653ab34bb55d42384ed30f611bcbdea1f8d7f431011a2e1c62e \ - --hash=sha256:a994f29e968ca002b50982b27168addfd65f0105610b6be7fa515ca4b5307c95 \ - --hash=sha256:ad2e15300530c1a94c63cfa546e3b7864bd18ea2901317bae8bbf06a5ade6dcf \ - --hash=sha256:ae80dc3a4add4665cf2faa90138384a7ffe2a4e37c58d83e115b54287c4f06ef \ - --hash=sha256:b886d02a581b96704c9d1ffe55709e49b4d2d52709ccebc4be42db856e511278 \ - --hash=sha256:c40ba2eb08b3f5de88152c2333c58cee7edcead0a2a0d60fcafa116b17117adc \ - --hash=sha256:c55b20591ced744aa04e8c3e4b7543ea4d650b6c3c4b208c08a05b4010e8b442 \ - 
--hash=sha256:c58a9622d5dbeb668f407f35f4e6bfac34bb9ecdcc81680c04d0258169747997 \ - --hash=sha256:d44cb942af1693cced2604c33a9abcef6205601c445f6d0dc531d813af8a2f5a \ - --hash=sha256:d907fddb39f923d011875452ff1eca29a9e7f21722b873e90db32e5d8ddff12e \ - --hash=sha256:fd44fc75522f58612ec4a33958a7e5552562b7705b42ef1b4f8c0818e304a363 - # via sphinx-needs -numpy==2.2.3 \ - --hash=sha256:0391ea3622f5c51a2e29708877d56e3d276827ac5447d7f45e9bc4ade8923c52 \ - --hash=sha256:12c045f43b1d2915eca6b880a7f4a256f59d62df4f044788c8ba67709412128d \ - --hash=sha256:136553f123ee2951bfcfbc264acd34a2fc2f29d7cdf610ce7daf672b6fbaa693 \ - --hash=sha256:1402da8e0f435991983d0a9708b779f95a8c98c6b18a171b9f1be09005e64d9d \ - --hash=sha256:16372619ee728ed67a2a606a614f56d3eabc5b86f8b615c79d01957062826ca8 \ - --hash=sha256:1ad78ce7f18ce4e7df1b2ea4019b5817a2f6a8a16e34ff2775f646adce0a5027 \ - --hash=sha256:1b416af7d0ed3271cad0f0a0d0bee0911ed7eba23e66f8424d9f3dfcdcae1304 \ - --hash=sha256:1f45315b2dc58d8a3e7754fe4e38b6fce132dab284a92851e41b2b344f6441c5 \ - --hash=sha256:2376e317111daa0a6739e50f7ee2a6353f768489102308b0d98fcf4a04f7f3b5 \ - --hash=sha256:23c9f4edbf4c065fddb10a4f6e8b6a244342d95966a48820c614891e5059bb50 \ - --hash=sha256:246535e2f7496b7ac85deffe932896a3577be7af8fb7eebe7146444680297e9a \ - --hash=sha256:2e8da03bd561504d9b20e7a12340870dfc206c64ea59b4cfee9fceb95070ee94 \ - --hash=sha256:34c1b7e83f94f3b564b35f480f5652a47007dd91f7c839f404d03279cc8dd021 \ - --hash=sha256:39261798d208c3095ae4f7bc8eaeb3481ea8c6e03dc48028057d3cbdbdb8937e \ - --hash=sha256:3b787adbf04b0db1967798dba8da1af07e387908ed1553a0d6e74c084d1ceafe \ - --hash=sha256:3c2ec8a0f51d60f1e9c0c5ab116b7fc104b165ada3f6c58abf881cb2eb16044d \ - --hash=sha256:435e7a933b9fda8126130b046975a968cc2d833b505475e588339e09f7672890 \ - --hash=sha256:4d8335b5f1b6e2bce120d55fb17064b0262ff29b459e8493d1785c18ae2553b8 \ - --hash=sha256:4d9828d25fb246bedd31e04c9e75714a4087211ac348cb39c8c5f99dbb6683fe \ - 
--hash=sha256:52659ad2534427dffcc36aac76bebdd02b67e3b7a619ac67543bc9bfe6b7cdb1 \ - --hash=sha256:5266de33d4c3420973cf9ae3b98b54a2a6d53a559310e3236c4b2b06b9c07d4e \ - --hash=sha256:5521a06a3148686d9269c53b09f7d399a5725c47bbb5b35747e1cb76326b714b \ - --hash=sha256:596140185c7fa113563c67c2e894eabe0daea18cf8e33851738c19f70ce86aeb \ - --hash=sha256:5b732c8beef1d7bc2d9e476dbba20aaff6167bf205ad9aa8d30913859e82884b \ - --hash=sha256:5ebeb7ef54a7be11044c33a17b2624abe4307a75893c001a4800857956b41094 \ - --hash=sha256:712a64103d97c404e87d4d7c47fb0c7ff9acccc625ca2002848e0d53288b90ea \ - --hash=sha256:7678556eeb0152cbd1522b684dcd215250885993dd00adb93679ec3c0e6e091c \ - --hash=sha256:77974aba6c1bc26e3c205c2214f0d5b4305bdc719268b93e768ddb17e3fdd636 \ - --hash=sha256:783145835458e60fa97afac25d511d00a1eca94d4a8f3ace9fe2043003c678e4 \ - --hash=sha256:7bfdb06b395385ea9b91bf55c1adf1b297c9fdb531552845ff1d3ea6e40d5aba \ - --hash=sha256:7c8dde0ca2f77828815fd1aedfdf52e59071a5bae30dac3b4da2a335c672149a \ - --hash=sha256:83807d445817326b4bcdaaaf8e8e9f1753da04341eceec705c001ff342002e5d \ - --hash=sha256:87eed225fd415bbae787f93a457af7f5990b92a334e346f72070bf569b9c9c95 \ - --hash=sha256:8fb62fe3d206d72fe1cfe31c4a1106ad2b136fcc1606093aeab314f02930fdf2 \ - --hash=sha256:95172a21038c9b423e68be78fd0be6e1b97674cde269b76fe269a5dfa6fadf0b \ - --hash=sha256:9f48ba6f6c13e5e49f3d3efb1b51c8193215c42ac82610a04624906a9270be6f \ - --hash=sha256:a0c03b6be48aaf92525cccf393265e02773be8fd9551a2f9adbe7db1fa2b60f1 \ - --hash=sha256:a5ae282abe60a2db0fd407072aff4599c279bcd6e9a2475500fc35b00a57c532 \ - --hash=sha256:aee2512827ceb6d7f517c8b85aa5d3923afe8fc7a57d028cffcd522f1c6fd082 \ - --hash=sha256:c8b0451d2ec95010d1db8ca733afc41f659f425b7f608af569711097fd6014e2 \ - --hash=sha256:c9aa4496fd0e17e3843399f533d62857cef5900facf93e735ef65aa4bbc90ef0 \ - --hash=sha256:cbc6472e01952d3d1b2772b720428f8b90e2deea8344e854df22b0618e9cce71 \ - --hash=sha256:cdfe0c22692a30cd830c0755746473ae66c4a8f2e7bd508b35fb3b6a0813d787 \ - 
--hash=sha256:cf802eef1f0134afb81fef94020351be4fe1d6681aadf9c5e862af6602af64ef \ - --hash=sha256:d42f9c36d06440e34226e8bd65ff065ca0963aeecada587b937011efa02cdc9d \ - --hash=sha256:d5b47c440210c5d1d67e1cf434124e0b5c395eee1f5806fdd89b553ed1acd0a3 \ - --hash=sha256:d9b4a8148c57ecac25a16b0e11798cbe88edf5237b0df99973687dd866f05e1b \ - --hash=sha256:daf43a3d1ea699402c5a850e5313680ac355b4adc9770cd5cfc2940e7861f1bf \ - --hash=sha256:dbdc15f0c81611925f382dfa97b3bd0bc2c1ce19d4fe50482cb0ddc12ba30020 \ - --hash=sha256:deaa09cd492e24fd9b15296844c0ad1b3c976da7907e1c1ed3a0ad21dded6f76 \ - --hash=sha256:e37242f5324ffd9f7ba5acf96d774f9276aa62a966c0bad8dae692deebec7716 \ - --hash=sha256:ed2cf9ed4e8ebc3b754d398cba12f24359f018b416c380f577bbae112ca52fc9 \ - --hash=sha256:f2712c5179f40af9ddc8f6727f2bd910ea0eb50206daea75f58ddd9fa3f715bb \ - --hash=sha256:f4ca91d61a4bf61b0f2228f24bbfa6a9facd5f8af03759fe2a655c50ae2c6610 \ - --hash=sha256:f6b3dfc7661f8842babd8ea07e9897fe3d9b69a1d7e5fbb743e4160f9387833b - # via - # contourpy - # matplotlib -packaging==24.2 \ - --hash=sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759 \ - --hash=sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f - # via - # -r /home/maxi/.cache/bazel/_bazel_maxi/b15c1004cf2362548ae969eb240791a7/external/score_python_basics~/requirements.txt - # matplotlib - # pytest - # sphinx -pillow==11.1.0 \ - --hash=sha256:015c6e863faa4779251436db398ae75051469f7c903b043a48f078e437656f83 \ - --hash=sha256:0a2f91f8a8b367e7a57c6e91cd25af510168091fb89ec5146003e424e1558a96 \ - --hash=sha256:11633d58b6ee5733bde153a8dafd25e505ea3d32e261accd388827ee987baf65 \ - --hash=sha256:2062ffb1d36544d42fcaa277b069c88b01bb7298f4efa06731a7fd6cc290b81a \ - --hash=sha256:31eba6bbdd27dde97b0174ddf0297d7a9c3a507a8a1480e1e60ef914fe23d352 \ - --hash=sha256:3362c6ca227e65c54bf71a5f88b3d4565ff1bcbc63ae72c34b07bbb1cc59a43f \ - --hash=sha256:368da70808b36d73b4b390a8ffac11069f8a5c85f29eff1f1b01bcf3ef5b2a20 \ - 
--hash=sha256:36ba10b9cb413e7c7dfa3e189aba252deee0602c86c309799da5a74009ac7a1c \ - --hash=sha256:3764d53e09cdedd91bee65c2527815d315c6b90d7b8b79759cc48d7bf5d4f114 \ - --hash=sha256:3a5fe20a7b66e8135d7fd617b13272626a28278d0e578c98720d9ba4b2439d49 \ - --hash=sha256:3cdcdb0b896e981678eee140d882b70092dac83ac1cdf6b3a60e2216a73f2b91 \ - --hash=sha256:4637b88343166249fe8aa94e7c4a62a180c4b3898283bb5d3d2fd5fe10d8e4e0 \ - --hash=sha256:4db853948ce4e718f2fc775b75c37ba2efb6aaea41a1a5fc57f0af59eee774b2 \ - --hash=sha256:4dd43a78897793f60766563969442020e90eb7847463eca901e41ba186a7d4a5 \ - --hash=sha256:54251ef02a2309b5eec99d151ebf5c9904b77976c8abdcbce7891ed22df53884 \ - --hash=sha256:54ce1c9a16a9561b6d6d8cb30089ab1e5eb66918cb47d457bd996ef34182922e \ - --hash=sha256:593c5fd6be85da83656b93ffcccc2312d2d149d251e98588b14fbc288fd8909c \ - --hash=sha256:5bb94705aea800051a743aa4874bb1397d4695fb0583ba5e425ee0328757f196 \ - --hash=sha256:67cd427c68926108778a9005f2a04adbd5e67c442ed21d95389fe1d595458756 \ - --hash=sha256:70ca5ef3b3b1c4a0812b5c63c57c23b63e53bc38e758b37a951e5bc466449861 \ - --hash=sha256:73ddde795ee9b06257dac5ad42fcb07f3b9b813f8c1f7f870f402f4dc54b5269 \ - --hash=sha256:758e9d4ef15d3560214cddbc97b8ef3ef86ce04d62ddac17ad39ba87e89bd3b1 \ - --hash=sha256:7d33d2fae0e8b170b6a6c57400e077412240f6f5bb2a342cf1ee512a787942bb \ - --hash=sha256:7fdadc077553621911f27ce206ffcbec7d3f8d7b50e0da39f10997e8e2bb7f6a \ - --hash=sha256:8000376f139d4d38d6851eb149b321a52bb8893a88dae8ee7d95840431977081 \ - --hash=sha256:837060a8599b8f5d402e97197d4924f05a2e0d68756998345c829c33186217b1 \ - --hash=sha256:89dbdb3e6e9594d512780a5a1c42801879628b38e3efc7038094430844e271d8 \ - --hash=sha256:8c730dc3a83e5ac137fbc92dfcfe1511ce3b2b5d7578315b63dbbb76f7f51d90 \ - --hash=sha256:8e275ee4cb11c262bd108ab2081f750db2a1c0b8c12c1897f27b160c8bd57bbc \ - --hash=sha256:9044b5e4f7083f209c4e35aa5dd54b1dd5b112b108648f5c902ad586d4f945c5 \ - --hash=sha256:93a18841d09bcdd774dcdc308e4537e1f867b3dec059c131fde0327899734aa1 \ - 
--hash=sha256:9409c080586d1f683df3f184f20e36fb647f2e0bc3988094d4fd8c9f4eb1b3b3 \ - --hash=sha256:96f82000e12f23e4f29346e42702b6ed9a2f2fea34a740dd5ffffcc8c539eb35 \ - --hash=sha256:9aa9aeddeed452b2f616ff5507459e7bab436916ccb10961c4a382cd3e03f47f \ - --hash=sha256:9ee85f0696a17dd28fbcfceb59f9510aa71934b483d1f5601d1030c3c8304f3c \ - --hash=sha256:a07dba04c5e22824816b2615ad7a7484432d7f540e6fa86af60d2de57b0fcee2 \ - --hash=sha256:a3cd561ded2cf2bbae44d4605837221b987c216cff94f49dfeed63488bb228d2 \ - --hash=sha256:a697cd8ba0383bba3d2d3ada02b34ed268cb548b369943cd349007730c92bddf \ - --hash=sha256:a76da0a31da6fcae4210aa94fd779c65c75786bc9af06289cd1c184451ef7a65 \ - --hash=sha256:a85b653980faad27e88b141348707ceeef8a1186f75ecc600c395dcac19f385b \ - --hash=sha256:a8d65b38173085f24bc07f8b6c505cbb7418009fa1a1fcb111b1f4961814a442 \ - --hash=sha256:aa8dd43daa836b9a8128dbe7d923423e5ad86f50a7a14dc688194b7be5c0dea2 \ - --hash=sha256:ab8a209b8485d3db694fa97a896d96dd6533d63c22829043fd9de627060beade \ - --hash=sha256:abc56501c3fd148d60659aae0af6ddc149660469082859fa7b066a298bde9482 \ - --hash=sha256:ad5db5781c774ab9a9b2c4302bbf0c1014960a0a7be63278d13ae6fdf88126fe \ - --hash=sha256:ae98e14432d458fc3de11a77ccb3ae65ddce70f730e7c76140653048c71bfcbc \ - --hash=sha256:b20be51b37a75cc54c2c55def3fa2c65bb94ba859dde241cd0a4fd302de5ae0a \ - --hash=sha256:b523466b1a31d0dcef7c5be1f20b942919b62fd6e9a9be199d035509cbefc0ec \ - --hash=sha256:b5d658fbd9f0d6eea113aea286b21d3cd4d3fd978157cbf2447a6035916506d3 \ - --hash=sha256:b6123aa4a59d75f06e9dd3dac5bf8bc9aa383121bb3dd9a7a612e05eabc9961a \ - --hash=sha256:bd165131fd51697e22421d0e467997ad31621b74bfc0b75956608cb2906dda07 \ - --hash=sha256:bf902d7413c82a1bfa08b06a070876132a5ae6b2388e2712aab3a7cbc02205c6 \ - --hash=sha256:c12fc111ef090845de2bb15009372175d76ac99969bdf31e2ce9b42e4b8cd88f \ - --hash=sha256:c1eec9d950b6fe688edee07138993e54ee4ae634c51443cfb7c1e7613322718e \ - --hash=sha256:c640e5a06869c75994624551f45e5506e4256562ead981cce820d5ab39ae2192 \ - 
--hash=sha256:cc1331b6d5a6e144aeb5e626f4375f5b7ae9934ba620c0ac6b3e43d5e683a0f0 \ - --hash=sha256:cfd5cd998c2e36a862d0e27b2df63237e67273f2fc78f47445b14e73a810e7e6 \ - --hash=sha256:d3d8da4a631471dfaf94c10c85f5277b1f8e42ac42bade1ac67da4b4a7359b73 \ - --hash=sha256:d44ff19eea13ae4acdaaab0179fa68c0c6f2f45d66a4d8ec1eda7d6cecbcc15f \ - --hash=sha256:dd0052e9db3474df30433f83a71b9b23bd9e4ef1de13d92df21a52c0303b8ab6 \ - --hash=sha256:dd0e081319328928531df7a0e63621caf67652c8464303fd102141b785ef9547 \ - --hash=sha256:dda60aa465b861324e65a78c9f5cf0f4bc713e4309f83bc387be158b077963d9 \ - --hash=sha256:e06695e0326d05b06833b40b7ef477e475d0b1ba3a6d27da1bb48c23209bf457 \ - --hash=sha256:e1abe69aca89514737465752b4bcaf8016de61b3be1397a8fc260ba33321b3a8 \ - --hash=sha256:e267b0ed063341f3e60acd25c05200df4193e15a4a5807075cd71225a2386e26 \ - --hash=sha256:e5449ca63da169a2e6068dd0e2fcc8d91f9558aba89ff6d02121ca8ab11e79e5 \ - --hash=sha256:e63e4e5081de46517099dc30abe418122f54531a6ae2ebc8680bcd7096860eab \ - --hash=sha256:f189805c8be5ca5add39e6f899e6ce2ed824e65fb45f3c28cb2841911da19070 \ - --hash=sha256:f7955ecf5609dee9442cbface754f2c6e541d9e6eda87fad7f7a989b0bdb9d71 \ - --hash=sha256:f86d3a7a9af5d826744fabf4afd15b9dfef44fe69a98541f666f66fbb8d3fef9 \ - --hash=sha256:fbd43429d0d7ed6533b25fc993861b8fd512c42d04514a0dd6337fb3ccf22761 - # via matplotlib -platformdirs==4.3.6 \ - --hash=sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907 \ - --hash=sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb - # via esbonio -pluggy==1.5.0 \ - --hash=sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1 \ - --hash=sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669 - # via - # -r /home/maxi/.cache/bazel/_bazel_maxi/b15c1004cf2362548ae969eb240791a7/external/score_python_basics~/requirements.txt - # pytest -pycparser==2.22 \ - --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ - 
--hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc - # via cffi -pydata-sphinx-theme==0.16.1 \ - --hash=sha256:225331e8ac4b32682c18fcac5a57a6f717c4e632cea5dd0e247b55155faeccde \ - --hash=sha256:a08b7f0b7f70387219dc659bff0893a7554d5eb39b59d3b8ef37b8401b7642d7 - # via -r tooling/requirements.in -pygithub==2.6.1 \ - --hash=sha256:6f2fa6d076ccae475f9fc392cc6cdbd54db985d4f69b8833a28397de75ed6ca3 \ - --hash=sha256:b5c035392991cca63959e9453286b41b54d83bf2de2daa7d7ff7e4312cebf3bf - # via -r tooling/requirements.in -pygls==1.3.1 \ - --hash=sha256:140edceefa0da0e9b3c533547c892a42a7d2fd9217ae848c330c53d266a55018 \ - --hash=sha256:6e00f11efc56321bdeb6eac04f6d86131f654c7d49124344a9ebb968da3dd91e - # via esbonio -pygments==2.19.1 \ - --hash=sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f \ - --hash=sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c - # via - # accessible-pygments - # pydata-sphinx-theme - # sphinx -pyjwt[crypto]==2.10.1 \ - --hash=sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953 \ - --hash=sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb - # via pygithub -pynacl==1.5.0 \ - --hash=sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858 \ - --hash=sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d \ - --hash=sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93 \ - --hash=sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1 \ - --hash=sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92 \ - --hash=sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff \ - --hash=sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba \ - --hash=sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394 \ - --hash=sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b \ - 
--hash=sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543 - # via pygithub -pyparsing==3.2.1 \ - --hash=sha256:506ff4f4386c4cec0590ec19e6302d3aedb992fdc02c761e90416f158dacf8e1 \ - --hash=sha256:61980854fd66de3a90028d679a954d5f2623e83144b5afe5ee86f43d762e5f0a - # via matplotlib -pyspellchecker==0.8.2 \ - --hash=sha256:2b026be14a162ba810bdda8e5454c56e364f42d3b9e14aeff31706e5ebcdc78f \ - --hash=sha256:4fee22e1859c5153c3bc3953ac3041bf07d4541520b7e01901e955062022290a - # via esbonio -pytest==8.3.5 \ - --hash=sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820 \ - --hash=sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845 - # via -r /home/maxi/.cache/bazel/_bazel_maxi/b15c1004cf2362548ae969eb240791a7/external/score_python_basics~/requirements.txt -python-dateutil==2.9.0.post0 \ - --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ - --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 - # via matplotlib -referencing==0.36.1 \ - --hash=sha256:363d9c65f080d0d70bc41c721dce3c7f3e77fc09f269cd5c8813da18069a6794 \ - --hash=sha256:ca2e6492769e3602957e9b831b94211599d2aade9477f5d44110d2530cf9aade - # via - # jsonschema - # jsonschema-specifications -requests==2.32.3 \ - --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ - --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 - # via - # pygithub - # requests-file - # sphinx - # sphinx-needs -requests-file==2.1.0 \ - --hash=sha256:0f549a3f3b0699415ac04d167e9cb39bccfb730cb832b4d20be3d9867356e658 \ - --hash=sha256:cf270de5a4c5874e84599fc5778303d496c10ae5e870bfa378818f35d21bda5c - # via sphinx-needs -rpds-py==0.22.3 \ - --hash=sha256:009de23c9c9ee54bf11303a966edf4d9087cd43a6003672e6aa7def643d06518 \ - --hash=sha256:02fbb9c288ae08bcb34fb41d516d5eeb0455ac35b5512d03181d755d80810059 \ - --hash=sha256:0a0461200769ab3b9ab7e513f6013b7a97fdeee41c29b9db343f3c5a8e2b9e61 \ - 
--hash=sha256:0b09865a9abc0ddff4e50b5ef65467cd94176bf1e0004184eb915cbc10fc05c5 \ - --hash=sha256:0b8db6b5b2d4491ad5b6bdc2bc7c017eec108acbf4e6785f42a9eb0ba234f4c9 \ - --hash=sha256:0c150c7a61ed4a4f4955a96626574e9baf1adf772c2fb61ef6a5027e52803543 \ - --hash=sha256:0f3cec041684de9a4684b1572fe28c7267410e02450f4561700ca5a3bc6695a2 \ - --hash=sha256:1352ae4f7c717ae8cba93421a63373e582d19d55d2ee2cbb184344c82d2ae55a \ - --hash=sha256:177c7c0fce2855833819c98e43c262007f42ce86651ffbb84f37883308cb0e7d \ - --hash=sha256:1978d0021e943aae58b9b0b196fb4895a25cc53d3956b8e35e0b7682eefb6d56 \ - --hash=sha256:1a60bce91f81ddaac922a40bbb571a12c1070cb20ebd6d49c48e0b101d87300d \ - --hash=sha256:1aef18820ef3e4587ebe8b3bc9ba6e55892a6d7b93bac6d29d9f631a3b4befbd \ - --hash=sha256:1e9663daaf7a63ceccbbb8e3808fe90415b0757e2abddbfc2e06c857bf8c5e2b \ - --hash=sha256:20070c65396f7373f5df4005862fa162db5d25d56150bddd0b3e8214e8ef45b4 \ - --hash=sha256:214b7a953d73b5e87f0ebece4a32a5bd83c60a3ecc9d4ec8f1dca968a2d91e99 \ - --hash=sha256:22bebe05a9ffc70ebfa127efbc429bc26ec9e9b4ee4d15a740033efda515cf3d \ - --hash=sha256:24e8abb5878e250f2eb0d7859a8e561846f98910326d06c0d51381fed59357bd \ - --hash=sha256:26fd7cac7dd51011a245f29a2cc6489c4608b5a8ce8d75661bb4a1066c52dfbe \ - --hash=sha256:27b1d3b3915a99208fee9ab092b8184c420f2905b7d7feb4aeb5e4a9c509b8a1 \ - --hash=sha256:27e98004595899949bd7a7b34e91fa7c44d7a97c40fcaf1d874168bb652ec67e \ - --hash=sha256:2b8f60e1b739a74bab7e01fcbe3dddd4657ec685caa04681df9d562ef15b625f \ - --hash=sha256:2de29005e11637e7a2361fa151f780ff8eb2543a0da1413bb951e9f14b699ef3 \ - --hash=sha256:2e8b55d8517a2fda8d95cb45d62a5a8bbf9dd0ad39c5b25c8833efea07b880ca \ - --hash=sha256:2fa4331c200c2521512595253f5bb70858b90f750d39b8cbfd67465f8d1b596d \ - --hash=sha256:3445e07bf2e8ecfeef6ef67ac83de670358abf2996916039b16a218e3d95e97e \ - --hash=sha256:3453e8d41fe5f17d1f8e9c383a7473cd46a63661628ec58e07777c2fff7196dc \ - --hash=sha256:378753b4a4de2a7b34063d6f95ae81bfa7b15f2c1a04a9518e8644e81807ebea \ - 
--hash=sha256:3af6e48651c4e0d2d166dc1b033b7042ea3f871504b6805ba5f4fe31581d8d38 \ - --hash=sha256:3dfcbc95bd7992b16f3f7ba05af8a64ca694331bd24f9157b49dadeeb287493b \ - --hash=sha256:3f21f0495edea7fdbaaa87e633a8689cd285f8f4af5c869f27bc8074638ad69c \ - --hash=sha256:4041711832360a9b75cfb11b25a6a97c8fb49c07b8bd43d0d02b45d0b499a4ff \ - --hash=sha256:44d61b4b7d0c2c9ac019c314e52d7cbda0ae31078aabd0f22e583af3e0d79723 \ - --hash=sha256:4617e1915a539a0d9a9567795023de41a87106522ff83fbfaf1f6baf8e85437e \ - --hash=sha256:4b232061ca880db21fa14defe219840ad9b74b6158adb52ddf0e87bead9e8493 \ - --hash=sha256:5246b14ca64a8675e0a7161f7af68fe3e910e6b90542b4bfb5439ba752191df6 \ - --hash=sha256:5725dd9cc02068996d4438d397e255dcb1df776b7ceea3b9cb972bdb11260a83 \ - --hash=sha256:583f6a1993ca3369e0f80ba99d796d8e6b1a3a2a442dd4e1a79e652116413091 \ - --hash=sha256:59259dc58e57b10e7e18ce02c311804c10c5a793e6568f8af4dead03264584d1 \ - --hash=sha256:593eba61ba0c3baae5bc9be2f5232430453fb4432048de28399ca7376de9c627 \ - --hash=sha256:59f4a79c19232a5774aee369a0c296712ad0e77f24e62cad53160312b1c1eaa1 \ - --hash=sha256:5f0e260eaf54380380ac3808aa4ebe2d8ca28b9087cf411649f96bad6900c728 \ - --hash=sha256:62d9cfcf4948683a18a9aff0ab7e1474d407b7bab2ca03116109f8464698ab16 \ - --hash=sha256:64607d4cbf1b7e3c3c8a14948b99345eda0e161b852e122c6bb71aab6d1d798c \ - --hash=sha256:655ca44a831ecb238d124e0402d98f6212ac527a0ba6c55ca26f616604e60a45 \ - --hash=sha256:666ecce376999bf619756a24ce15bb14c5bfaf04bf00abc7e663ce17c3f34fe7 \ - --hash=sha256:68049202f67380ff9aa52f12e92b1c30115f32e6895cd7198fa2a7961621fc5a \ - --hash=sha256:69803198097467ee7282750acb507fba35ca22cc3b85f16cf45fb01cb9097730 \ - --hash=sha256:6c7b99ca52c2c1752b544e310101b98a659b720b21db00e65edca34483259967 \ - --hash=sha256:6dd9412824c4ce1aca56c47b0991e65bebb7ac3f4edccfd3f156150c96a7bf25 \ - --hash=sha256:70eb60b3ae9245ddea20f8a4190bd79c705a22f8028aaf8bbdebe4716c3fab24 \ - --hash=sha256:70fb28128acbfd264eda9bf47015537ba3fe86e40d046eb2963d75024be4d055 \ - 
--hash=sha256:7b2513ba235829860b13faa931f3b6846548021846ac808455301c23a101689d \ - --hash=sha256:7ef9d9da710be50ff6809fed8f1963fecdfecc8b86656cadfca3bc24289414b0 \ - --hash=sha256:81e69b0a0e2537f26d73b4e43ad7bc8c8efb39621639b4434b76a3de50c6966e \ - --hash=sha256:8633e471c6207a039eff6aa116e35f69f3156b3989ea3e2d755f7bc41754a4a7 \ - --hash=sha256:8bd7c8cfc0b8247c8799080fbff54e0b9619e17cdfeb0478ba7295d43f635d7c \ - --hash=sha256:9253fc214112405f0afa7db88739294295f0e08466987f1d70e29930262b4c8f \ - --hash=sha256:99b37292234e61325e7a5bb9689e55e48c3f5f603af88b1642666277a81f1fbd \ - --hash=sha256:9bd7228827ec7bb817089e2eb301d907c0d9827a9e558f22f762bb690b131652 \ - --hash=sha256:9beeb01d8c190d7581a4d59522cd3d4b6887040dcfc744af99aa59fef3e041a8 \ - --hash=sha256:a63cbdd98acef6570c62b92a1e43266f9e8b21e699c363c0fef13bd530799c11 \ - --hash=sha256:a76e42402542b1fae59798fab64432b2d015ab9d0c8c47ba7addddbaf7952333 \ - --hash=sha256:ac0a03221cdb5058ce0167ecc92a8c89e8d0decdc9e99a2ec23380793c4dcb96 \ - --hash=sha256:b0b4136a252cadfa1adb705bb81524eee47d9f6aab4f2ee4fa1e9d3cd4581f64 \ - --hash=sha256:b25bc607423935079e05619d7de556c91fb6adeae9d5f80868dde3468657994b \ - --hash=sha256:b3d504047aba448d70cf6fa22e06cb09f7cbd761939fdd47604f5e007675c24e \ - --hash=sha256:bb47271f60660803ad11f4c61b42242b8c1312a31c98c578f79ef9387bbde21c \ - --hash=sha256:bbb232860e3d03d544bc03ac57855cd82ddf19c7a07651a7c0fdb95e9efea8b9 \ - --hash=sha256:bc27863442d388870c1809a87507727b799c8460573cfbb6dc0eeaef5a11b5ec \ - --hash=sha256:bc51abd01f08117283c5ebf64844a35144a0843ff7b2983e0648e4d3d9f10dbb \ - --hash=sha256:be2eb3f2495ba669d2a985f9b426c1797b7d48d6963899276d22f23e33d47e37 \ - --hash=sha256:bf9db5488121b596dbfc6718c76092fda77b703c1f7533a226a5a9f65248f8ad \ - --hash=sha256:c58e2339def52ef6b71b8f36d13c3688ea23fa093353f3a4fee2556e62086ec9 \ - --hash=sha256:cfbc454a2880389dbb9b5b398e50d439e2e58669160f27b60e5eca11f68ae17c \ - --hash=sha256:cff63a0272fcd259dcc3be1657b07c929c466b067ceb1c20060e8d10af56f5bf \ - 
--hash=sha256:d115bffdd417c6d806ea9069237a4ae02f513b778e3789a359bc5856e0404cc4 \ - --hash=sha256:d20cfb4e099748ea39e6f7b16c91ab057989712d31761d3300d43134e26e165f \ - --hash=sha256:d48424e39c2611ee1b84ad0f44fb3b2b53d473e65de061e3f460fc0be5f1939d \ - --hash=sha256:e0fa2d4ec53dc51cf7d3bb22e0aa0143966119f42a0c3e4998293a3dd2856b09 \ - --hash=sha256:e32fee8ab45d3c2db6da19a5323bc3362237c8b653c70194414b892fd06a080d \ - --hash=sha256:e35ba67d65d49080e8e5a1dd40101fccdd9798adb9b050ff670b7d74fa41c566 \ - --hash=sha256:e3fb866d9932a3d7d0c82da76d816996d1667c44891bd861a0f97ba27e84fc74 \ - --hash=sha256:e61b02c3f7a1e0b75e20c3978f7135fd13cb6cf551bf4a6d29b999a88830a338 \ - --hash=sha256:e67ba3c290821343c192f7eae1d8fd5999ca2dc99994114643e2f2d3e6138b15 \ - --hash=sha256:e79dd39f1e8c3504be0607e5fc6e86bb60fe3584bec8b782578c3b0fde8d932c \ - --hash=sha256:e89391e6d60251560f0a8f4bd32137b077a80d9b7dbe6d5cab1cd80d2746f648 \ - --hash=sha256:ea7433ce7e4bfc3a85654aeb6747babe3f66eaf9a1d0c1e7a4435bbdf27fea84 \ - --hash=sha256:eaf16ae9ae519a0e237a0f528fd9f0197b9bb70f40263ee57ae53c2b8d48aeb3 \ - --hash=sha256:eb0c341fa71df5a4595f9501df4ac5abfb5a09580081dffbd1ddd4654e6e9123 \ - --hash=sha256:f276b245347e6e36526cbd4a266a417796fc531ddf391e43574cf6466c492520 \ - --hash=sha256:f47ad3d5f3258bd7058d2d506852217865afefe6153a36eb4b6928758041d831 \ - --hash=sha256:f56a6b404f74ab372da986d240e2e002769a7d7102cc73eb238a4f72eec5284e \ - --hash=sha256:f5cf2a0c2bdadf3791b5c205d55a37a54025c6e18a71c71f82bb536cf9a454bf \ - --hash=sha256:f5d36399a1b96e1a5fdc91e0522544580dbebeb1f77f27b2b0ab25559e103b8b \ - --hash=sha256:f60bd8423be1d9d833f230fdbccf8f57af322d96bcad6599e5a771b151398eb2 \ - --hash=sha256:f612463ac081803f243ff13cccc648578e2279295048f2a8d5eb430af2bae6e3 \ - --hash=sha256:f73d3fef726b3243a811121de45193c0ca75f6407fe66f3f4e183c983573e130 \ - --hash=sha256:f82a116a1d03628a8ace4859556fb39fd1424c933341a08ea3ed6de1edb0283b \ - --hash=sha256:fb0ba113b4983beac1a2eb16faffd76cb41e176bf58c4afe3e14b9c681f702de \ - 
--hash=sha256:fb4f868f712b2dd4bcc538b0a0c1f63a2b1d584c925e69a224d759e7070a12d5 \ - --hash=sha256:fb6116dfb8d1925cbdb52595560584db42a7f664617a1f7d7f6e32f138cdf37d \ - --hash=sha256:fda7cb070f442bf80b642cd56483b5548e43d366fe3f39b98e67cce780cded00 \ - --hash=sha256:feea821ee2a9273771bae61194004ee2fc33f8ec7db08117ef9147d4bbcbca8e - # via - # jsonschema - # referencing -ruamel-yaml==0.18.10 \ - --hash=sha256:20c86ab29ac2153f80a428e1254a8adf686d3383df04490514ca3b79a362db58 \ - --hash=sha256:30f22513ab2301b3d2b577adc121c6471f28734d3d9728581245f1e76468b4f1 - # via -r tooling/requirements.in -ruamel-yaml-clib==0.2.12 \ - --hash=sha256:040ae85536960525ea62868b642bdb0c2cc6021c9f9d507810c0c604e66f5a7b \ - --hash=sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4 \ - --hash=sha256:0b7e75b4965e1d4690e93021adfcecccbca7d61c7bddd8e22406ef2ff20d74ef \ - --hash=sha256:11f891336688faf5156a36293a9c362bdc7c88f03a8a027c2c1d8e0bcde998e5 \ - --hash=sha256:1492a6051dab8d912fc2adeef0e8c72216b24d57bd896ea607cb90bb0c4981d3 \ - --hash=sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632 \ - --hash=sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6 \ - --hash=sha256:2c59aa6170b990d8d2719323e628aaf36f3bfbc1c26279c0eeeb24d05d2d11c7 \ - --hash=sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680 \ - --hash=sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf \ - --hash=sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da \ - --hash=sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6 \ - --hash=sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a \ - --hash=sha256:4f6f3eac23941b32afccc23081e1f50612bdbe4e982012ef4f5797986828cd01 \ - --hash=sha256:5a0e060aace4c24dcaf71023bbd7d42674e3b230f7e7b97317baf1e953e5b519 \ - --hash=sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6 \ - 
--hash=sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f \ - --hash=sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd \ - --hash=sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2 \ - --hash=sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52 \ - --hash=sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd \ - --hash=sha256:943f32bc9dedb3abff9879edc134901df92cfce2c3d5c9348f172f62eb2d771d \ - --hash=sha256:95c3829bb364fdb8e0332c9931ecf57d9be3519241323c5274bd82f709cebc0c \ - --hash=sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6 \ - --hash=sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb \ - --hash=sha256:a52d48f4e7bf9005e8f0a89209bf9a73f7190ddf0489eee5eb51377385f59f2a \ - --hash=sha256:a606ef75a60ecf3d924613892cc603b154178ee25abb3055db5062da811fd969 \ - --hash=sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28 \ - --hash=sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d \ - --hash=sha256:bb43a269eb827806502c7c8efb7ae7e9e9d0573257a46e8e952f4d4caba4f31e \ - --hash=sha256:bc5f1e1c28e966d61d2519f2a3d451ba989f9ea0f2307de7bc45baa526de9e45 \ - --hash=sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4 \ - --hash=sha256:beffaed67936fbbeffd10966a4eb53c402fafd3d6833770516bf7314bc6ffa12 \ - --hash=sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31 \ - --hash=sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642 \ - --hash=sha256:d84318609196d6bd6da0edfa25cedfbabd8dbde5140a0a23af29ad4b8f91fb1e \ - --hash=sha256:d85252669dc32f98ebcd5d36768f5d4faeaeaa2d655ac0473be490ecdae3c285 \ - --hash=sha256:e143ada795c341b56de9418c58d028989093ee611aa27ffb9b7f609c00d813ed \ - --hash=sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1 \ - --hash=sha256:e2f1c3765db32be59d18ab3953f43ab62a761327aafc1594a2a1fbe038b8b8a7 \ - 
--hash=sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3 \ - --hash=sha256:e7e3736715fbf53e9be2a79eb4db68e4ed857017344d697e8b9749444ae57475 \ - --hash=sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5 \ - --hash=sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76 \ - --hash=sha256:fc4b630cd3fa2cf7fce38afa91d7cfe844a9f75d7f0f36393fa98815e911d987 \ - --hash=sha256:fd5415dded15c3822597455bc02bcd66e81ef8b7a48cb71a33628fc9fdde39df - # via ruamel-yaml -six==1.17.0 \ - --hash=sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274 \ - --hash=sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81 - # via python-dateutil -sniffio==1.3.1 \ - --hash=sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2 \ - --hash=sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc - # via anyio -snowballstemmer==2.2.0 \ - --hash=sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1 \ - --hash=sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a - # via sphinx -soupsieve==2.6 \ - --hash=sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb \ - --hash=sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9 - # via beautifulsoup4 -sphinx==8.1.3 \ - --hash=sha256:09719015511837b76bf6e03e42eb7595ac8c2e41eeb9c29c5b755c6b677992a2 \ - --hash=sha256:43c1911eecb0d3e161ad78611bc905d1ad0e523e4ddc202a58a821773dc4c927 - # via - # -r tooling/requirements.in - # esbonio - # pydata-sphinx-theme - # sphinx-autobuild - # sphinx-data-viewer - # sphinx-design - # sphinx-needs - # sphinxcontrib-jquery - # sphinxcontrib-plantuml -sphinx-autobuild==2024.10.3 \ - --hash=sha256:158e16c36f9d633e613c9aaf81c19b0fc458ca78b112533b20dafcda430d60fa \ - --hash=sha256:248150f8f333e825107b6d4b86113ab28fa51750e5f9ae63b59dc339be951fb1 - # via -r tooling/requirements.in -sphinx-data-viewer==0.1.5 \ - 
--hash=sha256:a7d5e58613562bb745380bfe61ca8b69997998167fd6fa9aea55606c9a4b17e4 \ - --hash=sha256:b74b1d304c505c464d07c7b225ed0d84ea02dcc88bc1c49cdad7c2275fbbdad4 - # via sphinx-needs -sphinx-design==0.6.1 \ - --hash=sha256:b11f37db1a802a183d61b159d9a202314d4d2fe29c163437001324fe2f19549c \ - --hash=sha256:b44eea3719386d04d765c1a8257caca2b3e6f8421d7b3a5e742c0fd45f84e632 - # via -r tooling/requirements.in -sphinx-needs[plotting]==4.2.0 \ - --hash=sha256:f1ae86afb3d1d3f3c5d8cecffe740ae03f32a908212b4471866dff1a0738b252 \ - --hash=sha256:f1f1f76adb30da787a472dff4b0da13b0e1a9c602e628501294cc9ae84d58357 - # via -r tooling/requirements.in -sphinxcontrib-applehelp==2.0.0 \ - --hash=sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1 \ - --hash=sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5 - # via sphinx -sphinxcontrib-devhelp==2.0.0 \ - --hash=sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad \ - --hash=sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2 - # via sphinx -sphinxcontrib-htmlhelp==2.1.0 \ - --hash=sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8 \ - --hash=sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9 - # via sphinx -sphinxcontrib-jquery==4.1 \ - --hash=sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a \ - --hash=sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae - # via sphinx-needs -sphinxcontrib-jsmath==1.0.1 \ - --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ - --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 - # via sphinx -sphinxcontrib-plantuml==0.30 \ - --hash=sha256:2a1266ca43bddf44640ae44107003df4490de2b3c3154a0d627cfb63e9a169bf - # via -r tooling/requirements.in -sphinxcontrib-qthelp==2.0.0 \ - --hash=sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab \ - 
--hash=sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb - # via sphinx -sphinxcontrib-serializinghtml==2.0.0 \ - --hash=sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331 \ - --hash=sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d - # via sphinx -starlette==0.45.3 \ - --hash=sha256:2cbcba2a75806f8a41c722141486f37c28e30a0921c5f6fe4346cb0dcee1302f \ - --hash=sha256:dfb6d332576f136ec740296c7e8bb8c8a7125044e7c6da30744718880cdd059d - # via sphinx-autobuild -typing-extensions==4.12.2 \ - --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ - --hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8 - # via - # anyio - # pydata-sphinx-theme - # pygithub - # referencing -urllib3==2.3.0 \ - --hash=sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df \ - --hash=sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d - # via - # pygithub - # requests -uvicorn==0.34.0 \ - --hash=sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4 \ - --hash=sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9 - # via sphinx-autobuild -watchfiles==1.0.4 \ - --hash=sha256:02a526ee5b5a09e8168314c905fc545c9bc46509896ed282aeb5a8ba9bd6ca27 \ - --hash=sha256:05d341c71f3d7098920f8551d4df47f7b57ac5b8dad56558064c3431bdfc0b74 \ - --hash=sha256:076f293100db3b0b634514aa0d294b941daa85fc777f9c698adb1009e5aca0b1 \ - --hash=sha256:0799ae68dfa95136dde7c472525700bd48777875a4abb2ee454e3ab18e9fc712 \ - --hash=sha256:0986902677a1a5e6212d0c49b319aad9cc48da4bd967f86a11bde96ad9676ca1 \ - --hash=sha256:0bc80d91ddaf95f70258cf78c471246846c1986bcc5fd33ccc4a1a67fcb40f9a \ - --hash=sha256:13c2ce7b72026cfbca120d652f02c7750f33b4c9395d79c9790b27f014c8a5a2 \ - --hash=sha256:1941b4e39de9b38b868a69b911df5e89dc43767feeda667b40ae032522b9b5f1 \ - --hash=sha256:1eacd91daeb5158c598fe22d7ce66d60878b6294a86477a4715154990394c9b3 \ - 
--hash=sha256:229e6ec880eca20e0ba2f7e2249c85bae1999d330161f45c78d160832e026ee2 \ - --hash=sha256:22bb55a7c9e564e763ea06c7acea24fc5d2ee5dfc5dafc5cfbedfe58505e9f90 \ - --hash=sha256:278aaa395f405972e9f523bd786ed59dfb61e4b827856be46a42130605fd0899 \ - --hash=sha256:2a9f93f8439639dc244c4d2902abe35b0279102bca7bbcf119af964f51d53c19 \ - --hash=sha256:308ac265c56f936636e3b0e3f59e059a40003c655228c131e1ad439957592303 \ - --hash=sha256:31f1a379c9dcbb3f09cf6be1b7e83b67c0e9faabed0471556d9438a4a4e14202 \ - --hash=sha256:32b026a6ab64245b584acf4931fe21842374da82372d5c039cba6bf99ef722f3 \ - --hash=sha256:342622287b5604ddf0ed2d085f3a589099c9ae8b7331df3ae9845571586c4f3d \ - --hash=sha256:39f4914548b818540ef21fd22447a63e7be6e24b43a70f7642d21f1e73371590 \ - --hash=sha256:3f68d8e9d5a321163ddacebe97091000955a1b74cd43724e346056030b0bacee \ - --hash=sha256:43b168bba889886b62edb0397cab5b6490ffb656ee2fcb22dec8bfeb371a9e12 \ - --hash=sha256:47eb32ef8c729dbc4f4273baece89398a4d4b5d21a1493efea77a17059f4df8a \ - --hash=sha256:4810ea2ae622add560f4aa50c92fef975e475f7ac4900ce5ff5547b2434642d8 \ - --hash=sha256:4e997802d78cdb02623b5941830ab06f8860038faf344f0d288d325cc9c5d2ff \ - --hash=sha256:4ebbeca9360c830766b9f0df3640b791be569d988f4be6c06d6fae41f187f105 \ - --hash=sha256:4f8c4998506241dedf59613082d1c18b836e26ef2a4caecad0ec41e2a15e4226 \ - --hash=sha256:55ccfd27c497b228581e2838d4386301227fc0cb47f5a12923ec2fe4f97b95af \ - --hash=sha256:5717021b199e8353782dce03bd8a8f64438832b84e2885c4a645f9723bf656d9 \ - --hash=sha256:5c11ea22304d17d4385067588123658e9f23159225a27b983f343fcffc3e796a \ - --hash=sha256:5e0227b8ed9074c6172cf55d85b5670199c99ab11fd27d2c473aa30aec67ee42 \ - --hash=sha256:62c9953cf85529c05b24705639ffa390f78c26449e15ec34d5339e8108c7c407 \ - --hash=sha256:6ba473efd11062d73e4f00c2b730255f9c1bdd73cd5f9fe5b5da8dbd4a717205 \ - --hash=sha256:740d103cd01458f22462dedeb5a3382b7f2c57d07ff033fbc9465919e5e1d0f3 \ - --hash=sha256:74cb3ca19a740be4caa18f238298b9d472c850f7b2ed89f396c00a4c97e2d9ff \ - 
--hash=sha256:7b75fee5a16826cf5c46fe1c63116e4a156924d668c38b013e6276f2582230f0 \ - --hash=sha256:7cf684aa9bba4cd95ecb62c822a56de54e3ae0598c1a7f2065d51e24637a3c5d \ - --hash=sha256:8012bd820c380c3d3db8435e8cf7592260257b378b649154a7948a663b5f84e9 \ - --hash=sha256:857f5fc3aa027ff5e57047da93f96e908a35fe602d24f5e5d8ce64bf1f2fc733 \ - --hash=sha256:8b1f135238e75d075359cf506b27bf3f4ca12029c47d3e769d8593a2024ce161 \ - --hash=sha256:8d0d0630930f5cd5af929040e0778cf676a46775753e442a3f60511f2409f48f \ - --hash=sha256:90192cdc15ab7254caa7765a98132a5a41471cf739513cc9bcf7d2ffcc0ec7b2 \ - --hash=sha256:95b42cac65beae3a362629950c444077d1b44f1790ea2772beaea95451c086bb \ - --hash=sha256:9745a4210b59e218ce64c91deb599ae8775c8a9da4e95fb2ee6fe745fc87d01a \ - --hash=sha256:9d1ef56b56ed7e8f312c934436dea93bfa3e7368adfcf3df4c0da6d4de959a1e \ - --hash=sha256:9eea33ad8c418847dd296e61eb683cae1c63329b6d854aefcd412e12d94ee235 \ - --hash=sha256:9f25d0ba0fe2b6d2c921cf587b2bf4c451860086534f40c384329fb96e2044d1 \ - --hash=sha256:9fe37a2de80aa785d340f2980276b17ef697ab8db6019b07ee4fd28a8359d2f3 \ - --hash=sha256:a38320582736922be8c865d46520c043bff350956dfc9fbaee3b2df4e1740a4b \ - --hash=sha256:a462490e75e466edbb9fc4cd679b62187153b3ba804868452ef0577ec958f5ff \ - --hash=sha256:a5ae5706058b27c74bac987d615105da17724172d5aaacc6c362a40599b6de43 \ - --hash=sha256:aa216f87594f951c17511efe5912808dfcc4befa464ab17c98d387830ce07b60 \ - --hash=sha256:ab0311bb2ffcd9f74b6c9de2dda1612c13c84b996d032cd74799adb656af4e8b \ - --hash=sha256:ab594e75644421ae0a2484554832ca5895f8cab5ab62de30a1a57db460ce06c6 \ - --hash=sha256:aee397456a29b492c20fda2d8961e1ffb266223625346ace14e4b6d861ba9c80 \ - --hash=sha256:b045c800d55bc7e2cadd47f45a97c7b29f70f08a7c2fa13241905010a5493f94 \ - --hash=sha256:b77d5622ac5cc91d21ae9c2b284b5d5c51085a0bdb7b518dba263d0af006132c \ - --hash=sha256:ba5bb3073d9db37c64520681dd2650f8bd40902d991e7b4cfaeece3e32561d08 \ - --hash=sha256:bdef5a1be32d0b07dcea3318a0be95d42c98ece24177820226b56276e06b63b0 \ - 
--hash=sha256:c2acfa49dd0ad0bf2a9c0bb9a985af02e89345a7189be1efc6baa085e0f72d7c \ - --hash=sha256:c7cce76c138a91e720d1df54014a047e680b652336e1b73b8e3ff3158e05061e \ - --hash=sha256:cc27a65069bcabac4552f34fd2dce923ce3fcde0721a16e4fb1b466d63ec831f \ - --hash=sha256:cdbd912a61543a36aef85e34f212e5d2486e7c53ebfdb70d1e0b060cc50dd0bf \ - --hash=sha256:cdcc92daeae268de1acf5b7befcd6cfffd9a047098199056c72e4623f531de18 \ - --hash=sha256:d3452c1ec703aa1c61e15dfe9d482543e4145e7c45a6b8566978fbb044265a21 \ - --hash=sha256:d6097538b0ae5c1b88c3b55afa245a66793a8fec7ada6755322e465fb1a0e8cc \ - --hash=sha256:d8d3d9203705b5797f0af7e7e5baa17c8588030aaadb7f6a86107b7247303817 \ - --hash=sha256:e0611d244ce94d83f5b9aff441ad196c6e21b55f77f3c47608dcf651efe54c4a \ - --hash=sha256:f12969a3765909cf5dc1e50b2436eb2c0e676a3c75773ab8cc3aa6175c16e902 \ - --hash=sha256:f44a39aee3cbb9b825285ff979ab887a25c5d336e5ec3574f1506a4671556a8d \ - --hash=sha256:f9ce064e81fe79faa925ff03b9f4c1a98b0bbb4a1b8c1b015afa93030cb21a49 \ - --hash=sha256:fb2c46e275fbb9f0c92e7654b231543c7bbfa1df07cdc4b99fa73bedfde5c844 \ - --hash=sha256:fc2eb5d14a8e0d5df7b36288979176fbb39672d45184fc4b1c004d7c3ce29317 - # via sphinx-autobuild -websockets==14.2 \ - --hash=sha256:02687db35dbc7d25fd541a602b5f8e451a238ffa033030b172ff86a93cb5dc2a \ - --hash=sha256:065ce275e7c4ffb42cb738dd6b20726ac26ac9ad0a2a48e33ca632351a737267 \ - --hash=sha256:091ab63dfc8cea748cc22c1db2814eadb77ccbf82829bac6b2fbe3401d548eda \ - --hash=sha256:0a52a6d7cf6938e04e9dceb949d35fbdf58ac14deea26e685ab6368e73744e4c \ - --hash=sha256:0a6f3efd47ffd0d12080594f434faf1cd2549b31e54870b8470b28cc1d3817d9 \ - --hash=sha256:0d8c3e2cdb38f31d8bd7d9d28908005f6fa9def3324edb9bf336d7e4266fd397 \ - --hash=sha256:1979bee04af6a78608024bad6dfcc0cc930ce819f9e10342a29a05b5320355d0 \ - --hash=sha256:1a5a20d5843886d34ff8c57424cc65a1deda4375729cbca4cb6b3353f3ce4142 \ - --hash=sha256:1c9b6535c0e2cf8a6bf938064fb754aaceb1e6a4a51a80d884cd5db569886910 \ - 
--hash=sha256:1f20522e624d7ffbdbe259c6b6a65d73c895045f76a93719aa10cd93b3de100c \ - --hash=sha256:2066dc4cbcc19f32c12a5a0e8cc1b7ac734e5b64ac0a325ff8353451c4b15ef2 \ - --hash=sha256:20e6dd0984d7ca3037afcb4494e48c74ffb51e8013cac71cf607fffe11df7205 \ - --hash=sha256:22441c81a6748a53bfcb98951d58d1af0661ab47a536af08920d129b4d1c3473 \ - --hash=sha256:2c6c0097a41968b2e2b54ed3424739aab0b762ca92af2379f152c1aef0187e1c \ - --hash=sha256:2dddacad58e2614a24938a50b85969d56f88e620e3f897b7d80ac0d8a5800258 \ - --hash=sha256:2e20c5f517e2163d76e2729104abc42639c41cf91f7b1839295be43302713661 \ - --hash=sha256:34277a29f5303d54ec6468fb525d99c99938607bc96b8d72d675dee2b9f5bf1d \ - --hash=sha256:3bdc8c692c866ce5fefcaf07d2b55c91d6922ac397e031ef9b774e5b9ea42166 \ - --hash=sha256:3c1426c021c38cf92b453cdf371228d3430acd775edee6bac5a4d577efc72365 \ - --hash=sha256:44bba1a956c2c9d268bdcdf234d5e5ff4c9b6dc3e300545cbe99af59dda9dcce \ - --hash=sha256:4b27ece32f63150c268593d5fdb82819584831a83a3f5809b7521df0685cd5d8 \ - --hash=sha256:4da98b72009836179bb596a92297b1a61bb5a830c0e483a7d0766d45070a08ad \ - --hash=sha256:4daa0faea5424d8713142b33825fff03c736f781690d90652d2c8b053345b0e7 \ - --hash=sha256:5059ed9c54945efb321f097084b4c7e52c246f2c869815876a69d1efc4ad6eb5 \ - --hash=sha256:577a4cebf1ceaf0b65ffc42c54856214165fb8ceeba3935852fc33f6b0c55e7f \ - --hash=sha256:647b573f7d3ada919fd60e64d533409a79dcf1ea21daeb4542d1d996519ca967 \ - --hash=sha256:669c3e101c246aa85bc8534e495952e2ca208bd87994650b90a23d745902db9a \ - --hash=sha256:6af6a4b26eea4fc06c6818a6b962a952441e0e39548b44773502761ded8cc1d4 \ - --hash=sha256:6af99a38e49f66be5a64b1e890208ad026cda49355661549c507152113049990 \ - --hash=sha256:6d7ff794c8b36bc402f2e07c0b2ceb4a2424147ed4785ff03e2a7af03711d60a \ - --hash=sha256:6f1372e511c7409a542291bce92d6c83320e02c9cf392223272287ce55bc224e \ - --hash=sha256:714a9b682deb4339d39ffa674f7b674230227d981a37d5d174a4a83e3978a610 \ - --hash=sha256:75862126b3d2d505e895893e3deac0a9339ce750bd27b4ba515f008b5acf832d \ - 
--hash=sha256:7a570862c325af2111343cc9b0257b7119b904823c675b22d4ac547163088d0d \ - --hash=sha256:7a6ceec4ea84469f15cf15807a747e9efe57e369c384fa86e022b3bea679b79b \ - --hash=sha256:7cd5706caec1686c5d233bc76243ff64b1c0dc445339bd538f30547e787c11fe \ - --hash=sha256:80c8efa38957f20bba0117b48737993643204645e9ec45512579132508477cfc \ - --hash=sha256:862e9967b46c07d4dcd2532e9e8e3c2825e004ffbf91a5ef9dde519ee2effb0b \ - --hash=sha256:86cf1aaeca909bf6815ea714d5c5736c8d6dd3a13770e885aafe062ecbd04f1f \ - --hash=sha256:89a71173caaf75fa71a09a5f614f450ba3ec84ad9fca47cb2422a860676716f0 \ - --hash=sha256:9f05702e93203a6ff5226e21d9b40c037761b2cfb637187c9802c10f58e40473 \ - --hash=sha256:a39d7eceeea35db85b85e1169011bb4321c32e673920ae9c1b6e0978590012a3 \ - --hash=sha256:a3c4aa3428b904d5404a0ed85f3644d37e2cb25996b7f096d77caeb0e96a3b42 \ - --hash=sha256:a9b0f6c3ba3b1240f602ebb3971d45b02cc12bd1845466dd783496b3b05783a5 \ - --hash=sha256:a9e72fb63e5f3feacdcf5b4ff53199ec8c18d66e325c34ee4c551ca748623bbc \ - --hash=sha256:ab95d357cd471df61873dadf66dd05dd4709cae001dd6342edafc8dc6382f307 \ - --hash=sha256:ad1c1d02357b7665e700eca43a31d52814ad9ad9b89b58118bdabc365454b574 \ - --hash=sha256:b374e8953ad477d17e4851cdc66d83fdc2db88d9e73abf755c94510ebddceb95 \ - --hash=sha256:b439ea828c4ba99bb3176dc8d9b933392a2413c0f6b149fdcba48393f573377f \ - --hash=sha256:b4c8cef610e8d7c70dea92e62b6814a8cd24fbd01d7103cc89308d2bfe1659ef \ - --hash=sha256:bbe03eb853e17fd5b15448328b4ec7fb2407d45fb0245036d06a3af251f8e48f \ - --hash=sha256:bc63cee8596a6ec84d9753fd0fcfa0452ee12f317afe4beae6b157f0070c6c7f \ - --hash=sha256:c3ecadc7ce90accf39903815697917643f5b7cfb73c96702318a096c00aa71f5 \ - --hash=sha256:c76193c1c044bd1e9b3316dcc34b174bbf9664598791e6fb606d8d29000e070c \ - --hash=sha256:c93215fac5dadc63e51bcc6dceca72e72267c11def401d6668622b47675b097f \ - --hash=sha256:cc45afb9c9b2dc0852d5c8b5321759cf825f82a31bfaf506b65bf4668c96f8b2 \ - --hash=sha256:d7d9cafbccba46e768be8a8ad4635fa3eae1ffac4c6e7cb4eb276ba41297ed29 \ - 
--hash=sha256:da85651270c6bfb630136423037dd4975199e5d4114cae6d3066641adcc9d1c7 \ - --hash=sha256:dec254fcabc7bd488dab64846f588fc5b6fe0d78f641180030f8ea27b76d72c3 \ - --hash=sha256:e3fbd68850c837e57373d95c8fe352203a512b6e49eaae4c2f4088ef8cf21980 \ - --hash=sha256:e8179f95323b9ab1c11723e5d91a89403903f7b001828161b480a7810b334885 \ - --hash=sha256:e9d0e53530ba7b8b5e389c02282f9d2aa47581514bd6049d3a7cffe1385cf5fe \ - --hash=sha256:eabdb28b972f3729348e632ab08f2a7b616c7e53d5414c12108c29972e655b20 \ - --hash=sha256:ec607328ce95a2f12b595f7ae4c5d71bf502212bddcea528290b35c286932b12 \ - --hash=sha256:efd9b868d78b194790e6236d9cbc46d68aba4b75b22497eb4ab64fa640c3af56 \ - --hash=sha256:f2e53c72052f2596fb792a7acd9704cbc549bf70fcde8a99e899311455974ca3 \ - --hash=sha256:f390024a47d904613577df83ba700bd189eedc09c57af0a904e5c39624621270 \ - --hash=sha256:f8a86a269759026d2bde227652b87be79f8a734e582debf64c9d302faa1e9f03 \ - --hash=sha256:fd475a974d5352390baf865309fe37dec6831aafc3014ffac1eea99e84e83fc2 - # via sphinx-autobuild -wrapt==1.17.2 \ - --hash=sha256:08e7ce672e35efa54c5024936e559469436f8b8096253404faeb54d2a878416f \ - --hash=sha256:0a6e821770cf99cc586d33833b2ff32faebdbe886bd6322395606cf55153246c \ - --hash=sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a \ - --hash=sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b \ - --hash=sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555 \ - --hash=sha256:1473400e5b2733e58b396a04eb7f35f541e1fb976d0c0724d0223dd607e0f74c \ - --hash=sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b \ - --hash=sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6 \ - --hash=sha256:1e1fe0e6ab7775fd842bc39e86f6dcfc4507ab0ffe206093e76d61cde37225c8 \ - --hash=sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662 \ - --hash=sha256:2696993ee1eebd20b8e4ee4356483c4cb696066ddc24bd70bcbb80fa56ff9061 \ - 
--hash=sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998 \ - --hash=sha256:36ccae62f64235cf8ddb682073a60519426fdd4725524ae38874adf72b5f2aeb \ - --hash=sha256:3cedbfa9c940fdad3e6e941db7138e26ce8aad38ab5fe9dcfadfed9db7a54e62 \ - --hash=sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984 \ - --hash=sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392 \ - --hash=sha256:4011d137b9955791f9084749cba9a367c68d50ab8d11d64c50ba1688c9b457f2 \ - --hash=sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306 \ - --hash=sha256:410a92fefd2e0e10d26210e1dfb4a876ddaf8439ef60d6434f21ef8d87efc5b7 \ - --hash=sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3 \ - --hash=sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9 \ - --hash=sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6 \ - --hash=sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192 \ - --hash=sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317 \ - --hash=sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f \ - --hash=sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda \ - --hash=sha256:582530701bff1dec6779efa00c516496968edd851fba224fbd86e46cc6b73563 \ - --hash=sha256:58455b79ec2661c3600e65c0a716955adc2410f7383755d537584b0de41b1d8a \ - --hash=sha256:58705da316756681ad3c9c73fd15499aa4d8c69f9fd38dc8a35e06c12468582f \ - --hash=sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d \ - --hash=sha256:5c803c401ea1c1c18de70a06a6f79fcc9c5acfc79133e9869e730ad7f8ad8ef9 \ - --hash=sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8 \ - --hash=sha256:612dff5db80beef9e649c6d803a8d50c409082f1fedc9dbcdfde2983b2025b82 \ - --hash=sha256:62c2caa1585c82b3f7a7ab56afef7b3602021d6da34fbc1cf234ff139fed3cd9 \ - --hash=sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845 \ - 
--hash=sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82 \ - --hash=sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125 \ - --hash=sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504 \ - --hash=sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b \ - --hash=sha256:80dd7db6a7cb57ffbc279c4394246414ec99537ae81ffd702443335a61dbf3a7 \ - --hash=sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc \ - --hash=sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6 \ - --hash=sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40 \ - --hash=sha256:91bd7d1773e64019f9288b7a5101f3ae50d3d8e6b1de7edee9c2ccc1d32f0c0a \ - --hash=sha256:95c658736ec15602da0ed73f312d410117723914a5c91a14ee4cdd72f1d790b3 \ - --hash=sha256:99039fa9e6306880572915728d7f6c24a86ec57b0a83f6b2491e1d8ab0235b9a \ - --hash=sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72 \ - --hash=sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681 \ - --hash=sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438 \ - --hash=sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae \ - --hash=sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2 \ - --hash=sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb \ - --hash=sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5 \ - --hash=sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a \ - --hash=sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3 \ - --hash=sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8 \ - --hash=sha256:b4e42a40a5e164cbfdb7b386c966a588b1047558a990981ace551ed7e12ca9c2 \ - --hash=sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22 \ - --hash=sha256:b60fb58b90c6d63779cb0c0c54eeb38941bae3ecf7a73c764c52c88c2dcb9d72 \ - 
--hash=sha256:b870b5df5b71d8c3359d21be8f0d6c485fa0ebdb6477dda51a1ea54a9b558061 \ - --hash=sha256:ba0f0eb61ef00ea10e00eb53a9129501f52385c44853dbd6c4ad3f403603083f \ - --hash=sha256:bb87745b2e6dc56361bfde481d5a378dc314b252a98d7dd19a651a3fa58f24a9 \ - --hash=sha256:bb90fb8bda722a1b9d48ac1e6c38f923ea757b3baf8ebd0c82e09c5c1a0e7a04 \ - --hash=sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98 \ - --hash=sha256:c86563182421896d73858e08e1db93afdd2b947a70064b813d515d66549e15f9 \ - --hash=sha256:c958bcfd59bacc2d0249dcfe575e71da54f9dcf4a8bdf89c4cb9a68a1170d73f \ - --hash=sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b \ - --hash=sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925 \ - --hash=sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6 \ - --hash=sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0 \ - --hash=sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9 \ - --hash=sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c \ - --hash=sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991 \ - --hash=sha256:ecc840861360ba9d176d413a5489b9a0aff6d6303d7e733e2c4623cfa26904a6 \ - --hash=sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000 \ - --hash=sha256:f393cda562f79828f38a819f4788641ac7c4085f30f1ce1a68672baa686482bb \ - --hash=sha256:f917c1180fdb8623c2b75a99192f4025e412597c50b2ac870f156de8fb101119 \ - --hash=sha256:fc78a84e2dfbc27afe4b2bd7c80c8db9bca75cc5b85df52bfe634596a1da846b \ - --hash=sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58 - # via deprecated From a391da499b34501b4897b2f514718acfa53030c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Wed, 30 Apr 2025 11:57:53 +0200 Subject: [PATCH 005/231] Initial cleanup (#5) * Renamed vars for readability * small improvements and cleanup * Moving of BUILD files * Moving string_links option * 
Formatting Preparation commit to move all py_libraries to their own BUILD files. This solution at the moment is WIP as the pre-fix needed is unacceptable. But this will serve as a base to be able to test if further changes work Moved BUILD files to separate folders for better Separation Cleaned up some smaller stuff Conf.py is cleaner again --- docs.bzl | 25 +++++-- process-docs/BUILD | 67 +++++++---------- process-docs/conf.py | 15 +--- pyproject.toml | 14 ++++ src/BUILD | 26 ++++--- src/extensions/BUILD | 75 ++----------------- src/extensions/score_draw_uml_funcs/BUILD | 25 +++++++ src/extensions/score_header_service/BUILD | 35 +++++++++ .../score_header_service/__init__.py | 2 +- .../test/test_header_service.py | 43 ++++++----- .../test/test_header_service_integration.py | 12 +-- src/extensions/score_layout/BUILD | 25 +++++++ src/extensions/score_layout/sphinx_options.py | 2 +- src/extensions/score_metamodel/BUILD | 36 +++++++++ src/extensions/score_metamodel/__init__.py | 20 +++++ src/extensions/score_source_code_linker/BUILD | 44 +++++++++++ .../score_source_code_linker/__init__.py | 12 ++- .../tests/test_requirement_links.py | 2 +- .../tests/test_source_link.py | 2 +- src/incremental.py | 16 ++-- 20 files changed, 321 insertions(+), 177 deletions(-) create mode 100644 src/extensions/score_draw_uml_funcs/BUILD create mode 100644 src/extensions/score_header_service/BUILD create mode 100644 src/extensions/score_layout/BUILD create mode 100644 src/extensions/score_metamodel/BUILD create mode 100644 src/extensions/score_source_code_linker/BUILD diff --git a/docs.bzl b/docs.bzl index 17ee5cfa..923231af 100644 --- a/docs.bzl +++ b/docs.bzl @@ -42,11 +42,16 @@ load("@pip_process//:requirements.bzl", "all_requirements", "requirement") load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary", "sphinx_docs") load("@rules_python//sphinxdocs:sphinx_docs_library.bzl", "sphinx_docs_library") load("@score_python_basics//:defs.bzl", "score_virtualenv") 
-load("//src/extensions:score_source_code_linker/collect_source_files.bzl", "parse_source_files_for_needs_links") +load("//src/extensions/score_source_code_linker:collect_source_files.bzl", "parse_source_files_for_needs_links") sphinx_requirements = all_requirements + [ "//src:plantuml_for_python", - "//src/extensions:score_extensions", + "//src/extensions:score_plantuml", + "//src/extensions/score_draw_uml_funcs:score_draw_uml_funcs", + "//src/extensions/score_header_service:score_header_service", + "//src/extensions/score_layout:score_layout", + "//src/extensions/score_metamodel:score_metamodel", + "//src/extensions/score_source_code_linker:score_source_code_linker", ] def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_dir = "docs", build_dir_for_incremental = "_build", docs_targets = []): @@ -56,6 +61,11 @@ def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_ Current restrictions: * only callable from 'docs/BUILD' """ + sphinx_build_binary( + name = "sphinx_build", + visibility = ["//visibility:public"], + deps = sphinx_requirements, + ) # Parse source files for needs links # This needs to be created to generate a target, otherwise it won't execute as dependency for other macros @@ -64,7 +74,8 @@ def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_ srcs_and_deps = source_files_to_scan_for_needs_links if source_files_to_scan_for_needs_links else [], ) - # TODO: Explain what this does / how it works? + # We are iterating over all provided 'targets' in order to allow for automatic generation of them without + # needing to modify the underlying 'docs.bzl' file. 
for target in docs_targets: suffix = "_" + target["suffix"] if target["suffix"] else "" external_needs_deps = target.get("target", []) @@ -105,12 +116,12 @@ def _incremental(incremental_name = "incremental", live_name = "live_preview", s extra_dependencies: Additional dependencies besides the centrally maintained "sphinx_requirements". """ - dependencies = sphinx_requirements + extra_dependencies + dependencies = sphinx_requirements + extra_dependencies + ["@rules_python//python/runfiles"] py_binary( name = incremental_name, srcs = ["//src:incremental.py"], deps = dependencies, - data = [":score_source_code_parser"] + external_needs_deps, + data = [":score_source_code_parser"] + dependencies, env = { "SOURCE_DIRECTORY": source_dir, "CONF_DIRECTORY": conf_dir, @@ -144,7 +155,6 @@ def _ide_support(): def _docs(name = "docs", format = "html", external_needs_deps = list(), external_needs_def = dict()): ext_needs_arg = "--define=external_needs_source=" + json.encode(external_needs_def) - #fail(ext_needs_arg) sphinx_docs( name = name, srcs = native.glob([ @@ -157,7 +167,6 @@ def _docs(name = "docs", format = "html", external_needs_deps = list(), external "**/*.need", # Include the docs src itself # Note: we don't use py_library here to make it as close as possible to docs:incremental. 
- "**/*.py", "**/*.yaml", "**/*.json", "**/*.csv", @@ -170,7 +179,7 @@ def _docs(name = "docs", format = "html", external_needs_deps = list(), external formats = [ format, ], - sphinx = "//src:sphinx_build", + sphinx = ":sphinx_build", tags = [ "manual", ], diff --git a/process-docs/BUILD b/process-docs/BUILD index 282f9f38..bb5e4214 100644 --- a/process-docs/BUILD +++ b/process-docs/BUILD @@ -25,49 +25,38 @@ docs( { "suffix": "", # local without external needs }, - - # ╭───────────────────────────────────────╮ - # │ This is commented out until local │ - # │ multi-repo testing is implemented │ - # ╰───────────────────────────────────────╯ - - # { - # "suffix": "release", # The version imported from MODULE.bazel - # "target": ["@score_platform//docs:docs"], - # "external_needs_info": [ - # { - # "base_url": "https://eclipse-score.github.io/score/pr-980/", - # "json_path": "/score_platform~/docs/docs/_build/html/needs.json", - # "version": "0.1", - # }, - # ], - # }, - # { - # "suffix": "latest", # latest main branch documentation build - # "external_needs_info": [ - # { - # "base_url": "https://eclipse-score.github.io/score/main/", - # "json_url": "https://maximiliansoerenpollak.github.io/score/needs.json", - # "version": "0.1", - # }, - # ], - # }, ], source_dir = "process-docs", source_files_to_scan_for_needs_links = [ # Note: you can add filegroups, globs, or entire targets here. 
- "//src/extensions:score_extensions", - ":score_extensions", - ":score_source_code_linker", + "//src:score_extension_files", + "//src:plantuml_for_python", ], ) -py_library( - name = "score_source_code_linker", - srcs = glob( - ["src/extensions/score_source_code_linker/**/*.py"], - exclude = ["src/extensions/score_source_code_linker/tests/*.py"], - ), - imports = ["src/extensions"], - visibility = ["//visibility:public"], -) +# ╭───────────────────────────────────────╮ +# │ This is commented out until local │ +# │ multi-repo testing is implemented │ +# ╰───────────────────────────────────────╯ + +# { +# "suffix": "release", # The version imported from MODULE.bazel +# "target": ["@score_platform//docs:docs"], +# "external_needs_info": [ +# { +# "base_url": "https://eclipse-score.github.io/score/pr-980/", +# "json_path": "/score_platform~/docs/docs/_build/html/needs.json", +# "version": "0.1", +# }, +# ], +# }, +# { +# "suffix": "latest", # latest main branch documentation build +# "external_needs_info": [ +# { +# "base_url": "https://eclipse-score.github.io/score/main/", +# "json_url": "https://maximiliansoerenpollak.github.io/score/needs.json", +# "version": "0.1", +# }, +# ], +# }, diff --git a/process-docs/conf.py b/process-docs/conf.py index e0267d70..cd7b28b6 100644 --- a/process-docs/conf.py +++ b/process-docs/conf.py @@ -32,6 +32,7 @@ logger = logging.getLogger("process-docs") logger.debug("Loading docs-as-code conf.py") + extensions = [ "sphinx_design", "sphinx_needs", @@ -42,6 +43,7 @@ "score_source_code_linker", "score_layout", ] + logger.debug("After loading extensions") exclude_patterns = [ @@ -58,19 +60,6 @@ # Enable numref numfig = True - -# -- sphinx-needs configuration -------------------------------------------- -# Setting the needs layouts -needs_global_options = {"collapse": True} -needs_string_links = { - "source_code_linker": { - "regex": r"(?P[^,]+)", - "link_url": "{{value}}", - "link_name": "Source Code Link", - "options": 
["source_code_link"], - }, -} - # TODO: Fixing this in all builds html_static_path = ["../src/assets"] diff --git a/pyproject.toml b/pyproject.toml index 7c055b77..d7ec00ee 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,5 +3,19 @@ [tool.pyright] extends = "bazel-bin/process-docs/ide_support.runfiles/score_python_basics~/pyproject.toml" +exclude = [ + "**/__pycache__", + "**/.*", + "**/bazel-*", + "venv/**", +] + [tool.ruff] extend = "bazel-bin/process-docs/ide_support.runfiles/score_python_basics~/pyproject.toml" + +extend-exclude = [ + "**/__pycache__", + "/.*", + "bazel-*", + "venv/**", +] diff --git a/src/BUILD b/src/BUILD index 2c0cc69f..0a313773 100644 --- a/src/BUILD +++ b/src/BUILD @@ -20,24 +20,15 @@ load("@rules_python//python:pip.bzl", "compile_pip_requirements") load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary") load("@score_dash_license_checker//:dash.bzl", "dash_license_checker") -sphinx_requirements = all_requirements + [ - ":plantuml_for_python", - "//src/extensions:score_extensions", -] - -sphinx_build_binary( - name = "sphinx_build", - visibility = ["//visibility:public"], - deps = sphinx_requirements, -) - # These are only exported because they're passed as files to the //docs.bzl # macros, and thus must be visible to other packages. They should only be # referenced by the //docs.bzl macros. + exports_files( [ "requirements.txt", "incremental.py", + "dummy.py", ], visibility = ["//visibility:public"], ) @@ -98,6 +89,19 @@ py_library( visibility = ["//visibility:public"], ) +filegroup( + name = "score_extension_files", + srcs = glob( + ["*/**"], + exclude = [ + "**/test/**", + "**/tests/**", + "**/__pycache__/**", + ], + ), + visibility = ["//visibility:public"], +) + # Running this executes the `collect_source_files.bzl` aspect. 
# Collects all source files from specified targets in 'deps', and makes them available for parsing for the source_code_linker diff --git a/src/extensions/BUILD b/src/extensions/BUILD index 703d9a7a..4529763f 100644 --- a/src/extensions/BUILD +++ b/src/extensions/BUILD @@ -15,74 +15,15 @@ load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") load("@pip_process//:requirements.bzl", "all_requirements") load("@score_python_basics//:defs.bzl", "score_py_pytest", "score_virtualenv") -score_virtualenv( - name = "process_venv", - reqs = all_requirements, - venv_name = ".venv_process", -) - -py_library( - name = "score_extensions", - srcs = glob( - ["**/*.py"], - exclude = ["**/tests/**/*"], - ), - data = glob([ - "score_layout/assets/**", - "score_metamodel/*.yaml", # Needed to remove 'resolving of symlink' in score_metamodel.__init__ - ]), - imports = ["."], - visibility = ["//visibility:public"], - deps = [ - "@rules_python//python/runfiles", - ], -) +# TODO: This probably can be deleted +# score_virtualenv( +# name = "process_venv", +# reqs = all_requirements, +# venv_name = ".venv_process", +# ) -# Dedicated metamodel target only for pytest. -# It's required to define the imports for bazel pytest, so that python/pytest can -# import "from score_metamodel" without issues. 
py_library( - name = "score_metamodel", - srcs = glob( - ["score_metamodel/**/*.py"], - exclude = ["**/tests/**/*"], - ), - imports = ["extensions"], - visibility = ["//visibility:public"], -) - -score_py_pytest( - name = "score_metamodel_test", - size = "small", - srcs = glob(["score_metamodel/tests/**/*.py"]), - visibility = ["//visibility:public"], - deps = [":score_metamodel"] + all_requirements, -) - -# ───────────────────────── Source code linker ──────────────────────── -# For more information see documentation at score_source_code_linker/README.md -py_library( - name = "score_source_code_linker", - srcs = glob( - ["score_source_code_linker/**/*.py"], - exclude = ["score_source_code_linker/tests/*.py"], - ), - imports = ["."], - visibility = ["//visibility:public"], -) - -score_py_pytest( - name = "score_source_code_linker_test", - size = "small", - srcs = glob(["score_source_code_linker/tests/**/*.py"]), - deps = [ - ":score_source_code_linker", - ] + all_requirements, -) - -# Needed to make the file parser executeable and findable for the source_code_linker aspect -py_binary( - name = "parsed_source_files_for_source_code_linker", - srcs = ["score_source_code_linker/parse_source_files.py"], + name = "score_plantuml", + srcs = ["score_plantuml.py"], visibility = ["//visibility:public"], ) diff --git a/src/extensions/score_draw_uml_funcs/BUILD b/src/extensions/score_draw_uml_funcs/BUILD new file mode 100644 index 00000000..21d8622e --- /dev/null +++ b/src/extensions/score_draw_uml_funcs/BUILD @@ -0,0 +1,25 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +load("@aspect_rules_py//py:defs.bzl", "py_library") +load("@pip_process//:requirements.bzl", "all_requirements") + +py_library( + name = "score_draw_uml_funcs", + srcs = glob( + ["*.py"], + ), + imports = ["."], + visibility = ["//visibility:public"], + # TODO: Figure out if all requirements are needed or if we can break it down a bit + deps = all_requirements, +) diff --git a/src/extensions/score_header_service/BUILD b/src/extensions/score_header_service/BUILD new file mode 100644 index 00000000..c9e136ef --- /dev/null +++ b/src/extensions/score_header_service/BUILD @@ -0,0 +1,35 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +load("@aspect_rules_py//py:defs.bzl", "py_library") +load("@pip_process//:requirements.bzl", "all_requirements") +load("@score_python_basics//:defs.bzl", "score_py_pytest") + +py_library( + name = "score_header_service", + srcs = glob( + ["*.py"], + exclude = ["test/**"], + ), + imports = ["."], + visibility = ["//visibility:public"], + # TODO: Figure out if all requirements are needed or if we can break it down a bit + deps = all_requirements, +) + +score_py_pytest( + name = "score_header_service_test", + size = "small", + srcs = glob(["test/**/*.py"]), + # All requirements already in the library so no need to have it double + deps = [":score_header_service"], +) diff --git a/src/extensions/score_header_service/__init__.py b/src/extensions/score_header_service/__init__.py index 6e5a7b8a..829d5154 100644 --- a/src/extensions/score_header_service/__init__.py +++ b/src/extensions/score_header_service/__init__.py @@ -11,7 +11,7 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* from sphinx.application import Sphinx -from score_header_service.header_service import register +from src.extensions.score_header_service.header_service import register def setup(app: Sphinx) -> dict[str, str | bool]: diff --git a/src/extensions/score_header_service/test/test_header_service.py b/src/extensions/score_header_service/test/test_header_service.py index 478dfa84..3c232642 100644 --- a/src/extensions/score_header_service/test/test_header_service.py +++ b/src/extensions/score_header_service/test/test_header_service.py @@ -15,7 +15,7 @@ from unittest.mock import ANY, MagicMock, patch import pytest -import 
score_header_service.header_service as hs +import src.extensions.score_header_service.header_service as hs from sphinx.util.docutils import SphinxDirective @@ -40,7 +40,8 @@ def test_register(): mock_env.app = mock_service mock_data.env = mock_env with patch( - "score_header_service.header_service.SphinxNeedsData", return_value=mock_data + "src.extensions.score_header_service.header_service.SphinxNeedsData", + return_value=mock_data, ): hs.register(mock_app, mock_env, None) mock_service.register.assert_called_once_with("header-service", ANY) @@ -52,8 +53,8 @@ def test_generate_hash(randint_mock: MagicMock): assert hs.generate_hash() == "73475cb4" -@patch("score_header_service.header_service.generate_hash") -@patch("score_header_service.header_service._extract_github_data") +@patch("src.extensions.score_header_service.header_service.generate_hash") +@patch("src.extensions.score_header_service.header_service._extract_github_data") @patch("sphinx.application.Sphinx") def test_request_from_directive_github_data( mock_app: MagicMock, @@ -84,8 +85,8 @@ def test_request_from_directive_github_data( ] -@patch("score_header_service.header_service.generate_hash") -@patch("score_header_service.header_service._extract_merge_commit_data") +@patch("src.extensions.score_header_service.header_service.generate_hash") +@patch("src.extensions.score_header_service.header_service._extract_merge_commit_data") @patch("sphinx.application.Sphinx") def test_request_from_directive_commit_data( mock_app: MagicMock, @@ -120,7 +121,9 @@ def test_request_from_directive_commit_data( mock_extract_merge_commit_data.assert_called_once_with("file1.rst") -@patch("score_header_service.header_service.HeaderService.request_from_directive") +@patch( + "src.extensions.score_header_service.header_service.HeaderService.request_from_directive" +) @patch("sphinx.application.Sphinx") def test_debug(mock_app: MagicMock, mock_request_from_directive: MagicMock): debug_data = [{"key": "value"}] @@ -130,7 +133,7 @@ 
def test_debug(mock_app: MagicMock, mock_request_from_directive: MagicMock): assert header_service.debug(mock_directive) == debug_data -@patch("score_header_service.header_service.subprocess.run") +@patch("src.extensions.score_header_service.header_service.subprocess.run") def test_extract_merge_commit_data(run_mock: MagicMock): lines = """abcdef John Doe @@ -166,7 +169,7 @@ def test_extract_merge_commit_data(run_mock: MagicMock): ) -@patch("score_header_service.header_service.subprocess.run") +@patch("src.extensions.score_header_service.header_service.subprocess.run") def test_extract_merge_commit_data_error(run_mock: MagicMock): result_mock = MagicMock() result_mock.returncode = 1 @@ -180,15 +183,15 @@ def test_extract_merge_commit_data_error(run_mock: MagicMock): } -@patch("score_header_service.header_service._extract_approvers") -@patch("score_header_service.header_service._extract_reviewers") -@patch("score_header_service.header_service._extract_team_info") -@patch("score_header_service.header_service._extract_org") -@patch("score_header_service.header_service._extract_repo") -@patch("score_header_service.header_service._extract_pull_request") -@patch("score_header_service.header_service._extract_github_token") -@patch("score_header_service.header_service.Auth.Token") -@patch("score_header_service.header_service.Github") +@patch("src.extensions.score_header_service.header_service._extract_approvers") +@patch("src.extensions.score_header_service.header_service._extract_reviewers") +@patch("src.extensions.score_header_service.header_service._extract_team_info") +@patch("src.extensions.score_header_service.header_service._extract_org") +@patch("src.extensions.score_header_service.header_service._extract_repo") +@patch("src.extensions.score_header_service.header_service._extract_pull_request") +@patch("src.extensions.score_header_service.header_service._extract_github_token") +@patch("src.extensions.score_header_service.header_service.Auth.Token") 
+@patch("src.extensions.score_header_service.header_service.Github") def test_extract_github_data( mock_github: MagicMock, mock_auth_token: MagicMock, @@ -229,7 +232,7 @@ def test_extract_github_data( } -@patch("score_header_service.header_service.Auth.Token") +@patch("src.extensions.score_header_service.header_service.Auth.Token") def test_extract_github_data_exception(mock_auth_token: MagicMock): mock_auth_token.side_effect = Exception("Error") assert hs._extract_github_data() == { # type: ignore @@ -283,7 +286,7 @@ def test_extract_team_info(): } -@patch("score_header_service.header_service._append_approver_teams") +@patch("src.extensions.score_header_service.header_service._append_approver_teams") def test_extract_approvers(mock_append_approver_teams: MagicMock): mock_review1 = MagicMock() mock_review2 = MagicMock() diff --git a/src/extensions/score_header_service/test/test_header_service_integration.py b/src/extensions/score_header_service/test/test_header_service_integration.py index 53c213c1..ef7d9f83 100644 --- a/src/extensions/score_header_service/test/test_header_service_integration.py +++ b/src/extensions/score_header_service/test/test_header_service_integration.py @@ -16,7 +16,7 @@ from unittest.mock import MagicMock, patch import pytest -import score_header_service.header_service as hs +import src.extensions.score_header_service.header_service as hs from pytest import TempPathFactory from sphinx.testing.util import SphinxTestApp @@ -82,7 +82,7 @@ def wrapper(use_github_data: bool = True): return f""" extensions = [ "sphinx_needs", - "score_header_service", + "src.extensions.score_header_service", ] needs_types = [ dict(title = "Review Header", directive = "review_header", color="#BFD8D2", style="node", @@ -158,8 +158,8 @@ def template_needs(): """ -@patch("score_header_service.header_service.generate_hash") -@patch("score_header_service.header_service._extract_github_data") +@patch("src.extensions.score_header_service.header_service.generate_hash") 
+@patch("src.extensions.score_header_service.header_service._extract_github_data") def test_header_service_integration_github_data( mock_extract_github_data: MagicMock, mock_generate_hash: MagicMock, @@ -192,8 +192,8 @@ def test_header_service_integration_github_data( raise AssertionError(f"Build failed: {err}") from err -@patch("score_header_service.header_service.generate_hash") -@patch("score_header_service.header_service._extract_merge_commit_data") +@patch("src.extensions.score_header_service.header_service.generate_hash") +@patch("src.extensions.score_header_service.header_service._extract_merge_commit_data") def test_header_service_integration_commit_data( mock_extract_merge_commit_data: MagicMock, mock_generate_hash: MagicMock, diff --git a/src/extensions/score_layout/BUILD b/src/extensions/score_layout/BUILD new file mode 100644 index 00000000..cbdc4a47 --- /dev/null +++ b/src/extensions/score_layout/BUILD @@ -0,0 +1,25 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +load("@aspect_rules_py//py:defs.bzl", "py_library") +load("@pip_process//:requirements.bzl", "all_requirements") + +py_library( + name = "score_layout", + srcs = glob( + ["*.py"], + ), + imports = ["."], + visibility = ["//visibility:public"], + # TODO: Figure out if all requirements are needed or if we can break it down a bit + deps = all_requirements, +) diff --git a/src/extensions/score_layout/sphinx_options.py b/src/extensions/score_layout/sphinx_options.py index b9f4b74a..663f3104 100644 --- a/src/extensions/score_layout/sphinx_options.py +++ b/src/extensions/score_layout/sphinx_options.py @@ -55,4 +55,4 @@ class SingleLayout(TypedDict): }, } -needs_global_options = {"layout": "score"} +needs_global_options = {"layout": {"default": "score"}} diff --git a/src/extensions/score_metamodel/BUILD b/src/extensions/score_metamodel/BUILD new file mode 100644 index 00000000..9f1009f8 --- /dev/null +++ b/src/extensions/score_metamodel/BUILD @@ -0,0 +1,36 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +load("@aspect_rules_py//py:defs.bzl", "py_library") +load("@pip_process//:requirements.bzl", "all_requirements") +load("@score_python_basics//:defs.bzl", "score_py_pytest") + +py_library( + name = "score_metamodel", + srcs = glob( + ["**/*.py"], + exclude = ["**/tests/**"], + ), + data = glob(["*.yaml"]), # Needed to remove 'resolving of symlink' in score_metamodel.__init__ + imports = [".."], + visibility = ["//visibility:public"], + # TODO: Figure out if all requirements are needed or if we can break it down a bit + deps = all_requirements, +) + +score_py_pytest( + name = "score_metamodel_tests", + size = "small", + srcs = glob(["tests/**/*.py"]), + # All requirements already in the library so no need to have it double + deps = [":score_metamodel"], +) diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index e9cb3e02..092c20e4 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -12,9 +12,13 @@ # ******************************************************************************* import importlib import pkgutil +import json +import os + from collections.abc import Callable from pathlib import Path + from ruamel.yaml import YAML from sphinx.application import Sphinx from sphinx_needs import logging @@ -257,7 +261,22 @@ def default_options() -> list[str]: ] +def parse_external_needs_sources(app: Sphinx, config): + # HACK: mabye there is a nicer way for this + if app.config.external_needs_source != "[]": + x = None + x = json.loads(app.config.external_needs_source) + if r := os.getenv("RUNFILES_DIR"): + if x[0].get("json_path", None): + for a in x: + # This 
is needed to allow for the needs.json to be found locally + if "json_path" in a.keys(): + a["json_path"] = r + a["json_path"] + app.config.needs_external_needs = x + + def setup(app: Sphinx) -> dict[str, str | bool]: + app.add_config_value("external_needs_source", "", rebuild="env") app.config.needs_id_required = True app.config.needs_id_regex = "^[A-Za-z0-9_-]{6,}" @@ -271,6 +290,7 @@ def setup(app: Sphinx) -> dict[str, str | bool]: app.config.graph_checks = metamodel["needs_graph_check"] app.config.stop_words = metamodel["stop_words"] app.config.weak_words = metamodel["weak_words"] + app.connect("config-inited", parse_external_needs_sources) discover_checks() diff --git a/src/extensions/score_source_code_linker/BUILD b/src/extensions/score_source_code_linker/BUILD new file mode 100644 index 00000000..2c0402cf --- /dev/null +++ b/src/extensions/score_source_code_linker/BUILD @@ -0,0 +1,44 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +load("@aspect_rules_py//py:defs.bzl", "py_library") +load("@pip_process//:requirements.bzl", "all_requirements") +load("@score_python_basics//:defs.bzl", "score_py_pytest") + +#exports_files(["parse_source_files.py"]) + +py_library( + name = "score_source_code_linker", + srcs = glob( + ["**/*.py"], + exclude = ["tests/*.py"], + ), + imports = ["."], + visibility = ["//visibility:public"], +) + +score_py_pytest( + name = "score_source_code_linker_test", + size = "small", + srcs = glob(["tests/**/*.py"]), + deps = [ + ":score_source_code_linker", + ] + all_requirements, +) + +# Needed to make the file parser executeable and findable for the source_code_linker aspect +py_binary( + name = "parsed_source_files_for_source_code_linker", + srcs = ["parse_source_files.py"], + main = "parse_source_files.py", + visibility = ["//visibility:public"], +) diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index 7619680c..22e6ac70 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -16,7 +16,7 @@ from pathlib import Path from pprint import pprint -from score_source_code_linker.parse_source_files import GITHUB_BASE_URL +from src.extensions.score_source_code_linker.parse_source_files import GITHUB_BASE_URL from sphinx.application import Sphinx from sphinx.environment import BuildEnvironment from sphinx_needs.data import SphinxNeedsData @@ -31,6 +31,16 @@ def setup(app: Sphinx) -> dict[str, str | bool]: app.add_config_value("disable_source_code_linker", False, rebuild="env") 
app.add_config_value("score_source_code_linker_file_overwrite", "", rebuild="env") # TODO: can we detect live_preview & esbonio here? Until then we have a flag: + + # Define need_string_links here to not have it in conf.py + app.config.needs_string_links = { + "source_code_linker": { + "regex": r"(?P[^,]+)", + "link_url": "{{value}}", + "link_name": "Source Code Link", + "options": ["source_code_link"], + }, + } if app.config.disable_source_code_linker: LOGGER.info( "INFO: Disabled source code linker. Not loading extension.", diff --git a/src/extensions/score_source_code_linker/tests/test_requirement_links.py b/src/extensions/score_source_code_linker/tests/test_requirement_links.py index 8ecaffbd..43f03f57 100644 --- a/src/extensions/score_source_code_linker/tests/test_requirement_links.py +++ b/src/extensions/score_source_code_linker/tests/test_requirement_links.py @@ -16,7 +16,7 @@ import pytest from pytest import TempPathFactory -from score_source_code_linker.parse_source_files import ( +from src.extensions.score_source_code_linker.parse_source_files import ( GITHUB_BASE_URL, extract_requirements, get_git_hash, diff --git a/src/extensions/score_source_code_linker/tests/test_source_link.py b/src/extensions/score_source_code_linker/tests/test_source_link.py index 2b95d705..fd33a542 100644 --- a/src/extensions/score_source_code_linker/tests/test_source_link.py +++ b/src/extensions/score_source_code_linker/tests/test_source_link.py @@ -16,7 +16,7 @@ import pytest from pytest import TempPathFactory -from score_source_code_linker.parse_source_files import GITHUB_BASE_URL +from src.extensions.score_source_code_linker.parse_source_files import GITHUB_BASE_URL from sphinx.testing.util import SphinxTestApp from sphinx_needs.data import SphinxNeedsData diff --git a/src/incremental.py b/src/incremental.py index 1ac6cb17..34e1d868 100644 --- a/src/incremental.py +++ b/src/incremental.py @@ -46,20 +46,20 @@ def transform_env_str_to_dict(external_needs_source: str) -> 
list[dict[str, str] Transforms the 'string' we get from 'docs.bzl' back into something we can parse easliy inside sphinx/python !! HACK: This truly isn't great !! """ - l_dict = [] - x = [ + transformed_dicts: list[dict[str, str]] = [] + dict_list = [ x.split(",") for x in external_needs_source.replace("]", "") .replace("[", "") .replace("{", "") .split("}") ] - for d in x: - b = [a.split(":", 1) for a in d if len(d) > 1] - l = {a[0]: a[1] for a in b} - if l: - l_dict.append(l) - return l_dict + for inner_dict in dict_list: + kv_splits = [kv.split(":", 1) for kv in inner_dict if len(inner_dict) > 1] + single_dict = {key_value[0]: key_value[1] for key_value in kv_splits} + if single_dict: + transformed_dicts.append(single_dict) + return transformed_dicts if __name__ == "__main__": From 3d129d442c79923d1341a08a4945e922a4ba8a12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Sat, 3 May 2025 23:43:22 +0200 Subject: [PATCH 006/231] add README (#6) --- README.md | 164 +++++++++++++++++++++++- src/extensions/score_layout/__init__.py | 3 +- 2 files changed, 164 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 05a8a446..23479dad 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,162 @@ -# docs-as-code -Contains docs-as-code tooling +# Bazel Sphinx Documentation Builder + +A Bazel module providing comprehensive tools and extensions for building Sphinx documentation within Bazel projects. + +## Overview + +This module allows you to easily integrate Sphinx documentation generation into your Bazel build system. It provides a collection of utilities, extensions, and themes specifically designed to enhance documentation capabilities while maintaining Bazel's reproducible build environment. 
+ +## Features + +- Seamless integration with Bazel build system +- Custom Sphinx extensions for enhanced documentation +- Support for PlantUML diagrams +- Source code linking capabilities +- Metamodel validation and checks +- Custom layouts and themes +- Header service for consistent documentation styling + +## Getting Started + +### Installation + +Add the module to your `MODULE.bazel` file: + +```starlark +bazel_dep(name = "docs-as-code", version = "0.1.0") +``` + +And make sure to also add the S-core bazel registry to your `.bazelrc` file + +```starlark +common --registry=https://raw.githubusercontent.com/eclipse-score/bazel_registry/main/ +common --registry=https://bcr.bazel.build +``` + +______________________________________________________________________ + +### Basic Usage + +#### 1. Import the `docs()` macro in your BUILD file: + +```python +load("@docs-as-code//docs.bzl", "docs") + +docs( + conf_dir = "", + source_dir = "", + docs_targets = [ + { + # For more detailed explenation look at the 'docs_targets' section + "suffix": "", # This creates the normal 'incremental' and 'docs' target + }, + ], + source_files_to_scan_for_needs_links = [ + # Note: you can add filegroups, globs, or entire targets here. + "" + ], +) +``` + +#### 2. Adapt your conf.py if needed + +```python +# ... +extensions = [ + "sphinx_design", + "sphinx_needs", + "sphinxcontrib.plantuml", + "score_plantuml", + "score_metamodel", + "score_draw_uml_funcs", + "score_source_code_linker", + "score_layout", +] +# ... +``` + +Make sure that your conf.py imports all of the extensions you want to enable.\ +For a full example look at [This repos conf.py](process-docs/conf.py) + +#### 3. Run a documentation build: + +```bash +bazel run //path/to/BUILD-file:incremental # documentation at '_build/' +bazel build //path/to/BUILD-file:docs # documentation at 'bazel-bin/ +``` + +#### 4. 
Access your documentation at + +- `_build/` for incremental +- `bazel-bin/bazel-bin//docs/_build/html` + +______________________________________________________________________ + +### Available Targets + +Using the `docs` macro enables multiple targets which are now useable. + +| Target Name | What it does | How to execute | +|---------------|-----------------------------------------------------------|-----------------| +| docs | Builds documentation in sandbox | `bazel build` | +| incremental | Builds documentation incrementally (faster) | `bazel run` | +| live_preview | Creates a live_preview of the documentation viewable in a local server | `bazel run` | +| ide_support | Creates virtual environment under '.venv_docs' | `bazel run` | + +______________________________________________________________________ + +## Configuration Options + +The `docs()` macro accepts the following arguments: + +| Parameter | Description | Required | Default | +|-----------|-------------|----------|---------| +| `conf_dir` | Path to the 'conf.py' containing folder | No | 'docs' | +| `source_dir` | Documentation source files (RST, MD) | No | 'docs' | +| `build_dir_for_incremental` | Output folder for the incremental build | No | '\_build' | +| `docs_targets` | List of dictionaries which allows multi-repo setup | Yes | - | +| `source_files_to_scan_for_needs_links` | List of targets,globs,filegroups that the 'source_code_linker' should parse | No | `[]` | +| `visibility` | Bazel visibility | No | `None` | + +## Advanced Usage + +### Custom Configuration + +#### Docs-targets + +!! TODO !! +This should be filled out after the local mutli-repo tests are integrated and we have examples of different configurations + +## Available Extensions + +This module includes several custom Sphinx extensions to enhance your documentation: + +### Score Layout Extension + +Custom layout options for Sphinx HTML output. 
+[Learn more](src/extensions/score_layout/README.md) + +### Score Header Service + +Consistent header styling across documentation pages. +[Learn more](src/extensions/score_header_service/README.md) + +### Score Metamodel + +Validation and checking of documentation structure against a defined metamodel. +[Learn more](src/extensions/score_metamodel/README.md) + +### Score Source Code Linker + +Links between requirements documentation and source code implementations. +[Learn more](src/extensions/score_source_code_linker/README.md) + +### Score PlantUML + +Integration with PlantUML for generating diagrams. +[Learn more](src/extensions/README.md) + +### Score Draw UML Functions + +Helper functions for creating UML diagrams. +[Learn more](src/extensions/score_draw_uml_funcs/README.md) diff --git a/src/extensions/score_layout/__init__.py b/src/extensions/score_layout/__init__.py index a287dbb0..240a6b45 100644 --- a/src/extensions/score_layout/__init__.py +++ b/src/extensions/score_layout/__init__.py @@ -14,7 +14,8 @@ from sphinx.application import Sphinx -from score_layout import html_options, sphinx_options +import html_options +import sphinx_options def setup(app: Sphinx) -> dict[str, str | bool]: From 312dd89b7b63bc91335181dc7be5f2b01dd2aa7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Thu, 8 May 2025 12:46:17 +0200 Subject: [PATCH 007/231] Make docs-as-code useable (#7) Plucking 'docs/_tooling' from 'eclipse-score/score' to enable it as standalone 'Module'. * Adding commits from 'score' Some changes have been made by 'mmr1909'. The original/referenced commit is this one: https://github.com/eclipse-score/score/commit/8e8646b3ccb6a1ce92d691f7457900d7e3b999bd Adding commits from 'score' regarding the 'docs/_tooling' folder as is. 
The original commits were by: hoe-jo The commits can be found here: https://github.com/eclipse-score/score/commit/233c03d42fa2224eeb7a39e17312a0269eb3b32b https://github.com/eclipse-score/score/commit/5d1200e1bedf0bea984ef589e79237fd47f8e3da --- MODULE.bazel | 4 +- docs.bzl | 35 +- process-docs/BUILD | 2 +- process-docs/conf.py | 2 - src/BUILD | 61 ++-- src/__init__.py | 0 src/extensions/BUILD | 3 +- .../score_draw_uml_funcs/__init__.py | 342 +++++++++--------- .../score_draw_uml_funcs/helpers.py | 22 +- src/extensions/score_layout/__init__.py | 16 +- src/extensions/score_metamodel/BUILD | 8 +- src/extensions/score_metamodel/__init__.py | 2 +- .../score_metamodel/checks/check_options.py | 12 +- .../score_metamodel/checks/traceability.py | 108 ------ src/extensions/score_metamodel/metamodel.yaml | 71 ++-- .../rst/options/test_options_options.rst | 37 +- .../tests/test_attributes_format.py | 196 ---------- .../tests/test_check_options.py | 241 +++--------- .../tests/test_id_contains_feature.py | 39 -- .../tests/test_rules_file_based.py | 2 + .../tests/test_traceability.py | 229 ------------ src/extensions/score_plantuml.py | 18 +- src/find_runfiles/BUILD | 31 ++ src/incremental.py | 4 - 24 files changed, 465 insertions(+), 1020 deletions(-) create mode 100644 src/__init__.py delete mode 100644 src/extensions/score_metamodel/checks/traceability.py delete mode 100644 src/extensions/score_metamodel/tests/test_attributes_format.py delete mode 100644 src/extensions/score_metamodel/tests/test_id_contains_feature.py delete mode 100644 src/extensions/score_metamodel/tests/test_traceability.py create mode 100644 src/find_runfiles/BUILD diff --git a/MODULE.bazel b/MODULE.bazel index cbe4a5fd..c60b2d70 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -89,7 +89,7 @@ http_file( bazel_dep(name = "score_python_basics", version = "0.3.0") # Checker rule for CopyRight checks/fixes -bazel_dep(name = "score_cr_checker", version = "0.2.0", dev_dependency = True) +bazel_dep(name = 
"score_cr_checker", version = "0.2.2") # Grab dash -bazel_dep(name = "score_dash_license_checker", version = "0.1.1", dev_dependency = True) +bazel_dep(name = "score_dash_license_checker", version = "0.1.1") diff --git a/docs.bzl b/docs.bzl index 923231af..b5926760 100644 --- a/docs.bzl +++ b/docs.bzl @@ -37,21 +37,23 @@ # # For user-facing documentation, refer to `/README.md`. -load("@aspect_rules_py//py:defs.bzl", "py_binary") +load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") +load("@docs-as-code//src/extensions/score_source_code_linker:collect_source_files.bzl", "parse_source_files_for_needs_links") load("@pip_process//:requirements.bzl", "all_requirements", "requirement") +load("@rules_java//java:java_binary.bzl", "java_binary") load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary", "sphinx_docs") load("@rules_python//sphinxdocs:sphinx_docs_library.bzl", "sphinx_docs_library") load("@score_python_basics//:defs.bzl", "score_virtualenv") -load("//src/extensions/score_source_code_linker:collect_source_files.bzl", "parse_source_files_for_needs_links") sphinx_requirements = all_requirements + [ - "//src:plantuml_for_python", - "//src/extensions:score_plantuml", - "//src/extensions/score_draw_uml_funcs:score_draw_uml_funcs", - "//src/extensions/score_header_service:score_header_service", - "//src/extensions/score_layout:score_layout", - "//src/extensions/score_metamodel:score_metamodel", - "//src/extensions/score_source_code_linker:score_source_code_linker", + "@docs-as-code//src:plantuml_for_python", + "@docs-as-code//src/extensions:score_plantuml", + "@docs-as-code//src/find_runfiles:find_runfiles", + "@docs-as-code//src/extensions/score_draw_uml_funcs:score_draw_uml_funcs", + "@docs-as-code//src/extensions/score_header_service:score_header_service", + "@docs-as-code//src/extensions/score_layout:score_layout", + "@docs-as-code//src/extensions/score_metamodel:score_metamodel", + 
"@docs-as-code//src/extensions/score_source_code_linker:score_source_code_linker", ] def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_dir = "docs", build_dir_for_incremental = "_build", docs_targets = []): @@ -61,9 +63,11 @@ def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_ Current restrictions: * only callable from 'docs/BUILD' """ + sphinx_build_binary( name = "sphinx_build", visibility = ["//visibility:public"], + data = ["@docs-as-code//src:docs_assets", "@docs-as-code//src:score_extension_files"], deps = sphinx_requirements, ) @@ -117,11 +121,13 @@ def _incremental(incremental_name = "incremental", live_name = "live_preview", s """ dependencies = sphinx_requirements + extra_dependencies + ["@rules_python//python/runfiles"] + py_binary( name = incremental_name, - srcs = ["//src:incremental.py"], + srcs = ["@docs-as-code//src:incremental.py"], deps = dependencies, - data = [":score_source_code_parser"] + dependencies, + # TODO: Figure out if we need all dependencies as data here or not. 
+ data = [":score_source_code_parser", "@docs-as-code//src:plantuml", "@docs-as-code//src:docs_assets"] + dependencies, env = { "SOURCE_DIRECTORY": source_dir, "CONF_DIRECTORY": conf_dir, @@ -133,9 +139,9 @@ def _incremental(incremental_name = "incremental", live_name = "live_preview", s py_binary( name = live_name, - srcs = ["//src:incremental.py"], + srcs = ["@docs-as-code//src:incremental.py"], deps = dependencies, - data = external_needs_deps, + data = ["@docs-as-code//src:plantuml", "@docs-as-code//src:docs_assets"] + dependencies, env = { "SOURCE_DIRECTORY": source_dir, "CONF_DIRECTORY": conf_dir, @@ -185,7 +191,8 @@ def _docs(name = "docs", format = "html", external_needs_deps = list(), external ], tools = [ ":score_source_code_parser", - "//src:plantuml", + "@docs-as-code//src:plantuml", + "@docs-as-code//src:docs_assets", ] + external_needs_deps, visibility = ["//visibility:public"], ) diff --git a/process-docs/BUILD b/process-docs/BUILD index bb5e4214..3dfd25f8 100644 --- a/process-docs/BUILD +++ b/process-docs/BUILD @@ -19,6 +19,7 @@ load("//:docs.bzl", "docs") # - `docs:live_preview` for live preview in the browser without an IDE # - `docs:ide_support` for creating python virtualenv for IDE support # - `docs:docs` for building documentation at build-time + docs( conf_dir = "process-docs", docs_targets = [ @@ -30,7 +31,6 @@ docs( source_files_to_scan_for_needs_links = [ # Note: you can add filegroups, globs, or entire targets here. 
"//src:score_extension_files", - "//src:plantuml_for_python", ], ) diff --git a/process-docs/conf.py b/process-docs/conf.py index cd7b28b6..9d52ec80 100644 --- a/process-docs/conf.py +++ b/process-docs/conf.py @@ -60,7 +60,5 @@ # Enable numref numfig = True -# TODO: Fixing this in all builds -html_static_path = ["../src/assets"] logger.debug("After loading S-CORE conf.py") diff --git a/src/BUILD b/src/BUILD index 0a313773..3b5e8907 100644 --- a/src/BUILD +++ b/src/BUILD @@ -33,6 +33,27 @@ exports_files( visibility = ["//visibility:public"], ) +java_binary( + name = "plantuml", + jvm_flags = ["-Djava.awt.headless=true"], + main_class = "net.sourceforge.plantuml.Run", + visibility = ["//visibility:public"], + runtime_deps = [ + "@plantuml//jar", + ], +) + +# This makes it possible for py_venv to depend on plantuml. +# Note: py_venv can only depend on py_library. +# TODO: This can be removed with the next +# upgrade of `aspect_rules_py` since the py_venv rule now supports a data field +py_library( + name = "plantuml_for_python", + srcs = ["@docs-as-code//src:dummy.py"], + data = ["@docs-as-code//src:plantuml"], + visibility = ["//visibility:public"], +) + # In order to update the requirements, change the `requirements.txt` file and run: # `bazel run //docs:requirements`. # This will update the `requirements_lock.txt` file. @@ -68,27 +89,6 @@ pkg_tar( srcs = [":html_files"], ) -java_binary( - name = "plantuml", - jvm_flags = ["-Djava.awt.headless=true"], - main_class = "net.sourceforge.plantuml.Run", - visibility = ["//visibility:public"], - runtime_deps = [ - "@plantuml//jar", - ], -) - -# This makes it possible for py_venv to depend on plantuml. -# Note: py_venv can only depend on py_library. 
-# TODO: This can be removed with the next -# upgrade of `aspect_rules_py` since the py_venv rule now supports a data field -py_library( - name = "plantuml_for_python", - srcs = ["dummy.py"], - data = [":plantuml"], - visibility = ["//visibility:public"], -) - filegroup( name = "score_extension_files", srcs = glob( @@ -102,6 +102,14 @@ filegroup( visibility = ["//visibility:public"], ) +filegroup( + name = "docs_assets", + srcs = glob([ + "assets/**/*", + ]), + visibility = ["//visibility:public"], +) + # Running this executes the `collect_source_files.bzl` aspect. # Collects all source files from specified targets in 'deps', and makes them available for parsing for the source_code_linker @@ -114,6 +122,17 @@ filegroup( visibility = ["//visibility:public"], ) +# Needed for 'test_rules_file_based' +filegroup( + name = "test_rst_files", + srcs = glob([ + "extensions/**/*.rst", + "extensions/**/*.py", + "conf.py", + ]), + visibility = ["//visibility:public"], +) + dash_license_checker( src = ":requirements_lock", file_type = "requirements", # let it auto-detect based on project_config diff --git a/src/__init__.py b/src/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/extensions/BUILD b/src/extensions/BUILD index 4529763f..0cb13c76 100644 --- a/src/extensions/BUILD +++ b/src/extensions/BUILD @@ -24,6 +24,7 @@ load("@score_python_basics//:defs.bzl", "score_py_pytest", "score_virtualenv") py_library( name = "score_plantuml", - srcs = ["score_plantuml.py"], + srcs = ["@docs-as-code//src/extensions:score_plantuml.py"], + imports = ["."], visibility = ["//visibility:public"], ) diff --git a/src/extensions/score_draw_uml_funcs/__init__.py b/src/extensions/score_draw_uml_funcs/__init__.py index d7c8774f..27df4a31 100644 --- a/src/extensions/score_draw_uml_funcs/__init__.py +++ b/src/extensions/score_draw_uml_funcs/__init__.py @@ -42,10 +42,11 @@ gen_struct_element, get_alias, get_hierarchy_text, - get_impl_comp_from_real_iface, + 
get_impl_comp_from_logic_iface, get_interface_from_component, get_interface_from_int, get_logical_interface_real, + get_module, get_real_interface_logical, ) @@ -90,41 +91,43 @@ def draw_comp_incl_impl_int( all_needs: dict[str, dict[str, str]], proc_impl_interfaces: dict[str, str], proc_used_interfaces: dict[str, list[str]], + white_box_view: bool = False, ) -> tuple[str, str, dict[str, str], dict[str, list[str]]]: """This function draws a component including any interfaces which are implemented by the component :param dict[str,str] need: Component which should be drawn :param dict all_needs: Dictionary containing all needs - :param dict[str,dict] proc_impl_interfaces: Dictionary containing all implemented - interfaces which were already processed during this cycle - :param dict[str,dict] proc_used_interfaces: Dictionary containing - all used interfaces which were already processed during this cycle + :param dict[str,dict] proc_impl_interfaces: Dictionary containing all implemented interfaces + which were already processed during this cycle + :param dict[str,dict] proc_used_interfaces: Dictionary containing all used interfaces + which were already processed during this cycle """ # Draw outer component structure_text = f"{gen_struct_element('component', need)} {{\n" linkage_text = "" - # Draw inner (sub)components recursively - for need_inc in need.get("includes", []): - curr_need = all_needs.get(need_inc, {}) + # Draw inner (sub)components recursively if requested + if white_box_view: + for need_inc in need.get("includes", []): + curr_need = all_needs.get(need_inc, {}) - # check for misspelled include - if not curr_need: - logger.info(f"{need}: include {need_inc} could not be found") - continue + # check for misspelled include + if not curr_need: + logger.info(f"{need}: include {need_inc} could not be found") + continue - if curr_need["type"] != "comp_arc_sta": - continue + if curr_need["type"] != "comp_arc_sta": + continue - sub_structure, sub_linkage, 
proc_impl_interfaces, proc_used_interfaces = ( - draw_comp_incl_impl_int( - curr_need, all_needs, proc_impl_interfaces, proc_used_interfaces + sub_structure, sub_linkage, proc_impl_interfaces, proc_used_interfaces = ( + draw_comp_incl_impl_int( + curr_need, all_needs, proc_impl_interfaces, proc_used_interfaces + ) ) - ) - structure_text += sub_structure - linkage_text += sub_linkage + structure_text += sub_structure + linkage_text += sub_linkage # close outer component structure_text += f"}} /' {need['title']} '/ \n\n" @@ -142,7 +145,6 @@ def draw_comp_incl_impl_int( continue if not proc_impl_interfaces.get(iface, []): - structure_text += gen_interface_element(iface, all_needs, True) linkage_text += f"{ gen_link_text( need, @@ -168,10 +170,36 @@ def draw_comp_incl_impl_int( return structure_text, linkage_text, proc_impl_interfaces, proc_used_interfaces +def draw_impl_interface( + need: dict[str, str], + all_needs: dict[str, dict[str, str]], + local_impl_interfaces: set[str], +) -> set[str]: + # At First Logical Implemented Interfaces outside the Module + for need_inc in need.get("includes", []): + curr_need = all_needs.get(need_inc, {}) + + # check for misspelled include + if not curr_need: + logger.info(f"{need}: include with id {need_inc} could not be found") + continue + + draw_impl_interface(curr_need, all_needs, local_impl_interfaces) + + # Find implemented logical interface of the components inside the module + local_impl_interfaces.update( + get_interface_from_component(need, "implements", all_needs) + ) + + return local_impl_interfaces + + def draw_module( need: dict[str, str], all_needs: dict[str, dict[str, str]], -) -> tuple[str, str]: + proc_impl_interfaces: dict[str, str], + proc_used_interfaces: dict[str, list[str]], +) -> tuple[str, str, dict[str, str], dict[str, list[str]]]: """ Drawing and parsing function of a component. 
@@ -224,131 +252,85 @@ def draw_module( (Structure Text, Linkage Text, Processed (Real Interfaces), Processed Logical Interfaces) """ - # Store all Elements which have already been processed - proc_impl_interfaces: dict[str, str] = dict() - proc_used_interfaces: dict[str, list[str]] = dict() - proc_logical_interfaces: dict[str, str] = dict() - - structure_text = f"{gen_struct_element('package', need)} {{\n" linkage_text = "" + structure_text = "" - structure_text, linkage_text, proc_impl_interfaces, proc_used_interfaces = ( - process_included_components( - need, - all_needs, - structure_text, - linkage_text, - proc_impl_interfaces, - proc_used_interfaces, - ) - ) - structure_text += f"}} /' {need['title']} '/ \n\n" + # Draw all implemented interfaces outside the boxes + local_impl_interfaces = draw_impl_interface(need, all_needs, set()) - structure_text, linkage_text, proc_logical_interfaces = add_logical_interfaces( - proc_impl_interfaces, - proc_logical_interfaces, - all_needs, - structure_text, - linkage_text, - ) - structure_text, linkage_text = add_used_interfaces( - proc_used_interfaces, - proc_impl_interfaces, - all_needs, - structure_text, - linkage_text, - ) - linkage_text = "\n".join(set(linkage_text.split("\n"))) + "\n" + # Add all interfaces which are implemented by component to global list + # and provide implementation + for iface in local_impl_interfaces: + # check for misspelled implements + if not all_needs.get(iface, []): + logger.info(f"{need}: implements {iface} could not be found") + continue - return structure_text, linkage_text + if not proc_impl_interfaces.get(iface, []): + structure_text += gen_interface_element(iface, all_needs, True) + # Draw outer module + structure_text += f"{gen_struct_element('package', need)} {{\n" -def process_included_components( - need: dict[str, str], - all_needs: dict[str, dict[str, str]], - structure_text: str, - linkage_text: str, - proc_impl_interfaces: dict[str, str], - proc_used_interfaces: dict[str, 
list[str]], -) -> tuple[str, str, dict[str, str], dict[str, list[str]]]: + # Draw inner components recursively for need_inc in need.get("includes", []): curr_need = all_needs.get(need_inc, {}) + + # check for misspelled include if not curr_need: logger.info(f"{need}: include with id {need_inc} could not be found") continue + if curr_need["type"] not in ["comp_arc_sta", "mod_view_sta"]: continue + sub_structure, sub_linkage, proc_impl_interfaces, proc_used_interfaces = ( draw_comp_incl_impl_int( - curr_need, - all_needs, - proc_impl_interfaces, - proc_used_interfaces, + curr_need, all_needs, proc_impl_interfaces, proc_used_interfaces ) ) + structure_text += sub_structure linkage_text += sub_linkage - return structure_text, linkage_text, proc_impl_interfaces, proc_used_interfaces - - -def add_logical_interfaces( - proc_impl_interfaces: dict[str, str], - proc_logical_interfaces: dict[str, str], - all_needs: dict[str, dict[str, str]], - structure_text: str, - linkage_text: str, -) -> tuple[str, str, dict[str, str]]: - for iface in proc_impl_interfaces: - if not proc_logical_interfaces.get(iface, []): - logical_iface_tmp = get_logical_interface_real(iface, all_needs) - if len(logical_iface_tmp) > 1: - logger.warning( - f"{logical_iface_tmp}: only one logical interface per real " - "interface supported" - ) - if logical_iface_tmp: - logical_iface = logical_iface_tmp[0] - proc_logical_interfaces[logical_iface] = iface - structure_text += gen_interface_element(logical_iface, all_needs, True) - linkage_text += f"{ - gen_link_text( - all_needs[iface], '-u->', all_needs[logical_iface], 'implements' - ) - }\n" - else: - print(f"{iface}: Not connected to any virtual interface") - return structure_text, linkage_text, proc_logical_interfaces - + # close outer component + structure_text += f"}} /' {need['title']} '/ \n\n" -def add_used_interfaces( - proc_used_interfaces: dict[str, list[str]], - proc_impl_interfaces: dict[str, str], - all_needs: dict[str, dict[str, str]], - 
structure_text: str, - linkage_text: str, -) -> tuple[str, str]: + # Add all interfaces which are used by component for iface, comps in proc_used_interfaces.items(): if iface not in proc_impl_interfaces: - impl_comp_str = get_impl_comp_from_real_iface(iface, all_needs) + # Add implementing components and modules + impl_comp_str = get_impl_comp_from_logic_iface(iface, all_needs) + impl_comp = all_needs.get(impl_comp_str[0], {}) if impl_comp_str else "" if impl_comp: retval = get_hierarchy_text(impl_comp_str[0], all_needs) - structure_text += retval[2] + retval[0] + retval[1] + retval[3] - structure_text += gen_interface_element(iface, all_needs, True) - linkage_text += f"{ - gen_link_text(impl_comp, '-u->', all_needs[iface], 'implements') - } \n" + structure_text += retval[2] # module open + structure_text += retval[0] # rest open + + structure_text += retval[1] # rest close + structure_text += retval[3] # module close + if iface not in local_impl_interfaces: + structure_text += gen_interface_element(iface, all_needs, True) + + # Draw connection between implementing components and interface + linkage_text += f"{gen_link_text(impl_comp, '-u->', all_needs[iface], 'implements')} \n" + else: + # Add only interface if component not defined print(f"{iface}: No implementing component defined") structure_text += gen_interface_element(iface, all_needs, True) + # Interface can be used by multiple components for comp in comps: - linkage_text += f"{ - gen_link_text(all_needs[comp], '-d[#green]->', all_needs[iface], 'uses') - } \n" - return structure_text, linkage_text + # Draw connection between used interfaces and components + linkage_text += f"{gen_link_text(all_needs[comp], '-d[#green]->', all_needs[iface], 'uses')} \n" + + # Remove duplicate links + linkage_text = "\n".join(set(linkage_text.split("\n"))) + "\n" + + return structure_text, linkage_text, proc_impl_interfaces, proc_used_interfaces # 
╭──────────────────────────────────────────────────────────────────────────────╮ @@ -363,72 +345,90 @@ def __repr__(self): def __call__( self, need: dict[str, str], all_needs: dict[str, dict[str, str]] ) -> str: - interfacelist = self._get_interface_list(need, all_needs) - structure_text, impl_comp = self._generate_structure_and_components( - interfacelist, all_needs + interfacelist: list[str] = [] + impl_comp: dict[str, str] = dict() + # Store all Elements which have already been processed + proc_impl_interfaces: dict[str, str] = dict() + proc_used_interfaces: dict[str, list[str]] = dict() + proc_modules: list[str] = list() + + link_text = "" + structure_text = ( + f'actor "Feature User" as {get_alias({"id": "Feature_User"})} \n' ) - link_text = self._generate_links(interfacelist, impl_comp, all_needs, need) - return gen_header() + structure_text + link_text - def _get_interface_list( - self, need: dict[str, str], all_needs: dict[str, dict[str, str]] - ) -> list[str]: - interfacelist: list[str] = [] + # Define Feature as a package + # structure_text += f"{gen_struct_element('package', need)} {{\n" + + # Add logical Interfaces / Interface Operations (aka includes) for need_inc in need.get("includes", []): + # Generate list of interfaces since both interfaces + # and interface operations can be included iface = get_interface_from_int(need_inc, all_needs) if iface not in interfacelist: interfacelist.append(iface) - return interfacelist - - def _generate_structure_and_components( - self, - interfacelist: list[str], - all_needs: dict[str, dict[str, str]], - ) -> tuple[str, dict[str, str]]: - structure_text = ( - f'actor "Feature User" as {get_alias({"id": "Feature_User"})} \n' - ) - impl_comp: dict[str, str] = {} for iface in interfacelist: - if all_needs.get(iface): - structure_text += gen_interface_element(iface, all_needs, True) - real_iface = get_real_interface_logical(iface, all_needs) - if real_iface: - comps = get_impl_comp_from_real_iface(real_iface[0], 
all_needs) + if iface_need := all_needs.get(iface): + if iface: + comps = get_impl_comp_from_logic_iface(iface, all_needs) + if comps: impl_comp[iface] = comps[0] - if im := impl_comp.get(iface): - structure_text += ( - f"{gen_struct_element('component', all_needs[im])}\n" - ) + + if imcomp := impl_comp.get(iface, {}): + module = get_module(imcomp, all_needs) + + if module not in proc_modules: + tmp, link_text, proc_impl_interfaces, proc_used_interfaces = ( + draw_module( + all_needs[module], + all_needs, + proc_impl_interfaces, + proc_used_interfaces, + ) + ) + structure_text += tmp + proc_modules.append(module) + else: - logger.info(f"Interface {iface} could not be found") - return structure_text, impl_comp - - def _generate_links( - self, - interfacelist: list[str], - impl_comp: dict[str, str], - all_needs: dict[str, dict[str, str]], - need: dict[str, str], - ) -> str: - link_text = "" + logger.info(f"{need}: Interface {iface} could not be found") + continue + + # Close Package + # structure_text += f"}} /' {need['title']} '/ \n\n" + for iface in interfacelist: if imcomp := impl_comp.get(iface): + # Add relation between Actor and Interfaces link_text += f"{ gen_link_text( {'id': 'Feature_User'}, '-d->', all_needs[iface], 'use' ) } \n" - link_text += f"{ - gen_link_text( - all_needs[imcomp], '-u->', all_needs[iface], 'implements' + + # Add relation between interface and component + if imcomp := impl_comp.get(iface): + link_text += f"{ + gen_link_text( + all_needs[imcomp], + '-u->', + all_needs[iface], + 'implements', + ) + } \n" + else: + logger.info( + f"Interface {iface} is not implemented by any component" ) - } \n" else: logger.info(f"{need}: Interface {iface} could not be found") - return link_text + continue + + # Remove duplicate links + link_text = "\n".join(set(link_text.split("\n"))) + "\n" + + return gen_header() + structure_text + link_text class draw_full_module: @@ -438,7 +438,12 @@ def __repr__(self): def __call__( self, need: dict[str, str], 
all_needs: dict[str, dict[str, str]] ) -> str: - structure_text, linkage_text = draw_module(need, all_needs) + # Store all Elements which have already been processed + proc_impl_interfaces: dict[str, str] = dict() + proc_used_interfaces: dict[str, list[str]] = dict() + structure_text, linkage_text, proc_impl_interfaces, proc_used_interfaces = ( + draw_module(need, all_needs, proc_impl_interfaces, proc_used_interfaces) + ) return gen_header() + structure_text + linkage_text @@ -451,9 +456,22 @@ def __call__( self, need: dict[str, str], all_needs: dict[str, dict[str, str]] ) -> str: structure_text, linkage_text, _, _ = draw_comp_incl_impl_int( - need, all_needs, dict(), dict() + need, all_needs, dict(), dict(), True ) + # Draw all implemented interfaces outside the boxes + local_impl_interfaces = draw_impl_interface(need, all_needs, set()) + + # Add all interfaces which are implemented by component to global list + # and provide implementation + for iface in local_impl_interfaces: + # check for misspelled implements + if not all_needs.get(iface, []): + logger.info(f"{need}: implements {iface} could not be found") + continue + + structure_text += gen_interface_element(iface, all_needs, True) + return gen_header() + structure_text + linkage_text diff --git a/src/extensions/score_draw_uml_funcs/helpers.py b/src/extensions/score_draw_uml_funcs/helpers.py index c2e3918e..ef59ebbe 100644 --- a/src/extensions/score_draw_uml_funcs/helpers.py +++ b/src/extensions/score_draw_uml_funcs/helpers.py @@ -31,7 +31,7 @@ def gen_format(need: dict[str, str]) -> str: if "comp_arc_sta" in need["type"] and need["safety"] == "ASIL_B": style = "<>" - if "comp_arc_int" in need["type"]: + if "real_arc_int" in need["type"]: style = "" if need["language"] == "rust" else "" return style @@ -122,6 +122,20 @@ def get_need_link(need: dict[str, str]) -> str: return f"[[{link}]]" +def get_module(component: str, all_needs: dict[str, dict[str, str]]) -> str: + need = all_needs.get(component, {}) + + if 
need: + module = need.get("includes_back", "") + + if module: + return module[0] + else: + logger.warning(f"{component}: not defined, misspelled?") + + return "" + + def get_hierarchy_text( component: str, all_needs: dict[str, dict[str, str]] ) -> tuple[str, str, str, str]: @@ -272,12 +286,12 @@ def get_logical_interface_real( return logical_ifaces -def get_impl_comp_from_real_iface( +def get_impl_comp_from_logic_iface( real_iface: str, all_needs: dict[str, dict[str, str]] ) -> list[str]: """Get implementing component of the interface""" - value = all_needs[real_iface].get("implements_back", []) - implcomp = value if isinstance(value, list) else [] + implcomp: list[str] = all_needs[real_iface].get("implements_back", []) + if not implcomp: logger.info( f"{all_needs[real_iface]['id']}: Implementing Component not specified!" diff --git a/src/extensions/score_layout/__init__.py b/src/extensions/score_layout/__init__.py index 240a6b45..4ebc685f 100644 --- a/src/extensions/score_layout/__init__.py +++ b/src/extensions/score_layout/__init__.py @@ -13,7 +13,8 @@ from typing import Any from sphinx.application import Sphinx - +import os +from pathlib import Path import html_options import sphinx_options @@ -34,6 +35,19 @@ def update_config(app: Sphinx, _config: Any): app.config.html_context = html_options.html_context app.config.html_theme_options = html_options.return_html_theme_options(app) + # Setting HTML static path + # For now this seems the only place this is used / needed. 
+ # In the future it might be a good idea to make this available in other places, maybe via the 'find_runfiles' lib + if r := os.getenv("RUNFILES_DIR"): + dirs = [str(x) for x in Path(r).glob("*docs-as-code~")] + if dirs: + # Happens if 'docs-as-code' is used as Module + p = str(r) + "/docs-as-code~/src/assets" + else: + # Only happens in 'docs-as-code' repository + p = str(r) + "/_main/src/assets" + app.config.html_static_path = app.config.html_static_path + [p] + app.add_css_file("css/score.css", priority=500) app.add_css_file("css/score_needs.css", priority=500) app.add_css_file("css/score_design.css", priority=500) diff --git a/src/extensions/score_metamodel/BUILD b/src/extensions/score_metamodel/BUILD index 9f1009f8..f180e555 100644 --- a/src/extensions/score_metamodel/BUILD +++ b/src/extensions/score_metamodel/BUILD @@ -18,10 +18,11 @@ py_library( name = "score_metamodel", srcs = glob( ["**/*.py"], - exclude = ["**/tests/**"], ), data = glob(["*.yaml"]), # Needed to remove 'resolving of symlink' in score_metamodel.__init__ - imports = [".."], + imports = [ + ".", + ], visibility = ["//visibility:public"], # TODO: Figure out if all requirements are needed or if we can break it down a bit deps = all_requirements, @@ -30,7 +31,8 @@ py_library( score_py_pytest( name = "score_metamodel_tests", size = "small", - srcs = glob(["tests/**/*.py"]), + srcs = glob(["tests/*.py"]), # All requirements already in the library so no need to have it double + data = ["//src:test_rst_files"] + glob(["**/*.rst"]), deps = [":score_metamodel"], ) diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 092c20e4..70519d80 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -263,7 +263,7 @@ def default_options() -> list[str]: def parse_external_needs_sources(app: Sphinx, config): # HACK: mabye there is a nicer way for this - if app.config.external_needs_source != "[]": + if 
app.config.external_needs_source not in ["[]", ""]: x = None x = json.loads(app.config.external_needs_source) if r := os.getenv("RUNFILES_DIR"): diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index fa7b898b..ea1d0d9e 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -70,9 +70,16 @@ def validate_fields( values = [str(raw_value)] for value in values: - if not re.match(pattern, value): + try: + if not re.match(pattern, value): + log.warning_for_option( + need, field, f"does not follow pattern `{pattern}`." + ) + except TypeError: log.warning_for_option( - need, field, f"does not follow pattern `{pattern}`." + need, + field, + f"pattern `{pattern}` is not a valid regex pattern.", ) @@ -102,6 +109,7 @@ def check_options( if not need_options.get("mandatory_options", {}): log.warning_for_option(need, "type", "no type info defined for semantic check.") + return # Validate Options and Links checking_dict: CheckingDictType = { diff --git a/src/extensions/score_metamodel/checks/traceability.py b/src/extensions/score_metamodel/checks/traceability.py deleted file mode 100644 index 1db8ac32..00000000 --- a/src/extensions/score_metamodel/checks/traceability.py +++ /dev/null @@ -1,108 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. 
-# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* - -# ╭──────────────────────────────────────╮ -# │ CHECKS DISABLED ON PURPOSE │ -# ╰──────────────────────────────────────╯ - - -# from sphinx.application import Sphinx -# from sphinx_needs.data import NeedsInfoType -# -# from score_metamodel import CheckLogger, graph_check -# -# -# @graph_check -# def check_linkage_parent(app: Sphinx, needs: list[NeedsInfoType], log: CheckLogger): -# """ -# Checking if all linked parent requirements have status valid. -# """ -# # Convert list to dictionary for easy lookup -# needs_dict = {need["id"]: need for need in needs} -# -# for need in needs: -# parents_not_correct = [] -# for satisfie_need in need.get("satisfies", []): -# if needs_dict.get(satisfie_need, {}).get("status") != "valid": -# parents_not_correct.append(satisfie_need) -# -# if parents_not_correct: -# formatted_parents_not_correct = ", ".join( -# f"`{parent}`" for parent in parents_not_correct -# ) -# msg = ( -# f"has a parent requirement(s): {formatted_parents_not_correct} " -# f"with an invalid status." -# ) -# log.warning_for_need(need, msg) -# -# -# @graph_check -# def check_linkage_safety(app: Sphinx, needs: list[NeedsInfoType], log: CheckLogger): -# """ -# Checking if for feature, component and tool requirements it shall be checked -# if at least one parent requirement contains the same or lower ASIL compared -# to the ASIL of the current requirement then it will return False. 
-# """ -# # Convert list to dictionary for easy lookup -# needs_dict = {need["id"]: need for need in needs} -# -# # Mapping of 'Need safety: Allowed parent safety' -# allowed_values_map = {"ASIL_B": -# ["ASIL_B", "ASIL_D"], -# "ASIL_D": -# ["ASIL_D"] -# } -# -# for need in needs: -# # We can skip anything that has no satisfies or is safety level 'QM' -# if not need["satisfies"] or not need["safety"]: -# continue -# -# if need["safety"] == "QM": -# continue -# -# -# allowed_values = allowed_values_map.get(need["safety"], []) -# unsafe_parents = [] -# for satisfie_need in need.get("satisfies", []): -# parent = needs_dict.get(satisfie_need, {}) -# parent_safety = parent.get("safety", "") -# if not parent_safety or parent_safety not in allowed_values: -# unsafe_parents.append(parent) -# if unsafe_parents: -# msg = ( -# f"`{need['id']}` parents: {unsafe_parents} have either no, or not " -# "allowed safety ASIL values. " -# f"Allowed ASIL values: " -# f"{', '.join(f'`{value}`' for value in allowed_values)}. \n" -# ) -# log.warning_for_need(need, msg) -# -# -# @graph_check -# def check_linkage_status(app: Sphinx, needs: list[NeedsInfoType], log: CheckLogger): -# """ -# Checking if for valid feature, component and tool requirements it shall be checked -# if the status of the parent requirement is also valid. -# """ -# needs_dict = {need["id"]: need for need in needs} -# for need in needs: -# if need["status"] == "valid": -# for satisfie_need in need.get("satisfies", []): -# parent_need = needs_dict.get(satisfie_need) # Get parent requirement -# -# if not parent_need or parent_need.get("status") != "valid": -# msg = f"has a valid status but one of its parents: -# "`{satisfie_need}` has an invalid status. 
\n" -# log.warning_for_need(need, msg) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 7ec02645..734619dd 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -158,9 +158,9 @@ needs_types: safety: "^(QM|ASIL_B|ASIL_D)$" realizes: "^wp__.+$" # The following 3 guidance requirements enforce the requirement structure and attributes: - # req-#Id: gd_req__req__structure - # req-#Id: gd_req__requirements_attr_description - # req-#Id: gd_req__req__linkage + # req-Id: gd_req__req__structure + # req-Id: gd_req__requirements_attr_description + # req-Id: gd_req__req__linkage # Requirements stkh_req: title: "Stakeholder Requirement" @@ -190,7 +190,7 @@ needs_types: safety: "^(QM|ASIL_B|ASIL_D)$" status: "^(valid|invalid)$" mandatory_links: - # req-#id: gd_req__req__linkage_fulfill + # req-Id: gd_req__req__linkage_fulfill satisfies: "^stkh_req__.*$" optional_options: codelink: "^.*$" @@ -264,8 +264,9 @@ needs_types: safety: "^(QM|ASIL_B|ASIL_D)$" status: "^(valid|invalid)$" mandatory_links: + includes: "^logic_arc_int(_op)*__.+$" + optional_links: fulfils: "^feat_req__.+$" - includes: "^feat_arc_int(_op)*__.+$" feat_arc_dyn: title: "Feature Architecture Dynamic View" @@ -280,33 +281,32 @@ needs_types: mandatory_links: fulfils: "^feat_req__.+$" - feat_arc_int: - title: "Feature Architecture Interfaces" - prefix: "feat_arc_int__" + logic_arc_int: + title: "Logical Architecture Interfaces" + prefix: "logic_arc_int__" color: "#FEDCD2" style: "card" mandatory_options: - id: "^feat_arc_int__[0-9a-z_]+$" + id: "^logic_arc_int__[0-9a-z_]+$" security: "^(YES|NO)$" safety: "^(QM|ASIL_B|ASIL_D)$" status: "^(valid|invalid)$" - mandatory_links: - fulfils: "^feat_req__.+$" optional_links: - includes: "^feat_arc_int_op__.+$" + includes: "^logic_arc_int_op__.+$" + fulfils: "^comp_req__.+$" - feat_arc_int_op: - title: "Feature Architecture Interface Operation" - prefix: 
"feat_arc_int_op__" + logic_arc_int_op: + title: "Logical Architecture Interface Operation" + prefix: "logic_arc_int_op__" color: "#FEDCD2" style: "card" mandatory_options: - id: "^feat_arc_int_op__[0-9a-z_]+$" + id: "^logic_arc_int_op__[0-9a-z_]+$" security: "^(YES|NO)$" safety: "^(QM|ASIL_B|ASIL_D)$" status: "^(valid|invalid)$" mandatory_links: - included_by: "^feat_arc_int__.+$" + included_by: "^logic_arc_int__.+$" mod_view_sta: title: "Module Architecture Static View" @@ -336,12 +336,11 @@ needs_types: security: "^(YES|NO)$" safety: "^(QM|ASIL_B|ASIL_D)$" status: "^(valid|invalid)$" - mandatory_links: - fulfils: "^comp_req__.+$" optional_links: - implements: "^comp_arc_int(_op)*__.+$" + implements: "^real_arc_int(_op)*__.+$" includes: "^comp_arc_sta__.+$" - uses: "^comp_arc_int(_op)*__.+$" + uses: "^real_arc_int(_op)*__.+$" + fulfils: "^comp_req__.+$" comp_arc_dyn: title: "Component Architecture Dynamic View" @@ -353,36 +352,38 @@ needs_types: security: "^(YES|NO)$" safety: "^(QM|ASIL_B|ASIL_D)$" status: "^(valid|invalid)$" - mandatory_links: + optional_links: fulfils: "^comp_req__.+$" - comp_arc_int: + real_arc_int: title: "Component Architecture Interfaces" - prefix: "comp_arc_int__" + prefix: "real_arc_int__" color: "#FEDCD2" style: "card" mandatory_options: - id: "^comp_arc_int__[0-9a-z_]+$" + id: "^real_arc_int__[0-9a-z_]+$" security: "^(YES|NO)$" safety: "^(QM|ASIL_B|ASIL_D)$" status: "^(valid|invalid)$" language: "^(cpp|rust)$" - mandatory_links: + optional_links: fulfils: "^comp_req__.+$" - comp_arc_int_op: + real_arc_int_op: title: "Component Architecture Interface Operation" - prefix: "comp_arc_int_op__" + prefix: "real_arc_int_op__" color: "#FEDCD2" style: "card" mandatory_options: - id: "^comp_arc_int_op__[0-9a-z_]+$" + id: "^real_arc_int_op__[0-9a-z_]+$" security: "^(YES|NO)$" safety: "^(QM|ASIL_B|ASIL_D)$" status: "^(valid|invalid)$" mandatory_links: - implements: "^feat_arc_int_op__.+$" - included_by: "^comp_arc_int__.+$" + included_by: 
"^real_arc_int__.+$" + optional_links: + implements: "^logic_arc_int_op__.+$" + review_header: prefix: "review__header" @@ -530,10 +531,10 @@ needs_extra_links: # - condition: defines the condition that should be checked # - [and / or / xor / not] ############################################################## -# req-#id: gd_req__req__linkage_architecture -# req-#id: gd_req__req__linkage_safety +# req-Id: gd_req__req__linkage_architecture +# req-Id: gd_req__req__linkage_safety graph_checks: - # req-#id: gd_req__req__linkage_safety + # req-Id: gd_req__req__linkage_safety req_safety_linkage: needs: include: "comp_req, feat_req" @@ -552,7 +553,7 @@ graph_checks: condition: "status == valid" check: satisfies: "status == valid" - # req-#id: gd_req__req__linkage_architecture + # req-Id: gd_req__req__linkage_architecture arch_safety_linkage: needs: include: "comp_req, feat_req" diff --git a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst index ee8ce08c..c829fe92 100644 --- a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst +++ b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst @@ -26,9 +26,44 @@ :id: std_wp__test__abce :status: active -.. Satisfies link refers to wrong requirement type +.. Required link `satisfies` refers to wrong requirement type #EXPECT: feat_req__abce.satisfies (['std_wp__test__abce']): does not follow pattern `^stkh_req__.*$`. .. feat_req:: Child requirement :id: feat_req__abce :satisfies: std_wp__test__abce + +.. Optional link `supported_by` refers to wrong requirement type + This check is disabled in check_options.py:114 + #EXPECT: wf__abcd.supported_by (['feat_req__abce']): does not follow pattern `^rl__.*$`. + + .. std_wp:: This is a test + :id: wf__abcd + :supported_by: feat_req__abce + +.. 
Optional link `supported_by` refers to the correct requirement type + This check is disabled in check_options.py:114 + #EXPECT-NOT: does not follow pattern `^rl__.*$`. + + .. std_wp:: This is a test + :id: wf__abcd + :supported_by: rl__abcd + + .. rl:: This is a test + :id: rl__abcd + + .. Required link: `satisfies` is missing + #EXPECT: feat_req__abcf: is missing required link: `satisfies`. + + .. feat_req:: Child requirement + :id: feat_req__abcf + +.. All required links are present +#EXPECT-NOT: feat_req__abcg: is missing required link + +.. feat_req:: Child requirement + :id: feat_req__abcg + :satisfies: stkh_req__abcd + +.. stkh_req:: Parent requirement + :id: stkh_req__abcd diff --git a/src/extensions/score_metamodel/tests/test_attributes_format.py b/src/extensions/score_metamodel/tests/test_attributes_format.py deleted file mode 100644 index 16ae228a..00000000 --- a/src/extensions/score_metamodel/tests/test_attributes_format.py +++ /dev/null @@ -1,196 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. 
-# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* -from unittest.mock import Mock - -from sphinx.application import Sphinx - -from src.extensions.score_metamodel.checks.attributes_format import ( - check_description, - check_id_format, - check_id_length, - check_title, -) -from src.extensions.score_metamodel.tests import fake_check_logger, need - - -class TestId: - STOP_WORDS = ["shall", "must", "will"] - WEAK_WORDS = ["just", "that", "about", "really", "some", "thing", "absolutely"] - - def test_check_id_format_positive(self): - """ - Test check_id_length function with a positive case. - """ - - need_1 = need( - id="gd_req__attribute_satisfies", - ) - - logger = fake_check_logger() - app = Mock(spec=Sphinx) - - check_id_format(app, need_1, logger) - assert not logger.has_warnings - - def test_check_id_format_two_mendatory_substrings_parts_negative(self): - """ - Test check_id_length function with a negative case. - """ - - need_1 = need( - id="gd_req_attribute_satisfies", - ) - - logger = fake_check_logger() - app = Mock(spec=Sphinx) - - check_id_format(app, need_1, logger) - - logger.assert_warning( - "expected to consisting of one of these 2 formats:" - "`__` or " - "`____`.", - expect_location=False, - ) - - def test_check_id_format_three_mendatory_substrings_parts_negative(self): - """ - Test check_id_length function with a negative case. - """ - - need_1 = need( - id="feat_req__1", - ) - - logger = fake_check_logger() - app = Mock(spec=Sphinx) - - check_id_format(app, need_1, logger) - - logger.assert_warning( - "expected to consisting of this format: " - "`____`.", - expect_location=False, - ) - - def test_check_id_length_positive(self): - """ - Test check_id_length function with a positive case. 
- """ - - need_1 = need( - id="std_req__iso26262__rq_8_6432", - ) - - logger = fake_check_logger() - app = Mock(spec=Sphinx) - - check_id_length(app, need_1, logger) - assert not logger.has_warnings - - def test_check_id_length_negative(self): - """ - Test check_id_length function with a negative case. - """ - - need_1 = need( - id="std_req__iso26262__rq_8_6432_0000000000000000000000", - ) - - logger = fake_check_logger() - app = Mock(spec=Sphinx) - - check_id_length(app, need_1, logger) - logger.assert_warning( - f"exceeds the maximum allowed length of 45 characters " - f"(current length: {len(need_1['id'])}).", - expect_location=False, - ) - - def test_check_title_positive(self): - need_1 = need( - id="std_req__iso26262__rq_8_6432", - title="std_req iso26262", - type="feat_req", - ) - - logger = fake_check_logger() - app = Mock(spec=Sphinx) - app.config = Mock() - app.config.stop_words = self.STOP_WORDS - - check_title(app, need_1, logger) - assert not logger.has_warnings - - def test_check_title_negative(self): - """ - Test check_title function with a negative case. - """ - - need_1 = need( - id="gd_req__doc_shall_approver", - title="gd_req doc shall approver", - type="feat_req", - ) - - logger = fake_check_logger() - app = Mock(spec=Sphinx) - app.config = Mock() - app.config.stop_words = self.STOP_WORDS - - check_title(app, need_1, logger) - logger.assert_warning( - ( - "contains a stop word: `shall`. The title is meant to provide a short " - "summary, not to repeat the requirement statement. Please revise " - "the title for clarity and brevity." 
- ), - expect_location=False, - ) - - def test_check_description_positive(self): - need_1 = need( - id="std_req__iso26262__rq_8_6432", - content="This is the description of the requirement", - type="feat_req", - ) - - logger = fake_check_logger() - app = Mock(spec=Sphinx) - app.config = Mock() - app.config.weak_words = self.WEAK_WORDS - - check_description(app, need_1, logger) - assert not logger.has_warnings - - def test_check_description_negative(self): - """ - Test check_description function with a negative case. - """ - - need_1 = need( - id="gd_req__doc_shall_approver", - content="This is just the description of the requirement", - type="feat_req", - ) - - logger = fake_check_logger() - app = Mock(spec=Sphinx) - app.config = Mock() - app.config.weak_words = self.WEAK_WORDS - - check_description(app, need_1, logger) - logger.assert_warning( - "contains a weak word: `just`. Please revise the description.", - expect_location=False, - ) diff --git a/src/extensions/score_metamodel/tests/test_check_options.py b/src/extensions/score_metamodel/tests/test_check_options.py index 24138aee..00128717 100644 --- a/src/extensions/score_metamodel/tests/test_check_options.py +++ b/src/extensions/score_metamodel/tests/test_check_options.py @@ -17,11 +17,11 @@ import pytest from sphinx.application import Sphinx -from src.extensions.score_metamodel.checks.check_options import ( +from score_metamodel.checks.check_options import ( check_extra_options, check_options, ) -from src.extensions.score_metamodel.tests import fake_check_logger, need +from score_metamodel.tests import fake_check_logger, need @pytest.mark.metadata( @@ -34,26 +34,14 @@ TestType="Requirements-based test", DerivationTechnique="Analysis of requirements", ) -class NeedTypeWithReqLink(TypedDict): +class NeedTypeDict(TypedDict, total=False): directive: str - mandatory_options: dict[str, str] - req_link: list[tuple[str, str]] - - -class NeedTypeWithOptLink(TypedDict): - directive: str - mandatory_options: dict[str, 
str] - opt_link: list[tuple[str, str]] - - -class NeedTypeWithOptOpt(TypedDict, total=False): - directive: str - mandatory_options: dict[str, str] - opt_opt: dict[str, str] + mandatory_options: dict[str, str | int] | None + opt_opt: dict[str, str] | None class TestCheckOptions: - NEED_TYPE_INFO: list[NeedTypeWithOptOpt] = [ + NEED_TYPE_INFO: list[NeedTypeDict] = [ { "directive": "tool_req", "mandatory_options": { @@ -62,7 +50,7 @@ class TestCheckOptions: }, } ] - NEED_TYPE_INFO_WITH_OPT_OPT: list[NeedTypeWithOptOpt] = [ + NEED_TYPE_INFO_WITH_OPT_OPT: list[NeedTypeDict] = [ { "directive": "tool_req", "mandatory_options": { @@ -75,78 +63,25 @@ class TestCheckOptions: } ] - NEED_TYPE_INFO_WITH_REQ_LINK: list[NeedTypeWithReqLink] = [ + NEED_TYPE_INFO_WITHOUT_MANDATORY_OPTIONS: list[NeedTypeDict] = [ { "directive": "workflow", - "mandatory_options": { - "id": "wf__.*$", - "status": "^(valid|draft)$", - }, - "req_link": [ - ("input", "^wp__.*$"), - ], - } + "mandatory_options": None, + }, ] - NEED_TYPE_INFO_WITH_OPT_LINK: list[NeedTypeWithOptLink] = [ + NEED_TYPE_INFO_WITH_INVALID_OPTION_TYPE: list[NeedTypeDict] = [ { "directive": "workflow", "mandatory_options": { - "id": "wf__.*$", - "status": "^(valid|draft)$", + "id": "^wf_req__.*$", + "some_invalid_option": 42, }, - "opt_link": [ - ("supported_by", "^rl__.*$"), - ], } ] - def test_known_directive_with_mandatory_option_and_allowed_value(self): - # Given a need with a type that is listed in the required options - # and mandatory options present - # and with correct values - need_1 = need( - target_id="tool_req__001", - id="tool_req__001", - type="tool_req", - some_required_option="some_value__001", - docname=None, - lineno=None, - ) - - logger = fake_check_logger() - app = Mock(spec=Sphinx) - app.config = Mock() - app.config.needs_types = self.NEED_TYPE_INFO - # Expect that the checks pass - check_options(app, need_1, logger) - logger.assert_no_warnings() - - def 
test_known_directive_with_optional_and_mandatory_option_and_allowed_value(self): - # Given a need with a type that is listed in the optional options - # and optional options present - # and with correct values - need_1 = need( - target_id="tool_req__001", - id="tool_req__001", - type="tool_req", - some_required_option="some_value__001", - some_optional_option="some_value__001", - docname=None, - lineno=None, - ) - - logger = fake_check_logger() - app = Mock(spec=Sphinx) - app.config = Mock() - app.config.needs_types = self.NEED_TYPE_INFO_WITH_OPT_OPT - # Expect that the checks pass - check_options(app, need_1, logger) - - logger.assert_no_warnings() - def test_unknown_directive(self): - # Given a need with a an unknown type it should raise an error + # Given a need with an unknown type, should raise an error need_1 = need( target_id="tool_req__001", id="tool_req__001", @@ -167,14 +102,13 @@ def test_unknown_directive(self): expect_location=False, ) - def test_unknown_option_present_in_req_opt(self): - # Given a need with an option that is not listed in the required options + def test_unknown_directive_extra_option(self): + # Given a need an unknown/undefined type, should raise an error need_1 = need( target_id="tool_req__001", - id="tool_req__0011", - type="tool_req", + type="unknown_type", + id="tool_req__001", some_required_option="some_value__001", - other_option="some_other_value", docname=None, lineno=None, ) @@ -186,20 +120,19 @@ def test_unknown_option_present_in_req_opt(self): # Expect that the checks pass check_extra_options(app, need_1, logger) logger.assert_warning( - "has these extra options: `other_option`.", + "no type info defined for semantic check.", expect_location=False, ) - def test_unknown_option_present_in_neither_req_opt_neither_opt_opt(self): - # Given a need with an option that is not listed - # in the required and optional options + def test_missing_mandatory_options_info(self): + # Given any need of known type + # with missing mandatory 
options info + # it should raise an error need_1 = need( - target_id="tool_req__001", - id="tool_req__0011", - type="tool_req", - some_required_option="some_value__001", - some_optional_option="some_value__001", - other_option="some_other_value", + target_id="wf_req__001", + id="wf_req__001", + type="workflow", + some_required_option=None, docname=None, lineno=None, ) @@ -207,44 +140,23 @@ def test_unknown_option_present_in_neither_req_opt_neither_opt_opt(self): logger = fake_check_logger() app = Mock(spec=Sphinx) app.config = Mock() - app.config.needs_types = self.NEED_TYPE_INFO_WITH_OPT_OPT + app.config.needs_types = self.NEED_TYPE_INFO_WITHOUT_MANDATORY_OPTIONS # Expect that the checks pass - check_extra_options(app, need_1, logger) - - logger.assert_warning( - "has these extra options: `other_option`.", - expect_location=False, - ) - - def test_known_required_option_missing(self): - # Given a need without an option that is listed in the required options - need_1 = need( - target_id="tool_req__001", - id="tool_req__001", - type="tool_req", - docname=None, - lineno=None, - ) - - logger = fake_check_logger() - app = Mock(spec=Sphinx) - app.config = Mock() - app.config.needs_types = self.NEED_TYPE_INFO - # Expect that the checks fail and a warning is logged check_options(app, need_1, logger) logger.assert_warning( - "is missing required option: `some_required_option`.", + "no type info defined for semantic check.", expect_location=False, ) - def test_value_violates_pattern_for_required_option(self): - # Given a need with an option that is listed in the required - # options but the value violates the pattern + def test_invalid_option_type(self): + # Given any need of known type + # with missing mandatory options info + # it should raise an error need_1 = need( - target_id="tool_req__001", - id="tool_req__001", - type="tool_req", - some_required_option="some_value_001", + target_id="wf_req__001", + id="wf_req__001", + type="workflow", + some_invalid_option=42, 
docname=None, lineno=None, ) @@ -252,28 +164,24 @@ def test_value_violates_pattern_for_required_option(self): logger = fake_check_logger() app = Mock(spec=Sphinx) app.config = Mock() - app.config.needs_types = self.NEED_TYPE_INFO - # Expect that the checks fail and a warning is logged + app.config.needs_types = self.NEED_TYPE_INFO_WITH_INVALID_OPTION_TYPE + # Expect that the checks pass check_options(app, need_1, logger) - pattern = ( - self.NEED_TYPE_INFO_WITH_OPT_OPT[0] - .get("mandatory_options", {}) - .get("some_required_option") - ) logger.assert_warning( - f"does not follow pattern `{pattern}`.", + "pattern `42` is not a valid regex pattern.", expect_location=False, ) - def test_value_violates_pattern_for_optional_option(self): - # Given a need with an option that is listed in the optional - # options but the value violates the pattern + def test_unknown_option_present_in_neither_req_opt_neither_opt_opt(self): + # Given a need with an option that is not listed + # in the required and optional options need_1 = need( target_id="tool_req__001", - id="tool_req__001", + id="tool_req__0011", type="tool_req", some_required_option="some_value__001", - some_optional_option="some_value_001", + some_optional_option="some_value__001", + other_option="some_other_value", docname=None, lineno=None, ) @@ -282,61 +190,10 @@ def test_value_violates_pattern_for_optional_option(self): app = Mock(spec=Sphinx) app.config = Mock() app.config.needs_types = self.NEED_TYPE_INFO_WITH_OPT_OPT - # Expect that the checks fail and a warning is logged - check_options(app, need_1, logger) - pattern = ( - self.NEED_TYPE_INFO_WITH_OPT_OPT[0] - .get("opt_opt", {}) - .get("some_optional_option") - ) - logger.assert_warning( - f"does not follow pattern `{pattern}`.", - expect_location=False, - ) - - def test_known_required_link_missing(self): - # Given a need without an option that is listed in the required options - need_1 = need( - target_id="wf__p_confirm_rv", - id="wf__p_confirm_rv", - 
status="valid", - type="workflow", - docname=None, - lineno=None, - ) + # Expect that the checks pass + check_extra_options(app, need_1, logger) - logger = fake_check_logger() - app = Mock(spec=Sphinx) - app.config = Mock() - app.config.needs_types = self.NEED_TYPE_INFO_WITH_REQ_LINK - # Expect that the checks fail and a warning is logged - check_options(app, need_1, logger) logger.assert_warning( - "is missing required link: `input`.", + "has these extra options: `other_option`.", expect_location=False, ) - - # TODO: Remove commented code when re - - # def test_value_violates_pattern_for_optional_link(self): - # # Given a need without an option that is listed in the required options - # need_1 = need( - # target_id="wf__p_confirm_rv", - # id="wf__p_confirm_rv", - # status="valid", - # type="workflow", - # supported_by="rl_process_community", - # docname=None, - # lineno=None, - # ) - - # logger = fake_check_logger() - # app = Mock(spec=Sphinx) - # app.config = Mock() - # app.config.needs_types = self.NEED_TYPE_INFO - # # Expect that the checks fail and a warning is logged - # check_options(app, need_1, logger, self.NEED_TYPE_INFO_WITH_OPT_LINK) - # logger.assert_warning( - # "does not follow pattern", - # expect_location=False, - # ) diff --git a/src/extensions/score_metamodel/tests/test_id_contains_feature.py b/src/extensions/score_metamodel/tests/test_id_contains_feature.py deleted file mode 100644 index b93d8b44..00000000 --- a/src/extensions/score_metamodel/tests/test_id_contains_feature.py +++ /dev/null @@ -1,39 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. 
-# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* - -from unittest.mock import Mock - -from sphinx.application import Sphinx - -from score_metamodel.checks.id_contains_feature import id_contains_feature -from score_metamodel.tests import fake_check_logger, need - - -def test_feature_ok(): - logger = fake_check_logger() - app = Mock(spec=Sphinx) - - id_contains_feature( - app, need(id="req__feature17__title", docname="path/to/feature17/index"), logger - ) - logger._log.warning.assert_not_called() # type: ignore - - -def test_feature_not_ok(): - logger = fake_check_logger() - app = Mock(spec=Sphinx) - - id_contains_feature( - app, need(id="req__feature17__title", docname="path/to/feature15/index"), logger - ) - logger.assert_warning("feature15", expect_location=False) diff --git a/src/extensions/score_metamodel/tests/test_rules_file_based.py b/src/extensions/score_metamodel/tests/test_rules_file_based.py index 38301747..fa640229 100644 --- a/src/extensions/score_metamodel/tests/test_rules_file_based.py +++ b/src/extensions/score_metamodel/tests/test_rules_file_based.py @@ -30,6 +30,8 @@ TOOLING_DIR_NAME = "src" ### List of relative paths of all rst files in RST_DIR + + RST_FILES = [str(f.relative_to(RST_DIR)) for f in Path(RST_DIR).rglob("*.rst")] diff --git a/src/extensions/score_metamodel/tests/test_traceability.py b/src/extensions/score_metamodel/tests/test_traceability.py deleted file mode 100644 index 29fcb300..00000000 --- a/src/extensions/score_metamodel/tests/test_traceability.py +++ /dev/null @@ -1,229 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with 
this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* - - -# ╭──────────────────────────────────────╮ -# │ TEST DISABLED DUE TO CHECKS BEING │ -# │ DISABLED │ -# ╰──────────────────────────────────────╯ - - -# from unittest.mock import Mock -# -# import pytest -# from sphinx.application import Sphinx -# from sphinx_needs.data import NeedsInfoType -# -# import score_metamodel.tests as tests -# from score_metamodel.checks.traceability import ( -# check_linkage_parent, -# check_linkage_safety, -# check_linkage_status, -# ) -# -# -# @pytest.mark.metadata( -# Verifies=[ -# "TOOL_REQ__toolchain_sphinx_needs_build__requirement_linkage_status_check" -# ], -# Description="It should check the traceability like linkage of attributes.", -# ASIL="ASIL_D", -# Priority="1", -# TestType="Requirements-based test", -# DerivationTechnique="Analysis of requirements", -# ) -# class TestTraceability: -# def test_check_linkage_parent_positive(self): -# logger = tests.fake_check_logger() -# app = Mock(spec=Sphinx) -# -# need_1 = NeedsInfoType( -# id="TOOL_REQ__1", -# status="valid", -# satisfies=[ -# "feat_req__2", -# ], -# ) -# -# need_2 = NeedsInfoType( -# id="feat_req__2", -# status="valid", -# satisfies=[ -# "TOOL_REQ__1", -# ], -# ) -# needs = [need_1, need_2] -# -# check_linkage_parent(app, needs, logger) -# logger.assert_no_warnings() -# -# def test_check_linkage_parent_negative(self): -# logger = tests.fake_check_logger() -# app = Mock(spec=Sphinx) -# -# need_1 = NeedsInfoType( -# id="TOOL_REQ__1", -# status="valid", -# satisfies=[ -# "feat_req__2", -# ], -# ) -# -# needs = [need_1] -# -# check_linkage_parent(app, needs, logger) -# -# 
logger.assert_warning( -# f"has a parent requirement(s): `{need_1['satisfies'][0]}` with an " -# f"invalid status.", -# expect_location=False, -# ) -# -# def test_check_linkage_safety_positive(self): -# logger = tests.fake_check_logger() -# app = Mock(spec=Sphinx) -# -# need_1 = NeedsInfoType( -# id="COMP_REQ__1", -# status="valid", -# safety="QM", -# satisfies=[ -# "feat_req__2", -# ], -# ) -# -# need_2 = NeedsInfoType( -# id="feat_req__2", -# status="valid", -# safety="QM", -# satisfies=[ -# "stkh_req__communication__intra_process", -# ], -# ) -# -# need_3 = NeedsInfoType( -# id="stkh_req__communication__intra_process", -# status="valid", -# safety="QM", -# ) -# -# needs = [need_1, need_2, need_3] -# -# check_linkage_safety(app, needs, logger) -# logger.assert_no_warnings() -# -# def test_check_linkage_safety_negative_ASIL_D(self): -# logger = tests.fake_check_logger() -# app = Mock(spec=Sphinx) -# -# need_1 = NeedsInfoType( -# id="feat_req__1", -# safety="ASIL_D", -# satisfies=[ -# "stkh_req__communication__inter_process", -# ], -# ) -# -# need_2 = NeedsInfoType( -# id="stkh_req__communication__inter_process", -# status="valid", -# safety="ASIL_B", -# ) -# -# needs = [need_1, need_2] -# -# check_linkage_safety(app, needs, logger) -# logger.assert_warning( -# f"with `{need_1['safety']}` has no parent requirement that contains " -# f"the same or lower ASIL. 
Allowed ASIL values: `ASIL_D`.", -# expect_location=False, -# ) -# -# def test_check_linkage_safety_negative_ASIL_B(self): -# logger = tests.fake_check_logger() -# app = Mock(spec=Sphinx) -# -# need_1 = NeedsInfoType( -# id="feat_req__1", -# safety="ASIL_B", -# satisfies=[ -# "stkh_req__communication__inter_process", -# ], -# ) -# -# need_2 = NeedsInfoType( -# id="stkh_req__communication__inter_process", -# safety="QM", -# ) -# needs = [need_1, need_2] -# -# check_linkage_safety(app, needs, logger) -# logger.assert_warning( -# f"with `{need_1['safety']}` has no parent requirement that contains " -# f"the same or lower ASIL. Allowed ASIL values: `ASIL_B`, `ASIL_D`.", -# expect_location=False, -# ) -# -# def test_check_linkage_status_positive(self): -# logger = tests.fake_check_logger() -# app = Mock(spec=Sphinx) -# -# need_1 = NeedsInfoType( -# id="TOOL_REQ__1", -# status="valid", -# satisfies=[ -# "feat_req__2", -# ], -# ) -# -# need_2 = NeedsInfoType( -# id="feat_req__2", -# status="valid", -# ) -# needs = [need_1, need_2] -# -# check_linkage_status(app, needs, logger) -# logger.assert_no_warnings() -# -# def test_check_linkage_status_negative(self): -# logger = tests.fake_check_logger() -# app = Mock(spec=Sphinx) -# -# need_1 = NeedsInfoType( -# id="TOOL_REQ__001", -# status="valid", -# satisfies=["feat_req__2"], -# ) -# -# need_2 = NeedsInfoType( -# id="feat_req__2", -# status="valid", -# satisfies=[ -# "feat_req__3", -# ], -# ) -# need_3 = NeedsInfoType( -# id="feat_req__3", -# status="invalid", -# satisfies=[ -# "feat_req__4", -# ], -# ) -# needs = [need_1, need_2, need_3] -# check_linkage_status(app, needs, logger) -# -# logger.assert_warning( -# "has a valid status but one of its parents: `feat_req__3` has an " -# "invalid status.", -# expect_location=False, -# ) diff --git a/src/extensions/score_plantuml.py b/src/extensions/score_plantuml.py index 210e5887..76228505 100644 --- a/src/extensions/score_plantuml.py +++ b/src/extensions/score_plantuml.py @@ 
-24,6 +24,7 @@ In addition it sets common PlantUML options, like output to svg_obj. """ +from gettext import find import os import sys from pathlib import Path @@ -65,12 +66,25 @@ def get_runfiles_dir() -> Path: f"Could not find runfiles_dir at {runfiles_dir}. " "Have a look at README.md for instructions on how to build docs." ) - return runfiles_dir +def find_correct_path(runfiles: str) -> str: + """ + This ensures that the 'plantuml' binary path is found in local 'docs-as-code' and module use. + """ + dirs = [str(x) for x in Path(runfiles).glob("*docs-as-code~")] + if dirs: + # Happens if 'docs-as-code' is used as Module + p = runfiles + "/docs-as-code~/src/plantuml" + else: + # Only happens in 'docs-as-code' repository + p = runfiles + "/../plantuml" + return p + + def setup(app: Sphinx): - app.config.plantuml = str(get_runfiles_dir() / ".." / "plantuml") + app.config.plantuml = find_correct_path(str(get_runfiles_dir())) app.config.plantuml_output_format = "svg_obj" app.config.plantuml_syntax_error_image = True app.config.needs_build_needumls = "_plantuml_sources" diff --git a/src/find_runfiles/BUILD b/src/find_runfiles/BUILD new file mode 100644 index 00000000..9acb8406 --- /dev/null +++ b/src/find_runfiles/BUILD @@ -0,0 +1,31 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +load("@aspect_rules_py//py:defs.bzl", "py_library") +load("@pip_process//:requirements.bzl", "all_requirements") +load("@score_python_basics//:defs.bzl", "score_py_pytest") + +py_library( + name = "find_runfiles", + srcs = ["__init__.py"], + imports = ["."], + visibility = ["//visibility:public"], +) + +score_py_pytest( + name = "find_runfiles_test", + size = "small", + srcs = ["test_find_runfiles.py"], + deps = [ + ":find_runfiles", + ] + all_requirements, +) diff --git a/src/incremental.py b/src/incremental.py index 34e1d868..45dbbf9e 100644 --- a/src/incremental.py +++ b/src/incremental.py @@ -13,16 +13,12 @@ import argparse import logging -import itertools import os -import sys import json -from pathlib import Path from typing import Any import debugpy -from python.runfiles import Runfiles from sphinx.cmd.build import main as sphinx_main from sphinx_autobuild.__main__ import main as sphinx_autobuild_main From c2f9e2706bd93e37a643d6035a75bf9f920592a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Thu, 8 May 2025 14:39:39 +0200 Subject: [PATCH 008/231] Prefix name with score (#9) * Prefix name with score * Name change to comply with standards --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index c60b2d70..eeeed880 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -12,7 +12,7 @@ # ******************************************************************************* module( - name = "docs-as-code", + name = "score_docs_as_code", version = "0.1.0", compatibility_level = 0, ) From c3ade5f2911597f135b59799620a33503349ee7d Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Thu, 8 May 2025 15:43:15 +0200 Subject: [PATCH 009/231] Renaming 'docs-as-code' => 'score_docs_as_code' (#10) --- README.md | 4 ++-- docs.bzl | 32 ++++++++++++------------- process-docs/conf.py | 4 ++-- src/BUILD | 4 ++-- src/README.md | 2 +- src/extensions/BUILD | 2 +- src/extensions/score_layout/__init__.py | 8 +++---- src/extensions/score_plantuml.py | 10 ++++---- 8 files changed, 33 insertions(+), 33 deletions(-) diff --git a/README.md b/README.md index 23479dad..0aa2564f 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ This module allows you to easily integrate Sphinx documentation generation into Add the module to your `MODULE.bazel` file: ```starlark -bazel_dep(name = "docs-as-code", version = "0.1.0") +bazel_dep(name = "score_docs_as_code", version = "0.1.0") ``` And make sure to also add the S-core bazel registry to your `.bazelrc` file @@ -40,7 +40,7 @@ ______________________________________________________________________ #### 1. Import the `docs()` macro in your BUILD file: ```python -load("@docs-as-code//docs.bzl", "docs") +load("@score_docs_as_code//docs.bzl", "docs") docs( conf_dir = "", diff --git a/docs.bzl b/docs.bzl index b5926760..9b6e181c 100644 --- a/docs.bzl +++ b/docs.bzl @@ -38,7 +38,7 @@ # For user-facing documentation, refer to `/README.md`. 
load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") -load("@docs-as-code//src/extensions/score_source_code_linker:collect_source_files.bzl", "parse_source_files_for_needs_links") +load("@score_docs_as_code//src/extensions/score_source_code_linker:collect_source_files.bzl", "parse_source_files_for_needs_links") load("@pip_process//:requirements.bzl", "all_requirements", "requirement") load("@rules_java//java:java_binary.bzl", "java_binary") load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary", "sphinx_docs") @@ -46,14 +46,14 @@ load("@rules_python//sphinxdocs:sphinx_docs_library.bzl", "sphinx_docs_library") load("@score_python_basics//:defs.bzl", "score_virtualenv") sphinx_requirements = all_requirements + [ - "@docs-as-code//src:plantuml_for_python", - "@docs-as-code//src/extensions:score_plantuml", - "@docs-as-code//src/find_runfiles:find_runfiles", - "@docs-as-code//src/extensions/score_draw_uml_funcs:score_draw_uml_funcs", - "@docs-as-code//src/extensions/score_header_service:score_header_service", - "@docs-as-code//src/extensions/score_layout:score_layout", - "@docs-as-code//src/extensions/score_metamodel:score_metamodel", - "@docs-as-code//src/extensions/score_source_code_linker:score_source_code_linker", + "@score_docs_as_code//src:plantuml_for_python", + "@score_docs_as_code//src/extensions:score_plantuml", + "@score_docs_as_code//src/find_runfiles:find_runfiles", + "@score_docs_as_code//src/extensions/score_draw_uml_funcs:score_draw_uml_funcs", + "@score_docs_as_code//src/extensions/score_header_service:score_header_service", + "@score_docs_as_code//src/extensions/score_layout:score_layout", + "@score_docs_as_code//src/extensions/score_metamodel:score_metamodel", + "@score_docs_as_code//src/extensions/score_source_code_linker:score_source_code_linker", ] def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_dir = "docs", build_dir_for_incremental = "_build", docs_targets = []): @@ -67,7 +67,7 @@ def 
docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_ sphinx_build_binary( name = "sphinx_build", visibility = ["//visibility:public"], - data = ["@docs-as-code//src:docs_assets", "@docs-as-code//src:score_extension_files"], + data = ["@score_docs_as_code//src:docs_assets", "@score_docs_as_code//src:score_extension_files"], deps = sphinx_requirements, ) @@ -124,10 +124,10 @@ def _incremental(incremental_name = "incremental", live_name = "live_preview", s py_binary( name = incremental_name, - srcs = ["@docs-as-code//src:incremental.py"], + srcs = ["@score_docs_as_code//src:incremental.py"], deps = dependencies, # TODO: Figure out if we need all dependencies as data here or not. - data = [":score_source_code_parser", "@docs-as-code//src:plantuml", "@docs-as-code//src:docs_assets"] + dependencies, + data = [":score_source_code_parser", "@score_docs_as_code//src:plantuml", "@score_docs_as_code//src:docs_assets"] + dependencies, env = { "SOURCE_DIRECTORY": source_dir, "CONF_DIRECTORY": conf_dir, @@ -139,9 +139,9 @@ def _incremental(incremental_name = "incremental", live_name = "live_preview", s py_binary( name = live_name, - srcs = ["@docs-as-code//src:incremental.py"], + srcs = ["@score_docs_as_code//src:incremental.py"], deps = dependencies, - data = ["@docs-as-code//src:plantuml", "@docs-as-code//src:docs_assets"] + dependencies, + data = ["@score_docs_as_code//src:plantuml", "@score_docs_as_code//src:docs_assets"] + dependencies, env = { "SOURCE_DIRECTORY": source_dir, "CONF_DIRECTORY": conf_dir, @@ -191,8 +191,8 @@ def _docs(name = "docs", format = "html", external_needs_deps = list(), external ], tools = [ ":score_source_code_parser", - "@docs-as-code//src:plantuml", - "@docs-as-code//src:docs_assets", + "@score_docs_as_code//src:plantuml", + "@score_docs_as_code//src:docs_assets", ] + external_needs_deps, visibility = ["//visibility:public"], ) diff --git a/process-docs/conf.py b/process-docs/conf.py index 9d52ec80..09feecd6 100644 --- 
a/process-docs/conf.py +++ b/process-docs/conf.py @@ -22,7 +22,7 @@ # -- Project information ----------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information -project = "docs-as-code" +project = "score_docs_as_code" author = "Score" version = "0.1" @@ -30,7 +30,7 @@ # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration logger = logging.getLogger("process-docs") -logger.debug("Loading docs-as-code conf.py") +logger.debug("Loading score_docs_as_code conf.py") extensions = [ diff --git a/src/BUILD b/src/BUILD index 3b5e8907..6f2aaf88 100644 --- a/src/BUILD +++ b/src/BUILD @@ -49,8 +49,8 @@ java_binary( # upgrade of `aspect_rules_py` since the py_venv rule now supports a data field py_library( name = "plantuml_for_python", - srcs = ["@docs-as-code//src:dummy.py"], - data = ["@docs-as-code//src:plantuml"], + srcs = ["@score_docs_as_code//src:dummy.py"], + data = ["@score_docs_as_code//src:plantuml"], visibility = ["//visibility:public"], ) diff --git a/src/README.md b/src/README.md index 554bcff6..128cc32e 100644 --- a/src/README.md +++ b/src/README.md @@ -54,7 +54,7 @@ It should be treated as a 'get-started' guide, giving you all needed information -## docs-as-code Directory Architecture +## score_docs_as_code Directory Architecture ``` process-docs/ # Local documentation to test functionality diff --git a/src/extensions/BUILD b/src/extensions/BUILD index 0cb13c76..710a8f51 100644 --- a/src/extensions/BUILD +++ b/src/extensions/BUILD @@ -24,7 +24,7 @@ load("@score_python_basics//:defs.bzl", "score_py_pytest", "score_virtualenv") py_library( name = "score_plantuml", - srcs = ["@docs-as-code//src/extensions:score_plantuml.py"], + srcs = ["@score_docs_as_code//src/extensions:score_plantuml.py"], imports = ["."], visibility = ["//visibility:public"], ) diff --git a/src/extensions/score_layout/__init__.py b/src/extensions/score_layout/__init__.py index 4ebc685f..188122cb 
100644 --- a/src/extensions/score_layout/__init__.py +++ b/src/extensions/score_layout/__init__.py @@ -39,12 +39,12 @@ def update_config(app: Sphinx, _config: Any): # For now this seems the only place this is used / needed. # In the future it might be a good idea to make this available in other places, maybe via the 'find_runfiles' lib if r := os.getenv("RUNFILES_DIR"): - dirs = [str(x) for x in Path(r).glob("*docs-as-code~")] + dirs = [str(x) for x in Path(r).glob("*score_docs_as_code~")] if dirs: - # Happens if 'docs-as-code' is used as Module - p = str(r) + "/docs-as-code~/src/assets" + # Happens if 'score_docs_as_code' is used as Module + p = str(r) + "/score_docs_as_code~/src/assets" else: - # Only happens in 'docs-as-code' repository + # Only happens in 'score_docs_as_code' repository p = str(r) + "/_main/src/assets" app.config.html_static_path = app.config.html_static_path + [p] diff --git a/src/extensions/score_plantuml.py b/src/extensions/score_plantuml.py index 76228505..ba5b5da1 100644 --- a/src/extensions/score_plantuml.py +++ b/src/extensions/score_plantuml.py @@ -71,14 +71,14 @@ def get_runfiles_dir() -> Path: def find_correct_path(runfiles: str) -> str: """ - This ensures that the 'plantuml' binary path is found in local 'docs-as-code' and module use. + This ensures that the 'plantuml' binary path is found in local 'score_docs_as_code' and module use. 
""" - dirs = [str(x) for x in Path(runfiles).glob("*docs-as-code~")] + dirs = [str(x) for x in Path(runfiles).glob("*score_docs_as_code~")] if dirs: - # Happens if 'docs-as-code' is used as Module - p = runfiles + "/docs-as-code~/src/plantuml" + # Happens if 'score_docs_as_code' is used as Module + p = runfiles + "/score_docs_as_code~/src/plantuml" else: - # Only happens in 'docs-as-code' repository + # Only happens in 'score_docs_as_code' repository p = runfiles + "/../plantuml" return p From ee07a3b2820ea421369d48b036385279d0977582 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Fri, 9 May 2025 07:57:26 +0200 Subject: [PATCH 010/231] quick fix for randomly failing build (#13) --- MODULE.bazel | 2 +- src/extensions/score_draw_uml_funcs/__init__.py | 13 ++++++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index eeeed880..dc6e4f00 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.1.0", + version = "0.1.1", compatibility_level = 0, ) diff --git a/src/extensions/score_draw_uml_funcs/__init__.py b/src/extensions/score_draw_uml_funcs/__init__.py index 27df4a31..00712bcb 100644 --- a/src/extensions/score_draw_uml_funcs/__init__.py +++ b/src/extensions/score_draw_uml_funcs/__init__.py @@ -32,9 +32,6 @@ from pathlib import Path from typing import Any -from sphinx.application import Sphinx -from sphinx_needs.logging import get_logger - from score_draw_uml_funcs.helpers import ( gen_header, gen_interface_element, @@ -49,6 +46,8 @@ get_module, get_real_interface_logical, ) +from sphinx.application import Sphinx +from sphinx_needs.logging import get_logger logger = get_logger(__file__) @@ -379,6 +378,14 @@ def __call__( if imcomp := impl_comp.get(iface, {}): module = get_module(imcomp, all_needs) + # FIXME: sometimes module is empty, then the following code fails + if not module: + logger.info( + f"FIXME: {need['id']}: " + f"Module for interface {iface} -> 
{imcomp} is empty." + ) + continue + if module not in proc_modules: tmp, link_text, proc_impl_interfaces, proc_used_interfaces = ( draw_module( From f1e69442dae07d89baaf1f769798e83280213d13 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Fri, 9 May 2025 08:55:04 +0200 Subject: [PATCH 011/231] bump version (#14) --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index dc6e4f00..49071554 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.1.1", + version = "0.2.0", compatibility_level = 0, ) From 242bcc4824f3dbe47bd3f3d39895abf616f348ee Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Fri, 9 May 2025 11:28:44 +0200 Subject: [PATCH 012/231] fix command line argument for live_preview (#15) --- .../score_source_code_linker/__init__.py | 36 ++++--------------- src/incremental.py | 7 ++-- 2 files changed, 10 insertions(+), 33 deletions(-) diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index 22e6ac70..adff9c5a 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -11,26 +11,27 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* import json -import os from copy import deepcopy from pathlib import Path -from pprint import pprint -from src.extensions.score_source_code_linker.parse_source_files import GITHUB_BASE_URL from sphinx.application import Sphinx from sphinx.environment import BuildEnvironment from sphinx_needs.data import SphinxNeedsData from sphinx_needs.logging import get_logger +from src.extensions.score_source_code_linker.parse_source_files import GITHUB_BASE_URL + LOGGER = get_logger(__name__) LOGGER.setLevel("DEBUG") def setup(app: Sphinx) -> dict[str, str | bool]: # Extension: score_source_code_linker - 
app.add_config_value("disable_source_code_linker", False, rebuild="env") - app.add_config_value("score_source_code_linker_file_overwrite", "", rebuild="env") # TODO: can we detect live_preview & esbonio here? Until then we have a flag: + app.add_config_value("disable_source_code_linker", False, rebuild="env", types=bool) + app.add_config_value( + "score_source_code_linker_file_overwrite", "", rebuild="env", types=str + ) # Define need_string_links here to not have it in conf.py app.config.needs_string_links = { @@ -73,40 +74,17 @@ def add_source_link(app: Sphinx, env: BuildEnvironment) -> None: Needs_Data = SphinxNeedsData(env) needs = Needs_Data.get_needs_mutable() needs_copy = deepcopy(needs) - # bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles/_main/process-docs/score_source_code_parser.json - # bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles/_main/tooling/extensions/score_source_code_linker/__init__.py - # bazel-out/k8-fastbuild/bin/process-docs/score_source_code_parser.json - # /home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles/score_source_code_parser.json - # - - ## -> build: - - # bazel-out/k8-opt-exec-ST-d57f47055a04/bin/tooling/sphinx_build.runfiles/_main/tooling/extensions/score_source_code_linker/__init__.py - - # Tried with build - # bazel-out/k8-fastbuild/bin/process-docs/_docs/_sources/process-docs/score_source_code_parser.json - - # SEARCHING: - # bazel-out/k8-opt-exec-ST-d57f47055a04/bin/process-docs/score_source_code_parser.json p5 = Path(__file__).parents[5] - # bazel-out/k8-opt-exec-ST-d57f47055a04/bin/tooling - # LOGGER.info("DEBUG: ============= CONF DIR===========") - # LOGGER.info(f"DEBUG: {Path(app.confdir).name}") - # LOGGER.info("DEBUG: =============================") if str(p5).endswith("src"): LOGGER.info("DEBUG: WE ARE IN THE IF") path = str(p5.parent / Path(app.confdir).name / "score_source_code_parser.json") 
else: LOGGER.info("DEBUG: WE ARE IN THE ELSE") path = str(p5 / "score_source_code_parser.json") - # LOGGER.info("DEBUG============= FILE PATH OF JSON (where we search)===========") - # LOGGER.info(f"DEBUG: {path}") - # LOGGER.info("DEBUG: =============================") + if app.config.score_source_code_linker_file_overwrite: path = app.config.score_source_code_linker_file_overwrite - # json_paths = [str(Path(__file__).parent.parent.parent.parent.parent.parent/"score_source_code_parser.json")] - # json_paths = [app.config.source_code_linker_file] try: with open(path) as f: diff --git a/src/incremental.py b/src/incremental.py index 45dbbf9e..b67e0636 100644 --- a/src/incremental.py +++ b/src/incremental.py @@ -12,13 +12,11 @@ # ******************************************************************************* import argparse +import json import logging import os -import json -from typing import Any import debugpy - from sphinx.cmd.build import main as sphinx_main from sphinx_autobuild.__main__ import main as sphinx_autobuild_main @@ -92,7 +90,8 @@ def transform_env_str_to_dict(external_needs_source: str) -> list[dict[str, str] action = get_env("ACTION") if action == "live_preview": sphinx_autobuild_main( - base_arguments + ["--define=disable_source_code_linker=True"] + # Note: bools need to be passed via '0' and '1' from the command line. 
+ base_arguments + ["--define=disable_source_code_linker=1"] ) else: sphinx_main(base_arguments) From f30d07301e2893310704922dd6517bcd18202465 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Fri, 9 May 2025 12:09:54 +0200 Subject: [PATCH 013/231] rename process-docs to examples/simple (#17) --- .vscode/settings.json | 36 +++++++++++++++++++++ BUILD | 2 +- README.md | 6 ++-- {process-docs => examples/simple}/BUILD | 4 +-- {process-docs => examples/simple}/conf.py | 22 ++++--------- {process-docs => examples/simple}/index.rst | 0 pyproject.toml | 6 ++-- src/README.md | 4 +-- 8 files changed, 53 insertions(+), 27 deletions(-) create mode 100644 .vscode/settings.json rename {process-docs => examples/simple}/BUILD (97%) rename {process-docs => examples/simple}/conf.py (75%) rename {process-docs => examples/simple}/index.rst (100%) diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..a3c7371a --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,36 @@ +{ + // General Settings + "files.insertFinalNewline": true, + "files.trimFinalNewlines": true, + "files.trimTrailingWhitespace": true, + "editor.insertSpaces": true, + "editor.tabCompletion": "on", + + // When using ruff for formatting 88 characters per line is the standard. 
+ "editor.rulers": [88], + "[python]": { + // Opinionated option for the future: + // "editor.formatOnSave": true, + "editor.codeActionsOnSave": { + "source.sortImports": "explicit" + }, + "editor.defaultFormatter": "charliermarsh.ruff" + }, + + // RST Settings + "[restructuredtext]": { + "editor.tabSize": 3 + }, + // + // + "python.testing.pytestArgs": [ + ".", + "--ignore-glob=bazel-*/*", + "--ignore-glob=.venv_docs/*", + "--ignore-glob=_build/*" + ], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, + "bazel.lsp.command": "bazel", + "bazel.lsp.args": ["run", "//:starpls_server"] +} diff --git a/BUILD b/BUILD index 16742e14..22c08c79 100644 --- a/BUILD +++ b/BUILD @@ -16,7 +16,7 @@ load("@score_cr_checker//:cr_checker.bzl", "copyright_checker") copyright_checker( name = "copyright", srcs = [ - "process-docs", + "examples", "src", "//:BUILD", "//:MODULE.bazel", diff --git a/README.md b/README.md index 0aa2564f..a55c9192 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ bazel_dep(name = "score_docs_as_code", version = "0.1.0") And make sure to also add the S-core bazel registry to your `.bazelrc` file ```starlark -common --registry=https://raw.githubusercontent.com/eclipse-score/bazel_registry/main/ +common --registry=https://raw.githubusercontent.com/eclipse-score/bazel_registry/main/ common --registry=https://bcr.bazel.build ``` @@ -61,7 +61,7 @@ docs( #### 2. Adapt your conf.py if needed ```python -# ... +# ... extensions = [ "sphinx_design", "sphinx_needs", @@ -76,7 +76,7 @@ extensions = [ ``` Make sure that your conf.py imports all of the extensions you want to enable.\ -For a full example look at [This repos conf.py](process-docs/conf.py) +For a full example look at [a simple example](examples/simple) #### 3. 
Run a documentation build: diff --git a/process-docs/BUILD b/examples/simple/BUILD similarity index 97% rename from process-docs/BUILD rename to examples/simple/BUILD index 3dfd25f8..74f1efd5 100644 --- a/process-docs/BUILD +++ b/examples/simple/BUILD @@ -21,13 +21,13 @@ load("//:docs.bzl", "docs") # - `docs:docs` for building documentation at build-time docs( - conf_dir = "process-docs", + conf_dir = "examples/simple", docs_targets = [ { "suffix": "", # local without external needs }, ], - source_dir = "process-docs", + source_dir = "examples/simple", source_files_to_scan_for_needs_links = [ # Note: you can add filegroups, globs, or entire targets here. "//src:score_extension_files", diff --git a/process-docs/conf.py b/examples/simple/conf.py similarity index 75% rename from process-docs/conf.py rename to examples/simple/conf.py index 09feecd6..fbef18a8 100644 --- a/process-docs/conf.py +++ b/examples/simple/conf.py @@ -16,22 +16,17 @@ # For the full list of built-in configuration values, see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html -import logging - # -- Project information ----------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information -project = "score_docs_as_code" -author = "Score" +project = "Simple Example Project" +author = "S-CORE" version = "0.1" # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration -logger = logging.getLogger("process-docs") -logger.debug("Loading score_docs_as_code conf.py") - extensions = [ "sphinx_design", @@ -44,13 +39,11 @@ "score_layout", ] -logger.debug("After loading extensions") - exclude_patterns = [ - # The following entries are not required when building the documentation - # via 'bazel build //docs:docs', as that command runs in a sandboxed environment. 
- # However, when building the documentation via 'sphinx-build' or esbonio, - # these entries are required to prevent the build from failing. + # The following entries are not required when building the documentation via 'bazel + # build //docs:docs', as that command runs in a sandboxed environment. However, when + # building the documentation via 'bazel run //docs:incremental' or esbonio, these + # entries are required to prevent the build from failing. "bazel-*", ".venv_docs", ] @@ -59,6 +52,3 @@ # Enable numref numfig = True - - -logger.debug("After loading S-CORE conf.py") diff --git a/process-docs/index.rst b/examples/simple/index.rst similarity index 100% rename from process-docs/index.rst rename to examples/simple/index.rst diff --git a/pyproject.toml b/pyproject.toml index d7ec00ee..4275d36f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ # This file is at the root level, as it applies to all Python code, # not only to docs or to tools. -[tool.pyright] -extends = "bazel-bin/process-docs/ide_support.runfiles/score_python_basics~/pyproject.toml" +[tool.pyright] +extends = "bazel-bin/examples/simple/ide_support.runfiles/score_python_basics~/pyproject.toml" exclude = [ "**/__pycache__", @@ -11,7 +11,7 @@ exclude = [ ] [tool.ruff] -extend = "bazel-bin/process-docs/ide_support.runfiles/score_python_basics~/pyproject.toml" +extend = "bazel-bin/examples/simple/ide_support.runfiles/score_python_basics~/pyproject.toml" extend-exclude = [ "**/__pycache__", diff --git a/src/README.md b/src/README.md index 128cc32e..bad1f6af 100644 --- a/src/README.md +++ b/src/README.md @@ -11,7 +11,7 @@ It should be treated as a 'get-started' guide, giving you all needed information 1. Install Bazelisk (version manager for Bazel) 2. Create the Python virtual environment: ```bash - bazel run //process-docs:ide_support + bazel run //your-docs-dir:ide_support ``` 3. 
Select `.venv_docs/bin/python` as the python interpreter inside your IDE *Note: This virtual environment does **not** have pip, therefore `pip install` is not available.* @@ -57,7 +57,7 @@ It should be treated as a 'get-started' guide, giving you all needed information ## score_docs_as_code Directory Architecture ``` -process-docs/ # Local documentation to test functionality +examples/ # Shows how to use doc-as-code tooling src/ ├── assets/ # Documentation styling (CSS) ├── decision_records/ # Architecture Decision Records (ADRs) From a8711bd723c628eddf2a62e96c61d6ee3adcbacc Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Fri, 9 May 2025 13:54:41 +0200 Subject: [PATCH 014/231] add CI/CD workflows to the repository (#16) --- .github/actions/gitlint/action.yml | 42 +++++++++++++++++++++++++++++ .github/workflows/copyright.yml | 24 +++++++++++++++++ .github/workflows/format.yml | 30 +++++++++++++++++++++ .github/workflows/license_check.yml | 32 ++++++++++++++++++++++ .github/workflows/test.yml | 26 ++++++++++++++++++ 5 files changed, 154 insertions(+) create mode 100644 .github/actions/gitlint/action.yml create mode 100644 .github/workflows/copyright.yml create mode 100644 .github/workflows/format.yml create mode 100644 .github/workflows/license_check.yml create mode 100644 .github/workflows/test.yml diff --git a/.github/actions/gitlint/action.yml b/.github/actions/gitlint/action.yml new file mode 100644 index 00000000..418c504e --- /dev/null +++ b/.github/actions/gitlint/action.yml @@ -0,0 +1,42 @@ +# ******************************************************************************* +# Copyright (c) 2024 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +name: "Gitlint Action" +description: "An action to install and run Gitlint on PR commits" +inputs: + pr-number: + description: "Pull Request number used to fetch commits" + required: true + base-branch: + description: "Base branch to compare commits against (default: main)" + default: "main" + required: false +runs: + using: "docker" + image: "jorisroovers/gitlint:0.19.1" + entrypoint: /bin/sh + args: + - -c + - | + git config --global --add safe.directory /github/workspace && \ + git fetch origin +refs/heads/${{ inputs.base-branch }}:refs/remotes/origin/${{ inputs.base-branch }} && \ + git fetch origin +refs/pull/${{ inputs.pr-number }}/head && \ + if ! gitlint --commits origin/${{ inputs.base-branch }}..HEAD; then \ + echo -e "\nWARNING: Your commit message does not follow the required format." && \ + echo "Formatting rules: https://eclipse-score.github.io/score/main/contribute/general/git.html" && \ + echo -e "To fix your commit message, run:\n" && \ + echo " git commit --amend" && \ + echo "Then update your commit (fix gitlint warnings). Finally, force-push:" && \ + echo " git push --force-with-lease" && \ + exit 1; \ + fi diff --git a/.github/workflows/copyright.yml b/.github/workflows/copyright.yml new file mode 100644 index 00000000..08ef3767 --- /dev/null +++ b/.github/workflows/copyright.yml @@ -0,0 +1,24 @@ +# ******************************************************************************* +# Copyright (c) 2024 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +name: Copyright checks +on: + pull_request: + types: [opened, reopened, synchronize] + merge_group: + types: [checks_requested] +jobs: + copyright-check: + uses: eclipse-score/cicd-workflows/.github/workflows/copyright.yml@main + with: + bazel-target: "run //:copyright.check" diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml new file mode 100644 index 00000000..38188049 --- /dev/null +++ b/.github/workflows/format.yml @@ -0,0 +1,30 @@ +# ******************************************************************************* +# Copyright (c) 2024 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +name: Formatting checks +on: + pull_request: + types: [opened, reopened, synchronize] + merge_group: + types: [checks_requested] +jobs: + formatting-check: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4.2.2 + - name: Setup Bazel + uses: bazel-contrib/setup-bazel@0.9.1 + - name: Run formatting checks + run: | + bazel test //src:format.check diff --git a/.github/workflows/license_check.yml b/.github/workflows/license_check.yml new file mode 100644 index 00000000..aba7f99d --- /dev/null +++ b/.github/workflows/license_check.yml @@ -0,0 +1,32 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +name: License check preparation +on: + pull_request_target: + types: [opened, reopened, synchronize] + merge_group: + types: [checks_requested] + +permissions: + pull-requests: write + issues: write + + +jobs: + license-check: + uses: eclipse-score/cicd-workflows/.github/workflows/license-check.yml@main + with: + repo-url: "${{ github.server_url }}/${{ github.repository }}" + secrets: + dash-api-token: ${{ secrets.ECLIPSE_GITLAB_API_TOKEN }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 00000000..be022518 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,26 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +name: Run Bazel Tests +on: + pull_request: + types: [opened, reopened, synchronize] +jobs: + code: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4.2.2 + - name: Run test targets + run: | + bazel test ... 
From 51201064a811aed0b81ab3f35756824e7cf01c62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Mon, 12 May 2025 15:11:35 +0200 Subject: [PATCH 015/231] quick fix format-cmd for CI/CD (#20) --- .github/workflows/format.yml | 1 + .github/workflows/test.yml | 1 + docs.bzl | 4 ++-- pyproject.toml | 4 ++-- src/BUILD | 18 +++++++++++++++--- 5 files changed, 21 insertions(+), 7 deletions(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 38188049..5f6c97e4 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -27,4 +27,5 @@ jobs: uses: bazel-contrib/setup-bazel@0.9.1 - name: Run formatting checks run: | + bazel run //src:ide_support bazel test //src:format.check diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index be022518..2e7b1a06 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -23,4 +23,5 @@ jobs: uses: actions/checkout@v4.2.2 - name: Run test targets run: | + bazel run //src:ide_support bazel test ... diff --git a/docs.bzl b/docs.bzl index 9b6e181c..c938090c 100644 --- a/docs.bzl +++ b/docs.bzl @@ -38,11 +38,11 @@ # For user-facing documentation, refer to `/README.md`. 
load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") -load("@score_docs_as_code//src/extensions/score_source_code_linker:collect_source_files.bzl", "parse_source_files_for_needs_links") load("@pip_process//:requirements.bzl", "all_requirements", "requirement") load("@rules_java//java:java_binary.bzl", "java_binary") load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary", "sphinx_docs") load("@rules_python//sphinxdocs:sphinx_docs_library.bzl", "sphinx_docs_library") +load("@score_docs_as_code//src/extensions/score_source_code_linker:collect_source_files.bzl", "parse_source_files_for_needs_links") load("@score_python_basics//:defs.bzl", "score_virtualenv") sphinx_requirements = all_requirements + [ @@ -158,7 +158,7 @@ def _ide_support(): reqs = sphinx_requirements, ) -def _docs(name = "docs", format = "html", external_needs_deps = list(), external_needs_def = dict()): +def _docs(name = "docs", format = "html", external_needs_deps = list(), external_needs_def = list()): ext_needs_arg = "--define=external_needs_source=" + json.encode(external_needs_def) sphinx_docs( diff --git a/pyproject.toml b/pyproject.toml index 4275d36f..e6e9b40d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ # This file is at the root level, as it applies to all Python code, # not only to docs or to tools. 
[tool.pyright] -extends = "bazel-bin/examples/simple/ide_support.runfiles/score_python_basics~/pyproject.toml" +extends = "bazel-bin/src/ide_support.runfiles/score_python_basics~/pyproject.toml" exclude = [ "**/__pycache__", @@ -11,7 +11,7 @@ exclude = [ ] [tool.ruff] -extend = "bazel-bin/examples/simple/ide_support.runfiles/score_python_basics~/pyproject.toml" +extend = "bazel-bin/src/ide_support.runfiles/score_python_basics~/pyproject.toml" extend-exclude = [ "**/__pycache__", diff --git a/src/BUILD b/src/BUILD index 6f2aaf88..3a13a60f 100644 --- a/src/BUILD +++ b/src/BUILD @@ -10,7 +10,6 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* - load("@aspect_rules_lint//format:defs.bzl", "format_multirun", "format_test") load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") load("@pip_process//:requirements.bzl", "all_requirements", "requirement") @@ -19,7 +18,20 @@ load("@rules_pkg//pkg:tar.bzl", "pkg_tar") load("@rules_python//python:pip.bzl", "compile_pip_requirements") load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary") load("@score_dash_license_checker//:dash.bzl", "dash_license_checker") - +load("@score_python_basics//:defs.bzl", "score_virtualenv") + +score_virtualenv( + reqs = [ + "@score_docs_as_code//src:plantuml_for_python", + "@score_docs_as_code//src/extensions:score_plantuml", + "@score_docs_as_code//src/find_runfiles:find_runfiles", + "@score_docs_as_code//src/extensions/score_draw_uml_funcs:score_draw_uml_funcs", + "@score_docs_as_code//src/extensions/score_header_service:score_header_service", + "@score_docs_as_code//src/extensions/score_layout:score_layout", + "@score_docs_as_code//src/extensions/score_metamodel:score_metamodel", + "@score_docs_as_code//src/extensions/score_source_code_linker:score_source_code_linker", + ], +) # These are only exported because they're passed as files to the //docs.bzl # macros, and thus must be visible to other 
packages. They should only be # referenced by the //docs.bzl macros. @@ -135,7 +147,7 @@ filegroup( dash_license_checker( src = ":requirements_lock", - file_type = "requirements", # let it auto-detect based on project_config + file_type = "requirements", visibility = ["//visibility:public"], ) From 81e2916726f60b99bf28aac9dac63c9aa2048f73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Mon, 12 May 2025 15:52:16 +0200 Subject: [PATCH 016/231] always export needs.json & bugfixes (#21) --- .github/workflows/license_check.yml | 1 + MODULE.bazel | 2 +- src/BUILD | 2 +- src/extensions/score_metamodel/__init__.py | 6 ++++++ 4 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/workflows/license_check.yml b/.github/workflows/license_check.yml index aba7f99d..51a9ffdd 100644 --- a/.github/workflows/license_check.yml +++ b/.github/workflows/license_check.yml @@ -28,5 +28,6 @@ jobs: uses: eclipse-score/cicd-workflows/.github/workflows/license-check.yml@main with: repo-url: "${{ github.server_url }}/${{ github.repository }}" + bazel-target: "//src:license-check" secrets: dash-api-token: ${{ secrets.ECLIPSE_GITLAB_API_TOKEN }} diff --git a/MODULE.bazel b/MODULE.bazel index 49071554..2b484ffc 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.2.0", + version = "0.2.1", compatibility_level = 0, ) diff --git a/src/BUILD b/src/BUILD index 3a13a60f..f9ff9160 100644 --- a/src/BUILD +++ b/src/BUILD @@ -30,7 +30,7 @@ score_virtualenv( "@score_docs_as_code//src/extensions/score_layout:score_layout", "@score_docs_as_code//src/extensions/score_metamodel:score_metamodel", "@score_docs_as_code//src/extensions/score_source_code_linker:score_source_code_linker", - ], + ] + all_requirements, ) # These are only exported because they're passed as files to the //docs.bzl # macros, and thus must be visible to other packages. 
They should only be diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 70519d80..c69e5d6e 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -290,6 +290,12 @@ def setup(app: Sphinx) -> dict[str, str | bool]: app.config.graph_checks = metamodel["needs_graph_check"] app.config.stop_words = metamodel["stop_words"] app.config.weak_words = metamodel["weak_words"] + + # Ensure that 'needs.json' is always build. + app.config.needs_build_json = True + app.config.needs_reproducible_json = True + app.config.needs_json_remove_defaults = True + app.connect("config-inited", parse_external_needs_sources) discover_checks() From 451573486ee939fd5002e42f1f2009845fb53ea7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Mon, 12 May 2025 17:04:50 +0200 Subject: [PATCH 017/231] docs with external link (#22) --- .github/workflows/license_check.yml | 2 +- docs/BUILD | 60 ++++++++++++++++++++++ docs/conf.py | 54 +++++++++++++++++++ docs/index.rst | 29 +++++++++++ src/extensions/score_metamodel/__init__.py | 5 +- 5 files changed, 148 insertions(+), 2 deletions(-) create mode 100644 docs/BUILD create mode 100644 docs/conf.py create mode 100644 docs/index.rst diff --git a/.github/workflows/license_check.yml b/.github/workflows/license_check.yml index 51a9ffdd..7d80eecd 100644 --- a/.github/workflows/license_check.yml +++ b/.github/workflows/license_check.yml @@ -28,6 +28,6 @@ jobs: uses: eclipse-score/cicd-workflows/.github/workflows/license-check.yml@main with: repo-url: "${{ github.server_url }}/${{ github.repository }}" - bazel-target: "//src:license-check" + bazel-target: "run //src:license-check" secrets: dash-api-token: ${{ secrets.ECLIPSE_GITLAB_API_TOKEN }} diff --git a/docs/BUILD b/docs/BUILD new file mode 100644 index 00000000..0c2ce22a --- /dev/null +++ b/docs/BUILD @@ -0,0 +1,60 @@ +# 
******************************************************************************* +# Copyright (c) 2024 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +load("@aspect_rules_py//py:defs.bzl", "py_library") +load("//:docs.bzl", "docs") + +# Creates all documentation targets: +# - `docs:incremental` for building docs incrementally at runtime +# - `docs:live_preview` for live preview in the browser without an IDE +# - `docs:ide_support` for creating python virtualenv for IDE support +# - `docs:docs` for building documentation at build-time + +docs( + conf_dir = "docs", + docs_targets = [ + { + "suffix": "latest", # latest main branch documentation build + "external_needs_info": [ + { + "base_url": "https://eclipse-score.github.io/score/main/", + "json_url": "https://eclipse-score.github.io/score/main/needs.json", + "version": "0.1", + "id_prefix": "score_", + }, + ], + }, + ], + source_dir = "docs", + source_files_to_scan_for_needs_links = [ + # Note: you can add filegroups, globs, or entire targets here. 
+ "//src:score_extension_files", + ], +) + +# ╭───────────────────────────────────────╮ +# │ This is commented out until local │ +# │ multi-repo testing is implemented │ +# ╰───────────────────────────────────────╯ + +# { +# "suffix": "release", # The version imported from MODULE.bazel +# "target": ["@score_platform//docs:docs"], +# "external_needs_info": [ +# { +# "base_url": "https://eclipse-score.github.io/score/pr-980/", +# "json_path": "/score_platform~/docs/docs/_build/html/needs.json", +# "version": "0.1", +# }, +# ], +# }, diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 00000000..fbef18a8 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,54 @@ +# ******************************************************************************* +# Copyright (c) 2024 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +# Configuration file for the Sphinx documentation builder. 
+# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +project = "Simple Example Project" +author = "S-CORE" +version = "0.1" + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + + +extensions = [ + "sphinx_design", + "sphinx_needs", + "sphinxcontrib.plantuml", + "score_plantuml", + "score_metamodel", + "score_draw_uml_funcs", + "score_source_code_linker", + "score_layout", +] + +exclude_patterns = [ + # The following entries are not required when building the documentation via 'bazel + # build //docs:docs', as that command runs in a sandboxed environment. However, when + # building the documentation via 'bazel run //docs:incremental' or esbonio, these + # entries are required to prevent the build from failing. + "bazel-*", + ".venv_docs", +] + +templates_path = ["templates"] + +# Enable numref +numfig = True diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 00000000..b23acaa2 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,29 @@ +.. + # ******************************************************************************* + # Copyright (c) 2024 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. 
+ # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + +Hello World +================= +This is a simple example of a documentation page using the `docs` tool. + +.. stkh_req:: TestTitle + :id: stkh_req__test_requirement + :status: valid + :safety: QM + :rationale: A simple requirement we need to enable a documentation build + :reqtype: Functional + + Some content to make sure we also can render this + This is a link to an external need inside the 'score' documentation + :need:`SCORE_gd_req__req__attr_safety` + diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index c69e5d6e..2e8321ea 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -77,7 +77,10 @@ def _run_checks(app: Sphinx, exception: Exception | None) -> None: if exception: return - needs_all_needs = SphinxNeedsData(app.env).get_needs_view() + # Filter out external needs, as checks are only intended to be run on internal needs. 
+ needs_all_needs = ( + SphinxNeedsData(app.env).get_needs_view().filter_is_external(False) + ) logger.debug(f"Running checks for {len(needs_all_needs)} needs") From d45244df0268b145bf6d8764eef797de2a50f6f0 Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Tue, 13 May 2025 08:59:01 +0200 Subject: [PATCH 018/231] docs-as-code: Remove gitlint non needed action related file (#19) by: Aymen Soussi aymen.soussi@expleogroup.com --- .github/actions/gitlint/action.yml | 42 ------------------------------ .github/workflows/copyright.yml | 2 +- .github/workflows/format.yml | 2 +- 3 files changed, 2 insertions(+), 44 deletions(-) delete mode 100644 .github/actions/gitlint/action.yml diff --git a/.github/actions/gitlint/action.yml b/.github/actions/gitlint/action.yml deleted file mode 100644 index 418c504e..00000000 --- a/.github/actions/gitlint/action.yml +++ /dev/null @@ -1,42 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2024 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. 
-# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* - -name: "Gitlint Action" -description: "An action to install and run Gitlint on PR commits" -inputs: - pr-number: - description: "Pull Request number used to fetch commits" - required: true - base-branch: - description: "Base branch to compare commits against (default: main)" - default: "main" - required: false -runs: - using: "docker" - image: "jorisroovers/gitlint:0.19.1" - entrypoint: /bin/sh - args: - - -c - - | - git config --global --add safe.directory /github/workspace && \ - git fetch origin +refs/heads/${{ inputs.base-branch }}:refs/remotes/origin/${{ inputs.base-branch }} && \ - git fetch origin +refs/pull/${{ inputs.pr-number }}/head && \ - if ! gitlint --commits origin/${{ inputs.base-branch }}..HEAD; then \ - echo -e "\nWARNING: Your commit message does not follow the required format." && \ - echo "Formatting rules: https://eclipse-score.github.io/score/main/contribute/general/git.html" && \ - echo -e "To fix your commit message, run:\n" && \ - echo " git commit --amend" && \ - echo "Then update your commit (fix gitlint warnings). Finally, force-push:" && \ - echo " git push --force-with-lease" && \ - exit 1; \ - fi diff --git a/.github/workflows/copyright.yml b/.github/workflows/copyright.yml index 08ef3767..3114bce4 100644 --- a/.github/workflows/copyright.yml +++ b/.github/workflows/copyright.yml @@ -1,5 +1,5 @@ # ******************************************************************************* -# Copyright (c) 2024 Contributors to the Eclipse Foundation +# Copyright (c) 2025 Contributors to the Eclipse Foundation # # See the NOTICE file(s) distributed with this work for additional # information regarding copyright ownership. 
diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 5f6c97e4..b3b0b477 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -1,5 +1,5 @@ # ******************************************************************************* -# Copyright (c) 2024 Contributors to the Eclipse Foundation +# Copyright (c) 2025 Contributors to the Eclipse Foundation # # See the NOTICE file(s) distributed with this work for additional # information regarding copyright ownership. From ba3147b32db486a8c73c0f7c35244a88b7e4bac9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Tue, 13 May 2025 16:47:28 +0200 Subject: [PATCH 019/231] Fixed source_code_linker finding external needs (#23) * Fixed source_code_linker finding external needs * Added explaining comments Added simple loop search logic to try all available prefixes per id. --- BUILD | 2 + docs/BUILD | 2 +- src/BUILD | 23 +++++++++- src/extensions/score_metamodel/BUILD | 2 +- .../score_source_code_linker/__init__.py | 45 ++++++++++++++----- 5 files changed, 59 insertions(+), 15 deletions(-) diff --git a/BUILD b/BUILD index 22c08c79..951bdbf3 100644 --- a/BUILD +++ b/BUILD @@ -13,6 +13,8 @@ load("@score_cr_checker//:cr_checker.bzl", "copyright_checker") +package(default_visibility = ["//visibility:public"]) + copyright_checker( name = "copyright", srcs = [ diff --git a/docs/BUILD b/docs/BUILD index 0c2ce22a..a3b0fa5b 100644 --- a/docs/BUILD +++ b/docs/BUILD @@ -27,7 +27,7 @@ docs( "suffix": "latest", # latest main branch documentation build "external_needs_info": [ { - "base_url": "https://eclipse-score.github.io/score/main/", + "base_url": "https://eclipse-score.github.io/score/main", "json_url": "https://eclipse-score.github.io/score/main/needs.json", "version": "0.1", "id_prefix": "score_", diff --git a/src/BUILD b/src/BUILD index f9ff9160..ecc9d08b 100644 --- a/src/BUILD +++ b/src/BUILD @@ -101,16 +101,35 @@ pkg_tar( srcs = [":html_files"], ) +# 
'source_code_linker' needs all targets to be passed to it. +# This is a convenient gathering of all the 'python internal modules' to avoid writing them individiually +py_library( + name = "docs_as_code_py_modules", + srcs = [ + "@score_docs_as_code//src:plantuml_for_python", + "@score_docs_as_code//src/extensions:score_plantuml", + "@score_docs_as_code//src/extensions/score_draw_uml_funcs", + "@score_docs_as_code//src/extensions/score_header_service", + "@score_docs_as_code//src/extensions/score_layout", + "@score_docs_as_code//src/extensions/score_metamodel", + "@score_docs_as_code//src/extensions/score_source_code_linker", + "@score_docs_as_code//src/find_runfiles", + ], + visibility = ["//visibility:public"], +) + filegroup( name = "score_extension_files", srcs = glob( - ["*/**"], + [ + "**", + ], exclude = [ "**/test/**", "**/tests/**", "**/__pycache__/**", ], - ), + ) + [":docs_as_code_py_modules"], visibility = ["//visibility:public"], ) diff --git a/src/extensions/score_metamodel/BUILD b/src/extensions/score_metamodel/BUILD index f180e555..c2c415df 100644 --- a/src/extensions/score_metamodel/BUILD +++ b/src/extensions/score_metamodel/BUILD @@ -18,7 +18,7 @@ py_library( name = "score_metamodel", srcs = glob( ["**/*.py"], - ), + ) + ["metamodel.yaml"], data = glob(["*.yaml"]), # Needed to remove 'resolving of symlink' in score_metamodel.__init__ imports = [ ".", diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index adff9c5a..fac87c12 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -16,7 +16,7 @@ from sphinx.application import Sphinx from sphinx.environment import BuildEnvironment -from sphinx_needs.data import SphinxNeedsData +from sphinx_needs.data import NeedsMutable, SphinxNeedsData, NeedsInfoType from sphinx_needs.logging import get_logger from src.extensions.score_source_code_linker.parse_source_files import 
GITHUB_BASE_URL @@ -59,6 +59,25 @@ def setup(app: Sphinx) -> dict[str, str | bool]: } +def find_need( + all_needs: NeedsMutable, id: str, prefixes: list[str] +) -> NeedsInfoType | None: + """ + Checks all possible external 'prefixes' for an ID + So that the linker can add the link to the correct NeedsInfoType object. + """ + if id in all_needs: + return all_needs[id] + + # Try all possible prefixes + for prefix in prefixes: + prefixed_id = f"{prefix}{id}" + if prefixed_id in all_needs: + return all_needs[prefixed_id] + + return None + + # re-qid: gd_req__req__attr_impl def add_source_link(app: Sphinx, env: BuildEnvironment) -> None: """ @@ -77,28 +96,25 @@ def add_source_link(app: Sphinx, env: BuildEnvironment) -> None: p5 = Path(__file__).parents[5] if str(p5).endswith("src"): - LOGGER.info("DEBUG: WE ARE IN THE IF") + LOGGER.debug("DEBUG: WE ARE IN THE IF") path = str(p5.parent / Path(app.confdir).name / "score_source_code_parser.json") else: - LOGGER.info("DEBUG: WE ARE IN THE ELSE") + LOGGER.debug("DEBUG: WE ARE IN THE ELSE") path = str(p5 / "score_source_code_parser.json") if app.config.score_source_code_linker_file_overwrite: path = app.config.score_source_code_linker_file_overwrite + # For some reason the prefix 'sphinx_needs internally' is CAPSLOCKED. + # So we have to make sure we uppercase the prefixes + prefixes = [x["id_prefix"].upper() for x in app.config.needs_external_needs] try: with open(path) as f: gh_json = json.load(f) for id, link in gh_json.items(): id = id.strip() - try: - # NOTE: Removing & adding the need is important to make sure - # the needs gets 're-evaluated'. 
- need = needs_copy[id] # NeedsInfoType - Needs_Data.remove_need(need["id"]) - need["source_code_link"] = ",".join(link) - Needs_Data.add_need(need) - except KeyError: + need = find_need(needs_copy, id, prefixes) + if need is None: # NOTE: manipulating link to remove git-hash, # making the output file location more readable files = [x.replace(GITHUB_BASE_URL, "").split("/", 1)[-1] for x in link] @@ -107,6 +123,13 @@ def add_source_link(app: Sphinx, env: BuildEnvironment) -> None: + f"Found in file(s): {files}", type="score_source_code_linker", ) + continue + + # NOTE: Removing & adding the need is important to make sure + # the needs gets 're-evaluated'. + Needs_Data.remove_need(need["id"]) + need["source_code_link"] = ",".join(link) + Needs_Data.add_need(need) except Exception as e: LOGGER.warning( f"An unexpected error occurred while adding source_code_links to needs." From 0b9ad5033467dc53226c173eb2acdb43fd06e495 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jochen=20H=C3=B6nle?= Date: Fri, 16 May 2025 09:05:08 +0200 Subject: [PATCH 020/231] docs: include inc files (#26) Include inc files in doc build --- docs.bzl | 1 + 1 file changed, 1 insertion(+) diff --git a/docs.bzl b/docs.bzl index c938090c..00083233 100644 --- a/docs.bzl +++ b/docs.bzl @@ -176,6 +176,7 @@ def _docs(name = "docs", format = "html", external_needs_deps = list(), external "**/*.yaml", "**/*.json", "**/*.csv", + "**/*.inc", ], exclude = ["**/tests/*"]), config = ":conf.py", extra_opts = [ From 2bc27b4c06f5bca84e19b6c4d0b486334400936d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Fri, 16 May 2025 09:18:38 +0200 Subject: [PATCH 021/231] Update Module version (#30) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Maximilian Sören Pollak --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index 2b484ffc..58f6b352 100644 --- a/MODULE.bazel +++ 
b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.2.1", + version = "0.2.2", compatibility_level = 0, ) From bd979d41848ab7644e6649b4e88daf441580b2f3 Mon Sep 17 00:00:00 2001 From: Dan Calavrezo Date: Tue, 20 May 2025 19:28:07 +0300 Subject: [PATCH 022/231] docs: extened docs target (#34) - Added target for building Github archive - Take the input files an put them in the archive as-is. No renaming. - Use empty lists for now, as we are still not sure how to use or adapt the source code linker in multirepo Addresses: #16 --------- Signed-off-by: Dan Calavrezo --- README.md | 5 +++++ docs.bzl | 27 +++++++++++++++++++++++++++ docs/BUILD | 5 +---- examples/simple/BUILD | 5 +---- 4 files changed, 34 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index a55c9192..261eb3a3 100644 --- a/README.md +++ b/README.md @@ -102,7 +102,12 @@ Using the `docs` macro enables multiple targets which are now useable. | incremental | Builds documentation incrementally (faster) | `bazel run` | | live_preview | Creates a live_preview of the documentation viewable in a local server | `bazel run` | | ide_support | Creates virtual environment under '.venv_docs' | `bazel run` | +| `html` | Filegroup that exposes the generated HTML files | `bazel build //docs:html` | +| `html_files` | Prepares a flattened version of the HTML output for packaging | `bazel build //docs:html_files` | +| `github_pages` | Creates a `.tar` archive from the HTML output (ready for deployment) | `bazel build //docs:github_pages` | + +> For each entry in `docs_targets`, these targets are suffixed accordingly (e.g. `docs_api`, `html_api`, `github_pages_api`). 
______________________________________________________________________ ## Configuration Options diff --git a/docs.bzl b/docs.bzl index 00083233..a7cf6d1e 100644 --- a/docs.bzl +++ b/docs.bzl @@ -40,6 +40,8 @@ load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") load("@pip_process//:requirements.bzl", "all_requirements", "requirement") load("@rules_java//java:java_binary.bzl", "java_binary") +load("@rules_pkg//pkg:mappings.bzl", "pkg_files") +load("@rules_pkg//pkg:tar.bzl", "pkg_tar") load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary", "sphinx_docs") load("@rules_python//sphinxdocs:sphinx_docs_library.bzl", "sphinx_docs_library") load("@score_docs_as_code//src/extensions/score_source_code_linker:collect_source_files.bzl", "parse_source_files_for_needs_links") @@ -161,6 +163,9 @@ def _ide_support(): def _docs(name = "docs", format = "html", external_needs_deps = list(), external_needs_def = list()): ext_needs_arg = "--define=external_needs_source=" + json.encode(external_needs_def) + # Clean suffix used in all generated target names + target_suffix = "" if name == "docs" else "_" + name[len("docs"):] + sphinx_docs( name = name, srcs = native.glob([ @@ -197,3 +202,25 @@ def _docs(name = "docs", format = "html", external_needs_deps = list(), external ] + external_needs_deps, visibility = ["//visibility:public"], ) + + native.filegroup( + name = "assets" + target_suffix, + srcs = native.glob(["_assets/**"]), + visibility = ["//visibility:public"], + ) + + native.filegroup( + name = "html" + target_suffix, + srcs = [":" + name], + visibility = ["//visibility:public"], + ) + + pkg_files( + name = "html_files" + target_suffix, + srcs = [":html" + target_suffix], + ) + + pkg_tar( + name = "github_pages" + target_suffix, + srcs = [":html_files" + target_suffix], + ) diff --git a/docs/BUILD b/docs/BUILD index a3b0fa5b..8b8ab40d 100644 --- a/docs/BUILD +++ b/docs/BUILD @@ -36,10 +36,7 @@ docs( }, ], source_dir = "docs", - 
source_files_to_scan_for_needs_links = [ - # Note: you can add filegroups, globs, or entire targets here. - "//src:score_extension_files", - ], + source_files_to_scan_for_needs_links = [], ) # ╭───────────────────────────────────────╮ diff --git a/examples/simple/BUILD b/examples/simple/BUILD index 74f1efd5..651a91cc 100644 --- a/examples/simple/BUILD +++ b/examples/simple/BUILD @@ -28,10 +28,7 @@ docs( }, ], source_dir = "examples/simple", - source_files_to_scan_for_needs_links = [ - # Note: you can add filegroups, globs, or entire targets here. - "//src:score_extension_files", - ], + source_files_to_scan_for_needs_links = [], ) # ╭───────────────────────────────────────╮ From 04e561ea987298de47fc3de4986e5b0033885b5f Mon Sep 17 00:00:00 2001 From: Dan Calavrezo Date: Wed, 21 May 2025 10:13:33 +0300 Subject: [PATCH 023/231] docs: updated version (#36) updated version for release Addresses: #16 Signed-off-by: Dan Calavrezo --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index 58f6b352..adbcf232 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.2.2", + version = "0.2.3", compatibility_level = 0, ) From 7ed2c2abe2da7db4812659e637baf4b8c6a9dc55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Wed, 21 May 2025 11:22:06 +0200 Subject: [PATCH 024/231] Fixed wrong check activation (#37) * Fixed wrong check activation * Fixed test & rst files Test and rst files needed fixing to comply with new check rules Check found False positives due to 'process' being moved and loosing it's prefix. 
--- docs/index.rst | 2 +- examples/simple/index.rst | 3 +- .../checks/attributes_format.py | 31 ++++++++++++++++--- .../test_attributes_format_id_format.rst | 8 ++--- 4 files changed, 34 insertions(+), 10 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index b23acaa2..9193ffaa 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -17,7 +17,7 @@ Hello World This is a simple example of a documentation page using the `docs` tool. .. stkh_req:: TestTitle - :id: stkh_req__test_requirement + :id: stkh_req__docs__test_requirement :status: valid :safety: QM :rationale: A simple requirement we need to enable a documentation build diff --git a/examples/simple/index.rst b/examples/simple/index.rst index e3c47208..7856e574 100644 --- a/examples/simple/index.rst +++ b/examples/simple/index.rst @@ -17,7 +17,7 @@ Hello World This is a simple example of a documentation page using the `docs` tool. .. stkh_req:: TestTitle - :id: stkh_req__test_requirement + :id: stkh_req__docs__test_requirement :status: valid :safety: QM :rationale: A simple requirement we need to enable a documentation build @@ -26,6 +26,7 @@ This is a simple example of a documentation page using the `docs` tool. Some content to make sure we also can render this + .. .. std_req:: External Link Test Req .. :id: std_req__iso26262__testing .. :status: valid diff --git a/src/extensions/score_metamodel/checks/attributes_format.py b/src/extensions/score_metamodel/checks/attributes_format.py index d5d75efe..a0017406 100644 --- a/src/extensions/score_metamodel/checks/attributes_format.py +++ b/src/extensions/score_metamodel/checks/attributes_format.py @@ -25,12 +25,35 @@ def check_id_format(app: Sphinx, need: NeedsInfoType, log: CheckLogger): the requirement id or not. 
--- """ + # These folders are taken from 'https://github.com/eclipse-score/process_description/tree/main/process' + # This means, any needs within any of these folders (no matter where they are) will not be required to have 3 parts + process_folder_names = [ + "general_concepts", + "introduction", + "process_areas", + "roles", + "standards", + "workflows", + "workproducts", + "process", + ] # Split the string by underscores parts = need["id"].split("__") - - if need["id"].startswith( - ("gd_", "wf__", "wp__", "rl__", "stkh_req__", "tool_req__", "doc__") - ) or ("process/" in str(need.get("docname", ""))): + if need["type"] in [ + "std_wp", + "document", # This is used in 'platform_managment' in score. + "gd_guidl", + "workflow", + "gd_chklst", + "std_req", + "role", + "doc_concept", + "gd_temp", + "gd_method", + "gd_req", + "workproduct", + "doc_getstrt", + ] or any(prefix in str(need.get("docname", "")) for prefix in process_folder_names): if len(parts) != 2 and len(parts) != 3: msg = ( "expected to consisting of one of these 2 formats:" diff --git a/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_format.rst b/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_format.rst index 07648c85..3c8025c5 100644 --- a/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_format.rst +++ b/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_format.rst @@ -14,16 +14,16 @@ #CHECK: check_id_format .. Id does not consists of 3 parts -#EXPECT: std_wp__test__test__abcd.id (std_wp__test__test__abcd): expected to consisting of this format: `____`. +#EXPECT: stk_req__test.id (stk_req__test): expected to consisting of this format: `____`. -.. std_wp:: This is a test - :id: std_wp__test__test__abcd +.. stkh_req:: This is a test + :id: stk_req__test .. Id follows pattern #EXPECT-NOT: expected to consisting of this format: `____`. .. 
std_wp:: This is a test - :id: std_wp__test__abce + :id: std_wp__test__test__abcd .. Id starts with wp and number of parth is neither 2 nor 3 #EXPECT: wp__test__test__abcd.id (wp__test__test__abcd): expected to consisting of one of these 2 formats:`__` or `____`. From 5af8620faaefd1948cd02dca86c4e24b5f75e31b Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Wed, 21 May 2025 12:07:35 +0200 Subject: [PATCH 025/231] Incremental cleanup (#39) Cleaned up unused imports and small fix inside the README Also-by: Aymen Soussi aymen.soussi@expleogroup.com --- README.md | 4 ++-- src/extensions/score_metamodel/tests/rst/conf.py | 2 +- .../score_metamodel/tests/test_rules_file_based.py | 5 ----- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 261eb3a3..55116af9 100644 --- a/README.md +++ b/README.md @@ -81,8 +81,8 @@ For a full example look at [a simple example](examples/simple) #### 3. Run a documentation build: ```bash -bazel run //path/to/BUILD-file:incremental # documentation at '_build/' -bazel build //path/to/BUILD-file:docs # documentation at 'bazel-bin/ +bazel run //path/to/BUILD-file:incremental_latest # documentation at '_build/' +bazel build //path/to/BUILD-file:docs_latest # documentation at 'bazel-bin/ ``` #### 4. Access your documentation at diff --git a/src/extensions/score_metamodel/tests/rst/conf.py b/src/extensions/score_metamodel/tests/rst/conf.py index 684c9cee..4630b7fa 100644 --- a/src/extensions/score_metamodel/tests/rst/conf.py +++ b/src/extensions/score_metamodel/tests/rst/conf.py @@ -1,5 +1,5 @@ # ******************************************************************************* -# Copyright (c) 2024 Contributors to the Eclipse Foundation +# Copyright (c) 2025 Contributors to the Eclipse Foundation # # See the NOTICE file(s) distributed with this work for additional # information regarding copyright ownership. 
diff --git a/src/extensions/score_metamodel/tests/test_rules_file_based.py b/src/extensions/score_metamodel/tests/test_rules_file_based.py index fa640229..f875e172 100644 --- a/src/extensions/score_metamodel/tests/test_rules_file_based.py +++ b/src/extensions/score_metamodel/tests/test_rules_file_based.py @@ -20,11 +20,6 @@ import pytest from sphinx.testing.util import SphinxTestApp -from src.extensions.score_metamodel import ( - graph_check_function, - local_check_function, -) - RST_DIR = Path(__file__).absolute().parent / "rst" DOCS_DIR = Path(__file__).absolute().parent.parent.parent.parent.parent TOOLING_DIR_NAME = "src" From 7e6a388d1bfc1f922db6dc071deaaade80449672 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Wed, 21 May 2025 13:37:37 +0200 Subject: [PATCH 026/231] Increase versioning (#38) --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index adbcf232..86de00df 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.2.3", + version = "0.2.4", compatibility_level = 0, ) From ba387d7b90fafa0064b265006a01b966a6d254b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Thu, 22 May 2025 21:41:14 +0200 Subject: [PATCH 027/231] Adding further examples & bugfixes (#32) - Add multiple 'uni-direcional' docs build examples - Bugfix 'docs_needs' build target in Module imports - Patch some spelling mistakes - Delete 'docs'. It has been moved to examples/simple already. 
--- Addresses a bug that caused the 'docs_needs' import via a Module (like in the linking-release example) to not be executed correctly, as it would be missing the dependencies and missing the 'sphinx_build binary' --- MODULE.bazel | 7 +- README.md | 38 +++++----- docs.bzl | 31 ++++---- examples/README.md | 70 +++++++++++++++++++ examples/linking-both/BUILD | 54 ++++++++++++++ {docs => examples/linking-both}/conf.py | 2 +- {docs => examples/linking-both}/index.rst | 5 +- examples/linking-latest/BUILD | 42 +++++++++++ examples/linking-latest/conf.py | 54 ++++++++++++++ examples/linking-latest/index.rst | 30 ++++++++ {docs => examples/linking-release}/BUILD | 30 ++++---- examples/linking-release/conf.py | 54 ++++++++++++++ examples/linking-release/index.rst | 30 ++++++++ src/BUILD | 2 +- src/extensions/README.md | 2 +- src/extensions/score_header_service/README.md | 4 +- src/extensions/score_metamodel/README.md | 4 +- src/extensions/score_metamodel/__init__.py | 1 + .../score_source_code_linker/README.md | 2 +- 19 files changed, 400 insertions(+), 62 deletions(-) create mode 100644 examples/README.md create mode 100644 examples/linking-both/BUILD rename {docs => examples/linking-both}/conf.py (96%) rename {docs => examples/linking-both}/index.rst (80%) create mode 100644 examples/linking-latest/BUILD create mode 100644 examples/linking-latest/conf.py create mode 100644 examples/linking-latest/index.rst rename {docs => examples/linking-release}/BUILD (72%) create mode 100644 examples/linking-release/conf.py create mode 100644 examples/linking-release/index.rst diff --git a/MODULE.bazel b/MODULE.bazel index 86de00df..5d3c5a77 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -29,7 +29,7 @@ bazel_dep(name = "rules_pkg", version = "1.0.1") # Python version # ############################################################################### -bazel_dep(name = "rules_python", version = "1.0.0") +bazel_dep(name = "rules_python", version = "1.4.1") PYTHON_VERSION = "3.12" @@ 
-86,10 +86,13 @@ http_file( ) # Provides, pytest & venv -bazel_dep(name = "score_python_basics", version = "0.3.0") +bazel_dep(name = "score_python_basics", version = "0.3.1") # Checker rule for CopyRight checks/fixes bazel_dep(name = "score_cr_checker", version = "0.2.2") +# This is only needed to build the examples. +bazel_dep(name = "score_platform", version = "0.1.0") + # Grab dash bazel_dep(name = "score_dash_license_checker", version = "0.1.1") diff --git a/README.md b/README.md index 55116af9..e9fb9ec6 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ -# Bazel Sphinx Documentation Builder +# Score-Docs-As-Code Module -A Bazel module providing comprehensive tools and extensions for building Sphinx documentation within Bazel projects. +A Bazel module providing tools and extensions to enable and simplify documentation building via Sphinx ## Overview -This module allows you to easily integrate Sphinx documentation generation into your Bazel build system. It provides a collection of utilities, extensions, and themes specifically designed to enhance documentation capabilities while maintaining Bazel's reproducible build environment. +This module allows you to easily integrate Sphinx documentation generation into your Bazel build system. It provides a collection of utilities and extensions specifically designed to enhance documentation capabilities. 
## Features @@ -23,10 +23,10 @@ This module allows you to easily integrate Sphinx documentation generation into Add the module to your `MODULE.bazel` file: ```starlark -bazel_dep(name = "score_docs_as_code", version = "0.1.0") +bazel_dep(name = "score_docs_as_code", version = "0.2.5") ``` -And make sure to also add the S-core bazel registry to your `.bazelrc` file +And make sure to also add the S-core Bazel registry to your `.bazelrc` file ```starlark common --registry=https://raw.githubusercontent.com/eclipse-score/bazel_registry/main/ @@ -47,12 +47,12 @@ docs( source_dir = "", docs_targets = [ { - # For more detailed explenation look at the 'docs_targets' section + # For more detailed explanation look at the 'docs_targets' section "suffix": "", # This creates the normal 'incremental' and 'docs' target }, ], source_files_to_scan_for_needs_links = [ - # Note: you can add filegroups, globs, or entire targets here. + # Note: you can add file groups, globs, or entire targets here. "" ], ) @@ -75,8 +75,8 @@ extensions = [ # ... ``` -Make sure that your conf.py imports all of the extensions you want to enable.\ -For a full example look at [a simple example](examples/simple) +Make sure that your conf.py imports all of the extensions you want to enable. + #### 3. Run a documentation build: @@ -90,7 +90,12 @@ bazel build //path/to/BUILD-file:docs_latest # documentation at 'bazel-bin/ - `_build/` for incremental - `bazel-bin/bazel-bin//docs/_build/html` -______________________________________________________________________ +
+
+ +> ### *For the full example as well as more complex ones, check out the [examples directory](examples/)* + +--- ### Available Targets @@ -123,17 +128,10 @@ The `docs()` macro accepts the following arguments: | `source_files_to_scan_for_needs_links` | List of targets,globs,filegroups that the 'source_code_linker' should parse | No | `[]` | | `visibility` | Bazel visibility | No | `None` | -## Advanced Usage - -### Custom Configuration - -#### Docs-targets - -!! TODO !! -This should be filled out after the local mutli-repo tests are integrated and we have examples of different configurations +--- +--- ## Available Extensions - This module includes several custom Sphinx extensions to enhance your documentation: ### Score Layout Extension @@ -148,7 +146,7 @@ Consistent header styling across documentation pages. ### Score Metamodel -Validation and checking of documentation structure against a defined metamodel. +Validation and checking of documentation structure against a defined Metamodel. 
[Learn more](src/extensions/score_metamodel/README.md) ### Score Source Code Linker diff --git a/docs.bzl b/docs.bzl index a7cf6d1e..d65919d2 100644 --- a/docs.bzl +++ b/docs.bzl @@ -66,13 +66,6 @@ def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_ * only callable from 'docs/BUILD' """ - sphinx_build_binary( - name = "sphinx_build", - visibility = ["//visibility:public"], - data = ["@score_docs_as_code//src:docs_assets", "@score_docs_as_code//src:score_extension_files"], - deps = sphinx_requirements, - ) - # Parse source files for needs links # This needs to be created to generate a target, otherwise it won't execute as dependency for other macros parse_source_files_for_needs_links( @@ -86,6 +79,13 @@ def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_ suffix = "_" + target["suffix"] if target["suffix"] else "" external_needs_deps = target.get("target", []) external_needs_def = target.get("external_needs_info", []) + + sphinx_build_binary( + name = "sphinx_build" + suffix, + visibility = ["//visibility:public"], + data = ["@score_docs_as_code//src:docs_assets", "@score_docs_as_code//src:score_extension_files"] + external_needs_deps, + deps = sphinx_requirements, + ) _incremental( incremental_name = "incremental" + suffix, live_name = "live_preview" + suffix, @@ -97,10 +97,18 @@ def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_ ) _docs( name = "docs" + suffix, + suffix = suffix, format = "html", external_needs_deps = external_needs_deps, external_needs_def = external_needs_def, ) + _docs( + name = "docs_needs" + suffix, + suffix = suffix, + format = "needs", + external_needs_deps = external_needs_deps, + external_needs_def = external_needs_def, + ) # Virtual python environment for working on the documentation (esbonio). # incl. python support when working on conf.py and sphinx extensions. 
@@ -108,7 +116,6 @@ def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_ _ide_support() # creates 'needs.json' build target - _docs(name = "docs_needs", format = "needs") def _incremental(incremental_name = "incremental", live_name = "live_preview", source_dir = "docs", conf_dir = "docs", build_dir = "_build", extra_dependencies = list(), external_needs_deps = list(), external_needs_def = None): """ @@ -129,7 +136,7 @@ def _incremental(incremental_name = "incremental", live_name = "live_preview", s srcs = ["@score_docs_as_code//src:incremental.py"], deps = dependencies, # TODO: Figure out if we need all dependencies as data here or not. - data = [":score_source_code_parser", "@score_docs_as_code//src:plantuml", "@score_docs_as_code//src:docs_assets"] + dependencies, + data = [":score_source_code_parser", "@score_docs_as_code//src:plantuml", "@score_docs_as_code//src:docs_assets"] + dependencies + external_needs_deps, env = { "SOURCE_DIRECTORY": source_dir, "CONF_DIRECTORY": conf_dir, @@ -143,7 +150,7 @@ def _incremental(incremental_name = "incremental", live_name = "live_preview", s name = live_name, srcs = ["@score_docs_as_code//src:incremental.py"], deps = dependencies, - data = ["@score_docs_as_code//src:plantuml", "@score_docs_as_code//src:docs_assets"] + dependencies, + data = ["@score_docs_as_code//src:plantuml", "@score_docs_as_code//src:docs_assets"] + dependencies + external_needs_deps, env = { "SOURCE_DIRECTORY": source_dir, "CONF_DIRECTORY": conf_dir, @@ -160,7 +167,7 @@ def _ide_support(): reqs = sphinx_requirements, ) -def _docs(name = "docs", format = "html", external_needs_deps = list(), external_needs_def = list()): +def _docs(name = "docs", suffix = "", format = "html", external_needs_deps = list(), external_needs_def = list()): ext_needs_arg = "--define=external_needs_source=" + json.encode(external_needs_def) # Clean suffix used in all generated target names @@ -191,7 +198,7 @@ def _docs(name = "docs", format = "html", 
external_needs_deps = list(), external formats = [ format, ], - sphinx = ":sphinx_build", + sphinx = ":sphinx_build" + suffix, tags = [ "manual", ], diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 00000000..f444bd2c --- /dev/null +++ b/examples/README.md @@ -0,0 +1,70 @@ +## Examples + +These examples show how to use the 'docs' macro in order to build without outgoing links, establish links to a latest (main branch) or a (module) release. + + +| Folder | Description | +|-----------|-------------| +| `simple` | Build documentation without links to another sphinx documentations | +| `linking-latest` | Build documentation with links to another documentation via URL | +| `linking-release` | Build documentation with links to another documentation via MODULE import | + +--- +In order to enable linking against an imported Modules needs make sure you have imported it into the MODULE.bazel via +`bazel_dep(...)`. + +Then have a look how the BUILD file is setup, and mimic it with the changes needed for your specific case. +Underneath are some explanations regarding the different key-value pairs and their function. + +Here is a more general overview + +```python +load("@score_docs_as_code//docs.bzl", "docs") + +docs( + conf_dir = "", + source_dir = "", + docs_targets = [ + { + "suffix": "", # 'release' for example + "target": [""], # '@score_platform//docs:docs_needs + "external_needs_info": [ + { + "base_url": "", + "json_path/url": "", # local_path OR a URL + "version": "", + }, + ], + }, + ], + source_files_to_scan_for_needs_links = [ + # Note: you can add file groups, globs, or entire targets here. + "" + ], +) +``` + +`docs_targets` is a list of dictionaries, it accepts the following key-value pairs. + +| Parameter | Description | Required | Default | +|-----------|-------------|----------|---------| +| `suffix` | suffix that gets appended to target definitions. E.g. 
`release` | Yes | '' |
+| `target` | Target to be built/executed beforehand in order to build 'needs.json'. E.g. `@score_platform//docs:docs_needs` | No | [] |
+| `external_needs_info` | List of dictionaries that contain all available builds | Yes | - |
+| `base_url` | URL of the documentation that external needs of the following json should point to | Yes | - |
+| `json_path/json_url` | A local relative path or URL that points to the needs.json file | Yes | '' |
+| `id_prefix` | Prefix that all external IDs from this needs.json will get. Will be in UPPERCASE | No | '' |
+
+The `external_needs_info` is based on external needs, which can be explored in more detail [here](https://sphinx-needs.readthedocs.io/en/latest/configuration.html#needs-external-needs)
+
+---
+
+The targets available in the examples are
+```python
+bazel build //examples/linking-release:docs_release
+bazel run //examples/linking-release:incremental_release
+bazel run //examples/linking-release:live_preview_release
+```
diff --git a/examples/linking-both/BUILD b/examples/linking-both/BUILD
new file mode 100644
index 00000000..5ec53772
--- /dev/null
+++ b/examples/linking-both/BUILD
@@ -0,0 +1,54 @@
+# *******************************************************************************
+# Copyright (c) 2025 Contributors to the Eclipse Foundation
+#
+# See the NOTICE file(s) distributed with this work for additional
+# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +load("@aspect_rules_py//py:defs.bzl", "py_library") +load("//:docs.bzl", "docs") + +# Creates all documentation targets: +# - `docs:incremental` for building docs incrementally at runtime +# - `docs:live_preview` for live preview in the browser without an IDE +# - `docs:ide_support` for creating python virtualenv for IDE support +# - `docs:docs` for building documentation at build-time + +docs( + conf_dir = "examples/linking-both", + docs_targets = [ + { + "suffix": "latest", # latest main branch documentation build + "external_needs_info": [ + { + "base_url": "https://eclipse-score.github.io/score/main", + "json_url": "https://eclipse-score.github.io/score/main/needs.json", + "version": "0.1", + "id_prefix": "score_", + }, + ], + }, + { + "suffix": "release", # The version imported from MODULE.bazel + "target": ["@score_platform//docs:docs_needs"], + "external_needs_info": [ + { + "base_url": "https://eclipse-score.github.io/score/main", + "json_path": "/score_platform~/docs/docs_needs/_build/needs/needs.json", + "version": "0.1", + "id_prefix": "score_", + }, + ], + }, + ], + source_dir = "examples/linking-both", + source_files_to_scan_for_needs_links = [ + "//src:score_extension_files", + ], +) diff --git a/docs/conf.py b/examples/linking-both/conf.py similarity index 96% rename from docs/conf.py rename to examples/linking-both/conf.py index fbef18a8..5862fb81 100644 --- a/docs/conf.py +++ b/examples/linking-both/conf.py @@ -1,5 +1,5 @@ # ******************************************************************************* -# Copyright (c) 2024 Contributors to the Eclipse Foundation +# Copyright (c) 2025 Contributors to the Eclipse Foundation # # See the 
NOTICE file(s) distributed with this work for additional # information regarding copyright ownership. diff --git a/docs/index.rst b/examples/linking-both/index.rst similarity index 80% rename from docs/index.rst rename to examples/linking-both/index.rst index 9193ffaa..fa856283 100644 --- a/docs/index.rst +++ b/examples/linking-both/index.rst @@ -24,6 +24,7 @@ This is a simple example of a documentation page using the `docs` tool. :reqtype: Functional Some content to make sure we also can render this - This is a link to an external need inside the 'score' documentation - :need:`SCORE_gd_req__req__attr_safety` + This is a link to an external need inside the 'score' documentation. + :need:`SCORE_gd_req__req__attr_safety`. + Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ diff --git a/examples/linking-latest/BUILD b/examples/linking-latest/BUILD new file mode 100644 index 00000000..f3663c0e --- /dev/null +++ b/examples/linking-latest/BUILD @@ -0,0 +1,42 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +load("@aspect_rules_py//py:defs.bzl", "py_library") +load("//:docs.bzl", "docs") + +# Creates all documentation targets: +# - `docs:incremental` for building docs incrementally at runtime +# - `docs:live_preview` for live preview in the browser without an IDE +# - `docs:ide_support` for creating python virtualenv for IDE support +# - `docs:docs` for building documentation at build-time + +docs( + conf_dir = "examples/linking-latest", + docs_targets = [ + { + "suffix": "latest", # latest main branch documentation build + "external_needs_info": [ + { + "base_url": "https://eclipse-score.github.io/score/main", + "json_url": "https://eclipse-score.github.io/score/main/needs.json", + "version": "0.1", + "id_prefix": "score_", + }, + ], + }, + ], + source_dir = "examples/linking-latest", + source_files_to_scan_for_needs_links = [ + "//src:score_extension_files", + ], +) diff --git a/examples/linking-latest/conf.py b/examples/linking-latest/conf.py new file mode 100644 index 00000000..5862fb81 --- /dev/null +++ b/examples/linking-latest/conf.py @@ -0,0 +1,54 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +# Configuration file for the Sphinx documentation builder. +# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +project = "Simple Example Project" +author = "S-CORE" +version = "0.1" + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + + +extensions = [ + "sphinx_design", + "sphinx_needs", + "sphinxcontrib.plantuml", + "score_plantuml", + "score_metamodel", + "score_draw_uml_funcs", + "score_source_code_linker", + "score_layout", +] + +exclude_patterns = [ + # The following entries are not required when building the documentation via 'bazel + # build //docs:docs', as that command runs in a sandboxed environment. However, when + # building the documentation via 'bazel run //docs:incremental' or esbonio, these + # entries are required to prevent the build from failing. + "bazel-*", + ".venv_docs", +] + +templates_path = ["templates"] + +# Enable numref +numfig = True diff --git a/examples/linking-latest/index.rst b/examples/linking-latest/index.rst new file mode 100644 index 00000000..fa856283 --- /dev/null +++ b/examples/linking-latest/index.rst @@ -0,0 +1,30 @@ +.. 
+ # ******************************************************************************* + # Copyright (c) 2024 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + +Hello World +================= +This is a simple example of a documentation page using the `docs` tool. + +.. stkh_req:: TestTitle + :id: stkh_req__docs__test_requirement + :status: valid + :safety: QM + :rationale: A simple requirement we need to enable a documentation build + :reqtype: Functional + + Some content to make sure we also can render this + This is a link to an external need inside the 'score' documentation. + :need:`SCORE_gd_req__req__attr_safety`. + Note how it starts with the defined prefix but in UPPERCASE. 
This comes from sphinx-needs, `see here `_ + diff --git a/docs/BUILD b/examples/linking-release/BUILD similarity index 72% rename from docs/BUILD rename to examples/linking-release/BUILD index 8b8ab40d..5265b8c5 100644 --- a/docs/BUILD +++ b/examples/linking-release/BUILD @@ -11,32 +11,36 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -load("@aspect_rules_py//py:defs.bzl", "py_library") -load("//:docs.bzl", "docs") - # Creates all documentation targets: # - `docs:incremental` for building docs incrementally at runtime # - `docs:live_preview` for live preview in the browser without an IDE # - `docs:ide_support` for creating python virtualenv for IDE support # - `docs:docs` for building documentation at build-time +load("@aspect_rules_py//py:defs.bzl", "py_library") +load("//:docs.bzl", "docs") + docs( - conf_dir = "docs", + conf_dir = "examples/linking-release", docs_targets = [ { - "suffix": "latest", # latest main branch documentation build + "suffix": "release", # The version imported from MODULE.bazel + "target": ["@score_platform//docs:docs_needs"], "external_needs_info": [ { "base_url": "https://eclipse-score.github.io/score/main", - "json_url": "https://eclipse-score.github.io/score/main/needs.json", + "json_path": "/score_platform~/docs/docs_needs/_build/needs/needs.json", "version": "0.1", "id_prefix": "score_", }, ], }, ], - source_dir = "docs", - source_files_to_scan_for_needs_links = [], + source_dir = "examples/linking-release", + source_files_to_scan_for_needs_links = [ + # Note: you can add filegroups, globs, or entire targets here. 
+ "//src:score_extension_files", + ], ) # ╭───────────────────────────────────────╮ @@ -45,13 +49,3 @@ docs( # ╰───────────────────────────────────────╯ # { -# "suffix": "release", # The version imported from MODULE.bazel -# "target": ["@score_platform//docs:docs"], -# "external_needs_info": [ -# { -# "base_url": "https://eclipse-score.github.io/score/pr-980/", -# "json_path": "/score_platform~/docs/docs/_build/html/needs.json", -# "version": "0.1", -# }, -# ], -# }, diff --git a/examples/linking-release/conf.py b/examples/linking-release/conf.py new file mode 100644 index 00000000..5862fb81 --- /dev/null +++ b/examples/linking-release/conf.py @@ -0,0 +1,54 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +# Configuration file for the Sphinx documentation builder. 
+# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +project = "Simple Example Project" +author = "S-CORE" +version = "0.1" + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + + +extensions = [ + "sphinx_design", + "sphinx_needs", + "sphinxcontrib.plantuml", + "score_plantuml", + "score_metamodel", + "score_draw_uml_funcs", + "score_source_code_linker", + "score_layout", +] + +exclude_patterns = [ + # The following entries are not required when building the documentation via 'bazel + # build //docs:docs', as that command runs in a sandboxed environment. However, when + # building the documentation via 'bazel run //docs:incremental' or esbonio, these + # entries are required to prevent the build from failing. + "bazel-*", + ".venv_docs", +] + +templates_path = ["templates"] + +# Enable numref +numfig = True diff --git a/examples/linking-release/index.rst b/examples/linking-release/index.rst new file mode 100644 index 00000000..d2d30725 --- /dev/null +++ b/examples/linking-release/index.rst @@ -0,0 +1,30 @@ +.. + # ******************************************************************************* + # Copyright (c) 2024 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. 
+ # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + +Hello World +================= +This is a simple example of a documentation page using the `docs` tool. + +.. stkh_req:: TestTitle + :id: stkh_req__docs__test_requirement + :status: valid + :safety: QM + :rationale: A simple requirement we need to enable a documentation build + :reqtype: Functional + + Some content to make sure we also can render this + This is a link to an external need inside the 'score' documentation + :need:`SCORE_stkh_req__overall_goals__reuse_of_app_soft` + Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ + diff --git a/src/BUILD b/src/BUILD index ecc9d08b..e0e71ad1 100644 --- a/src/BUILD +++ b/src/BUILD @@ -122,7 +122,7 @@ filegroup( name = "score_extension_files", srcs = glob( [ - "**", + "src/**", ], exclude = [ "**/test/**", diff --git a/src/extensions/README.md b/src/extensions/README.md index e26a9c7a..ff15eb23 100644 --- a/src/extensions/README.md +++ b/src/extensions/README.md @@ -120,7 +120,7 @@ In addition, you can provide anything else that you might need to test your spec For examples on how to use and implement the sphinx testing app, you can check out the [source code linker](docs/_tooling/score_source_code_linker/tests) -Find everything related to testing within bazel and how to add your test suite to it, [see here](/tools/testing/pytest/README.md) +Find everything related to testing within Bazel and how to add your test suite to it, [see here](/tools/testing/pytest/README.md) Also look at already built extensions inside S-CORE. 
They can be found in their respective folders: - [score_metamodel](/docs/_tooling/extensions/score_metamodel/README.md) diff --git a/src/extensions/score_header_service/README.md b/src/extensions/score_header_service/README.md index a636a8e7..24b6a0c6 100644 --- a/src/extensions/score_header_service/README.md +++ b/src/extensions/score_header_service/README.md @@ -23,12 +23,12 @@ GH_TOKEN: Github access token GITHUB_REF_NAME: Github reference name (/merge) GITHUB_REPOSITORY: Github repository / -## Excecution +## Execution The document generation has to be executed as follows: GH_TOKEN=$GH_TOKEN bazel run //docs:incremental -Sphinx cannot acess the environment variables when started via bazel build. +Sphinx cannot access the environment variables when started via Bazel build. If extraction method **Merge commit info** is used the document generation can be executed as follows: diff --git a/src/extensions/score_metamodel/README.md b/src/extensions/score_metamodel/README.md index 9819963a..4b868da1 100644 --- a/src/extensions/score_metamodel/README.md +++ b/src/extensions/score_metamodel/README.md @@ -1,6 +1,6 @@ # score_metamodel -This extension provides the metamodel and corresponding checks of the SCORE +This extension provides the Metamodel and corresponding checks of the SCORE project as a Sphinx extension. See [../README](../README.md) for more information on why we use extensions. @@ -49,7 +49,7 @@ There are multiple ways to solve this issue, for example via https://github.com/useblocks/sphinx-needs/pull/1248 However we chose to implement a custom warning mechanism, as we needed a -more elaborate solution for our use case anyway. Calling the checks ourselfes +more elaborate solution for our use case anyway. Calling the checks ourselves seems to be the most flexible solution. 
Technically local checks will be called with a single need, while graph-based diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 2e8321ea..775d6e44 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -104,6 +104,7 @@ def is_check_enabled(check: local_check_function | graph_check_function): # Graph-Based checks: These warnings require a graph of all other needs to # be checked. + needs = list(needs_all_needs.values()) for check in [c for c in graph_checks if is_check_enabled(c)]: logger.debug(f"Running graph check {check} for all needs") diff --git a/src/extensions/score_source_code_linker/README.md b/src/extensions/score_source_code_linker/README.md index 12c72197..2304e38e 100644 --- a/src/extensions/score_source_code_linker/README.md +++ b/src/extensions/score_source_code_linker/README.md @@ -7,7 +7,7 @@ In a second step this intermediary file is parsed during sphinx build. If a requ ## Implementation Components ### Bazel Integration -The extension uses two main components to integrate with bazel: +The extension uses two main components to integrate with Bazel: 1. 
`collect_source_files` - Processes all files from provided deps From 995616e60f3c30e0888ae654f1229ddfa6fe14e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Thu, 22 May 2025 22:22:42 +0200 Subject: [PATCH 028/231] Update version to 0.2.5 (#41) --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index 5d3c5a77..023e8d53 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.2.4", + version = "0.2.5", compatibility_level = 0, ) From 503470ca58928c7a954ef6ed4e6e6d6453250ad5 Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Mon, 26 May 2025 00:18:58 +0200 Subject: [PATCH 029/231] Integrate renovatebot into docs-as-code (#43) --- .github/workflows/renovate.yml | 31 +++++++++++++++++++++++++++++++ renovate.json | 15 +++++++++++++++ 2 files changed, 46 insertions(+) create mode 100644 .github/workflows/renovate.yml create mode 100644 renovate.json diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml new file mode 100644 index 00000000..2c8e5bdb --- /dev/null +++ b/.github/workflows/renovate.yml @@ -0,0 +1,31 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +name: Run renovate + +on: + schedule: + - cron: "0 19 * * *" # Every day at 9:00 PM Europe time + workflow_dispatch: {} # Allow manual runs too + +jobs: + renovate: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@@v4.2.2 + + - name: Run renovate + uses: renovatebot/github-action@v42.0.2 + with: + token: ${{ secrets.RENOVATE_TOKEN }} diff --git a/renovate.json b/renovate.json new file mode 100644 index 00000000..ce318deb --- /dev/null +++ b/renovate.json @@ -0,0 +1,15 @@ +{ + "extends": ["config:base"], + "enabledManagers": ["bazel"], + "regexManagers": [ + { + "fileMatch": ["^MODULE\\.bazel$"], + "matchStrings": [ + "bazel_dep\\(name\\s*=\\s*\"(?[^\"]+)\",\\s*version\\s*=\\s*\"(?[^\"]+)\"" + ], + "datasourceTemplate": "github-releases", + "versioningTemplate": "semver", + "packageNameTemplate": "{{depName}}" + } + ] + } From 9791db5759b249c696a3add048e2988381e22852 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Mon, 26 May 2025 07:57:27 +0200 Subject: [PATCH 030/231] fix typo (#44) Signed-off-by: Alexander Lanin --- .github/workflows/renovate.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml index 2c8e5bdb..ecfa40e4 100644 --- a/.github/workflows/renovate.yml +++ b/.github/workflows/renovate.yml @@ -23,7 +23,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@@v4.2.2 + uses: actions/checkout@v4.2.2 - name: Run renovate uses: renovatebot/github-action@v42.0.2 From 14793ad5909bd7110ef22c5eef5d9e9b41ec435b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= 
Date: Mon, 26 May 2025 09:05:46 +0200 Subject: [PATCH 031/231] Add bugfix for new module json_encoding quirks (#42) * Add bugfix for new module json_encoding quirks --- MODULE.bazel | 6 +++--- examples/linking-both/BUILD | 2 -- src/extensions/score_metamodel/__init__.py | 6 ++++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 023e8d53..3c7184d5 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.2.5", + version = "0.2.6", compatibility_level = 0, ) @@ -55,7 +55,7 @@ pip.parse( use_repo(pip, "pip_process") # Additional Python rules provided by aspect, e.g. an improved version of -bazel_dep(name = "aspect_rules_py", version = "1.0.0") +bazel_dep(name = "aspect_rules_py", version = "1.4.0") bazel_dep(name = "buildifier_prebuilt", version = "7.3.1") ############################################################################### @@ -63,7 +63,7 @@ bazel_dep(name = "buildifier_prebuilt", version = "7.3.1") # Generic linting and formatting rules # ############################################################################### -bazel_dep(name = "aspect_rules_lint", version = "1.3.1") +bazel_dep(name = "aspect_rules_lint", version = "1.4.2") # PlantUML for docs bazel_dep(name = "rules_java", version = "8.6.3") diff --git a/examples/linking-both/BUILD b/examples/linking-both/BUILD index 5ec53772..b0863bed 100644 --- a/examples/linking-both/BUILD +++ b/examples/linking-both/BUILD @@ -29,7 +29,6 @@ docs( { "base_url": "https://eclipse-score.github.io/score/main", "json_url": "https://eclipse-score.github.io/score/main/needs.json", - "version": "0.1", "id_prefix": "score_", }, ], @@ -41,7 +40,6 @@ docs( { "base_url": "https://eclipse-score.github.io/score/main", "json_path": "/score_platform~/docs/docs_needs/_build/needs/needs.json", - "version": "0.1", "id_prefix": "score_", }, ], diff --git a/src/extensions/score_metamodel/__init__.py 
b/src/extensions/score_metamodel/__init__.py index 775d6e44..4f998c02 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -266,10 +266,12 @@ def default_options() -> list[str]: def parse_external_needs_sources(app: Sphinx, config): - # HACK: mabye there is a nicer way for this + # HACK: maybe there is a nicer way for this if app.config.external_needs_source not in ["[]", ""]: x = None - x = json.loads(app.config.external_needs_source) + # NOTE: Due to upgrades in modules, encoding changed. Need to clean string in order to read it right again. + clean_str = app.config.external_needs_source.replace('\\"', "") + x = json.loads(clean_str) if r := os.getenv("RUNFILES_DIR"): if x[0].get("json_path", None): for a in x: From bb30a98c6a3325b16e0d5496287321d10c92587e Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Tue, 27 May 2025 10:29:39 +0200 Subject: [PATCH 032/231] fix RenovateBot not detecting updates issue. (#48) --- renovate.json | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/renovate.json b/renovate.json index ce318deb..aee0b0ad 100644 --- a/renovate.json +++ b/renovate.json @@ -1,15 +1,14 @@ { - "extends": ["config:base"], - "enabledManagers": ["bazel"], - "regexManagers": [ - { - "fileMatch": ["^MODULE\\.bazel$"], - "matchStrings": [ - "bazel_dep\\(name\\s*=\\s*\"(?[^\"]+)\",\\s*version\\s*=\\s*\"(?[^\"]+)\"" - ], - "datasourceTemplate": "github-releases", - "versioningTemplate": "semver", - "packageNameTemplate": "{{depName}}" - } - ] - } + "extends": ["config:base"], + "regexManagers": [ + { + "fileMatch": ["^MODULE\\.bazel$"], + "matchStrings": [ + "bazel_dep\\(name\\s*=\\s*\"(?[^\"]+)\",\\s*version\\s*=\\s*\"(?[^\"]+)\"" + ], + "datasourceTemplate": "github-releases", + "versioningTemplate": "semver", + "packageNameTemplate": "{{depName}}" + } + ] +} From 3650fcfe55206bd812ffd44659f7030afba07d24 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Wed, 28 May 2025 13:27:18 +0200 Subject: [PATCH 033/231] Bugfix external_prefix regex check (#50) Remove defined external 'id_prefixes' from to be checked links. Added another example to have an rst file inside a folder as well. Added some explanation to descriptions of needs --- MODULE.bazel | 2 +- examples/linking-both/index.rst | 26 ++++++++++- examples/linking-both/testing/test.rst | 43 +++++++++++++++++++ src/extensions/score_metamodel/__init__.py | 3 ++ .../score_metamodel/checks/check_options.py | 17 +++++++- .../checks/id_contains_feature.py | 8 +++- 6 files changed, 94 insertions(+), 5 deletions(-) create mode 100644 examples/linking-both/testing/test.rst diff --git a/MODULE.bazel b/MODULE.bazel index 3c7184d5..d03e1ed2 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -92,7 +92,7 @@ bazel_dep(name = "score_python_basics", version = "0.3.1") bazel_dep(name = "score_cr_checker", version = "0.2.2") # This is only needed to build the examples. -bazel_dep(name = "score_platform", version = "0.1.0") +bazel_dep(name = "score_platform", version = "0.1.1") # Grab dash bazel_dep(name = "score_dash_license_checker", version = "0.1.1") diff --git a/examples/linking-both/index.rst b/examples/linking-both/index.rst index fa856283..ce3bfe96 100644 --- a/examples/linking-both/index.rst +++ b/examples/linking-both/index.rst @@ -12,12 +12,20 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* + +.. toctree:: + :maxdepth: 1 + :glob: + + testing/test + + Hello World ================= This is a simple example of a documentation page using the `docs` tool. .. stkh_req:: TestTitle - :id: stkh_req__docs__test_requirement + :id: stkh_req__index__test_requirement :status: valid :safety: QM :rationale: A simple requirement we need to enable a documentation build @@ -25,6 +33,20 @@ This is a simple example of a documentation page using the `docs` tool. 
Some content to make sure we also can render this This is a link to an external need inside the 'score' documentation. - :need:`SCORE_gd_req__req__attr_safety`. + :need:`SCORE_gd_req__req__attr_safety` Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ + + +.. feat_req:: Some Title + :id: feat_req__index__some_title + :reqtype: Process + :security: YES + :safety: ASIL_D + :satisfies: SCORE_stkh_req__overall_goals__reuse_of_app_soft + :status: invalid + + With this requirement we can check if the removal of the prefix is working correctly. + It should remove id_prefix (SCORE _) as it's defined inside the BUILD file and remove it before it checks the leftover value + against the allowed defined regex in the metamodel + diff --git a/examples/linking-both/testing/test.rst b/examples/linking-both/testing/test.rst new file mode 100644 index 00000000..c4ec5f40 --- /dev/null +++ b/examples/linking-both/testing/test.rst @@ -0,0 +1,43 @@ +.. + # ******************************************************************************* + # Copyright (c) 2024 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + +Inside a folder +================= +This example will help catch things and bugs when rst's are defined inside a folder. + +.. stkh_req:: TestTitle + :id: stkh_req__testing__test_requirement + :status: valid + :safety: QM + :rationale: A simple requirement we need to enable a documentation build + :reqtype: Functional + + Some content to make sure we also can render this. 
+ This is a link to an external need inside the 'score' documentation. + :need:`SCORE_gd_req__req__attr_safety` + Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ + + +.. feat_req:: Some Title + :id: feat_req__testing__some_title + :reqtype: Process + :security: YES + :safety: ASIL_D + :satisfies: SCORE_stkh_req__overall_goals__reuse_of_app_soft + :status: invalid + + With this requirement we can check if the removal of the prefix is working correctly. + It should remove id_prefix (SCORE _) as it's defined inside the BUILD file and remove it before it checks the leftover value + against the 'allowed' defined regex in the metamodel + diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 4f998c02..7516e83d 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -279,10 +279,13 @@ def parse_external_needs_sources(app: Sphinx, config): if "json_path" in a.keys(): a["json_path"] = r + a["json_path"] app.config.needs_external_needs = x + # Making the prefixes uppercase here to match sphinx_needs, as it does this internally too. 
+ app.config.allowed_external_prefixes = [z["id_prefix"].upper() for z in x] def setup(app: Sphinx) -> dict[str, str | bool]: app.add_config_value("external_needs_source", "", rebuild="env") + app.add_config_value("allowed_external_prefixes", [], rebuild="env") app.config.needs_id_required = True app.config.needs_id_regex = "^[A-Za-z0-9_-]{6,}" diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index ea1d0d9e..fc9bfd10 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -15,6 +15,7 @@ from sphinx.application import Sphinx from sphinx_needs.config import NeedType from sphinx_needs.data import NeedsInfoType +from collections.abc import Generator from score_metamodel import ( CheckLogger, @@ -40,6 +41,7 @@ def validate_fields( fields: dict[str, str], required: bool, field_type: str, + allowed_prefixes: list[str], ): """ Validates that fields (options or links) in a need match their expected patterns. @@ -50,9 +52,14 @@ def validate_fields( :param required: Whether the fields are required (True) or optional (False). :param field_type: A string indicating the field type ('option' or 'link'). """ + + def remove_prefix(values: list[str], prefixes: list[str]) -> list[str]: + # Memory and allocation wise better to use a generator here. + # Removes any prefix allowed by configuration, if prefix is there. + return [word.removeprefix(p) for word in values for p in prefixes] + for field, pattern in fields.items(): raw_value: str | list[str] | None = need.get(field, None) - if raw_value in [None, [], ""]: if required: log.warning_for_need( @@ -69,6 +76,10 @@ def validate_fields( else: values = [str(raw_value)] + # The filter ensures that the function is only called when needed. 
+ if field_type == "link" and allowed_prefixes: + values = remove_prefix(values, allowed_prefixes) + for value in values: try: if not re.match(pattern, value): @@ -123,6 +134,9 @@ def check_options( ], } + # If undefined this is an empty list + allowed_prefixes = app.config.allowed_external_prefixes + for field_type, check_fields in checking_dict.items(): for field_values, is_required in check_fields: validate_fields( @@ -131,6 +145,7 @@ def check_options( field_values, required=is_required, field_type=field_type, + allowed_prefixes=allowed_prefixes, ) diff --git a/src/extensions/score_metamodel/checks/id_contains_feature.py b/src/extensions/score_metamodel/checks/id_contains_feature.py index 63b14c06..02825189 100644 --- a/src/extensions/score_metamodel/checks/id_contains_feature.py +++ b/src/extensions/score_metamodel/checks/id_contains_feature.py @@ -38,7 +38,13 @@ def id_contains_feature(app: Sphinx, need: NeedsInfoType, log: CheckLogger): # Get the part of the string after the first two underscores: the path feature = parts[1] - docname = os.path.dirname(str(need.get("docname", ""))) + dir_docname = os.path.dirname(str(need.get("docname", ""))) + + # If the 'rst' file is not in a directory, the above expression will be "". + # Even if the need itself has a docname. That's why we have this logic here. + # NOTE: This does not match the process requirements + docname = dir_docname if dir_docname else need.get("docname", "") + if feature not in docname: log.warning_for_option( need, "id", f"Feature '{feature}' not in path '{docname}'." 
From e40c00373174c8ef8e6168f0977a65ecaacc1c4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Wed, 28 May 2025 16:31:38 +0200 Subject: [PATCH 034/231] publish documentation to github pages (#55) --- .github/workflows/docs-cleanup.yml | 29 +++++ .github/workflows/docs.yml | 42 +++++++ docs.bzl | 1 + docs/BUILD | 52 +++++++++ docs/conf.py | 62 +++++++++++ .../docs-as-code/extensions}/data_flow.png | Bin .../extensions/extension_guide.md | 9 +- .../docs-as-code/extensions/header_service.md | 1 + docs/docs-as-code/extensions/index.rst | 75 +++++++++++++ .../docs-as-code/extensions/metamodel.md | 3 +- .../extensions/rst_filebased_testing.md | 1 + .../extensions/source_code_linker.md | 1 + .../docs-as-code/getting_started.md | 24 ++-- docs/docs-as-code/index.rst | 44 ++++++++ docs/example/index.rst | 56 ++++++++++ docs/example/testing/index.rst | 43 ++++++++ docs/faq/index.rst | 22 ++++ docs/index.rst | 55 +++++++++ src/requirements.in | 1 + src/requirements.txt | 104 +++++++++++++++++- 20 files changed, 604 insertions(+), 21 deletions(-) create mode 100644 .github/workflows/docs-cleanup.yml create mode 100644 .github/workflows/docs.yml create mode 100644 docs/BUILD create mode 100644 docs/conf.py rename {src/extensions/score_source_code_linker => docs/docs-as-code/extensions}/data_flow.png (100%) rename src/extensions/README.md => docs/docs-as-code/extensions/extension_guide.md (91%) rename src/extensions/score_header_service/README.md => docs/docs-as-code/extensions/header_service.md (99%) create mode 100644 docs/docs-as-code/extensions/index.rst rename src/extensions/score_metamodel/README.md => docs/docs-as-code/extensions/metamodel.md (94%) rename src/extensions/score_metamodel/tests/README.md => docs/docs-as-code/extensions/rst_filebased_testing.md (98%) rename src/extensions/score_source_code_linker/README.md => docs/docs-as-code/extensions/source_code_linker.md (99%) rename README.md => docs/docs-as-code/getting_started.md (90%) 
create mode 100644 docs/docs-as-code/index.rst create mode 100644 docs/example/index.rst create mode 100644 docs/example/testing/index.rst create mode 100644 docs/faq/index.rst create mode 100644 docs/index.rst diff --git a/.github/workflows/docs-cleanup.yml b/.github/workflows/docs-cleanup.yml new file mode 100644 index 00000000..cfa4ae24 --- /dev/null +++ b/.github/workflows/docs-cleanup.yml @@ -0,0 +1,29 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +name: Documentation Cleanup + +permissions: + contents: write + pages: write + id-token: write + +on: + schedule: + - cron: '0 0 * * *' # Runs every day at midnight UTC + +jobs: + docs-cleanup: + uses: eclipse-score/cicd-workflows/.github/workflows/docs-cleanup.yml@main + secrets: + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 00000000..20efbaf7 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,42 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +name: Documentation + +permissions: + contents: write + pages: write + pull-requests: write + id-token: write + +on: + pull_request_target: + types: [opened, reopened, synchronize] # Allows forks to trigger the docs build + push: + branches: + - main + merge_group: + types: [checks_requested] + +jobs: + build-docs: + uses: eclipse-score/cicd-workflows/.github/workflows/docs.yml@main + permissions: + contents: write + pages: write + pull-requests: write + id-token: write + + with: + bazel-target: "//docs:github_pages__release" + retention-days: 3 diff --git a/docs.bzl b/docs.bzl index d65919d2..9e9f95bf 100644 --- a/docs.bzl +++ b/docs.bzl @@ -178,6 +178,7 @@ def _docs(name = "docs", suffix = "", format = "html", external_needs_deps = lis srcs = native.glob([ "**/*.png", "**/*.svg", + "**/*.md", "**/*.rst", "**/*.html", "**/*.css", diff --git a/docs/BUILD b/docs/BUILD new file mode 100644 index 00000000..56c0f2c8 --- /dev/null +++ b/docs/BUILD @@ -0,0 +1,52 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +load("@aspect_rules_py//py:defs.bzl", "py_library") +load("//:docs.bzl", "docs") + +# Creates all documentation targets: +# - `docs:incremental` for building docs incrementally at runtime +# - `docs:live_preview` for live preview in the browser without an IDE +# - `docs:ide_support` for creating python virtualenv for IDE support +# - `docs:docs` for building documentation at build-time + +docs( + conf_dir = "docs", + docs_targets = [ + { + "suffix": "latest", # latest main branch documentation build + "external_needs_info": [ + { + "base_url": "https://eclipse-score.github.io/score/main", + "json_url": "https://eclipse-score.github.io/score/main/needs.json", + "id_prefix": "score_", + }, + ], + }, + { + "suffix": "release", # The version imported from MODULE.bazel + "target": ["@score_platform//docs:docs_needs"], + "external_needs_info": [ + { + "base_url": "https://eclipse-score.github.io/score/main", + "json_path": "/score_platform~/docs/docs_needs/_build/needs/needs.json", + "id_prefix": "score_", + }, + ], + }, + ], + source_dir = "docs", + source_files_to_scan_for_needs_links = [ + "//src:score_extension_files", + ], +) diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 00000000..bac0a5ed --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,62 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +# Configuration file for the Sphinx documentation builder. +# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +project = "Score Docs-as-Code" +author = "S-CORE" +version = "0.1" + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + + +extensions = [ + "sphinx_design", + "sphinx_needs", + "myst_parser", + "sphinxcontrib.plantuml", + "score_plantuml", + "score_metamodel", + "score_draw_uml_funcs", + "score_source_code_linker", + "score_layout", +] + +exclude_patterns = [ + # The following entries are not required when building the documentation via 'bazel + # build //docs:docs', as that command runs in a sandboxed environment. However, when + # building the documentation via 'bazel run //docs:incremental' or esbonio, these + # entries are required to prevent the build from failing. 
+ "bazel-*", + ".venv_docs", +] + +# Enable markdown rendering +source_suffix = { + ".rst": "restructuredtext", + ".md": "markdown", +} + +templates_path = ["templates"] + + +# Enable numref +numfig = True diff --git a/src/extensions/score_source_code_linker/data_flow.png b/docs/docs-as-code/extensions/data_flow.png similarity index 100% rename from src/extensions/score_source_code_linker/data_flow.png rename to docs/docs-as-code/extensions/data_flow.png diff --git a/src/extensions/README.md b/docs/docs-as-code/extensions/extension_guide.md similarity index 91% rename from src/extensions/README.md rename to docs/docs-as-code/extensions/extension_guide.md index ff15eb23..74e14c2f 100644 --- a/src/extensions/README.md +++ b/docs/docs-as-code/extensions/extension_guide.md @@ -1,3 +1,4 @@ +(extension-guide)= # Guide to Creating a Sphinx Extension This document will help you with the most important building blocks and provide all information needed to start writing your own Sphinx extension in the S-CORE project. @@ -118,13 +119,13 @@ To create a Sphinx testing app, you need the same components as a normal Sphinx In addition, you can provide anything else that you might need to test your specific extension. -For examples on how to use and implement the sphinx testing app, you can check out the [source code linker](docs/_tooling/score_source_code_linker/tests) +For examples on how to use and implement the sphinx testing app, you can check out the [source code linker](https://github.com/eclipse-score/docs-as-code/tree/main/src/extensions/score_source_code_linker/) -Find everything related to testing within Bazel and how to add your test suite to it, [see here](/tools/testing/pytest/README.md) +Find everything related to testing within Bazel and how to add your test suite to it, [see here](https://github.com/eclipse-score/tooling/blob/main/python_basics/score_pytest/README.md) Also look at already built extensions inside S-CORE. 
They can be found in their respective folders: -- [score_metamodel](/docs/_tooling/extensions/score_metamodel/README.md) -- [score_draw_uml_funcs](/docs/_tooling/extensions/score_draw_uml_funcs/__init__.py) +- [score_metamodel](https://github.com/eclipse-score/docs-as-code/tree/main/src/extensions/score_metamodel) +- [score_draw_uml_funcs](https://github.com/eclipse-score/docs-as-code/tree/main/src/extensions/score_draw_uml_funcs) ## Further Resources diff --git a/src/extensions/score_header_service/README.md b/docs/docs-as-code/extensions/header_service.md similarity index 99% rename from src/extensions/score_header_service/README.md rename to docs/docs-as-code/extensions/header_service.md index 24b6a0c6..70f8bbc8 100644 --- a/src/extensions/score_header_service/README.md +++ b/docs/docs-as-code/extensions/header_service.md @@ -1,3 +1,4 @@ +(header-service)= # Automatic Header Generation Service ## Purpose diff --git a/docs/docs-as-code/extensions/index.rst b/docs/docs-as-code/extensions/index.rst new file mode 100644 index 00000000..38e9f17a --- /dev/null +++ b/docs/docs-as-code/extensions/index.rst @@ -0,0 +1,75 @@ +.. # ******************************************************************************* + # Copyright (c) 2025 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + +.. _extensions: + + +========== +Extensions +========== + +Hello there + + +.. grid:: 1 1 3 3 + :class-container: score-grid + + .. grid-item-card:: + + Metamodel + ^^^ + Learn more about our Metamodel extension and what this extension takes care of. 
+ :ref:`Metamodel Extension`. + + .. grid-item-card:: + + Header Service + ^^^ + Learn about the Header Service extension, and how you can configure it. + It creates RST Tables and automatically fills our information needed. + :ref:`Header Service Extension ` + + .. grid-item-card:: + + Source Code Linker + ^^^ + Learn about the Source Code Linker extension, and how you can configure it. + It enables the possibility to link source code to requirements. + :ref:`Source Code Linker Extension ` + + .. grid-item-card:: + + RST Filebased testing + ^^^ + A new testing approach that we have integrated. It makes it easy to ensure that the metamodel and it's checks + work as intended. Create new checks simply by writing RST files. + Head over to :ref:`File Based Testing ` to learn more. + + .. grid-item-card:: + + Extension Guide + ^^^ + Want to learn how to write your own sphinx extension, or see how others have done it? + Head over to :ref:`Building an Extension` to dive in. + + + +.. toctree:: + :maxdepth: 1 + :caption: Contents: + + Metamodel + Filebased Testing + Header Service + Source Code Linker + Extension Guide diff --git a/src/extensions/score_metamodel/README.md b/docs/docs-as-code/extensions/metamodel.md similarity index 94% rename from src/extensions/score_metamodel/README.md rename to docs/docs-as-code/extensions/metamodel.md index 4b868da1..4f0015cb 100644 --- a/src/extensions/score_metamodel/README.md +++ b/docs/docs-as-code/extensions/metamodel.md @@ -1,9 +1,10 @@ +(metamodel)= # score_metamodel This extension provides the Metamodel and corresponding checks of the SCORE project as a Sphinx extension. -See [../README](../README.md) for more information on why we use extensions. +See [Getting started](../getting_started) for more information on why we use extensions. 
## Naming diff --git a/src/extensions/score_metamodel/tests/README.md b/docs/docs-as-code/extensions/rst_filebased_testing.md similarity index 98% rename from src/extensions/score_metamodel/tests/README.md rename to docs/docs-as-code/extensions/rst_filebased_testing.md index a86cace1..8016989b 100644 --- a/src/extensions/score_metamodel/tests/README.md +++ b/docs/docs-as-code/extensions/rst_filebased_testing.md @@ -1,3 +1,4 @@ +(file-based-testing)= # File based rule checks ## Test Function diff --git a/src/extensions/score_source_code_linker/README.md b/docs/docs-as-code/extensions/source_code_linker.md similarity index 99% rename from src/extensions/score_source_code_linker/README.md rename to docs/docs-as-code/extensions/source_code_linker.md index 2304e38e..5abddd0e 100644 --- a/src/extensions/score_source_code_linker/README.md +++ b/docs/docs-as-code/extensions/source_code_linker.md @@ -1,3 +1,4 @@ +(source-code-linker)= # Source Link Extension Details A Sphinx extension for source code traceability for requirements. This extension works with the Bazel system and Sphinx-needs to provide automatic source code traceability. 
diff --git a/README.md b/docs/docs-as-code/getting_started.md similarity index 90% rename from README.md rename to docs/docs-as-code/getting_started.md index e9fb9ec6..74d2e316 100644 --- a/README.md +++ b/docs/docs-as-code/getting_started.md @@ -1,4 +1,6 @@ -# Score-Docs-As-Code Module +(getting_started)= +# Using Docs-As-Code + A Bazel module providing tools and extensions to enable and simplify documentation building via Sphinx @@ -23,7 +25,7 @@ This module allows you to easily integrate Sphinx documentation generation into Add the module to your `MODULE.bazel` file: ```starlark -bazel_dep(name = "score_docs_as_code", version = "0.2.5") +bazel_dep(name = "score_docs_as_code", version = "0.2.7") ``` And make sure to also add the S-core Bazel registry to your `.bazelrc` file @@ -93,7 +95,7 @@ bazel build //path/to/BUILD-file:docs_latest # documentation at 'bazel-bin/

-> ### *For the full example as well as more complex ones, check out the [examples directory](examples/)* +> ### *For the full example as well as more complex ones, check out the {doc}`example <../example/index>` --- @@ -129,37 +131,37 @@ The `docs()` macro accepts the following arguments: | `visibility` | Bazel visibility | No | `None` | --- ---- + ## Available Extensions This module includes several custom Sphinx extensions to enhance your documentation: ### Score Layout Extension -Custom layout options for Sphinx HTML output. -[Learn more](src/extensions/score_layout/README.md) +Custom layout options for Sphinx HTML output are defined in `score_layout` + ### Score Header Service Consistent header styling across documentation pages. -[Learn more](src/extensions/score_header_service/README.md) +{doc}`Learn more ` ### Score Metamodel Validation and checking of documentation structure against a defined Metamodel. -[Learn more](src/extensions/score_metamodel/README.md) +{doc}`Learn more ` ### Score Source Code Linker Links between requirements documentation and source code implementations. -[Learn more](src/extensions/score_source_code_linker/README.md) +{doc}`Learn more ### Score PlantUML Integration with PlantUML for generating diagrams. -[Learn more](src/extensions/README.md) + ### Score Draw UML Functions Helper functions for creating UML diagrams. -[Learn more](src/extensions/score_draw_uml_funcs/README.md) + diff --git a/docs/docs-as-code/index.rst b/docs/docs-as-code/index.rst new file mode 100644 index 00000000..73867ed8 --- /dev/null +++ b/docs/docs-as-code/index.rst @@ -0,0 +1,44 @@ +.. # ******************************************************************************* + # Copyright (c) 2025 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. 
+ # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + +.. _docs-as-code: + + +============ +Docs-as-Code +============ + +.. grid:: 1 1 2 2 + :class-container: score-grid + + .. grid-item-card:: + + Getting started with docs-as-code + ^^^ + Start here to learn about general usage of the Docs-as-Code Module + :ref:`Get started `. + + .. grid-item-card:: + + Information about Extensions + ^^^ + Head over to our extensions to learn about what we offer and how to configure,extend or integrate them. + :ref:`See our extensions here ` + + + +.. toctree:: + :maxdepth: 1 + :caption: Contents: + + Getting Started diff --git a/docs/example/index.rst b/docs/example/index.rst new file mode 100644 index 00000000..3c6cc109 --- /dev/null +++ b/docs/example/index.rst @@ -0,0 +1,56 @@ +.. + # ******************************************************************************* + # Copyright (c) 2024 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + +.. _example: + + +======= +Example +======= + +This is a rendered example of the 'examples/linking-both' folder using the `docs` tool. + +.. 
stkh_req:: TestTitle + :id: stkh_req__index__test_requirement + :status: valid + :safety: QM + :rationale: A simple requirement we need to enable a documentation build + :reqtype: Functional + + Some content to make sure we also can render this + This is a link to an external need inside the 'score' documentation. + :need:`SCORE_gd_req__req__attr_safety` + Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ + + + +.. feat_req:: Some Title + :id: feat_req__example__some_title + :reqtype: Process + :security: YES + :safety: ASIL_D + :satisfies: SCORE_stkh_req__overall_goals__reuse_of_app_soft + :status: invalid + + With this requirement we can check if the removal of the prefix is working correctly. + It should remove id_prefix (SCORE _) as it's defined inside the BUILD file and remove it before it checks the leftover value + against the allowed defined regex in the metamodel + Note: The ID is different here as the 'folder structure' is as well + + +.. toctree:: + :maxdepth: 1 + :titlesonly: + + Subfolder example diff --git a/docs/example/testing/index.rst b/docs/example/testing/index.rst new file mode 100644 index 00000000..b4aa03e3 --- /dev/null +++ b/docs/example/testing/index.rst @@ -0,0 +1,43 @@ +.. # ******************************************************************************* + # Copyright (c) 2025 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + + +Inside a folder +================= +This example will help catch things and bugs when rst's are defined inside a folder. + +.. 
stkh_req:: TestTitle + :id: stkh_req__testing__test_requirement + :status: valid + :safety: QM + :rationale: A simple requirement we need to enable a documentation build + :reqtype: Functional + + Some content to make sure we also can render this. + This is a link to an external need inside the 'score' documentation. + :need:`SCORE_gd_req__req__attr_safety` + Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ + + +.. feat_req:: Some Title + :id: feat_req__testing__some_title + :reqtype: Process + :security: YES + :safety: ASIL_D + :satisfies: SCORE_stkh_req__overall_goals__reuse_of_app_soft + :status: invalid + + With this requirement we can check if the removal of the prefix is working correctly. + It should remove id_prefix (SCORE _) as it's defined inside the BUILD file and remove it before it checks the leftover value + against the 'allowed' defined regex in the metamodel + diff --git a/docs/faq/index.rst b/docs/faq/index.rst new file mode 100644 index 00000000..75adbe53 --- /dev/null +++ b/docs/faq/index.rst @@ -0,0 +1,22 @@ +.. # ******************************************************************************* + # Copyright (c) 2025 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + + +FAQ +=== + +In this document you will find answers to frequently asked questions regarding docs-as-code and it's usage. 
+ + +TODO: Everything +---------------- diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 00000000..15b7d00f --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,55 @@ +.. # ******************************************************************************* + # Copyright (c) 2025 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + +Project Documentation +===================== + +Welcome to the docs-as-code documentation, it is organized into several key sections. + + +.. grid:: 1 1 3 3 + :class-container: score-grid + + .. grid-item-card:: + + **Example** + ^^^ + See the rendered version of the files inside :ref:`example`. + + + .. grid-item-card:: + + **FAQ** + ^^^ + Find answers to frequently asked questions and common issues. + + .. grid-item-card:: + + **Docs-as-Code Documentation** + ^^^ + Learn how to use this module with how-to guides and tutorials. + :ref:`docs-as-code` + + + +.. dropdown:: Click to see details + + .. 
toctree:: + :maxdepth: 2 + :titlesonly: + + example/index + faq/index + docs-as-code/index + docs-as-code/extensions/index + diff --git a/src/requirements.in b/src/requirements.in index c8d7046c..c2f81393 100644 --- a/src/requirements.in +++ b/src/requirements.in @@ -11,6 +11,7 @@ pydata-sphinx-theme sphinx-design sphinx-autobuild ruamel.yaml +myst-parser PyGithub sphinx-needs[plotting] # Until release of esbonio 1.x, we need to install it ourselves so the VS Code esbonio-extension diff --git a/src/requirements.txt b/src/requirements.txt index 93432373..6dc56ddf 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -34,6 +34,10 @@ babel==2.17.0 \ # via # pydata-sphinx-theme # sphinx +basedpyright==1.29.1 \ + --hash=sha256:06bbe6c3b50ab4af20f80e154049477a50d8b81d2522eadbc9f472f2f92cd44b \ + --hash=sha256:b7eb65b9d4aaeeea29a349ac494252032a75a364942d0ac466d7f07ddeacc786 + # via -r /home/maxi/.cache/bazel/_bazel_maxi/f48cff2378be6375aa481f2a5c918c10/external/score_python_basics~/requirements.txt beautifulsoup4==4.13.4 \ --hash=sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b \ --hash=sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195 @@ -357,6 +361,7 @@ docutils==0.21.2 \ --hash=sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f \ --hash=sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2 # via + # myst-parser # pydata-sphinx-theme # sphinx esbonio==0.16.5 \ @@ -433,12 +438,14 @@ iniconfig==2.1.0 \ --hash=sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7 \ --hash=sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760 # via - # -r /home/maxi/.cache/bazel/_bazel_maxi/b64a2544752b0743f97f94438562b33d/external/score_python_basics~/requirements.txt + # -r /home/maxi/.cache/bazel/_bazel_maxi/f48cff2378be6375aa481f2a5c918c10/external/score_python_basics~/requirements.txt # pytest jinja2==3.1.6 \ 
--hash=sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d \ --hash=sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67 - # via sphinx + # via + # myst-parser + # sphinx jsonschema==4.23.0 \ --hash=sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4 \ --hash=sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566 @@ -533,6 +540,12 @@ lsprotocol==2023.0.1 \ --hash=sha256:c75223c9e4af2f24272b14c6375787438279369236cd568f596d4951052a60f2 \ --hash=sha256:cc5c15130d2403c18b734304339e51242d3018a05c4f7d0f198ad6e0cd21861d # via pygls +markdown-it-py==3.0.0 \ + --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \ + --hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb + # via + # mdit-py-plugins + # myst-parser markupsafe==3.0.2 \ --hash=sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4 \ --hash=sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30 \ @@ -632,6 +645,31 @@ matplotlib==3.10.1 \ --hash=sha256:e9b4bb156abb8fa5e5b2b460196f7db7264fc6d62678c03457979e7d5254b7be \ --hash=sha256:ff2ae14910be903f4a24afdbb6d7d3a6c44da210fc7d42790b87aeac92238a16 # via sphinx-needs +mdit-py-plugins==0.4.2 \ + --hash=sha256:0c673c3f889399a33b95e88d2f0d111b4447bdfea7f237dab2d488f459835636 \ + --hash=sha256:5f2cd1fdb606ddf152d37ec30e46101a60512bc0e5fa1a7002c36647b09e26b5 + # via myst-parser +mdurl==0.1.2 \ + --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ + --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba + # via markdown-it-py +myst-parser==4.0.1 \ + --hash=sha256:5cfea715e4f3574138aecbf7d54132296bfd72bb614d31168f48c477a830a7c4 \ + --hash=sha256:9134e88959ec3b5780aedf8a99680ea242869d012e8821db3126d427edc9c95d + # via -r src/requirements.in +nodejs-wheel-binaries==22.15.0 \ + --hash=sha256:01a3fe4d60477f93bf21a44219db33548c75d7fed6dc6e6f4c05cf0adf015609 \ + 
--hash=sha256:0ab0fbcda2ddc8aab7db1505d72cb958f99324b3834c4543541a305e02bfe860 \ + --hash=sha256:2bde1d8e00cd955b9ce9ee9ac08309923e2778a790ee791b715e93e487e74bfd \ + --hash=sha256:51deaf13ee474e39684ce8c066dfe86240edb94e7241950ca789befbbbcbd23d \ + --hash=sha256:867121ccf99d10523f6878a26db86e162c4939690e24cfb5bea56d01ea696c93 \ + --hash=sha256:a54bb3fee9170003fa8abc69572d819b2b1540344eff78505fcc2129a9175596 \ + --hash=sha256:aa16366d48487fff89446fb237693e777aa2ecd987208db7d4e35acc40c3e1b1 \ + --hash=sha256:acdd4ef73b6701aab9fbe02ac5e104f208a5e3c300402fa41ad7bc7f49499fbf \ + --hash=sha256:ff81aa2a79db279c2266686ebcb829b6634d049a5a49fc7dc6921e4f18af9703 + # via + # -r /home/maxi/.cache/bazel/_bazel_maxi/f48cff2378be6375aa481f2a5c918c10/external/score_python_basics~/requirements.txt + # basedpyright numpy==2.2.5 \ --hash=sha256:0255732338c4fdd00996c0421884ea8a3651eea555c3a56b84892b66f696eb70 \ --hash=sha256:02f226baeefa68f7d579e213d0f3493496397d8f1cff5e2b222af274c86a552a \ @@ -695,7 +733,7 @@ packaging==24.2 \ --hash=sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759 \ --hash=sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f # via - # -r /home/maxi/.cache/bazel/_bazel_maxi/b64a2544752b0743f97f94438562b33d/external/score_python_basics~/requirements.txt + # -r /home/maxi/.cache/bazel/_bazel_maxi/f48cff2378be6375aa481f2a5c918c10/external/score_python_basics~/requirements.txt # matplotlib # pytest # sphinx @@ -790,7 +828,7 @@ pluggy==1.5.0 \ --hash=sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1 \ --hash=sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669 # via - # -r /home/maxi/.cache/bazel/_bazel_maxi/b64a2544752b0743f97f94438562b33d/external/score_python_basics~/requirements.txt + # -r /home/maxi/.cache/bazel/_bazel_maxi/f48cff2378be6375aa481f2a5c918c10/external/score_python_basics~/requirements.txt # pytest pycparser==2.22 \ 
--hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ @@ -842,11 +880,66 @@ pyspellchecker==0.8.2 \ pytest==8.3.5 \ --hash=sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820 \ --hash=sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845 - # via -r /home/maxi/.cache/bazel/_bazel_maxi/b64a2544752b0743f97f94438562b33d/external/score_python_basics~/requirements.txt + # via -r /home/maxi/.cache/bazel/_bazel_maxi/f48cff2378be6375aa481f2a5c918c10/external/score_python_basics~/requirements.txt python-dateutil==2.9.0.post0 \ --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 # via matplotlib +pyyaml==6.0.2 \ + --hash=sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff \ + --hash=sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48 \ + --hash=sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086 \ + --hash=sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e \ + --hash=sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133 \ + --hash=sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5 \ + --hash=sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484 \ + --hash=sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee \ + --hash=sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5 \ + --hash=sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68 \ + --hash=sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a \ + --hash=sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf \ + --hash=sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99 \ + --hash=sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8 \ + 
--hash=sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85 \ + --hash=sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19 \ + --hash=sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc \ + --hash=sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a \ + --hash=sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1 \ + --hash=sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317 \ + --hash=sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c \ + --hash=sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631 \ + --hash=sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d \ + --hash=sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652 \ + --hash=sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5 \ + --hash=sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e \ + --hash=sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b \ + --hash=sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8 \ + --hash=sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476 \ + --hash=sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706 \ + --hash=sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563 \ + --hash=sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237 \ + --hash=sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b \ + --hash=sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083 \ + --hash=sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180 \ + --hash=sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425 \ + --hash=sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e \ + --hash=sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f \ + 
--hash=sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725 \ + --hash=sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183 \ + --hash=sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab \ + --hash=sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774 \ + --hash=sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725 \ + --hash=sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e \ + --hash=sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5 \ + --hash=sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d \ + --hash=sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290 \ + --hash=sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44 \ + --hash=sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed \ + --hash=sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4 \ + --hash=sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba \ + --hash=sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12 \ + --hash=sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4 + # via myst-parser referencing==0.36.2 \ --hash=sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa \ --hash=sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0 @@ -1061,6 +1154,7 @@ sphinx==8.2.3 \ # via # -r src/requirements.in # esbonio + # myst-parser # pydata-sphinx-theme # sphinx-autobuild # sphinx-data-viewer From 9715f86cabc55ddc08bc6ccfc558d062e1806086 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Fri, 30 May 2025 13:29:28 +0200 Subject: [PATCH 035/231] Increase version for release (#58) * Increase version for release --- .github/workflows/test.yml | 2 +- MODULE.bazel | 4 ++-- src/requirements.txt | 38 +++++++++++++++++++------------------- 3 files changed, 22 
insertions(+), 22 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2e7b1a06..ab0984ab 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -24,4 +24,4 @@ jobs: - name: Run test targets run: | bazel run //src:ide_support - bazel test ... + bazel test //src/... diff --git a/MODULE.bazel b/MODULE.bazel index d03e1ed2..247fd3fc 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.2.6", + version = "0.3.0", compatibility_level = 0, ) @@ -86,7 +86,7 @@ http_file( ) # Provides, pytest & venv -bazel_dep(name = "score_python_basics", version = "0.3.1") +bazel_dep(name = "score_python_basics", version = "0.3.2") # Checker rule for CopyRight checks/fixes bazel_dep(name = "score_cr_checker", version = "0.2.2") diff --git a/src/requirements.txt b/src/requirements.txt index 6dc56ddf..076f2dc3 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -34,9 +34,9 @@ babel==2.17.0 \ # via # pydata-sphinx-theme # sphinx -basedpyright==1.29.1 \ - --hash=sha256:06bbe6c3b50ab4af20f80e154049477a50d8b81d2522eadbc9f472f2f92cd44b \ - --hash=sha256:b7eb65b9d4aaeeea29a349ac494252032a75a364942d0ac466d7f07ddeacc786 +basedpyright==1.29.2 \ + --hash=sha256:12c49186003b9f69a028615da883ef97035ea2119a9e3f93a00091b3a27088a6 \ + --hash=sha256:f389e2997de33d038c5065fd85bff351fbdc62fa6d6371c7b947fc3bce8d437d # via -r /home/maxi/.cache/bazel/_bazel_maxi/f48cff2378be6375aa481f2a5c918c10/external/score_python_basics~/requirements.txt beautifulsoup4==4.13.4 \ --hash=sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b \ @@ -657,16 +657,16 @@ myst-parser==4.0.1 \ --hash=sha256:5cfea715e4f3574138aecbf7d54132296bfd72bb614d31168f48c477a830a7c4 \ --hash=sha256:9134e88959ec3b5780aedf8a99680ea242869d012e8821db3126d427edc9c95d # via -r src/requirements.in -nodejs-wheel-binaries==22.15.0 \ - --hash=sha256:01a3fe4d60477f93bf21a44219db33548c75d7fed6dc6e6f4c05cf0adf015609 
\ - --hash=sha256:0ab0fbcda2ddc8aab7db1505d72cb958f99324b3834c4543541a305e02bfe860 \ - --hash=sha256:2bde1d8e00cd955b9ce9ee9ac08309923e2778a790ee791b715e93e487e74bfd \ - --hash=sha256:51deaf13ee474e39684ce8c066dfe86240edb94e7241950ca789befbbbcbd23d \ - --hash=sha256:867121ccf99d10523f6878a26db86e162c4939690e24cfb5bea56d01ea696c93 \ - --hash=sha256:a54bb3fee9170003fa8abc69572d819b2b1540344eff78505fcc2129a9175596 \ - --hash=sha256:aa16366d48487fff89446fb237693e777aa2ecd987208db7d4e35acc40c3e1b1 \ - --hash=sha256:acdd4ef73b6701aab9fbe02ac5e104f208a5e3c300402fa41ad7bc7f49499fbf \ - --hash=sha256:ff81aa2a79db279c2266686ebcb829b6634d049a5a49fc7dc6921e4f18af9703 +nodejs-wheel-binaries==22.16.0 \ + --hash=sha256:2728972d336d436d39ee45988978d8b5d963509e06f063e80fe41b203ee80b28 \ + --hash=sha256:2fffb4bf1066fb5f660da20819d754f1b424bca1b234ba0f4fa901c52e3975fb \ + --hash=sha256:447ad796850eb52ca20356ad39b2d296ed8fef3f214921f84a1ccdad49f2eba1 \ + --hash=sha256:4ae3cf22138891cb44c3ee952862a257ce082b098b29024d7175684a9a77b0c0 \ + --hash=sha256:71f2de4dc0b64ae43e146897ce811f80ac4f9acfbae6ccf814226282bf4ef174 \ + --hash=sha256:7f526ca6a132b0caf633566a2a78c6985fe92857e7bfdb37380f76205a10b808 \ + --hash=sha256:986b715a96ed703f8ce0c15712f76fc42895cf09067d72b6ef29e8b334eccf64 \ + --hash=sha256:d695832f026df3a0cf9a089d222225939de9d1b67f8f0a353b79f015aabbe7e2 \ + --hash=sha256:dbfccbcd558d2f142ccf66d8c3a098022bf4436db9525b5b8d32169ce185d99e # via # -r /home/maxi/.cache/bazel/_bazel_maxi/f48cff2378be6375aa481f2a5c918c10/external/score_python_basics~/requirements.txt # basedpyright @@ -729,9 +729,9 @@ numpy==2.2.5 \ # via # contourpy # matplotlib -packaging==24.2 \ - --hash=sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759 \ - --hash=sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f +packaging==25.0 \ + --hash=sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484 \ + 
--hash=sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f # via # -r /home/maxi/.cache/bazel/_bazel_maxi/f48cff2378be6375aa481f2a5c918c10/external/score_python_basics~/requirements.txt # matplotlib @@ -824,9 +824,9 @@ platformdirs==4.3.7 \ --hash=sha256:a03875334331946f13c549dbd8f4bac7a13a50a895a0eb1e8c6a8ace80d40a94 \ --hash=sha256:eb437d586b6a0986388f0d6f74aa0cde27b48d0e3d66843640bfb6bdcdb6e351 # via esbonio -pluggy==1.5.0 \ - --hash=sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1 \ - --hash=sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669 +pluggy==1.6.0 \ + --hash=sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3 \ + --hash=sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746 # via # -r /home/maxi/.cache/bazel/_bazel_maxi/f48cff2378be6375aa481f2a5c918c10/external/score_python_basics~/requirements.txt # pytest From fae7591273f308397365287675ac5125a51b38cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Tue, 3 Jun 2025 08:05:10 +0200 Subject: [PATCH 036/231] fixes exeternal prefix cleaning (#61) --- MODULE.bazel | 2 +- src/extensions/score_metamodel/BUILD | 4 ++- .../score_metamodel/checks/check_options.py | 8 ++--- src/extensions/score_metamodel/metamodel.yaml | 2 +- .../test_attributes_external_prefix.rst | 36 +++++++++++++++++++ .../score_metamodel/tests/rst/conf.py | 8 +++++ 6 files changed, 53 insertions(+), 7 deletions(-) create mode 100644 src/extensions/score_metamodel/tests/rst/attributes/test_attributes_external_prefix.rst diff --git a/MODULE.bazel b/MODULE.bazel index 247fd3fc..ddb23b15 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -66,7 +66,7 @@ bazel_dep(name = "buildifier_prebuilt", version = "7.3.1") bazel_dep(name = "aspect_rules_lint", version = "1.4.2") # PlantUML for docs -bazel_dep(name = "rules_java", version = "8.6.3") +bazel_dep(name = "rules_java", version = "8.11.0") http_jar = 
use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_jar") diff --git a/src/extensions/score_metamodel/BUILD b/src/extensions/score_metamodel/BUILD index c2c415df..bc328f7e 100644 --- a/src/extensions/score_metamodel/BUILD +++ b/src/extensions/score_metamodel/BUILD @@ -33,6 +33,8 @@ score_py_pytest( size = "small", srcs = glob(["tests/*.py"]), # All requirements already in the library so no need to have it double - data = ["//src:test_rst_files"] + glob(["**/*.rst"]), + data = ["//src:test_rst_files"] + glob( + ["tests/**/*.rst"], + ), deps = [":score_metamodel"], ) diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index fc9bfd10..c264171e 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -53,10 +53,10 @@ def validate_fields( :param field_type: A string indicating the field type ('option' or 'link'). """ - def remove_prefix(values: list[str], prefixes: list[str]) -> list[str]: + def remove_prefix(word: str, prefixes: list[str]) -> str: # Memory and allocation wise better to use a generator here. # Removes any prefix allowed by configuration, if prefix is there. - return [word.removeprefix(p) for word in values for p in prefixes] + return [word.removeprefix(prefix) for prefix in prefixes][0] for field, pattern in fields.items(): raw_value: str | list[str] | None = need.get(field, None) @@ -77,10 +77,10 @@ def remove_prefix(values: list[str], prefixes: list[str]) -> list[str]: values = [str(raw_value)] # The filter ensures that the function is only called when needed. 
- if field_type == "link" and allowed_prefixes: - values = remove_prefix(values, allowed_prefixes) for value in values: + if field_type == "link" and allowed_prefixes: + value = remove_prefix(value, allowed_prefixes) try: if not re.match(pattern, value): log.warning_for_option( diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 734619dd..e2b954ca 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -227,7 +227,7 @@ needs_types: safety: "^(QM|ASIL_B|ASIL_D)$" status: "^(valid|invalid)$" mandatory_links: - satisfies: "^stkh_req__.*$" + satisfies: "^.*_req__.*$" optional_options: codelink: "^.*$" testlink: "^.*$" diff --git a/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_external_prefix.rst b/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_external_prefix.rst new file mode 100644 index 00000000..a3df11b9 --- /dev/null +++ b/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_external_prefix.rst @@ -0,0 +1,36 @@ +.. + # ******************************************************************************* + # Copyright (c) 2025 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + +#CHECK: check_options + +.. Cleaning of 'external prefix' before checking regex confirmity +#EXPECT-NOT tool_req__test_abcd.satisfies (PROCESS_doc_getstrt__req__process): does not follow pattern `^doc_.+$`. + +.. 
tool_req:: This is a test + :id: tool_req__test_abcd + :satisfies: PROCESS_doc_getstrt__req__process + + This should not give a warning + + +.. Also make sure it works wit lists of links + +#EXPECT-NOT: tool_req__test_aaaa.satisfies (PROCESS_doc_getstrt__req__process): does not follow pattern `^doc_.+$`. +#EXPECT-NOT: tool_req__test_aaaa.satisfies (PROCESS_gd_guidl__req__engineering): does not follow pattern `^gd_.+$`. + +.. tool_req:: This is a test + :id: tool_req__test_aaaa + :satisfies: PROCESS_doc_getstrt__req__process;PROCESS_gd_guidl__req__engineering + + This should give a warning diff --git a/src/extensions/score_metamodel/tests/rst/conf.py b/src/extensions/score_metamodel/tests/rst/conf.py index 4630b7fa..69e04bf1 100644 --- a/src/extensions/score_metamodel/tests/rst/conf.py +++ b/src/extensions/score_metamodel/tests/rst/conf.py @@ -20,3 +20,11 @@ "sphinx_needs", "score_metamodel", ] + +needs_external_needs = [ + { + "base_url": "https://eclipse-score.github.io/process_description/main/", + "json_url": "https://eclipse-score.github.io/process_description/main/needs.json", + "id_prefix": "process_", + } +] From 94bda9e1959e9d0821c7f5f634c203b3012e2400 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Tue, 3 Jun 2025 15:43:56 +0200 Subject: [PATCH 037/231] Increase module version (#64) --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index ddb23b15..cd8b5341 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.3.0", + version = "0.3.1", compatibility_level = 0, ) From 0460310a5b69f5b1d0704c9adbb5c73dd0010fbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Wed, 4 Jun 2025 10:13:04 +0200 Subject: [PATCH 038/231] fix cleaning of external prefix (#68) --- src/extensions/score_metamodel/checks/check_options.py | 3 +-- src/extensions/score_metamodel/tests/test_check_options.py 
| 3 +++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index c264171e..05a0504b 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -77,9 +77,8 @@ def remove_prefix(word: str, prefixes: list[str]) -> str: values = [str(raw_value)] # The filter ensures that the function is only called when needed. - for value in values: - if field_type == "link" and allowed_prefixes: + if allowed_prefixes: value = remove_prefix(value, allowed_prefixes) try: if not re.match(pattern, value): diff --git a/src/extensions/score_metamodel/tests/test_check_options.py b/src/extensions/score_metamodel/tests/test_check_options.py index 00128717..f2eb5f33 100644 --- a/src/extensions/score_metamodel/tests/test_check_options.py +++ b/src/extensions/score_metamodel/tests/test_check_options.py @@ -141,6 +141,7 @@ def test_missing_mandatory_options_info(self): app = Mock(spec=Sphinx) app.config = Mock() app.config.needs_types = self.NEED_TYPE_INFO_WITHOUT_MANDATORY_OPTIONS + app.config.allowed_external_prefixes = [] # Expect that the checks pass check_options(app, need_1, logger) logger.assert_warning( @@ -165,6 +166,7 @@ def test_invalid_option_type(self): app = Mock(spec=Sphinx) app.config = Mock() app.config.needs_types = self.NEED_TYPE_INFO_WITH_INVALID_OPTION_TYPE + app.config.allowed_external_prefixes = [] # Expect that the checks pass check_options(app, need_1, logger) logger.assert_warning( @@ -190,6 +192,7 @@ def test_unknown_option_present_in_neither_req_opt_neither_opt_opt(self): app = Mock(spec=Sphinx) app.config = Mock() app.config.needs_types = self.NEED_TYPE_INFO_WITH_OPT_OPT + app.config.allowed_external_prefixes = [] # Expect that the checks pass check_extra_options(app, need_1, logger) From 027e3b745e1de4b15b0ea0d05e442e0115aa4c0e Mon Sep 17 00:00:00 2001 From: Markus Schu 
<142009492+masc2023@users.noreply.github.com> Date: Wed, 4 Jun 2025 10:33:36 +0200 Subject: [PATCH 039/231] Update metamodel with (#69) trustable framework needs, https://github.com/eclipse-score/process_description/pull/27 add security tag to the document need Resolves: https://github.com/eclipse-score/score/issues/947 --- src/extensions/score_metamodel/metamodel.yaml | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index e2b954ca..8d7c0b8f 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -34,6 +34,34 @@ needs_types: ############################################################################## # Process Metamodel ############################################################################## + # TSF + tsf: + title: "TSF" + prefix: "tsf__" + mandatory_options: + id: "^tsf__[0-9a-zA-Z_-]*$" + status: "^(draft|valid)$" + optional_links: + links: "^.*$" + + tenet: + title: "Tenet" + prefix: "tenet__" + mandatory_options: + id: "^tenet__[0-9a-zA-Z_-]*$" + status: "^(draft|valid)$" + optional_links: + links: "^.*$" + + assertion: + title: "Assertion" + prefix: "^assertion__" + mandatory_options: + id: "assertion__[0-9a-zA-Z_-]*$" + status: "^(draft|valid)$" + optional_links: + links: "^.*$" + # Standards std_req: title: "Standard Requirement" @@ -156,6 +184,7 @@ needs_types: status: "^(valid|draft|invalid)$" optional_options: safety: "^(QM|ASIL_B|ASIL_D)$" + security: "^(YES|NO)$" realizes: "^wp__.+$" # The following 3 guidance requirements enforce the requirement structure and attributes: # req-Id: gd_req__req__structure From e1b3bd276f63e620f9e81087bf9e0c8193b1025f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Wed, 4 Jun 2025 10:44:06 +0200 Subject: [PATCH 040/231] Increase version of module (#71) --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index cd8b5341..77b1a272 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.3.1", + version = "0.3.2", compatibility_level = 0, ) From 79c72955bd883de187944e26e525b8c7e9ac6cfa Mon Sep 17 00:00:00 2001 From: Nicolae Dicu Date: Wed, 4 Jun 2025 13:14:06 +0300 Subject: [PATCH 041/231] docs: add user and repo as parameters to incremental (#70) --- .github/workflows/docs.yml | 2 +- src/extensions/score_layout/__init__.py | 2 +- src/extensions/score_layout/html_options.py | 53 ++++++++++++++------- src/incremental.py | 17 +++++++ 4 files changed, 55 insertions(+), 19 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 20efbaf7..454b37f0 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -38,5 +38,5 @@ jobs: id-token: write with: - bazel-target: "//docs:github_pages__release" + bazel-target: "//docs:incremental_release -- --github_user=${{ github.repository_owner }} --github_repo=${{ github.event.repository.name }}" retention-days: 3 diff --git a/src/extensions/score_layout/__init__.py b/src/extensions/score_layout/__init__.py index 188122cb..18e36571 100644 --- a/src/extensions/score_layout/__init__.py +++ b/src/extensions/score_layout/__init__.py @@ -32,7 +32,7 @@ def update_config(app: Sphinx, _config: Any): app.config.needs_layouts = sphinx_options.needs_layouts app.config.needs_global_options = sphinx_options.needs_global_options app.config.html_theme = html_options.html_theme - app.config.html_context = html_options.html_context + app.config.html_context = html_options.return_html_context(app) app.config.html_theme_options = html_options.return_html_theme_options(app) # Setting HTML static path diff --git a/src/extensions/score_layout/html_options.py b/src/extensions/score_layout/html_options.py index 80e85407..24ada62a 100644 --- a/src/extensions/score_layout/html_options.py +++ 
b/src/extensions/score_layout/html_options.py @@ -14,7 +14,7 @@ def return_html_theme_options(app: Sphinx) -> dict[str, object]: - return { + theme_options = { "navbar_align": "content", "header_links_before_dropdown": 5, "icon_links": [ @@ -28,20 +28,33 @@ def return_html_theme_options(app: Sphinx) -> dict[str, object]: # https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/source-buttons.html#add-an-edit-button "use_edit_page_button": True, "collapse_navigation": True, - # Enable version switcher - "switcher": { - "json_url": ( - f"https://{html_context['github_user']}.github.io/" - f"{html_context['github_repo']}/versions.json" - ), # URL to JSON file, hardcoded for now - "version_match": app.config.release, - }, - "navbar_end": ["theme-switcher", "navbar-icon-links", "version-switcher"], "logo": { "text": "Eclipse S-CORE", }, } + # Enable version switcher if github_user and github_repo are provided via CLI + if ( + app.config.html_context.get("github_user") != "dummy" + and app.config.html_context.get("github_repo") != "dummy" + ): + theme_options["switcher"] = { + "json_url": ( + f"https://{app.config.html_context['github_user']}.github.io/" + f"{app.config.html_context['github_repo']}/versions.json" + ), # URL to JSON file, hardcoded for now + "version_match": app.config.release, + } + theme_options["navbar_end"] = [ + "theme-switcher", + "navbar-icon-links", + "version-switcher", + ] + else: + theme_options["navbar_end"] = ["theme-switcher", "navbar-icon-links"] + + return theme_options + html_theme = "pydata_sphinx_theme" # "alabaster" html_static_path = ["src/assets", "_assets"] @@ -54,10 +67,16 @@ def return_html_theme_options(app: Sphinx) -> dict[str, object]: # html_logo = "_assets/S-CORE_Logo_white.svg" -html_context = { - # "github_url": "https://github.com", # or your GitHub Enterprise site - "github_user": "eclipse-score", - "github_repo": "score", - "github_version": "main", - "doc_path": "docs", -} +def return_html_context(app: Sphinx) 
-> dict[str, str]: + if not hasattr(app.config, "html_context") or ( + not app.config.html_context.get("github_user") + and not app.config.html_context.get("github_repo") + ): + return { + # still required for use_edit_page_button and other elements except version switcher + "github_user": "dummy", + "github_repo": "dummy", + "github_version": "main", + "doc_path": "docs", + } + return app.config.html_context diff --git a/src/incremental.py b/src/incremental.py index b67e0636..90e9ff85 100644 --- a/src/incremental.py +++ b/src/incremental.py @@ -65,6 +65,15 @@ def transform_env_str_to_dict(external_needs_source: str) -> list[dict[str, str] parser.add_argument( "--debug", help="Enable Debugging via debugpy", action="store_true" ) + # optional GitHub user forwarded from the Bazel CLI + parser.add_argument( + "--github_user", + help="GitHub username to embed in the Sphinx build", + ) + parser.add_argument( + "--github_repo", + help="GitHub repository to embed in the Sphinx build", + ) args = parser.parse_args() if args.debug: debugpy.listen(("0.0.0.0", args.debug_port)) @@ -87,6 +96,14 @@ def transform_env_str_to_dict(external_needs_source: str) -> list[dict[str, str] get_env("CONF_DIRECTORY"), f"--define=external_needs_source={json.dumps(transform_env_str_to_dict(get_env('EXTERNAL_NEEDS_INFO')))}", ] + + # configure sphinx build with GitHub user and repo from CLI + if args.github_user and args.github_repo: + base_arguments.append(f"-A=github_user={args.github_user}") + base_arguments.append(f"-A=github_repo={args.github_repo}") + base_arguments.append(f"-A=github_version=main") + base_arguments.append(f"-A=doc_path=docs") + action = get_env("ACTION") if action == "live_preview": sphinx_autobuild_main( From d7bd2382c72c205a70b87d48b6e7fc2b5cf32a45 Mon Sep 17 00:00:00 2001 From: Nicolae Dicu Date: Wed, 4 Jun 2025 15:53:01 +0300 Subject: [PATCH 042/231] Create README.md (#75) Signed-off-by: Nicolae Dicu --- README.md | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 
file changed, 43 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 00000000..d3281d92 --- /dev/null +++ b/README.md @@ -0,0 +1,43 @@ +# docs-as-code + +Docs-as-code tooling for Eclipse S-CORE + +## Overview + +The S-CORE docs Sphinx configuration and build code. + +## Building documentation + +#### Run a documentation build: + +#### Integrate latest score main branch + +```bash +bazel run //docs:incremental_latest +``` + +#### Access your documentation at: + +- `_build/` for incremental + +#### Getting IDE support + +Create the virtual environment via `bazel run //process:ide_support`.\ +If your IDE does not automatically ask you to activate the newly created environment you can activate it. + +- In VSCode via `ctrl+p` => `Select Python Interpreter` then select `.venv/bin/python` +- In the terminal via `source .venv/bin/activate` + +#### Format your documentation with: + +```bash +bazel test //src:format.check +bazel run //src:format.fix +``` + +#### Find & fix missing copyright + +```bash +bazel run //:copyright-check +bazel run //:copyright.fix +``` From d7df8438d16fe0ab4ad4dffc99bf2f985107dff9 Mon Sep 17 00:00:00 2001 From: Andrey Babanin Date: Wed, 4 Jun 2025 17:25:20 +0200 Subject: [PATCH 043/231] fix: Change hover color for visited links (#76) Signed-off-by: Andrey Babanin --- src/assets/css/score_design.css | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/assets/css/score_design.css b/src/assets/css/score_design.css index cb810098..887da5ad 100644 --- a/src/assets/css/score_design.css +++ b/src/assets/css/score_design.css @@ -5,6 +5,10 @@ color: #FFFFFF; } +.score-grid a:hover:visited { + color: #e0e0e0; +} + .score-grid .sd-card-header { background-color: var(--pst-color-secondary) !important; color: var(--pst-color-text-muted); From 6a468c77c456c1527463a846b402e509725c33e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Thu, 5 Jun 2025 15:51:46 +0200 Subject: 
[PATCH 044/231] fix: external needs graph checks & misc fixes (#78) --- docs/example/index.rst | 2 +- docs/example/testing/index.rst | 2 +- examples/linking-both/index.rst | 2 +- examples/linking-both/testing/test.rst | 2 +- examples/linking-latest/index.rst | 2 +- src/extensions/score_metamodel/__init__.py | 17 ++++++++--------- .../score_metamodel/checks/graph_checks.py | 11 ++++++----- src/extensions/score_metamodel/metamodel.yaml | 16 ++++++++-------- .../tests/test_rules_file_based.py | 4 ++-- 9 files changed, 29 insertions(+), 29 deletions(-) diff --git a/docs/example/index.rst b/docs/example/index.rst index 3c6cc109..24641d3b 100644 --- a/docs/example/index.rst +++ b/docs/example/index.rst @@ -30,7 +30,7 @@ This is a rendered example of the 'examples/linking-both' folder using the `docs Some content to make sure we also can render this This is a link to an external need inside the 'score' documentation. - :need:`SCORE_gd_req__req__attr_safety` + :need:`SCORE_feat_req__kvs__config_file`. Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ diff --git a/docs/example/testing/index.rst b/docs/example/testing/index.rst index b4aa03e3..0a10a919 100644 --- a/docs/example/testing/index.rst +++ b/docs/example/testing/index.rst @@ -25,7 +25,7 @@ This example will help catch things and bugs when rst's are defined inside a fol Some content to make sure we also can render this. This is a link to an external need inside the 'score' documentation. - :need:`SCORE_gd_req__req__attr_safety` + :need:`SCORE_feat_req__kvs__config_file`. Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ diff --git a/examples/linking-both/index.rst b/examples/linking-both/index.rst index ce3bfe96..22d023a4 100644 --- a/examples/linking-both/index.rst +++ b/examples/linking-both/index.rst @@ -33,7 +33,7 @@ This is a simple example of a documentation page using the `docs` tool. 
Some content to make sure we also can render this This is a link to an external need inside the 'score' documentation. - :need:`SCORE_gd_req__req__attr_safety` + :need:`SCORE_feat_req__kvs__config_file` Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ diff --git a/examples/linking-both/testing/test.rst b/examples/linking-both/testing/test.rst index c4ec5f40..5dcfa9d1 100644 --- a/examples/linking-both/testing/test.rst +++ b/examples/linking-both/testing/test.rst @@ -25,7 +25,7 @@ This example will help catch things and bugs when rst's are defined inside a fol Some content to make sure we also can render this. This is a link to an external need inside the 'score' documentation. - :need:`SCORE_gd_req__req__attr_safety` + :need:`SCORE_feat_req__kvs__config_file`. Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ diff --git a/examples/linking-latest/index.rst b/examples/linking-latest/index.rst index fa856283..336987e6 100644 --- a/examples/linking-latest/index.rst +++ b/examples/linking-latest/index.rst @@ -25,6 +25,6 @@ This is a simple example of a documentation page using the `docs` tool. Some content to make sure we also can render this This is a link to an external need inside the 'score' documentation. - :need:`SCORE_gd_req__req__attr_safety`. + :need:`SCORE_feat_req__kvs__config_file`. Note how it starts with the defined prefix but in UPPERCASE. 
This comes from sphinx-needs, `see here `_ diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 7516e83d..26c53823 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -22,14 +22,13 @@ from ruamel.yaml import YAML from sphinx.application import Sphinx from sphinx_needs import logging -from sphinx_needs.data import NeedsInfoType, SphinxNeedsData - +from sphinx_needs.data import NeedsInfoType, SphinxNeedsData, NeedsView from .log import CheckLogger logger = logging.get_logger(__name__) local_check_function = Callable[[Sphinx, NeedsInfoType, CheckLogger], None] -graph_check_function = Callable[[Sphinx, list[NeedsInfoType], CheckLogger], None] +graph_check_function = Callable[[Sphinx, NeedsView, CheckLogger], None] local_checks: list[local_check_function] = [] graph_checks: list[graph_check_function] = [] @@ -78,9 +77,7 @@ def _run_checks(app: Sphinx, exception: Exception | None) -> None: return # Filter out external needs, as checks are only intended to be run on internal needs. - needs_all_needs = ( - SphinxNeedsData(app.env).get_needs_view().filter_is_external(False) - ) + needs_all_needs = SphinxNeedsData(app.env).get_needs_view() logger.debug(f"Running checks for {len(needs_all_needs)} needs") @@ -95,9 +92,12 @@ def is_check_enabled(check: local_check_function | graph_check_function): enabled_local_checks = [c for c in local_checks if is_check_enabled(c)] + needs_local_needs = ( + SphinxNeedsData(app.env).get_needs_view().filter_is_external(False) + ) # Need-Local checks: checks which can be checked file-local, without a # graph of other needs. 
- for need in needs_all_needs.values(): + for need in needs_local_needs.values(): for check in enabled_local_checks: logger.debug(f"Running local check {check} for need {need['id']}") check(app, need, log) @@ -105,10 +105,9 @@ def is_check_enabled(check: local_check_function | graph_check_function): # Graph-Based checks: These warnings require a graph of all other needs to # be checked. - needs = list(needs_all_needs.values()) for check in [c for c in graph_checks if is_check_enabled(c)]: logger.debug(f"Running graph check {check} for all needs") - check(app, needs, log) + check(app, needs_all_needs, log) if log.has_warnings: log.warning("Some needs have issues. See the log for more information.") diff --git a/src/extensions/score_metamodel/checks/graph_checks.py b/src/extensions/score_metamodel/checks/graph_checks.py index 6127a9fc..dfa38fe9 100644 --- a/src/extensions/score_metamodel/checks/graph_checks.py +++ b/src/extensions/score_metamodel/checks/graph_checks.py @@ -15,7 +15,7 @@ from typing import Any, Literal from sphinx.application import Sphinx -from sphinx_needs.data import NeedsInfoType +from sphinx_needs.data import NeedsInfoType, NeedsView from score_metamodel import ( CheckLogger, @@ -130,19 +130,20 @@ def get_need_selection( @graph_check def check_metamodel_graph( app: Sphinx, - all_needs: list[NeedsInfoType], + all_needs: NeedsView, log: CheckLogger, ): graph_checks_global = app.config.graph_checks # Convert list to dictionary for easy lookup - needs_dict = {need["id"]: need for need in all_needs} + needs_dict_all = {need["id"]: need for need in all_needs.values()} + needs_local = list(all_needs.filter_is_external(False).values()) # Iterate over all graph checks for check in graph_checks_global.items(): apply, eval = check[1].values() # Get all needs that match the selection criteria - selected_needs = get_need_selection(all_needs, apply, log) + selected_needs = get_need_selection(needs_local, apply, log) for need in selected_needs: for 
parent_relation in list(eval.keys()): @@ -153,7 +154,7 @@ def check_metamodel_graph( parent_ids = need[parent_relation] for parent_id in parent_ids: - parent_need = needs_dict.get(parent_id) + parent_need = needs_dict_all.get(parent_id) if parent_need is None: msg = f"Parent need `{parent_id}` not found in needs_dict." log.warning_for_need(need, msg) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 8d7c0b8f..cc5c7025 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -187,9 +187,9 @@ needs_types: security: "^(YES|NO)$" realizes: "^wp__.+$" # The following 3 guidance requirements enforce the requirement structure and attributes: - # req-Id: gd_req__req__structure - # req-Id: gd_req__requirements_attr_description - # req-Id: gd_req__req__linkage + # req- Id: gd_req__req__structure + # req- Id: gd_req__requirements_attr_description + # req- Id: gd_req__req__linkage # Requirements stkh_req: title: "Stakeholder Requirement" @@ -219,7 +219,7 @@ needs_types: safety: "^(QM|ASIL_B|ASIL_D)$" status: "^(valid|invalid)$" mandatory_links: - # req-Id: gd_req__req__linkage_fulfill + # req- Id: gd_req__req__linkage_fulfill satisfies: "^stkh_req__.*$" optional_options: codelink: "^.*$" @@ -560,10 +560,10 @@ needs_extra_links: # - condition: defines the condition that should be checked # - [and / or / xor / not] ############################################################## -# req-Id: gd_req__req__linkage_architecture -# req-Id: gd_req__req__linkage_safety +# req- Id: gd_req__req__linkage_architecture +# req- Id: gd_req__req__linkage_safety graph_checks: - # req-Id: gd_req__req__linkage_safety + # req- Id: gd_req__req__linkage_safety req_safety_linkage: needs: include: "comp_req, feat_req" @@ -582,7 +582,7 @@ graph_checks: condition: "status == valid" check: satisfies: "status == valid" - # req-Id: gd_req__req__linkage_architecture + # req- Id: 
gd_req__req__linkage_architecture arch_safety_linkage: needs: include: "comp_req, feat_req" diff --git a/src/extensions/score_metamodel/tests/test_rules_file_based.py b/src/extensions/score_metamodel/tests/test_rules_file_based.py index f875e172..24aa324e 100644 --- a/src/extensions/score_metamodel/tests/test_rules_file_based.py +++ b/src/extensions/score_metamodel/tests/test_rules_file_based.py @@ -175,7 +175,7 @@ def test_rst_files( "Unable to extract test data from the rst file: " f"{rst_file}. Please check the file for the correct format." ) - print(f"RST Data: {rst_data}") + # print(f"RST Data: {rst_data}") app: SphinxTestApp = sphinx_app_setup(RST_DIR / rst_file) os.chdir(app.srcdir) # Change working directory to the source directory @@ -185,7 +185,7 @@ def test_rst_files( # Collect the warnings warnings = app.warning.getvalue().splitlines() - print(f"Warnings: {warnings}") + # print(f"Warnings: {warnings}") # Check if the expected warnings are present for warning_info in rst_data.warning_infos: From 93136e04a2cdb9cceccaeb39f8f150f91c91cd77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Fri, 6 Jun 2025 08:00:24 +0200 Subject: [PATCH 045/231] increase version & simplify json parsing (#79) --- MODULE.bazel | 2 +- src/incremental.py | 23 +---------------------- 2 files changed, 2 insertions(+), 23 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 77b1a272..e23f6c0c 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.3.2", + version = "0.3.3", compatibility_level = 0, ) diff --git a/src/incremental.py b/src/incremental.py index 90e9ff85..de5456dc 100644 --- a/src/incremental.py +++ b/src/incremental.py @@ -35,27 +35,6 @@ def get_env(name: str) -> str: return val -def transform_env_str_to_dict(external_needs_source: str) -> list[dict[str, str]]: - """ - Transforms the 'string' we get from 'docs.bzl' back into something we can parse easliy inside 
sphinx/python - !! HACK: This truly isn't great !! - """ - transformed_dicts: list[dict[str, str]] = [] - dict_list = [ - x.split(",") - for x in external_needs_source.replace("]", "") - .replace("[", "") - .replace("{", "") - .split("}") - ] - for inner_dict in dict_list: - kv_splits = [kv.split(":", 1) for kv in inner_dict if len(inner_dict) > 1] - single_dict = {key_value[0]: key_value[1] for key_value in kv_splits} - if single_dict: - transformed_dicts.append(single_dict) - return transformed_dicts - - if __name__ == "__main__": # Add debuging functionality parser = argparse.ArgumentParser() @@ -94,7 +73,7 @@ def transform_env_str_to_dict(external_needs_source: str) -> list[dict[str, str] "auto", "--conf-dir", get_env("CONF_DIRECTORY"), - f"--define=external_needs_source={json.dumps(transform_env_str_to_dict(get_env('EXTERNAL_NEEDS_INFO')))}", + f"--define=external_needs_source={get_env('EXTERNAL_NEEDS_INFO')}", ] # configure sphinx build with GitHub user and repo from CLI From 5ea6264b563846eb94d36d9b80d7c5937618c4de Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Fri, 13 Jun 2025 08:25:30 +0200 Subject: [PATCH 046/231] add requirements (#67) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Maximilian Sören Pollak --- .vscode/settings.json | 8 +- MODULE.bazel | 1 + docs/BUILD | 15 +- docs/conf.py | 2 + docs/{ => how-to-integrate}/example/index.rst | 6 +- .../example/testing/index.rst | 9 +- .../getting_started.md | 40 +- docs/how-to-integrate/index.rst | 17 + docs/{faq/index.rst => how-to-use/faq.rst} | 0 docs/how-to-use/index.rst | 12 + docs/index.rst | 32 +- docs/product/capabilities.md | 69 ++ .../extensions/data_flow.png | Bin .../extensions/extension_guide.md | 0 .../extensions/header_service.md | 0 .../extensions/index.rst | 0 .../extensions/metamodel.md | 2 - .../extensions/rst_filebased_testing.md | 0 .../extensions/source_code_linker.md | 0 docs/{docs-as-code => product}/index.rst | 26 +- 
docs/product/requirements.rst | 637 ++++++++++++++++ .../checks/attributes_format.py | 4 +- .../score_metamodel/checks/check_options.py | 7 +- src/extensions/score_metamodel/metamodel.yaml | 710 +++++++++--------- .../parse_source_files.py | 6 + 25 files changed, 1182 insertions(+), 421 deletions(-) rename docs/{ => how-to-integrate}/example/index.rst (95%) rename docs/{ => how-to-integrate}/example/testing/index.rst (91%) rename docs/{docs-as-code => how-to-integrate}/getting_started.md (83%) create mode 100644 docs/how-to-integrate/index.rst rename docs/{faq/index.rst => how-to-use/faq.rst} (100%) create mode 100644 docs/how-to-use/index.rst create mode 100644 docs/product/capabilities.md rename docs/{docs-as-code => product}/extensions/data_flow.png (100%) rename docs/{docs-as-code => product}/extensions/extension_guide.md (100%) rename docs/{docs-as-code => product}/extensions/header_service.md (100%) rename docs/{docs-as-code => product}/extensions/index.rst (100%) rename docs/{docs-as-code => product}/extensions/metamodel.md (95%) rename docs/{docs-as-code => product}/extensions/rst_filebased_testing.md (100%) rename docs/{docs-as-code => product}/extensions/source_code_linker.md (100%) rename docs/{docs-as-code => product}/index.rst (73%) create mode 100644 docs/product/requirements.rst diff --git a/.vscode/settings.json b/.vscode/settings.json index a3c7371a..c1b5cceb 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -32,5 +32,11 @@ "python.testing.unittestEnabled": false, "python.testing.pytestEnabled": true, "bazel.lsp.command": "bazel", - "bazel.lsp.args": ["run", "//:starpls_server"] + "bazel.lsp.args": [ + "run", + "//:starpls_server" + ], + + // Disable internal type checking, since we use basedpyright + "python.analysis.typeCheckingMode": "off" } diff --git a/MODULE.bazel b/MODULE.bazel index e23f6c0c..24182c5e 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -96,3 +96,4 @@ bazel_dep(name = "score_platform", version = "0.1.1") # Grab 
dash bazel_dep(name = "score_dash_license_checker", version = "0.1.1") +bazel_dep(name = "score_process", version = "0.2.0") diff --git a/docs/BUILD b/docs/BUILD index 56c0f2c8..a3afa873 100644 --- a/docs/BUILD +++ b/docs/BUILD @@ -31,17 +31,30 @@ docs( "json_url": "https://eclipse-score.github.io/score/main/needs.json", "id_prefix": "score_", }, + { + "base_url": "https://eclipse-score.github.io/process_description/main", + "json_url": "https://eclipse-score.github.io/process_description/main/needs.json", + "id_prefix": "process_", + }, ], }, { "suffix": "release", # The version imported from MODULE.bazel - "target": ["@score_platform//docs:docs_needs"], + "target": [ + "@score_platform//docs:docs_needs", + "@score_process//process:docs_needs_latest", + ], "external_needs_info": [ { "base_url": "https://eclipse-score.github.io/score/main", "json_path": "/score_platform~/docs/docs_needs/_build/needs/needs.json", "id_prefix": "score_", }, + { + "base_url": "https://eclipse-score.github.io/process_description/main", + "json_path": "/score_process~/process/docs_needs_latest/_build/needs/needs.json", + "id_prefix": "process_", + }, ], }, ], diff --git a/docs/conf.py b/docs/conf.py index bac0a5ed..bc919911 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -40,6 +40,8 @@ "score_layout", ] +myst_enable_extensions = ["colon_fence"] + exclude_patterns = [ # The following entries are not required when building the documentation via 'bazel # build //docs:docs', as that command runs in a sandboxed environment. 
However, when diff --git a/docs/example/index.rst b/docs/how-to-integrate/example/index.rst similarity index 95% rename from docs/example/index.rst rename to docs/how-to-integrate/example/index.rst index 24641d3b..51379cf8 100644 --- a/docs/example/index.rst +++ b/docs/how-to-integrate/example/index.rst @@ -30,7 +30,7 @@ This is a rendered example of the 'examples/linking-both' folder using the `docs Some content to make sure we also can render this This is a link to an external need inside the 'score' documentation. - :need:`SCORE_feat_req__kvs__config_file`. + :need:`SCORE_feat_req__persistency__config_file`. Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ @@ -42,8 +42,8 @@ This is a rendered example of the 'examples/linking-both' folder using the `docs :safety: ASIL_D :satisfies: SCORE_stkh_req__overall_goals__reuse_of_app_soft :status: invalid - - With this requirement we can check if the removal of the prefix is working correctly. + + With this requirement we can check if the removal of the prefix is working correctly. It should remove id_prefix (SCORE _) as it's defined inside the BUILD file and remove it before it checks the leftover value against the allowed defined regex in the metamodel Note: The ID is different here as the 'folder structure' is as well diff --git a/docs/example/testing/index.rst b/docs/how-to-integrate/example/testing/index.rst similarity index 91% rename from docs/example/testing/index.rst rename to docs/how-to-integrate/example/testing/index.rst index 0a10a919..802a7ced 100644 --- a/docs/example/testing/index.rst +++ b/docs/how-to-integrate/example/testing/index.rst @@ -23,9 +23,9 @@ This example will help catch things and bugs when rst's are defined inside a fol :rationale: A simple requirement we need to enable a documentation build :reqtype: Functional - Some content to make sure we also can render this. + Some content to make sure we also can render this. 
This is a link to an external need inside the 'score' documentation. - :need:`SCORE_feat_req__kvs__config_file`. + :need:`SCORE_feat_req__persistency__config_file`. Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ @@ -35,9 +35,8 @@ This example will help catch things and bugs when rst's are defined inside a fol :security: YES :safety: ASIL_D :satisfies: SCORE_stkh_req__overall_goals__reuse_of_app_soft - :status: invalid + :status: invalid - With this requirement we can check if the removal of the prefix is working correctly. + With this requirement we can check if the removal of the prefix is working correctly. It should remove id_prefix (SCORE _) as it's defined inside the BUILD file and remove it before it checks the leftover value against the 'allowed' defined regex in the metamodel - diff --git a/docs/docs-as-code/getting_started.md b/docs/how-to-integrate/getting_started.md similarity index 83% rename from docs/docs-as-code/getting_started.md rename to docs/how-to-integrate/getting_started.md index 74d2e316..a8ff191d 100644 --- a/docs/docs-as-code/getting_started.md +++ b/docs/how-to-integrate/getting_started.md @@ -95,9 +95,9 @@ bazel build //path/to/BUILD-file:docs_latest # documentation at 'bazel-bin/

-> ### *For the full example as well as more complex ones, check out the {doc}`example <../example/index>` +> ### *For the full example as well as more complex ones, check out the {doc}`example ` ---- +--- ### Available Targets @@ -129,39 +129,3 @@ The `docs()` macro accepts the following arguments: | `docs_targets` | List of dictionaries which allows multi-repo setup | Yes | - | | `source_files_to_scan_for_needs_links` | List of targets,globs,filegroups that the 'source_code_linker' should parse | No | `[]` | | `visibility` | Bazel visibility | No | `None` | - ---- - - -## Available Extensions -This module includes several custom Sphinx extensions to enhance your documentation: - -### Score Layout Extension - -Custom layout options for Sphinx HTML output are defined in `score_layout` - - -### Score Header Service - -Consistent header styling across documentation pages. -{doc}`Learn more ` - -### Score Metamodel - -Validation and checking of documentation structure against a defined Metamodel. -{doc}`Learn more ` - -### Score Source Code Linker - -Links between requirements documentation and source code implementations. -{doc}`Learn more - -### Score PlantUML - -Integration with PlantUML for generating diagrams. - - -### Score Draw UML Functions - -Helper functions for creating UML diagrams. - diff --git a/docs/how-to-integrate/index.rst b/docs/how-to-integrate/index.rst new file mode 100644 index 00000000..d88cd3bc --- /dev/null +++ b/docs/how-to-integrate/index.rst @@ -0,0 +1,17 @@ +.. _how-to-integrate: + +How to Integrate +================== + + +Here we'll document how to integrate the docs-as-code tooling into your S-CORE repository. + +For now here are some :ref:`example ` files to get you started. + +See also :doc:`getting_started`. + +.. 
toctree:: + :hidden: + + example/index + getting_started diff --git a/docs/faq/index.rst b/docs/how-to-use/faq.rst similarity index 100% rename from docs/faq/index.rst rename to docs/how-to-use/faq.rst diff --git a/docs/how-to-use/index.rst b/docs/how-to-use/index.rst new file mode 100644 index 00000000..656b3ce1 --- /dev/null +++ b/docs/how-to-use/index.rst @@ -0,0 +1,12 @@ +.. _how-to-use: + +How To Use +========== + +Interesting Links: +* https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html + +.. toctree:: + :hidden: + + faq diff --git a/docs/index.rst b/docs/index.rst index 15b7d00f..de9ce06c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -22,34 +22,30 @@ Welcome to the docs-as-code documentation, it is organized into several key sect .. grid-item-card:: - **Example** + **How to Integrate** ^^^ - See the rendered version of the files inside :ref:`example`. + Learn how to integrate this docs-as-code tooling into your S-CORE repository. .. grid-item-card:: - **FAQ** + How to Use ^^^ - Find answers to frequently asked questions and common issues. + Learn how to write documentation using the docs-as-code approach in S-CORE. .. grid-item-card:: - **Docs-as-Code Documentation** + :ref:`Product documentation ` ^^^ - Learn how to use this module with how-to guides and tutorials. - :ref:`docs-as-code` + Find the product documentation for docs-as-code, including tool requirements and architecture. +.. dropdown:: Sitemap + .. toctree:: + :maxdepth: 5 + :includehidden: + :titlesonly: -.. dropdown:: Click to see details - - .. 
toctree:: - :maxdepth: 2 - :titlesonly: - - example/index - faq/index - docs-as-code/index - docs-as-code/extensions/index - + how-to-integrate/index + how-to-use/index + product/index diff --git a/docs/product/capabilities.md b/docs/product/capabilities.md new file mode 100644 index 00000000..528fc06d --- /dev/null +++ b/docs/product/capabilities.md @@ -0,0 +1,69 @@ +(capabilities)= + +# 📘 S-CORE Docs-as-Code – Capabilities + +This document outlines the key capabilities of the S-CORE docs-as-code tooling. +Core capabilities of [Sphinx](https://www.sphinx-doc.org/) and [sphinx-needs](https://sphinx-needs.readthedocs.io/) are assumed and extended with S-CORE-specific conventions and infrastructure. + +## Input Format + +- Supports both reStructuredText (rst) and Markdown (CommonMark/GFM) + +## Build + +- Ensures deterministic output: identical input produces identical output +- ✅ Uses version-controlled configuration to ensure reproducibility +- ✅ Behaves consistently across different repositories and environments (e.g., local, CI/CD) +- ✅ Supports incremental builds to provide fast feedback during authoring +- ✅ Seamless integration with the Bazel build system + +## Configuration + +- ✅ Uses a single, shared, version-controlled configuration file +- ✅ Allows repository-specific overrides when needed +- ✅ Supports easy configuration of the metamodel (e.g., used roles, types) +- ✅ Ensures consistency with process and quality requirements + + + +## Cross-Repository Linking + +- ✅ Supports unidirectional links to: + - Versioned documentation (for tagged releases) + - Latest documentation (e.g. 
`main` branch) +- ✅ Keeps linked repositories and their rendered websites unaffected by incoming references +- Allows bidirectional links for integration-focused documentation +- In addition to high level versioning of repositories, supports verifying suspect links on a requirement level + +## Previews & Feedback + +- ✅ Automatically generates documentation previews for pull requests +- Previews are available within minutes of each push +- ✅ Preview output matches final published artifacts (identical rendering) + +## IDE & Developer Experience + +- ✅ Live preview functionality for documentation authors +- ✅ Integrated linting for: + - Syntax and formatting (reST and Markdown) + - Internal and external link validity + - ✅ Metamodel compliance +- Auto-completion support for: + - Cross-repository links + - Sphinx directives and roles (planned) + +## Architecture Visualization + +- ✅ Generates architecture diagrams from structured models +- Integrates diagram tools such as PlantUML and Mermaid + +## Code Integration + +- ✅ Enables traceability between documentation and source code by linking from implementation to requirements + + +## ⚙️ Bazel Support +*Used as the core build system across S-CORE* + +- ✅ Automatically validates changes to the S-CORE Bazel registry +- ✅ IDE support for editing Bazel `BUILD` and `.bzl` files (via LSP, plugins) diff --git a/docs/docs-as-code/extensions/data_flow.png b/docs/product/extensions/data_flow.png similarity index 100% rename from docs/docs-as-code/extensions/data_flow.png rename to docs/product/extensions/data_flow.png diff --git a/docs/docs-as-code/extensions/extension_guide.md b/docs/product/extensions/extension_guide.md similarity index 100% rename from docs/docs-as-code/extensions/extension_guide.md rename to docs/product/extensions/extension_guide.md diff --git a/docs/docs-as-code/extensions/header_service.md b/docs/product/extensions/header_service.md similarity index 100% rename from 
docs/docs-as-code/extensions/header_service.md rename to docs/product/extensions/header_service.md diff --git a/docs/docs-as-code/extensions/index.rst b/docs/product/extensions/index.rst similarity index 100% rename from docs/docs-as-code/extensions/index.rst rename to docs/product/extensions/index.rst diff --git a/docs/docs-as-code/extensions/metamodel.md b/docs/product/extensions/metamodel.md similarity index 95% rename from docs/docs-as-code/extensions/metamodel.md rename to docs/product/extensions/metamodel.md index 4f0015cb..055f0a4a 100644 --- a/docs/docs-as-code/extensions/metamodel.md +++ b/docs/product/extensions/metamodel.md @@ -4,8 +4,6 @@ This extension provides the Metamodel and corresponding checks of the SCORE project as a Sphinx extension. -See [Getting started](../getting_started) for more information on why we use extensions. - ## Naming * check: A check is a function that checks compliance to a specific rule. diff --git a/docs/docs-as-code/extensions/rst_filebased_testing.md b/docs/product/extensions/rst_filebased_testing.md similarity index 100% rename from docs/docs-as-code/extensions/rst_filebased_testing.md rename to docs/product/extensions/rst_filebased_testing.md diff --git a/docs/docs-as-code/extensions/source_code_linker.md b/docs/product/extensions/source_code_linker.md similarity index 100% rename from docs/docs-as-code/extensions/source_code_linker.md rename to docs/product/extensions/source_code_linker.md diff --git a/docs/docs-as-code/index.rst b/docs/product/index.rst similarity index 73% rename from docs/docs-as-code/index.rst rename to docs/product/index.rst index 73867ed8..00b7b660 100644 --- a/docs/docs-as-code/index.rst +++ b/docs/product/index.rst @@ -11,26 +11,33 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -.. _docs-as-code: +.. _product: ============ -Docs-as-Code +Docs-as-Code ============ -.. grid:: 1 1 2 2 +.. 
grid:: 1 1 3 3 :class-container: score-grid .. grid-item-card:: - Getting started with docs-as-code + Features ^^^ - Start here to learn about general usage of the Docs-as-Code Module - :ref:`Get started `. + High level view of docs-as-code :ref:`capabilities `. + + + .. grid-item-card:: + + Requirements + ^^^ + Detailed list of docs-as-code tool :ref:`requirements`. + .. grid-item-card:: - Information about Extensions + Information about Extensions ^^^ Head over to our extensions to learn about what we offer and how to configure,extend or integrate them. :ref:`See our extensions here ` @@ -40,5 +47,8 @@ Docs-as-Code .. toctree:: :maxdepth: 1 :caption: Contents: + :hidden: - Getting Started + capabilities + requirements + extensions/index diff --git a/docs/product/requirements.rst b/docs/product/requirements.rst new file mode 100644 index 00000000..a8125516 --- /dev/null +++ b/docs/product/requirements.rst @@ -0,0 +1,637 @@ +.. _requirements: + +================================= +Requirements (Process Compliance) +================================= + +📈 Status +########## + +This section provides an overview of current process requirements and their clarification & implementation status. + +.. 
needbar:: Docs-As-Code Requirements Status + :stacked: + :show_sum: + :xlabels: FROM_DATA + :ylabels: FROM_DATA + :colors: green,orange,red + :legend: + :transpose: + :xlabels_rotation: 45 + :horizontal: + + , implemented , partially / not quite clear , not implemented / not clear + Common, 'tool_req__docs' in id and implemented == "YES" and "Common Attributes" in tags, 'tool_req__docs' in id and implemented == "PARTIAL" and "Common Attributes" in tags, 'tool_req__docs' in id and implemented == "NO" and "Common Attributes" in tags + Doc, 'tool_req__docs' in id and implemented == "YES" and "Documents" in tags, 'tool_req__docs' in id and implemented == "PARTIAL" and "Documents" in tags, 'tool_req__docs' in id and implemented == "NO" and "Documents" in tags + Req, 'tool_req__docs' in id and implemented == "YES" and "Requirements" in tags, 'tool_req__docs' in id and implemented == "PARTIAL" and "Requirements" in tags, 'tool_req__docs' in id and implemented == "NO" and "Requirements" in tags + Arch, 'tool_req__docs' in id and implemented == "YES" and "Architecture" in tags, 'tool_req__docs' in id and implemented == "PARTIAL" and "Architecture" in tags, 'tool_req__docs' in id and implemented == "NO" and "Architecture" in tags + DDesign, 'tool_req__docs' in id and implemented == "YES" and "Detailed Design & Code" in tags, 'tool_req__docs' in id and implemented == "PARTIAL" and "Detailed Design & Code" in tags, 'tool_req__docs' in id and implemented == "NO" and "Detailed Design & Code" in tags + TVR, 'tool_req__docs' in id and implemented == "YES" and "Tool Verification Reports" in tags, 'tool_req__docs' in id and implemented == "PARTIAL" and "Tool Verification Reports" in tags, 'tool_req__docs' in id and implemented == "NO" and "Tool Verification Reports" in tags + Other, 'tool_req__docs' in id and implemented == "YES" and "Process / Other" in tags, 'tool_req__docs' in id and implemented == "PARTIAL" and "Process / Other" in tags, 'tool_req__docs' in id and implemented == 
"NO" and "Process / Other" in tags + SftyAn, 'tool_req__docs' in id and implemented == "YES" and "Safety Analysis" in tags, 'tool_req__docs' in id and implemented == "PARTIAL" and "Safety Analysis" in tags, 'tool_req__docs' in id and implemented == "NO" and "Safety Analysis" in tags + + + +🗂️ Common Attributes +##################### + +.. note:: + To stay consistent with sphinx-needs (the tool behind docs-as-code), we'll use `need` + for any kind of model element like a requirement, an architecture element or a + feature description. + + +---------------------- +🔢 ID +---------------------- + +.. tool_req:: Enforces need ID uniqueness + :id: tool_req__docs_common_attr_id + :implemented: YES + :tags: Common Attributes + :satisfies: + PROCESS_gd_req__req__attr_uid, + PROCESS_gd_req__tool__attr_uid, + PROCESS_gd_req__arch__attribute_uid + :parent_has_problem: NO + :parent_covered: YES: together with tool_req__docs_attr_id_scheme + + Docs-as-Code shall enforce that all Need IDs are globally unique across all included + documentation instances. + + .. note:: + Within each docs-instance (as managed by sphinx-needs), IDs are guaranteed to be unique. + When linking across instances, unique prefixes are automatically applied to maintain global uniqueness. + +.. tool_req:: Enforces need ID scheme + :id: tool_req__docs_common_attr_id_scheme + :implemented: PARTIAL + :tags: Common Attributes + :satisfies: PROCESS_gd_req__req__attr_uid, PROCESS_gd_req__arch__attribute_uid + :parent_has_problem: YES: Parents are not aligned + :parent_covered: YES: together with tool_req__docs_attr_id + + Docs-as-Code shall enforce that Need IDs follow the following naming scheme: + + .. TODO: is it "indicating" or "perfect match"? + e.g. workflow -> wf would be ok for "indicating", but not for "perfect match" + + * A prefix indicating the need type (e.g. 
`feature__`) + * A middle part indicating the hierarchical structure of the need: + * For requirements: a portion of the feature tree or a component acronym + * For architecture elements: the final part of the feature tree + * Additional descriptive text to ensure human readability + + +---------------------- +🏷️ Title +---------------------- + +.. tool_req:: Enforces title wording rules + :id: tool_req__docs_common_attr_title + :implemented: PARTIAL + :tags: Common Attributes + :satisfies: PROCESS_gd_req__requirements_attr_title + :parent_has_problem: NO + :parent_covered: NO: Can not ensure summary + + + Docs-as-Code shall enforce that Need titles do not contain the following words: + + * shall + * must + * will + + +--------------------------- +📝 Description +--------------------------- + +.. tool_req:: Enforces presence of description + :id: tool_req__docs_common_attr_description + :tags: Common Attributes + :parent_covered: NO: Can not cover 'ISO/IEC/IEEE/29148' + :implemented: NO + + Docs-as-Code shall enforce that each Need contains a description (content). + +---------------------------- +🔒 Security Classification +---------------------------- + +.. tool_req:: Security: enforce classification + :id: tool_req__docs_common_attr_security + :implemented: PARTIAL + :tags: Common Attributes + :satisfies: + PROCESS_gd_req__requirements_attr_security, + PROCESS_gd_req__arch_attr_security, + :parent_has_problem: YES: Architecture talks about requirements. Parents not aligned. + + Docs-as-Code shall enforce that the ``security`` attribute has one of the following values: + + * YES + * NO + + This rule applies to: + + * all requirement types defined in :need:`tool_req__docs_req_types`, except process requirements. + * all architecture elements (TODO; see https://github.com/eclipse-score/process_description/issues/34) + + +--------------------------- +🛡️ Safety Classification +--------------------------- + +.. 
tool_req:: Safety: enforce classification + :id: tool_req__docs_common_attr_safety + :tags: Common Attributes + :implemented: YES + :parent_covered: YES + :parent_has_problem: YES: Architecture talks about requirements. Parents not aligned + :satisfies: + PROCESS_gd_req__req__attr_safety, + PROCESS_gd_req__arch__attr_safety + + Docs-as-Code shall enforce that the ``safety`` attribute has one of the following values: + + * QM + * ASIL_B + * ASIL_D + + This rule applies to: + + * all requirement types defined in :need:`tool_req__docs_req_types`, except process requirements. + * all architecture elements (TODO; see https://github.com/eclipse-score/process_description/issues/34) + +---------- +🚦 Status +---------- + +.. tool_req:: Status: enforce attribute + :id: tool_req__docs_common_attr_status + :tags: Common Attributes + :implemented: YES + :parent_has_problem: YES: Architecture talks about requirements, currently we have valid|draft + :parent_covered: YES + :satisfies: + PROCESS_gd_req__req__attr_status, + PROCESS_gd_req__arch__attr_status, + + Docs-as-Code shall enforce that the ``status`` attribute has one of the following values: + + * valid + * invalid + + This rule applies to: + + * all requirement types defined in :need:`tool_req__docs_req_types`, except process requirements. + * all architecture elements (TODO; see https://github.com/eclipse-score/process_description/issues/34) + +📚 Documents +############# + +.. tool_req:: Document Types + :id: tool_req__docs_doc_types + :tags: Documents + :implemented: YES + + Docs-as-Code shall support the following document types: + + * Generic Document (document) + + +.. NOTE: Header_service trigger/working execution is disabled +.. 
tool_req:: Mandatory Document attributes + :id: tool_req__docs_doc_attr + :tags: Documents + :implemented: NO + :satisfies: + PROCESS_gd_req__doc_author, + PROCESS_gd_req__doc_approver, + PROCESS_gd_req__doc_reviewer, + :parent_covered: NO + :parent_has_problem: YES: Which need type to use for this? + + Docs-as-Code shall enforce that each document model element has the following attributes: + + * author + * approver + * reviewer + + +.. tool_req:: Document author is autofilled + :id: tool_req__docs_doc_attr_author_autofill + :tags: Documents + :implemented: NO + :satisfies: PROCESS_gd_req__doc_author + :parent_covered: YES: Together with tool_req__docs_doc_attr + :parent_has_problem: YES: Unclear how the contribution % is counted and how to accumulate %. Committer is a reserved role. + + Docs-as-Code shall provide an automatic mechanism to determine document authors. + + Contributors responsible for more than 50% of the content shall be considered the + document author. Contributors are accumulated over all commits to the file containing + the document. + + +.. tool_req:: Document approver is autofilled + :id: tool_req__docs_doc_attr_approver_autofill + :tags: Documents + :implemented: NO + :satisfies: PROCESS_gd_req__doc_approver + :parent_covered: YES: Together with tool_req__docs_doc_attr + :parent_has_problem: YES: CODEOWNER is Github specific. + + Docs-as-Code shall provide an automatic mechanism to determine the document approver. + + The approver shall be the last approver listed in *CODEOWNERS* of the file containing + the document. The determination is based on the last pull request (PR) that modified + the relevant file. + + +.. 
tool_req:: Document reviewer is autofilled + :id: tool_req__docs_doc_attr_reviewer_autofill + :tags: Documents + :implemented: NO + :satisfies: PROCESS_gd_req__doc_reviewer + :parent_covered: YES: Together with tool_req__docs_doc_attr + :parent_has_problem: NO + + Docs-as-Code shall provide an automatic mechanism to determine the document reviewers. + + The ``reviewer`` attribute shall include all reviewers who are not listed as + approvers. The determination is based on the last pull request (PR) that modified the + relevant file. + + +📋 Requirements +################ + +------------------------- +🔢 Requirement Types +------------------------- + +.. tool_req:: Requirements Types + :id: tool_req__docs_req_types + :tags: Requirements + :implemented: YES + :satisfies: PROCESS_gd_req__req__structure + :parent_has_problem: NO + :parent_covered: YES: Together with tool_req__docs_linkage + + Docs-as-Code shall support the following requirement types: + + * Stakeholder requirement (stkh_req) + * Feature requirement (feat_req) + * Component requirement (comp_req) + * Assumption of use requirement (aou_req) + * Process requirement (gd_req) + * Tool requirement (tool_req) + +------------------------- +🏷️ Attributes +------------------------- + +.. tool_req:: Enforces rationale attribute + :id: tool_req__docs_req_attr_rationale + :tags: Requirements + :implemented: YES + :parent_covered: NO: Can not ensure correct reasoning + :satisfies: PROCESS_gd_req__req__attr_rationale + + Docs-as-Code shall enforce that each stakeholder requirement contains a ``rationale`` attribute. + +.. tool_req:: Enforces requirement type classification + :id: tool_req__docs_req_attr_reqtype + :tags: Requirements + :implemented: PARTIAL + :parent_has_problem: YES: tool_req shall not have 'reqtype' as discussed. process not excluded! 
+ :satisfies: PROCESS_gd_req__req__attr_type + + Docs-as-Code shall enforce that each need of type :need:`tool_req__docs_req_types` has + a ``reqtype`` attribute with one of the following values: + + * Functional + * Interface + * Process + * Legal + * Non-Functional + +.. tool_req:: Enables marking requirements as "covered" + :id: tool_req__docs_req_attr_reqcov + :tags: Requirements + :implemented: NO + :satisfies: PROCESS_gd_req__req__attr_req_cov + :parent_has_problem: YES: Not understandable what is required. + + .. warning:: + This requirement is not yet specified. The corresponding parent requirement is + unclear and must be clarified before a precise tool requirement can be defined. + +.. tool_req:: Support requirements test coverage + :id: tool_req__docs_req_attr_testcov + :tags: Requirements + :implemented: PARTIAL + :parent_covered: YES + :satisfies: PROCESS_gd_req__req__attr_test_covered + + Docs-As-Code shall allow for every need of type :need:`tool_req__docs_req_types` to + have a ``testcovered`` attribute, which must be one of: + + * Yes + * No + +------------------------- +🔗 Links +------------------------- + +.. tool_req:: Enables needs linking via satisfies attribute + :id: tool_req__docs_req_link_satisfies_allowed + :tags: Requirements + :implemented: PARTIAL + :satisfies: PROCESS_gd_req__req__linkage, PROCESS_gd_req__req__traceability + :parent_covered: YES + :parent_has_problem: YES: Mandatory for all needs? Especially some tool_reqs do not have a process requirement. + + Docs-as-Code shall enforce that linking between model elements via the ``satisfies`` + attribute follows defined rules. + + Allowed source and target combinations are defined in the following table: + + .. 
table:: + :widths: auto + + ======================== =========================== + Requirement Type Allowed Link Target + ======================== =========================== + Feature Requirements Stakeholder Requirements + Component Requirements Feature Requirements + Process Requirements Workflows + Tooling Requirements Process Requirements + ======================== =========================== + +🏛️ Architecture +################ + +---------------------- +🔢 Architecture Types +---------------------- + +.. tool_req:: Architecture Types + :id: tool_req__docs_arch_types + :tags: Architecture + :satisfies: + PROCESS_gd_req__arch__hierarchical_structure, + PROCESS_gd_req__arch__viewpoints, + PROCESS_gd_req__arch__build_blocks, + PROCESS_gd_req__arch__build_blocks_corr + :implemented: PARTIAL + :parent_has_problem: YES: Referenced in https://github.com/eclipse-score/process_description/issues/34 + :parent_covered: NO + :status: invalid + + .. warning:: + **OPEN ISSUE** → Architecture types are not yet understood + See: https://github.com/eclipse-score/process_description/issues/34 + + The list below is tentative at best. + + Docs-as-Code shall support the following architecture types: + + * Feature Architecture Static View (feat_arch_static) - does this count as an architecture type, or is it a view? + * Feature Architecture Dynamic View (feat_arch_dyn) - the views below have view in their type name!! + * Logical Architecture Interfaces (logic_arc_int) - That's a single interface and not "interfaces"? Or is it a view? + * Logical Architecture Interface Operation (logic_arc_int_op) + * Module Architecture Static View (mod_view_static) + * Module Architecture Dynamic View (mod_view_dyn) + * Component Architecture Static View (comp_arc_sta) + * Component Architecture Dynamic View (comp_arc_dyn) + * Component Architecture Interfaces (comp_arc_int) + * Component Architecture Interface Operation (comp_arc_int_op) + * Real interface?? 
(see gd_req__arch__build_blocks_corr) + * Feature Architecture Interface?? (see gd_req__arch__traceability) + + +------------------------ +🔗 Linkage +------------------------ + +.. tool_req:: Mandatory Architecture Attribute: fulfils + :id: tool_req__docs_arch_link_fulfils + :tags: Architecture + :implemented: PARTIAL + :satisfies: + PROCESS_gd_req__arch__linkage_requirement_type, + PROCESS_gd_req__arch__attr_fulfils, + PROCESS_gd_req__arch__traceability, + :parent_covered: YES + :parent_has_problem: YES: Attribute is not mentioned. Link direction not clear. Fig. 22 does not contain 'fulfils' + + Docs-as-Code shall enforce that linking via the ``fulfils`` attribute follows defined rules. + + Allowed source and target combinations are defined in the following table: + + .. table:: + :widths: auto + + ==================================== ========================================== + Requirement Type Allowed Link Target + ==================================== ========================================== + Functional feature requirements Static / dynamic feature architecture + Interface feature requirements Interface feature architecture + Functional component requirements Static / dynamic component architecture + Interface component requirements Interface component architecture + ==================================== ========================================== + +.. tool_req:: Mandate links for safety + :id: tool_req__docs_arch_link_safety_to_req + :tags: Architecture + :implemented: PARTIAL + :satisfies: PROCESS_gd_req__arch__linkage_requirement + :parent_covered: YES + :parent_has_problem: NO + + Docs-as-Code shall enforce that architecture model elements of type + :need:`tool_req__docs_arch_types` with ``safety != QM`` are linked to requirements of + type :need:`tool_req__docs_req_types` that are also safety relevant (``safety != + QM``). + +.. 
tool_req:: Restrict links for safety requirements + :id: tool_req__docs_req_arch_link_safety_to_arch + :tags: Architecture + :implemented: PARTIAL + :satisfies: PROCESS_gd_req__arch__linkage_safety_trace + :parent_covered: NO + :parent_has_problem: NO + + Docs-as-Code shall enforce that architecture model elements of type + :need:`tool_req__docs_arch_types` with ``safety != QM`` can only be linked to other + architecture model elements with ``safety != QM``. + +.. tool_req:: Security: Restrict linkage + :id: tool_req__docs_arch_link_security + :tags: Architecture + :implemented: NO + :parent_covered: YES + :satisfies: PROCESS_gd_req__arch__linkage_security_trace + + Docs-as-Code shall enforce that architecture elements with ``security == YES`` are + only linked to other architecture elements with ``security == YES``. + +---------------------- +🖼️ Diagram Related +---------------------- + +.. tool_req:: Support Diagram drawing of architecture + :id: tool_req__docs_arch_diag_draw + :tags: Architecture + :implemented: YES + :satisfies: PROCESS_doc_concept__arch__process, PROCESS_gd_req__arch__viewpoints + :parent_covered: YES + :parent_has_problem: NO + + Docs-as-Code shall enable the rendering of diagrams for the following architecture views: + + * Feature View & Component View: + * Static View + * Dynamic View + * Interface View + * Software Module View + * Platform View + + +💻 Detailed Design & Code +########################## + +---------------- +🔗 Code Linkage +---------------- + +.. tool_req:: Supports linking to source code + :tags: Detailed Design & Code + :id: tool_req__docs_dd_link_source_code_link + :implemented: PARTIAL + :parent_covered: YES + :satisfies: PROCESS_gd_req__req__attr_impl + + Docs-as-Code shall allow source code to link to requirements. + + A backlink to the corresponding source code location in GitHub shall be generated in + the output as an attribute of the linked requirement. + +.. 
tool_req:: Supports linking to test cases + :id: tool_req__docs_dd_link_testcase + :tags: Detailed Design & Code + :implemented: NO + :parent_has_problem: YES: Test vs Testcase unclear. Direction unclear. Goal unclear. + :satisfies: PROCESS_gd_req__req__attr_testlink + + Docs-as-Code shall allow requirements of type :need:`tool_req__docs_req_types` to + include a ``testlink`` attribute. + + This attribute shall support linking test cases to requirements. + +🧪 Tool Verification Reports +############################ + +.. they are so different, that they need their own section + +.. tool_req:: Tool Verification Report + :id: tool_req__docs_tvr_uid + :tags: Tool Verification Reports + :implemented: NO + :parent_covered: NO + :satisfies: PROCESS_gd_req__tool__attr_uid + + Docs-as-Code shall support the definition and management of Tool Verification Reports + (``tool_verification_report``). + +.. tool_req:: Enforce safety classification + :id: tool_req__docs_tvr_safety + :tags: Tool Verification Reports + :implemented: NO + :parent_has_problem: YES: Safety affected vs Safety relevance + :parent_covered: YES + :satisfies: PROCESS_gd_req__tool__attr_safety_affected + + Docs-as-Code shall enforce that every Tool Verification Report includes a + ``safety_affected`` attribute with one of the following values: + + * YES + * NO + +.. tool_req:: Enforce security classification + :id: tool_req__docs_tvr_security + :tags: Tool Verification Reports + :implemented: NO + :parent_covered: YES + :parent_has_problem: YES: Safety affected vs Safety relevance + :satisfies: PROCESS_gd_req__tool_attr_security_affected + + Docs-as-Code shall enforce that every Tool Verification Report includes a + ``security_affected`` attribute with one of the following values: + + * YES + * NO + +.. 
tool_req:: Enforce status classification + :id: tool_req__docs_tvr_status + :tags: Tool Verification Reports + :implemented: NO + :satisfies: PROCESS_gd_req__tool__attr_status + :parent_has_problem: NO + :parent_covered: YES + + Docs-as-Code shall enforce that every Tool Verification Report includes a ``status`` + attribute with one of the following values: + + * draft + * evaluated + * qualified + * released + * rejected + +⚙️ Process / Other +################### + +.. tool_req:: Workflow Types + :id: tool_req__docs_wf_types + :tags: Process / Other + :implemented: YES + + Docs-as-Code shall support the following workflow types: + + * Workflow (wf) + +.. tool_req:: Standard Requirement Types + :id: tool_req__docs_stdreq_types + :tags: Process / Other + :implemented: YES + + Docs-as-Code shall support the following requirement types: + + * Standard requirement (std_req) + + +🛡️ Safety Analysis +################### + +.. note:: + Safety analysis is not yet defined. This is just a placeholder for future + requirements. + + +.. +.. ------------------------------------------------------------------------ +.. + +.. needextend:: c.this_doc() and type == 'tool_req' + :safety: ASIL_B + :security: NO + +.. needextend:: c.this_doc() and type == 'tool_req' and "YES" in parent_has_problem + :status: invalid + +.. 
needextend:: c.this_doc() and type == 'tool_req' and not status + :status: valid diff --git a/src/extensions/score_metamodel/checks/attributes_format.py b/src/extensions/score_metamodel/checks/attributes_format.py index a0017406..55992d8b 100644 --- a/src/extensions/score_metamodel/checks/attributes_format.py +++ b/src/extensions/score_metamodel/checks/attributes_format.py @@ -11,11 +11,10 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* +from score_metamodel import CheckLogger, local_check from sphinx.application import Sphinx from sphinx_needs.data import NeedsInfoType -from score_metamodel import CheckLogger, local_check - # req-#id: gd_req__req__attr_uid @local_check @@ -46,6 +45,7 @@ def check_id_format(app: Sphinx, need: NeedsInfoType, log: CheckLogger): "workflow", "gd_chklst", "std_req", + "tool_req", "role", "doc_concept", "gd_temp", diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index 05a0504b..ca675cd5 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -11,10 +11,6 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* import re - -from sphinx.application import Sphinx -from sphinx_needs.config import NeedType -from sphinx_needs.data import NeedsInfoType from collections.abc import Generator from score_metamodel import ( @@ -22,6 +18,9 @@ default_options, local_check, ) +from sphinx.application import Sphinx +from sphinx_needs.config import NeedType +from sphinx_needs.data import NeedsInfoType FieldCheck = tuple[dict[str, str], bool] CheckingDictType = dict[str, list[FieldCheck]] diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index cc5c7025..fb09b29b 100644 --- 
a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -14,13 +14,15 @@ needs_types_base_options: optional_options: - source_code_link: "^https://github.com/eclipse-score/score/blob/.*$" + source_code_link: ^https://github.com/eclipse-score/.*$ # Custom semantic validation rules prohibited_words: + # req-Id: tool_req__docs_common_attr_title title: - shall - must - will + # req-Id: tool_req__docs_common_attr_description content: - just - about @@ -30,7 +32,6 @@ needs_types_base_options: - absolutely needs_types: - ############################################################################## # Process Metamodel ############################################################################## @@ -63,414 +64,445 @@ needs_types: links: "^.*$" # Standards + # req-Id: tool_req__docs_stdreq_types std_req: - title: "Standard Requirement" - prefix: "std_req__" + title: Standard Requirement + prefix: std_req__ mandatory_options: - id: "std_req__(iso26262|isosae21434|isopas8926|aspice_40)__[0-9a-zA-Z_-]*$" - status: "^(valid)$" + id: std_req__(iso26262|isosae21434|isopas8926|aspice_40)__[0-9a-zA-Z_-]*$ + status: ^(valid)$ optional_links: - links: "^.*$" + links: ^.*$ std_wp: - title: "Standard Work Product" - prefix: "std_wp__" + title: Standard Work Product + prefix: std_wp__ mandatory_options: - id: "std_wp__(iso26262|isosae21434|isopas8926|aspice_40)__[0-9a-z_]*$" - status: "^(valid)$" + id: std_wp__(iso26262|isosae21434|isopas8926|aspice_40)__[0-9a-z_]*$ + status: ^(valid)$ + # Workflow + # req-Id: tool_req__docs_wf_types workflow: - title: "Workflow" - prefix: "wf__" + title: Workflow + prefix: wf__ mandatory_options: - id: "^wf__[0-9a-z_]*$" - status: "^(valid|draft)$" + id: ^wf__[0-9a-z_]*$ + status: ^(valid|draft)$ mandatory_links: - input: "^wp__.*$" - output: "^wp__.*$" - approved_by: "^rl__.*$" - responsible: "^rl__.*$" + input: ^wp__.*$ + output: ^wp__.*$ + approved_by: ^rl__.*$ + responsible: ^rl__.*$ optional_links: - 
supported_by: "^rl__.*$" - contains: "^gd_(req|temp|chklst|guidl|meth)__.*$" - has: "^doc_(getstrt|concept)__.*$" + supported_by: ^rl__.*$ + contains: ^gd_(req|temp|chklst|guidl|meth)__.*$ + has: ^doc_(getstrt|concept)__.*$ + # Guidances gd_req: - title: "Process Requirements" - prefix: "gd_req__" + title: Process Requirements + prefix: gd_req__ mandatory_options: - id: "^gd_req__[0-9a-z_]*$" - status: "^(valid|draft)$" + id: ^gd_req__[0-9a-z_]*$ + # req-Id: tool_req__docs_common_attr_status + status: ^(valid|draft)$ optional_links: - complies: "^std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$" - satisfies: "^wf__.*$" + complies: ^std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ + # req-Id: tool_req__docs_req_link_satisfies_allowed + satisfies: ^wf__.*$ gd_temp: - title: "Process Template" - prefix: "gd_temp__" + title: Process Template + prefix: gd_temp__ mandatory_options: - id: "^gd_temp__[0-9a-z_]*$" - status: "^(valid|draft)$" + id: ^gd_temp__[0-9a-z_]*$ + status: ^(valid|draft)$ optional_links: - complies: "std_req__(iso26262|isodae21434|isopas8926|aspice_40)__.*$" + complies: std_req__(iso26262|isodae21434|isopas8926|aspice_40)__.*$ gd_chklst: - title: "Process Checklist" - prefix: "gd_chklst__" + title: Process Checklist + prefix: gd_chklst__ mandatory_options: - id: "^gd_chklst__[0-9a-z_]*$" - status: "^(valid|draft)$" + id: ^gd_chklst__[0-9a-z_]*$ + status: ^(valid|draft)$ optional_links: - complies: "std_req__(iso26262|isodae21434|isopas8926|aspice_40)__.*$" + complies: std_req__(iso26262|isodae21434|isopas8926|aspice_40)__.*$ gd_guidl: - title: "Process Guideline" - prefix: "gd_guidl__" + title: Process Guideline + prefix: gd_guidl__ mandatory_options: - id: "^gd_guidl__[0-9a-z_]*$" - status: "^(valid|draft)$" + id: ^gd_guidl__[0-9a-z_]*$ + status: ^(valid|draft)$ optional_links: - complies: "std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$" + complies: std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ gd_method: - 
title: "Process Method" - prefix: "gd_meth__" + title: Process Method + prefix: gd_meth__ mandatory_options: - id: "^gd_meth__[0-9a-z_]*$" - status: "^(valid|draft)$" + id: ^gd_meth__[0-9a-z_]*$ + status: ^(valid|draft)$ optional_links: - complies: "std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$" + complies: std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ # S-CORE Workproduct workproduct: - title: "Workproduct" - prefix: "wp__" + title: Workproduct + prefix: wp__ mandatory_options: - id: "^wp__[0-9a-z_]*$" - status: "^(valid|draft)$" + id: ^wp__[0-9a-z_]*$ + status: ^(valid|draft)$ optional_links: - complies: "std_(wp__iso26262|wp__isosae21434|wp__isopas8926|iic_aspice_40)__.*$" + complies: std_(wp__iso26262|wp__isosae21434|wp__isopas8926|iic_aspice_40)__.*$ + # Role role: - title: "Role" - prefix: "rl__" + title: Role + prefix: rl__ mandatory_options: - id: "^rl__[0-9a-z_]*$" + id: ^rl__[0-9a-z_]*$ optional_links: - contains: "^rl__.*$" + contains: ^rl__.*$ + # Documents doc_concept: - title: "Concept Definition" - prefix: "doc_concept__" + title: Concept Definition + prefix: doc_concept__ mandatory_options: - id: "^doc_concept__[0-9a-z_]*$" - status: "^(valid|draft)$" + id: ^doc_concept__[0-9a-z_]*$ + status: ^(valid|draft)$ doc_getstrt: - title: "Getting Startet" - prefix: "doc_getstrt__" + title: Getting Startet + prefix: doc_getstrt__ mandatory_options: - id: "^doc_getstrt__[0-9a-z_]*$" - status: "^(valid|draft)$" + id: ^doc_getstrt__[0-9a-z_]*$ + status: ^(valid|draft)$ - ############################################################################## - # S-CORE Metamodel - ############################################################################## - # General document: - title: "Generic Document" - prefix: "doc__" + title: Generic Document + prefix: doc__ mandatory_options: - id: "^doc__[0-9a-z_]*$" - status: "^(valid|draft|invalid)$" + id: ^doc__[0-9a-z_]*$ + status: ^(valid|draft|invalid)$ optional_options: safety: 
"^(QM|ASIL_B|ASIL_D)$" security: "^(YES|NO)$" realizes: "^wp__.+$" - # The following 3 guidance requirements enforce the requirement structure and attributes: - # req- Id: gd_req__req__structure - # req- Id: gd_req__requirements_attr_description - # req- Id: gd_req__req__linkage + # Requirements + # req-Id: tool_req__docs_req_types stkh_req: - title: "Stakeholder Requirement" - prefix: "stkh_req__" - mandatory_options: - id: "^stkh_req__[0-9a-z_]*$" - reqtype: "^(Functional|Interface|Process|Legal|Non-Functional)$" - safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" - rationale: "^.+$" + title: Stakeholder Requirement + prefix: stkh_req__ + mandatory_options: + id: ^stkh_req__[0-9a-z_]*$ + # req-Id: tool_req__docs_req_attr_reqtype + reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ + safety: ^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ + # req-Id: tool_req__docs_req_attr_rationale + rationale: ^.+$ optional_options: - security: "^(YES|NO)$" - codelink: "^.*$" - testlink: "^.*$" - reqcovered: "^(YES|NO)$" - testcovered: "^(YES|NO)$" - hash: "^.*$" - + security: ^(YES|NO)$ + codelink: ^.*$ + testlink: ^.*$ + # req-Id: tool_req__docs_req_attr_reqcov + reqcovered: ^(YES|NO)$ + # req-Id: tool_req__docs_req_attr_testcov + testcovered: ^(YES|NO)$ + hash: ^.*$ + + # req-Id: tool_req__docs_req_types feat_req: - title: "Feature Requirement" - prefix: "feat_req__" - style: "node" - mandatory_options: - id: "^feat_req__[0-9a-z_]*$" - reqtype: "^(Functional|Interface|Process|Legal|Non-Functional)$" - security: "^(YES|NO)$" - safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" + title: Feature Requirement + prefix: feat_req__ + style: node + mandatory_options: + id: ^feat_req__[0-9a-z_]*$ + # req-Id: tool_req__docs_req_attr_reqtype + reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ + security: ^(YES|NO)$ + safety: ^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ mandatory_links: - # req- Id: gd_req__req__linkage_fulfill - 
satisfies: "^stkh_req__.*$" + # req-Id: tool_req__docs_req_link_satisfies_allowed + satisfies: ^stkh_req__.*$ optional_options: - codelink: "^.*$" - testlink: "^.*$" - reqcovered: "^(YES|NO)$" - testcovered: "^(YES|NO)$" - hash: "^.*$" - + codelink: ^.*$ + testlink: ^.*$ + # req-Id: tool_req__docs_req_attr_reqcov + reqcovered: ^(YES|NO)$ + # req-Id: tool_req__docs_req_attr_testcov + testcovered: ^(YES|NO)$ + hash: ^.*$ + + # req-Id: tool_req__docs_req_types comp_req: - title: "Component Requirement" - prefix: "comp_req__" - mandatory_options: - id: "^comp_req__[0-9a-z_]*$" - reqtype: "^(Functional|Interface|Process|Legal|Non-Functional)$" - security: "^(YES|NO)$" - safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" + title: Component Requirement + prefix: comp_req__ + mandatory_options: + id: ^comp_req__[0-9a-z_]*$ + # req-Id: tool_req__docs_req_attr_reqtype + reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ + security: ^(YES|NO)$ + safety: ^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ mandatory_links: - satisfies: "^feat_req__.*$" + # req-Id: tool_req__docs_req_link_satisfies_allowed + satisfies: ^feat_req__.*$ optional_options: - codelink: "^.*$" - testlink: "^.*$" - reqcovered: "^(YES|NO)$" - testcovered: "^(YES|NO)$" - hash: "^.*$" - + codelink: ^.*$ + testlink: ^.*$ + # req-Id: tool_req__docs_req_attr_reqcov + reqcovered: ^(YES|NO)$ + # req-Id: tool_req__docs_req_attr_testcov + testcovered: ^(YES|NO)$ + hash: ^.*$ + + # req-Id: tool_req__docs_req_types tool_req: - title: "Tool Requirement" - prefix: "tool_req__" + title: Tool Requirement + prefix: tool_req__ mandatory_options: - id: "^tool_req__[0-9a-z_]*$" - reqtype: "^(Functional|Interface|Process|Legal|Non-Functional)$" - security: "^(YES|NO)$" - safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" - mandatory_links: - satisfies: "^.*_req__.*$" + id: ^tool_req__[0-9a-z_]*$ + security: ^(YES|NO)$ + safety: ^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ + optional_links: + 
# req-Id: tool_req__docs_req_link_satisfies_allowed + satisfies: ^.*$ optional_options: - codelink: "^.*$" - testlink: "^.*$" - reqcovered: "^(YES|NO)$" - testcovered: "^(YES|NO)$" - hash: "^.*$" - + # req-Id: tool_req__docs_req_attr_reqtype + reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ + codelink: ^.*$ + tags: ^.*$ + testlink: ^.*$ + # req-Id: tool_req__docs_req_attr_reqcov + reqcovered: ^(YES|NO)$ + # req-Id: tool_req__docs_req_attr_testcov + testcovered: ^(YES|NO)$ + hash: ^.*$ + implemented: ^(YES|PARTIAL|NO)$ + parent_covered: ^.*$ + parent_has_problem: ^.*$ + + # req-Id: tool_req__docs_req_types aou_req: - title: "Assumption of Use" - prefix: "aou_req__" - mandatory_options: - id: "^aou_req__[0-9a-z_]*$" - reqtype: "^(Functional|Interface|Process|Legal|Non-Functional)$" - security: "^(YES|NO)$" - safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" + title: Assumption of Use + prefix: aou_req__ + mandatory_options: + id: ^aou_req__[0-9a-z_]*$ + # req-Id: tool_req__docs_req_attr_reqtype + reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ + security: ^(YES|NO)$ + safety: ^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ optional_options: - codelink: "^.*$" - testlink: "^.*$" - reqcovered: "^(YES|NO)$" - testcovered: "^(YES|NO)$" - hash: "^.*$" + codelink: ^.*$ + testlink: ^.*$ + # req-Id: tool_req__docs_req_attr_reqcov + reqcovered: ^(YES|NO)$ + # req-Id: tool_req__docs_req_attr_testcov + testcovered: ^(YES|NO)$ + hash: ^.*$ optional_links: - mitigates: "^.*$" + mitigates: ^.*$ + + # Architecture feat_arc_sta: - title: "Feature Architecture Static View" - prefix: "feat_arc_sta__" - color: "#FEDCD2" - style: "card" - mandatory_options: - id: "^feat_arc_sta__[0-9a-z_]+$" - security: "^(YES|NO)$" - safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" + title: Feature Architecture Static View + prefix: feat_arc_sta__ + color: #FEDCD2 + style: card + mandatory_options: + id: ^feat_arc_sta__[0-9a-z_]+$ + security: 
^(YES|NO)$ + safety: ^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ mandatory_links: - includes: "^logic_arc_int(_op)*__.+$" + includes: ^logic_arc_int(_op)*__.+$ optional_links: - fulfils: "^feat_req__.+$" + fulfils: ^feat_req__.+$ feat_arc_dyn: - title: "Feature Architecture Dynamic View" - prefix: "feat_arc_dyn__" - color: "#FEDCD2" - style: "card" - mandatory_options: - id: "^feat_arc_dyn__[0-9a-z_]+$" - security: "^(YES|NO)$" - safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" + title: Feature Architecture Dynamic View + prefix: feat_arc_dyn__ + color: #FEDCD2 + style: card + mandatory_options: + id: ^feat_arc_dyn__[0-9a-z_]+$ + security: ^(YES|NO)$ + safety: ^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ mandatory_links: - fulfils: "^feat_req__.+$" + fulfils: ^feat_req__.+$ logic_arc_int: - title: "Logical Architecture Interfaces" - prefix: "logic_arc_int__" - color: "#FEDCD2" - style: "card" - mandatory_options: - id: "^logic_arc_int__[0-9a-z_]+$" - security: "^(YES|NO)$" - safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" + title: Logical Architecture Interfaces + prefix: logic_arc_int__ + color: #FEDCD2 + style: card + mandatory_options: + id: ^logic_arc_int__[0-9a-z_]+$ + security: ^(YES|NO)$ + safety: ^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ optional_links: - includes: "^logic_arc_int_op__.+$" - fulfils: "^comp_req__.+$" + includes: ^logic_arc_int_op__.+$ + fulfils: ^comp_req__.+$ logic_arc_int_op: - title: "Logical Architecture Interface Operation" - prefix: "logic_arc_int_op__" - color: "#FEDCD2" - style: "card" - mandatory_options: - id: "^logic_arc_int_op__[0-9a-z_]+$" - security: "^(YES|NO)$" - safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" + title: Logical Architecture Interface Operation + prefix: logic_arc_int_op__ + color: #FEDCD2 + style: card + mandatory_options: + id: ^logic_arc_int_op__[0-9a-z_]+$ + security: ^(YES|NO)$ + safety: ^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ mandatory_links: 
- included_by: "^logic_arc_int__.+$" + included_by: ^logic_arc_int__.+$ mod_view_sta: - title: "Module Architecture Static View" - prefix: "mod_view_sta__" - color: "#FEDCD2" - style: "card" + title: Module Architecture Static View + prefix: mod_view_sta__ + color: #FEDCD2 + style: card mandatory_options: - id: "^mod_view_sta__[0-9a-z_]+$" + id: ^mod_view_sta__[0-9a-z_]+$ mandatory_links: - includes: "^comp_arc_sta__.+$" + includes: ^comp_arc_sta__.+$ mod_view_dyn: - title: "Module Architecture Dynamic View" - prefix: "mod_view_dyn__" - color: "#FEDCD2" - style: "card" + title: Module Architecture Dynamic View + prefix: mod_view_dyn__ + color: #FEDCD2 + style: card mandatory_options: - id: "^mod_view_dyn__[0-9a-z_]+$" + id: ^mod_view_dyn__[0-9a-z_]+$ comp_arc_sta: - title: "Component Architecture Static View" - prefix: "comp_arc_sta__" - color: "#FEDCD2" - style: "card" - mandatory_options: - id: "^comp_arc_sta__[0-9a-z_]+$" - security: "^(YES|NO)$" - safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" + title: Component Architecture Static View + prefix: comp_arc_sta__ + color: #FEDCD2 + style: card + mandatory_options: + id: ^comp_arc_sta__[0-9a-z_]+$ + security: ^(YES|NO)$ + safety: ^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ optional_links: - implements: "^real_arc_int(_op)*__.+$" - includes: "^comp_arc_sta__.+$" - uses: "^real_arc_int(_op)*__.+$" - fulfils: "^comp_req__.+$" + implements: ^real_arc_int(_op)*__.+$ + includes: ^comp_arc_sta__.+$ + uses: ^real_arc_int(_op)*__.+$ + fulfils: ^comp_req__.+$ comp_arc_dyn: - title: "Component Architecture Dynamic View" - prefix: "comp_arc_dyn__" - color: "#FEDCD2" - style: "card" - mandatory_options: - id: "^comp_arc_dyn__[0-9a-z_]+$" - security: "^(YES|NO)$" - safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" + title: Component Architecture Dynamic View + prefix: comp_arc_dyn__ + color: #FEDCD2 + style: card + mandatory_options: + id: ^comp_arc_dyn__[0-9a-z_]+$ + security: ^(YES|NO)$ + safety: 
^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ optional_links: - fulfils: "^comp_req__.+$" + fulfils: ^comp_req__.+$ real_arc_int: - title: "Component Architecture Interfaces" - prefix: "real_arc_int__" - color: "#FEDCD2" - style: "card" - mandatory_options: - id: "^real_arc_int__[0-9a-z_]+$" - security: "^(YES|NO)$" - safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" - language: "^(cpp|rust)$" + title: Component Architecture Interfaces + prefix: real_arc_int__ + color: #FEDCD2 + style: card + mandatory_options: + id: ^real_arc_int__[0-9a-z_]+$ + security: ^(YES|NO)$ + safety: ^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ + language: ^(cpp|rust)$ optional_links: - fulfils: "^comp_req__.+$" + fulfils: ^comp_req__.+$ real_arc_int_op: - title: "Component Architecture Interface Operation" - prefix: "real_arc_int_op__" - color: "#FEDCD2" - style: "card" - mandatory_options: - id: "^real_arc_int_op__[0-9a-z_]+$" - security: "^(YES|NO)$" - safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" + title: Component Architecture Interface Operation + prefix: real_arc_int_op__ + color: #FEDCD2 + style: card + mandatory_options: + id: ^real_arc_int_op__[0-9a-z_]+$ + security: ^(YES|NO)$ + safety: ^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ mandatory_links: - included_by: "^real_arc_int__.+$" + included_by: ^real_arc_int__.+$ optional_links: - implements: "^logic_arc_int_op__.+$" - + implements: ^logic_arc_int_op__.+$ review_header: - prefix: "review__header" - title: "Review Header" + prefix: review__header + title: Review Header mandatory_options: - id: "^review__header__[0-9a-z_]*$" - reviewers: "^.*$" - approvers: "^.*$" - hash: "^.*$" - template: "^.*$" + id: ^review__header__[0-9a-z_]*$ + reviewers: ^.*$ + approvers: ^.*$ + hash: ^.*$ + template: ^.*$ # Implementation dd_sta: - title: "Static detailed design" - prefix: "dd_sta__" - color: "#FEDCD2" - style: "card" - mandatory_options: - id: "^dd_sta__[0-9a-z_]*$" - security: "^(YES|NO)$" - 
safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" + title: Static detailed design + prefix: dd_sta__ + color: #FEDCD2 + style: card + mandatory_options: + id: ^dd_sta__[0-9a-z_]*$ + security: ^(YES|NO)$ + safety: ^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ mandatory_links: - implements: "^comp_req__.*$" - satisfies: "^comp_arc_sta__.*$" + implements: ^comp_req__.*$ + satisfies: ^comp_arc_sta__.*$ optional_links: - includes: "^sw_unit__.*$" + includes: ^sw_unit__.*$ + dd_dyn: - title: "Dynamic detailed design" - prefix: "dd_dyn__" - color: "#FEDCD2" - style: "card" - mandatory_options: - id: "^dd_dyn__[0-9a-z_]*$" - security: "^(YES|NO)$" - safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" + title: Dynamic detailed design + prefix: dd_dyn__ + color: #FEDCD2 + style: card + mandatory_options: + id: ^dd_dyn__[0-9a-z_]*$ + security: ^(YES|NO)$ + safety: ^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ mandatory_links: - implements: "^comp_req__.*$" - satisfies: "^comp_arc_sta__.*$" + implements: ^comp_req__.*$ + satisfies: ^comp_arc_sta__.*$ + sw_unit: - title: "Software unit" - prefix: "sw_unit__" + title: Software unit + prefix: sw_unit__ mandatory_options: - id: "^sw_unit__[0-9a-z_]*$" - security: "^(YES|NO)$" - safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" + id: ^sw_unit__[0-9a-z_]*$ + security: ^(YES|NO)$ + safety: ^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ sw_unit_int: - title: "Software unit interfaces" - prefix: "sw_unit_int__" - color: "#FEDCD2" - style: "card" + title: Software unit interfaces + prefix: sw_unit_int__ + color: #FEDCD2 + style: card mandatory_options: - id: "^sw_unit_int__[0-9a-z_]*$" - security: "^(YES|NO)$" - safety: "^(QM|ASIL_B|ASIL_D)$" - status: "^(valid|invalid)$" + id: ^sw_unit_int__[0-9a-z_]*$ + security: ^(YES|NO)$ + safety: ^(QM|ASIL_B|ASIL_D)$ + status: ^(valid|invalid)$ # Extra link types, which shall be available and allow need types to be linked to each other. 
# We use a dedicated linked type for each type of a connection, for instance from @@ -483,67 +515,67 @@ needs_extra_links: ############################################################## # Workflow contains: - incoming: "contained by" - outgoing: "contains" + incoming: contained by + outgoing: contains has: - incoming: "relates to" - outgoing: "has" + incoming: relates to + outgoing: has input: - incoming: "is input to" - outgoing: "needs input" + incoming: is input to + outgoing: needs input output: - incoming: "is output from" - outgoing: "outputs" + incoming: is output from + outgoing: outputs # Roles responsible: - incoming: "is responsible for" - outgoing: "responsible" + incoming: is responsible for + outgoing: responsible approved_by: - incoming: "approves" - outgoing: "approved by" + incoming: approves + outgoing: approved by supported_by: - incoming: "supports" - outgoing: "supported by" + incoming: supports + outgoing: supported by # Workproduct complies: - incoming: "complies to" - outgoing: "complies" + incoming: complies to + outgoing: complies ############################################################## # S-CORE Metamodel ############################################################## # Requirements satisfies: - incoming: "satisfied by" - outgoing: "satisfies" + incoming: satisfied by + outgoing: satisfies # Architecture fulfils: - incoming: "fulfilled by" - outgoing: "fulfils" + incoming: fulfilled by + outgoing: fulfils implements: - incoming: "implemented by" - outgoing: "implements" + incoming: implemented by + outgoing: implements uses: - incoming: "used by" - outgoing: "uses" + incoming: used by + outgoing: uses includes: - incoming: "included by" - outgoing: "includes" + incoming: included by + outgoing: includes included_by: - incoming: "includes" - outgoing: "included by" + incoming: includes + outgoing: included by ############################################################## # Graph Checks # The graph checks focus on the relation of the 
needs and their attributes. @@ -552,7 +584,7 @@ needs_extra_links: # needs:defines the needs types to which the check should be applied # - [include / exclude]: need types to which the check should be applied; # multiple need types can be defined by separating them with a comma; -# to perform the check on all needs types, set include to "." +# to perform the check on all needs types, set include to . # - condition: defines (together with apply) the condition which the needs need to fulfill # - [and / or / xor / not] # - check: defines the check that should be applied @@ -566,32 +598,32 @@ graph_checks: # req- Id: gd_req__req__linkage_safety req_safety_linkage: needs: - include: "comp_req, feat_req" + include: comp_req, feat_req condition: and: - - "safety != QM" - - "status == valid" + - safety != QM + - status == valid check: satisfies: and: - - "safety != QM" - - "status == valid" + - safety != QM + - status == valid req_linkage: needs: - include: "comp_req, feat_req" - condition: "status == valid" + include: comp_req, feat_req + condition: status == valid check: - satisfies: "status == valid" - # req- Id: gd_req__req__linkage_architecture + # req- Id: gd_req__req__linkage_architecture + satisfies: status == valid arch_safety_linkage: needs: - include: "comp_req, feat_req" + include: comp_req, feat_req condition: and: - - "safety != QM" - - "status == valid" + - safety != QM + - status == valid check: fulfils: and: - - "safety != QM" - - "status == valid" + - safety != QM + - status == valid diff --git a/src/extensions/score_source_code_linker/parse_source_files.py b/src/extensions/score_source_code_linker/parse_source_files.py index 1ae0f0f0..cbfde5ce 100755 --- a/src/extensions/score_source_code_linker/parse_source_files.py +++ b/src/extensions/score_source_code_linker/parse_source_files.py @@ -13,6 +13,7 @@ import argparse import collections import json +import logging import os import subprocess @@ -20,6 +21,8 @@ from collections.abc import Callable from 
pathlib import Path +logger = logging.getLogger(__name__) + TAGS = [ "# req-traceability:", "# req-Id:", @@ -118,6 +121,9 @@ def extract_requirements( parser.add_argument("inputs", nargs="*") args, _ = parser.parse_known_args() + + logger.info(f"Parsing source files: {args.inputs}") + requirement_mappings: dict[str, list[str]] = collections.defaultdict(list) for input in args.inputs: with open(input) as f: From 180e1f77d7c022e179ccd734ac6763dd230d86c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Fri, 13 Jun 2025 16:12:36 +0200 Subject: [PATCH 047/231] extend source code linker to work with any repo (#84) so far score was hardcoded --- src/extensions/score_metamodel/metamodel.yaml | 2 +- .../score_source_code_linker/__init__.py | 7 +- .../parse_source_files.py | 71 +++++++++++++-- .../tests/test_requirement_links.py | 86 +++++++++++++++++-- .../tests/test_source_link.py | 20 +++-- 5 files changed, 164 insertions(+), 22 deletions(-) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index fb09b29b..fdc120a9 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -14,7 +14,7 @@ needs_types_base_options: optional_options: - source_code_link: ^https://github.com/eclipse-score/.*$ + source_code_link: ^https://github.com/.* # Custom semantic validation rules prohibited_words: # req-Id: tool_req__docs_common_attr_title diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index fac87c12..53ed8f75 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -19,7 +19,9 @@ from sphinx_needs.data import NeedsMutable, SphinxNeedsData, NeedsInfoType from sphinx_needs.logging import get_logger -from src.extensions.score_source_code_linker.parse_source_files import GITHUB_BASE_URL +from 
src.extensions.score_source_code_linker.parse_source_files import ( + get_github_base_url, +) LOGGER = get_logger(__name__) LOGGER.setLevel("DEBUG") @@ -108,6 +110,7 @@ def add_source_link(app: Sphinx, env: BuildEnvironment) -> None: # For some reason the prefix 'sphinx_needs internally' is CAPSLOCKED. # So we have to make sure we uppercase the prefixes prefixes = [x["id_prefix"].upper() for x in app.config.needs_external_needs] + github_base_url = get_github_base_url() + "/blob/" try: with open(path) as f: gh_json = json.load(f) @@ -117,7 +120,7 @@ def add_source_link(app: Sphinx, env: BuildEnvironment) -> None: if need is None: # NOTE: manipulating link to remove git-hash, # making the output file location more readable - files = [x.replace(GITHUB_BASE_URL, "").split("/", 1)[-1] for x in link] + files = [x.replace(github_base_url, "").split("/", 1)[-1] for x in link] LOGGER.warning( f"Could not find {id} in the needs id's. " + f"Found in file(s): {files}", diff --git a/src/extensions/score_source_code_linker/parse_source_files.py b/src/extensions/score_source_code_linker/parse_source_files.py index cbfde5ce..ba1d90fd 100755 --- a/src/extensions/score_source_code_linker/parse_source_files.py +++ b/src/extensions/score_source_code_linker/parse_source_files.py @@ -15,8 +15,10 @@ import json import logging import os +import sys import subprocess + # Importing from collections.abc as typing.Callable is deprecated since Python 3.9 from collections.abc import Callable from pathlib import Path @@ -28,7 +30,63 @@ "# req-Id:", ] -GITHUB_BASE_URL = "https://github.com/eclipse-score/score/blob/" + +def get_github_base_url() -> str: + git_root = find_git_root() + repo = get_github_repo_info(git_root) + return f"https://github.com/{repo}" + + +def parse_git_output(str_line: str) -> str: + if len(str_line.split()) < 2: + logger.warning( + f"Got wrong input line from 'get_github_repo_info'. Input: {str_line}. 
Expected example: 'origin git@github.com:user/repo.git'" + ) + return "" + url = str_line.split()[1] # Get the URL part + # Handle SSH format (git@github.com:user/repo.git) + if url.startswith("git@"): + path = url.split(":")[1] + else: + path = "/".join(url.split("/")[3:]) # Get part after github.com/ + return path.replace(".git", "") + + +def get_github_repo_info(git_root_cwd: Path) -> str: + process = subprocess.run( + ["git", "remote", "-v"], capture_output=True, text=True, cwd=git_root_cwd + ) + repo = "" + for line in process.stdout.split("\n"): + if "origin" in line and "(fetch)" in line: + repo = parse_git_output(line) + break + else: + # If we do not find 'origin' we just take the first line + logger.info( + "Did not find origin remote name. Will now take first result from: 'git remote -v'" + ) + repo = parse_git_output(process.stdout.split("\n")[0]) + assert repo != "", ( + "Remote repository is not defined. Make sure you have a remote set. Check this via 'git remote -v'" + ) + return repo + + +def find_git_root(): + """ + This is copied from 'find_runfiles' as the import does not work for some reason. + This should be fixed. + """ + git_root = Path(__file__).resolve() + while not (git_root / ".git").exists(): + git_root = git_root.parent + if git_root == Path("/"): + sys.exit( + "Could not find git root. Please run this script from the " + "root of the repository." 
+ ) + return git_root def get_git_hash(file_path: str) -> str: @@ -48,7 +106,7 @@ def get_git_hash(file_path: str) -> str: try: abs_path = Path(file_path).resolve() if not os.path.isfile(abs_path): - print(f"File not found: {abs_path}", flush=True) + logger.warning(f"File not found: {abs_path}") return "file_not_found" result = subprocess.run( ["git", "log", "-n", "1", "--pretty=format:%H", "--", abs_path], @@ -61,12 +119,13 @@ def get_git_hash(file_path: str) -> str: assert all(c in "0123456789abcdef" for c in decoded_result) return decoded_result except Exception as e: - print(f"Unexpected error: {abs_path}: {e}", flush=True) + logger.warning(f"Unexpected error: {abs_path}: {e}") return "error" def extract_requirements( source_file: str, + github_base_url: str, git_hash_func: Callable[[str], str] | None = get_git_hash, ) -> dict[str, list[str]]: """ @@ -110,7 +169,7 @@ def extract_requirements( check_tag = cleaned_line.split(":")[1].strip() if check_tag: req_id = cleaned_line.split(":")[-1].strip() - link = f"{GITHUB_BASE_URL}{hash}/{source_file}#L{line_number}" + link = f"{github_base_url}/blob/{hash}/{source_file}#L{line_number}" requirement_mapping[req_id].append(link) return requirement_mapping @@ -124,11 +183,13 @@ def extract_requirements( logger.info(f"Parsing source files: {args.inputs}") + # Finding the GH URL + gh_base_url = get_github_base_url() requirement_mappings: dict[str, list[str]] = collections.defaultdict(list) for input in args.inputs: with open(input) as f: for source_file in f: - rm = extract_requirements(source_file.strip()) + rm = extract_requirements(source_file.strip(), gh_base_url) for k, v in rm.items(): requirement_mappings[k].extend(v) with open(args.output, "w") as f: diff --git a/src/extensions/score_source_code_linker/tests/test_requirement_links.py b/src/extensions/score_source_code_linker/tests/test_requirement_links.py index 43f03f57..fe502510 100644 --- a/src/extensions/score_source_code_linker/tests/test_requirement_links.py 
+++ b/src/extensions/score_source_code_linker/tests/test_requirement_links.py @@ -12,14 +12,20 @@ # ******************************************************************************* from collections import defaultdict from collections.abc import Callable +from gettext import find from pathlib import Path import pytest +import logging from pytest import TempPathFactory from src.extensions.score_source_code_linker.parse_source_files import ( - GITHUB_BASE_URL, + get_github_base_url, + find_git_root, + get_github_repo_info, extract_requirements, get_git_hash, + parse_git_output, + logger as scl_logger, ) @@ -74,39 +80,42 @@ def dummy_git_hash_func(input: str) -> Callable[[str], str]: def test_extract_requirements(create_tmp_files: Path): root_dir = create_tmp_files - + github_base_url = get_github_base_url() results_dict1 = extract_requirements( - str(root_dir / "testfile.txt"), dummy_git_hash_func("no-hash") + str(root_dir / "testfile.txt"), github_base_url, dummy_git_hash_func("no-hash") ) expected_dict1: dict[str, list[str]] = defaultdict(list) expected_dict1["TEST_REQ__LINKED_ID"].append( - f"{GITHUB_BASE_URL}no-hash/{root_dir}/testfile.txt#L7" + f"{github_base_url}/blob/no-hash/{root_dir}/testfile.txt#L7" ) expected_dict1["TEST_REQ__LINKED_TRACE"].append( - f"{GITHUB_BASE_URL}no-hash/{root_dir}/testfile.txt#L11" + f"{github_base_url}/blob/no-hash/{root_dir}/testfile.txt#L11" ) # Assumed random hash here to test if passed correctly results_dict2 = extract_requirements( str(root_dir / "testfile2.txt"), + github_base_url, dummy_git_hash_func("aacce4887ceea1f884135242a8c182db1447050"), ) expected_dict2: dict[str, list[str]] = defaultdict(list) expected_dict2["TEST_REQ__LINKED_DIFFERENT_FILE"].append( - f"{GITHUB_BASE_URL}aacce4887ceea1f884135242a8c182db1447050/{root_dir}/testfile2.txt#L3" + f"{github_base_url}/blob/aacce4887ceea1f884135242a8c182db1447050/{root_dir}/testfile2.txt#L3" ) - results_dict3 = extract_requirements(str(root_dir / "testfile3.txt")) + 
results_dict3 = extract_requirements( + str(root_dir / "testfile3.txt"), github_base_url + ) expected_dict3: dict[str, list[str]] = defaultdict(list) # if there is no git-hash returned from command. # This happens if the file is new and not committed yet. results_dict4 = extract_requirements( - str(root_dir / "testfile2.txt"), dummy_git_hash_func("") + str(root_dir / "testfile2.txt"), github_base_url, dummy_git_hash_func("") ) expected_dict4: dict[str, list[str]] = defaultdict(list) expected_dict4["TEST_REQ__LINKED_DIFFERENT_FILE"].append( - f"{GITHUB_BASE_URL}/{root_dir}/testfile2.txt#L3" + f"{github_base_url}/blob//{root_dir}/testfile2.txt#L3" ) assert results_dict1 == expected_dict1 @@ -118,3 +127,62 @@ def test_extract_requirements(create_tmp_files: Path): def test_get_git_hash(): assert get_git_hash("testfile.x") == "file_not_found" assert get_git_hash("") == "file_not_found" + + +# These tests aren't great / exhaustive, but an okay first step into the right direction. + + +def test_get_github_repo_info(): + # I'd argue the happy path is tested with the other ones? 
+ with pytest.raises(AssertionError): + get_github_repo_info(Path(".")) + + +git_test_data_ok = [ + ( + "origin https://github.com/eclipse-score/test-repo.git (fetch)", + "eclipse-score/test-repo", + ), + ( + "origin git@github.com:eclipse-score/test-repo.git (fetch)", + "eclipse-score/test-repo", + ), + ("origin git@github.com:eclipse-score/test-repo.git", "eclipse-score/test-repo"), + ("upstream git@github.com:upstream/repo.git (fetch)", "upstream/repo"), +] + + +@pytest.mark.parametrize("input,output", git_test_data_ok) +def test_parse_git_output_ok(input, output): + assert output == parse_git_output(input) + + +git_test_data_bad = [ + ("origin ", ""), + ( + " ", + "", + ), +] + + +@pytest.mark.parametrize("input,output", git_test_data_bad) +def test_parse_git_output_bad(caplog, input, output): + with caplog.at_level(logging.WARNING, logger=scl_logger.name): + result = parse_git_output(input) + assert len(caplog.messages) == 1 + assert caplog.records[0].levelname == "WARNING" + assert ( + f"Got wrong input line from 'get_github_repo_info'. Input: {input}. Expected example: 'origin git@github.com:user/repo.git'" + in caplog.records[0].message + ) + assert output == result + + +def test_get_github_base_url(): + # Not really a great test imo. 
+ git_root = find_git_root() + repo = get_github_repo_info(git_root) + expected = f"https://github.com/{repo}" + actual = get_github_base_url() + assert expected == actual diff --git a/src/extensions/score_source_code_linker/tests/test_source_link.py b/src/extensions/score_source_code_linker/tests/test_source_link.py index fd33a542..591e2d66 100644 --- a/src/extensions/score_source_code_linker/tests/test_source_link.py +++ b/src/extensions/score_source_code_linker/tests/test_source_link.py @@ -16,11 +16,18 @@ import pytest from pytest import TempPathFactory -from src.extensions.score_source_code_linker.parse_source_files import GITHUB_BASE_URL +from src.extensions.score_source_code_linker.parse_source_files import ( + get_github_base_url, +) from sphinx.testing.util import SphinxTestApp from sphinx_needs.data import SphinxNeedsData +def construct_gh_url() -> str: + gh = get_github_base_url() + return f"{gh}/blob/" + + @pytest.fixture(scope="session") def sphinx_base_dir(tmp_path_factory: TempPathFactory) -> Path: return tmp_path_factory.mktemp("sphinx") @@ -105,22 +112,24 @@ def basic_needs(): @pytest.fixture(scope="session") def example_source_link_text_all_ok(): + github_base_url = construct_gh_url() return { "TREQ_ID_1": [ - f"{GITHUB_BASE_URL}aacce4887ceea1f884135242a8c182db1447050/tools/sources/implementation1.py#L2", - f"{GITHUB_BASE_URL}/tools/sources/implementation_2_new_file.py#L20", + f"{github_base_url}aacce4887ceea1f884135242a8c182db1447050/tools/sources/implementation1.py#L2", + f"{github_base_url}/tools/sources/implementation_2_new_file.py#L20", ], "TREQ_ID_2": [ - f"{GITHUB_BASE_URL}f53f50a0ab1186329292e6b28b8e6c93b37ea41/tools/sources/implementation1.py#L18" + f"{github_base_url}f53f50a0ab1186329292e6b28b8e6c93b37ea41/tools/sources/implementation1.py#L18" ], } @pytest.fixture(scope="session") def example_source_link_text_non_existent(): + github_base_url = construct_gh_url() return { "TREQ_ID_200": [ - 
f"{GITHUB_BASE_URL}f53f50a0ab1186329292e6b28b8e6c93b37ea41/tools/sources/bad_implementation.py#L17" + f"{github_base_url}f53f50a0ab1186329292e6b28b8e6c93b37ea41/tools/sources/bad_implementation.py#L17" ], } @@ -132,6 +141,7 @@ def test_source_link_integration_ok( example_source_link_text_all_ok: dict[str, list[str]], sphinx_base_dir: Path, ): + github_url = construct_gh_url() app = sphinx_app_setup(basic_conf, basic_needs, example_source_link_text_all_ok) try: app.build() From db1682d42e83ee5a0dfc2b805a4faa0a980fd1f1 Mon Sep 17 00:00:00 2001 From: Simon Duerr Date: Fri, 20 Jun 2025 11:15:07 +0200 Subject: [PATCH 048/231] fix BUILD file of score_source_code_linker (#91) The BUILD file was missing a dependency to py_binary, causing errors when building with bootstrap_impl=script. --- src/extensions/score_source_code_linker/BUILD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/extensions/score_source_code_linker/BUILD b/src/extensions/score_source_code_linker/BUILD index 2c0402cf..597ed795 100644 --- a/src/extensions/score_source_code_linker/BUILD +++ b/src/extensions/score_source_code_linker/BUILD @@ -10,7 +10,7 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -load("@aspect_rules_py//py:defs.bzl", "py_library") +load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") load("@pip_process//:requirements.bzl", "all_requirements") load("@score_python_basics//:defs.bzl", "score_py_pytest") From ab549c645f38d20ca5052280fb55b6644a731509 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Sun, 22 Jun 2025 13:53:20 +0200 Subject: [PATCH 049/231] add CODEOWNERS (#88) --- .github/CODEOWNERS | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000..6918bf3c --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,9 @@ +# 👋 Code owners help maintain this repository 
and keep it aligned with our technical vision. +# You're responsible for reviewing changes, ensuring quality, and guiding contributors. +# You're also encouraged to help triage issues and keep discussions constructive and focused. +# Ownership can be shared, delegated, or updated as the project evolves. + +# For more information about CODEOWNERS, see: +# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners + +* @AlexanderLanin @MaximilianSoerenPollak @dcalavrezo-qorix From 467ed9cc737d3057fc6c2dea796d8d9ea5b0da88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Mon, 23 Jun 2025 17:32:52 +0200 Subject: [PATCH 050/231] fix: remove formatting from tests (#93) --- src/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/src/BUILD b/src/BUILD index e0e71ad1..def4bbef 100644 --- a/src/BUILD +++ b/src/BUILD @@ -185,6 +185,7 @@ format_test( no_sandbox = True, python = "@aspect_rules_lint//format:ruff", starlark = "@buildifier_prebuilt//:buildifier", + tags = ["manual"], visibility = [ "//visibility:public", ], From 65ae15814ee52c928e17279939762d9db98747b6 Mon Sep 17 00:00:00 2001 From: Simon Duerr Date: Thu, 26 Jun 2025 09:38:27 +0200 Subject: [PATCH 051/231] add support to inject other pip index (#103) --- MODULE.bazel | 2 ++ 1 file changed, 2 insertions(+) diff --git a/MODULE.bazel b/MODULE.bazel index 24182c5e..7edf970b 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -48,6 +48,8 @@ use_repo(python) ############################################################################### pip = use_extension("@rules_python//python/extensions:pip.bzl", "pip") pip.parse( + envsubst = ["PIP_INDEX_URL"], + extra_pip_args = ["--index-url=${PIP_INDEX_URL:-https://pypi.org/simple/}"], hub_name = "pip_process", python_version = PYTHON_VERSION, requirements_lock = "//src:requirements.txt", From 316102abe059460f9e4fc25ca265aa0c68836429 Mon Sep 17 00:00:00 2001 From: 
Aymen-Soussi-01 Date: Thu, 26 Jun 2025 14:19:06 +0200 Subject: [PATCH 052/231] add caching to some workflows (#95) --- .github/workflows/format.yml | 14 ++++++++++++-- .github/workflows/test.yml | 14 ++++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index b3b0b477..f664708f 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -23,8 +23,18 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@v4.2.2 - - name: Setup Bazel - uses: bazel-contrib/setup-bazel@0.9.1 + - name: Cache Bazel + uses: actions/cache@v4 + with: + path: ~/.cache/bazel + key: ${{ runner.os }}-format-${{ hashFiles('**/*.bazel', '**/BUILD', '**/*.bzl') }} + + - name: Setup Bazel with cache + uses: bazel-contrib/setup-bazel@0.15.0 + with: + disk-cache: true + repository-cache: true + bazelisk-cache: true - name: Run formatting checks run: | bazel run //src:ide_support diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index ab0984ab..33c4f4d2 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -21,6 +21,20 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@v4.2.2 + - name: Cache Bazel and pip + uses: actions/cache@v4 + with: + path: | + ~/.cache/bazel + ~/.cache/pip + key: ${{ runner.os }}-test-${{ hashFiles('**/*.bazel', '**/BUILD', '**/*.bzl', 'src/requirements.txt', 'src/**/*.py') }} + + - name: Setup Bazel with cache + uses: bazel-contrib/setup-bazel@0.15.0 + with: + disk-cache: true + repository-cache: true + bazelisk-cache: true - name: Run test targets run: | bazel run //src:ide_support From 1c4352ed86fc1e86d16447c180d8a652556e029b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Thu, 26 Jun 2025 17:37:04 +0200 Subject: [PATCH 053/231] Remove dependency to score_platform (#108) * Remove dependency to score_platform Co-authored-by: Alexander Lanin --- MODULE.bazel | 13 
++++++------- docs/BUILD | 11 ----------- docs/how-to-integrate/example/index.rst | 10 +++++----- .../example/testing/index.rst | 10 +++++----- examples/linking-both/BUILD | 18 ++++++++---------- examples/linking-both/index.rst | 10 +++++----- examples/linking-both/testing/test.rst | 10 +++++----- examples/linking-latest/BUILD | 11 ++++------- examples/linking-latest/index.rst | 7 ++++++- examples/linking-release/BUILD | 19 ++++--------------- examples/linking-release/index.rst | 5 ++--- 11 files changed, 50 insertions(+), 74 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 7edf970b..aedcff4b 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.3.3", + version = "0.4.0", compatibility_level = 0, ) @@ -22,7 +22,7 @@ module( # Packaging dependencies # ############################################################################### -bazel_dep(name = "rules_pkg", version = "1.0.1") +bazel_dep(name = "rules_pkg", version = "1.1.0") ############################################################################### # @@ -58,17 +58,17 @@ use_repo(pip, "pip_process") # Additional Python rules provided by aspect, e.g. 
an improved version of bazel_dep(name = "aspect_rules_py", version = "1.4.0") -bazel_dep(name = "buildifier_prebuilt", version = "7.3.1") +bazel_dep(name = "buildifier_prebuilt", version = "8.2.0.2") ############################################################################### # # Generic linting and formatting rules # ############################################################################### -bazel_dep(name = "aspect_rules_lint", version = "1.4.2") +bazel_dep(name = "aspect_rules_lint", version = "1.4.4") # PlantUML for docs -bazel_dep(name = "rules_java", version = "8.11.0") +bazel_dep(name = "rules_java", version = "8.13.0") http_jar = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_jar") @@ -94,8 +94,7 @@ bazel_dep(name = "score_python_basics", version = "0.3.2") bazel_dep(name = "score_cr_checker", version = "0.2.2") # This is only needed to build the examples. -bazel_dep(name = "score_platform", version = "0.1.1") # Grab dash bazel_dep(name = "score_dash_license_checker", version = "0.1.1") -bazel_dep(name = "score_process", version = "0.2.0") +bazel_dep(name = "score_process", version = "1.0.1") diff --git a/docs/BUILD b/docs/BUILD index a3afa873..2ac04b22 100644 --- a/docs/BUILD +++ b/docs/BUILD @@ -26,11 +26,6 @@ docs( { "suffix": "latest", # latest main branch documentation build "external_needs_info": [ - { - "base_url": "https://eclipse-score.github.io/score/main", - "json_url": "https://eclipse-score.github.io/score/main/needs.json", - "id_prefix": "score_", - }, { "base_url": "https://eclipse-score.github.io/process_description/main", "json_url": "https://eclipse-score.github.io/process_description/main/needs.json", @@ -41,15 +36,9 @@ docs( { "suffix": "release", # The version imported from MODULE.bazel "target": [ - "@score_platform//docs:docs_needs", "@score_process//process:docs_needs_latest", ], "external_needs_info": [ - { - "base_url": "https://eclipse-score.github.io/score/main", - "json_path": 
"/score_platform~/docs/docs_needs/_build/needs/needs.json", - "id_prefix": "score_", - }, { "base_url": "https://eclipse-score.github.io/process_description/main", "json_path": "/score_process~/process/docs_needs_latest/_build/needs/needs.json", diff --git a/docs/how-to-integrate/example/index.rst b/docs/how-to-integrate/example/index.rst index 51379cf8..e7e0339a 100644 --- a/docs/how-to-integrate/example/index.rst +++ b/docs/how-to-integrate/example/index.rst @@ -30,21 +30,21 @@ This is a rendered example of the 'examples/linking-both' folder using the `docs Some content to make sure we also can render this This is a link to an external need inside the 'score' documentation. - :need:`SCORE_feat_req__persistency__config_file`. + :need:`PROCESS_gd_req__req__attr_uid`. Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ -.. feat_req:: Some Title - :id: feat_req__example__some_title +.. tool_req:: Some Title + :id: tool_req__example__some_title :reqtype: Process :security: YES :safety: ASIL_D - :satisfies: SCORE_stkh_req__overall_goals__reuse_of_app_soft + :satisfies: PROCESS_gd_req__req__attr_uid :status: invalid With this requirement we can check if the removal of the prefix is working correctly. 
- It should remove id_prefix (SCORE _) as it's defined inside the BUILD file and remove it before it checks the leftover value + It should remove id_prefix (PROCESS _) as it's defined inside the BUILD file and remove it before it checks the leftover value against the allowed defined regex in the metamodel Note: The ID is different here as the 'folder structure' is as well diff --git a/docs/how-to-integrate/example/testing/index.rst b/docs/how-to-integrate/example/testing/index.rst index 802a7ced..353694d4 100644 --- a/docs/how-to-integrate/example/testing/index.rst +++ b/docs/how-to-integrate/example/testing/index.rst @@ -25,18 +25,18 @@ This example will help catch things and bugs when rst's are defined inside a fol Some content to make sure we also can render this. This is a link to an external need inside the 'score' documentation. - :need:`SCORE_feat_req__persistency__config_file`. + :need:`PROCESS_gd_req__req__attr_uid` Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ -.. feat_req:: Some Title - :id: feat_req__testing__some_title +.. tool_req:: Some Title + :id: tool_req__testing__some_title :reqtype: Process :security: YES :safety: ASIL_D - :satisfies: SCORE_stkh_req__overall_goals__reuse_of_app_soft + :satisfies: PROCESS_gd_req__req__attr_uid :status: invalid With this requirement we can check if the removal of the prefix is working correctly. 
- It should remove id_prefix (SCORE _) as it's defined inside the BUILD file and remove it before it checks the leftover value + It should remove id_prefix (PROCESS _) as it's defined inside the BUILD file and remove it before it checks the leftover value against the 'allowed' defined regex in the metamodel diff --git a/examples/linking-both/BUILD b/examples/linking-both/BUILD index b0863bed..c03e9017 100644 --- a/examples/linking-both/BUILD +++ b/examples/linking-both/BUILD @@ -27,26 +27,24 @@ docs( "suffix": "latest", # latest main branch documentation build "external_needs_info": [ { - "base_url": "https://eclipse-score.github.io/score/main", - "json_url": "https://eclipse-score.github.io/score/main/needs.json", - "id_prefix": "score_", + "base_url": "https://eclipse-score.github.io/process_description/main/", + "json_url": "https://eclipse-score.github.io/process_description/main/needs.json", + "id_prefix": "process_", }, ], }, { "suffix": "release", # The version imported from MODULE.bazel - "target": ["@score_platform//docs:docs_needs"], + "target": ["@score_process//process:docs_needs_latest"], "external_needs_info": [ { - "base_url": "https://eclipse-score.github.io/score/main", - "json_path": "/score_platform~/docs/docs_needs/_build/needs/needs.json", - "id_prefix": "score_", + "base_url": "https://eclipse-score.github.io/process_description/main", + "json_path": "/score_process~/process/docs_needs_latest/_build/needs/needs.json", + "id_prefix": "process_", }, ], }, ], source_dir = "examples/linking-both", - source_files_to_scan_for_needs_links = [ - "//src:score_extension_files", - ], + source_files_to_scan_for_needs_links = [], ) diff --git a/examples/linking-both/index.rst b/examples/linking-both/index.rst index 22d023a4..24f0d55c 100644 --- a/examples/linking-both/index.rst +++ b/examples/linking-both/index.rst @@ -33,20 +33,20 @@ This is a simple example of a documentation page using the `docs` tool. 
Some content to make sure we also can render this This is a link to an external need inside the 'score' documentation. - :need:`SCORE_feat_req__kvs__config_file` + :need:`PROCESS_gd_req__req__attr_uid` Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ -.. feat_req:: Some Title - :id: feat_req__index__some_title +.. tool_req:: Some Title + :id: tool_req__index__some_title :reqtype: Process :security: YES :safety: ASIL_D - :satisfies: SCORE_stkh_req__overall_goals__reuse_of_app_soft + :satisfies: PROCESS_gd_req__req__attr_uid :status: invalid With this requirement we can check if the removal of the prefix is working correctly. - It should remove id_prefix (SCORE _) as it's defined inside the BUILD file and remove it before it checks the leftover value + It should remove id_prefix (PROCESS _) as it's defined inside the BUILD file and remove it before it checks the leftover value against the allowed defined regex in the metamodel diff --git a/examples/linking-both/testing/test.rst b/examples/linking-both/testing/test.rst index 5dcfa9d1..d5b2ecef 100644 --- a/examples/linking-both/testing/test.rst +++ b/examples/linking-both/testing/test.rst @@ -25,19 +25,19 @@ This example will help catch things and bugs when rst's are defined inside a fol Some content to make sure we also can render this. This is a link to an external need inside the 'score' documentation. - :need:`SCORE_feat_req__kvs__config_file`. + :need:`PROCESS_gd_req__req__attr_uid` Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ -.. feat_req:: Some Title - :id: feat_req__testing__some_title +.. 
tool_req:: Some Title + :id: tool_req__testing__some_title :reqtype: Process :security: YES :safety: ASIL_D - :satisfies: SCORE_stkh_req__overall_goals__reuse_of_app_soft + :satisfies: PROCESS_gd_req__req__attr_uid :status: invalid With this requirement we can check if the removal of the prefix is working correctly. - It should remove id_prefix (SCORE _) as it's defined inside the BUILD file and remove it before it checks the leftover value + It should remove id_prefix (PROCESS _) as it's defined inside the BUILD file and remove it before it checks the leftover value against the 'allowed' defined regex in the metamodel diff --git a/examples/linking-latest/BUILD b/examples/linking-latest/BUILD index f3663c0e..8866b0a2 100644 --- a/examples/linking-latest/BUILD +++ b/examples/linking-latest/BUILD @@ -27,16 +27,13 @@ docs( "suffix": "latest", # latest main branch documentation build "external_needs_info": [ { - "base_url": "https://eclipse-score.github.io/score/main", - "json_url": "https://eclipse-score.github.io/score/main/needs.json", - "version": "0.1", - "id_prefix": "score_", + "base_url": "https://eclipse-score.github.io/process_description/main/", + "json_url": "https://eclipse-score.github.io/process_description/main/needs.json", + "id_prefix": "process_", }, ], }, ], source_dir = "examples/linking-latest", - source_files_to_scan_for_needs_links = [ - "//src:score_extension_files", - ], + source_files_to_scan_for_needs_links = [], ) diff --git a/examples/linking-latest/index.rst b/examples/linking-latest/index.rst index 336987e6..6213fac5 100644 --- a/examples/linking-latest/index.rst +++ b/examples/linking-latest/index.rst @@ -16,6 +16,11 @@ Hello World ================= This is a simple example of a documentation page using the `docs` tool. + +Hello World +================= +This is a simple example of a documentation page using the `docs` tool. + ..
stkh_req:: TestTitle :id: stkh_req__docs__test_requirement :status: valid @@ -25,6 +30,6 @@ This is a simple example of a documentation page using the `docs` tool. Some content to make sure we also can render this This is a link to an external need inside the 'score' documentation. - :need:`SCORE_feat_req__kvs__config_file`. + :need:`PROCESS_gd_req__req__attr_uid` Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ diff --git a/examples/linking-release/BUILD b/examples/linking-release/BUILD index 5265b8c5..33beef3c 100644 --- a/examples/linking-release/BUILD +++ b/examples/linking-release/BUILD @@ -25,27 +25,16 @@ docs( docs_targets = [ { "suffix": "release", # The version imported from MODULE.bazel - "target": ["@score_platform//docs:docs_needs"], + "target": ["@score_process//process:docs_needs_latest"], "external_needs_info": [ { "base_url": "https://eclipse-score.github.io/score/main", - "json_path": "/score_platform~/docs/docs_needs/_build/needs/needs.json", - "version": "0.1", - "id_prefix": "score_", + "json_path": "/score_process~/process/docs_needs_latest/_build/needs/needs.json", + "id_prefix": "process_", }, ], }, ], source_dir = "examples/linking-release", - source_files_to_scan_for_needs_links = [ - # Note: you can add filegroups, globs, or entire targets here. - "//src:score_extension_files", - ], + source_files_to_scan_for_needs_links = [], ) - -# ╭───────────────────────────────────────╮ -# │ This is commented out until local │ -# │ multi-repo testing is implemented │ -# ╰───────────────────────────────────────╯ - -# { diff --git a/examples/linking-release/index.rst b/examples/linking-release/index.rst index d2d30725..3cc89afc 100644 --- a/examples/linking-release/index.rst +++ b/examples/linking-release/index.rst @@ -24,7 +24,6 @@ This is a simple example of a documentation page using the `docs` tool. 
:reqtype: Functional Some content to make sure we also can render this - This is a link to an external need inside the 'score' documentation - :need:`SCORE_stkh_req__overall_goals__reuse_of_app_soft` + This is a link to an external need inside the 'score' documentation. + :need:`PROCESS_gd_req__req__attr_uid` Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ - From d9df2443576794472b03b1ac3b222b2da8cc6ce6 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Fri, 27 Jun 2025 09:33:28 +0200 Subject: [PATCH 054/231] update process requirements (#111) --- .vscode/settings.json | 2 +- MODULE.bazel | 2 +- docs/product/requirements.rst | 153 ++++++++++++++++------------------ src/incremental.py | 14 +++- 4 files changed, 89 insertions(+), 82 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index c1b5cceb..81b05b3b 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -26,7 +26,7 @@ "python.testing.pytestArgs": [ ".", "--ignore-glob=bazel-*/*", - "--ignore-glob=.venv_docs/*", + "--ignore-glob=.venv*/*", "--ignore-glob=_build/*" ], "python.testing.unittestEnabled": false, diff --git a/MODULE.bazel b/MODULE.bazel index aedcff4b..eee1f8bd 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -97,4 +97,4 @@ bazel_dep(name = "score_cr_checker", version = "0.2.2") # Grab dash bazel_dep(name = "score_dash_license_checker", version = "0.1.1") -bazel_dep(name = "score_process", version = "1.0.1") +bazel_dep(name = "score_process", version = "1.0.2") diff --git a/docs/product/requirements.rst b/docs/product/requirements.rst index a8125516..b9e4150d 100644 --- a/docs/product/requirements.rst +++ b/docs/product/requirements.rst @@ -9,6 +9,12 @@ Requirements (Process Compliance) This section provides an overview of current process requirements and their clarification & implementation status. +.. 
note:: + All open issues and pull requests in the process repository are considered as if they + are already part of the process requirements. They address a lot of the + requirements that are referenced in this document, so we would be blocked if we would + not consider them as part of the process requirements. + .. needbar:: Docs-As-Code Requirements Status :stacked: :show_sum: @@ -20,16 +26,15 @@ This section provides an overview of current process requirements and their clar :xlabels_rotation: 45 :horizontal: - , implemented , partially / not quite clear , not implemented / not clear - Common, 'tool_req__docs' in id and implemented == "YES" and "Common Attributes" in tags, 'tool_req__docs' in id and implemented == "PARTIAL" and "Common Attributes" in tags, 'tool_req__docs' in id and implemented == "NO" and "Common Attributes" in tags - Doc, 'tool_req__docs' in id and implemented == "YES" and "Documents" in tags, 'tool_req__docs' in id and implemented == "PARTIAL" and "Documents" in tags, 'tool_req__docs' in id and implemented == "NO" and "Documents" in tags - Req, 'tool_req__docs' in id and implemented == "YES" and "Requirements" in tags, 'tool_req__docs' in id and implemented == "PARTIAL" and "Requirements" in tags, 'tool_req__docs' in id and implemented == "NO" and "Requirements" in tags - Arch, 'tool_req__docs' in id and implemented == "YES" and "Architecture" in tags, 'tool_req__docs' in id and implemented == "PARTIAL" and "Architecture" in tags, 'tool_req__docs' in id and implemented == "NO" and "Architecture" in tags - DDesign, 'tool_req__docs' in id and implemented == "YES" and "Detailed Design & Code" in tags, 'tool_req__docs' in id and implemented == "PARTIAL" and "Detailed Design & Code" in tags, 'tool_req__docs' in id and implemented == "NO" and "Detailed Design & Code" in tags - TVR, 'tool_req__docs' in id and implemented == "YES" and "Tool Verification Reports" in tags, 'tool_req__docs' in id and implemented == "PARTIAL" and "Tool Verification 
Reports" in tags, 'tool_req__docs' in id and implemented == "NO" and "Tool Verification Reports" in tags - Other, 'tool_req__docs' in id and implemented == "YES" and "Process / Other" in tags, 'tool_req__docs' in id and implemented == "PARTIAL" and "Process / Other" in tags, 'tool_req__docs' in id and implemented == "NO" and "Process / Other" in tags - SftyAn, 'tool_req__docs' in id and implemented == "YES" and "Safety Analysis" in tags, 'tool_req__docs' in id and implemented == "PARTIAL" and "Safety Analysis" in tags, 'tool_req__docs' in id and implemented == "NO" and "Safety Analysis" in tags - + , implemented , partially implemented , not implemented, process not clear + Common, 'tool_req__docs' in id and implemented == "YES" and "Common Attributes" in tags and status == "valid", 'tool_req__docs' in id and implemented == "PARTIAL" and "Common Attributes" in tags and status == "valid", 'tool_req__docs' in id and implemented == "NO" and "Common Attributes" in tags and status == "valid", 'tool_req__docs' in id and "Common Attributes" in tags and status != "valid" + Doc, 'tool_req__docs' in id and implemented == "YES" and "Documents" in tags and status == "valid", 'tool_req__docs' in id and implemented == "PARTIAL" and "Documents" in tags and status == "valid", 'tool_req__docs' in id and implemented == "NO" and "Documents" in tags and status == "valid", 'tool_req__docs' in id and "Documents" in tags and status != "valid" + Req, 'tool_req__docs' in id and implemented == "YES" and "Requirements" in tags and status == "valid", 'tool_req__docs' in id and implemented == "PARTIAL" and "Requirements" in tags and status == "valid", 'tool_req__docs' in id and implemented == "NO" and "Requirements" in tags and status == "valid", 'tool_req__docs' in id and "Requirements" in tags and status != "valid" + Arch, 'tool_req__docs' in id and implemented == "YES" and "Architecture" in tags and status == "valid", 'tool_req__docs' in id and implemented == "PARTIAL" and "Architecture" in 
tags and status == "valid", 'tool_req__docs' in id and implemented == "NO" and "Architecture" in tags and status == "valid", 'tool_req__docs' in id and "Architecture" in tags and status != "valid" + DDesign, 'tool_req__docs' in id and implemented == "YES" and "Detailed Design & Code" in tags and status == "valid", 'tool_req__docs' in id and implemented == "PARTIAL" and "Detailed Design & Code" in tags and status == "valid", 'tool_req__docs' in id and implemented == "NO" and "Detailed Design & Code" in tags and status == "valid", 'tool_req__docs' in id and "Detailed Design & Code" in tags and status != "valid" + TVR, 'tool_req__docs' in id and implemented == "YES" and "Tool Verification Reports" in tags and status == "valid", 'tool_req__docs' in id and implemented == "PARTIAL" and "Tool Verification Reports" in tags and status == "valid", 'tool_req__docs' in id and implemented == "NO" and "Tool Verification Reports" in tags and status == "valid", 'tool_req__docs' in id and "Tool Verification Reports" in tags and status != "valid" + Other, 'tool_req__docs' in id and implemented == "YES" and "Process / Other" in tags and status == "valid", 'tool_req__docs' in id and implemented == "PARTIAL" and "Process / Other" in tags and status == "valid", 'tool_req__docs' in id and implemented == "NO" and "Process / Other" in tags and status == "valid", 'tool_req__docs' in id and "Process / Other" in tags and status != "valid" + SftyAn, 'tool_req__docs' in id and implemented == "YES" and "Safety Analysis" in tags and status == "valid", 'tool_req__docs' in id and implemented == "PARTIAL" and "Safety Analysis" in tags and status == "valid", 'tool_req__docs' in id and implemented == "NO" and "Safety Analysis" in tags and status == "valid", 'tool_req__docs' in id and "Safety Analysis" in tags and status != "valid" 🗂️ Common Attributes @@ -53,7 +58,6 @@ This section provides an overview of current process requirements and their clar PROCESS_gd_req__req__attr_uid, 
PROCESS_gd_req__tool__attr_uid, PROCESS_gd_req__arch__attribute_uid - :parent_has_problem: NO :parent_covered: YES: together with tool_req__docs_attr_id_scheme Docs-as-Code shall enforce that all Need IDs are globally unique across all included @@ -68,18 +72,14 @@ This section provides an overview of current process requirements and their clar :implemented: PARTIAL :tags: Common Attributes :satisfies: PROCESS_gd_req__req__attr_uid, PROCESS_gd_req__arch__attribute_uid - :parent_has_problem: YES: Parents are not aligned :parent_covered: YES: together with tool_req__docs_attr_id Docs-as-Code shall enforce that Need IDs follow the following naming scheme: - .. TODO: is it "indicating" or "perfect match"? - e.g. workflow -> wf would be ok for "indicating", but not for "perfect match" - * A prefix indicating the need type (e.g. `feature__`) - * A middle part indicating the hierarchical structure of the need: + * A middle part matching the hierarchical structure of the need: * For requirements: a portion of the feature tree or a component acronym - * For architecture elements: the final part of the feature tree + * For architecture elements: the structural element (e.g. some part of the feature tree, component acronym) * Additional descriptive text to ensure human readability @@ -92,7 +92,6 @@ This section provides an overview of current process requirements and their clar :implemented: PARTIAL :tags: Common Attributes :satisfies: PROCESS_gd_req__requirements_attr_title - :parent_has_problem: NO :parent_covered: NO: Can not ensure summary @@ -126,7 +125,6 @@ This section provides an overview of current process requirements and their clar :satisfies: PROCESS_gd_req__requirements_attr_security, PROCESS_gd_req__arch_attr_security, - :parent_has_problem: YES: Architecture talks about requirements. Parents not aligned. 
Docs-as-Code shall enforce that the ``security`` attribute has one of the following values: @@ -136,7 +134,7 @@ This section provides an overview of current process requirements and their clar This rule applies to: * all requirement types defined in :need:`tool_req__docs_req_types`, except process requirements. - * all architecture elements (TODO; see https://github.com/eclipse-score/process_description/issues/34) + * all architecture elements defined in :need:`tool_req__docs_arch_types`. --------------------------- @@ -148,7 +146,6 @@ This section provides an overview of current process requirements and their clar :tags: Common Attributes :implemented: YES :parent_covered: YES - :parent_has_problem: YES: Architecture talks about requirements. Parents not aligned :satisfies: PROCESS_gd_req__req__attr_safety, PROCESS_gd_req__arch__attr_safety @@ -162,7 +159,7 @@ This section provides an overview of current process requirements and their clar This rule applies to: * all requirement types defined in :need:`tool_req__docs_req_types`, except process requirements. - * all architecture elements (TODO; see https://github.com/eclipse-score/process_description/issues/34) + * all architecture elements defined in :need:`tool_req__docs_arch_types`. ---------- 🚦 Status @@ -172,7 +169,6 @@ This section provides an overview of current process requirements and their clar :id: tool_req__docs_common_attr_status :tags: Common Attributes :implemented: YES - :parent_has_problem: YES: Architecture talks about requirements, currently we have valid|draft :parent_covered: YES :satisfies: PROCESS_gd_req__req__attr_status, @@ -186,7 +182,7 @@ This section provides an overview of current process requirements and their clar This rule applies to: * all requirement types defined in :need:`tool_req__docs_req_types`, except process requirements. 
- * all architecture elements (TODO; see https://github.com/eclipse-score/process_description/issues/34) + * all architecture elements defined in :need:`tool_req__docs_arch_types`. 📚 Documents ############# @@ -196,12 +192,13 @@ This section provides an overview of current process requirements and their clar :tags: Documents :implemented: YES + .. :satisfies: PROCESS_gd_req__doc_types (next process release) + Docs-as-Code shall support the following document types: * Generic Document (document) -.. NOTE: Header_service trigger/working execution is disabled .. tool_req:: Mandatory Document attributes :id: tool_req__docs_doc_attr :tags: Documents @@ -211,9 +208,9 @@ This section provides an overview of current process requirements and their clar PROCESS_gd_req__doc_approver, PROCESS_gd_req__doc_reviewer, :parent_covered: NO - :parent_has_problem: YES: Which need type to use for this? - Docs-as-Code shall enforce that each document model element has the following attributes: + Docs-as-Code shall enforce that each :need:`tool_req__docs_doc_types` has the + following attributes: * author * approver @@ -226,7 +223,7 @@ This section provides an overview of current process requirements and their clar :implemented: NO :satisfies: PROCESS_gd_req__doc_author :parent_covered: YES: Together with tool_req__docs_doc_attr - :parent_has_problem: YES: Unclear how the contribution % is counted and how to accumulate %. Committer is a reserved role. + :status: invalid Docs-as-Code shall provide an automatic mechanism to determine document authors. @@ -234,6 +231,9 @@ This section provides an overview of current process requirements and their clar document author. Contributors are accumulated over all commits to the file containing the document. + .. note:: + The requirement is currently invalid as it's currently unclear how the contribution + % are counted and how to accumulate %. .. 
tool_req:: Document approver is autofilled :id: tool_req__docs_doc_attr_approver_autofill @@ -241,13 +241,11 @@ This section provides an overview of current process requirements and their clar :implemented: NO :satisfies: PROCESS_gd_req__doc_approver :parent_covered: YES: Together with tool_req__docs_doc_attr - :parent_has_problem: YES: CODEOWNER is Github specific. Docs-as-Code shall provide an automatic mechanism to determine the document approver. - The approver shall be the last approver listed in *CODEOWNERS* of the file containing - the document. The determination is based on the last pull request (PR) that modified - the relevant file. + The approver shall be the approvers listed in *CODEOWNERS* of the last pull request of + the file containing the document. .. tool_req:: Document reviewer is autofilled @@ -256,13 +254,11 @@ This section provides an overview of current process requirements and their clar :implemented: NO :satisfies: PROCESS_gd_req__doc_reviewer :parent_covered: YES: Together with tool_req__docs_doc_attr - :parent_has_problem: NO Docs-as-Code shall provide an automatic mechanism to determine the document reviewers. - The ``reviewer`` attribute shall include all reviewers who are not listed as - approvers. The determination is based on the last pull request (PR) that modified the - relevant file. + The reviewer shall be the approvers NOT listed in *CODEOWNERS* of the last pull + request of the file containing the document. 
📋 Requirements @@ -277,7 +273,6 @@ This section provides an overview of current process requirements and their clar :tags: Requirements :implemented: YES :satisfies: PROCESS_gd_req__req__structure - :parent_has_problem: NO :parent_covered: YES: Together with tool_req__docs_linkage Docs-as-Code shall support the following requirement types: @@ -306,11 +301,11 @@ This section provides an overview of current process requirements and their clar :id: tool_req__docs_req_attr_reqtype :tags: Requirements :implemented: PARTIAL - :parent_has_problem: YES: tool_req shall not have 'reqtype' as discussed. process not excluded! :satisfies: PROCESS_gd_req__req__attr_type - Docs-as-Code shall enforce that each need of type :need:`tool_req__docs_req_types` has - a ``reqtype`` attribute with one of the following values: + Docs-as-Code shall enforce that each need of type :need:`tool_req__docs_req_types` + except process and tool requirements has a ``reqtype`` attribute with one of the + following values: * Functional * Interface @@ -323,7 +318,7 @@ This section provides an overview of current process requirements and their clar :tags: Requirements :implemented: NO :satisfies: PROCESS_gd_req__req__attr_req_cov - :parent_has_problem: YES: Not understandable what is required. + :status: invalid .. warning:: This requirement is not yet specified. The corresponding parent requirement is @@ -335,6 +330,7 @@ This section provides an overview of current process requirements and their clar :implemented: PARTIAL :parent_covered: YES :satisfies: PROCESS_gd_req__req__attr_test_covered + :status: invalid Docs-As-Code shall allow for every need of type :need:`tool_req__docs_req_types` to have a ``testcovered`` attribute, which must be one of: @@ -342,6 +338,10 @@ This section provides an overview of current process requirements and their clar * Yes * No + .. warning:: + This requirement is not yet specified. 
The corresponding parent requirement is + unclear and must be clarified before a precise tool requirement can be defined. + ------------------------- 🔗 Links ------------------------- @@ -352,24 +352,23 @@ This section provides an overview of current process requirements and their clar :implemented: PARTIAL :satisfies: PROCESS_gd_req__req__linkage, PROCESS_gd_req__req__traceability :parent_covered: YES - :parent_has_problem: YES: Mandatory for all needs? Especially some tool_reqs do not have a process requirement. Docs-as-Code shall enforce that linking between model elements via the ``satisfies`` - attribute follows defined rules. + attribute follows defined rules. Having at least one link is mandatory. Allowed source and target combinations are defined in the following table: .. table:: :widths: auto - ======================== =========================== - Requirement Type Allowed Link Target - ======================== =========================== - Feature Requirements Stakeholder Requirements - Component Requirements Feature Requirements - Process Requirements Workflows - Tooling Requirements Process Requirements - ======================== =========================== + ================================ =========================== + Source Type Allowed Link Target + ================================ =========================== + Feature Requirements Stakeholder Requirements + Component Requirements Feature Requirements + Process Requirements Workflows + Tooling Requirements Process Requirements + ================================ =========================== 🏛️ Architecture ################ @@ -387,30 +386,37 @@ This section provides an overview of current process requirements and their clar PROCESS_gd_req__arch__build_blocks, PROCESS_gd_req__arch__build_blocks_corr :implemented: PARTIAL - :parent_has_problem: YES: Referenced in https://github.com/eclipse-score/process_description/issues/34 :parent_covered: NO :status: invalid - .. 
warning:: - **OPEN ISSUE** → Architecture types are not yet understood - See: https://github.com/eclipse-score/process_description/issues/34 - - The list below is tentative at best. - Docs-as-Code shall support the following architecture types: - * Feature Architecture Static View (feat_arch_static) - does this count as an architecture type, or is it a view? - * Feature Architecture Dynamic View (feat_arch_dyn) - the views below have view in their type name!! - * Logical Architecture Interfaces (logic_arc_int) - That's a single interface and not "interfaces"? Or is it a view? - * Logical Architecture Interface Operation (logic_arc_int_op) - * Module Architecture Static View (mod_view_static) - * Module Architecture Dynamic View (mod_view_dyn) + * Feature (Architecture Element) = Feature Architecture Static View (feat_arch_static) + * Feature Architecture Dynamic View (feat_arch_dyn) + * Feature: Logical Architecture Interface (incl Logical Interface View) (logic_arc_int) + * Feature: Logical Architecture Interface Operation (logic_arc_int_op) * Component Architecture Static View (comp_arc_sta) * Component Architecture Dynamic View (comp_arc_dyn) - * Component Architecture Interfaces (comp_arc_int) - * Component Architecture Interface Operation (comp_arc_int_op) - * Real interface?? (see gd_req__arch__build_blocks_corr) - * Feature Architecture Interface?? (see gd_req__arch__traceability) + * Component Architecture Interface = Real Interface (comp_arc_int) + * Component Architecture Interface Operation = Real Interface Operation (comp_arc_int_op) + + +.. 
tool_req::Module Views + :id: tool_req__docs_module_views + :tags: Architecture + :satisfies: + PROCESS_gd_req__arch__hierarchical_structure, + PROCESS_gd_req__arch__viewpoints, + PROCESS_gd_req__arch__build_blocks, + PROCESS_gd_req__arch__build_blocks_corr + :implemented: PARTIAL + :parent_covered: NO + :status: invalid + + Docs-as-Code shall support the following module view-types: + + * Module = Module Architecture Static View = Top Level SW component container (mod_view_static) + * Module Architecture Dynamic View = Top Level SW component container (mod_view_dyn) ------------------------ @@ -426,7 +432,6 @@ This section provides an overview of current process requirements and their clar PROCESS_gd_req__arch__attr_fulfils, PROCESS_gd_req__arch__traceability, :parent_covered: YES - :parent_has_problem: YES: Attribute is not mentioned. Link direction not clear. Fig. 22 does not contain 'fulfils' Docs-as-Code shall enforce that linking via the ``fulfils`` attribute follows defined rules. 
@@ -450,7 +455,6 @@ This section provides an overview of current process requirements and their clar :implemented: PARTIAL :satisfies: PROCESS_gd_req__arch__linkage_requirement :parent_covered: YES - :parent_has_problem: NO Docs-as-Code shall enforce that architecture model elements of type :need:`tool_req__docs_arch_types` with ``safety != QM`` are linked to requirements of @@ -463,7 +467,6 @@ This section provides an overview of current process requirements and their clar :implemented: PARTIAL :satisfies: PROCESS_gd_req__arch__linkage_safety_trace :parent_covered: NO - :parent_has_problem: NO Docs-as-Code shall enforce that architecture model elements of type :need:`tool_req__docs_arch_types` with ``safety != QM`` can only be linked to other @@ -489,7 +492,6 @@ This section provides an overview of current process requirements and their clar :implemented: YES :satisfies: PROCESS_doc_concept__arch__process, PROCESS_gd_req__arch__viewpoints :parent_covered: YES - :parent_has_problem: NO Docs-as-Code shall enable the rendering of diagrams for the following architecture views: @@ -524,7 +526,6 @@ This section provides an overview of current process requirements and their clar :id: tool_req__docs_dd_link_testcase :tags: Detailed Design & Code :implemented: NO - :parent_has_problem: YES: Test vs Testcase unclear. Direction unclear. Goal unclear. 
:satisfies: PROCESS_gd_req__req__attr_testlink Docs-as-Code shall allow requirements of type :need:`tool_req__docs_req_types` to @@ -551,7 +552,6 @@ This section provides an overview of current process requirements and their clar :id: tool_req__docs_tvr_safety :tags: Tool Verification Reports :implemented: NO - :parent_has_problem: YES: Safety affected vs Safety relevance :parent_covered: YES :satisfies: PROCESS_gd_req__tool__attr_safety_affected @@ -566,7 +566,6 @@ This section provides an overview of current process requirements and their clar :tags: Tool Verification Reports :implemented: NO :parent_covered: YES - :parent_has_problem: YES: Safety affected vs Safety relevance :satisfies: PROCESS_gd_req__tool_attr_security_affected Docs-as-Code shall enforce that every Tool Verification Report includes a @@ -580,7 +579,6 @@ This section provides an overview of current process requirements and their clar :tags: Tool Verification Reports :implemented: NO :satisfies: PROCESS_gd_req__tool__attr_status - :parent_has_problem: NO :parent_covered: YES Docs-as-Code shall enforce that every Tool Verification Report includes a ``status`` @@ -630,8 +628,5 @@ This section provides an overview of current process requirements and their clar :safety: ASIL_B :security: NO -.. needextend:: c.this_doc() and type == 'tool_req' and "YES" in parent_has_problem - :status: invalid - .. needextend:: c.this_doc() and type == 'tool_req' and not status :status: valid diff --git a/src/incremental.py b/src/incremental.py index de5456dc..132b468e 100644 --- a/src/incremental.py +++ b/src/incremental.py @@ -53,6 +53,14 @@ def get_env(name: str) -> str: "--github_repo", help="GitHub repository to embed in the Sphinx build", ) + parser.add_argument( + "--port", + type=int, + help="Port to use for the live_preview ACTION. Default is 8000. 
" + "Use 0 for auto detection of a free port.", + default=8000, + ) + args = parser.parse_args() if args.debug: debugpy.listen(("0.0.0.0", args.debug_port)) @@ -87,7 +95,11 @@ def get_env(name: str) -> str: if action == "live_preview": sphinx_autobuild_main( # Note: bools need to be passed via '0' and '1' from the command line. - base_arguments + ["--define=disable_source_code_linker=1"] + base_arguments + + [ + "--define=disable_source_code_linker=1", + f"--port={args.port}", + ] ) else: sphinx_main(base_arguments) From 86b5a3739398218872c29fad291c934e607df3dd Mon Sep 17 00:00:00 2001 From: Nicolae Dicu Date: Tue, 1 Jul 2025 12:17:24 +0300 Subject: [PATCH 055/231] fix: esbonio support, registration of HeaderService (#120) --- src/extensions/score_header_service/header_service.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/extensions/score_header_service/header_service.py b/src/extensions/score_header_service/header_service.py index 78d43440..8529cf5d 100644 --- a/src/extensions/score_header_service/header_service.py +++ b/src/extensions/score_header_service/header_service.py @@ -56,7 +56,8 @@ def register(app: Sphinx, env: BuildEnvironment, _: str | None) -> None: :param env: The Sphinx build environment. :param _: Additional argument not used. """ - app.add_config_value("header_service_use_github_data", True, "env") + if not hasattr(app.config, "header_service_use_github_data"): + app.add_config_value("header_service_use_github_data", True, "env") data = SphinxNeedsData(env) services = data.get_or_create_services() services.register("header-service", HeaderService) From de4b8210b3ad0140449e10e1b9679df5f9462508 Mon Sep 17 00:00:00 2001 From: Simon Duerr Date: Fri, 4 Jul 2025 10:01:49 +0200 Subject: [PATCH 056/231] Add support for injecting dependencies (#119) Users of docs-as-code can now provide additional dependencies to the docs, and ide_support macros. 
fixes: #105 --- docs.bzl | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/docs.bzl b/docs.bzl index 9e9f95bf..f4b85c4f 100644 --- a/docs.bzl +++ b/docs.bzl @@ -58,7 +58,7 @@ sphinx_requirements = all_requirements + [ "@score_docs_as_code//src/extensions/score_source_code_linker:score_source_code_linker", ] -def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_dir = "docs", build_dir_for_incremental = "_build", docs_targets = []): +def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_dir = "docs", build_dir_for_incremental = "_build", docs_targets = [], deps = []): """ Creates all targets related to documentation. By using this function, you'll get any and all updates for documentation targets in one place. @@ -84,7 +84,7 @@ def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_ name = "sphinx_build" + suffix, visibility = ["//visibility:public"], data = ["@score_docs_as_code//src:docs_assets", "@score_docs_as_code//src:score_extension_files"] + external_needs_deps, - deps = sphinx_requirements, + deps = sphinx_requirements + deps, ) _incremental( incremental_name = "incremental" + suffix, @@ -94,6 +94,7 @@ def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_ build_dir = build_dir_for_incremental, external_needs_deps = external_needs_deps, external_needs_def = external_needs_def, + extra_dependencies = deps, ) _docs( name = "docs" + suffix, @@ -113,7 +114,7 @@ def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_ # Virtual python environment for working on the documentation (esbonio). # incl. python support when working on conf.py and sphinx extensions. 
# creates :ide_support target for virtualenv - _ide_support() + _ide_support(deps) # creates 'needs.json' build target @@ -160,11 +161,11 @@ def _incremental(incremental_name = "incremental", live_name = "live_preview", s }, ) -def _ide_support(): +def _ide_support(extra_dependencies): score_virtualenv( name = "ide_support", venv_name = ".venv_docs", - reqs = sphinx_requirements, + reqs = sphinx_requirements + extra_dependencies, ) def _docs(name = "docs", suffix = "", format = "html", external_needs_deps = list(), external_needs_def = list()): From ebebf9e83aabbed3f70d3231bbd48a94ac03a93e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Mon, 7 Jul 2025 12:13:44 +0200 Subject: [PATCH 057/231] Source code linker partial re-write (#125) * Re-engineered working of source code linker * Adapted tests to conform to new working of the extension * Changed parsing & writing of json * Small adaptation in README --------- Co-authored-by: Alexander Lanin --- docs.bzl | 11 +- docs/BUILD | 3 - docs/product/extensions/source_code_linker.md | 30 +- src/extensions/score_metamodel/__init__.py | 11 +- src/extensions/score_source_code_linker/BUILD | 20 +- .../score_source_code_linker/__init__.py | 253 ++++-- .../collect_source_files.bzl | 143 ---- .../generate_source_code_links_json.py | 177 ++++ .../score_source_code_linker/needlinks.py | 91 ++ .../parse_source_files.py | 196 ----- .../tests/scl_golden_file.json | 30 + .../tests/test_requirement_links.py | 787 +++++++++++++++--- .../tests/test_source_link.py | 319 +++++-- src/incremental.py | 30 +- 14 files changed, 1439 insertions(+), 662 deletions(-) delete mode 100755 src/extensions/score_source_code_linker/collect_source_files.bzl create mode 100644 src/extensions/score_source_code_linker/generate_source_code_links_json.py create mode 100644 src/extensions/score_source_code_linker/needlinks.py delete mode 100755 src/extensions/score_source_code_linker/parse_source_files.py create mode 100644 
src/extensions/score_source_code_linker/tests/scl_golden_file.json diff --git a/docs.bzl b/docs.bzl index f4b85c4f..204d9984 100644 --- a/docs.bzl +++ b/docs.bzl @@ -44,7 +44,6 @@ load("@rules_pkg//pkg:mappings.bzl", "pkg_files") load("@rules_pkg//pkg:tar.bzl", "pkg_tar") load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary", "sphinx_docs") load("@rules_python//sphinxdocs:sphinx_docs_library.bzl", "sphinx_docs_library") -load("@score_docs_as_code//src/extensions/score_source_code_linker:collect_source_files.bzl", "parse_source_files_for_needs_links") load("@score_python_basics//:defs.bzl", "score_virtualenv") sphinx_requirements = all_requirements + [ @@ -66,13 +65,6 @@ def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_ * only callable from 'docs/BUILD' """ - # Parse source files for needs links - # This needs to be created to generate a target, otherwise it won't execute as dependency for other macros - parse_source_files_for_needs_links( - name = "score_source_code_parser", - srcs_and_deps = source_files_to_scan_for_needs_links if source_files_to_scan_for_needs_links else [], - ) - # We are iterating over all provided 'targets' in order to allow for automatic generation of them without # needing to modify the underlying 'docs.bzl' file. for target in docs_targets: @@ -137,7 +129,7 @@ def _incremental(incremental_name = "incremental", live_name = "live_preview", s srcs = ["@score_docs_as_code//src:incremental.py"], deps = dependencies, # TODO: Figure out if we need all dependencies as data here or not. 
- data = [":score_source_code_parser", "@score_docs_as_code//src:plantuml", "@score_docs_as_code//src:docs_assets"] + dependencies + external_needs_deps, + data = ["@score_docs_as_code//src:plantuml", "@score_docs_as_code//src:docs_assets"] + dependencies + external_needs_deps, env = { "SOURCE_DIRECTORY": source_dir, "CONF_DIRECTORY": conf_dir, @@ -205,7 +197,6 @@ def _docs(name = "docs", suffix = "", format = "html", external_needs_deps = lis "manual", ], tools = [ - ":score_source_code_parser", "@score_docs_as_code//src:plantuml", "@score_docs_as_code//src:docs_assets", ] + external_needs_deps, diff --git a/docs/BUILD b/docs/BUILD index 2ac04b22..3d553f31 100644 --- a/docs/BUILD +++ b/docs/BUILD @@ -48,7 +48,4 @@ docs( }, ], source_dir = "docs", - source_files_to_scan_for_needs_links = [ - "//src:score_extension_files", - ], ) diff --git a/docs/product/extensions/source_code_linker.md b/docs/product/extensions/source_code_linker.md index 5abddd0e..54321f75 100644 --- a/docs/product/extensions/source_code_linker.md +++ b/docs/product/extensions/source_code_linker.md @@ -16,7 +16,7 @@ The extension uses two main components to integrate with Bazel: - Handles dependency tracking for incremental builds 2. `parse_source_files.py` - - Scans input files for template tags (e.g., `# req-traceability:`) + - Scans input files for template tags (e.g., "# req-traceability:") - Retrieves git information (hash, file location) - Generates mapping file with requirement IDs and links @@ -37,16 +37,26 @@ The extension uses two main components to integrate with Bazel: - Gets current git hash for each file - Constructs GitHub URLs with format: `{base_url}/{repo}/blob/{hash}/{file}#L{line_nr}` - **Note:** The base_url is defined in `parse_source_files.py`. Currently set to: `https://github.com/eclipse-score/score/blob/` + **Note:** The base_url is defined in `parse_source_files.py`. 
Currently set to: `https://github.com/eclipse-score/score/blob/` Produces JSON mapping file: ```json -{ - "REQ_ID": [ - "github_link1", - "github_link2" // If multiple code-links exist - ] -} +[ + { + "file": "src/implementation1.py", + "line": 3, + "tag":"# req-Id:", + "need": "TREQ_ID_1", + "full_line": "# req-Id: TREQ_ID_1" + }, + { + "file": "src/implementation2.py", + "line": 3, + "tag":"# req-Id:", + "need": "TREQ_ID_1", + "full_line": "# req-Id: TREQ_ID_1" + }, +] ```
@@ -86,7 +96,7 @@ The extension hooks into Sphinx's build process. It attaches to the `env-updated ### Adding Places to Search -You can easily add files to be searched by adding targets / files to the deps inside the +You can easily add files to be searched by adding targets / files to the deps inside the `collect_source_files_for_score_source_code_linker` in `docs/BUILD`. See here: @@ -126,7 +136,7 @@ WARNING: Could not find TREQ_ID_200 in the needs id's. Found in file(s):['_tooli ### Quickly Finding Source Links -The easiest and quickest way to find source_code_link options is to just search for the option `source_code_link`. It should give you all rst files +The easiest and quickest way to find source_code_link options is to just search for the option `source_code_link`. It should give you all rst files where the option is not empty. ### Executing Tests diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 26c53823..a19ce1f9 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -11,18 +11,17 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* import importlib -import pkgutil import json import os - +import pkgutil from collections.abc import Callable from pathlib import Path - from ruamel.yaml import YAML from sphinx.application import Sphinx from sphinx_needs import logging -from sphinx_needs.data import NeedsInfoType, SphinxNeedsData, NeedsView +from sphinx_needs.data import NeedsInfoType, NeedsView, SphinxNeedsData + from .log import CheckLogger logger = logging.get_logger(__name__) @@ -81,7 +80,9 @@ def _run_checks(app: Sphinx, exception: Exception | None) -> None: logger.debug(f"Running checks for {len(needs_all_needs)} needs") - prefix = str(Path(app.srcdir).relative_to(Path.cwd())) + ws_root = os.environ.get("BUILD_WORKSPACE_DIRECTORY", None) + cwd_or_ws_root = Path(ws_root) if 
ws_root else Path.cwd() + prefix = str(Path(app.srcdir).relative_to(cwd_or_ws_root)) log = CheckLogger(logger, prefix) diff --git a/src/extensions/score_source_code_linker/BUILD b/src/extensions/score_source_code_linker/BUILD index 597ed795..48113e1c 100644 --- a/src/extensions/score_source_code_linker/BUILD +++ b/src/extensions/score_source_code_linker/BUILD @@ -29,16 +29,18 @@ py_library( score_py_pytest( name = "score_source_code_linker_test", size = "small", - srcs = glob(["tests/**/*.py"]), + srcs = glob([ + "tests/**/*.py", + "test/**/*.json", + ]), + args = [ + "-s", + "-vv", + ], + data = glob(["**/*.json"]), + imports = ["."], deps = [ ":score_source_code_linker", + "@score_docs_as_code//src/extensions/score_metamodel", ] + all_requirements, ) - -# Needed to make the file parser executeable and findable for the source_code_linker aspect -py_binary( - name = "parsed_source_files_for_source_code_linker", - srcs = ["parse_source_files.py"], - main = "parse_source_files.py", - visibility = ["//visibility:public"], -) diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index 53ed8f75..aebac616 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -10,50 +10,102 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -import json + +"""In this file the actual sphinx extension is defined. It will read pre-generated +source code links from a JSON file and add them to the needs. 
+""" + +import subprocess +import os +from pprint import pprint +from collections import defaultdict from copy import deepcopy from pathlib import Path +from typing import cast from sphinx.application import Sphinx from sphinx.environment import BuildEnvironment -from sphinx_needs.data import NeedsMutable, SphinxNeedsData, NeedsInfoType +from sphinx.config import Config +from sphinx_needs.data import NeedsInfoType, NeedsMutable, SphinxNeedsData from sphinx_needs.logging import get_logger -from src.extensions.score_source_code_linker.parse_source_files import ( - get_github_base_url, +from src.extensions.score_source_code_linker.generate_source_code_links_json import ( + find_git_root, + find_ws_root, + generate_source_code_links_json, +) +from src.extensions.score_source_code_linker.needlinks import ( + NeedLink, + load_source_code_links_json, + DefaultNeedLink, ) LOGGER = get_logger(__name__) LOGGER.setLevel("DEBUG") -def setup(app: Sphinx) -> dict[str, str | bool]: +def get_cache_filename(build_dir: Path) -> Path: + """ + Returns the path to the cache file for the source code linker. + This is used to store the generated source code links. + """ + return build_dir / "score_source_code_linker_cache.json" + + +def setup_once(app: Sphinx, config: Config): + # might be the only way to solve this? + if "skip_rescanning_via_source_code_linker" in app.config: + return + print(f"DEBUG: Workspace root is {find_ws_root()}") + print(f"DEBUG: Current working directory is {Path('.')} = {Path('.').resolve()}") + print(f"DEBUG: Git root is {find_git_root()}") + + # Run only for local files! + # ws_root is not set when running on external repositories (dependencies). + ws_root = find_ws_root() + if not ws_root: + return + + # When BUILD_WORKSPACE_DIRECTORY is set, we are inside a git repository. + assert find_git_root(ws_root) + # Extension: score_source_code_linker - # TODO: can we detect live_preview & esbonio here? 
Until then we have a flag: - app.add_config_value("disable_source_code_linker", False, rebuild="env", types=bool) app.add_config_value( - "score_source_code_linker_file_overwrite", "", rebuild="env", types=str + "skip_rescanning_via_source_code_linker", + False, + rebuild="env", + types=bool, + description="Skip rescanning source code files via the source code linker.", ) # Define need_string_links here to not have it in conf.py app.config.needs_string_links = { "source_code_linker": { - "regex": r"(?P[^,]+)", - "link_url": "{{value}}", - "link_name": "Source Code Link", + "regex": r"(?P.+)<>(?P.+)", + "link_url": "{{url}}", + "link_name": "{{name}}", "options": ["source_code_link"], }, } - if app.config.disable_source_code_linker: - LOGGER.info( - "INFO: Disabled source code linker. Not loading extension.", - type="score_source_code_linker", - ) - else: + + cache_json = get_cache_filename(Path(app.outdir)) + + if not cache_json.exists() or not app.config.skip_rescanning_via_source_code_linker: LOGGER.debug( - "INFO: Loading source code linker...", type="score_source_code_linker" + "INFO: Generating source code links JSON file.", + type="score_source_code_linker", ) - app.connect("env-updated", add_source_link) + + generate_source_code_links_json(ws_root, cache_json) + + app.connect("env-updated", inject_links_into_needs) + + +def setup(app: Sphinx) -> dict[str, str | bool]: + # Esbonio will execute setup() on every iteration. + # setup_once will only be called once. + app.connect("config-inited", setup_once) + return { "version": "0.1", "parallel_read_safe": True, @@ -80,8 +132,100 @@ def find_need( return None +def group_by_need(source_code_links: list[NeedLink]) -> dict[str, list[NeedLink]]: + """ + Groups the given need links by their need ID. 
+ """ + source_code_links_by_need: dict[str, list[NeedLink]] = defaultdict(list) + for needlink in source_code_links: + source_code_links_by_need[needlink.need].append(needlink) + return source_code_links_by_need + + +def parse_git_output(str_line: str) -> str: + if len(str_line.split()) < 2: + LOGGER.warning( + f"Got wrong input line from 'get_github_repo_info'. Input: {str_line}. Expected example: 'origin git@github.com:user/repo.git'" + ) + return "" + url = str_line.split()[1] # Get the URL part + # Handle SSH format (git@github.com:user/repo.git) + if url.startswith("git@"): + path = url.split(":")[1] + else: + path = "/".join(url.split("/")[3:]) # Get part after github.com/ + return path.replace(".git", "") + + +def get_github_repo_info(git_root_cwd: Path) -> str: + process = subprocess.run( + ["git", "remote", "-v"], capture_output=True, text=True, cwd=git_root_cwd + ) + repo = "" + for line in process.stdout.split("\n"): + if "origin" in line and "(fetch)" in line: + repo = parse_git_output(line) + break + else: + # If we do not find 'origin' we just take the first line + LOGGER.info( + "Did not find origin remote name. Will now take first result from: 'git remote -v'" + ) + repo = parse_git_output(process.stdout.split("\n")[0]) + assert repo != "", ( + "Remote repository is not defined. Make sure you have a remote set. Check this via 'git remote -v'" + ) + return repo + + +def get_git_root(git_root: Path = Path()) -> Path: + # This is kinda ugly, doing this to reduce type errors. 
+ # There might be a nicer way to do this + if git_root == Path(): + passed_git_root = find_git_root() + if passed_git_root is None: + return Path() + else: + passed_git_root = git_root + return passed_git_root + + +def get_github_base_url(git_root: Path = Path()) -> str: + passed_git_root = get_git_root(git_root) + repo_info = get_github_repo_info(passed_git_root) + return f"https://github.com/{repo_info}" + + +def get_github_link( + git_root: Path = Path(), needlink: NeedLink = DefaultNeedLink() +) -> str: + passed_git_root = get_git_root(git_root) + base_url = get_github_base_url( + passed_git_root + ) # Pass git_root to avoid double lookup + current_hash = get_current_git_hash(passed_git_root) + return f"{base_url}/blob/{current_hash}/{needlink.file}#L{needlink.line}" + + +def get_current_git_hash(ws_root: Path) -> str: + try: + result = subprocess.run( + ["git", "log", "-n", "1", "--pretty=format:%H"], + cwd=ws_root, + capture_output=True, + check=True, + ) + decoded_result = result.stdout.strip().decode() + + assert all(c in "0123456789abcdef" for c in decoded_result) + return decoded_result + except Exception as e: + LOGGER.warning(f"Unexpected error: {ws_root}", exc_info=e) + raise + + # re-qid: gd_req__req__attr_impl -def add_source_link(app: Sphinx, env: BuildEnvironment) -> None: +def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: """ 'Main' function that facilitates the running of all other functions in correct order. @@ -91,54 +235,55 @@ def add_source_link(app: Sphinx, env: BuildEnvironment) -> None: env: Buildenvironment, this is filled automatically app: Sphinx app application, this is filled automatically """ + print("inject_links_into_needs!!!!") + + ws_root = find_ws_root() + assert ws_root Needs_Data = SphinxNeedsData(env) needs = Needs_Data.get_needs_mutable() - needs_copy = deepcopy(needs) - p5 = Path(__file__).parents[5] + needs_copy = deepcopy( + needs + ) # TODO: why do we create a copy? 
Can we also needs_copy = needs[:]? copy(needs)? - if str(p5).endswith("src"): - LOGGER.debug("DEBUG: WE ARE IN THE IF") - path = str(p5.parent / Path(app.confdir).name / "score_source_code_parser.json") - else: - LOGGER.debug("DEBUG: WE ARE IN THE ELSE") - path = str(p5 / "score_source_code_parser.json") + for id, need in needs.items(): + if need["source_code_link"]: + print( + f"?? Need {need['id']} already has source_code_link: {need['source_code_link']}" + ) + + source_code_links = load_source_code_links_json(get_cache_filename(app.outdir)) - if app.config.score_source_code_linker_file_overwrite: - path = app.config.score_source_code_linker_file_overwrite + # group source_code_links by need + # groupby requires the input to be sorted by the key + + source_code_links_by_need = group_by_need(source_code_links) # For some reason the prefix 'sphinx_needs internally' is CAPSLOCKED. # So we have to make sure we uppercase the prefixes prefixes = [x["id_prefix"].upper() for x in app.config.needs_external_needs] - github_base_url = get_github_base_url() + "/blob/" - try: - with open(path) as f: - gh_json = json.load(f) - for id, link in gh_json.items(): - id = id.strip() - need = find_need(needs_copy, id, prefixes) - if need is None: - # NOTE: manipulating link to remove git-hash, - # making the output file location more readable - files = [x.replace(github_base_url, "").split("/", 1)[-1] for x in link] + for need_id, needlinks in source_code_links_by_need.items(): + need = find_need(needs_copy, need_id, prefixes) + if need is None: + # TODO: print github annotations as in https://github.com/eclipse-score/bazel_registry/blob/7423b9996a45dd0a9ec868e06a970330ee71cf4f/tools/verify_semver_compatibility_level.py#L126-L129 + for n in needlinks: LOGGER.warning( - f"Could not find {id} in the needs id's. 
" - + f"Found in file(s): {files}", + f"{n.file}:{n.line}: Could not find {need_id} in documentation", type="score_source_code_linker", ) - continue + else: + need_as_dict = cast(dict[str, object], need) + + need_as_dict["source_code_link"] = ", ".join( + f"{get_github_link(ws_root, n)}<>{n.file}:{n.line}" for n in needlinks + ) # NOTE: Removing & adding the need is important to make sure # the needs gets 're-evaluated'. Needs_Data.remove_need(need["id"]) - need["source_code_link"] = ",".join(link) Needs_Data.add_need(need) - except Exception as e: - LOGGER.warning( - f"An unexpected error occurred while adding source_code_links to needs." - + f"Error: {e}", - type="score_source_code_linker", - ) - LOGGER.warning( - f"Reading file: {path} right now", type="score_source_code_linker" - ) + + # source_code_link of affected needs was overwritten. Make sure it's empty in all others! + for need in needs.values(): + if need["id"] not in source_code_links_by_need: + need["source_code_link"] = "" diff --git a/src/extensions/score_source_code_linker/collect_source_files.bzl b/src/extensions/score_source_code_linker/collect_source_files.bzl deleted file mode 100755 index 5dff0503..00000000 --- a/src/extensions/score_source_code_linker/collect_source_files.bzl +++ /dev/null @@ -1,143 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* -""" -Bazel rules and aspects for linking source code to documentation. 
- -This module provides: -- `SourceCodeLinksInfo`: A provider encapsulating parsed source code links. -- `parse_source_files_for_needs_links`: A function to set up the parsing rule. -""" - -# ----------------------------------------------------------------------------- -# Aspect to Collect Source Files (Internal) -# ----------------------------------------------------------------------------- - -_CollectedFilesInfo = provider( - doc = "Internal provider for collecting all source files.", - fields = { - "files": "depset of source files", - }, -) - -def _extract_files_from_attr(ctx, attr_name): - """Extracts source files from a given attribute if it exists.""" - return [ - f - for src in getattr(ctx.rule.attr, attr_name, []) - for f in src.files.to_list() - if not f.path.startswith("external") - ] - -def _extract_source_files(ctx): - # type: (ctx) -> list[File] - """Extracts source files from the context's attributes.""" - srcs = _extract_files_from_attr(ctx, "srcs") - hdrs = _extract_files_from_attr(ctx, "hdrs") - - return srcs + hdrs - -def _get_transitive_deps(attr, attr_name): - # type: (struct) -> list[Depset] - """Extracts previously collected transitive dependencies.""" - return [ - dep[_CollectedFilesInfo].files - for dep in getattr(attr, attr_name, []) - if _CollectedFilesInfo in dep - ] - -def _collect_source_files_aspect_impl(_target, ctx): - """Aspect implementation to collect source files from rules and dependencies.""" - - return [ - _CollectedFilesInfo( - files = depset( - _extract_source_files(ctx), - # Follow deps to collect source files from dependencies. - transitive = _get_transitive_deps(ctx.rule.attr, "deps"), - ), - ), - ] - -_collect_source_files_aspect = aspect( - implementation = _collect_source_files_aspect_impl, - # Follow deps to collect source files from dependencies. - attr_aspects = ["deps"], - doc = "Aspect that collects source files from a rule and its dependencies. 
(Internal)", -) - -# ----------------------------------------------------------------------------- -# Rule to Collect and Parse Source Files -# ----------------------------------------------------------------------------- - -SourceCodeLinksInfo = provider( - doc = "Provider containing a JSON file with source code links.", - fields = { - "file": "Path to JSON file containing source code links.", - }, -) - -def _collect_and_parse_source_files_impl(ctx): - """Implementation of a rule that collects and parses source files.""" - sources_file = ctx.actions.declare_file("%s_sources.txt" % ctx.label.name) - - all_files = depset( - # Collect source files from the current rule. - # The rule has an "srcs" attribute. - transitive = _get_transitive_deps(ctx.attr, "srcs_and_deps"), - ).to_list() - - ctx.actions.write(sources_file, "\n".join([f.path for f in all_files])) - parsed_sources_json_file = ctx.actions.declare_file("%s.json" % ctx.label.name) - - args = ctx.actions.args() - args.add(sources_file) - args.add("--output", parsed_sources_json_file) - - ctx.actions.run( - arguments = [args], - executable = ctx.executable._source_files_parser, - inputs = [sources_file] + all_files, - outputs = [parsed_sources_json_file], - ) - - return [ - DefaultInfo( - files = depset([parsed_sources_json_file]), - runfiles = ctx.runfiles([parsed_sources_json_file]), - ), - SourceCodeLinksInfo( - file = parsed_sources_json_file, - ), - ] - -parse_source_files_for_needs_links = rule( - implementation = _collect_and_parse_source_files_impl, - attrs = { - "srcs_and_deps": attr.label_list( - aspects = [_collect_source_files_aspect], - allow_files = True, - doc = "Dependencies and files to scan for links to documentation elements.", - ), - "_source_files_parser": attr.label( - # TODO: rename to source_files_parser in next PR - default = Label(":parsed_source_files_for_source_code_linker"), - executable = True, - cfg = "exec", - ), - }, - provides = [ - DefaultInfo, - SourceCodeLinksInfo, - ], - 
doc = "Rule that collects and parses source files for linking documentation. (Internal)", -) diff --git a/src/extensions/score_source_code_linker/generate_source_code_links_json.py b/src/extensions/score_source_code_linker/generate_source_code_links_json.py new file mode 100644 index 00000000..4444d408 --- /dev/null +++ b/src/extensions/score_source_code_linker/generate_source_code_links_json.py @@ -0,0 +1,177 @@ +# ******************************************************************************* +# Copyright (c) 2024 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +""" +This file is used by incremental.py to generate a JSON file with all source code links +for the needs. It's split this way, so that the live_preview action does not need to +parse everything on every run. 
+""" + +import os +import sys +from pathlib import Path +from pprint import pprint + +from src.extensions.score_source_code_linker.needlinks import ( + NeedLink, + store_source_code_links_json, +) + + +def find_ws_root() -> Path | None: + """Find the current MODULE.bazel file""" + ws_dir = os.environ.get("BUILD_WORKSPACE_DIRECTORY", None) + return Path(ws_dir) if ws_dir else None + + +def find_git_root(start_path: str | Path = "") -> Path | None: + """Find the git root directory starting from the given path or __file__.""" + if start_path == "": + start_path = __file__ + + git_root = Path(start_path).resolve() + while not (git_root / ".git").exists(): + git_root = git_root.parent + if git_root == Path("/"): + return None + return git_root + + +TAGS = [ + "# " + "req-traceability:", + "# " + "req-Id:", +] + + +def _extract_references_from_line(line: str): + """Extract requirement IDs from a line containing a tag.""" + + for tag in TAGS: + tag_index = line.find(tag) + if tag_index >= 0: + line_after_tag = line[tag_index + len(tag) :].strip() + # Split by comma or space to get multiple requirements + for req in line_after_tag.replace(",", " ").split(): + yield tag, req.strip() + + +def _extract_references_from_file(root: Path, file_path: Path) -> list[NeedLink]: + """Scan a single file for template strings and return findings.""" + assert root.is_absolute(), "Root path must be absolute" + assert not file_path.is_absolute(), "File path must be relative to the root" + # assert file_path.is_relative_to(root), f"File path ({file_path}) must be relative to the root ({root})" + assert (root / file_path).exists(), ( + f"File {file_path} does not exist in root {root}." 
+ ) + + findings: list[NeedLink] = [] + + try: + with open(root / file_path, encoding="utf-8", errors="ignore") as f: + for line_num, line in enumerate(f, 1): + for tag, req in _extract_references_from_line(line): + findings.append( + NeedLink( + file=file_path, + line=line_num, + tag=tag, + need=req, + full_line=line.strip(), + ) + ) + except (UnicodeDecodeError, PermissionError, OSError): + # Skip files that can't be read as text + pass + + return findings + + +def iterate_files_recursively(search_path: Path): + def _should_skip_file(file_path: Path) -> bool: + """Check if a file should be skipped during scanning.""" + # TODO: consider using .gitignore + return ( + file_path.is_dir() + or file_path.name.startswith((".", "_")) + or file_path.suffix in [".pyc", ".so", ".exe", ".bin"] + ) + + for root, dirs, files in os.walk(search_path): + root_path = Path(root) + + # Skip directories that start with '.' or '_' by modifying dirs in-place + # This prevents os.walk from descending into these directories + dirs[:] = [d for d in dirs if not d.startswith((".", "_", "bazel-"))] + + for file in files: + f = root_path / file + if not _should_skip_file(f): + yield f.relative_to(search_path) + + +def find_all_need_references(search_path: Path) -> list[NeedLink]: + """ + Find all need references in all files in git root. + Search for any appearance of TAGS and collect line numbers and referenced + requirements. + + Returns: + list[FileFindings]: List of FileFindings objects containing all findings + for each file that contains template strings. 
+ """ + start_time = os.times().elapsed + + all_need_references: list[NeedLink] = [] + + # Use os.walk to have better control over directory traversal + for file in iterate_files_recursively(search_path): + references = _extract_references_from_file(search_path, file) + all_need_references.extend(references) + + elapsed_time = os.times().elapsed - start_time + print( + f"DEBUG: Found {len(all_need_references)} need references " + f"in {elapsed_time:.2f} seconds" + ) + + return all_need_references + + +def generate_source_code_links_json(search_path: Path, file: Path): + """ + Generate a JSON file with all source code links for the needs. + This is used to link the needs to the source code in the documentation. + """ + needlinks = find_all_need_references(search_path) + store_source_code_links_json(file, needlinks) + + +# incremental_latest: +# DEBUG: Workspace root is /home/lla2hi/score/docs-as-code +# DEBUG: Current working directory is /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/execroot/_main/bazel-out/k8-fastbuild/bin/docs/incremental_latest.runfiles/_main +# DEBUG: Git root is /home/lla2hi/score/docs-as-code + +# incremental_release: (-> bazel build sandbox of process repository) +# DEBUG: Workspace root is None +# DEBUG: Current working directory is /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/sandbox/linux-sandbox/25/execroot/_main (-> process repo!!) 
+# rst files are in .../bazel-out/k8-fastbuild/bin/external/score_process~/process/_docs_needs_latest/score_process~/* +# DEBUG: Git root is /home/lla2hi/score/docs-as-code + +# docs_latest: +# DEBUG: Workspace root is None +# DEBUG: Current working directory is /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/sandbox/linux-sandbox/26/execroot/_main +# DEBUG: Git root is /home/lla2hi/score/docs-as-code + +# TODO docu: +# docs:docs has no source code links +# external repositories have no source code links (to their code) diff --git a/src/extensions/score_source_code_linker/needlinks.py b/src/extensions/score_source_code_linker/needlinks.py new file mode 100644 index 00000000..dbb52b38 --- /dev/null +++ b/src/extensions/score_source_code_linker/needlinks.py @@ -0,0 +1,91 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +import json +from dataclasses import asdict, dataclass +from pathlib import Path +from typing import Any + + +@dataclass(frozen=True) +class NeedLink: + """Represents a single template string finding in a file.""" + + file: Path + line: int + tag: str + need: str + full_line: str + + +def DefaultNeedLink() -> NeedLink: + """ + Return a default NeedLinks to be used as 'default args' or so + Like this better than adding defaults to the dataclass, as it is deliberate + """ + return NeedLink( + file=Path("."), + line=0, + tag="", + need="", + full_line="", + ) + + +class NeedLinkEncoder(json.JSONEncoder): + def default(self, o: object): + if isinstance(o, NeedLink): + return asdict(o) + if isinstance(o, Path): + return str(o) + return super().default(o) + + +def needlink_decoder(d: dict[str, Any]) -> NeedLink | dict[str, Any]: + if {"file", "line", "tag", "need", "full_line"} <= d.keys(): + return NeedLink( + file=Path(d["file"]), + line=d["line"], + tag=d["tag"], + need=d["need"], + full_line=d["full_line"], + ) + else: + # It's something else, pass it on to other decoders + return d + + +def store_source_code_links_json(file: Path, needlist: list[NeedLink]): + # After `rm -rf _build` or on clean builds the directory does not exist, so we need to create it + file.parent.mkdir(exist_ok=True) + with open(file, "w") as f: + json.dump( + needlist, + f, + cls=NeedLinkEncoder, # use your custom encoder + indent=2, + ensure_ascii=False, + ) + + +def load_source_code_links_json(file: Path) -> list[NeedLink]: + links: list[NeedLink] = json.loads( + file.read_text(encoding="utf-8"), + object_hook=needlink_decoder, + ) + assert isinstance(links, list), ( + "The source 
code links should be a list of NeedLink objects." + ) + assert all(isinstance(link, NeedLink) for link in links), ( + "All items in source_code_links should be NeedLink objects." + ) + return links diff --git a/src/extensions/score_source_code_linker/parse_source_files.py b/src/extensions/score_source_code_linker/parse_source_files.py deleted file mode 100755 index ba1d90fd..00000000 --- a/src/extensions/score_source_code_linker/parse_source_files.py +++ /dev/null @@ -1,196 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* -import argparse -import collections -import json -import logging -import os -import sys -import subprocess - - -# Importing from collections.abc as typing.Callable is deprecated since Python 3.9 -from collections.abc import Callable -from pathlib import Path - -logger = logging.getLogger(__name__) - -TAGS = [ - "# req-traceability:", - "# req-Id:", -] - - -def get_github_base_url() -> str: - git_root = find_git_root() - repo = get_github_repo_info(git_root) - return f"https://github.com/{repo}" - - -def parse_git_output(str_line: str) -> str: - if len(str_line.split()) < 2: - logger.warning( - f"Got wrong input line from 'get_github_repo_info'. Input: {str_line}. 
Expected example: 'origin git@github.com:user/repo.git'" - ) - return "" - url = str_line.split()[1] # Get the URL part - # Handle SSH format (git@github.com:user/repo.git) - if url.startswith("git@"): - path = url.split(":")[1] - else: - path = "/".join(url.split("/")[3:]) # Get part after github.com/ - return path.replace(".git", "") - - -def get_github_repo_info(git_root_cwd: Path) -> str: - process = subprocess.run( - ["git", "remote", "-v"], capture_output=True, text=True, cwd=git_root_cwd - ) - repo = "" - for line in process.stdout.split("\n"): - if "origin" in line and "(fetch)" in line: - repo = parse_git_output(line) - break - else: - # If we do not find 'origin' we just take the first line - logger.info( - "Did not find origin remote name. Will now take first result from: 'git remote -v'" - ) - repo = parse_git_output(process.stdout.split("\n")[0]) - assert repo != "", ( - "Remote repository is not defined. Make sure you have a remote set. Check this via 'git remote -v'" - ) - return repo - - -def find_git_root(): - """ - This is copied from 'find_runfiles' as the import does not work for some reason. - This should be fixed. - """ - git_root = Path(__file__).resolve() - while not (git_root / ".git").exists(): - git_root = git_root.parent - if git_root == Path("/"): - sys.exit( - "Could not find git root. Please run this script from the " - "root of the repository." - ) - return git_root - - -def get_git_hash(file_path: str) -> str: - """ - Grabs the latest git hash found for particular file - - Args: - file_path (str): Filepath of for which the githash should be retrieved. - - Returns: - (str): Full 40char length githash of the latest commit this file was changed. 
- - Example: - 3b3397ebc2777f47b1ae5258afc4d738095adb83 - """ - abs_path = None - try: - abs_path = Path(file_path).resolve() - if not os.path.isfile(abs_path): - logger.warning(f"File not found: {abs_path}") - return "file_not_found" - result = subprocess.run( - ["git", "log", "-n", "1", "--pretty=format:%H", "--", abs_path], - cwd=Path(abs_path).parent, - capture_output=True, - ) - decoded_result = result.stdout.strip().decode() - - # sanity check - assert all(c in "0123456789abcdef" for c in decoded_result) - return decoded_result - except Exception as e: - logger.warning(f"Unexpected error: {abs_path}: {e}") - return "error" - - -def extract_requirements( - source_file: str, - github_base_url: str, - git_hash_func: Callable[[str], str] | None = get_git_hash, -) -> dict[str, list[str]]: - """ - This extracts the file-path, lineNr as well as the git hash of the file - where a tag was found. - - Args: - source_file (str): path to source file that should be parsed. - git_hash_func (Optional[callable]): Optional parameter - only supplied during testing. - If left empty func 'get_git_hash' is used. - - Returns: - # TODO: change these links - Returns dictionary per file like this: - { - "TOOL_REQ__toolchain_sphinx_needs_build__requirement_linkage_types": [ - https://github.com/eclipse-score/score/blob/3b3397ebc2777f47b1ae5258afc4d738095adb83/_tooling/extensions/score_metamodel/utils.py, - ... # further found places of the same ID if there are any - ] - "TOOL_REQ__toolchain_sphinx_needs_build__...": [ - https://github.com/eclipse-score/score/blob/3b3397ebc2777f47b1ae5258afc4d738095adb83/_tooling/extensions/score_metamodel/checks/id.py, - ... 
# places where this ID as found - ] - } - """ - # force None to get_git_hash - if git_hash_func is None: - git_hash_func = get_git_hash - - requirement_mapping: dict[str, list[str]] = collections.defaultdict(list) - with open(source_file) as f: - for line_number, line in enumerate(f): - line_number = line_number + 1 - line = line.strip() - if any(x in line for x in TAGS): - hash = git_hash_func(source_file) - cleaned_line = ( - line.replace("'", "").replace('"', "").replace(",", "").strip() - ) - check_tag = cleaned_line.split(":")[1].strip() - if check_tag: - req_id = cleaned_line.split(":")[-1].strip() - link = f"{github_base_url}/blob/{hash}/{source_file}#L{line_number}" - requirement_mapping[req_id].append(link) - return requirement_mapping - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("-o", "--output") - parser.add_argument("inputs", nargs="*") - - args, _ = parser.parse_known_args() - - logger.info(f"Parsing source files: {args.inputs}") - - # Finding the GH URL - gh_base_url = get_github_base_url() - requirement_mappings: dict[str, list[str]] = collections.defaultdict(list) - for input in args.inputs: - with open(input) as f: - for source_file in f: - rm = extract_requirements(source_file.strip(), gh_base_url) - for k, v in rm.items(): - requirement_mappings[k].extend(v) - with open(args.output, "w") as f: - f.write(json.dumps(requirement_mappings, indent=2)) diff --git a/src/extensions/score_source_code_linker/tests/scl_golden_file.json b/src/extensions/score_source_code_linker/tests/scl_golden_file.json new file mode 100644 index 00000000..e5584a12 --- /dev/null +++ b/src/extensions/score_source_code_linker/tests/scl_golden_file.json @@ -0,0 +1,30 @@ +[ + { + "file": "src/implementation1.py", + "line": 3, + "tag":"#-----req-Id:", + "need": "TREQ_ID_1", + "full_line": "#-----req-Id: TREQ_ID_1" + }, + { + "file": "src/implementation2.py", + "line": 3, + "tag":"#-----req-Id:", + "need": "TREQ_ID_1", + "full_line": 
"#-----req-Id: TREQ_ID_1" + }, + { + "file": "src/implementation1.py", + "line": 9, + "tag":"#-----req-Id:", + "need": "TREQ_ID_2", + "full_line":"#-----req-Id: TREQ_ID_2" + }, + { + "file": "src/bad_implementation.py", + "line":2, + "tag":"#-----req-Id:", + "need": "TREQ_ID_200", + "full_line":"#-----req-Id: TREQ_ID_200" + } +] diff --git a/src/extensions/score_source_code_linker/tests/test_requirement_links.py b/src/extensions/score_source_code_linker/tests/test_requirement_links.py index fe502510..1d6dd301 100644 --- a/src/extensions/score_source_code_linker/tests/test_requirement_links.py +++ b/src/extensions/score_source_code_linker/tests/test_requirement_links.py @@ -10,179 +10,696 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -from collections import defaultdict -from collections.abc import Callable -from gettext import find +import json +import os +import subprocess +import tempfile from pathlib import Path +from sphinx_needs.data import NeedsMutable +from src.extensions.score_metamodel.tests import need as test_need +from dataclasses import asdict +from typing import Any + import pytest -import logging -from pytest import TempPathFactory -from src.extensions.score_source_code_linker.parse_source_files import ( + +# Import the module under test +# Note: You'll need to adjust these imports based on your actual module structure +from src.extensions.score_source_code_linker import ( + find_need, + get_cache_filename, + get_current_git_hash, get_github_base_url, - find_git_root, + get_github_link, get_github_repo_info, - extract_requirements, - get_git_hash, + group_by_need, parse_git_output, - logger as scl_logger, +) +from src.extensions.score_source_code_linker.needlinks import ( + NeedLink, + store_source_code_links_json, + load_source_code_links_json, ) +""" +# ────────────────ATTENTION─────────────── -@pytest.fixture(scope="session") -def create_tmp_files(tmp_path_factory: 
TempPathFactory) -> Path: - root_dir: Path = tmp_path_factory.mktemp("test_root") - test_file_contents = """ +# ╭──────────────────────────────────────╮ +# │ !!!!! │ +# │ BOILERPLATE TEST MADE VIA │ +# │ GENERATION. NOT YET FULLY LOOKED │ +# │ THROUGH │ +# │ !!!! │ +# ╰──────────────────────────────────────╯ +""" -def implementation_1(): - pass -# req-Id: TEST_REQ__LINKED_ID -def implementation_tagged(): - pass +def encode_comment(s: str) -> str: + return s.replace(" ", "-----", 1) -# req-traceability: TEST_REQ__LINKED_TRACE -def implementation_tagged_2(): - pass -""" - with open(root_dir / "testfile.txt", "w") as f: - f.write(test_file_contents) - test_file_contents2 = """ +def decode_comment(s: str) -> str: + return s.replace("-----", " ", 1) -# req-Id: TEST_REQ__LINKED_DIFFERENT_FILE -def implementation_separate(): - pass -""" - with open(root_dir / "testfile2.txt", "w") as f: - f.write(test_file_contents2) - test_file_contents3 = """ -def implementation_14(): - pass +class NeedLinkTestEncoder(json.JSONEncoder): + def default(self, o: object): + if isinstance(o, NeedLink): + d = asdict(o) + d["tag"] = encode_comment(d.get("tag", "")) + d["full_line"] = encode_comment(d.get("full_line", "")) + return d + if isinstance(o, Path): + return str(o) + return super().default(o) -def implementation_4(): - pass -# comments -def implementation_4(): - pass - """ - with open(root_dir / "testfile3.txt", "w") as f: - f.write(test_file_contents3) - return root_dir +def needlink_test_decoder(d: dict[str, Any]) -> NeedLink | dict[str, Any]: + if {"file", "line", "tag", "need", "full_line"} <= d.keys(): + return NeedLink( + file=Path(d["file"]), + line=d["line"], + tag=decode_comment(d["tag"]), + need=d["need"], + full_line=decode_comment(d["full_line"]), + ) + else: + # It's something else, pass it on to other decoders + return d -def dummy_git_hash_func(input: str) -> Callable[[str], str]: - return lambda _: input +@pytest.fixture +def temp_dir(): + """Create a temporary 
directory for tests.""" + with tempfile.TemporaryDirectory() as temp_dir: + yield Path(temp_dir) -def test_extract_requirements(create_tmp_files: Path): - root_dir = create_tmp_files - github_base_url = get_github_base_url() - results_dict1 = extract_requirements( - str(root_dir / "testfile.txt"), github_base_url, dummy_git_hash_func("no-hash") +@pytest.fixture +def git_repo(temp_dir): + """Create a real git repository for testing.""" + git_dir = temp_dir / "test_repo" + git_dir.mkdir() + + # Initialize git repo + subprocess.run(["git", "init"], cwd=git_dir, check=True, capture_output=True) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=git_dir, check=True ) - expected_dict1: dict[str, list[str]] = defaultdict(list) - expected_dict1["TEST_REQ__LINKED_ID"].append( - f"{github_base_url}/blob/no-hash/{root_dir}/testfile.txt#L7" + subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) + + # Create a test file and commit + test_file = git_dir / "test_file.py" + test_file.write_text("# Test file\nprint('hello')\n") + subprocess.run(["git", "add", "."], cwd=git_dir, check=True) + subprocess.run(["git", "commit", "-m", "Initial commit"], cwd=git_dir, check=True) + + # Add a remote + subprocess.run( + ["git", "remote", "add", "origin", "git@github.com:test-user/test-repo.git"], + cwd=git_dir, + check=True, ) - expected_dict1["TEST_REQ__LINKED_TRACE"].append( - f"{github_base_url}/blob/no-hash/{root_dir}/testfile.txt#L11" + + return git_dir + + +@pytest.fixture +def git_repo_with_https_remote(temp_dir): + """Create a git repository with HTTPS remote for testing.""" + git_dir = temp_dir / "test_repo_https" + git_dir.mkdir() + + # Initialize git repo + subprocess.run(["git", "init"], cwd=git_dir, check=True, capture_output=True) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=git_dir, check=True + ) + subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) + + 
# Create a test file and commit + test_file = git_dir / "test_file.py" + test_file.write_text("# Test file\nprint('hello')\n") + subprocess.run(["git", "add", "."], cwd=git_dir, check=True) + subprocess.run(["git", "commit", "-m", "Initial commit"], cwd=git_dir, check=True) + + # Add HTTPS remote + subprocess.run( + [ + "git", + "remote", + "add", + "origin", + "https://github.com/test-user/test-repo.git", + ], + cwd=git_dir, + check=True, ) - # Assumed random hash here to test if passed correctly - results_dict2 = extract_requirements( - str(root_dir / "testfile2.txt"), - github_base_url, - dummy_git_hash_func("aacce4887ceea1f884135242a8c182db1447050"), + return git_dir + + +@pytest.fixture +def git_repo_multiple_remotes(temp_dir): + """Create a git repository with multiple remotes for testing.""" + git_dir = temp_dir / "test_repo_multiple" + git_dir.mkdir() + + # Initialize git repo + subprocess.run(["git", "init"], cwd=git_dir, check=True, capture_output=True) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=git_dir, check=True + ) + subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) + + # Create a test file and commit + test_file = git_dir / "test_file.py" + test_file.write_text("# Test file\nprint('hello')\n") + subprocess.run(["git", "add", "."], cwd=git_dir, check=True) + subprocess.run(["git", "commit", "-m", "Initial commit"], cwd=git_dir, check=True) + + # Add multiple remotes + subprocess.run( + ["git", "remote", "add", "upstream", "git@github.com:upstream/test-repo.git"], + cwd=git_dir, + check=True, ) - expected_dict2: dict[str, list[str]] = defaultdict(list) - expected_dict2["TEST_REQ__LINKED_DIFFERENT_FILE"].append( - f"{github_base_url}/blob/aacce4887ceea1f884135242a8c182db1447050/{root_dir}/testfile2.txt#L3" + subprocess.run( + ["git", "remote", "add", "origin", "git@github.com:test-user/test-repo.git"], + cwd=git_dir, + check=True, ) - results_dict3 = extract_requirements( - 
str(root_dir / "testfile3.txt"), github_base_url + return git_dir + + +@pytest.fixture +def sample_needlinks(): + """Create sample NeedLink objects for testing.""" + return [ + NeedLink( + file=Path("src/implementation1.py"), + line=3, + tag="# req-Id:", + need="TREQ_ID_1", + full_line="# req-Id: TREQ_ID_1", + ), + NeedLink( + file=Path("src/implementation2.py"), + line=3, + tag="# req-Id:", + need="TREQ_ID_1", + full_line="# req-Id: TREQ_ID_1", + ), + NeedLink( + file=Path("src/implementation1.py"), + line=9, + tag="# req-Id:", + need="TREQ_ID_2", + full_line="# req-Id: TREQ_ID_2", + ), + NeedLink( + file=Path("src/bad_implementation.py"), + line=2, + tag="# req-Id:", + need="TREQ_ID_200", + full_line="# req-Id: TREQ_ID_200", + ), + ] + + +@pytest.fixture +def cache_file_with_links(temp_dir, sample_needlinks): + """Create a cache file with sample needlinks.""" + cache_file = temp_dir / "cache.json" + store_source_code_links_json(cache_file, sample_needlinks) + return cache_file + + +@pytest.fixture +def sample_needs(): + """Create sample needs data for testing.""" + return { + "TREQ_ID_1": { + "id": "TREQ_ID_1", + "source_code_link": "", + "title": "Test requirement 1", + }, + "TREQ_ID_2": { + "id": "TREQ_ID_2", + "source_code_link": "", + "title": "Test requirement 2", + }, + "TREQ_ID_3": { + "id": "TREQ_ID_3", + "source_code_link": "", + "title": "Test requirement 3", + }, + } + + +# Test utility functions +def test_get_cache_filename(): + """Test cache filename generation.""" + build_dir = Path("/tmp/build") + expected = build_dir / "score_source_code_linker_cache.json" + result = get_cache_filename(build_dir) + assert result == expected + + +def make_needs(needs_dict): + return NeedsMutable( + {need_id: test_need(**params) for need_id, params in needs_dict.items()} ) - expected_dict3: dict[str, list[str]] = defaultdict(list) - # if there is no git-hash returned from command. - # This happens if the file is new and not committed yet. 
- results_dict4 = extract_requirements( - str(root_dir / "testfile2.txt"), github_base_url, dummy_git_hash_func("") + +def test_find_need_direct_match(): + """Test finding a need with direct ID match.""" + all_needs = make_needs( + { + "REQ_001": {"id": "REQ_001", "title": "Test requirement"}, + "REQ_002": {"id": "REQ_002", "title": "Another requirement"}, + } ) - expected_dict4: dict[str, list[str]] = defaultdict(list) - expected_dict4["TEST_REQ__LINKED_DIFFERENT_FILE"].append( - f"{github_base_url}/blob//{root_dir}/testfile2.txt#L3" + result = find_need(all_needs, "REQ_001", []) + assert result is not None + assert result["id"] == "REQ_001" + + +def test_find_need_with_prefix(): + """Test finding a need with prefix matching.""" + + all_needs = make_needs( + { + "PREFIX_REQ_001": {"id": "PREFIX_REQ_001", "title": "Prefixed requirement"}, + "REQ_002": {"id": "REQ_002", "title": "Another requirement"}, + } + ) + result = find_need(all_needs, "REQ_001", ["PREFIX_"]) + assert result is not None + assert result["id"] == "PREFIX_REQ_001" + + +def test_find_need_multiple_prefixes(): + """Test finding a need with multiple prefixes.""" + all_needs = make_needs( + { + "SECOND_REQ_001": { + "id": "SECOND_REQ_001", + "title": "Second prefixed requirement", + }, + "REQ_002": {"id": "REQ_002", "title": "Another requirement"}, + } + ) + + result = find_need(all_needs, "REQ_001", ["FIRST_", "SECOND_"]) + assert result is not None + assert result["id"] == "SECOND_REQ_001" + + +def test_find_need_not_found(): + """Test finding a need that doesn't exist.""" + all_needs = make_needs( + { + "REQ_001": {"id": "REQ_001", "title": "Test requirement"}, + } ) - assert results_dict1 == expected_dict1 - assert results_dict2 == expected_dict2 - assert results_dict3 == expected_dict3 - assert results_dict4 == expected_dict4 + result = find_need(all_needs, "REQ_999", ["PREFIX_"]) + assert result is None + + +def test_group_by_need(sample_needlinks): + """Test grouping source code links by need 
ID.""" + result = group_by_need(sample_needlinks) + + assert len(result) == 3 + assert len(result["TREQ_ID_1"]) == 2 + assert len(result["TREQ_ID_2"]) == 1 + assert len(result["TREQ_ID_200"]) == 1 + + # Check that the grouping is correct + assert result["TREQ_ID_1"][0].file == Path("src/implementation1.py") + assert result["TREQ_ID_1"][1].file == Path("src/implementation2.py") + assert result["TREQ_ID_2"][0].file == Path("src/implementation1.py") + assert result["TREQ_ID_2"][0].line == 9 + + +def test_group_by_need_empty_list(): + """Test grouping empty list of needlinks.""" + result = group_by_need([]) + assert len(result) == 0 + + +# Test git-related functions +def test_parse_git_output_ssh_format(): + """Test parsing git remote output in SSH format.""" + git_line = "origin git@github.com:test-user/test-repo.git (fetch)" + result = parse_git_output(git_line) + assert result == "test-user/test-repo" + + +def test_parse_git_output_https_format(): + """Test parsing git remote output in HTTPS format.""" + git_line = "origin https://github.com/test-user/test-repo.git (fetch)" + result = parse_git_output(git_line) + assert result == "test-user/test-repo" -def test_get_git_hash(): - assert get_git_hash("testfile.x") == "file_not_found" - assert get_git_hash("") == "file_not_found" +def test_parse_git_output_ssh_format_without_git_suffix(): + """Test parsing git remote output in SSH format without .git suffix.""" + git_line = "origin git@github.com:test-user/test-repo (fetch)" + result = parse_git_output(git_line) + assert result == "test-user/test-repo" -# These tests aren't great / exhaustive, but an okay first step into the right direction. 
+def test_parse_git_output_invalid_format(): + """Test parsing invalid git remote output.""" + git_line = "invalid" + result = parse_git_output(git_line) + assert result == "" + + +def test_parse_git_output_empty_string(): + """Test parsing empty git remote output.""" + git_line = "" + result = parse_git_output(git_line) + assert result == "" + + +def test_get_github_repo_info_ssh_remote(git_repo): + """Test getting GitHub repository information with SSH remote.""" + result = get_github_repo_info(git_repo) + assert result == "test-user/test-repo" + + +def test_get_github_repo_info_https_remote(git_repo_with_https_remote): + """Test getting GitHub repository information with HTTPS remote.""" + result = get_github_repo_info(git_repo_with_https_remote) + assert result == "test-user/test-repo" + + +def test_get_github_repo_info_multiple_remotes(git_repo_multiple_remotes): + """Test getting GitHub repository information with multiple remotes (should prefer origin).""" + result = get_github_repo_info(git_repo_multiple_remotes) + assert result == "test-user/test-repo" + + +def test_get_current_git_hash(git_repo): + """Test getting current git hash.""" + print("==== GIt REPO====") + a = git_repo + print(a) + result = get_current_git_hash(git_repo) + + # Verify it's a valid git hash (40 hex characters) + assert len(result) == 40 + assert all(c in "0123456789abcdef" for c in result) + + +def test_get_current_git_hash_invalid_repo(temp_dir): + """Test getting git hash from invalid repository.""" + with pytest.raises(Exception): + get_current_git_hash(temp_dir) + + +# def test_get_github_base_url_with_real_repo(git_repo): +# """Test getting GitHub base URL with real repository.""" +# # Temporarily set the git repo as the current directory context +# original_cwd = os.getcwd() +# os.chdir(git_repo) +# +# try: +# # We need to temporarily patch find_git_root to return our test repo +# import src.extensions.score_source_code_linker as module +# +# original_find_git_root = 
module.find_git_root +# module.find_git_root = lambda: git_repo +# +# result = get_github_base_url() +# expected = "https://github.com/test-user/test-repo" +# assert result == expected +# +# finally: +# module.find_git_root = original_find_git_root +# os.chdir(original_cwd) + + +def test_get_github_link_with_real_repo(git_repo): + """Test generating GitHub link with real repository.""" + # Create a needlink + needlink = NeedLink( + file=Path("src/test.py"), + line=42, + tag="# req-Id:", + need="REQ_001", + full_line="# req-Id: REQ_001", + ) + + result = get_github_link(git_repo, needlink) + + # Should contain the base URL, hash, file path, and line number + assert "https://github.com/test-user/test-repo/blob/" in result + assert "src/test.py#L42" in result + assert len(result.split("/")) >= 7 # Should have proper URL structure + + # Verify the hash is actually from the repo + hash_from_link = result.split("/blob/")[1].split("/")[0] + actual_hash = get_current_git_hash(git_repo) + assert hash_from_link == actual_hash + + +# Test cache file operations +def test_cache_file_operations(temp_dir, sample_needlinks): + """Test storing and loading cache files.""" + cache_file = temp_dir / "test_cache.json" + + # Store links + store_source_code_links_json(cache_file, sample_needlinks) + + # Verify file was created + assert cache_file.exists() + + # Load and verify links + loaded_links = load_source_code_links_json(cache_file) + + assert len(loaded_links) == 4 + assert loaded_links[0].need == "TREQ_ID_1" + assert loaded_links[1].need == "TREQ_ID_1" + assert loaded_links[2].need == "TREQ_ID_2" + assert loaded_links[3].need == "TREQ_ID_200" + assert loaded_links[0].line == 3 + assert loaded_links[1].line == 3 + assert loaded_links[2].line == 9 + assert loaded_links[3].line == 2 + + +def test_cache_file_with_encoded_comments(temp_dir): + """Test that cache file properly handles encoded comments.""" + # Create needlinks with spaces in tags and full_line + needlinks = [ + 
NeedLink( + file=Path("src/test.py"), + line=1, + tag="# req-Id:", + need="TEST_001", + full_line="# req-Id: TEST_001", + ) + ] + + cache_file = temp_dir / "encoded_cache.json" + store_source_code_links_json(cache_file, needlinks) + + # Check the raw JSON to verify encoding + with open(cache_file, "r") as f: + raw_content = f.read() + assert "# req-Id:" in raw_content # Should be encoded + assert "#-----req-Id:" not in raw_content # Original should not be present + + # Load and verify decoding + loaded_links = load_source_code_links_json(cache_file) + assert len(loaded_links) == 1 + assert loaded_links[0].tag == "# req-Id:" # Should be decoded back + assert loaded_links[0].full_line == "# req-Id: TEST_001" + + +# Integration tests +def test_group_by_need_and_find_need_integration(sample_needlinks): + """Test grouping links and finding needs together.""" + # Group the test links + grouped = group_by_need(sample_needlinks) + + # Create mock needs + all_needs = make_needs( + { + "TREQ_ID_1": {"id": "TREQ_ID_1", "title": "Test requirement 1"}, + "TREQ_ID_2": {"id": "TREQ_ID_2", "title": "Test requirement 2"}, + "PREFIX_TREQ_ID_200": { + "id": "PREFIX_TREQ_ID_200", + "title": "Prefixed requirement", + }, + } + ) + + # Test finding needs for each group + for need_id, links in grouped.items(): + found_need = find_need(all_needs, need_id, ["PREFIX_"]) + if need_id in ["TREQ_ID_1", "TREQ_ID_2"]: + assert found_need is not None + assert found_need["id"] == need_id + elif need_id == "TREQ_ID_200": + assert found_need is not None + assert found_need["id"] == "PREFIX_TREQ_ID_200" + + +def test_end_to_end_with_real_files(temp_dir, git_repo): + """Test end-to-end workflow with real files and git repo.""" + # Create source files with requirement IDs + src_dir = git_repo / "src" + src_dir.mkdir() + + (src_dir / "implementation1.py").write_text(""" +# Some implementation +# req-Id: TREQ_ID_1 +def function1(): + pass + +# Another function +# req-Id: TREQ_ID_2 +def function2(): + pass 
+""") + + (src_dir / "implementation2.py").write_text(""" +# Another implementation +# req-Id: TREQ_ID_1 +def another_function(): + pass +""") + + # Commit the changes + subprocess.run(["git", "add", "."], cwd=git_repo, check=True) + subprocess.run( + ["git", "commit", "-m", "Add implementation files"], cwd=git_repo, check=True + ) + + # Create needlinks manually (simulating what generate_source_code_links_json would do) + needlinks = [ + NeedLink( + file=Path("src/implementation1.py"), + line=3, + tag="# req-Id:", + need="TREQ_ID_1", + full_line="# req-Id: TREQ_ID_1", + ), + NeedLink( + file=Path("src/implementation1.py"), + line=8, + tag="# req-Id:", + need="TREQ_ID_2", + full_line="# req-Id: TREQ_ID_2", + ), + NeedLink( + file=Path("src/implementation2.py"), + line=3, + tag="# req-Id:", + need="TREQ_ID_1", + full_line="# req-Id: TREQ_ID_1", + ), + ] + + # Test cache operations + cache_file = temp_dir / "cache.json" + store_source_code_links_json(cache_file, needlinks) + loaded_links = load_source_code_links_json(cache_file) + + assert len(loaded_links) == 3 + + # Test grouping + grouped = group_by_need(loaded_links) + assert len(grouped["TREQ_ID_1"]) == 2 + assert len(grouped["TREQ_ID_2"]) == 1 + + # Test GitHub link generation + + os.chdir(Path(git_repo).absolute()) + for needlink in loaded_links: + github_link = get_github_link(git_repo, needlink) + assert "https://github.com/test-user/test-repo/blob/" in github_link + assert f"src/{needlink.file.name}#L{needlink.line}" in github_link + + +def test_multiple_commits_hash_consistency(git_repo): + """Test that git hash remains consistent and links update properly.""" + # Get initial hash + initial_hash = get_current_git_hash(git_repo) + + # Create and commit a new file + new_file = git_repo / "new_file.py" + new_file.write_text("# New file\nprint('new')") + subprocess.run(["git", "add", "."], cwd=git_repo, check=True) + subprocess.run(["git", "commit", "-m", "Add new file"], cwd=git_repo, check=True) + + # Get 
new hash + new_hash = get_current_git_hash(git_repo) + + # Hashes should be different + assert initial_hash != new_hash + assert len(new_hash) == 40 + + # Test that links use the current hash + needlink = NeedLink( + file=Path("new_file.py"), + line=1, + tag="# req-Id:", + need="TEST_001", + full_line="# req-Id: TEST_001", + ) + + os.chdir(Path(git_repo).absolute()) + github_link = get_github_link(git_repo, needlink) + assert new_hash in github_link + + +# Test error handling +def test_git_operations_with_no_commits(temp_dir): + """Test git operations on repo with no commits.""" + git_dir = temp_dir / "empty_repo" + git_dir.mkdir() + + # Initialize git repo but don't commit anything + subprocess.run(["git", "init"], cwd=git_dir, check=True, capture_output=True) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=git_dir, check=True + ) + subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) + + os.chdir(Path(git_dir).absolute()) + # Should raise an exception when trying to get hash + with pytest.raises(Exception): + a = get_current_git_hash(git_dir) + + +def test_git_repo_with_no_remotes(temp_dir): + """Test git repository with no remotes.""" + git_dir = temp_dir / "no_remote_repo" + git_dir.mkdir() + + # Initialize git repo + subprocess.run(["git", "init"], cwd=git_dir, check=True, capture_output=True) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=git_dir, check=True + ) + subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) + # Create a test file and commit + test_file = git_dir / "test_file.py" + test_file.write_text("# Test file\nprint('hello')\n") + subprocess.run(["git", "add", "."], cwd=git_dir, check=True) + subprocess.run(["git", "commit", "-m", "Initial commit"], cwd=git_dir, check=True) + os.chdir(git_dir) -def test_get_github_repo_info(): - # I'd argue the happy path is tested with the other ones? 
+ # Should raise an exception when trying to get repo info with pytest.raises(AssertionError): - get_github_repo_info(Path(".")) - - -git_test_data_ok = [ - ( - "origin https://github.com/eclipse-score/test-repo.git (fetch)", - "eclipse-score/test-repo", - ), - ( - "origin git@github.com:eclipse-score/test-repo.git (fetch)", - "eclipse-score/test-repo", - ), - ("origin git@github.com:eclipse-score/test-repo.git", "eclipse-score/test-repo"), - ("upstream git@github.com:upstream/repo.git (fetch)", "upstream/repo"), -] - - -@pytest.mark.parametrize("input,output", git_test_data_ok) -def test_parse_git_output_ok(input, output): - assert output == parse_git_output(input) - - -git_test_data_bad = [ - ("origin ", ""), - ( - " ", - "", - ), -] - - -@pytest.mark.parametrize("input,output", git_test_data_bad) -def test_parse_git_output_bad(caplog, input, output): - with caplog.at_level(logging.WARNING, logger=scl_logger.name): - result = parse_git_output(input) - assert len(caplog.messages) == 1 - assert caplog.records[0].levelname == "WARNING" - assert ( - f"Got wrong input line from 'get_github_repo_info'. Input: {input}. Expected example: 'origin git@github.com:user/repo.git'" - in caplog.records[0].message - ) - assert output == result - - -def test_get_github_base_url(): - # Not really a great test imo. 
- git_root = find_git_root() - repo = get_github_repo_info(git_root) - expected = f"https://github.com/{repo}" - actual = get_github_base_url() - assert expected == actual + get_github_repo_info(git_dir) diff --git a/src/extensions/score_source_code_linker/tests/test_source_link.py b/src/extensions/score_source_code_linker/tests/test_source_link.py index 591e2d66..b9bbd87f 100644 --- a/src/extensions/score_source_code_linker/tests/test_source_link.py +++ b/src/extensions/score_source_code_linker/tests/test_source_link.py @@ -11,62 +11,171 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* import json +from collections import Counter from collections.abc import Callable from pathlib import Path import pytest +import os +import subprocess +import shutil + +from typing import cast from pytest import TempPathFactory -from src.extensions.score_source_code_linker.parse_source_files import ( - get_github_base_url, -) from sphinx.testing.util import SphinxTestApp from sphinx_needs.data import SphinxNeedsData +from test_requirement_links import needlink_test_decoder +from src.extensions.score_source_code_linker import get_github_base_url, get_github_link +from src.extensions.score_source_code_linker.needlinks import NeedLink +from src.extensions.score_source_code_linker.generate_source_code_links_json import ( + find_ws_root, +) + + +@pytest.fixture() +def sphinx_base_dir(tmp_path_factory: TempPathFactory) -> Path: + repo_path = tmp_path_factory.mktemp("test_git_repo") + return repo_path + + +@pytest.fixture() +def git_repo_setup(sphinx_base_dir) -> Path: + """Creating git repo, to make testing possible""" + + repo_path = sphinx_base_dir + subprocess.run(["git", "init"], cwd=repo_path, check=True) + subprocess.run( + ["git", "config", "user.name", "Test User"], cwd=repo_path, check=True + ) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=repo_path, check=True + ) + + 
subprocess.run( + ["git", "remote", "add", "origin", "https://github.com/testorg/testrepo.git"], + cwd=repo_path, + check=True, + ) + os.environ["BUILD_WORKSPACE_DIRECTORY"] = str(repo_path) + return repo_path + + +@pytest.fixture() +def create_demo_files(sphinx_base_dir, git_repo_setup): + repo_path = sphinx_base_dir + + # Create some source files with requirement IDs + source_dir = repo_path / "src" + source_dir.mkdir() + + # Create source files that contain requirement references + (source_dir / "implementation1.py").write_text(make_source_1()) + + (source_dir / "implementation2.py").write_text(make_source_2()) + (source_dir / "bad_implementation.py").write_text(make_bad_source()) + # Create a docs directory for Sphinx + docs_dir = repo_path / "docs" + docs_dir.mkdir() + (docs_dir / "index.rst").write_text(basic_needs()) + (docs_dir / "conf.py").write_text(basic_conf()) + curr_dir = Path(__file__).absolute().parent + # print("CURR_dir", curr_dir) + shutil.copyfile(curr_dir / "scl_golden_file.json", repo_path / ".golden_file.json") + + # Add files to git and commit + subprocess.run(["git", "add", "."], cwd=repo_path, check=True) + subprocess.run( + ["git", "commit", "-m", "Initial commit with test files"], + cwd=repo_path, + check=True, + ) + + # Cleanup + # Don't know if we need this? + # os.environ.pop("BUILD_WORKSPACE_DIRECTORY", None) + + +def make_source_1(): + return """ +# This is a test implementation file +# req-Id: TREQ_ID_1 +def some_function(): + pass + +# Some other code here +# More code... 
+# req-Id: TREQ_ID_2 +def another_function(): + pass +""" + + +def make_source_2(): + return """ +# Another implementation file +# req-Id: TREQ_ID_1 +class SomeClass: + def method(self): + pass + +""" + + +def make_bad_source(): + return """ +# req-Id: TREQ_ID_200 +def This_Should_Error(self): + pass + +""" + def construct_gh_url() -> str: gh = get_github_base_url() return f"{gh}/blob/" -@pytest.fixture(scope="session") -def sphinx_base_dir(tmp_path_factory: TempPathFactory) -> Path: - return tmp_path_factory.mktemp("sphinx") - - -@pytest.fixture(scope="session") +@pytest.fixture() def sphinx_app_setup( - sphinx_base_dir: Path, -) -> Callable[[str, str, dict[str, list[str]]], SphinxTestApp]: - def _create_app( - conf_content: str, rst_content: str, requierments_text: dict[str, list[str]] - ): - src_dir = sphinx_base_dir / "src" - src_dir.mkdir(exist_ok=True) - - (src_dir / "conf.py").write_text(conf_content) - (src_dir / "index.rst").write_text(rst_content) - (src_dir / "score_source_code_parser.json").write_text( - json.dumps(requierments_text) - ) + sphinx_base_dir, create_demo_files, git_repo_setup +) -> Callable[[], SphinxTestApp]: + def _create_app(): + base_dir = sphinx_base_dir + docs_dir = base_dir / "docs" - return SphinxTestApp( - freshenv=True, - srcdir=Path(src_dir), - confdir=Path(src_dir), - outdir=sphinx_base_dir / "out", - buildername="html", - warningiserror=True, - confoverrides={ - "score_source_code_linker_file_overwrite": str( - src_dir / "score_source_code_parser.json" - ) - }, - ) + # CRITICAL: Change to a directory that exists and is accessible + # This fixes the "no such file or directory" error in Bazel + original_cwd = None + try: + original_cwd = os.getcwd() + except FileNotFoundError: + # Current working directory doesn't exist, which is the problem + pass + + # Change to the base_dir before creating SphinxTestApp + os.chdir(base_dir) + try: + return SphinxTestApp( + freshenv=True, + srcdir=docs_dir, + confdir=docs_dir, + 
outdir=sphinx_base_dir / "out", + buildername="html", + warningiserror=True, + ) + finally: + # Try to restore original directory, but don't fail if it doesn't exist + if original_cwd is not None: + try: + os.chdir(original_cwd) + except (FileNotFoundError, OSError): + # Original directory might not exist anymore in Bazel sandbox + pass return _create_app -@pytest.fixture(scope="session") def basic_conf(): return """ extensions = [ @@ -83,18 +192,9 @@ def basic_conf(): ), ] needs_extra_options = ["source_code_link"] -needs_string_links = { - "source_code_linker": { - "regex": r"(?P[^,]+)", - "link_url": "{{value}}", - "link_name": "Source Code Link", - "options": ["source_code_link"], - }, -} """ -@pytest.fixture(scope="session") def basic_needs(): return """ TESTING SOURCE LINK @@ -110,74 +210,125 @@ def basic_needs(): """ -@pytest.fixture(scope="session") -def example_source_link_text_all_ok(): - github_base_url = construct_gh_url() +@pytest.fixture() +def example_source_link_text_all_ok(sphinx_base_dir): + repo_path = sphinx_base_dir return { "TREQ_ID_1": [ - f"{github_base_url}aacce4887ceea1f884135242a8c182db1447050/tools/sources/implementation1.py#L2", - f"{github_base_url}/tools/sources/implementation_2_new_file.py#L20", + NeedLink( + file=Path("src/implementation1.py"), + line=3, + tag="#" + " req-Id:", + need="TREQ_ID_1", + full_line="#" + " req-Id: TREQ_ID_1", + ), + NeedLink( + file=Path("src/implementation2.py"), + line=3, + tag="#" + " req-Id:", + need="TREQ_ID_1", + full_line="#" + " req-Id: TREQ_ID_1", + ), ], "TREQ_ID_2": [ - f"{github_base_url}f53f50a0ab1186329292e6b28b8e6c93b37ea41/tools/sources/implementation1.py#L18" + NeedLink( + file=Path("src/implementation1.py"), + line=9, + tag="#" + " req-Id:", + need="TREQ_ID_2", + full_line="#" + " req-Id: TREQ_ID_2", + ) ], } -@pytest.fixture(scope="session") -def example_source_link_text_non_existent(): - github_base_url = construct_gh_url() - return { - "TREQ_ID_200": [ - 
f"{github_base_url}f53f50a0ab1186329292e6b28b8e6c93b37ea41/tools/sources/bad_implementation.py#L17" - ], - } +@pytest.fixture() +def example_source_link_text_non_existent(sphinx_base_dir): + repo_path = sphinx_base_dir + return [ + { + "TREQ_ID_200": [ + NeedLink( + file=Path(f"src/bad_implementation.py"), + line=2, + tag="#" + " req-Id:", + need="TREQ_ID_200", + full_line="#" + " req-Id: TREQ_ID_200", + ) + ] + } + ] + + +def make_source_link(ws_root: Path, needlinks): + return ", ".join( + f"{get_github_link(ws_root, n)}<>{n.file}:{n.line}" for n in needlinks + ) + + +def compare_json_files(file1: Path, golden_file: Path): + with open(file1, "r") as f1: + json1 = json.load(f1, object_hook=needlink_test_decoder) + with open(golden_file, "r") as f2: + json2 = json.load(f2, object_hook=needlink_test_decoder) + assert len(json1) == len(json2), ( + f"{file1}'s lenth are not the same as in the golden file lenght. Len of{file1}: {len(json1)}. Len of Golden File: {len(json2)}" + ) + c1 = Counter(n for n in json1) + c2 = Counter(n for n in json2) + assert c1 == c2, ( + f"Testfile does not have same needs as golden file. Testfile: {c1}\nGoldenFile: {c2}" + ) def test_source_link_integration_ok( - sphinx_app_setup: Callable[[str, str, dict[str, list[str]]], SphinxTestApp], - basic_conf: str, - basic_needs: str, + sphinx_app_setup: Callable[[], SphinxTestApp], example_source_link_text_all_ok: dict[str, list[str]], - sphinx_base_dir: Path, + sphinx_base_dir, + git_repo_setup, + create_demo_files, ): - github_url = construct_gh_url() - app = sphinx_app_setup(basic_conf, basic_needs, example_source_link_text_all_ok) + app = sphinx_app_setup() try: + os.environ["BUILD_WORKSPACE_DIRECTORY"] = str(sphinx_base_dir) app.build() + ws_root = find_ws_root() + if ws_root is None: + # This should never happen + pytest.fail(f"WS_root is none. 
WS_root: {ws_root}") Needs_Data = SphinxNeedsData(app.env) needs_data = {x["id"]: x for x in Needs_Data.get_needs_view().values()} - assert "TREQ_ID_1" in needs_data - assert "TREQ_ID_2" in needs_data - # extra_options are only available at runtime - assert ( - ",".join(example_source_link_text_all_ok["TREQ_ID_1"]) - == needs_data["TREQ_ID_1"]["source_code_link"] # type: ignore - ) - assert ( - ",".join(example_source_link_text_all_ok["TREQ_ID_2"]) - == needs_data["TREQ_ID_2"]["source_code_link"] # type: ignore + compare_json_files( + app.outdir / "score_source_code_linker_cache.json", + sphinx_base_dir / ".golden_file.json", ) + # Testing TREQ_ID_1 & TREQ_ID_2 + for i in range(1, 3): + assert f"TREQ_ID_{i}" in needs_data + need_as_dict = cast(dict[str, object], needs_data[f"TREQ_ID_{i}"]) + expected_link = make_source_link( + ws_root, example_source_link_text_all_ok[f"TREQ_ID_{i}"] + ) + # extra_options are only available at runtime + assert expected_link == need_as_dict["source_code_link"] # type: ignore) finally: app.cleanup() def test_source_link_integration_non_existent_id( - sphinx_app_setup: Callable[[str, str, dict[str, list[str]]], SphinxTestApp], - basic_conf: str, - basic_needs: str, + sphinx_app_setup: Callable[[], SphinxTestApp], example_source_link_text_non_existent: dict[str, list[str]], - sphinx_base_dir: Path, + sphinx_base_dir, + git_repo_setup, + create_demo_files, ): - app = sphinx_app_setup( - basic_conf, basic_needs, example_source_link_text_non_existent - ) + app = sphinx_app_setup() try: app.build() warnings = app.warning.getvalue() assert ( - "WARNING: Could not find TREQ_ID_200 in the needs id's. 
Found in " - "file(s): ['tools/sources/bad_implementation.py#L17']" in warnings + "src/bad_implementation.py:2: Could not find TREQ_ID_200 in documentation" + in warnings ) finally: app.cleanup() diff --git a/src/incremental.py b/src/incremental.py index 132b468e..a5e4bdb5 100644 --- a/src/incremental.py +++ b/src/incremental.py @@ -12,9 +12,9 @@ # ******************************************************************************* import argparse -import json import logging import os +from pathlib import Path import debugpy from sphinx.cmd.build import main as sphinx_main @@ -22,10 +22,6 @@ logger = logging.getLogger(__name__) -logger.debug("DEBUG: CWD: ", os.getcwd()) -logger.debug("DEBUG: SOURCE_DIRECTORY: ", os.getenv("SOURCE_DIRECTORY")) -logger.debug("DEBUG: RUNFILES_DIR: ", os.getenv("RUNFILES_DIR")) - def get_env(name: str) -> str: val = os.environ.get(name, None) @@ -68,19 +64,23 @@ def get_env(name: str) -> str: debugpy.wait_for_client() workspace = os.getenv("BUILD_WORKSPACE_DIRECTORY") + # if workspace: + # os.chdir(workspace) if workspace: - os.chdir(workspace) + workspace += "/" + else: + workspace = "" base_arguments = [ - get_env("SOURCE_DIRECTORY"), - get_env("BUILD_DIRECTORY"), + workspace + get_env("SOURCE_DIRECTORY"), + workspace + get_env("BUILD_DIRECTORY"), "-W", # treat warning as errors "--keep-going", # do not abort after one error "-T", # show details in case of errors in extensions "--jobs", "auto", "--conf-dir", - get_env("CONF_DIRECTORY"), + workspace + get_env("CONF_DIRECTORY"), f"--define=external_needs_source={get_env('EXTERNAL_NEEDS_INFO')}", ] @@ -88,16 +88,20 @@ def get_env(name: str) -> str: if args.github_user and args.github_repo: base_arguments.append(f"-A=github_user={args.github_user}") base_arguments.append(f"-A=github_repo={args.github_repo}") - base_arguments.append(f"-A=github_version=main") - base_arguments.append(f"-A=doc_path=docs") + base_arguments.append("-A=github_version=main") + 
base_arguments.append("-A=doc_path=docs") action = get_env("ACTION") if action == "live_preview": + build_dir = Path(get_env("BUILD_DIRECTORY")) + (workspace / build_dir / "score_source_code_linker_cache.json").unlink( + missing_ok=False + ) sphinx_autobuild_main( - # Note: bools need to be passed via '0' and '1' from the command line. base_arguments + [ - "--define=disable_source_code_linker=1", + # Note: bools need to be passed via '0' and '1' from the command line. + "--define=skip_rescanning_via_source_code_linker=1", f"--port={args.port}", ] ) From 1c436f15e43701535368b0c5e9dbf2575ff87a87 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jochen=20H=C3=B6nle?= Date: Mon, 7 Jul 2025 14:19:54 +0200 Subject: [PATCH 058/231] checks: bugfix id contains feature (#114) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * checks: bugfix id contains feature - separate feature in single parts - search for each part individually * Update src/extensions/score_metamodel/checks/id_contains_feature.py * Fix rst file based tests --------- Signed-off-by: Jochen Hönle Co-authored-by: Maximilian Sören Pollak --- .../checks/id_contains_feature.py | 21 +++++++++++++++++-- .../test_id_contains_feature.rst | 5 +++-- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/src/extensions/score_metamodel/checks/id_contains_feature.py b/src/extensions/score_metamodel/checks/id_contains_feature.py index 02825189..2c0712b7 100644 --- a/src/extensions/score_metamodel/checks/id_contains_feature.py +++ b/src/extensions/score_metamodel/checks/id_contains_feature.py @@ -11,6 +11,7 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* import os +import re from sphinx.application import Sphinx from sphinx_needs.data import NeedsInfoType @@ -37,6 +38,7 @@ def id_contains_feature(app: Sphinx, need: NeedsInfoType, log: CheckLogger): # Get the part of the string after the first two underscores: 
the path feature = parts[1] + featureparts = re.split(r"[_-]", feature) dir_docname = os.path.dirname(str(need.get("docname", ""))) @@ -45,7 +47,22 @@ def id_contains_feature(app: Sphinx, need: NeedsInfoType, log: CheckLogger): # NOTE: This does not match the process requirements docname = dir_docname if dir_docname else need.get("docname", "") - if feature not in docname: + # allow if any feature part is contained in UID + foundfeatpart = any( + featurepart.lower() in docname.lower() + for featurepart in featureparts + if featureparts + ) + + # allow abbreviation of the feature + initials = "".join( + featurepart[0].lower() for featurepart in featureparts if len(featureparts) > 1 + ) + foundinitials = initials in docname.lower() + + if not (foundfeatpart or foundinitials): log.warning_for_option( - need, "id", f"Feature '{feature}' not in path '{docname}'." + need, + "id", + f"Featurepart '{featureparts}' not in path '{docname}' or abbreviation not ok, expected: '{initials}'.", ) diff --git a/src/extensions/score_metamodel/tests/rst/id_contains_feature/test_id_contains_feature.rst b/src/extensions/score_metamodel/tests/rst/id_contains_feature/test_id_contains_feature.rst index 5815db3c..5b6aa74a 100644 --- a/src/extensions/score_metamodel/tests/rst/id_contains_feature/test_id_contains_feature.rst +++ b/src/extensions/score_metamodel/tests/rst/id_contains_feature/test_id_contains_feature.rst @@ -13,8 +13,9 @@ # ******************************************************************************* #CHECK: id_contains_feature -.. Feature is not in the path of the RST file -#EXPECT: std_wp__test__abcd.id (std_wp__test__abcd): Feature 'test' not in path +.. Feature is deeper in the path of the RST file +.. This is now explicitly allowed +#EXPECT-NOT: std_wp__test__abcd.id (std_wp__test__abcd): Feature 'test' not in path .. 
std_wp:: This is a test :id: std_wp__test__abcd From 68fe59ed21c4d6f8ee31cb63a84d1063eea67317 Mon Sep 17 00:00:00 2001 From: Nicolae Dicu Date: Tue, 8 Jul 2025 10:32:49 +0300 Subject: [PATCH 059/231] Fix direct sphinx build. (#127) * Fix direct sphinx build. * Fix source code linker for python sphinx build --------- Signed-off-by: Nicolae Dicu --- src/extensions/score_plantuml.py | 2 +- .../generate_source_code_links_json.py | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/extensions/score_plantuml.py b/src/extensions/score_plantuml.py index ba5b5da1..1690315a 100644 --- a/src/extensions/score_plantuml.py +++ b/src/extensions/score_plantuml.py @@ -53,7 +53,7 @@ def get_runfiles_dir() -> Path: # But we need to find it first. logger.debug("Running outside bazel.") - git_root = Path(__file__).resolve() + git_root = Path.cwd().resolve() while not (git_root / ".git").exists(): git_root = git_root.parent if git_root == Path("/"): diff --git a/src/extensions/score_source_code_linker/generate_source_code_links_json.py b/src/extensions/score_source_code_linker/generate_source_code_links_json.py index 4444d408..90cd607e 100644 --- a/src/extensions/score_source_code_linker/generate_source_code_links_json.py +++ b/src/extensions/score_source_code_linker/generate_source_code_links_json.py @@ -40,10 +40,15 @@ def find_git_root(start_path: str | Path = "") -> Path | None: start_path = __file__ git_root = Path(start_path).resolve() + esbonio_search = False while not (git_root / ".git").exists(): git_root = git_root.parent if git_root == Path("/"): - return None + # fallback to cwd when building with python -m sphinx docs _build -T + if esbonio_search: + return None + git_root = Path.cwd().resolve() + esbonio_search = True return git_root From f57ba65c988217514c1de13edf39481196b6b508 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Tue, 8 Jul 2025 11:26:05 +0200 Subject: [PATCH 060/231] Fix source code linker errors 
(#128) * Increase version * Fixing parsing errors Source code linker errored when it parsed the test & documentaiton files. This removes those tags in order to stop those errors --- MODULE.bazel | 2 +- docs/product/extensions/source_code_linker.md | 11 +-- .../tests/test_requirement_links.py | 67 ++++++++++--------- .../tests/test_source_link.py | 24 +++++-- 4 files changed, 61 insertions(+), 43 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index eee1f8bd..143e6af8 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.4.0", + version = "0.4.1", compatibility_level = 0, ) diff --git a/docs/product/extensions/source_code_linker.md b/docs/product/extensions/source_code_linker.md index 54321f75..2e7085d5 100644 --- a/docs/product/extensions/source_code_linker.md +++ b/docs/product/extensions/source_code_linker.md @@ -40,21 +40,22 @@ The extension uses two main components to integrate with Bazel: **Note:** The base_url is defined in `parse_source_files.py`. Currently set to: `https://github.com/eclipse-score/score/blob/` Produces JSON mapping file: -```json +The strings are split here to not enable tracking by the source code linker. 
+```python [ { "file": "src/implementation1.py", "line": 3, - "tag":"# req-Id:", + "tag":"#" + " req-Id:", "need": "TREQ_ID_1", - "full_line": "# req-Id: TREQ_ID_1" + "full_line": "#"+" req-Id: TREQ_ID_1" }, { "file": "src/implementation2.py", "line": 3, - "tag":"# req-Id:", + "tag":"#" + " req-Id:", "need": "TREQ_ID_1", - "full_line": "# req-Id: TREQ_ID_1" + "full_line": "#"+" req-Id: TREQ_ID_1" }, ] ``` diff --git a/src/extensions/score_source_code_linker/tests/test_requirement_links.py b/src/extensions/score_source_code_linker/tests/test_requirement_links.py index 1d6dd301..d706884e 100644 --- a/src/extensions/score_source_code_linker/tests/test_requirement_links.py +++ b/src/extensions/score_source_code_linker/tests/test_requirement_links.py @@ -201,30 +201,30 @@ def sample_needlinks(): NeedLink( file=Path("src/implementation1.py"), line=3, - tag="# req-Id:", + tag="#" + " req-Id:", need="TREQ_ID_1", - full_line="# req-Id: TREQ_ID_1", + full_line="#" + " req-Id: TREQ_ID_1", ), NeedLink( file=Path("src/implementation2.py"), line=3, - tag="# req-Id:", + tag="#" + " req-Id:", need="TREQ_ID_1", - full_line="# req-Id: TREQ_ID_1", + full_line="#" + " req-Id: TREQ_ID_1", ), NeedLink( file=Path("src/implementation1.py"), line=9, - tag="# req-Id:", + tag="#" + " req-Id:", need="TREQ_ID_2", - full_line="# req-Id: TREQ_ID_2", + full_line="#" + " req-Id: TREQ_ID_2", ), NeedLink( file=Path("src/bad_implementation.py"), line=2, - tag="# req-Id:", + tag="#" + " req-Id:", need="TREQ_ID_200", - full_line="# req-Id: TREQ_ID_200", + full_line="#" + " req-Id: TREQ_ID_200", ), ] @@ -452,9 +452,9 @@ def test_get_github_link_with_real_repo(git_repo): needlink = NeedLink( file=Path("src/test.py"), line=42, - tag="# req-Id:", + tag="#" + " req-Id:", need="REQ_001", - full_line="# req-Id: REQ_001", + full_line="#" + " req-Id: REQ_001", ) result = get_github_link(git_repo, needlink) @@ -502,9 +502,9 @@ def test_cache_file_with_encoded_comments(temp_dir): NeedLink( 
file=Path("src/test.py"), line=1, - tag="# req-Id:", + tag="#" + " req-Id:", need="TEST_001", - full_line="# req-Id: TEST_001", + full_line="#" + " req-Id: TEST_001", ) ] @@ -514,14 +514,14 @@ def test_cache_file_with_encoded_comments(temp_dir): # Check the raw JSON to verify encoding with open(cache_file, "r") as f: raw_content = f.read() - assert "# req-Id:" in raw_content # Should be encoded + assert "#" + " req-Id:" in raw_content # Should be encoded assert "#-----req-Id:" not in raw_content # Original should not be present # Load and verify decoding loaded_links = load_source_code_links_json(cache_file) assert len(loaded_links) == 1 - assert loaded_links[0].tag == "# req-Id:" # Should be decoded back - assert loaded_links[0].full_line == "# req-Id: TEST_001" + assert loaded_links[0].tag == "#" + " req-Id:" # Should be decoded back + assert loaded_links[0].full_line == "#" + " req-Id: TEST_001" # Integration tests @@ -559,24 +559,31 @@ def test_end_to_end_with_real_files(temp_dir, git_repo): src_dir = git_repo / "src" src_dir.mkdir() - (src_dir / "implementation1.py").write_text(""" + (src_dir / "implementation1.py").write_text( + """ # Some implementation -# req-Id: TREQ_ID_1 +#""" + + """ req-Id: TREQ_ID_1 def function1(): pass # Another function -# req-Id: TREQ_ID_2 +#""" + + """ req-Id: TREQ_ID_2 def function2(): pass -""") +""" + ) - (src_dir / "implementation2.py").write_text(""" + (src_dir / "implementation2.py").write_text( + """ # Another implementation -# req-Id: TREQ_ID_1 +#""" + + """ req-Id: TREQ_ID_1 def another_function(): pass -""") +""" + ) # Commit the changes subprocess.run(["git", "add", "."], cwd=git_repo, check=True) @@ -589,23 +596,23 @@ def another_function(): NeedLink( file=Path("src/implementation1.py"), line=3, - tag="# req-Id:", + tag="#" + " req-Id:", need="TREQ_ID_1", - full_line="# req-Id: TREQ_ID_1", + full_line="#" + " req-Id: TREQ_ID_1", ), NeedLink( file=Path("src/implementation1.py"), line=8, - tag="# req-Id:", + tag="#" + " 
req-Id:", need="TREQ_ID_2", - full_line="# req-Id: TREQ_ID_2", + full_line="#" + " req-Id: TREQ_ID_2", ), NeedLink( file=Path("src/implementation2.py"), line=3, - tag="# req-Id:", + tag="#" + " req-Id:", need="TREQ_ID_1", - full_line="# req-Id: TREQ_ID_1", + full_line="#" + " req-Id: TREQ_ID_1", ), ] @@ -652,9 +659,9 @@ def test_multiple_commits_hash_consistency(git_repo): needlink = NeedLink( file=Path("new_file.py"), line=1, - tag="# req-Id:", + tag="#" + " req-Id:", need="TEST_001", - full_line="# req-Id: TEST_001", + full_line="#" + " req-Id: TREQ_ID_1", ) os.chdir(Path(git_repo).absolute()) diff --git a/src/extensions/score_source_code_linker/tests/test_source_link.py b/src/extensions/score_source_code_linker/tests/test_source_link.py index b9bbd87f..ce050960 100644 --- a/src/extensions/score_source_code_linker/tests/test_source_link.py +++ b/src/extensions/score_source_code_linker/tests/test_source_link.py @@ -97,38 +97,48 @@ def create_demo_files(sphinx_base_dir, git_repo_setup): def make_source_1(): - return """ + return ( + """ # This is a test implementation file -# req-Id: TREQ_ID_1 +#""" + + """ req-Id: TREQ_ID_1 def some_function(): pass # Some other code here # More code... -# req-Id: TREQ_ID_2 +#""" + """ req-Id: TREQ_ID_2 def another_function(): pass """ + ) def make_source_2(): - return """ + return ( + """ # Another implementation file -# req-Id: TREQ_ID_1 +#""" + + """ req-Id: TREQ_ID_1 class SomeClass: def method(self): pass """ + ) def make_bad_source(): - return """ -# req-Id: TREQ_ID_200 + return ( + """ +#""" + + """ req-Id: TREQ_ID_200 def This_Should_Error(self): pass """ + ) def construct_gh_url() -> str: From 69ad70bc39b10715f5764d836819896abf2a243b Mon Sep 17 00:00:00 2001 From: Simon Duerr Date: Wed, 9 Jul 2025 05:54:17 +0200 Subject: [PATCH 061/231] Remove empty globs (#124) * Remove empty globs * Enable bazel disallow empty globs Enable the option to support an early detection of invalid globs in bazel functions. 
--- .bazelrc | 2 ++ docs.bzl | 6 ++--- src/BUILD | 26 ------------------- src/extensions/score_metamodel/BUILD | 2 +- src/extensions/score_source_code_linker/BUILD | 3 +-- 5 files changed, 7 insertions(+), 32 deletions(-) diff --git a/.bazelrc b/.bazelrc index 85aed888..6863d998 100644 --- a/.bazelrc +++ b/.bazelrc @@ -10,3 +10,5 @@ build --incompatible_default_to_explicit_init_py common --registry=https://raw.githubusercontent.com/eclipse-score/bazel_registry/main/ common --registry=https://bcr.bazel.build + +common --incompatible_disallow_empty_glob diff --git a/docs.bzl b/docs.bzl index 204d9984..f1c9b023 100644 --- a/docs.bzl +++ b/docs.bzl @@ -75,7 +75,7 @@ def docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_ sphinx_build_binary( name = "sphinx_build" + suffix, visibility = ["//visibility:public"], - data = ["@score_docs_as_code//src:docs_assets", "@score_docs_as_code//src:score_extension_files"] + external_needs_deps, + data = ["@score_docs_as_code//src:docs_assets", "@score_docs_as_code//src:docs_as_code_py_modules"] + external_needs_deps, deps = sphinx_requirements + deps, ) _incremental( @@ -183,7 +183,7 @@ def _docs(name = "docs", suffix = "", format = "html", external_needs_deps = lis "**/*.json", "**/*.csv", "**/*.inc", - ], exclude = ["**/tests/*"]), + ], exclude = ["**/tests/*"], allow_empty = True), config = ":conf.py", extra_opts = [ "-W", @@ -205,7 +205,7 @@ def _docs(name = "docs", suffix = "", format = "html", external_needs_deps = lis native.filegroup( name = "assets" + target_suffix, - srcs = native.glob(["_assets/**"]), + srcs = native.glob(["_assets/**"], allow_empty = True), visibility = ["//visibility:public"], ) diff --git a/src/BUILD b/src/BUILD index def4bbef..d3204016 100644 --- a/src/BUILD +++ b/src/BUILD @@ -118,21 +118,6 @@ py_library( visibility = ["//visibility:public"], ) -filegroup( - name = "score_extension_files", - srcs = glob( - [ - "src/**", - ], - exclude = [ - "**/test/**", - "**/tests/**", - 
"**/__pycache__/**", - ], - ) + [":docs_as_code_py_modules"], - visibility = ["//visibility:public"], -) - filegroup( name = "docs_assets", srcs = glob([ @@ -153,17 +138,6 @@ filegroup( visibility = ["//visibility:public"], ) -# Needed for 'test_rules_file_based' -filegroup( - name = "test_rst_files", - srcs = glob([ - "extensions/**/*.rst", - "extensions/**/*.py", - "conf.py", - ]), - visibility = ["//visibility:public"], -) - dash_license_checker( src = ":requirements_lock", file_type = "requirements", diff --git a/src/extensions/score_metamodel/BUILD b/src/extensions/score_metamodel/BUILD index bc328f7e..fa6b976e 100644 --- a/src/extensions/score_metamodel/BUILD +++ b/src/extensions/score_metamodel/BUILD @@ -33,7 +33,7 @@ score_py_pytest( size = "small", srcs = glob(["tests/*.py"]), # All requirements already in the library so no need to have it double - data = ["//src:test_rst_files"] + glob( + data = glob( ["tests/**/*.rst"], ), deps = [":score_metamodel"], diff --git a/src/extensions/score_source_code_linker/BUILD b/src/extensions/score_source_code_linker/BUILD index 48113e1c..9828b22b 100644 --- a/src/extensions/score_source_code_linker/BUILD +++ b/src/extensions/score_source_code_linker/BUILD @@ -30,8 +30,7 @@ score_py_pytest( name = "score_source_code_linker_test", size = "small", srcs = glob([ - "tests/**/*.py", - "test/**/*.json", + "tests/*.py", ]), args = [ "-s", From 36a225257357f88b651096d501568f6308245520 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Volker=20H=C3=A4ussler?= Date: Wed, 9 Jul 2025 10:51:36 +0200 Subject: [PATCH 062/231] process: update metamodel safety analysis (#104) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * process: update metamodel safety analysis * process: graph_check ASIL update * process: update safety analysis templates * process: update graph check * process: add test cases requirements * process: add checks safety analysis * process: delete ASIL_D option. 
Add testcases * process: fix test required options * Update ASIL_D check * increase version Ref: closes #102 --------- Signed-off-by: Volker Häussler Co-authored-by: Sven Bachmann --- .vscode/settings.json | 10 +- MODULE.bazel | 2 +- .../score_metamodel/checks/check_options.py | 5 +- .../score_metamodel/checks/graph_checks.py | 2 - src/extensions/score_metamodel/metamodel.yaml | 166 ++++-- .../tests/rst/graph/test_metamodel_graph.rst | 242 ++++++++- .../rst/options/test_options_extra_option.rst | 2 + .../rst/options/test_options_options.rst | 492 +++++++++++++++++- .../tests/test_rules_file_based.py | 2 +- 9 files changed, 849 insertions(+), 74 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 81b05b3b..478df016 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -38,5 +38,13 @@ ], // Disable internal type checking, since we use basedpyright - "python.analysis.typeCheckingMode": "off" + "python.analysis.typeCheckingMode": "off", + "cSpell.words": [ + "ASIL", + "FMEA", + "isopas", + "isosae", + "stkh", + "workproduct" + ] } diff --git a/MODULE.bazel b/MODULE.bazel index 143e6af8..cae1f74c 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.4.1", + version = "0.4.2", compatibility_level = 0, ) diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index ca675cd5..57e0c073 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -82,7 +82,10 @@ def remove_prefix(word: str, prefixes: list[str]) -> str: try: if not re.match(pattern, value): log.warning_for_option( - need, field, f"does not follow pattern `{pattern}`." 
+ need, + field, + f"does not follow pattern `{pattern}`.", + new_check="ASIL_D" in value, ) except TypeError: log.warning_for_option( diff --git a/src/extensions/score_metamodel/checks/graph_checks.py b/src/extensions/score_metamodel/checks/graph_checks.py index dfa38fe9..ed914388 100644 --- a/src/extensions/score_metamodel/checks/graph_checks.py +++ b/src/extensions/score_metamodel/checks/graph_checks.py @@ -137,11 +137,9 @@ def check_metamodel_graph( # Convert list to dictionary for easy lookup needs_dict_all = {need["id"]: need for need in all_needs.values()} needs_local = list(all_needs.filter_is_external(False).values()) - # Iterate over all graph checks for check in graph_checks_global.items(): apply, eval = check[1].values() - # Get all needs that match the selection criteria selected_needs = get_need_selection(needs_local, apply, log) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index fdc120a9..4aca501d 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -188,7 +188,7 @@ needs_types: id: ^doc__[0-9a-z_]*$ status: ^(valid|draft|invalid)$ optional_options: - safety: "^(QM|ASIL_B|ASIL_D)$" + safety: "^(QM|ASIL_B)$" security: "^(YES|NO)$" realizes: "^wp__.+$" @@ -201,7 +201,7 @@ needs_types: id: ^stkh_req__[0-9a-z_]*$ # req-Id: tool_req__docs_req_attr_reqtype reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ # req-Id: tool_req__docs_req_attr_rationale rationale: ^.+$ @@ -225,7 +225,7 @@ needs_types: # req-Id: tool_req__docs_req_attr_reqtype reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ security: ^(YES|NO)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ mandatory_links: # req-Id: tool_req__docs_req_link_satisfies_allowed @@ -248,7 +248,7 @@ needs_types: # req-Id: tool_req__docs_req_attr_reqtype 
reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ security: ^(YES|NO)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ mandatory_links: # req-Id: tool_req__docs_req_link_satisfies_allowed @@ -269,7 +269,7 @@ needs_types: mandatory_options: id: ^tool_req__[0-9a-z_]*$ security: ^(YES|NO)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ optional_links: # req-Id: tool_req__docs_req_link_satisfies_allowed @@ -298,7 +298,7 @@ needs_types: # req-Id: tool_req__docs_req_attr_reqtype reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ security: ^(YES|NO)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ optional_options: codelink: ^.*$ @@ -321,7 +321,7 @@ needs_types: mandatory_options: id: ^feat_arc_sta__[0-9a-z_]+$ security: ^(YES|NO)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ mandatory_links: includes: ^logic_arc_int(_op)*__.+$ @@ -336,7 +336,7 @@ needs_types: mandatory_options: id: ^feat_arc_dyn__[0-9a-z_]+$ security: ^(YES|NO)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ mandatory_links: fulfils: ^feat_req__.+$ @@ -349,7 +349,7 @@ needs_types: mandatory_options: id: ^logic_arc_int__[0-9a-z_]+$ security: ^(YES|NO)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ optional_links: includes: ^logic_arc_int_op__.+$ @@ -363,7 +363,7 @@ needs_types: mandatory_options: id: ^logic_arc_int_op__[0-9a-z_]+$ security: ^(YES|NO)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ mandatory_links: included_by: ^logic_arc_int__.+$ @@ -394,7 +394,7 @@ needs_types: mandatory_options: id: ^comp_arc_sta__[0-9a-z_]+$ security: ^(YES|NO)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ optional_links: implements: ^real_arc_int(_op)*__.+$ @@ -410,7 +410,7 @@ needs_types: mandatory_options: id: 
^comp_arc_dyn__[0-9a-z_]+$ security: ^(YES|NO)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ optional_links: fulfils: ^comp_req__.+$ @@ -423,7 +423,7 @@ needs_types: mandatory_options: id: ^real_arc_int__[0-9a-z_]+$ security: ^(YES|NO)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ language: ^(cpp|rust)$ optional_links: @@ -437,7 +437,7 @@ needs_types: mandatory_options: id: ^real_arc_int_op__[0-9a-z_]+$ security: ^(YES|NO)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ mandatory_links: included_by: ^real_arc_int__.+$ @@ -463,7 +463,7 @@ needs_types: mandatory_options: id: ^dd_sta__[0-9a-z_]*$ security: ^(YES|NO)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ mandatory_links: implements: ^comp_req__.*$ @@ -479,7 +479,7 @@ needs_types: mandatory_options: id: ^dd_dyn__[0-9a-z_]*$ security: ^(YES|NO)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ mandatory_links: implements: ^comp_req__.*$ @@ -491,7 +491,7 @@ needs_types: mandatory_options: id: ^sw_unit__[0-9a-z_]*$ security: ^(YES|NO)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ sw_unit_int: title: Software unit interfaces @@ -501,9 +501,86 @@ needs_types: mandatory_options: id: ^sw_unit_int__[0-9a-z_]*$ security: ^(YES|NO)$ - safety: ^(QM|ASIL_B|ASIL_D)$ + safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ + # Safety Analysis DFA + feat_plat_saf_dfa: + title: DFA + prefix: feat_plat_saf_dfa__ + mandatory_options: + id: ^feat_plat_saf_dfa__[0-9a-z_]+$ + violation_id: ^.*$ + violation_cause: ^.*$ + sufficient: ^(yes|no)$ + status: ^(valid|invalid)$ + mandatory_links: + mitigates: ^(feat_req__.*|aou_req__.*)$ + verifies: ^feat_arc_dyn__[0-9a-z_]*$ + optional_links: + mitigation_issue: ^https://github.com/.*$ + + feat_saf_dfa: + title: DFA + prefix: feat_saf_dfa__ + mandatory_options: + id: 
^feat_saf_dfa__[0-9a-z_]+$ + violation_id: ^.*$ + violation_cause: ^.*$ + sufficient: ^(yes|no)$ + status: ^(valid|invalid)$ + mandatory_links: + mitigates: ^(feat_req__.*|aou_req__.*)$ + verifies: ^feat_arc_dyn__[0-9a-z_]*$ + optional_links: + mitigation_issue: ^https://github.com/.*$ + + comp_saf_dfa: + title: DFA + prefix: comp_saf_dfa__ + mandatory_options: + id: ^comp_saf_dfa__[0-9a-z_]+$ + violation_id: ^.*$ + violation_cause: ^.*$ + sufficient: ^(yes|no)$ + status: ^(valid|invalid)$ + mandatory_links: + mitigates: ^(comp_req__.*|aou_req__.*)$ + verifies: ^comp_arc_dyn__[0-9a-z_]*$ + optional_links: + mitigation_issue: ^https://github.com/.*$ + + # # Safety Analysis FMEA + feat_saf_fmea: + title: FMEA + prefix: feat_saf_fmea__ + mandatory_options: + id: ^feat_saf_fmea__[0-9a-z_]+$ + violation_id: ^.*$ + violation_cause: ^.*$ + sufficient: ^(yes|no)$ + status: ^(valid|invalid)$ + mandatory_links: + mitigates: ^(feat_req__.*|aou_req__.*)$ + verifies: ^feat_arc_dyn__[0-9a-z_]*$ + optional_links: + mitigation_issue: ^https://github.com/.*$ + + comp_saf_fmea: + title: FMEA + prefix: comp_saf_fmea__ + mandatory_options: + id: ^comp_saf_fmea__[0-9a-z_]+$ + violation_id: ^.*$ + violation_cause: ^.*$ + sufficient: ^(yes|no)$ + status: ^(valid|invalid)$ + mandatory_links: + mitigates: ^(comp_req__.*|aou_req__.*)$ + verifies: ^comp_arc_dyn__[0-9a-z_]*$ + optional_links: + mitigation_issue: ^https://github.com/.*$ + # Extra link types, which shall be available and allow need types to be linked to each other. # We use a dedicated linked type for each type of a connection, for instance from # a specification to a requirement. 
This makes filtering and visualization of such connections @@ -576,6 +653,14 @@ needs_extra_links: included_by: incoming: includes outgoing: included by + + mitigates: + incoming: mitigated by + outgoing: mitigates + + verifies: + incoming: verified by + outgoing: verifies ############################################################## # Graph Checks # The graph checks focus on the relation of the needs and their attributes. @@ -594,36 +679,33 @@ needs_extra_links: ############################################################## # req- Id: gd_req__req__linkage_architecture # req- Id: gd_req__req__linkage_safety + +# Checks if the child requirement has the at least the same safety level as the parent requirement. It's allowed to "overfill" the safety level of the parent. +# ASIL decomposition is not foreseen in S-CORE. Therefore it's not allowed to have a child requirement with a lower safety level than the parent requirement as +# it is possible in an decomposition case. +# If need-req is `QM`, parent must be `QM`. graph_checks: - # req- Id: gd_req__req__linkage_safety - req_safety_linkage: + req_safety_linkage_qm: needs: include: comp_req, feat_req - condition: - and: - - safety != QM - - status == valid + condition: safety == QM check: - satisfies: - and: - - safety != QM - - status == valid - req_linkage: + satisfies: safety == QM + # If need-req is `ASIL_B`, parent must be `QM` or `ASIL_B`. + req_safety_linkage_asil_b: needs: include: comp_req, feat_req - condition: status == valid + condition: safety == ASIL_B check: - # req- Id: gd_req__req__linkage_architecture - satisfies: status == valid - arch_safety_linkage: + satisfies: safety != ASIL_D + # saf - ID gd_req__saf_linkage_safety + # It shall be checked that Safety Analysis (DFA and FMEA) can only be linked via mitigate against + # - Requirements with the same ASIL or + # - Requirements with a higher ASIL + # as the corresponding ASIL of the Feature or Component that is analyzed. 
+ saf_linkage_safety: needs: - include: comp_req, feat_req - condition: - and: - - safety != QM - - status == valid + include: feat_saf_fmea, comp_saf_fmea, feat_plat_saf_dfa, feat_saf_dfa, comp_saf_dfa + condition: safety == ASIL_B check: - fulfils: - and: - - safety != QM - - status == valid + mitigates: safety != QM diff --git a/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst b/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst index 65271498..b193749e 100644 --- a/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst +++ b/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst @@ -11,53 +11,245 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* + #CHECK: check_metamodel_graph -.. feat_req:: Parent requirement - :id: feat_req__parent__abcd + +.. Checks if the child requirement has the at least the same safety level as the parent requirement. It's allowed to "overfill" the safety level of the parent. +.. ASIL decomposition is not foreseen in S-CORE. Therefore it's not allowed to have a child requirement with a lower safety level than the parent requirement as +.. it is possible in an decomposition case. +.. feat_req:: Parent requirement QM + :id: feat_req__parent__QM + :safety: QM + :status: valid + +.. feat_req:: Parent requirement ASIL_B + :id: feat_req__parent__ASIL_B + :safety: ASIL_B + :status: valid + +.. feat_req:: Parent requirement ASIL_D + :id: feat_req__parent__ASIL_D + :safety: ASIL_D + :status: valid + + +.. Positive Test: Child requirement QM. Parent requirement has the correct related safety level. Parent requirement is `QM`. +#EXPECT-NOT: feat_req__child__1: parent need `feat_req__parent__QM` does not fulfill condition `safety == QM`. + +.. feat_req:: Child requirement 1 + :id: feat_req__child__1 :safety: QM + :satisfies: feat_req__parent__QM :status: valid -.. 
Parent requirement has not the correct safety level -#EXPECT: feat_req__child__abce: parent need `feat_req__parent__abcd` does not fulfill condition `{'and': ['safety != QM', 'status == valid']}`. +.. Positive Test: Child requirement ASIL B. Parent requirement has the correct related safety level. Parent requirement is `QM`. +#EXPECT-NOT: feat_req__child__2: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety == QM`. -.. feat_req:: Child requirement - :id: feat_req__child__abce +.. feat_req:: Child requirement 2 + :id: feat_req__child__2 :safety: ASIL_B + :satisfies: feat_req__parent__ASIL_B + :status: valid + +.. Positive Test: Child requirement ASIL D. Parent requirement has the correct related safety level. Parent requirement is `QM`. +#EXPECT-NOT: feat_req__child__3: parent need `feat_req__parent__ASIL_D` does not fulfill condition `safety == QM`. + +.. feat_req:: Child requirement 3 + :id: feat_req__child__3 + :safety: ASIL_D + :satisfies: feat_req__parent__ASIL_D + :status: valid + +.. Negative Test: Child requirement QM. Parent requirement is `ASIL_B`. Child cant fulfill the safety level of the parent. +#EXPECT: feat_req__child__4: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety == QM`. + +.. comp_req:: Child requirement 4 + :id: feat_req__child__4 + :safety: QM + :satisfies: feat_req__parent__ASIL_B :status: valid - :satisfies: feat_req__parent__abcd -.. feat_req:: Parent requirement 2 - :id: feat_req__parent2__abcd +.. Negative Test: Child requirement QM. Parent requirement is `ASIL_D`. Child cant fulfill the safety level of the parent. +#EXPECT: feat_req__child__5: parent need `feat_req__parent__ASIL_D` does not fulfill condition `safety == QM`. + +.. comp_req:: Child requirement 5 + :id: feat_req__child__5 + :safety: QM + :satisfies: feat_req__parent__ASIL_D + :status: valid + +.. Positive Test: Child requirement ASIL_B. Parent requirement has the correct related safety level. Parent requirement is `QM`. 
+#EXPECT-NOT: feat_req__child__6: parent need `feat_req__parent__QM` does not fulfill condition `safety != ASIL_D`. + +.. feat_req:: Child requirement 6 + :id: feat_req__child__6 :safety: ASIL_B + :satisfies: feat_req__parent__QM :status: valid -.. Parent requirement has the correct safety level -#EXPECT-NOT: feat_req__child2__abce: parent need `feat_req__parent2__abcd` does not fulfill condition +.. Positive Test: Child requirement ASIL_B. Parent requirement has the correct related safety level. Parent requirement is `ASIL_B`. +#EXPECT-NOT: feat_req__child__7: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != ASIL_D`. -.. feat_req:: Child requirement 2 - :id: feat_req__child2__abce +.. feat_req:: Child requirement 7 + :id: feat_req__child__7 + :safety: ASIL_B + :satisfies: feat_req__parent__ASIL_B + :status: valid + +.. Negative Test: Child requirement ASIL_B. Parent requirement is `ASIL_D`. Child cant fulfill the safety level of the parent. +#EXPECT: feat_req__child__8: parent need `feat_req__parent__ASIL_D` does not fulfill condition `safety != ASIL_D`. + +.. comp_req:: Child requirement 8 + :id: feat_req__child__8 :safety: ASIL_B + :satisfies: feat_req__parent__ASIL_D :status: valid - :satisfies: feat_req__parent__abcd + + .. Parent requirement does not exist -#EXPECT: feat_req__child3__abce: Parent need `feat_req__parent0__abcd` not found in needs_dict. +#EXPECT: feat_req__child__9: Parent need `feat_req__parent0__abcd` not found in needs_dict. -.. feat_req:: Child requirement 3 - :id: feat_req__child3__abce +.. feat_req:: Child requirement 9 + :id: feat_req__child__9 :safety: ASIL_B :status: valid :satisfies: feat_req__parent0__abcd -.. feat_req:: Parent requirement 3 - :id: feat_req__parent3__abcd - :status: invalid -.. Graph check without combined condition (no and or or) -#EXPECT: comp_req__parent4__abcd: parent need `feat_req__parent3__abcd` does not fulfill condition `status == valid`. +.. 
Mitigation of Safety Analysis (FMEA and DFA) shall be checked. Mitigation shall have the same or higher safety level than the analysed item. +.. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. +#EXPECT: feat_saf_dfa__child__10: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. -.. comp_req:: Child requirement 4 - :id: comp_req__parent4__abcd +.. feat_saf_dfa:: Child requirement 10 + :id: feat_saf_dfa__child__10 + :safety: ASIL_B + :status: valid + :mitigates: feat_req__parent__QM + +.. Positive Test: Linked to a mitigation that is equal to the safety level of the analysed item. +#EXPECT-NOT: feat_saf_dfa__child__11: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. + +.. feat_saf_dfa:: Child requirement 11 + :id: feat_saf_dfa__child__11 + :safety: ASIL_B + :status: valid + :mitigates: feat_req__parent__ASIL_B + +.. Positive Test: Linked to a mitigation that is higher to the safety level of the analysed item. +#EXPECT-NOT: feat_saf_dfa__child__12: parent need `feat_req__parent__ASIL_D` does not fulfill condition `safety != QM`. + +.. feat_saf_dfa:: Child requirement 12 + :id: feat_saf_dfa__child__12 + :safety: ASIL_B + :status: valid + :mitigates: feat_req__parent__ASIL_D + +.. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. +#EXPECT: comp_saf_dfa__child__13: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. + +.. comp_saf_dfa:: Child requirement 13 + :id: comp_saf_dfa__child__13 + :safety: ASIL_B + :status: valid + :mitigates: feat_req__parent__QM + +.. Positive Test: Linked to a mitigation that is equal to the safety level of the analysed item. +#EXPECT-NOT: comp_saf_dfa__child__14: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. + +.. 
comp_saf_dfa:: Child requirement 14 + :id: comp_saf_dfa__child__14 + :safety: ASIL_B + :status: valid + :mitigates: feat_req__parent__ASIL_B + +.. Positive Test: Linked to a mitigation that is higher to the safety level of the analysed item. +#EXPECT-NOT: comp_saf_dfa__child__15: parent need `feat_req__parent__ASIL_D` does not fulfill condition `safety != QM`. + +.. comp_saf_dfa:: Child requirement 15 + :id: comp_saf_dfa__child__15 + :safety: ASIL_B + :status: valid + :mitigates: feat_req__parent__ASIL_D + +.. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. +#EXPECT: feat_plat_saf_dfa__child__16: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. + +.. feat_plat_saf_dfa:: Child requirement 16 + :id: feat_plat_saf_dfa__child__16 + :safety: ASIL_B + :status: valid + :mitigates: feat_req__parent__QM + +.. Positive Test: Linked to a mitigation that is equal to the safety level of the analysed item. +#EXPECT-NOT: feat_plat_saf_dfa__child__17: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. + +.. feat_plat_saf_dfa:: Child requirement 17 + :id: feat_plat_saf_dfa__child__17 + :safety: ASIL_B + :status: valid + :mitigates: feat_req__parent__ASIL_B + +.. Positive Test: Linked to a mitigation that is higher to the safety level of the analysed item. +#EXPECT-NOT: feat_plat_saf_dfa__child__18: parent need `feat_req__parent__ASIL_D` does not fulfill condition `safety != QM`. + +.. feat_plat_saf_dfa:: Child requirement 18 + :id: feat_plat_saf_dfa__child__15 + :safety: ASIL_B + :status: valid + :mitigates: feat_req__parent__ASIL_D + +.. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. +#EXPECT: feat_saf_fmea__child__19: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. + +.. 
feat_saf_fmea:: Child requirement 19 + :id: feat_saf_fmea__child__19 + :safety: ASIL_B + :status: valid + :mitigates: feat_req__parent__QM + +.. Positive Test: Linked to a mitigation that is equal to the safety level of the analysed item. +#EXPECT-NOT: feat_saf_fmea__child__20: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. + +.. feat_saf_fmea:: Child requirement 20 + :id: feat_saf_fmea__child__20 + :safety: ASIL_B + :status: valid + :mitigates: feat_req__parent__ASIL_B + +.. Positive Test: Linked to a mitigation that is higher to the safety level of the analysed item. +#EXPECT-NOT: feat_saf_fmea__child__21: parent need `feat_req__parent__ASIL_D` does not fulfill condition `safety != QM`. + +.. feat_saf_fmea:: Child requirement 21 + :id: feat_saf_fmea__child__21 + :safety: ASIL_B + :status: valid + :mitigates: feat_req__parent__ASIL_D + +.. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. +#EXPECT: comp_saf_fmea__child__22: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. + +.. comp_saf_fmea:: Child requirement 22 + :id: comp_saf_fmea__child__22 + :safety: ASIL_B + :status: valid + :mitigates: feat_req__parent__QM + +.. Positive Test: Linked to a mitigation that is equal to the safety level of the analysed item. +#EXPECT-NOT: comp_saf_fmea__child__23: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. + +.. comp_saf_fmea:: Child requirement 23 + :id: comp_saf_fmea__child__23 + :safety: ASIL_B + :status: valid + :mitigates: feat_req__parent__ASIL_B + +.. Positive Test: Linked to a mitigation that is higher to the safety level of the analysed item. +#EXPECT-NOT: comp_saf_fmea__child__24: parent need `feat_req__parent__ASIL_D` does not fulfill condition `safety != QM`. + +.. 
comp_saf_fmea:: Child requirement 24 + :id: comp_saf_fmea__child__24 + :safety: ASIL_B :status: valid - :satisfies: feat_req__parent3__abcd + :mitigates: feat_req__parent__ASIL_D diff --git a/src/extensions/score_metamodel/tests/rst/options/test_options_extra_option.rst b/src/extensions/score_metamodel/tests/rst/options/test_options_extra_option.rst index a363f8ac..fd7de88e 100644 --- a/src/extensions/score_metamodel/tests/rst/options/test_options_extra_option.rst +++ b/src/extensions/score_metamodel/tests/rst/options/test_options_extra_option.rst @@ -25,3 +25,5 @@ .. std_wp:: This is a test :id: std_wp__test__abce + + diff --git a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst index c829fe92..016f27f1 100644 --- a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst +++ b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst @@ -13,19 +13,22 @@ # ******************************************************************************* #CHECK: check_options + .. Required option: `status` is missing #EXPECT: std_wp__test__abcd: is missing required option: `status`. .. std_wp:: This is a test :id: std_wp__test__abcd + .. All required options are present -#EXPECT-NOT: is missing required option +#EXPECT-NOT: std_wp__test__abcd: is missing required option .. std_wp:: This is a test :id: std_wp__test__abce :status: active + .. Required link `satisfies` refers to wrong requirement type #EXPECT: feat_req__abce.satisfies (['std_wp__test__abce']): does not follow pattern `^stkh_req__.*$`. @@ -58,6 +61,7 @@ .. feat_req:: Child requirement :id: feat_req__abcf + .. All required links are present #EXPECT-NOT: feat_req__abcg: is missing required link @@ -67,3 +71,489 @@ .. stkh_req:: Parent requirement :id: stkh_req__abcd + + +.. 
Test if the `sufficient` option for Safety Analysis (FMEA and DFA) follows the pattern `^(yes|no)$` +#EXPECT: feat_saf_fmea__test__bad_1.sufficient (QM): does not follow pattern `^(yes|no)$`. + +.. feat_saf_fmea:: This is a test + :id: feat_saf_fmea__test__bad_1 + :sufficient: QM + +#EXPECT-NOT: feat_saf_fmea__test__good_2.sufficient (yes): does not follow pattern `^(yes|no)$`. + +.. feat_saf_fmea:: This is a test + :id: feat_saf_fmea__test__2 + :sufficient: yes + +#EXPECT-NOT: feat_saf_fmea__test__good_3.sufficient (no): does not follow pattern `^(yes|no)$`. + +.. feat_saf_fmea:: This is a test + :id: feat_saf_fmea__test__3 + :sufficient: no + +#EXPECT: comp_saf_fmea__test__bad_4.sufficient (QM): does not follow pattern `^(yes|no)$`. + +.. comp_saf_fmea:: This is a test + :id: comp_saf_fmea__test__bad_4 + :sufficient: QM + +#EXPECT-NOT: comp_saf_fmea__test__good_5.sufficient (yes): does not follow pattern `^(yes|no)$`. + +.. comp_saf_fmea:: This is a test + :id: comp_saf_fmea__test__5 + :sufficient: yes + +#EXPECT-NOT: comp_saf_fmea__test__good_6.sufficient (no): does not follow pattern `^(yes|no)$`. + +.. comp_saf_fmea:: This is a test + :id: comp_saf_fmea__test__6 + :sufficient: no + +#EXPECT: feat_plat_saf_dfa__test__bad_7.sufficient (QM): does not follow pattern `^(yes|no)$`. + +.. feat_plat_saf_dfa:: This is a test + :id: feat_plat_saf_dfa__test__bad_7 + :sufficient: QM + +#EXPECT-NOT: feat_plat_saf_dfa__test__good_8.sufficient (yes): does not follow pattern `^(yes|no)$`. + +.. feat_plat_saf_dfa:: This is a test + :id: feat_plat_saf_dfa__test__8 + :sufficient: yes + +#EXPECT-NOT: feat_plat_saf_dfa__test__good_9.sufficient (no): does not follow pattern `^(yes|no)$`. + +.. feat_plat_saf_dfa:: This is a test + :id: feat_plat_saf_dfa__test__9 + :sufficient: no + +#EXPECT: feat_saf_dfa__test__bad_10.sufficient (QM): does not follow pattern `^(yes|no)$`. + +.. 
feat_saf_dfa:: This is a test + :id: feat_saf_dfa__test__bad_10 + :sufficient: QM + +#EXPECT-NOT: feat_saf_dfa__test__good_11.sufficient (yes): does not follow pattern `^(yes|no)$`. + +.. feat_saf_dfa:: This is a test + :id: feat_saf_dfa__test__11 + :sufficient: yes + +#EXPECT-NOT: feat_saf_dfa__test__good_12.sufficient (no): does not follow pattern `^(yes|no)$`. + +.. feat_saf_dfa:: This is a test + :id: feat_saf_dfa__test__12 + :sufficient: no + +#EXPECT: comp_saf_dfa__test__bad_13.sufficient (QM): does not follow pattern `^(yes|no)$`. + +.. comp_saf_dfa:: This is a test + :id: comp_saf_dfa__test__bad_13 + :sufficient: QM + +#EXPECT-NOT: comp_saf_dfa__test__good_14.sufficient (yes): does not follow pattern `^(yes|no)$`. + +.. comp_saf_dfa:: This is a test + :id: comp_saf_dfa__test__14 + :sufficient: yes + +#EXPECT-NOT: comp_saf_dfa__test__good_15.sufficient (no): does not follow pattern `^(yes|no)$`. + +.. comp_saf_dfa:: This is a test + :id: comp_saf_dfa__test__15 + :sufficient: no + + +.. Test that the `sufficient` option is case sensitive and does not accept values other than `yes` or `no` +#EXPECT: feat_saf_fmea__test__bad_16.sufficient (yEs): does not follow pattern `^(yes|no)$`. + +.. feat_saf_fmea:: This is a test + :id: feat_saf_fmea__test__bad_16 + :sufficient: yEs + + + +.. comp_req:: Child requirement ASIL_B + :id: comp_req__child__ASIL_B + :safety: ASIL_B + :status: valid + + +.. Negative Test: Linked to a non-allowed requirement type. +#EXPECT: feat_saf_fmea__child__25.mitigates (['comp_req__child__ASIL_B']): does not follow pattern `^(feat_req__.*|aou_req__.*)$`. + +.. feat_saf_fmea:: Child requirement 25 + :id: feat_saf_fmea__child__25 + :safety: ASIL_B + :status: valid + :mitigates: comp_req__child__ASIL_B + + +.. Negative Test: Linked to a non-allowed requirement type. +#EXPECT: feat_saf_fmea__child__26.verifies (['comp_req__child__ASIL_B']): does not follow pattern `^feat_arc_dyn__[0-9a-z_]*$`. + +.. 
feat_saf_fmea:: Child requirement 26 + :id: feat_saf_fmea__child__26 + :safety: ASIL_B + :status: valid + :verifies: comp_req__child__ASIL_B + + +.. Tests if the attribute `safety` follows the pattern `^(QM|ASIL_B)$` +#EXPECT-NOT: doc__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. document:: This is a test document + :id: doc__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: doc__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. document:: This is a test document + :id: doc__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: doc__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. document:: This is a test document +.. :id: doc__test_bad_1 +.. :status: valid +.. :safety: ASIL_D + +#EXPECT-NOT: stkh_req__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. stkh_req:: This is a test + :id: stkh_req__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: stkh_req__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. stkh_req:: This is a test + :id: stkh_req__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: stkh_req__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. stkh_req:: This is a test +.. :id: stkh_req__test_bad_1 +.. :status: valid +.. :safety: ASIL_D + +#EXPECT-NOT: feat_req__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. feat_req:: This is a test + :id: feat_req__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: feat_req__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. feat_req:: This is a test + :id: feat_req__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: feat_req__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. feat_req:: This is a test +.. :id: feat_req__test_bad_1 +.. :status: valid +.. 
:safety: ASIL_D + +#EXPECT-NOT: comp_req__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. comp_req:: This is a test + :id: comp_req__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: comp_req__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. comp_req:: This is a test + :id: comp_req__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: comp_req__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. comp_req:: This is a test +.. :id: comp_req__test_bad_1 +.. :status: valid +.. :safety: ASIL_D + +#EXPECT-NOT: tool_req__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. tool_req:: This is a test + :id: tool_req__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: tool_req__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. tool_req:: This is a test + :id: tool_req__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: tool_req__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. tool_req:: This is a test +.. :id: tool_req__test_bad_1 +.. :status: valid +.. :safety: ASIL_D + +#EXPECT-NOT: aou_req__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. aou_req:: This is a test + :id: aou_req__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: aou_req__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. aou_req:: This is a test + :id: aou_req__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: aou_req__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. aou_req:: This is a test +.. :id: aou_req__test_bad_1 +.. :status: valid +.. :safety: ASIL_D + +#EXPECT-NOT: feat_arc_sta__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. 
feat_arc_sta:: This is a test + :id: feat_arc_sta__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: feat_arc_sta__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. feat_arc_sta:: This is a test + :id: feat_arc_sta__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: feat_arc_sta__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. feat_arc_sta:: This is a test +.. :id: feat_arc_sta__test_bad_1 +.. :status: valid +.. :safety: ASIL_D + +#EXPECT-NOT: feat_arc_dyn__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. feat_arc_dyn:: This is a test + :id: feat_arc_dyn__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: feat_arc_dyn__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. feat_arc_dyn:: This is a test + :id: feat_arc_dyn__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: feat_arc_dyn__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. feat_arc_dyn:: This is a test +.. :id: feat_arc_dyn__test_bad_1 +.. :status: valid +.. :safety: ASIL_D + +#EXPECT-NOT: logic_arc_int__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. logic_arc_int:: This is a test + :id: logic_arc_int__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: logic_arc_int__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. logic_arc_int:: This is a test + :id: logic_arc_int__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: logic_arc_int__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. logic_arc_int:: This is a test +.. :id: logic_arc_int__test_bad_1 +.. :status: valid +.. :safety: ASIL_D + +#EXPECT-NOT: logic_arc_int_op__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. 
logic_arc_int_op:: This is a test + :id: logic_arc_int_op__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: logic_arc_int_op__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. logic_arc_int_op:: This is a test + :id: logic_arc_int_op__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: logic_arc_int_op__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. logic_arc_int_op:: This is a test +.. :id: logic_arc_int_op__test_bad_1 +.. :status: valid +.. :safety: ASIL_D + +#EXPECT-NOT: comp_arc_sta__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. comp_arc_sta:: This is a test + :id: comp_arc_sta__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: comp_arc_sta__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. comp_arc_sta:: This is a test + :id: comp_arc_sta__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: comp_arc_sta__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. comp_arc_sta:: This is a test +.. :id: comp_arc_sta__test_bad_1 +.. :status: valid +.. :safety: ASIL_D + +#EXPECT-NOT: comp_arc_dyn__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. comp_arc_dyn:: This is a test + :id: comp_arc_dyn__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: comp_arc_dyn__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. comp_arc_dyn:: This is a test + :id: comp_arc_dyn__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: comp_arc_dyn__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. comp_arc_dyn:: This is a test +.. :id: comp_arc_dyn__test_bad_1 +.. :status: valid +.. :safety: ASIL_D + +#EXPECT-NOT: real_arc_int__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. 
real_arc_int:: This is a test + :id: real_arc_int__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: real_arc_int__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. real_arc_int:: This is a test + :id: real_arc_int__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: real_arc_int__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. real_arc_int:: This is a test +.. :id: real_arc_int__test_bad_1 +.. :status: valid +.. :safety: ASIL_D + +#EXPECT-NOT: real_arc_int_op__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. real_arc_int_op:: This is a test + :id: real_arc_int_op__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: real_arc_int_op__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. real_arc_int_op:: This is a test + :id: real_arc_int_op__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: real_arc_int_op__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. real_arc_int_op:: This is a test +.. :id: real_arc_int_op__test_bad_1 +.. :status: valid +.. :safety: ASIL_D + +#EXPECT-NOT: dd_sta__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. dd_sta:: This is a test + :id: dd_sta__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: dd_sta__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. dd_sta:: This is a test + :id: dd_sta__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: dd_sta__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. dd_sta:: This is a test +.. :id: dd_sta__test_bad_1 +.. :status: valid +.. :safety: ASIL_D + +#EXPECT-NOT: dd_dyn__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. dd_dyn:: This is a test + :id: dd_dyn__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: dd_dyn__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. 
dd_dyn:: This is a test + :id: dd_dyn__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: dd_dyn__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. dd_dyn:: This is a test +.. :id: dd_dyn__test_bad_1 +.. :status: valid +.. :safety: ASIL_D + +#EXPECT-NOT: sw_unit__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. + +.. sw_unit:: This is a test + :id: sw_unit__test_good_1 + :status: valid + :safety: QM + +#EXPECT-NOT: sw_unit__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. + +.. sw_unit:: This is a test + :id: sw_unit__test_good_2 + :status: valid + :safety: ASIL_B + +.. #EXPECT: sw_unit__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. + +.. .. sw_unit:: This is a test +.. :id: sw_unit__test_bad_1 +.. :status: valid +.. :safety: ASIL_D diff --git a/src/extensions/score_metamodel/tests/test_rules_file_based.py b/src/extensions/score_metamodel/tests/test_rules_file_based.py index 24aa324e..f0268ec2 100644 --- a/src/extensions/score_metamodel/tests/test_rules_file_based.py +++ b/src/extensions/score_metamodel/tests/test_rules_file_based.py @@ -185,7 +185,7 @@ def test_rst_files( # Collect the warnings warnings = app.warning.getvalue().splitlines() - # print(f"Warnings: {warnings}") + print(f"Warnings: {warnings}") # Check if the expected warnings are present for warning_info in rst_data.warning_infos: From 98e7276e8ab8316f9367866deb699cc5f038cafd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Mon, 14 Jul 2025 13:48:13 +0200 Subject: [PATCH 063/231] Fix SCL test. (#135) Lists were compared with order in mind, not just content. Fixed it, to only compare content, not order. 
--- src/extensions/score_metamodel/log.py | 2 +- .../score_source_code_linker/tests/test_source_link.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/extensions/score_metamodel/log.py b/src/extensions/score_metamodel/log.py index 2e19c97b..5f61c9ed 100644 --- a/src/extensions/score_metamodel/log.py +++ b/src/extensions/score_metamodel/log.py @@ -63,7 +63,7 @@ def _log_message( if is_info: msg += ( "\nPlease fix this warning related to the new check " - "before the release of the next version of Score." + "before the release of the next version of Docs-As-Code." ) self.info(msg, location) else: diff --git a/src/extensions/score_source_code_linker/tests/test_source_link.py b/src/extensions/score_source_code_linker/tests/test_source_link.py index ce050960..8771f48d 100644 --- a/src/extensions/score_source_code_linker/tests/test_source_link.py +++ b/src/extensions/score_source_code_linker/tests/test_source_link.py @@ -320,7 +320,9 @@ def test_source_link_integration_ok( ws_root, example_source_link_text_all_ok[f"TREQ_ID_{i}"] ) # extra_options are only available at runtime - assert expected_link == need_as_dict["source_code_link"] # type: ignore) + # Compare contents, regardless of order. 
+ actual_source_code_link = cast(list[str], need_as_dict["source_code_link"]) + assert set(expected_link) == set(actual_source_code_link) finally: app.cleanup() From e5c2dc56d6737ca3359332431af0754ecae205e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Volker=20H=C3=A4ussler?= Date: Mon, 14 Jul 2025 14:17:57 +0200 Subject: [PATCH 064/231] Bugfix: Attribute Safety Analysis (#131) * correct DFA mandatory links * changed violation_applicable attribute back to sufficient --- src/extensions/score_metamodel/metamodel.yaml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 4aca501d..399af0b6 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -514,12 +514,15 @@ needs_types: violation_cause: ^.*$ sufficient: ^(yes|no)$ status: ^(valid|invalid)$ + optional_options: + sufficient: ^(yes|no)$ mandatory_links: mitigates: ^(feat_req__.*|aou_req__.*)$ - verifies: ^feat_arc_dyn__[0-9a-z_]*$ + verifies: ^feat_arc_sta__[0-9a-z_]*$ optional_links: mitigation_issue: ^https://github.com/.*$ + feat_saf_dfa: title: DFA prefix: feat_saf_dfa__ @@ -531,7 +534,7 @@ needs_types: status: ^(valid|invalid)$ mandatory_links: mitigates: ^(feat_req__.*|aou_req__.*)$ - verifies: ^feat_arc_dyn__[0-9a-z_]*$ + verifies: ^feat_arc_sta__[0-9a-z_]*$ optional_links: mitigation_issue: ^https://github.com/.*$ @@ -546,7 +549,7 @@ needs_types: status: ^(valid|invalid)$ mandatory_links: mitigates: ^(comp_req__.*|aou_req__.*)$ - verifies: ^comp_arc_dyn__[0-9a-z_]*$ + verifies: ^comp_arc_sta__[0-9a-z_]*$ optional_links: mitigation_issue: ^https://github.com/.*$ From 609d4fb25e3baf106e6855f6547a1f5ab33d16d2 Mon Sep 17 00:00:00 2001 From: Nicolae Dicu Date: Wed, 16 Jul 2025 09:49:21 +0300 Subject: [PATCH 065/231] Add cli help targets description. (#134) * Add cli help targets description. 
* tooling: Update versions of used tools Signed-off-by: Nicolae Dicu --- MODULE.bazel | 6 +++--- docs.bzl | 22 ++++++++++++++++++++++ 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index cae1f74c..5c46f407 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.4.2", + version = "0.4.3", compatibility_level = 0, ) @@ -88,10 +88,10 @@ http_file( ) # Provides, pytest & venv -bazel_dep(name = "score_python_basics", version = "0.3.2") +bazel_dep(name = "score_python_basics", version = "0.3.3") # Checker rule for CopyRight checks/fixes -bazel_dep(name = "score_cr_checker", version = "0.2.2") +bazel_dep(name = "score_cr_checker", version = "0.3.1") # This is only needed to build the examples. diff --git a/docs.bzl b/docs.bzl index f1c9b023..8d7cb482 100644 --- a/docs.bzl +++ b/docs.bzl @@ -124,6 +124,22 @@ def _incremental(incremental_name = "incremental", live_name = "live_preview", s dependencies = sphinx_requirements + extra_dependencies + ["@rules_python//python/runfiles"] + # Create description tags for the incremental targets. + call_path = native.package_name() + incremental_tag = "cli_help=Build documentation incrementally:\nbazel run //" + call_path + ":" + incremental_name + + if incremental_name == "incremental_latest": + incremental_tag = ( + "cli_help=Build documentation incrementally (use current main branch of imported docs repositories " + + "(e.g. 
process_description)):\n" + + "bazel run //" + call_path + ":incremental_latest" + ) + elif incremental_name == "incremental_release": + incremental_tag = ( + "cli_help=Build documentation incrementally (use release version imported in MODULE.bazel):\n" + + "bazel run //" + call_path + ":incremental_release" + ) + py_binary( name = incremental_name, srcs = ["@score_docs_as_code//src:incremental.py"], @@ -137,6 +153,7 @@ def _incremental(incremental_name = "incremental", live_name = "live_preview", s "EXTERNAL_NEEDS_INFO": json.encode(external_needs_def), "ACTION": "incremental", }, + tags = [incremental_tag], ) py_binary( @@ -154,10 +171,15 @@ def _incremental(incremental_name = "incremental", live_name = "live_preview", s ) def _ide_support(extra_dependencies): + call_path = native.package_name() score_virtualenv( name = "ide_support", venv_name = ".venv_docs", reqs = sphinx_requirements + extra_dependencies, + tags = [ + "cli_help=Create virtual environment for documentation:\n" + + "bazel run //" + call_path + ":ide_support", + ], ) def _docs(name = "docs", suffix = "", format = "html", external_needs_deps = list(), external_needs_def = list()): From 8dac10b3f2656ca27e62127cc6d38e7334e41d20 Mon Sep 17 00:00:00 2001 From: Markus Schu <142009492+masc2023@users.noreply.github.com> Date: Wed, 16 Jul 2025 11:46:04 +0200 Subject: [PATCH 066/231] add new need doc_tool (#137) doc_tool added for tool verification reports updated document with optional_links and moved realizes under this category --- MODULE.bazel | 2 +- src/extensions/score_metamodel/metamodel.yaml | 22 ++++++++++++++++++- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 5c46f407..99f6c852 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -97,4 +97,4 @@ bazel_dep(name = "score_cr_checker", version = "0.3.1") # Grab dash bazel_dep(name = "score_dash_license_checker", version = "0.1.1") -bazel_dep(name = "score_process", version = "1.0.2") +bazel_dep(name = 
"score_process", version = "1.0.4") diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 399af0b6..25e3cab6 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -166,7 +166,7 @@ needs_types: optional_links: contains: ^rl__.*$ - # Documents + # Documents, process_description only doc_concept: title: Concept Definition prefix: doc_concept__ @@ -181,6 +181,7 @@ needs_types: id: ^doc_getstrt__[0-9a-z_]*$ status: ^(valid|draft)$ + # Documents, score, and other modules only document: title: Generic Document prefix: doc__ @@ -190,6 +191,20 @@ needs_types: optional_options: safety: "^(QM|ASIL_B)$" security: "^(YES|NO)$" + optional_links: + realizes: "^wp__.+$" + + doc_tool: + title: Tool Verification Report + prefix: doc_tool__ + mandatory_options: + id: ^doc_tool__[0-9a-z_]*$ + status: ^(draft|evaluated|qualified|released|rejected)$ + version: ^.*$ + safety_affected: "^(YES|NO)$" + security_affected: "^(YES|NO)$" + tcl: "^(LOW|HIGH)$" + optional_links: realizes: "^wp__.+$" # Requirements @@ -628,6 +643,11 @@ needs_extra_links: incoming: complies to outgoing: complies + # document, doc_tool + realizes: + incoming: realized by + outgoing: realizes + ############################################################## # S-CORE Metamodel ############################################################## From e866db56a75c00ed8d629eee63bbce7763a99e32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Thu, 17 Jul 2025 09:22:04 +0200 Subject: [PATCH 067/231] Adding doc_tool to exception list (#139) --- src/extensions/score_metamodel/checks/attributes_format.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/extensions/score_metamodel/checks/attributes_format.py b/src/extensions/score_metamodel/checks/attributes_format.py index 55992d8b..b4d09bfe 100644 --- a/src/extensions/score_metamodel/checks/attributes_format.py +++ 
b/src/extensions/score_metamodel/checks/attributes_format.py @@ -41,6 +41,7 @@ def check_id_format(app: Sphinx, need: NeedsInfoType, log: CheckLogger): if need["type"] in [ "std_wp", "document", # This is used in 'platform_managment' in score. + "doc_tool", "gd_guidl", "workflow", "gd_chklst", From 2ce245b943bcb64afc2330f9ba33dc259e052d6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Thu, 17 Jul 2025 09:31:06 +0200 Subject: [PATCH 068/231] Increase version of module (#141) --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index 99f6c852..000db285 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.4.3", + version = "0.4.4", compatibility_level = 0, ) From c3065ed2033d5d892b9234ddbae976be90e87154 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Tue, 22 Jul 2025 16:05:42 +0200 Subject: [PATCH 069/231] Fixing current BUILD errors (#153) * Fixing current BUILD errors * Fixed ASIL_D references. * Fixed dead links. * Enabled disallowing of ASIL_D. 
* Out comment DEBUG level logger --- docs/how-to-integrate/example/index.rst | 2 +- .../example/testing/index.rst | 2 +- docs/product/requirements.rst | 23 ++-- examples/linking-both/index.rst | 2 +- examples/linking-both/testing/test.rst | 2 +- .../score_metamodel/checks/check_options.py | 5 +- src/extensions/score_metamodel/metamodel.yaml | 5 +- .../tests/rst/graph/test_metamodel_graph.rst | 85 +-------------- .../rst/options/test_options_options.rst | 102 ------------------ .../score_source_code_linker/__init__.py | 20 ++-- src/find_runfiles/__init__.py | 2 +- 11 files changed, 36 insertions(+), 214 deletions(-) diff --git a/docs/how-to-integrate/example/index.rst b/docs/how-to-integrate/example/index.rst index e7e0339a..622d8afa 100644 --- a/docs/how-to-integrate/example/index.rst +++ b/docs/how-to-integrate/example/index.rst @@ -39,7 +39,7 @@ This is a rendered example of the 'examples/linking-both' folder using the `docs :id: tool_req__example__some_title :reqtype: Process :security: YES - :safety: ASIL_D + :safety: ASIL_B :satisfies: PROCESS_gd_req__req__attr_uid :status: invalid diff --git a/docs/how-to-integrate/example/testing/index.rst b/docs/how-to-integrate/example/testing/index.rst index 353694d4..d2fcd839 100644 --- a/docs/how-to-integrate/example/testing/index.rst +++ b/docs/how-to-integrate/example/testing/index.rst @@ -33,7 +33,7 @@ This example will help catch things and bugs when rst's are defined inside a fol :id: tool_req__testing__some_title :reqtype: Process :security: YES - :safety: ASIL_D + :safety: ASIL_B :satisfies: PROCESS_gd_req__req__attr_uid :status: invalid diff --git a/docs/product/requirements.rst b/docs/product/requirements.rst index b9e4150d..edf39d69 100644 --- a/docs/product/requirements.rst +++ b/docs/product/requirements.rst @@ -91,7 +91,7 @@ This section provides an overview of current process requirements and their clar :id: tool_req__docs_common_attr_title :implemented: PARTIAL :tags: Common Attributes - :satisfies: 
PROCESS_gd_req__requirements_attr_title + :satisfies: PROCESS_gd_req__req__attr_title :parent_covered: NO: Can not ensure summary @@ -123,8 +123,8 @@ This section provides an overview of current process requirements and their clar :implemented: PARTIAL :tags: Common Attributes :satisfies: - PROCESS_gd_req__requirements_attr_security, - PROCESS_gd_req__arch_attr_security, + PROCESS_gd_req__req__attr_security, + PROCESS_gd_req__arch__attr_security, Docs-as-Code shall enforce that the ``security`` attribute has one of the following values: @@ -154,7 +154,7 @@ This section provides an overview of current process requirements and their clar * QM * ASIL_B - * ASIL_D + This rule applies to: @@ -204,9 +204,9 @@ This section provides an overview of current process requirements and their clar :tags: Documents :implemented: NO :satisfies: - PROCESS_gd_req__doc_author, - PROCESS_gd_req__doc_approver, - PROCESS_gd_req__doc_reviewer, + PROCESS_gd_req__doc__author, + PROCESS_gd_req__doc__approver, + PROCESS_gd_req__doc__reviewer, :parent_covered: NO Docs-as-Code shall enforce that each :need:`tool_req__docs_doc_types` has the @@ -221,7 +221,7 @@ This section provides an overview of current process requirements and their clar :id: tool_req__docs_doc_attr_author_autofill :tags: Documents :implemented: NO - :satisfies: PROCESS_gd_req__doc_author + :satisfies: PROCESS_gd_req__doc__author :parent_covered: YES: Together with tool_req__docs_doc_attr :status: invalid @@ -239,7 +239,7 @@ This section provides an overview of current process requirements and their clar :id: tool_req__docs_doc_attr_approver_autofill :tags: Documents :implemented: NO - :satisfies: PROCESS_gd_req__doc_approver + :satisfies: PROCESS_gd_req__doc__approver :parent_covered: YES: Together with tool_req__docs_doc_attr Docs-as-Code shall provide an automatic mechanism to determine the document approver. 
@@ -252,7 +252,7 @@ This section provides an overview of current process requirements and their clar :id: tool_req__docs_doc_attr_reviewer_autofill :tags: Documents :implemented: NO - :satisfies: PROCESS_gd_req__doc_reviewer + :satisfies: PROCESS_gd_req__doc__reviewer :parent_covered: YES: Together with tool_req__docs_doc_attr Docs-as-Code shall provide an automatic mechanism to determine the document reviewers. @@ -566,7 +566,7 @@ This section provides an overview of current process requirements and their clar :tags: Tool Verification Reports :implemented: NO :parent_covered: YES - :satisfies: PROCESS_gd_req__tool_attr_security_affected + :satisfies: PROCESS_gd_req__tool__attr_security_affected Docs-as-Code shall enforce that every Tool Verification Report includes a ``security_affected`` attribute with one of the following values: @@ -574,6 +574,7 @@ This section provides an overview of current process requirements and their clar * YES * NO + .. tool_req:: Enforce status classification :id: tool_req__docs_tvr_status :tags: Tool Verification Reports diff --git a/examples/linking-both/index.rst b/examples/linking-both/index.rst index 24f0d55c..b2dbbb76 100644 --- a/examples/linking-both/index.rst +++ b/examples/linking-both/index.rst @@ -42,7 +42,7 @@ This is a simple example of a documentation page using the `docs` tool. 
:id: tool_req__index__some_title :reqtype: Process :security: YES - :safety: ASIL_D + :safety: ASIL_B :satisfies: PROCESS_gd_req__req__attr_uid :status: invalid diff --git a/examples/linking-both/testing/test.rst b/examples/linking-both/testing/test.rst index d5b2ecef..e9d0e63a 100644 --- a/examples/linking-both/testing/test.rst +++ b/examples/linking-both/testing/test.rst @@ -33,7 +33,7 @@ This example will help catch things and bugs when rst's are defined inside a fol :id: tool_req__testing__some_title :reqtype: Process :security: YES - :safety: ASIL_D + :safety: ASIL_B :satisfies: PROCESS_gd_req__req__attr_uid :status: invalid diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index 57e0c073..ca675cd5 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -82,10 +82,7 @@ def remove_prefix(word: str, prefixes: list[str]) -> str: try: if not re.match(pattern, value): log.warning_for_option( - need, - field, - f"does not follow pattern `{pattern}`.", - new_check="ASIL_D" in value, + need, field, f"does not follow pattern `{pattern}`." 
) except TypeError: log.warning_for_option( diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 25e3cab6..3eefe231 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -720,7 +720,10 @@ graph_checks: include: comp_req, feat_req condition: safety == ASIL_B check: - satisfies: safety != ASIL_D + satisfies: + or: + - safety == ASIL_B + - safety == QM # saf - ID gd_req__saf_linkage_safety # It shall be checked that Safety Analysis (DFA and FMEA) can only be linked via mitigate against # - Requirements with the same ASIL or diff --git a/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst b/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst index b193749e..49c51f07 100644 --- a/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst +++ b/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst @@ -28,10 +28,6 @@ :safety: ASIL_B :status: valid -.. feat_req:: Parent requirement ASIL_D - :id: feat_req__parent__ASIL_D - :safety: ASIL_D - :status: valid .. Positive Test: Child requirement QM. Parent requirement has the correct related safety level. Parent requirement is `QM`. @@ -52,14 +48,6 @@ :satisfies: feat_req__parent__ASIL_B :status: valid -.. Positive Test: Child requirement ASIL D. Parent requirement has the correct related safety level. Parent requirement is `QM`. -#EXPECT-NOT: feat_req__child__3: parent need `feat_req__parent__ASIL_D` does not fulfill condition `safety == QM`. - -.. feat_req:: Child requirement 3 - :id: feat_req__child__3 - :safety: ASIL_D - :satisfies: feat_req__parent__ASIL_D - :status: valid .. Negative Test: Child requirement QM. Parent requirement is `ASIL_B`. Child cant fulfill the safety level of the parent. #EXPECT: feat_req__child__4: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety == QM`. 
@@ -70,41 +58,7 @@ :satisfies: feat_req__parent__ASIL_B :status: valid -.. Negative Test: Child requirement QM. Parent requirement is `ASIL_D`. Child cant fulfill the safety level of the parent. -#EXPECT: feat_req__child__5: parent need `feat_req__parent__ASIL_D` does not fulfill condition `safety == QM`. - -.. comp_req:: Child requirement 5 - :id: feat_req__child__5 - :safety: QM - :satisfies: feat_req__parent__ASIL_D - :status: valid - -.. Positive Test: Child requirement ASIL_B. Parent requirement has the correct related safety level. Parent requirement is `QM`. -#EXPECT-NOT: feat_req__child__6: parent need `feat_req__parent__QM` does not fulfill condition `safety != ASIL_D`. - -.. feat_req:: Child requirement 6 - :id: feat_req__child__6 - :safety: ASIL_B - :satisfies: feat_req__parent__QM - :status: valid - -.. Positive Test: Child requirement ASIL_B. Parent requirement has the correct related safety level. Parent requirement is `ASIL_B`. -#EXPECT-NOT: feat_req__child__7: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != ASIL_D`. - -.. feat_req:: Child requirement 7 - :id: feat_req__child__7 - :safety: ASIL_B - :satisfies: feat_req__parent__ASIL_B - :status: valid - -.. Negative Test: Child requirement ASIL_B. Parent requirement is `ASIL_D`. Child cant fulfill the safety level of the parent. -#EXPECT: feat_req__child__8: parent need `feat_req__parent__ASIL_D` does not fulfill condition `safety != ASIL_D`. -.. comp_req:: Child requirement 8 - :id: feat_req__child__8 - :safety: ASIL_B - :satisfies: feat_req__parent__ASIL_D - :status: valid @@ -137,14 +91,6 @@ :status: valid :mitigates: feat_req__parent__ASIL_B -.. Positive Test: Linked to a mitigation that is higher to the safety level of the analysed item. -#EXPECT-NOT: feat_saf_dfa__child__12: parent need `feat_req__parent__ASIL_D` does not fulfill condition `safety != QM`. - -.. 
feat_saf_dfa:: Child requirement 12 - :id: feat_saf_dfa__child__12 - :safety: ASIL_B - :status: valid - :mitigates: feat_req__parent__ASIL_D .. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. #EXPECT: comp_saf_dfa__child__13: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. @@ -164,14 +110,6 @@ :status: valid :mitigates: feat_req__parent__ASIL_B -.. Positive Test: Linked to a mitigation that is higher to the safety level of the analysed item. -#EXPECT-NOT: comp_saf_dfa__child__15: parent need `feat_req__parent__ASIL_D` does not fulfill condition `safety != QM`. - -.. comp_saf_dfa:: Child requirement 15 - :id: comp_saf_dfa__child__15 - :safety: ASIL_B - :status: valid - :mitigates: feat_req__parent__ASIL_D .. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. #EXPECT: feat_plat_saf_dfa__child__16: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. @@ -191,14 +129,6 @@ :status: valid :mitigates: feat_req__parent__ASIL_B -.. Positive Test: Linked to a mitigation that is higher to the safety level of the analysed item. -#EXPECT-NOT: feat_plat_saf_dfa__child__18: parent need `feat_req__parent__ASIL_D` does not fulfill condition `safety != QM`. - -.. feat_plat_saf_dfa:: Child requirement 18 - :id: feat_plat_saf_dfa__child__15 - :safety: ASIL_B - :status: valid - :mitigates: feat_req__parent__ASIL_D .. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. #EXPECT: feat_saf_fmea__child__19: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. @@ -218,14 +148,15 @@ :status: valid :mitigates: feat_req__parent__ASIL_B + .. Positive Test: Linked to a mitigation that is higher to the safety level of the analysed item. -#EXPECT-NOT: feat_saf_fmea__child__21: parent need `feat_req__parent__ASIL_D` does not fulfill condition `safety != QM`. 
+#EXPECT-NOT: feat_saf_fmea__child__21: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. .. feat_saf_fmea:: Child requirement 21 :id: feat_saf_fmea__child__21 - :safety: ASIL_B + :safety: QM :status: valid - :mitigates: feat_req__parent__ASIL_D + :mitigates: feat_req__parent__ASIL_B .. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. #EXPECT: comp_saf_fmea__child__22: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. @@ -245,11 +176,3 @@ :status: valid :mitigates: feat_req__parent__ASIL_B -.. Positive Test: Linked to a mitigation that is higher to the safety level of the analysed item. -#EXPECT-NOT: comp_saf_fmea__child__24: parent need `feat_req__parent__ASIL_D` does not fulfill condition `safety != QM`. - -.. comp_saf_fmea:: Child requirement 24 - :id: comp_saf_fmea__child__24 - :safety: ASIL_B - :status: valid - :mitigates: feat_req__parent__ASIL_D diff --git a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst index 016f27f1..6ec03192 100644 --- a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst +++ b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst @@ -215,12 +215,6 @@ :status: valid :safety: ASIL_B -.. #EXPECT: doc__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. document:: This is a test document -.. :id: doc__test_bad_1 -.. :status: valid -.. :safety: ASIL_D #EXPECT-NOT: stkh_req__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. @@ -236,12 +230,6 @@ :status: valid :safety: ASIL_B -.. #EXPECT: stkh_req__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. stkh_req:: This is a test -.. :id: stkh_req__test_bad_1 -.. :status: valid -.. :safety: ASIL_D #EXPECT-NOT: feat_req__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. 
@@ -257,12 +245,6 @@ :status: valid :safety: ASIL_B -.. #EXPECT: feat_req__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. feat_req:: This is a test -.. :id: feat_req__test_bad_1 -.. :status: valid -.. :safety: ASIL_D #EXPECT-NOT: comp_req__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. @@ -278,12 +260,6 @@ :status: valid :safety: ASIL_B -.. #EXPECT: comp_req__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. comp_req:: This is a test -.. :id: comp_req__test_bad_1 -.. :status: valid -.. :safety: ASIL_D #EXPECT-NOT: tool_req__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. @@ -299,12 +275,6 @@ :status: valid :safety: ASIL_B -.. #EXPECT: tool_req__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. tool_req:: This is a test -.. :id: tool_req__test_bad_1 -.. :status: valid -.. :safety: ASIL_D #EXPECT-NOT: aou_req__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. @@ -320,12 +290,6 @@ :status: valid :safety: ASIL_B -.. #EXPECT: aou_req__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. aou_req:: This is a test -.. :id: aou_req__test_bad_1 -.. :status: valid -.. :safety: ASIL_D #EXPECT-NOT: feat_arc_sta__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. @@ -341,12 +305,6 @@ :status: valid :safety: ASIL_B -.. #EXPECT: feat_arc_sta__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. feat_arc_sta:: This is a test -.. :id: feat_arc_sta__test_bad_1 -.. :status: valid -.. :safety: ASIL_D #EXPECT-NOT: feat_arc_dyn__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. @@ -362,12 +320,6 @@ :status: valid :safety: ASIL_B -.. #EXPECT: feat_arc_dyn__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. feat_arc_dyn:: This is a test -.. :id: feat_arc_dyn__test_bad_1 -.. :status: valid -.. 
:safety: ASIL_D #EXPECT-NOT: logic_arc_int__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. @@ -383,12 +335,6 @@ :status: valid :safety: ASIL_B -.. #EXPECT: logic_arc_int__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. logic_arc_int:: This is a test -.. :id: logic_arc_int__test_bad_1 -.. :status: valid -.. :safety: ASIL_D #EXPECT-NOT: logic_arc_int_op__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. @@ -404,12 +350,6 @@ :status: valid :safety: ASIL_B -.. #EXPECT: logic_arc_int_op__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. logic_arc_int_op:: This is a test -.. :id: logic_arc_int_op__test_bad_1 -.. :status: valid -.. :safety: ASIL_D #EXPECT-NOT: comp_arc_sta__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. @@ -425,12 +365,6 @@ :status: valid :safety: ASIL_B -.. #EXPECT: comp_arc_sta__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. comp_arc_sta:: This is a test -.. :id: comp_arc_sta__test_bad_1 -.. :status: valid -.. :safety: ASIL_D #EXPECT-NOT: comp_arc_dyn__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. @@ -446,12 +380,6 @@ :status: valid :safety: ASIL_B -.. #EXPECT: comp_arc_dyn__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. comp_arc_dyn:: This is a test -.. :id: comp_arc_dyn__test_bad_1 -.. :status: valid -.. :safety: ASIL_D #EXPECT-NOT: real_arc_int__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. @@ -467,12 +395,6 @@ :status: valid :safety: ASIL_B -.. #EXPECT: real_arc_int__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. real_arc_int:: This is a test -.. :id: real_arc_int__test_bad_1 -.. :status: valid -.. :safety: ASIL_D #EXPECT-NOT: real_arc_int_op__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. @@ -488,12 +410,6 @@ :status: valid :safety: ASIL_B -.. 
#EXPECT: real_arc_int_op__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. real_arc_int_op:: This is a test -.. :id: real_arc_int_op__test_bad_1 -.. :status: valid -.. :safety: ASIL_D #EXPECT-NOT: dd_sta__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. @@ -509,12 +425,6 @@ :status: valid :safety: ASIL_B -.. #EXPECT: dd_sta__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. dd_sta:: This is a test -.. :id: dd_sta__test_bad_1 -.. :status: valid -.. :safety: ASIL_D #EXPECT-NOT: dd_dyn__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. @@ -530,12 +440,6 @@ :status: valid :safety: ASIL_B -.. #EXPECT: dd_dyn__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. dd_dyn:: This is a test -.. :id: dd_dyn__test_bad_1 -.. :status: valid -.. :safety: ASIL_D #EXPECT-NOT: sw_unit__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. @@ -551,9 +455,3 @@ :status: valid :safety: ASIL_B -.. #EXPECT: sw_unit__test_bad_1.safety (ASIL_D): does not follow pattern `^(QM|ASIL_B)$`. - -.. .. sw_unit:: This is a test -.. :id: sw_unit__test_bad_1 -.. :status: valid -.. :safety: ASIL_D diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index aebac616..3ea1d3b3 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -16,8 +16,6 @@ """ import subprocess -import os -from pprint import pprint from collections import defaultdict from copy import deepcopy from pathlib import Path @@ -41,7 +39,8 @@ ) LOGGER = get_logger(__name__) -LOGGER.setLevel("DEBUG") +# Outcomment this to enable more verbose logging +# LOGGER.setLevel("DEBUG") def get_cache_filename(build_dir: Path) -> Path: @@ -56,9 +55,11 @@ def setup_once(app: Sphinx, config: Config): # might be the only way to solve this? 
if "skip_rescanning_via_source_code_linker" in app.config: return - print(f"DEBUG: Workspace root is {find_ws_root()}") - print(f"DEBUG: Current working directory is {Path('.')} = {Path('.').resolve()}") - print(f"DEBUG: Git root is {find_git_root()}") + LOGGER.debug(f"DEBUG: Workspace root is {find_ws_root()}") + LOGGER.debug( + f"DEBUG: Current working directory is {Path('.')} = {Path('.').resolve()}" + ) + LOGGER.debug(f"DEBUG: Git root is {find_git_root()}") # Run only for local files! # ws_root is not set when running on external repositories (dependencies). @@ -235,7 +236,6 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: env: Buildenvironment, this is filled automatically app: Sphinx app application, this is filled automatically """ - print("inject_links_into_needs!!!!") ws_root = find_ws_root() assert ws_root @@ -247,9 +247,9 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: ) # TODO: why do we create a copy? Can we also needs_copy = needs[:]? copy(needs)? for id, need in needs.items(): - if need["source_code_link"]: - print( - f"?? Need {need['id']} already has source_code_link: {need['source_code_link']}" + if need.get("source_code_link"): + LOGGER.debug( + f"?? 
Need {need['id']} already has source_code_link: {need.get('source_code_link')}" ) source_code_links = load_source_code_links_json(get_cache_filename(app.outdir)) diff --git a/src/find_runfiles/__init__.py b/src/find_runfiles/__init__.py index 4a87da6b..a1aec645 100644 --- a/src/find_runfiles/__init__.py +++ b/src/find_runfiles/__init__.py @@ -16,7 +16,7 @@ from pathlib import Path logger = logging.getLogger(__name__) -logger.setLevel(logging.DEBUG) +# logger.setLevel(logging.DEBUG) def _log_debug(message: str): From a7bea320f926ea4f741ec085288109f9eba02af8 Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Tue, 22 Jul 2025 19:27:25 +0200 Subject: [PATCH 070/231] Expand the check of stop words in the title to be for all requirements (#149) * Expand the check of stop words in the title to be for all requirements * Remove redundancy of defining types in the metamodel and add tags to differentiate them * Add 2 test cases to cover architecture element scenarios --- src/extensions/score_metamodel/__init__.py | 4 ++-- .../checks/attributes_format.py | 15 +++++++++++- src/extensions/score_metamodel/metamodel.yaml | 24 +++++++++++++++++++ .../test_attributes_format_title.rst | 12 ++++++++++ 4 files changed, 52 insertions(+), 3 deletions(-) diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index a19ce1f9..8365e47a 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -145,7 +145,6 @@ def load_metamodel_data(): global_base_options = data.get("needs_types_base_options", {}) global_base_options_optional_opts = global_base_options.get("optional_options", {}) - # Get the list of stop-words and weak-words # Get the stop_words and weak_words as separate lists stop_words_list = global_base_options.get("prohibited_words", {}).get("title", []) weak_words_list = global_base_options.get("prohibited_words", {}).get("content", []) @@ -173,6 +172,8 @@ def load_metamodel_data(): # 
Store mandatory_options and optional_options directly as a dict mandatory_options = directive_data.get("mandatory_options", {}) one_type["mandatory_options"] = mandatory_options + tags = directive_data.get("tags", []) + one_type["tags"] = tags optional_options = directive_data.get("optional_options", {}) optional_options.update(global_base_options_optional_opts) @@ -299,7 +300,6 @@ def setup(app: Sphinx) -> dict[str, str | bool]: app.config.graph_checks = metamodel["needs_graph_check"] app.config.stop_words = metamodel["stop_words"] app.config.weak_words = metamodel["weak_words"] - # Ensure that 'needs.json' is always build. app.config.needs_build_json = True app.config.needs_reproducible_json = True diff --git a/src/extensions/score_metamodel/checks/attributes_format.py b/src/extensions/score_metamodel/checks/attributes_format.py index b4d09bfe..288d199e 100644 --- a/src/extensions/score_metamodel/checks/attributes_format.py +++ b/src/extensions/score_metamodel/checks/attributes_format.py @@ -16,6 +16,14 @@ from sphinx_needs.data import NeedsInfoType +def get_need_type(needs_types: list[str], directive: str) -> str: + for need_type in needs_types: + assert isinstance(need_type, dict), need_type + if need_type["directive"] == directive: + return need_type + raise ValueError(f"Need type {directive} not found in needs_types") + + # req-#id: gd_req__req__attr_uid @local_check def check_id_format(app: Sphinx, need: NeedsInfoType, log: CheckLogger): @@ -96,7 +104,12 @@ def check_title(app: Sphinx, need: NeedsInfoType, log: CheckLogger): This helps enforce clear and concise naming conventions. 
""" stop_words = app.config.stop_words - if need["type"] in ["stkh_req", "feat_req", "comp_req"]: + need_options = get_need_type(app.config.needs_types, need["type"]) + + if any( + tag in need_options.get("tags", []) + for tag in ["architecture_element", "requirement"] + ): for word in stop_words: if word in need["title"]: msg = ( diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 3eefe231..ecc1fa82 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -73,6 +73,8 @@ needs_types: status: ^(valid)$ optional_links: links: ^.*$ + tags: + - requirement std_wp: title: Standard Work Product @@ -111,6 +113,8 @@ needs_types: complies: ^std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ # req-Id: tool_req__docs_req_link_satisfies_allowed satisfies: ^wf__.*$ + tags: + - requirement gd_temp: title: Process Template @@ -229,6 +233,8 @@ needs_types: # req-Id: tool_req__docs_req_attr_testcov testcovered: ^(YES|NO)$ hash: ^.*$ + tags: + - requirement # req-Id: tool_req__docs_req_types feat_req: @@ -253,6 +259,8 @@ needs_types: # req-Id: tool_req__docs_req_attr_testcov testcovered: ^(YES|NO)$ hash: ^.*$ + tags: + - requirement # req-Id: tool_req__docs_req_types comp_req: @@ -276,6 +284,8 @@ needs_types: # req-Id: tool_req__docs_req_attr_testcov testcovered: ^(YES|NO)$ hash: ^.*$ + tags: + - requirement # req-Id: tool_req__docs_req_types tool_req: @@ -303,6 +313,8 @@ needs_types: implemented: ^(YES|PARTIAL|NO)$ parent_covered: ^.*$ parent_has_problem: ^.*$ + tags: + - requirement # req-Id: tool_req__docs_req_types aou_req: @@ -325,6 +337,8 @@ needs_types: hash: ^.*$ optional_links: mitigates: ^.*$ + tags: + - requirement # Architecture @@ -342,6 +356,8 @@ needs_types: includes: ^logic_arc_int(_op)*__.+$ optional_links: fulfils: ^feat_req__.+$ + tags: + - architecture_element feat_arc_dyn: title: Feature Architecture Dynamic View @@ -369,6 +385,8 @@ 
needs_types: optional_links: includes: ^logic_arc_int_op__.+$ fulfils: ^comp_req__.+$ + tags: + - architecture_element logic_arc_int_op: title: Logical Architecture Interface Operation @@ -382,6 +400,8 @@ needs_types: status: ^(valid|invalid)$ mandatory_links: included_by: ^logic_arc_int__.+$ + tags: + - architecture_element mod_view_sta: title: Module Architecture Static View @@ -416,6 +436,8 @@ needs_types: includes: ^comp_arc_sta__.+$ uses: ^real_arc_int(_op)*__.+$ fulfils: ^comp_req__.+$ + tags: + - architecture_element comp_arc_dyn: title: Component Architecture Dynamic View @@ -429,6 +451,8 @@ needs_types: status: ^(valid|invalid)$ optional_links: fulfils: ^comp_req__.+$ + tags: + - architecture_element real_arc_int: title: Component Architecture Interfaces diff --git a/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_title.rst b/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_title.rst index 41a46322..de3ae097 100644 --- a/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_title.rst +++ b/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_title.rst @@ -25,6 +25,18 @@ .. feat_req:: This is a test :id: feat_req__test__abce +.. Title of an architecture element contains a stop word +#EXPECT: logic_arc_int__test__abcd.title (This must work): contains a stop word: `must`. + +.. logic_arc_int:: This must work + :id: logic_arc_int__test__abcd + +.. Title of an architecture element contains no stop word +#EXPECT-NOT: logic_arc_int__test__abce.title (This is a test): contains a stop word + +.. logic_arc_int:: This is a test + :id: logic_arc_int__test__abce + .. Title of requirement of type std_wp is not checked for stop words #EXPECT-NOT: std_wp__test__abce.title (This must work): contains a stop word: `must`. 
From bae447dcc69e8c7726fee8420d048a914c155e19 Mon Sep 17 00:00:00 2001 From: Oliver Pajonk Date: Wed, 23 Jul 2025 10:55:12 +0200 Subject: [PATCH 071/231] Add DevContainer Support (#151) - added devcontainer.json - bumped Bazel version from 7.4.0 to 7.5.0 (the one present in the container) - added a small README note --- .bazelversion | 2 +- .devcontainer/devcontainer.json | 5 +++++ README.md | 4 ++++ 3 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 .devcontainer/devcontainer.json diff --git a/.bazelversion b/.bazelversion index ba7f754d..18bb4182 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -7.4.0 +7.5.0 diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 00000000..3a0ec07d --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,5 @@ +{ + "name": "eclipse-s-core", + "image": "ghcr.io/eclipse-score/devcontainer:latest", + "initializeCommand": "mkdir -p ${localEnv:HOME}/.cache/bazel" +} diff --git a/README.md b/README.md index d3281d92..2331f1c3 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,10 @@ Docs-as-code tooling for Eclipse S-CORE The S-CORE docs Sphinx configuration and build code. +> [!NOTE] +> This repository offers a [DevContainer](https://containers.dev/). +> For setting this up read [eclipse-score/devcontainer/README.md#inside-the-container](https://github.com/eclipse-score/devcontainer/blob/main/README.md#inside-the-container). 
+ ## Building documentation #### Run a documentation build: From 2d193476fb42bf6d2cfee91fa230a4370cd81696 Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Wed, 23 Jul 2025 11:19:33 +0200 Subject: [PATCH 072/231] =?UTF-8?q?Add=20Process=20Overview=20Table=20for?= =?UTF-8?q?=20Process=E2=80=93Tool=20Requirements=20Mapping=20(#133)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add Process overview table to the template --- docs/product/index.rst | 6 ++++++ docs/product/process_overview.rst | 10 ++++++++++ 2 files changed, 16 insertions(+) create mode 100644 docs/product/process_overview.rst diff --git a/docs/product/index.rst b/docs/product/index.rst index 00b7b660..e6f54bfd 100644 --- a/docs/product/index.rst +++ b/docs/product/index.rst @@ -42,6 +42,11 @@ Docs-as-Code Head over to our extensions to learn about what we offer and how to configure,extend or integrate them. :ref:`See our extensions here ` + .. grid-item-card:: + + Process requirements overview + ^^^ + See the :ref:`process_overview` table for a process requirements overview. .. toctree:: @@ -52,3 +57,4 @@ Docs-as-Code capabilities requirements extensions/index + process_overview diff --git a/docs/product/process_overview.rst b/docs/product/process_overview.rst new file mode 100644 index 00000000..64e120eb --- /dev/null +++ b/docs/product/process_overview.rst @@ -0,0 +1,10 @@ +.. _process_overview: + +=============================== +Process Requirements Overview +=============================== + +.. 
needtable:: + :types: tool_req + :columns: satisfies as "Process Requirement" ;id as "Tool Requirement";implemented;source_code_link + :style: table From 5c669283ccf2eceb3eb0d1c016730b9da0d1448b Mon Sep 17 00:00:00 2001 From: Oliver Pajonk Date: Wed, 23 Jul 2025 11:42:57 +0200 Subject: [PATCH 073/231] Automatically Execute ide_support Command (#157) * auto-execute ide_support command --- .devcontainer/devcontainer.json | 3 ++- .vscode/settings.json | 1 + README.md | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 3a0ec07d..affe8541 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,5 +1,6 @@ { "name": "eclipse-s-core", "image": "ghcr.io/eclipse-score/devcontainer:latest", - "initializeCommand": "mkdir -p ${localEnv:HOME}/.cache/bazel" + "initializeCommand": "mkdir -p ${localEnv:HOME}/.cache/bazel", + "postCreateCommand": "bazel run //src:ide_support" } diff --git a/.vscode/settings.json b/.vscode/settings.json index 478df016..a50be5ac 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -23,6 +23,7 @@ }, // // + "python.defaultInterpreterPath": "${workspaceFolder}/.venv/bin/python", "python.testing.pytestArgs": [ ".", "--ignore-glob=bazel-*/*", diff --git a/README.md b/README.md index 2331f1c3..2fbc7abe 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ bazel run //docs:incremental_latest #### Getting IDE support -Create the virtual environment via `bazel run //process:ide_support`.\ +Create the virtual environment via `bazel run //src:ide_support`.\ If your IDE does not automatically ask you to activate the newly created environment you can activate it. 
- In VSCode via `ctrl+p` => `Select Python Interpreter` then select `.venv/bin/python` From 73fd2ee9ba05997d454e13fcd75357f5184987f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Wed, 23 Jul 2025 12:52:58 +0200 Subject: [PATCH 074/231] consumer tests (#129) --- .github/workflows/consumer_test.yml | 28 ++ src/requirements.in | 1 + src/requirements.txt | 18 +- src/tests/test_consumer.py | 463 ++++++++++++++++++++++++++++ 4 files changed, 504 insertions(+), 6 deletions(-) create mode 100644 .github/workflows/consumer_test.yml create mode 100644 src/tests/test_consumer.py diff --git a/.github/workflows/consumer_test.yml b/.github/workflows/consumer_test.yml new file mode 100644 index 00000000..a92bd736 --- /dev/null +++ b/.github/workflows/consumer_test.yml @@ -0,0 +1,28 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +name: Run Consumer Tests on Comment +on: issue_comment + +jobs: + consumer_test: + if: ${{ github.event.issue.pull_request && contains(github.event.comment.body, '/consumer-test') }} + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4.2.2 + + - name: Run consumer tests + run: | + bazel run //src:ide_support + .venv/bin/python -m pytest -s -v src/tests/ diff --git a/src/requirements.in b/src/requirements.in index c2f81393..d798d7f5 100644 --- a/src/requirements.in +++ b/src/requirements.in @@ -22,3 +22,4 @@ esbonio<1 # Although not required in all targets, we want pytest within ide_support to run tests from the IDE. debugpy +rich diff --git a/src/requirements.txt b/src/requirements.txt index 076f2dc3..3bfc13bb 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -37,7 +37,7 @@ babel==2.17.0 \ basedpyright==1.29.2 \ --hash=sha256:12c49186003b9f69a028615da883ef97035ea2119a9e3f93a00091b3a27088a6 \ --hash=sha256:f389e2997de33d038c5065fd85bff351fbdc62fa6d6371c7b947fc3bce8d437d - # via -r /home/maxi/.cache/bazel/_bazel_maxi/f48cff2378be6375aa481f2a5c918c10/external/score_python_basics~/requirements.txt + # via -r external/score_python_basics~/requirements.txt beautifulsoup4==4.13.4 \ --hash=sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b \ --hash=sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195 @@ -438,7 +438,7 @@ iniconfig==2.1.0 \ --hash=sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7 \ --hash=sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760 # via - # -r 
/home/maxi/.cache/bazel/_bazel_maxi/f48cff2378be6375aa481f2a5c918c10/external/score_python_basics~/requirements.txt + # -r external/score_python_basics~/requirements.txt # pytest jinja2==3.1.6 \ --hash=sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d \ @@ -546,6 +546,7 @@ markdown-it-py==3.0.0 \ # via # mdit-py-plugins # myst-parser + # rich markupsafe==3.0.2 \ --hash=sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4 \ --hash=sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30 \ @@ -668,7 +669,7 @@ nodejs-wheel-binaries==22.16.0 \ --hash=sha256:d695832f026df3a0cf9a089d222225939de9d1b67f8f0a353b79f015aabbe7e2 \ --hash=sha256:dbfccbcd558d2f142ccf66d8c3a098022bf4436db9525b5b8d32169ce185d99e # via - # -r /home/maxi/.cache/bazel/_bazel_maxi/f48cff2378be6375aa481f2a5c918c10/external/score_python_basics~/requirements.txt + # -r external/score_python_basics~/requirements.txt # basedpyright numpy==2.2.5 \ --hash=sha256:0255732338c4fdd00996c0421884ea8a3651eea555c3a56b84892b66f696eb70 \ @@ -733,7 +734,7 @@ packaging==25.0 \ --hash=sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484 \ --hash=sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f # via - # -r /home/maxi/.cache/bazel/_bazel_maxi/f48cff2378be6375aa481f2a5c918c10/external/score_python_basics~/requirements.txt + # -r external/score_python_basics~/requirements.txt # matplotlib # pytest # sphinx @@ -828,7 +829,7 @@ pluggy==1.6.0 \ --hash=sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3 \ --hash=sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746 # via - # -r /home/maxi/.cache/bazel/_bazel_maxi/f48cff2378be6375aa481f2a5c918c10/external/score_python_basics~/requirements.txt + # -r external/score_python_basics~/requirements.txt # pytest pycparser==2.22 \ --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ @@ -852,6 +853,7 @@ pygments==2.19.1 \ # via 
# accessible-pygments # pydata-sphinx-theme + # rich # sphinx pyjwt[crypto]==2.10.1 \ --hash=sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953 \ @@ -880,7 +882,7 @@ pyspellchecker==0.8.2 \ pytest==8.3.5 \ --hash=sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820 \ --hash=sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845 - # via -r /home/maxi/.cache/bazel/_bazel_maxi/f48cff2378be6375aa481f2a5c918c10/external/score_python_basics~/requirements.txt + # via -r external/score_python_basics~/requirements.txt python-dateutil==2.9.0.post0 \ --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 @@ -958,6 +960,10 @@ requests-file==2.1.0 \ --hash=sha256:0f549a3f3b0699415ac04d167e9cb39bccfb730cb832b4d20be3d9867356e658 \ --hash=sha256:cf270de5a4c5874e84599fc5778303d496c10ae5e870bfa378818f35d21bda5c # via sphinx-needs +rich==14.0.0 \ + --hash=sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0 \ + --hash=sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725 + # via -r src/requirements.in roman-numerals-py==3.1.0 \ --hash=sha256:9da2ad2fb670bcf24e81070ceb3be72f6c11c440d73bd579fbeca1e9f330954c \ --hash=sha256:be4bf804f083a4ce001b5eb7e3c0862479d10f94c936f6c4e5f250aa5ff5bd2d diff --git a/src/tests/test_consumer.py b/src/tests/test_consumer.py new file mode 100644 index 00000000..bba49a92 --- /dev/null +++ b/src/tests/test_consumer.py @@ -0,0 +1,463 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +import subprocess +import pytest +import os +import re +import logging + + +from pathlib import Path +from rich import print +from rich.table import Table +from collections import defaultdict +from pytest import TempPathFactory +from dataclasses import dataclass, field + +from src.extensions.score_source_code_linker.generate_source_code_links_json import ( + find_git_root, +) +from src.extensions.score_source_code_linker import get_github_base_url + +""" +This script's main usecase is to test consumers of Docs-As-Code with the new changes made in PR's. +This enables us to find new issues and problems we introduce with changes that we otherwise would only know much later. +There are several things to note. + +- The `print` function has been overwritten by the 'rich' package to allow for richer text output. +- The script itself takes quiet a bit of time, roughly 5+ min for a full run. 
+- If you need more output, enable it via `-v` or `-vv` +- Start the script via the following command: + - bazel run //src:ide_support + - .venv/bin/python -m pytest -s src/tests (If you need more verbosity add `-v` or `-vv`) +""" + +# Max width of the printout +len_max = 80 + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel("DEBUG") + + +@dataclass +class ConsumerRepo: + name: str + git_url: str + commands: list[str] + test_commands: list[str] + + +@dataclass +class BuildOutput: + returncode: int + stdout: str + stderr: str + warnings: dict[str, list[str]] = field(default_factory=dict) + + +@dataclass +class Result: + repo: str + cmd: str + local_or_git: str + passed: bool + reason: str + + +REPOS_TO_TEST: list[ConsumerRepo] = [ + ConsumerRepo( + name="process_description", + git_url="git@github.com:eclipse-score/process_description.git", + commands=["bazel run //process:incremental_latest"], + test_commands=[], + ), + ConsumerRepo( + name="score", + git_url="git@github.com:eclipse-score/score.git", + commands=[ + "bazel run //docs:incremental_latest", + "bazel run //docs:ide_support", + "bazel run //docs:incremental_release", + "bazel build //docs:docs_release", + "bazel build //docs:docs_latest", + ], + test_commands=[], + ), + ConsumerRepo( + name="module_template", + git_url="git@github.com:eclipse-score/module_template.git", + commands=[ + "bazel run //docs:ide_support", + "bazel run //docs:incremental", + "bazel build //docs:docs", + ], + test_commands=[ + "bazel test //tests/...", + ], + ), +] + + +@pytest.fixture(scope="session") +def sphinx_base_dir(tmp_path_factory: TempPathFactory) -> Path: + return tmp_path_factory.mktemp("testing_dir") + + +def get_current_git_commit(curr_path: Path): + """ + Get the current git commit hash (HEAD). 
+ """ + result = subprocess.run( + ["git", "rev-parse", "HEAD"], + capture_output=True, + text=True, + check=True, + cwd=curr_path, + ) + return result.stdout.strip() + + +def replace_bazel_dep_with_local_override(module_content: str) -> str: + """ """ + + # Pattern to match the bazel_dep line + pattern = rf'bazel_dep\(name = "score_docs_as_code", version = "[^"]+"\)' + + # Replacement with local_path_override + replacement = f"""bazel_dep(name = "score_docs_as_code", version = "0.0.0") +local_path_override( + module_name = "score_docs_as_code", + path = "../docs_as_code" +)""" + + modified_content = re.sub(pattern, replacement, module_content) + + return modified_content + + +def replace_bazel_dep_with_git_override( + module_content: str, git_hash: str, gh_url: str +) -> str: + pattern = rf'bazel_dep\(name = "score_docs_as_code", version = "[^"]+"\)' + + replacement = f'''bazel_dep(name = "score_docs_as_code", version = "0.0.0") +git_override( + module_name = "score_docs_as_code", + remote = "{gh_url}", + commit = "{git_hash}" +)''' + + modified_content = re.sub(pattern, replacement, module_content) + + return modified_content + + +def parse_bazel_output(BR: BuildOutput) -> BuildOutput: + err_lines = BR.stderr.splitlines() + split_warnings = [x for x in err_lines if "WARNING: " in x] + warning_dict: dict[str, list[str]] = defaultdict(list) + + for raw_warning in split_warnings: + logger = "[NO SPECIFIC LOGGER]" + file_and_warning = raw_warning + # If this is the case we have a specific logger => therefore parsing it + if raw_warning.endswith("]"): + tmp_split_warning = raw_warning.split() + logger = tmp_split_warning[-1].upper() # [score_metamodel] + file_and_warning = raw_warning.replace(logger, "").rstrip() + warning_dict[logger].append(file_and_warning) + BR.warnings = warning_dict + return BR + + +def print_overview_logs(BR: BuildOutput): + warning_loggers = list(BR.warnings.keys()) + len_left_test_result = len_max - len("TEST RESULTS") + print( + f"[blue]{'=' 
* int(len_left_test_result / 2)}TEST RESULTS{'=' * int(len_left_test_result / 2)}[/blue]" + ) + print(f"[navy_blue]{'=' * len_max}[/navy_blue]") + warning_total_loggers_msg = f"Warning Loggers Total: {len(warning_loggers)}" + len_left_loggers = len_max - len(warning_total_loggers_msg) + print( + f"[blue]{'=' * int(len_left_loggers / 2)}{warning_total_loggers_msg}{'=' * int(len_left_loggers / 2)}[/blue]" + ) + warning_loggers = list(BR.warnings.keys()) + warning_total_msg = f"Logger Warnings Accumulated" + len_left_loggers_total = len_max - len(warning_total_msg) + print( + f"[blue]{'=' * int(len_left_loggers_total / 2)}{warning_total_msg}{'=' * int(len_left_loggers_total / 2)}[/blue]" + ) + for logger in warning_loggers: + if len(BR.warnings[logger]) == 0: + continue + color = "orange1" if logger == "[NO SPECIFIC LOGGER]" else "red" + warning_logger_msg = f"{logger} has {len(BR.warnings[logger])} warnings" + len_left_logger = len_max - len(warning_logger_msg) + print( + f"[{color}]{'=' * int(len_left_logger / 2)}{warning_logger_msg}{'=' * int(len_left_logger / 2)}[/{color}]" + ) + print(f"[blue]{'=' * len_max}[/blue]") + + +def verbose_printout(BR: BuildOutput): + """Prints warnings for each logger when '-v' or higher is specified.""" + warning_loggers = list(BR.warnings.keys()) + for logger in warning_loggers: + len_left_logger = len_max - len(logger) + print( + f"[cornflower_blue]{'=' * int(len_left_logger / 2)}{logger}{'=' * int(len_left_logger / 2)}[/cornflower_blue]" + ) + warnings = BR.warnings[logger] + len_left_warnings = len_max - len(f"Warnings Found: {len(warnings)}\n") + color = "red" + if logger == "[NO SPECIFIC LOGGER]": + color = "orange1" + print( + f"[{color}]{'=' * int(len_left_warnings / 2)}{f'Warnings Found: {len(warnings)}'}{'=' * int(len_left_warnings / 2)}[/{color}]" + ) + print("\n".join(f"[{color}]{x}[/{color}]" for x in warnings)) + + +def print_running_cmd(repo: str, cmd: str, local_or_git: str): + """Prints a 'Title Card' for the current 
command""" + len_left_cmd = len_max - len(cmd) + len_left_repo = len_max - len(repo) + len_left_local = len_max - len(local_or_git) + print(f"\n[cyan]{'=' * len_max}[/cyan]") + print( + f"[cornflower_blue]{'=' * int(len_left_repo / 2)}{repo}{'=' * int(len_left_repo / 2)}[/cornflower_blue]" + ) + print( + f"[cornflower_blue]{'=' * int(len_left_local / 2)}{local_or_git}{'=' * int(len_left_local / 2)}[/cornflower_blue]" + ) + print( + f"[cornflower_blue]{'=' * int(len_left_cmd / 2)}{cmd}{'=' * int(len_left_cmd / 2)}[/cornflower_blue]" + ) + print(f"[cyan]{'=' * len_max}[/cyan]") + + +def analyze_build_success(BR: BuildOutput) -> tuple[bool, str]: + """ + Analyze if the build should be considered successful based on your rules. + + Rules: + - '[NO SPECIFIC LOGGER]' warnings are always ignored + - '[SCORE_METAMODEL]' warnings are ignored only if metamodel_changed is True + """ + + # Unsure if this is good, as sometimes the returncode is 1 but it should still go through? + # Logging for feedback here + if BR.returncode != 0: + return False, f"Build failed with return code {BR.returncode}" + + # Check for critical/non ignored warnings + critical_warnings = [] + + for logger, warnings in BR.warnings.items(): + if logger == "[NO SPECIFIC LOGGER]": + # Always ignore these + continue + else: + # Any other logger is critical/not ignored + critical_warnings.extend(warnings) + + if critical_warnings: + return False, f"Found {len(critical_warnings)} critical warnings" + + return True, "Build successful - no critical warnings" + + +def print_final_result(BR: BuildOutput, repo_name: str, cmd: str, pytestconfig): + """ + Print your existing detailed output plus a clear success/failure summary + """ + print_overview_logs(BR) + if pytestconfig.get_verbosity() >= 1: + # Verbosity Level 1 (-v) + verbose_printout(BR) + if pytestconfig.get_verbosity() >= 2: + # Verbosity Level 2 (-vv) + print("==== STDOUT ====:\n\n", BR.stdout) + print("==== STDERR ====:\n\n", BR.stderr) + + is_success, 
reason = analyze_build_success(BR) + + status = "OK PASSED" if is_success else "XX FAILED" + color = "green" if is_success else "red" + + # Printing a small 'report' for each cmd. + result_msg = f"{repo_name} - {cmd}: {status}" + len_left = len_max - len(result_msg) + print( + f"[{color}]{'=' * int(len_left / 2)}{result_msg}{'=' * int(len_left / 2)}[/{color}]" + ) + print(f"[{color}]Reason: {reason}[/{color}]") + print(f"[{color}]{'=' * len_max}[/{color}]") + + return is_success, reason + + +def print_result_table(results: list[Result]): + """Printing an 'overview' table to show all results.""" + table = Table(title="Docs-As-Code Consumer Test Result") + table.add_column("Repository") + table.add_column("CMD") + table.add_column("LOCAL OR GIT") + table.add_column("PASSED") + table.add_column("REASON") + for result in results: + style = "green" if result.passed else "red" + table.add_row( + result.repo, + result.cmd, + result.local_or_git, + str(result.passed), + result.reason, + style=style, + ) + print(table) + + +def run_cmd( + cmd: str, results: list[Result], repo_name: str, local_or_git: str, pytestconfig +) -> tuple[list[Result], bool]: + out = subprocess.run(cmd.split(), capture_output=True, text=True) + + BR = BuildOutput( + returncode=out.returncode, + stdout=str(out.stdout), + stderr=str(out.stderr), + ) + BR_parsed = parse_bazel_output(BR) + + is_success, reason = print_final_result(BR_parsed, repo_name, cmd, pytestconfig) + + results.append( + Result( + repo=repo_name, + cmd=cmd, + local_or_git=local_or_git, + passed=is_success, + reason=reason, + ) + ) + + return results, is_success + + +def run_test_commands(): + pass + + +def setup_test_environment(sphinx_base_dir): + """Set up the test environment and return necessary paths and metadata.""" + os.chdir(sphinx_base_dir) + curr_path = Path(__file__).parent + git_root = find_git_root(curr_path) + + if git_root is None: + assert False, "Git root was none" + + # Get GitHub URL and current hash for git 
override + gh_url = get_github_base_url(git_root) + current_hash = get_current_git_commit(curr_path) + + # Create symlink for local docs-as-code + docs_as_code_dest = sphinx_base_dir / "docs_as_code" + docs_as_code_dest.symlink_to(git_root) + + return curr_path, git_root, gh_url, current_hash + + +def prepare_repo_overrides(repo_name, git_url, current_hash, gh_url): + """Clone repo and prepare both local and git overrides.""" + # Clone the repository + subprocess.run(["git", "clone", git_url], check=True, capture_output=True) + os.chdir(repo_name) + + # Read original MODULE.bazel + with open("MODULE.bazel", "r") as f: + module_orig = f.read() + + # Prepare override versions + module_local_override = replace_bazel_dep_with_local_override(module_orig) + module_git_override = replace_bazel_dep_with_git_override( + module_orig, current_hash, gh_url + ) + + return module_local_override, module_git_override + + +# Updated version of your test loop +def test_and_clone_repos_updated(sphinx_base_dir, pytestconfig): + # Setting up the Test Environment + + # This might be hacky, but currently the best way I could solve the issue of going to the right place. + curr_path, git_root, gh_url, current_hash = setup_test_environment(sphinx_base_dir) + + overall_success = True + + # We capture the results for each command run. 
+ results: list[Result] = [] + + for repo in REPOS_TO_TEST: + # ╭──────────────────────────────────────╮ + # │ Preparing the Repository for testing │ + # ╰──────────────────────────────────────╯ + module_local_override, module_git_override = prepare_repo_overrides( + repo.name, repo.git_url, current_hash, gh_url + ) + overrides = {"local": module_local_override, "git": module_git_override} + for type, override_content in overrides.items(): + with open("MODULE.bazel", "w") as f: + f.write(override_content) + + # ╭──────────────────────────────────────╮ + # │ Running the different build & run │ + # │ commands │ + # ╰──────────────────────────────────────╯ + for cmd in repo.commands: + print_running_cmd(repo.name, cmd, f"{type.upper()} OVERRIDE") + # Running through all 'cmds' specified with the local override + gotten_results, is_success = run_cmd( + cmd, results, repo.name, type, pytestconfig + ) + results = gotten_results + if not is_success: + overall_success = False + + # ╭──────────────────────────────────────╮ + # │ Running the different test commands │ + # ╰──────────────────────────────────────╯ + for test_cmd in repo.test_commands: + # Running through all 'test cmds' specified with the local override + print_running_cmd(repo.name, test_cmd, "LOCAL OVERRIDE") + + gotten_results, is_success = run_cmd( + test_cmd, results, repo.name, "local", pytestconfig + ) + results = gotten_results + + if not is_success: + overall_success = False + + # NOTE: We have to change directories back to the parent, otherwise the cloning & override will not be correct + os.chdir(Path.cwd().parent) + + # Printing a 'overview' table as a result + print_result_table(results) + assert overall_success, ( + "Consumer Tests failed, see table for which commands specifically. 
Enable verbosity for warning/error printouts" + ) From e81f9e95fa779dfc5d6f5dd5fd929787b3a1b1e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Wed, 23 Jul 2025 14:20:21 +0200 Subject: [PATCH 075/231] fix consumer tests (#161) --- .github/workflows/consumer_test.yml | 31 ++++++++++++++++++++++++++++- src/tests/test_consumer.py | 6 +++--- 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/.github/workflows/consumer_test.yml b/.github/workflows/consumer_test.yml index a92bd736..a68dc217 100644 --- a/.github/workflows/consumer_test.yml +++ b/.github/workflows/consumer_test.yml @@ -22,7 +22,36 @@ jobs: - name: Checkout repository uses: actions/checkout@v4.2.2 - - name: Run consumer tests + + - name: Run Consumer tests + id: consumer_tests run: | bazel run //src:ide_support .venv/bin/python -m pytest -s -v src/tests/ + + - name: Report Consumer Tests Check + if: always() + uses: actions/github-script@v7 + with: + script: | + const outcome = core.getInput('consumer_test_result'); + const conclusion = outcome === 'success' ? 'success' : + outcome === 'skipped' ? 
'neutral' : 'failure'; + + await github.rest.checks.create({ + owner: context.repo.owner, + repo: context.repo.repo, + name: "Consumer Tests", + head_sha: context.sha, + status: "completed", + conclusion: conclusion, + output: { + title: "Consumer Tests Result", + summary: `The Consumer Tests step concluded with: ${conclusion}` + } + }); + consumer_test_result: ${{ steps.consumer_tests.outcome }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + diff --git a/src/tests/test_consumer.py b/src/tests/test_consumer.py index bba49a92..c49f1e78 100644 --- a/src/tests/test_consumer.py +++ b/src/tests/test_consumer.py @@ -78,13 +78,13 @@ class Result: REPOS_TO_TEST: list[ConsumerRepo] = [ ConsumerRepo( name="process_description", - git_url="git@github.com:eclipse-score/process_description.git", + git_url="https://github.com/eclipse-score/process_description.git", commands=["bazel run //process:incremental_latest"], test_commands=[], ), ConsumerRepo( name="score", - git_url="git@github.com:eclipse-score/score.git", + git_url="https://github.com/eclipse-score/score.git", commands=[ "bazel run //docs:incremental_latest", "bazel run //docs:ide_support", @@ -96,7 +96,7 @@ class Result: ), ConsumerRepo( name="module_template", - git_url="git@github.com:eclipse-score/module_template.git", + git_url="https://github.com/eclipse-score/module_template.git", commands=[ "bazel run //docs:ide_support", "bazel run //docs:incremental", From 110032cf7cb52d86cb0d752f68f097f5aa836cbd Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Wed, 23 Jul 2025 16:29:55 +0200 Subject: [PATCH 076/231] Update LICENSE and NOTICE files (#163) --- LICENSE | 184 +++++++++++++++++++++++++++++++++++++++++++++++++++++--- NOTICE | 32 ++++++++++ 2 files changed, 206 insertions(+), 10 deletions(-) create mode 100644 NOTICE diff --git a/LICENSE b/LICENSE index 8c69a8bc..f433b1a5 100644 --- a/LICENSE +++ b/LICENSE @@ -1,13 +1,177 @@ -Copyright 2025 Contributors to the Eclipse Foundation -Licensed under the 
Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - http://www.apache.org/licenses/LICENSE-2.0 + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/NOTICE b/NOTICE new file mode 100644 index 00000000..5d111d48 --- /dev/null +++ b/NOTICE @@ -0,0 +1,32 @@ +# Notices for Eclipse Safe Open Vehicle Core + +This content is produced and maintained by the Eclipse Safe Open Vehicle Core project. + + * Project home: https://projects.eclipse.org/projects/automotive.score + +## Trademarks + +Eclipse, and the Eclipse Logo are registered trademarks of the Eclipse Foundation. + +## Copyright + +All content is the property of the respective authors or their employers. 
+For more information regarding authorship of content, please consult the +listed source code repository logs. + +## Declared Project Licenses + +This program and the accompanying materials are made available under the terms +of the Apache License Version 2.0 which is available at +https://www.apache.org/licenses/LICENSE-2.0. + +SPDX-License-Identifier: Apache-2.0 + +## Cryptography + +Content may contain encryption software. The country in which you are currently +may have restrictions on the import, possession, and use, and/or re-export to +another country, of encryption software. BEFORE using any encryption software, +please check the country's laws, regulations and policies concerning the import, +possession, or use, and re-export of encryption software, to see if this is +permitted. From eaec7c6e019a0123166f9d039bad53f2e887dd37 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Wed, 23 Jul 2025 20:23:58 +0200 Subject: [PATCH 077/231] fill FAQ with recent questions and answers (#164) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Maximilian Sören Pollak --- docs/how-to-use/faq.md | 110 ++++++++++++++++++++++++++++++++++++++++ docs/how-to-use/faq.rst | 22 -------- 2 files changed, 110 insertions(+), 22 deletions(-) create mode 100644 docs/how-to-use/faq.md delete mode 100644 docs/how-to-use/faq.rst diff --git a/docs/how-to-use/faq.md b/docs/how-to-use/faq.md new file mode 100644 index 00000000..eb68a69d --- /dev/null +++ b/docs/how-to-use/faq.md @@ -0,0 +1,110 @@ + + +# docs-as-code FAQ + +*docs-as-code is the S-CORE tool for building documentation, defining requirements and +verifying compliance.* + +In this document you will find answers to frequently asked questions regarding +docs-as-code and its usage. + + +## Why is docs-as-code so slow? + + +If you are experiencing slow performance, you might be using the deprecated `docs:docs` +target. 
Please try one of the following solutions: + - `bazel run //docs:incremental` (typically takes 5-15 seconds per iteration and + provides metamodel warnings on the command line) + - `bazel run //docs:live_preview` (runs continuously in the background and provides + metamodel warnings on the command line) + +Note: In some repositories, you may need to append `_release` to the target name, e.g., +`bazel run //docs:incremental_release`. + + + +## IDE support (auto completion, metamodel checks, preview, LSP capabilities) + +Currently, IDE support for docs-as-code is limited. Improving this is on our roadmap, +but not a primary focus at the moment. **Which might be a major oversight on our side.** + +In the meantime, we recommend using the live preview feature: `bazel run +//docs:live_preview`. This provides immediate metamodel feedback (although only on the +console) and IDE-agnostic preview capabilities. + + +### Esbonio + +Known issues: +* Dependencies are not available. We'll address this by dropping support for "latest" + targets and pinning all dependencies to specific versions via Bazel. +* Python is required at startup, which is a problem for any Python-based LSP. We are + working to improve this by providing a devcontainer with Python preinstalled. + Additionally, we have submitted a feature request for Esbonio to handle Python + installation. + + +### uBc + +Currently, uBc is not aware of our metamodel. As a result, checks and auto-completion +features are not available. + +We plan to explore improvements in this area in the future together with useblocks. + + + +## Do we need to write custom Python code for every Metamodel check? +With our current approach, allowed attributes and links for Needs are defined within the +`metamodel.yml` file. If the check can be fully described there (e.g., process +requirements are only allowed to link to stakeholder requirements), no custom code is +needed. 
It is also not necessary to write individual tests for every single check +performed by the metamodel. + +Only a few very specific checks require custom Python code beyond the generic metamodel +capabilities. These are cases that cannot be addressed by generic metamodel approaches +in any tool. For example: "the middle part of certain IDs must match the directory name +of the file." + + +## How can I be sure that the Metamodel does what I want it to do? +We use *examples* written in reStructuredText (rst) to verify that the metamodel has +been configured as intended. + +Metamodel checks are verified through standard testing practices, like any other code. +The examples mentioned above are helpful, but they are only examples. They are not +mandatory for verification of the metamodel checks. + + + +## Sphinx and safety +It is important to distinguish between metamodel checks and HTML rendering. + +Metamodel checks can be verified / qualified without Sphinx. + +If the renderer is safety-relevant, then qualification of Sphinx (and Sphinx Needs) is +required. This is currently under evaluation by the process team (@aschemmel-tech). + + +## What about versioning of requirements? +We are currently discussing possible implementations to enable linking to specific +versions of requirements (e.g. `implements: req-5@v3.0.0`). + + +## Sphinx traceability +It is possible to link requirements from other requirements, from source code, or from +tests (tests within the next days). + +### What about bazel targets? +Bazel targets are not involved in traceability (currently not required by process). diff --git a/docs/how-to-use/faq.rst b/docs/how-to-use/faq.rst deleted file mode 100644 index 75adbe53..00000000 --- a/docs/how-to-use/faq.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. 
# ******************************************************************************* - # Copyright (c) 2025 Contributors to the Eclipse Foundation - # - # See the NOTICE file(s) distributed with this work for additional - # information regarding copyright ownership. - # - # This program and the accompanying materials are made available under the - # terms of the Apache License Version 2.0 which is available at - # https://www.apache.org/licenses/LICENSE-2.0 - # - # SPDX-License-Identifier: Apache-2.0 - # ******************************************************************************* - - -FAQ -=== - -In this document you will find answers to frequently asked questions regarding docs-as-code and it's usage. - - -TODO: Everything ----------------- From 6b181a88df16056185e1f3730d0ecd5e25825051 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Thu, 24 Jul 2025 10:20:48 +0200 Subject: [PATCH 078/231] Add permissions to workflow (#165) This should allow for the Workflow to comment on the PR --- .github/workflows/consumer_test.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/consumer_test.yml b/.github/workflows/consumer_test.yml index a68dc217..3dc7b882 100644 --- a/.github/workflows/consumer_test.yml +++ b/.github/workflows/consumer_test.yml @@ -14,6 +14,11 @@ name: Run Consumer Tests on Comment on: issue_comment +permissions: + checks: write + contents: read + pull-requests: read + jobs: consumer_test: if: ${{ github.event.issue.pull_request && contains(github.event.comment.body, '/consumer-test') }} From bd0c101615d2e3d2d3f2af533278244403c4f667 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Thu, 24 Jul 2025 12:15:17 +0200 Subject: [PATCH 079/231] Fixing consumer_test triggers & color output (#167) * Fixing consumer_test triggers & color output --- .github/workflows/consumer_test.yml | 66 +++++++++++++++++++++-------- src/tests/test_consumer.py | 3 ++ 2 files changed, 51 
insertions(+), 18 deletions(-) diff --git a/.github/workflows/consumer_test.yml b/.github/workflows/consumer_test.yml index 3dc7b882..b5cce38f 100644 --- a/.github/workflows/consumer_test.yml +++ b/.github/workflows/consumer_test.yml @@ -15,7 +15,7 @@ name: Run Consumer Tests on Comment on: issue_comment permissions: - checks: write + statuses: write contents: read pull-requests: read @@ -24,39 +24,69 @@ jobs: if: ${{ github.event.issue.pull_request && contains(github.event.comment.body, '/consumer-test') }} runs-on: ubuntu-latest steps: - - name: Checkout repository + - name: Get PR details + id: pr_details + uses: actions/github-script@v7 + with: + script: | + const { data: pr } = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: context.issue.number + }); + core.setOutput('head_sha', pr.head.sha); + core.setOutput('head_ref', pr.head.ref); + + - name: Checkout PR uses: actions/checkout@v4.2.2 + with: + ref: ${{ steps.pr_details.outputs.head_ref }} + - name: Set Consumer Tests Status - Pending + uses: actions/github-script@v7 + with: + script: | + await github.rest.repos.createCommitStatus({ + owner: context.repo.owner, + repo: context.repo.repo, + sha: '${{ steps.pr_details.outputs.head_sha }}', + state: 'pending', + context: 'Consumer Tests (Manual)', + description: 'Running consumer tests (manually triggered by @${{ github.event.comment.user.login }})', + target_url: `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${{ github.run_id }}` + }); + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Run Consumer tests id: consumer_tests run: | bazel run //src:ide_support .venv/bin/python -m pytest -s -v src/tests/ + env: + FORCE_COLOR: "1" + TERM: xterm-256color + PYTHONUNBUFFERED: "1" - - name: Report Consumer Tests Check + - name: Report Consumer Tests Status if: always() uses: actions/github-script@v7 with: script: | - const outcome = core.getInput('consumer_test_result'); - 
const conclusion = outcome === 'success' ? 'success' : - outcome === 'skipped' ? 'neutral' : 'failure'; + const outcome = '${{ steps.consumer_tests.outcome }}'; + const state = outcome === 'success' ? 'success' : 'failure'; + + console.log(`Test outcome: ${outcome}, state: ${state}`); + console.log(`Head SHA: ${{ steps.pr_details.outputs.head_sha }}`); - await github.rest.checks.create({ + await github.rest.repos.createCommitStatus({ owner: context.repo.owner, repo: context.repo.repo, - name: "Consumer Tests", - head_sha: context.sha, - status: "completed", - conclusion: conclusion, - output: { - title: "Consumer Tests Result", - summary: `The Consumer Tests step concluded with: ${conclusion}` - } + sha: '${{ steps.pr_details.outputs.head_sha }}', + state: state, + context: 'Consumer Tests (Manual)', + description: `Consumer tests ${outcome} (manually triggered by @${{ github.event.comment.user.login }})`, + target_url: `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${{ github.run_id }}` }); - consumer_test_result: ${{ steps.consumer_tests.outcome }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - diff --git a/src/tests/test_consumer.py b/src/tests/test_consumer.py index c49f1e78..7cf68ab4 100644 --- a/src/tests/test_consumer.py +++ b/src/tests/test_consumer.py @@ -20,6 +20,7 @@ from pathlib import Path from rich import print from rich.table import Table +from rich.console import Console from collections import defaultdict from pytest import TempPathFactory from dataclasses import dataclass, field @@ -49,6 +50,8 @@ LOGGER = logging.getLogger(__name__) LOGGER.setLevel("DEBUG") +console = Console(force_terminal=True if os.getenv("CI") else None, width=120) + @dataclass class ConsumerRepo: From f101ba85dda6c50a8041c852e13e3bb8214f66bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Thu, 24 Jul 2025 12:58:02 +0200 Subject: [PATCH 080/231] add mandatory content check (#152) --- 
docs/product/requirements.rst | 5 +- .../score_metamodel/checks/check_options.py | 3 +- src/extensions/score_metamodel/metamodel.yaml | 15 ++- .../rst/options/test_options_options.rst | 99 ++++++++++++++----- .../tests/test_rules_file_based.py | 3 +- 5 files changed, 91 insertions(+), 34 deletions(-) diff --git a/docs/product/requirements.rst b/docs/product/requirements.rst index edf39d69..68f9c4d1 100644 --- a/docs/product/requirements.rst +++ b/docs/product/requirements.rst @@ -110,9 +110,10 @@ This section provides an overview of current process requirements and their clar :id: tool_req__docs_common_attr_description :tags: Common Attributes :parent_covered: NO: Can not cover 'ISO/IEC/IEEE/29148' - :implemented: NO + :implemented: YES + :satisfies: PROCESS_gd_req__req__attr_description - Docs-as-Code shall enforce that each Need contains a description (content). + Docs-as-Code shall enforce that each need of type :need:`tool_req__docs_req_types` has a description (content) ---------------------------- 🔒 Security Classification diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index ca675cd5..a5a2a66e 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -11,7 +11,6 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* import re -from collections.abc import Generator from score_metamodel import ( CheckLogger, @@ -26,7 +25,7 @@ CheckingDictType = dict[str, list[FieldCheck]] -def get_need_type(needs_types: list[NeedType], directive: str): +def get_need_type(needs_types: list[NeedType], directive: str) -> NeedType: for need_type in needs_types: assert isinstance(need_type, dict), need_type if need_type["directive"] == directive: diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 
ecc1fa82..28d19b50 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -73,8 +73,6 @@ needs_types: status: ^(valid)$ optional_links: links: ^.*$ - tags: - - requirement std_wp: title: Standard Work Product @@ -109,6 +107,7 @@ needs_types: id: ^gd_req__[0-9a-z_]*$ # req-Id: tool_req__docs_common_attr_status status: ^(valid|draft)$ + content: ^[\s\S]+$ optional_links: complies: ^std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ # req-Id: tool_req__docs_req_link_satisfies_allowed @@ -222,6 +221,8 @@ needs_types: reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ + # WARNING: THis will be activated again with new process release (1.1.0) + # content: ^[\s\S]+$ # req-Id: tool_req__docs_req_attr_rationale rationale: ^.+$ optional_options: @@ -248,6 +249,7 @@ needs_types: security: ^(YES|NO)$ safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ + content: ^[\s\S]+$ mandatory_links: # req-Id: tool_req__docs_req_link_satisfies_allowed satisfies: ^stkh_req__.*$ @@ -273,6 +275,7 @@ needs_types: security: ^(YES|NO)$ safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ + content: ^[\s\S]+$ mandatory_links: # req-Id: tool_req__docs_req_link_satisfies_allowed satisfies: ^feat_req__.*$ @@ -296,6 +299,7 @@ needs_types: security: ^(YES|NO)$ safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ + content: ^[\s\S]+$ optional_links: # req-Id: tool_req__docs_req_link_satisfies_allowed satisfies: ^.*$ @@ -327,6 +331,7 @@ needs_types: security: ^(YES|NO)$ safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ + content: ^[\s\S]+$ optional_options: codelink: ^.*$ testlink: ^.*$ @@ -371,6 +376,8 @@ needs_types: status: ^(valid|invalid)$ mandatory_links: fulfils: ^feat_req__.+$ + tags: + - architecture_element logic_arc_int: title: Logical Architecture Interfaces @@ -467,6 +474,8 @@ needs_types: language: ^(cpp|rust)$ optional_links: fulfils: ^comp_req__.+$ + tags: + - 
architecture_element real_arc_int_op: title: Component Architecture Interface Operation @@ -482,6 +491,8 @@ needs_types: included_by: ^real_arc_int__.+$ optional_links: implements: ^logic_arc_int_op__.+$ + tags: + - architecture_element review_header: prefix: review__header diff --git a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst index 6ec03192..931d3266 100644 --- a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst +++ b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst @@ -11,16 +11,18 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -#CHECK: check_options +#CHECK: check_options, check_content -.. Required option: `status` is missing +.. + Required option: `status` is missing #EXPECT: std_wp__test__abcd: is missing required option: `status`. .. std_wp:: This is a test :id: std_wp__test__abcd + .. All required options are present #EXPECT-NOT: std_wp__test__abcd: is missing required option @@ -29,6 +31,7 @@ :status: active + .. Required link `satisfies` refers to wrong requirement type #EXPECT: feat_req__abce.satisfies (['std_wp__test__abce']): does not follow pattern `^stkh_req__.*$`. @@ -36,30 +39,6 @@ :id: feat_req__abce :satisfies: std_wp__test__abce -.. Optional link `supported_by` refers to wrong requirement type - This check is disabled in check_options.py:114 - #EXPECT: wf__abcd.supported_by (['feat_req__abce']): does not follow pattern `^rl__.*$`. - - .. std_wp:: This is a test - :id: wf__abcd - :supported_by: feat_req__abce - -.. Optional link `supported_by` refers to the correct requirement type - This check is disabled in check_options.py:114 - #EXPECT-NOT: does not follow pattern `^rl__.*$`. - - .. std_wp:: This is a test - :id: wf__abcd - :supported_by: rl__abcd - - .. rl:: This is a test - :id: rl__abcd - - .. 
Required link: `satisfies` is missing - #EXPECT: feat_req__abcf: is missing required link: `satisfies`. - - .. feat_req:: Child requirement - :id: feat_req__abcf .. All required links are present @@ -73,6 +52,7 @@ :id: stkh_req__abcd + .. Test if the `sufficient` option for Safety Analysis (FMEA and DFA) follows the pattern `^(yes|no)$` #EXPECT: feat_saf_fmea__test__bad_1.sufficient (QM): does not follow pattern `^(yes|no)$`. @@ -80,84 +60,98 @@ :id: feat_saf_fmea__test__bad_1 :sufficient: QM + #EXPECT-NOT: feat_saf_fmea__test__good_2.sufficient (yes): does not follow pattern `^(yes|no)$`. .. feat_saf_fmea:: This is a test :id: feat_saf_fmea__test__2 :sufficient: yes + #EXPECT-NOT: feat_saf_fmea__test__good_3.sufficient (no): does not follow pattern `^(yes|no)$`. .. feat_saf_fmea:: This is a test :id: feat_saf_fmea__test__3 :sufficient: no + #EXPECT: comp_saf_fmea__test__bad_4.sufficient (QM): does not follow pattern `^(yes|no)$`. .. comp_saf_fmea:: This is a test :id: comp_saf_fmea__test__bad_4 :sufficient: QM + #EXPECT-NOT: comp_saf_fmea__test__good_5.sufficient (yes): does not follow pattern `^(yes|no)$`. .. comp_saf_fmea:: This is a test :id: comp_saf_fmea__test__5 :sufficient: yes + #EXPECT-NOT: comp_saf_fmea__test__good_6.sufficient (no): does not follow pattern `^(yes|no)$`. .. comp_saf_fmea:: This is a test :id: comp_saf_fmea__test__6 :sufficient: no + #EXPECT: feat_plat_saf_dfa__test__bad_7.sufficient (QM): does not follow pattern `^(yes|no)$`. .. feat_plat_saf_dfa:: This is a test :id: feat_plat_saf_dfa__test__bad_7 :sufficient: QM + #EXPECT-NOT: feat_plat_saf_dfa__test__good_8.sufficient (yes): does not follow pattern `^(yes|no)$`. .. feat_plat_saf_dfa:: This is a test :id: feat_plat_saf_dfa__test__8 :sufficient: yes + #EXPECT-NOT: feat_plat_saf_dfa__test__good_9.sufficient (no): does not follow pattern `^(yes|no)$`. .. 
feat_plat_saf_dfa:: This is a test :id: feat_plat_saf_dfa__test__9 :sufficient: no + #EXPECT: feat_saf_dfa__test__bad_10.sufficient (QM): does not follow pattern `^(yes|no)$`. .. feat_saf_dfa:: This is a test :id: feat_saf_dfa__test__bad_10 :sufficient: QM + #EXPECT-NOT: feat_saf_dfa__test__good_11.sufficient (yes): does not follow pattern `^(yes|no)$`. .. feat_saf_dfa:: This is a test :id: feat_saf_dfa__test__11 :sufficient: yes + #EXPECT-NOT: feat_saf_dfa__test__good_12.sufficient (no): does not follow pattern `^(yes|no)$`. .. feat_saf_dfa:: This is a test :id: feat_saf_dfa__test__12 :sufficient: no + #EXPECT: comp_saf_dfa__test__bad_13.sufficient (QM): does not follow pattern `^(yes|no)$`. .. comp_saf_dfa:: This is a test :id: comp_saf_dfa__test__bad_13 :sufficient: QM + #EXPECT-NOT: comp_saf_dfa__test__good_14.sufficient (yes): does not follow pattern `^(yes|no)$`. .. comp_saf_dfa:: This is a test :id: comp_saf_dfa__test__14 :sufficient: yes + #EXPECT-NOT: comp_saf_dfa__test__good_15.sufficient (no): does not follow pattern `^(yes|no)$`. .. comp_saf_dfa:: This is a test @@ -165,6 +159,7 @@ :sufficient: no + .. Test that the `sufficient` option is case sensitive and does not accept values other than `yes` or `no` #EXPECT: feat_saf_fmea__test__bad_16.sufficient (yEs): does not follow pattern `^(yes|no)$`. @@ -180,6 +175,7 @@ :status: valid + .. Negative Test: Linked to a non-allowed requirement type. #EXPECT: feat_saf_fmea__child__25.mitigates (['comp_req__child__ASIL_B']): does not follow pattern `^(feat_req__.*|aou_req__.*)$`. @@ -190,6 +186,7 @@ :mitigates: comp_req__child__ASIL_B + .. Negative Test: Linked to a non-allowed requirement type. #EXPECT: feat_saf_fmea__child__26.verifies (['comp_req__child__ASIL_B']): does not follow pattern `^feat_arc_dyn__[0-9a-z_]*$`. @@ -200,6 +197,7 @@ :verifies: comp_req__child__ASIL_B + .. 
Tests if the attribute `safety` follows the pattern `^(QM|ASIL_B)$` #EXPECT-NOT: doc__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. @@ -208,6 +206,7 @@ :status: valid :safety: QM + #EXPECT-NOT: doc__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. .. document:: This is a test document @@ -223,6 +222,7 @@ :status: valid :safety: QM + #EXPECT-NOT: stkh_req__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. .. stkh_req:: This is a test @@ -238,6 +238,7 @@ :status: valid :safety: QM + #EXPECT-NOT: feat_req__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. .. feat_req:: This is a test @@ -253,6 +254,7 @@ :status: valid :safety: QM + #EXPECT-NOT: comp_req__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. .. comp_req:: This is a test @@ -268,6 +270,8 @@ :status: valid :safety: QM + + #EXPECT-NOT: tool_req__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. .. tool_req:: This is a test @@ -283,6 +287,7 @@ :status: valid :safety: QM + #EXPECT-NOT: aou_req__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. .. aou_req:: This is a test @@ -298,6 +303,7 @@ :status: valid :safety: QM + #EXPECT-NOT: feat_arc_sta__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. .. feat_arc_sta:: This is a test @@ -313,6 +319,8 @@ :status: valid :safety: QM + + #EXPECT-NOT: feat_arc_dyn__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. .. feat_arc_dyn:: This is a test @@ -328,6 +336,8 @@ :status: valid :safety: QM + + #EXPECT-NOT: logic_arc_int__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. .. logic_arc_int:: This is a test @@ -343,6 +353,7 @@ :status: valid :safety: QM + #EXPECT-NOT: logic_arc_int_op__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. .. 
logic_arc_int_op:: This is a test @@ -358,6 +369,7 @@ :status: valid :safety: QM + #EXPECT-NOT: comp_arc_sta__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. .. comp_arc_sta:: This is a test @@ -373,6 +385,7 @@ :status: valid :safety: QM + #EXPECT-NOT: comp_arc_dyn__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. .. comp_arc_dyn:: This is a test @@ -381,6 +394,7 @@ :safety: ASIL_B + #EXPECT-NOT: real_arc_int__test_good_1.safety (QM): does not follow pattern `^(QM|ASIL_B)$`. .. real_arc_int:: This is a test @@ -388,6 +402,7 @@ :status: valid :safety: QM + #EXPECT-NOT: real_arc_int__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. .. real_arc_int:: This is a test @@ -448,6 +463,7 @@ :status: valid :safety: QM + #EXPECT-NOT: sw_unit__test_good_2.safety (ASIL_B): does not follow pattern `^(QM|ASIL_B)$`. .. sw_unit:: This is a test @@ -455,3 +471,34 @@ :status: valid :safety: ASIL_B + + +.. + Ensuring that empty content is detected correctly +.. #EXPECT: stkh_req__test_no_content: is missing required option: `content` +.. +.. .. stkh_req:: This is a test +.. :id: stkh_req__test_no_content +.. :status: valid +.. :safety: QM + + +.. + Ensuring that non empty content is detected correctly +#EXPECT-NOT: stkh_req__test_content: is missing required option: `content` + +.. stkh_req:: This is a test + :id: stkh_req__test_content + :status: valid + :safety: QM + + Some content, to not trigger the warning + + +.. + This should not trigger, as 'std_wp' is not checked for content +#EXPECT-NOT: std_wp__test_content: is missing required option: `content` + +.. 
std_wp:: This is a test + :id: std_wp__test_content + diff --git a/src/extensions/score_metamodel/tests/test_rules_file_based.py b/src/extensions/score_metamodel/tests/test_rules_file_based.py index f0268ec2..fdff5fa1 100644 --- a/src/extensions/score_metamodel/tests/test_rules_file_based.py +++ b/src/extensions/score_metamodel/tests/test_rules_file_based.py @@ -175,7 +175,6 @@ def test_rst_files( "Unable to extract test data from the rst file: " f"{rst_file}. Please check the file for the correct format." ) - # print(f"RST Data: {rst_data}") app: SphinxTestApp = sphinx_app_setup(RST_DIR / rst_file) os.chdir(app.srcdir) # Change working directory to the source directory @@ -185,7 +184,7 @@ def test_rst_files( # Collect the warnings warnings = app.warning.getvalue().splitlines() - print(f"Warnings: {warnings}") + # print(f"Warnings: {warnings}") # Check if the expected warnings are present for warning_info in rst_data.warning_infos: From 3007d2bd3cb7482a9480f4c9106f4894163be8f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Thu, 24 Jul 2025 15:28:25 +0200 Subject: [PATCH 081/231] fix consumer test (#170) --- .github/workflows/consumer_test.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/consumer_test.yml b/.github/workflows/consumer_test.yml index b5cce38f..714a87d6 100644 --- a/.github/workflows/consumer_test.yml +++ b/.github/workflows/consumer_test.yml @@ -40,7 +40,7 @@ jobs: - name: Checkout PR uses: actions/checkout@v4.2.2 with: - ref: ${{ steps.pr_details.outputs.head_ref }} + ref: refs/pull/${{ github.event.issue.number }}/head - name: Set Consumer Tests Status - Pending uses: actions/github-script@v7 @@ -75,10 +75,8 @@ jobs: script: | const outcome = '${{ steps.consumer_tests.outcome }}'; const state = outcome === 'success' ? 
'success' : 'failure'; - console.log(`Test outcome: ${outcome}, state: ${state}`); console.log(`Head SHA: ${{ steps.pr_details.outputs.head_sha }}`); - await github.rest.repos.createCommitStatus({ owner: context.repo.owner, repo: context.repo.repo, From df01d29580f9b39ef063caee2e59db441085e60f Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Thu, 24 Jul 2025 20:04:52 +0200 Subject: [PATCH 082/231] remove obsolete venv (#171) --- .devcontainer/devcontainer.json | 2 +- .github/workflows/consumer_test.yml | 4 ++-- .github/workflows/format.yml | 2 +- .github/workflows/test.yml | 2 +- .gitignore | 2 +- .vscode/settings.json | 2 +- README.md | 6 +++--- pyproject.toml | 4 ++-- src/BUILD | 12 ------------ src/extensions/BUILD | 7 ------- src/tests/test_consumer.py | 27 +++++++++++++-------------- 11 files changed, 25 insertions(+), 45 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index affe8541..16419602 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -2,5 +2,5 @@ "name": "eclipse-s-core", "image": "ghcr.io/eclipse-score/devcontainer:latest", "initializeCommand": "mkdir -p ${localEnv:HOME}/.cache/bazel", - "postCreateCommand": "bazel run //src:ide_support" + "postCreateCommand": "bazel run //docs:ide_support" } diff --git a/.github/workflows/consumer_test.yml b/.github/workflows/consumer_test.yml index 714a87d6..3295f1b6 100644 --- a/.github/workflows/consumer_test.yml +++ b/.github/workflows/consumer_test.yml @@ -61,8 +61,8 @@ jobs: - name: Run Consumer tests id: consumer_tests run: | - bazel run //src:ide_support - .venv/bin/python -m pytest -s -v src/tests/ + bazel run //docs:ide_support + .venv_docs/bin/python -m pytest -s -v src/tests/ env: FORCE_COLOR: "1" TERM: xterm-256color diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index f664708f..a2fd8c00 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -37,5 +37,5 @@ jobs: 
bazelisk-cache: true - name: Run formatting checks run: | - bazel run //src:ide_support + bazel run //docs:ide_support bazel test //src:format.check diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 33c4f4d2..e696d567 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -37,5 +37,5 @@ jobs: bazelisk-cache: true - name: Run test targets run: | - bazel run //src:ide_support + bazel run //docs:ide_support bazel test //src/... diff --git a/.gitignore b/.gitignore index 421cd8aa..9d875ae9 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,6 @@ styles/ .envrc # Python -.venv +.venv_docs __pycache__/ /.coverage diff --git a/.vscode/settings.json b/.vscode/settings.json index a50be5ac..a0b74b55 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -23,7 +23,7 @@ }, // // - "python.defaultInterpreterPath": "${workspaceFolder}/.venv/bin/python", + "python.defaultInterpreterPath": "${workspaceFolder}/.venv_docs/bin/python", "python.testing.pytestArgs": [ ".", "--ignore-glob=bazel-*/*", diff --git a/README.md b/README.md index 2fbc7abe..74b0c9bc 100644 --- a/README.md +++ b/README.md @@ -26,11 +26,11 @@ bazel run //docs:incremental_latest #### Getting IDE support -Create the virtual environment via `bazel run //src:ide_support`.\ +Create the virtual environment via `bazel run //docs:ide_support`.\ If your IDE does not automatically ask you to activate the newly created environment you can activate it. 
-- In VSCode via `ctrl+p` => `Select Python Interpreter` then select `.venv/bin/python` -- In the terminal via `source .venv/bin/activate` +- In VSCode via `ctrl+p` => `Select Python Interpreter` then select `.venv_docs/bin/python` +- In the terminal via `source .venv_docs/bin/activate` #### Format your documentation with: diff --git a/pyproject.toml b/pyproject.toml index e6e9b40d..35fde2ec 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ # This file is at the root level, as it applies to all Python code, # not only to docs or to tools. [tool.pyright] -extends = "bazel-bin/src/ide_support.runfiles/score_python_basics~/pyproject.toml" +extends = "bazel-bin/docs/ide_support.runfiles/score_python_basics~/pyproject.toml" exclude = [ "**/__pycache__", @@ -11,7 +11,7 @@ exclude = [ ] [tool.ruff] -extend = "bazel-bin/src/ide_support.runfiles/score_python_basics~/pyproject.toml" +extend = "bazel-bin/docs/ide_support.runfiles/score_python_basics~/pyproject.toml" extend-exclude = [ "**/__pycache__", diff --git a/src/BUILD b/src/BUILD index d3204016..39be6f7e 100644 --- a/src/BUILD +++ b/src/BUILD @@ -20,18 +20,6 @@ load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary") load("@score_dash_license_checker//:dash.bzl", "dash_license_checker") load("@score_python_basics//:defs.bzl", "score_virtualenv") -score_virtualenv( - reqs = [ - "@score_docs_as_code//src:plantuml_for_python", - "@score_docs_as_code//src/extensions:score_plantuml", - "@score_docs_as_code//src/find_runfiles:find_runfiles", - "@score_docs_as_code//src/extensions/score_draw_uml_funcs:score_draw_uml_funcs", - "@score_docs_as_code//src/extensions/score_header_service:score_header_service", - "@score_docs_as_code//src/extensions/score_layout:score_layout", - "@score_docs_as_code//src/extensions/score_metamodel:score_metamodel", - "@score_docs_as_code//src/extensions/score_source_code_linker:score_source_code_linker", - ] + all_requirements, -) # These are only exported because they're 
passed as files to the //docs.bzl # macros, and thus must be visible to other packages. They should only be # referenced by the //docs.bzl macros. diff --git a/src/extensions/BUILD b/src/extensions/BUILD index 710a8f51..d8ad70e2 100644 --- a/src/extensions/BUILD +++ b/src/extensions/BUILD @@ -15,13 +15,6 @@ load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") load("@pip_process//:requirements.bzl", "all_requirements") load("@score_python_basics//:defs.bzl", "score_py_pytest", "score_virtualenv") -# TODO: This probably can be deleted -# score_virtualenv( -# name = "process_venv", -# reqs = all_requirements, -# venv_name = ".venv_process", -# ) - py_library( name = "score_plantuml", srcs = ["@score_docs_as_code//src/extensions:score_plantuml.py"], diff --git a/src/tests/test_consumer.py b/src/tests/test_consumer.py index 7cf68ab4..13bc2bba 100644 --- a/src/tests/test_consumer.py +++ b/src/tests/test_consumer.py @@ -10,37 +10,36 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -import subprocess -import pytest +import logging import os import re -import logging - - +import subprocess +from collections import defaultdict +from dataclasses import dataclass, field from pathlib import Path + +import pytest +from pytest import TempPathFactory from rich import print -from rich.table import Table from rich.console import Console -from collections import defaultdict -from pytest import TempPathFactory -from dataclasses import dataclass, field +from rich.table import Table +from src.extensions.score_source_code_linker import get_github_base_url from src.extensions.score_source_code_linker.generate_source_code_links_json import ( find_git_root, ) -from src.extensions.score_source_code_linker import get_github_base_url """ -This script's main usecase is to test consumers of Docs-As-Code with the new changes made in PR's. 
+This script's main usecase is to test consumers of Docs-As-Code with the new changes made in PR's. This enables us to find new issues and problems we introduce with changes that we otherwise would only know much later. There are several things to note. - The `print` function has been overwritten by the 'rich' package to allow for richer text output. - The script itself takes quiet a bit of time, roughly 5+ min for a full run. -- If you need more output, enable it via `-v` or `-vv` +- If you need more output, enable it via `-v` or `-vv` - Start the script via the following command: - - bazel run //src:ide_support - - .venv/bin/python -m pytest -s src/tests (If you need more verbosity add `-v` or `-vv`) + - bazel run //docs:ide_support + - .venv_docs/bin/python -m pytest -s src/tests (If you need more verbosity add `-v` or `-vv`) """ # Max width of the printout From d7a98e36ea675436b9284c77044fb3b61e525f27 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Thu, 24 Jul 2025 20:53:55 +0200 Subject: [PATCH 083/231] update tool_reqs (#172) * update tool_reqs * make security temp optional --- docs/how-to-integrate/example/index.rst | 2 +- .../example/testing/index.rst | 2 +- docs/product/requirements.rst | 46 ++++++++------- src/extensions/score_metamodel/metamodel.yaml | 57 +++++++++++++++++-- 4 files changed, 80 insertions(+), 27 deletions(-) diff --git a/docs/how-to-integrate/example/index.rst b/docs/how-to-integrate/example/index.rst index 622d8afa..ea417124 100644 --- a/docs/how-to-integrate/example/index.rst +++ b/docs/how-to-integrate/example/index.rst @@ -25,6 +25,7 @@ This is a rendered example of the 'examples/linking-both' folder using the `docs :id: stkh_req__index__test_requirement :status: valid :safety: QM + :security: YES :rationale: A simple requirement we need to enable a documentation build :reqtype: Functional @@ -37,7 +38,6 @@ This is a rendered example of the 'examples/linking-both' folder using the `docs .. 
tool_req:: Some Title :id: tool_req__example__some_title - :reqtype: Process :security: YES :safety: ASIL_B :satisfies: PROCESS_gd_req__req__attr_uid diff --git a/docs/how-to-integrate/example/testing/index.rst b/docs/how-to-integrate/example/testing/index.rst index d2fcd839..fa023543 100644 --- a/docs/how-to-integrate/example/testing/index.rst +++ b/docs/how-to-integrate/example/testing/index.rst @@ -20,6 +20,7 @@ This example will help catch things and bugs when rst's are defined inside a fol :id: stkh_req__testing__test_requirement :status: valid :safety: QM + :security: YES :rationale: A simple requirement we need to enable a documentation build :reqtype: Functional @@ -31,7 +32,6 @@ This example will help catch things and bugs when rst's are defined inside a fol .. tool_req:: Some Title :id: tool_req__testing__some_title - :reqtype: Process :security: YES :safety: ASIL_B :satisfies: PROCESS_gd_req__req__attr_uid diff --git a/docs/product/requirements.rst b/docs/product/requirements.rst index 68f9c4d1..956e3a62 100644 --- a/docs/product/requirements.rst +++ b/docs/product/requirements.rst @@ -89,7 +89,7 @@ This section provides an overview of current process requirements and their clar .. tool_req:: Enforces title wording rules :id: tool_req__docs_common_attr_title - :implemented: PARTIAL + :implemented: YES :tags: Common Attributes :satisfies: PROCESS_gd_req__req__attr_title :parent_covered: NO: Can not ensure summary @@ -121,7 +121,7 @@ This section provides an overview of current process requirements and their clar .. tool_req:: Security: enforce classification :id: tool_req__docs_common_attr_security - :implemented: PARTIAL + :implemented: YES :tags: Common Attributes :satisfies: PROCESS_gd_req__req__attr_security, @@ -301,7 +301,7 @@ This section provides an overview of current process requirements and their clar .. 
tool_req:: Enforces requirement type classification :id: tool_req__docs_req_attr_reqtype :tags: Requirements - :implemented: PARTIAL + :implemented: YES :satisfies: PROCESS_gd_req__req__attr_type Docs-as-Code shall enforce that each need of type :need:`tool_req__docs_req_types` @@ -353,6 +353,7 @@ This section provides an overview of current process requirements and their clar :implemented: PARTIAL :satisfies: PROCESS_gd_req__req__linkage, PROCESS_gd_req__req__traceability :parent_covered: YES + :status: invalid Docs-as-Code shall enforce that linking between model elements via the ``satisfies`` attribute follows defined rules. Having at least one link is mandatory. @@ -371,6 +372,10 @@ This section provides an overview of current process requirements and their clar Tooling Requirements Process Requirements ================================ =========================== + .. note:: + Some tool requirements do not have a matching process requirement (gap). + And sometimes we need to link to documents and not requirements?! 
+ 🏛️ Architecture ################ @@ -386,20 +391,20 @@ This section provides an overview of current process requirements and their clar PROCESS_gd_req__arch__viewpoints, PROCESS_gd_req__arch__build_blocks, PROCESS_gd_req__arch__build_blocks_corr - :implemented: PARTIAL + :implemented: YES :parent_covered: NO :status: invalid Docs-as-Code shall support the following architecture types: - * Feature (Architecture Element) = Feature Architecture Static View (feat_arch_static) - * Feature Architecture Dynamic View (feat_arch_dyn) + * Feature (Architecture Element) = Feature Architecture Static View (feat_arc_sta) + * Feature Architecture Dynamic View (feat_arc_dyn) * Feature: Logical Architecture Interface (incl Logical Interface View) (logic_arc_int) * Feature: Logical Architecture Interface Operation (logic_arc_int_op) * Component Architecture Static View (comp_arc_sta) * Component Architecture Dynamic View (comp_arc_dyn) - * Component Architecture Interface = Real Interface (comp_arc_int) - * Component Architecture Interface Operation = Real Interface Operation (comp_arc_int_op) + * Component Architecture Interface = Real Interface (real_arc_int) + * Component Architecture Interface Operation = Real Interface Operation (real_arc_int_op) .. 
tool_req::Module Views @@ -416,7 +421,7 @@ This section provides an overview of current process requirements and their clar Docs-as-Code shall support the following module view-types: - * Module = Module Architecture Static View = Top Level SW component container (mod_view_static) + * Module = Module Architecture Static View = Top Level SW component container (mod_view_sta) * Module Architecture Dynamic View = Top Level SW component container (mod_view_dyn) @@ -433,6 +438,7 @@ This section provides an overview of current process requirements and their clar PROCESS_gd_req__arch__attr_fulfils, PROCESS_gd_req__arch__traceability, :parent_covered: YES + :status: invalid Docs-as-Code shall enforce that linking via the ``fulfils`` attribute follows defined rules. @@ -514,7 +520,7 @@ This section provides an overview of current process requirements and their clar .. tool_req:: Supports linking to source code :tags: Detailed Design & Code :id: tool_req__docs_dd_link_source_code_link - :implemented: PARTIAL + :implemented: YES :parent_covered: YES :satisfies: PROCESS_gd_req__req__attr_impl @@ -526,7 +532,7 @@ This section provides an overview of current process requirements and their clar .. tool_req:: Supports linking to test cases :id: tool_req__docs_dd_link_testcase :tags: Detailed Design & Code - :implemented: NO + :implemented: PARTIAL :satisfies: PROCESS_gd_req__req__attr_testlink Docs-as-Code shall allow requirements of type :need:`tool_req__docs_req_types` to @@ -540,21 +546,21 @@ This section provides an overview of current process requirements and their clar .. they are so different, that they need their own section .. tool_req:: Tool Verification Report - :id: tool_req__docs_tvr_uid + :id: tool_req__docs_tvr :tags: Tool Verification Reports - :implemented: NO + :implemented: YES :parent_covered: NO :satisfies: PROCESS_gd_req__tool__attr_uid Docs-as-Code shall support the definition and management of Tool Verification Reports - (``tool_verification_report``). 
+ (``doc_tool``). .. tool_req:: Enforce safety classification :id: tool_req__docs_tvr_safety :tags: Tool Verification Reports - :implemented: NO + :implemented: YES :parent_covered: YES - :satisfies: PROCESS_gd_req__tool__attr_safety_affected + :satisfies: PROCESS_gd_req__tool__attr_safety_affected, PROCESS_gd_req__tool__check_mandatory Docs-as-Code shall enforce that every Tool Verification Report includes a ``safety_affected`` attribute with one of the following values: @@ -565,9 +571,9 @@ This section provides an overview of current process requirements and their clar .. tool_req:: Enforce security classification :id: tool_req__docs_tvr_security :tags: Tool Verification Reports - :implemented: NO + :implemented: YES :parent_covered: YES - :satisfies: PROCESS_gd_req__tool__attr_security_affected + :satisfies: PROCESS_gd_req__tool__attr_security_affected, PROCESS_gd_req__tool__check_mandatory Docs-as-Code shall enforce that every Tool Verification Report includes a ``security_affected`` attribute with one of the following values: @@ -579,8 +585,8 @@ This section provides an overview of current process requirements and their clar .. 
tool_req:: Enforce status classification :id: tool_req__docs_tvr_status :tags: Tool Verification Reports - :implemented: NO - :satisfies: PROCESS_gd_req__tool__attr_status + :implemented: YES + :satisfies: PROCESS_gd_req__tool__attr_status, PROCESS_gd_req__tool__check_mandatory :parent_covered: YES Docs-as-Code shall enforce that every Tool Verification Report includes a ``status`` diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 28d19b50..e4c6703c 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -109,9 +109,10 @@ needs_types: status: ^(valid|draft)$ content: ^[\s\S]+$ optional_links: - complies: ^std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ # req-Id: tool_req__docs_req_link_satisfies_allowed + # TODO: fix once process_description is fixed satisfies: ^wf__.*$ + complies: ^std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ tags: - requirement @@ -194,17 +195,24 @@ needs_types: optional_options: safety: "^(QM|ASIL_B)$" security: "^(YES|NO)$" + author: ^.*$ + approver: ^.*$ + reviewer: ^.*$ optional_links: realizes: "^wp__.+$" + # req-Id: tool_req__docs_tvr doc_tool: title: Tool Verification Report prefix: doc_tool__ mandatory_options: id: ^doc_tool__[0-9a-z_]*$ + # req-Id: tool_req__docs_tvr_status status: ^(draft|evaluated|qualified|released|rejected)$ version: ^.*$ + # req-Id: tool_req__docs_tvr_safety safety_affected: "^(YES|NO)$" + # req-Id: tool_req__docs_tvr_security security_affected: "^(YES|NO)$" tcl: "^(LOW|HIGH)$" optional_links: @@ -219,13 +227,17 @@ needs_types: id: ^stkh_req__[0-9a-z_]*$ # req-Id: tool_req__docs_req_attr_reqtype reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ + # req-Id: tool_req__docs_common_attr_safety safety: ^(QM|ASIL_B)$ + # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ - # WARNING: THis will be activated again with new process release (1.1.0) + # 
WARNING: THis will be activated again with new process release (1.1.0) # content: ^[\s\S]+$ # req-Id: tool_req__docs_req_attr_rationale rationale: ^.+$ optional_options: + # req-Id: tool_req__docs_common_attr_security + # TODO: move to mandatory once https://github.com/eclipse-score/process_description/pull/133 is merged security: ^(YES|NO)$ codelink: ^.*$ testlink: ^.*$ @@ -246,8 +258,11 @@ needs_types: id: ^feat_req__[0-9a-z_]*$ # req-Id: tool_req__docs_req_attr_reqtype reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ + # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ + # req-Id: tool_req__docs_common_attr_safety safety: ^(QM|ASIL_B)$ + # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ content: ^[\s\S]+$ mandatory_links: @@ -272,8 +287,11 @@ needs_types: id: ^comp_req__[0-9a-z_]*$ # req-Id: tool_req__docs_req_attr_reqtype reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ + # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ + # req-Id: tool_req__docs_common_attr_safety safety: ^(QM|ASIL_B)$ + # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ content: ^[\s\S]+$ mandatory_links: @@ -296,16 +314,18 @@ needs_types: prefix: tool_req__ mandatory_options: id: ^tool_req__[0-9a-z_]*$ + # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ + # req-Id: tool_req__docs_common_attr_safety safety: ^(QM|ASIL_B)$ + # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ content: ^[\s\S]+$ optional_links: # req-Id: tool_req__docs_req_link_satisfies_allowed - satisfies: ^.*$ + # TODO: make it mandatory + satisfies: ^gd_req__.*$ optional_options: - # req-Id: tool_req__docs_req_attr_reqtype - reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ codelink: ^.*$ tags: ^.*$ testlink: ^.*$ @@ -328,8 +348,11 @@ needs_types: id: ^aou_req__[0-9a-z_]*$ # req-Id: tool_req__docs_req_attr_reqtype reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ + 
# req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ + # req-Id: tool_req__docs_common_attr_safety safety: ^(QM|ASIL_B)$ + # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ content: ^[\s\S]+$ optional_options: @@ -354,8 +377,11 @@ needs_types: style: card mandatory_options: id: ^feat_arc_sta__[0-9a-z_]+$ + # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ + # req-Id: tool_req__docs_common_attr_safety safety: ^(QM|ASIL_B)$ + # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ mandatory_links: includes: ^logic_arc_int(_op)*__.+$ @@ -371,8 +397,11 @@ needs_types: style: card mandatory_options: id: ^feat_arc_dyn__[0-9a-z_]+$ + # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ + # req-Id: tool_req__docs_common_attr_safety safety: ^(QM|ASIL_B)$ + # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ mandatory_links: fulfils: ^feat_req__.+$ @@ -386,8 +415,11 @@ needs_types: style: card mandatory_options: id: ^logic_arc_int__[0-9a-z_]+$ + # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ + # req-Id: tool_req__docs_common_attr_safety safety: ^(QM|ASIL_B)$ + # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ optional_links: includes: ^logic_arc_int_op__.+$ @@ -402,8 +434,11 @@ needs_types: style: card mandatory_options: id: ^logic_arc_int_op__[0-9a-z_]+$ + # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ + # req-Id: tool_req__docs_common_attr_safety safety: ^(QM|ASIL_B)$ + # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ mandatory_links: included_by: ^logic_arc_int__.+$ @@ -435,8 +470,11 @@ needs_types: style: card mandatory_options: id: ^comp_arc_sta__[0-9a-z_]+$ + # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ + # req-Id: tool_req__docs_common_attr_safety safety: ^(QM|ASIL_B)$ + # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ optional_links: implements: 
^real_arc_int(_op)*__.+$ @@ -453,8 +491,11 @@ needs_types: style: card mandatory_options: id: ^comp_arc_dyn__[0-9a-z_]+$ + # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ + # req-Id: tool_req__docs_common_attr_safety safety: ^(QM|ASIL_B)$ + # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ optional_links: fulfils: ^comp_req__.+$ @@ -468,8 +509,11 @@ needs_types: style: card mandatory_options: id: ^real_arc_int__[0-9a-z_]+$ + # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ + # req-Id: tool_req__docs_common_attr_safety safety: ^(QM|ASIL_B)$ + # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ language: ^(cpp|rust)$ optional_links: @@ -484,8 +528,11 @@ needs_types: style: card mandatory_options: id: ^real_arc_int_op__[0-9a-z_]+$ + # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ + # req-Id: tool_req__docs_common_attr_safety safety: ^(QM|ASIL_B)$ + # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ mandatory_links: included_by: ^real_arc_int__.+$ From 6290e80b76755e8078184168746d9740bf0e09ef Mon Sep 17 00:00:00 2001 From: Oliver Pajonk Date: Fri, 25 Jul 2025 09:34:36 +0200 Subject: [PATCH 084/231] Update DevContainer Usage (#179) --- .devcontainer/devcontainer.json | 2 +- .vscode/settings.json | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 16419602..c3798082 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -2,5 +2,5 @@ "name": "eclipse-s-core", "image": "ghcr.io/eclipse-score/devcontainer:latest", "initializeCommand": "mkdir -p ${localEnv:HOME}/.cache/bazel", - "postCreateCommand": "bazel run //docs:ide_support" + "updateContentCommand": "bazel run //docs:ide_support" } diff --git a/.vscode/settings.json b/.vscode/settings.json index a0b74b55..dec63f2f 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -32,11 
+32,6 @@ ], "python.testing.unittestEnabled": false, "python.testing.pytestEnabled": true, - "bazel.lsp.command": "bazel", - "bazel.lsp.args": [ - "run", - "//:starpls_server" - ], // Disable internal type checking, since we use basedpyright "python.analysis.typeCheckingMode": "off", From a2d7cad46a52e65097e4ce2a6c2fc70a0a27ea5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Fri, 25 Jul 2025 12:10:49 +0200 Subject: [PATCH 085/231] Fix conent checks (#173) * Add prohibited word checks Build in collaboration with @Aymen-Soussi-01 --- docs/product/requirements.rst | 20 +++-- src/extensions/score_metamodel/__init__.py | 48 ++++++++++-- .../checks/attributes_format.py | 71 +++++++----------- .../score_metamodel/checks/check_options.py | 4 +- src/extensions/score_metamodel/metamodel.yaml | 28 ++++++- .../test_attributes_format_description.rst | 38 ---------- .../test_attributes_format_title.rst | 44 ----------- .../rst/attributes/test_prohibited_words.rst | 75 +++++++++++++++++++ .../rst/options/test_options_options.rst | 2 +- 9 files changed, 186 insertions(+), 144 deletions(-) delete mode 100644 src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_description.rst delete mode 100644 src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_title.rst create mode 100644 src/extensions/score_metamodel/tests/rst/attributes/test_prohibited_words.rst diff --git a/docs/product/requirements.rst b/docs/product/requirements.rst index 956e3a62..3035ecc0 100644 --- a/docs/product/requirements.rst +++ b/docs/product/requirements.rst @@ -89,17 +89,14 @@ This section provides an overview of current process requirements and their clar .. 
tool_req:: Enforces title wording rules :id: tool_req__docs_common_attr_title - :implemented: YES + :implemented: YES :tags: Common Attributes :satisfies: PROCESS_gd_req__req__attr_title :parent_covered: NO: Can not ensure summary - Docs-as-Code shall enforce that Need titles do not contain the following words: - - * shall - * must - * will + Docs-as-Code shall enforce that needs of type :need:`tool_req__docs_req_types` do not have prohibited words + which can be found in the metamodel. --------------------------- @@ -115,6 +112,17 @@ This section provides an overview of current process requirements and their clar Docs-as-Code shall enforce that each need of type :need:`tool_req__docs_req_types` has a description (content) + +.. tool_req:: Enforces description wording rules + :id: tool_req__docs_common_attr_desc_wording + :tags: Common Attributes + :implemented: YES + :satisfies: PROCESS_gd_req__req__attr_desc_weak + :parent_covered: YES + + + Docs-as-Code shall enforce that Need description do not contain the weak words that are defined in the metamodel + ---------------------------- 🔒 Security Classification ---------------------------- diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 8365e47a..e52ce16f 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -16,10 +16,13 @@ import pkgutil from collections.abc import Callable from pathlib import Path +from dataclasses import dataclass, field +from typing import cast from ruamel.yaml import YAML from sphinx.application import Sphinx from sphinx_needs import logging +from sphinx_needs.config import NeedType from sphinx_needs.data import NeedsInfoType, NeedsView, SphinxNeedsData from .log import CheckLogger @@ -33,6 +36,20 @@ graph_checks: list[graph_check_function] = [] +@dataclass +class ScoreNeedType(NeedType): + tags: list[str] + + +@dataclass +class ProhibitedWordCheck: + name: str + option_check: 
dict[str, list[str]] = field( + default_factory=dict + ) # { Option: [Forbidden words]} + types: list[str] = field(default_factory=list) + + def parse_checks_filter(filter: str) -> list[str]: """ Parse the checks filter string into a list of individual checks. @@ -120,6 +137,18 @@ def is_check_enabled(check: local_check_function | graph_check_function): # TODO: exit code +def convert_checks_to_dataclass(checks_dict) -> list[ProhibitedWordCheck]: + prohibited_words_checks = [ + ProhibitedWordCheck( + name=check_name, + option_check={k: v for k, v in check_config.items() if k != "types"}, + types=check_config.get("types", []), + ) + for check_name, check_config in checks_dict.items() + ] + return prohibited_words_checks + + def load_metamodel_data(): """ Load and process metamodel.yaml. @@ -146,8 +175,13 @@ def load_metamodel_data(): global_base_options_optional_opts = global_base_options.get("optional_options", {}) # Get the stop_words and weak_words as separate lists - stop_words_list = global_base_options.get("prohibited_words", {}).get("title", []) - weak_words_list = global_base_options.get("prohibited_words", {}).get("content", []) + proh_checks_dict = data.get("prohibited_words_checks", {}) + prohibited_words_checks = convert_checks_to_dataclass(proh_checks_dict) + + # prohibited_words_checks= [ProhibitedWordCheck(**check) for check in pro_checks.values()] + + # stop_words_list = global_base_options.get("prohibited_words", {}).get("title", []) + # weak_words_list = global_base_options.get("prohibited_words", {}).get("content", []) # Default options by sphinx, sphinx-needs or anything else we need to account for default_options_list = default_options() @@ -212,8 +246,8 @@ def load_metamodel_data(): needs_extra_options = sorted(all_options - set(default_options_list)) return { - "stop_words": stop_words_list, - "weak_words": weak_words_list, + "prohibited_words_checks": prohibited_words_checks, + # "weak_words": weak_words_list, "needs_types": needs_types_list, 
"needs_extra_links": needs_extra_links_list, "needs_extra_options": needs_extra_options, @@ -298,8 +332,10 @@ def setup(app: Sphinx) -> dict[str, str | bool]: app.config.needs_extra_links = metamodel["needs_extra_links"] app.config.needs_extra_options = metamodel["needs_extra_options"] app.config.graph_checks = metamodel["needs_graph_check"] - app.config.stop_words = metamodel["stop_words"] - app.config.weak_words = metamodel["weak_words"] + app.config.prohibited_words_checks = metamodel["prohibited_words_checks"] + + # app.config.stop_words = metamodel["stop_words"] + # app.config.weak_words = metamodel["weak_words"] # Ensure that 'needs.json' is always build. app.config.needs_build_json = True app.config.needs_reproducible_json = True diff --git a/src/extensions/score_metamodel/checks/attributes_format.py b/src/extensions/score_metamodel/checks/attributes_format.py index 288d199e..3cd01f98 100644 --- a/src/extensions/score_metamodel/checks/attributes_format.py +++ b/src/extensions/score_metamodel/checks/attributes_format.py @@ -11,12 +11,12 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -from score_metamodel import CheckLogger, local_check +from score_metamodel import CheckLogger, local_check, ScoreNeedType, ProhibitedWordCheck from sphinx.application import Sphinx from sphinx_needs.data import NeedsInfoType -def get_need_type(needs_types: list[str], directive: str) -> str: +def get_need_type(needs_types: list[ScoreNeedType], directive: str) -> ScoreNeedType: for need_type in needs_types: assert isinstance(need_type, dict), need_type if need_type["directive"] == directive: @@ -96,48 +96,33 @@ def check_id_length(app: Sphinx, need: NeedsInfoType, log: CheckLogger): log.warning_for_option(need, "id", msg) -# req-#id: gd_req__requirements_attr_title -@local_check -def check_title(app: Sphinx, need: NeedsInfoType, log: CheckLogger): - """ - Ensures that the requirement Title does not contain 
stop words. - This helps enforce clear and concise naming conventions. - """ - stop_words = app.config.stop_words - need_options = get_need_type(app.config.needs_types, need["type"]) - - if any( - tag in need_options.get("tags", []) - for tag in ["architecture_element", "requirement"] - ): - for word in stop_words: - if word in need["title"]: - msg = ( - f"contains a stop word: `{word}`. " - "The title is meant to provide a short summary, " - "not to repeat the requirement statement. " - "Please revise the title for clarity and brevity." - ) - log.warning_for_option(need, "title", msg) - break +def _check_options_for_prohibited_words( + prohibited_word_checks: ProhibitedWordCheck, need: NeedsInfoType, log: CheckLogger +): + options: list[str] = [ + x for x in prohibited_word_checks.option_check.keys() if x != "types" + ] + for option in options: + forbidden_words = prohibited_word_checks.option_check[option] + for word in need[option].split(): + if word in forbidden_words: + msg = f"contains a weak word: `{word}` in option: `{option}`. Please revise the wording." + log.warning_for_need(need, msg) # req-#id: gd_req__req__attr_desc_weak +# # req-#id: gd_req__requirements_attr_title @local_check -def check_description(app: Sphinx, need: NeedsInfoType, log: CheckLogger): - """ - Ensures that the requirement Description does not contain weak words. - This helps enforce strong, clear, and unambiguous requirement phrasing - --- - """ - weak_words = app.config.weak_words - if need["type"] in [ - "stkh_req", - "feat_req", - "comp_req", - ] and need.get("content", None): - for word in weak_words: - if word in need["content"]: - msg = f"contains a weak word: `{word}`. Please revise the description." 
- log.warning_for_option(need, "content", msg) - break +def check_for_prohibited_words(app: Sphinx, need: NeedsInfoType, log: CheckLogger): + need_options = get_need_type(app.config.needs_types, need["type"]) + prohibited_word_checks: list[ProhibitedWordCheck] = ( + app.config.prohibited_words_checks + ) + for check in prohibited_word_checks: + # Check if there are any type restrictions for this check + types_to_check = check.types + if types_to_check: + if any(tag in need_options.get("tags", []) for tag in types_to_check): + _check_options_for_prohibited_words(check, need, log) + else: + _check_options_for_prohibited_words(check, need, log) diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index a5a2a66e..aae651c6 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -16,16 +16,16 @@ CheckLogger, default_options, local_check, + ScoreNeedType, ) from sphinx.application import Sphinx -from sphinx_needs.config import NeedType from sphinx_needs.data import NeedsInfoType FieldCheck = tuple[dict[str, str], bool] CheckingDictType = dict[str, list[FieldCheck]] -def get_need_type(needs_types: list[NeedType], directive: str) -> NeedType: +def get_need_type(needs_types: list[ScoreNeedType], directive: str) -> ScoreNeedType: for need_type in needs_types: assert isinstance(need_type, dict), need_type if need_type["directive"] == directive: diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index e4c6703c..2716ee61 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -15,14 +15,29 @@ needs_types_base_options: optional_options: source_code_link: ^https://github.com/.* - # Custom semantic validation rules - prohibited_words: - # req-Id: tool_req__docs_common_attr_title + # Custom semantic validation rules + +# 
Prohibited Word Option Checks +# Follow this schema to write new checks +# check_name: +# types[optional]: +# - List of tag types ('tags' option) +# - ... +# option_to_check: +# - List of words that are forbidden for this option +# - ... +# +prohibited_words_checks: + # req-Id: tool_req__docs_common_attr_title + title_check: title: - shall - must - will - # req-Id: tool_req__docs_common_attr_description + # req-Id: tool_req__docs_common_attr_description + content_check: + types: + - requirement_excl_process content: - just - about @@ -248,6 +263,7 @@ needs_types: hash: ^.*$ tags: - requirement + - requirement_excl_process # req-Id: tool_req__docs_req_types feat_req: @@ -278,6 +294,7 @@ needs_types: hash: ^.*$ tags: - requirement + - requirement_excl_process # req-Id: tool_req__docs_req_types comp_req: @@ -307,6 +324,7 @@ needs_types: hash: ^.*$ tags: - requirement + - requirement_excl_process # req-Id: tool_req__docs_req_types tool_req: @@ -339,6 +357,7 @@ needs_types: parent_has_problem: ^.*$ tags: - requirement + - requirement_excl_process # req-Id: tool_req__docs_req_types aou_req: @@ -367,6 +386,7 @@ needs_types: mitigates: ^.*$ tags: - requirement + - requirement_excl_process # Architecture diff --git a/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_description.rst b/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_description.rst deleted file mode 100644 index 5ea30b00..00000000 --- a/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_description.rst +++ /dev/null @@ -1,38 +0,0 @@ -.. - # ******************************************************************************* - # Copyright (c) 2025 Contributors to the Eclipse Foundation - # - # See the NOTICE file(s) distributed with this work for additional - # information regarding copyright ownership. 
- # - # This program and the accompanying materials are made available under the - # terms of the Apache License Version 2.0 which is available at - # https://www.apache.org/licenses/LICENSE-2.0 - # - # SPDX-License-Identifier: Apache-2.0 - # ******************************************************************************* -#CHECK: check_description - -.. Description contains a weak word -#EXPECT: stkh_req__test__abcd.content (This should really work): contains a weak word: `really`. - -.. stkh_req:: This is a test - :id: stkh_req__test__abcd - - This should really work - -.. Description contains no weak word -#EXPECT-NOT: contains a weak word - -.. stkh_req:: This is a test - :id: stkh_req__test__abce - - This should work - -.. Description of requirement of type std_wp is not checked for weak words -#EXPECT-NOT: contains a weak word - -.. std_wp:: This is a test - :id: stdwp__test__abce - - This should really work diff --git a/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_title.rst b/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_title.rst deleted file mode 100644 index de3ae097..00000000 --- a/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_title.rst +++ /dev/null @@ -1,44 +0,0 @@ -.. - # ******************************************************************************* - # Copyright (c) 2025 Contributors to the Eclipse Foundation - # - # See the NOTICE file(s) distributed with this work for additional - # information regarding copyright ownership. - # - # This program and the accompanying materials are made available under the - # terms of the Apache License Version 2.0 which is available at - # https://www.apache.org/licenses/LICENSE-2.0 - # - # SPDX-License-Identifier: Apache-2.0 - # ******************************************************************************* -#CHECK: check_title - -.. 
Title contains a stop word -#EXPECT: feat_req__test__abcd.title (This must work): contains a stop word: `must`. - -.. feat_req:: This must work - :id: feat_req__test__abcd - -.. Title contains no stop word -#EXPECT-NOT: feat_req__test__abce.title (This is a test): contains a stop word - -.. feat_req:: This is a test - :id: feat_req__test__abce - -.. Title of an architecture element contains a stop word -#EXPECT: logic_arc_int__test__abcd.title (This must work): contains a stop word: `must`. - -.. logic_arc_int:: This must work - :id: logic_arc_int__test__abcd - -.. Title of an architecture element contains no stop word -#EXPECT-NOT: logic_arc_int__test__abce.title (This is a test): contains a stop word - -.. logic_arc_int:: This is a test - :id: logic_arc_int__test__abce - -.. Title of requirement of type std_wp is not checked for stop words -#EXPECT-NOT: std_wp__test__abce.title (This must work): contains a stop word: `must`. - -.. std_wp:: This must work - :id: std_wp__test__abce diff --git a/src/extensions/score_metamodel/tests/rst/attributes/test_prohibited_words.rst b/src/extensions/score_metamodel/tests/rst/attributes/test_prohibited_words.rst new file mode 100644 index 00000000..0c7af6a0 --- /dev/null +++ b/src/extensions/score_metamodel/tests/rst/attributes/test_prohibited_words.rst @@ -0,0 +1,75 @@ +.. + # ******************************************************************************* + # Copyright (c) 2025 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* +#CHECK: check_for_prohibited_words + + +.. 
Title contains a stop word +#EXPECT: feat_req__test__title_bad: contains a weak word: `must` in option: `title`. Please revise the wording. + +.. feat_req:: This must work + :id: feat_req__test__title_bad + + + +.. Title contains no stop word +#EXPECT-NOT: feat_req__test__title_good: contains a weak word: `must` in option: `title`. Please revise the wording. + +.. feat_req:: This is a test + :id: feat_req__test__title_good + + + +.. Title of an architecture element contains a stop word +#EXPECT: stkh_req__test_title_bad: contains a weak word: `must` in option: `title`. Please revise the wording. + +.. stkh_req:: This must work + :id: stkh_req__test_title_bad + + + +#EXPECT-NOT: stkh_req__test_title_good: contains a weak word: `must` in option: `title`. Please revise the wording. + +.. stkh_req:: This is a teset + :id: stkh_req__test_title_good + + + + +.. Description contains a weak word +#EXPECT: stkh_req__test__desc_bad: contains a weak word: `really` in option: `content`. Please revise the wording. + +.. stkh_req:: This is a test + :id: stkh_req__test__desc_bad + + This should really work + + + +.. Description contains no weak word +#EXPECT-NOT: stkh_req__test__desc_good: contains a weak word: `really` in option: `content`. Please revise the wording. + +.. stkh_req:: This is a test + :id: stkh_req__test__desc_good + + This should work + + + +.. Description of requirement of type feat_arc_sta is not checked for weak words +#EXPECT-NOT: feat_arc_sta_desc_good: contains a weak word: `really` in option: `content`. Please revise the wording. + +.. 
feat_arc_sta:: This is a test + :id: feat_arc_sta_desc_good + + This should really work diff --git a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst index 931d3266..98449927 100644 --- a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst +++ b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst @@ -11,7 +11,7 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -#CHECK: check_options, check_content +#CHECK: check_options .. From 3c7be8ef049e6d9a566c6426ffd8124107b879f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Fri, 25 Jul 2025 12:46:16 +0200 Subject: [PATCH 086/231] Improve consumer test (#177) --- src/tests/README.md | 104 ++++++++++++++++ src/tests/conftest.py | 26 ++++ src/tests/test_consumer.py | 243 +++++++++++++++++++++++++++++++------ 3 files changed, 333 insertions(+), 40 deletions(-) create mode 100644 src/tests/README.md create mode 100644 src/tests/conftest.py diff --git a/src/tests/README.md b/src/tests/README.md new file mode 100644 index 00000000..ef68ce41 --- /dev/null +++ b/src/tests/README.md @@ -0,0 +1,104 @@ +# Docs-As-Code Consumer Tests + +This test validates that changes to the docs-as-code system don't break downstream consumers. +It tests both local changes and git-based overrides against real consumer repositories. + +## Use in CI + +If you want to start the consumer tests on a PR inside `docs-as-code`, then all you have to do is comment +`/consumer-test` on the PR and this should trigger them. + +## Quick Start + +```bash +# Create the virtual environment +bazel run //docs:ide_support + +# Run with std. 
configuration +.venv_docs/bin/python -m pytest -s src/tests + +# Run with more verbose output (up to -vvv) +.venv_docs/bin/python -m pytest -s -v src/tests + +# Run specific repositories only +.venv_docs/bin/python -m pytest -s src/tests --repo=score + +# Disable the persistent cache +.venv_docs/bin/python -m pytest -s src/tests --disable-cache + +# Or combine both options +.venv_docs/bin/python -m pytest -s src/tests --disable-cache --repo=score +``` + +## Verbosity Levels + +The test suite supports different levels of output detail: + +- **No flags**: Basic test results and summary table +- **`-v`**: Shows detailed warnings breakdown by logger type +- **`-vv`**: Adds full stdout/stderr output from build commands +- **`-vvv`**: Streams build output in real-time (useful for debugging hanging builds) + +## Command Line Options + +### `--disable-cache` +Disabled persistent caching for clean testing cycle. + +**What the test normaly do:** +- Uses `~/.cache/docs_as_code_consumer_tests` instead of temporary directories +- Reuses cloned repositories between runs (with git updates) +- Significantly speeds up subsequent test runs + +**This option disables the above mentioned behaviour and clones the repositories fresh** + +**When to use:** During development when you need to ensure testing is done on a fresh env. + +### `--repo` +Filters which repositories to test. + +**Usage:** +```bash +# Test only the 'score' repository +.venv_docs/bin/python -m pytest -s src/tests --repo=score + +# Test multiple repositories +.venv_docs/bin/python -m pytest -s src/tests --repo=score,module_template + +# Invalid repo names fall back to testing all repositories +.venv_docs/bin/python -m pytest -s src/tests --repo=nonexistent +``` + +**Available repositories:** Check `REPOS_TO_TEST` in the test file for current list. 
+ +## Currently tested repositories + +- [score](https://github.com/eclipse-score/score) +- [process_description](https://github.com/eclipse-score/process_description) +- [module_template](https://github.com/eclipse-score/module_template) + +## What Gets Tested + +For each repository, the test: +1. Clones the consumer repository +2. Tests with **local override** (your current changes) +3. Tests with **git override** (current commit from remote) +4. Runs build commands and test commands +5. Analyzes warnings and build success + +## Example Development Workflow + +```bash +# Create the virtual environment +bazel run //docs:ide_support + +# First run - clones everything fresh +.venv_docs/bin/python -m pytest -s -v src/tests --repo=score + +# Make changes to docs-as-code... + +# Subsequent runs - much faster due to caching +.venv_docs/bin/python -m pytest -s -v src/tests --repo=score + +# Final validation - test all repos without cache +.venv_docs/bin/python -m pytest -s -v src/tests --disable-cache +``` diff --git a/src/tests/conftest.py b/src/tests/conftest.py new file mode 100644 index 00000000..5bbd0e4a --- /dev/null +++ b/src/tests/conftest.py @@ -0,0 +1,26 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +def pytest_addoption(parser): + """Add custom command line options to pytest""" + parser.addoption( + "--repo", + action="store", + default=None, + help="Comma separated string of ConsumerRepo's name tests to run", + ) + parser.addoption( + "--disable-cache", + action="store_true", + default=False, + help="Disable local caching", + ) diff --git a/src/tests/test_consumer.py b/src/tests/test_consumer.py index 13bc2bba..4c345655 100644 --- a/src/tests/test_consumer.py +++ b/src/tests/test_consumer.py @@ -13,6 +13,7 @@ import logging import os import re +import argparse import subprocess from collections import defaultdict from dataclasses import dataclass, field @@ -44,12 +45,9 @@ # Max width of the printout len_max = 80 +CACHE_DIR = Path.home() / ".cache" / "docs_as_code_consumer_tests" - -LOGGER = logging.getLogger(__name__) -LOGGER.setLevel("DEBUG") - -console = Console(force_terminal=True if os.getenv("CI") else None, width=120) +console = Console(force_terminal=True if os.getenv("CI") else None, width=80) @dataclass @@ -88,8 +86,8 @@ class Result: name="score", git_url="https://github.com/eclipse-score/score.git", commands=[ - "bazel run //docs:incremental_latest", "bazel run //docs:ide_support", + "bazel run //docs:incremental_latest", "bazel run //docs:incremental_release", "bazel build //docs:docs_release", "bazel build //docs:docs_latest", @@ -112,8 +110,19 @@ class Result: @pytest.fixture(scope="session") -def sphinx_base_dir(tmp_path_factory: TempPathFactory) -> Path: - return tmp_path_factory.mktemp("testing_dir") +def sphinx_base_dir(tmp_path_factory: TempPathFactory, pytestconfig) -> Path: + """Create base directory for testing - either 
temporary or persistent cache""" + disable_cache = pytestconfig.getoption("--disable-cache") + + if disable_cache: + # Use persistent cache directory for local development + temp_dir = tmp_path_factory.mktemp("testing_dir") + print(f"[blue]Using temporary directory: {temp_dir}[/blue]") + return temp_dir + else: + CACHE_DIR.mkdir(parents=True, exist_ok=True) + print(f"[green]Using persistent cache directory: {CACHE_DIR}[/green]") + return CACHE_DIR def get_current_git_commit(curr_path: Path): @@ -130,6 +139,36 @@ def get_current_git_commit(curr_path: Path): return result.stdout.strip() +def filter_repos(repo_filter: str | None) -> list[ConsumerRepo]: + """Filter repositories based on command line argument""" + if not repo_filter: + return REPOS_TO_TEST + + requested_repos = [name.strip() for name in repo_filter.split(",")] + filtered_repos = [] + + for repo in REPOS_TO_TEST: + if repo.name in requested_repos: + filtered_repos.append(repo) + requested_repos.remove(repo.name) + + # Warn about any repos that weren't found + if requested_repos: + available_names = [repo.name for repo in REPOS_TO_TEST] + print(f"[yellow]Warning: Unknown repositories: {requested_repos}[/yellow]") + print(f"[yellow]Available repositories: {available_names}[/yellow]") + + # If no valid repos were found but filter was provided, return all repos + # This prevents accidentally running zero tests due to typos + if not filtered_repos and repo_filter: + print( + f"[red]No valid repositories found in filter, running all repositories instead[/red]" + ) + return REPOS_TO_TEST + + return filtered_repos + + def replace_bazel_dep_with_local_override(module_content: str) -> str: """ """ @@ -165,19 +204,35 @@ def replace_bazel_dep_with_git_override( return modified_content -def parse_bazel_output(BR: BuildOutput) -> BuildOutput: +def strip_ansi_codes(text: str) -> str: + """Remove ANSI escape sequences from text""" + ansi_escape = re.compile(r"\x1b\[[0-9;]*m") + return ansi_escape.sub("", text) + + +def 
parse_bazel_output(BR: BuildOutput, pytestconfig) -> BuildOutput: err_lines = BR.stderr.splitlines() split_warnings = [x for x in err_lines if "WARNING: " in x] warning_dict: dict[str, list[str]] = defaultdict(list) + if pytestconfig.get_verbosity() >= 2: + if os.getenv("CI"): + print("[DEBUG] Raw warnings in CI:") + for i, warning in enumerate(split_warnings): + print(f"[DEBUG] Warning {i}: {repr(warning)}") + for raw_warning in split_warnings: + # In the CLI we seem to have some ansi codes in the warnings. Need to strip those + clean_warning = strip_ansi_codes(raw_warning).strip() + logger = "[NO SPECIFIC LOGGER]" - file_and_warning = raw_warning - # If this is the case we have a specific logger => therefore parsing it - if raw_warning.endswith("]"): - tmp_split_warning = raw_warning.split() - logger = tmp_split_warning[-1].upper() # [score_metamodel] - file_and_warning = raw_warning.replace(logger, "").rstrip() + file_and_warning = clean_warning + + if clean_warning.endswith("]"): + tmp_split_warning = clean_warning.split() + logger = tmp_split_warning[-1].upper() + file_and_warning = clean_warning.replace(logger, "").rstrip() + warning_dict[logger].append(file_and_warning) BR.warnings = warning_dict return BR @@ -256,7 +311,6 @@ def analyze_build_success(BR: BuildOutput) -> tuple[bool, str]: Rules: - '[NO SPECIFIC LOGGER]' warnings are always ignored - - '[SCORE_METAMODEL]' warnings are ignored only if metamodel_changed is True """ # Unsure if this is good, as sometimes the returncode is 1 but it should still go through? 
@@ -332,19 +386,61 @@ def print_result_table(results: list[Result]): print(table) +def stream_subprocess_output(cmd: str, repo_name: str): + """Stream subprocess output in real-time for maximum verbosity""" + print(f"[green]Streaming output for: {cmd}[/green]") + + process = subprocess.Popen( + cmd.split(), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, # Merge stderr into stdout + universal_newlines=True, + bufsize=1, + ) + + # Stream output line by line + output_lines = [] + for line in iter(process.stdout.readline, ""): + if line: + print(line.rstrip()) # Print immediately + output_lines.append(line) + + process.stdout.close() + return_code = process.wait() + + return BuildOutput( + returncode=return_code, + stdout="".join(output_lines), + stderr="", # All merged into stdout + ) + + def run_cmd( cmd: str, results: list[Result], repo_name: str, local_or_git: str, pytestconfig ) -> tuple[list[Result], bool]: - out = subprocess.run(cmd.split(), capture_output=True, text=True) + verbosity = pytestconfig.get_verbosity() + + if verbosity >= 3: + # Level 3 (-vvv): Stream output in real-time + BR = stream_subprocess_output(cmd, repo_name) + else: + # Level 0-2: Capture output and parse later + out = subprocess.run(cmd.split(), capture_output=True, text=True) + BR = BuildOutput( + returncode=out.returncode, + stdout=str(out.stdout), + stderr=str(out.stderr), + ) - BR = BuildOutput( - returncode=out.returncode, - stdout=str(out.stdout), - stderr=str(out.stderr), - ) - BR_parsed = parse_bazel_output(BR) + # Parse warnings (only needed for non-streaming mode) + if verbosity < 3: + BR = parse_bazel_output(BR, pytestconfig) + else: + # For streaming mode, we can't parse warnings from stderr easily + # since everything was merged to stdout and already printed + BR.warnings = {} - is_success, reason = print_final_result(BR_parsed, repo_name, cmd, pytestconfig) + is_success, reason = print_final_result(BR, repo_name, cmd, pytestconfig) results.append( Result( @@ -363,12 
+459,18 @@ def run_test_commands(): pass -def setup_test_environment(sphinx_base_dir): +def setup_test_environment(sphinx_base_dir, pytestconfig): """Set up the test environment and return necessary paths and metadata.""" os.chdir(sphinx_base_dir) curr_path = Path(__file__).parent git_root = find_git_root(curr_path) + verbosity = pytestconfig.get_verbosity() + + if verbosity >= 2: + print(f"[DEBUG] curr_path: {curr_path}") + print(f"[DEBUG] git_root: {git_root}") + if git_root is None: assert False, "Git root was none" @@ -376,18 +478,67 @@ def setup_test_environment(sphinx_base_dir): gh_url = get_github_base_url(git_root) current_hash = get_current_git_commit(curr_path) + if verbosity >= 2: + print(f"[DEBUG] gh_url: {gh_url}") + print(f"[DEBUG] current_hash: {current_hash}") + print( + f"[DEBUG] Working directory has uncommitted changes: {has_uncommitted_changes(curr_path)}" + ) + # Create symlink for local docs-as-code docs_as_code_dest = sphinx_base_dir / "docs_as_code" + if docs_as_code_dest.exists() or docs_as_code_dest.is_symlink(): + # Remove existing symlink/directory to recreate it + if docs_as_code_dest.is_symlink(): + docs_as_code_dest.unlink() + if verbosity >= 2: + print(f"[DEBUG] Removed existing symlink: {docs_as_code_dest}") + elif docs_as_code_dest.is_dir(): + import shutil + + shutil.rmtree(docs_as_code_dest) + if verbosity >= 2: + print(f"[DEBUG] Removed existing directory: {docs_as_code_dest}") + docs_as_code_dest.symlink_to(git_root) - return curr_path, git_root, gh_url, current_hash + if verbosity >= 2: + print(f"[DEBUG] Symlink created: {docs_as_code_dest} -> {git_root}") + + return gh_url, current_hash -def prepare_repo_overrides(repo_name, git_url, current_hash, gh_url): +def has_uncommitted_changes(path: Path) -> bool: + """Check if there are uncommitted changes in the git repo.""" + result = subprocess.run( + ["git", "status", "--porcelain"], + capture_output=True, + text=True, + cwd=path, + ) + return bool(result.stdout.strip()) + + +def 
prepare_repo_overrides(repo_name, git_url, current_hash, gh_url, use_cache=True): """Clone repo and prepare both local and git overrides.""" - # Clone the repository - subprocess.run(["git", "clone", git_url], check=True, capture_output=True) - os.chdir(repo_name) + repo_path = Path(repo_name) + + if not use_cache and repo_path.exists(): + print(f"[green]Using cached repository: {repo_name}[/green]") + # Update the existing repo + os.chdir(repo_name) + subprocess.run(["git", "fetch", "origin"], check=True, capture_output=True) + subprocess.run( + ["git", "reset", "--hard", "origin/main"], check=True, capture_output=True + ) + else: + # Clone the repository fresh + if repo_path.exists(): + import shutil + + shutil.rmtree(repo_path) + subprocess.run(["git", "clone", git_url], check=True, capture_output=True) + os.chdir(repo_name) # Read original MODULE.bazel with open("MODULE.bazel", "r") as f: @@ -404,32 +555,44 @@ def prepare_repo_overrides(repo_name, git_url, current_hash, gh_url): # Updated version of your test loop def test_and_clone_repos_updated(sphinx_base_dir, pytestconfig): - # Setting up the Test Environment + # Get command line options from pytest config + repo_tests = pytestconfig.getoption("--repo") + disable_cache = pytestconfig.getoption("--disable-cache") + + repos_to_test = filter_repos(repo_tests) + + # Exit early if we don't find repos to test. + if not repos_to_test: + print("[red]No repositories to test after filtering![/red]") + return + print( + f"[green]Testing {len(repos_to_test)} repositories: {[r.name for r in repos_to_test]}[/green]" + ) # This might be hacky, but currently the best way I could solve the issue of going to the right place. - curr_path, git_root, gh_url, current_hash = setup_test_environment(sphinx_base_dir) + gh_url, current_hash = setup_test_environment(sphinx_base_dir, pytestconfig) overall_success = True # We capture the results for each command run. 
results: list[Result] = [] - for repo in REPOS_TO_TEST: - # ╭──────────────────────────────────────╮ + for repo in repos_to_test: + # ┌─────────────────────────────────────────┐ # │ Preparing the Repository for testing │ - # ╰──────────────────────────────────────╯ + # └─────────────────────────────────────────┘ module_local_override, module_git_override = prepare_repo_overrides( - repo.name, repo.git_url, current_hash, gh_url + repo.name, repo.git_url, current_hash, gh_url, use_cache=disable_cache ) overrides = {"local": module_local_override, "git": module_git_override} for type, override_content in overrides.items(): with open("MODULE.bazel", "w") as f: f.write(override_content) - # ╭──────────────────────────────────────╮ + # ┌─────────────────────────────────────────┐ # │ Running the different build & run │ # │ commands │ - # ╰──────────────────────────────────────╯ + # └─────────────────────────────────────────┘ for cmd in repo.commands: print_running_cmd(repo.name, cmd, f"{type.upper()} OVERRIDE") # Running through all 'cmds' specified with the local override @@ -440,9 +603,9 @@ def test_and_clone_repos_updated(sphinx_base_dir, pytestconfig): if not is_success: overall_success = False - # ╭──────────────────────────────────────╮ + # ┌─────────────────────────────────────────┐ # │ Running the different test commands │ - # ╰──────────────────────────────────────╯ + # └─────────────────────────────────────────┘ for test_cmd in repo.test_commands: # Running through all 'test cmds' specified with the local override print_running_cmd(repo.name, test_cmd, "LOCAL OVERRIDE") From 9719be9ca4bb43f9fc716f72da942f86d79fcebf Mon Sep 17 00:00:00 2001 From: Oliver Mueller Date: Mon, 28 Jul 2025 07:28:31 +0200 Subject: [PATCH 087/231] Upgrade bazel version to 8.3.0 (#181) Change-Id: I8bf1c35d8ef65143ce0ad9c3af88927c6635f271 Co-authored-by: Sameer Srivastava --- .bazelversion | 2 +- MODULE.bazel | 2 +- docs/BUILD | 2 +- examples/linking-both/BUILD | 2 +- 
examples/linking-release/BUILD | 2 +- pyproject.toml | 4 ++-- src/BUILD | 9 ++++++--- 7 files changed, 13 insertions(+), 10 deletions(-) diff --git a/.bazelversion b/.bazelversion index 18bb4182..2bf50aaf 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -7.5.0 +8.3.0 diff --git a/MODULE.bazel b/MODULE.bazel index 000db285..dc1ad0f1 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "0.4.4", + version = "0.5.0", compatibility_level = 0, ) diff --git a/docs/BUILD b/docs/BUILD index 3d553f31..fd68ac80 100644 --- a/docs/BUILD +++ b/docs/BUILD @@ -41,7 +41,7 @@ docs( "external_needs_info": [ { "base_url": "https://eclipse-score.github.io/process_description/main", - "json_path": "/score_process~/process/docs_needs_latest/_build/needs/needs.json", + "json_path": "/score_process+/process/docs_needs_latest/_build/needs/needs.json", "id_prefix": "process_", }, ], diff --git a/examples/linking-both/BUILD b/examples/linking-both/BUILD index c03e9017..43f998b4 100644 --- a/examples/linking-both/BUILD +++ b/examples/linking-both/BUILD @@ -39,7 +39,7 @@ docs( "external_needs_info": [ { "base_url": "https://eclipse-score.github.io/process_description/main", - "json_path": "/score_process~/process/docs_needs_latest/_build/needs/needs.json", + "json_path": "/score_process+/process/docs_needs_latest/_build/needs/needs.json", "id_prefix": "process_", }, ], diff --git a/examples/linking-release/BUILD b/examples/linking-release/BUILD index 33beef3c..0d68b578 100644 --- a/examples/linking-release/BUILD +++ b/examples/linking-release/BUILD @@ -29,7 +29,7 @@ docs( "external_needs_info": [ { "base_url": "https://eclipse-score.github.io/score/main", - "json_path": "/score_process~/process/docs_needs_latest/_build/needs/needs.json", + "json_path": "/score_process+/process/docs_needs_latest/_build/needs/needs.json", "id_prefix": "process_", }, ], diff --git a/pyproject.toml b/pyproject.toml index 35fde2ec..e509ec2e 100644 
--- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ # This file is at the root level, as it applies to all Python code, # not only to docs or to tools. [tool.pyright] -extends = "bazel-bin/docs/ide_support.runfiles/score_python_basics~/pyproject.toml" +extends = "bazel-bin/docs/ide_support.runfiles/score_python_basics+/pyproject.toml" exclude = [ "**/__pycache__", @@ -11,7 +11,7 @@ exclude = [ ] [tool.ruff] -extend = "bazel-bin/docs/ide_support.runfiles/score_python_basics~/pyproject.toml" +extend = "bazel-bin/docs/ide_support.runfiles/score_python_basics+/pyproject.toml" extend-exclude = [ "**/__pycache__", diff --git a/src/BUILD b/src/BUILD index 39be6f7e..4a61e6ad 100644 --- a/src/BUILD +++ b/src/BUILD @@ -108,9 +108,12 @@ py_library( filegroup( name = "docs_assets", - srcs = glob([ - "assets/**/*", - ]), + srcs = glob( + [ + "assets/**/*", + ], + allow_empty = True, + ), visibility = ["//visibility:public"], ) From d8aa801f34f6758d6dd27854c9767b7dc5d8abee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Tue, 29 Jul 2025 12:26:34 +0200 Subject: [PATCH 088/231] add FMEA/DFA & Needpie function (#183) --- .../score_metamodel/checks/standards.py | 48 ++++++++++ src/extensions/score_metamodel/metamodel.yaml | 92 ++++++++++--------- .../tests/rst/graph/test_metamodel_graph.rst | 34 +++---- .../rst/options/test_options_options.rst | 39 ++++---- .../score_metamodel/tests/test_standards.py | 81 +++++++++++++++- 5 files changed, 216 insertions(+), 78 deletions(-) diff --git a/src/extensions/score_metamodel/checks/standards.py b/src/extensions/score_metamodel/checks/standards.py index f3f7cc24..c0f6c66f 100644 --- a/src/extensions/score_metamodel/checks/standards.py +++ b/src/extensions/score_metamodel/checks/standards.py @@ -209,6 +209,54 @@ def my_pie_linked_standard_requirements( results.append(cnt_not_connected) +def my_pie_linked_standard_requirements_by_tag( + needs: list[NeedsInfoType], results: list[int], **kwargs: str | int | 
float +) -> None: + """ + Filter function used for 'needpie' directives. + `Needs` and `results` are automatically passed in by sphinx-needs. + The tag is put into `kwargs`. + + Only one(1) tag is allowed to be passed into this function + + Example usage: + .. needpie:: Linked Requirements ASPICE 4.0 MAN.5 + :labels: Linked, Not Linked + :legend: + :colors: LightSeaGreen, lightgray + :filter-func: score_metamodel.checks.standards.my_pie_linked_standard_requirements_by_tag(aspice40_man5) + + The call: + => score_metamodel.checks.standards.my_pie_linked_standard_requirements_by_tag(aspice40_man5) + would then pass 'aspice40_man5' as the arg1 and you have access to it then that way. + + NOTE:: There can not be any `.`(dots) in the tag passed into this function + + + Return: + The direct return of this function is None. Sphinx-needs will get the mutated `results` + list, and use this to display/generate the piechart. + + """ + count_linked = 0 + count_non_linked = 0 + + tag = str(kwargs["arg1"]) + assert len(kwargs) == 1, ( + "Can only provide one tag to `my_pie_linked_standard_requirements_by_tag`" + ) + + compliance_req_needs = get_compliance_req_needs(needs) + for need in needs: + if tag in need["tags"]: + if need["id"] in compliance_req_needs: + count_linked += 1 + else: + count_non_linked += 1 + results.append(count_linked) + results.append(count_non_linked) + + def my_pie_linked_standard_workproducts( needs: list[NeedsInfoType], results: list[int], **kwargs: str | int | float ) -> None: diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 2716ee61..833ac332 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -621,85 +621,92 @@ needs_types: safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ - # Safety Analysis DFA + # DFA (Dependent Failure Analysis) feat_plat_saf_dfa: - title: DFA + title: Feature Dependent Failure Analysis prefix: 
feat_plat_saf_dfa__ mandatory_options: id: ^feat_plat_saf_dfa__[0-9a-z_]+$ - violation_id: ^.*$ - violation_cause: ^.*$ + failure_id: ^.*$ + failure_effect: ^.*$ sufficient: ^(yes|no)$ status: ^(valid|invalid)$ - optional_options: - sufficient: ^(yes|no)$ + content: ^[\s\S]+$ mandatory_links: - mitigates: ^(feat_req__.*|aou_req__.*)$ - verifies: ^feat_arc_sta__[0-9a-z_]*$ - optional_links: + violates: ^feat_arc_sta__[0-9a-z_]+$ + optional_options: mitigation_issue: ^https://github.com/.*$ - + optional_links: + mitigated_by: ^(feat_req__.*|aou_req__.*)$ feat_saf_dfa: - title: DFA + title: Feature Dependent Failure Analysis prefix: feat_saf_dfa__ mandatory_options: id: ^feat_saf_dfa__[0-9a-z_]+$ - violation_id: ^.*$ - violation_cause: ^.*$ + failure_id: ^.*$ + failure_effect: ^.*$ sufficient: ^(yes|no)$ status: ^(valid|invalid)$ + content: ^[\s\S]+$ mandatory_links: - mitigates: ^(feat_req__.*|aou_req__.*)$ - verifies: ^feat_arc_sta__[0-9a-z_]*$ - optional_links: + violates: ^feat_arc_sta__[0-9a-z_]+$ + optional_options: mitigation_issue: ^https://github.com/.*$ + optional_links: + mitigated_by: ^(feat_req__.*|aou_req__.*)$ comp_saf_dfa: - title: DFA + title: Component Dependent Failure Analysis prefix: comp_saf_dfa__ mandatory_options: id: ^comp_saf_dfa__[0-9a-z_]+$ - violation_id: ^.*$ - violation_cause: ^.*$ + failure_id: ^.*$ + failure_effect: ^.*$ sufficient: ^(yes|no)$ status: ^(valid|invalid)$ + content: ^[\s\S]+$ + optional_options: + mitigation_issue: ^https://github.com/.*$ mandatory_links: - mitigates: ^(comp_req__.*|aou_req__.*)$ - verifies: ^comp_arc_sta__[0-9a-z_]*$ + violates: ^comp_arc_sta__[0-9a-z_]+$ optional_links: - mitigation_issue: ^https://github.com/.*$ + mitigated_by: ^(comp_req__.*|aou_req__.*)$ - # # Safety Analysis FMEA + # FMEA (Failure Mode and Effects Analysis) feat_saf_fmea: - title: FMEA + title: Feature Failure Mode and Effects Analysis prefix: feat_saf_fmea__ mandatory_options: id: ^feat_saf_fmea__[0-9a-z_]+$ - violation_id: ^.*$ - 
violation_cause: ^.*$ + fault_id: ^.*$ + failure_effect: ^.*$ sufficient: ^(yes|no)$ status: ^(valid|invalid)$ + content: ^[\s\S]+$ + optional_options: + mitigation_issue: ^https://github.com/.*$ mandatory_links: - mitigates: ^(feat_req__.*|aou_req__.*)$ - verifies: ^feat_arc_dyn__[0-9a-z_]*$ + violates: ^feat_arc_dyn__[0-9a-z_]+$ optional_links: - mitigation_issue: ^https://github.com/.*$ + mitigated_by: ^(feat_req__.*|aou_req__.*)$ comp_saf_fmea: - title: FMEA + title: Component Failure Mode and Effects Analysis prefix: comp_saf_fmea__ mandatory_options: id: ^comp_saf_fmea__[0-9a-z_]+$ - violation_id: ^.*$ - violation_cause: ^.*$ + fault_id: ^.*$ + failure_effect: ^.*$ sufficient: ^(yes|no)$ status: ^(valid|invalid)$ + content: ^[\s\S]+$ + optional_options: + mitigation_issue: ^https://github.com/.*$ mandatory_links: - mitigates: ^(comp_req__.*|aou_req__.*)$ - verifies: ^comp_arc_dyn__[0-9a-z_]*$ + violates: ^comp_arc_dyn__[0-9a-z_]+$ optional_links: - mitigation_issue: ^https://github.com/.*$ + mitigated_by: ^(comp_req__.*|aou_req__.*)$ # Extra link types, which shall be available and allow need types to be linked to each other. # We use a dedicated linked type for each type of a connection, for instance from @@ -779,13 +786,13 @@ incoming: includes outgoing: included by - mitigates: - incoming: mitigated by - outgoing: mitigates + mitigated_by: + incoming: mitigates + outgoing: mitigated_by - verifies: - incoming: verified by - outgoing: verifies + violates: + incoming: violated_by + outgoing: violates ############################################################## # Graph Checks # The graph checks focus on the relation of the needs and their attributes. 
@@ -836,4 +843,5 @@ graph_checks: include: feat_saf_fmea, comp_saf_fmea, feat_plat_saf_dfa, feat_saf_dfa, comp_saf_dfa condition: safety == ASIL_B check: - mitigates: safety != QM + mitigated_by: safety != QM + diff --git a/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst b/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst index 49c51f07..fd41c82a 100644 --- a/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst +++ b/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst @@ -80,7 +80,7 @@ :id: feat_saf_dfa__child__10 :safety: ASIL_B :status: valid - :mitigates: feat_req__parent__QM + :mitigated_by: feat_req__parent__QM .. Positive Test: Linked to a mitigation that is equal to the safety level of the analysed item. #EXPECT-NOT: feat_saf_dfa__child__11: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. @@ -89,7 +89,7 @@ :id: feat_saf_dfa__child__11 :safety: ASIL_B :status: valid - :mitigates: feat_req__parent__ASIL_B + :mitigated_by: feat_req__parent__ASIL_B .. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. @@ -99,7 +99,7 @@ :id: comp_saf_dfa__child__13 :safety: ASIL_B :status: valid - :mitigates: feat_req__parent__QM + :mitigated_by: feat_req__parent__QM .. Positive Test: Linked to a mitigation that is equal to the safety level of the analysed item. #EXPECT-NOT: comp_saf_dfa__child__14: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. @@ -108,26 +108,26 @@ :id: comp_saf_dfa__child__14 :safety: ASIL_B :status: valid - :mitigates: feat_req__parent__ASIL_B + :mitigated_by: feat_req__parent__ASIL_B .. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. -#EXPECT: feat_plat_saf_dfa__child__16: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. 
+#EXPECT: feat_saf_dfa__child__16: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. -.. feat_plat_saf_dfa:: Child requirement 16 - :id: feat_plat_saf_dfa__child__16 +.. feat_saf_dfa:: Child requirement 16 + :id: feat_saf_dfa__child__16 :safety: ASIL_B :status: valid - :mitigates: feat_req__parent__QM + :mitigated_by: feat_req__parent__QM .. Positive Test: Linked to a mitigation that is equal to the safety level of the analysed item. -#EXPECT-NOT: feat_plat_saf_dfa__child__17: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. +#EXPECT-NOT: feat_saf_dfa__child__17: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. -.. feat_plat_saf_dfa:: Child requirement 17 - :id: feat_plat_saf_dfa__child__17 +.. feat_saf_dfa:: Child requirement 17 + :id: feat_saf_dfa__child__17 :safety: ASIL_B :status: valid - :mitigates: feat_req__parent__ASIL_B + :mitigated_by: feat_req__parent__ASIL_B .. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. @@ -137,7 +137,7 @@ :id: feat_saf_fmea__child__19 :safety: ASIL_B :status: valid - :mitigates: feat_req__parent__QM + :mitigated_by: feat_req__parent__QM .. Positive Test: Linked to a mitigation that is equal to the safety level of the analysed item. #EXPECT-NOT: feat_saf_fmea__child__20: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. @@ -146,7 +146,7 @@ :id: feat_saf_fmea__child__20 :safety: ASIL_B :status: valid - :mitigates: feat_req__parent__ASIL_B + :mitigated_by: feat_req__parent__ASIL_B .. Positive Test: Linked to a mitigation that is higher to the safety level of the analysed item. @@ -156,7 +156,7 @@ :id: feat_saf_fmea__child__21 :safety: QM :status: valid - :mitigates: feat_req__parent__ASIL_B + :mitigated_by: feat_req__parent__ASIL_B .. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. 
#EXPECT: comp_saf_fmea__child__22: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. @@ -165,7 +165,7 @@ :id: comp_saf_fmea__child__22 :safety: ASIL_B :status: valid - :mitigates: feat_req__parent__QM + :mitigated_by: feat_req__parent__QM .. Positive Test: Linked to a mitigation that is equal to the safety level of the analysed item. #EXPECT-NOT: comp_saf_fmea__child__23: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. @@ -174,5 +174,5 @@ :id: comp_saf_fmea__child__23 :safety: ASIL_B :status: valid - :mitigates: feat_req__parent__ASIL_B + :mitigated_by: feat_req__parent__ASIL_B diff --git a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst index 98449927..a3cd9c07 100644 --- a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst +++ b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst @@ -96,24 +96,24 @@ :sufficient: no -#EXPECT: feat_plat_saf_dfa__test__bad_7.sufficient (QM): does not follow pattern `^(yes|no)$`. +#EXPECT: feat_saf_dfa__test__bad_7.sufficient (QM): does not follow pattern `^(yes|no)$`. -.. feat_plat_saf_dfa:: This is a test - :id: feat_plat_saf_dfa__test__bad_7 +.. feat_saf_dfa:: This is a test + :id: feat_saf_dfa__test__bad_7 :sufficient: QM -#EXPECT-NOT: feat_plat_saf_dfa__test__good_8.sufficient (yes): does not follow pattern `^(yes|no)$`. +#EXPECT-NOT: feat_saf_dfa__test__good_8.sufficient (yes): does not follow pattern `^(yes|no)$`. -.. feat_plat_saf_dfa:: This is a test - :id: feat_plat_saf_dfa__test__8 +.. feat_saf_dfa:: This is a test + :id: feat_saf_dfa__test__8 :sufficient: yes -#EXPECT-NOT: feat_plat_saf_dfa__test__good_9.sufficient (no): does not follow pattern `^(yes|no)$`. +#EXPECT-NOT: feat_saf_dfa__test__good_9.sufficient (no): does not follow pattern `^(yes|no)$`. -.. 
feat_plat_saf_dfa:: This is a test - :id: feat_plat_saf_dfa__test__9 +.. feat_saf_dfa:: This is a test + :id: feat_saf_dfa__test__9 :sufficient: no @@ -176,25 +176,28 @@ +.. + This Test can not be tested at the moment without enabeling that optional checks are also linked. + TODO: Re-enable this check .. Negative Test: Linked to a non-allowed requirement type. -#EXPECT: feat_saf_fmea__child__25.mitigates (['comp_req__child__ASIL_B']): does not follow pattern `^(feat_req__.*|aou_req__.*)$`. - -.. feat_saf_fmea:: Child requirement 25 - :id: feat_saf_fmea__child__25 - :safety: ASIL_B - :status: valid - :mitigates: comp_req__child__ASIL_B +.. #EXPECT: feat_saf_fmea__child__25.mitigated_by (['comp_req__child__ASIL_B']): does not follow pattern `^(feat_req__.*|aou_req__.*)$`. +.. +.. .. feat_saf_fmea:: Child requirement 25 +.. :id: feat_saf_fmea__child__25 +.. :safety: ASIL_B +.. :status: valid +.. :mitigated_by: comp_req__child__ASIL_B .. Negative Test: Linked to a non-allowed requirement type. -#EXPECT: feat_saf_fmea__child__26.verifies (['comp_req__child__ASIL_B']): does not follow pattern `^feat_arc_dyn__[0-9a-z_]*$`. +#EXPECT: feat_saf_fmea__child__26.violates (['comp_req__child__ASIL_B']): does not follow pattern `^feat_arc_dyn__[0-9a-z_]+$`. .. 
feat_saf_fmea:: Child requirement 26 :id: feat_saf_fmea__child__26 :safety: ASIL_B :status: valid - :verifies: comp_req__child__ASIL_B + :violates: comp_req__child__ASIL_B diff --git a/src/extensions/score_metamodel/tests/test_standards.py b/src/extensions/score_metamodel/tests/test_standards.py index 6cb1438e..670f3cb7 100644 --- a/src/extensions/score_metamodel/tests/test_standards.py +++ b/src/extensions/score_metamodel/tests/test_standards.py @@ -14,7 +14,7 @@ # from unittest.mock import Mock # from sphinx.application import Sphinx - +import pytest from src.extensions.score_metamodel.checks import standards from src.extensions.score_metamodel.tests import need # ,fake_check_logger @@ -872,3 +872,82 @@ def test_get_workproducts(self): assert need_1 in result.values() assert need_2 not in result.values() + + def test_positive_case_mixed_linked_and_unlinked(self): + """Test case where some needs with the tag are linked and some are not.""" + # Setup mock needs data + needs = [ + { + "id": "std_req__aspice40_MAN-5_REQ_001", + "tags": ["aspice40_man5", "other_tag"], + "type": "gd_requirement", + }, + { + "id": "std_req__aspice40_MAN-5_REQ_002", + "tags": ["aspice40_man5"], + "type": "std_req", + }, + { + "id": "std_req__REQ_003_test", + "tags": ["different_tag"], + "type": "std_req", + }, + { + "id": "stkh_req__aspice40_MAN-5_REQ_004", + "tags": ["aspice40_man5"], + "type": "stkh_req", + }, + { + "id": "COMP_001", + "tags": [], + "type": "gd_req", + "complies": [ + "std_req__aspice40_MAN-5_REQ_002", + "std_req__aspice40_MAN-5_REQ_001", + ], + }, + ] + + results = [] + standards.my_pie_linked_standard_requirements_by_tag( + needs, results, arg1="aspice40_man5" + ) + + # Should find 3 needs with tag aspice40_man5 + # REQ_001 and REQ_002 are linked (in compliance), REQ_004 is not linked + assert results == [2, 1] # [count_linked, count_non_linked] + + def test_negative_case_no_needs_with_tag(self): + """Test case where no needs have the specified tag.""" + needs = [ + 
{"id": "REQ_001", "tags": ["other_tag"], "type": "gd_requirement"}, + {"id": "REQ_002", "tags": ["different_tag"], "type": "gd_process"}, + { + "id": "COMP_001", + "tags": [], + "type": "gd_compliance", + "complies": ["REQ_001"], + }, + ] + + results = [] + standards.my_pie_linked_standard_requirements_by_tag( + needs, results, arg1="nonexistent_tag" + ) + + # No needs found with the tag + assert results == [0, 0] # [count_linked, count_non_linked] + + def test_assert_multiple_kwargs(self): + """Test case that triggers the assertion error for multiple kwargs.""" + needs = [{"id": "REQ_001", "tags": ["test_tag"], "type": "gd_requirement"}] + + results = [] + # Test if our assert works + with pytest.raises( + AssertionError, + match="Can only provide one tag to `my_pie_linked_standard_requirements_by_tag`", + ): + standards.my_pie_linked_standard_requirements_by_tag( + needs, results, arg1="test_tag", arg2="test_test_tag" + ) From 055bf29defb7fb5f50bd0503580ced9ebc6d6ac1 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Tue, 29 Jul 2025 13:46:18 +0200 Subject: [PATCH 089/231] Cleanup docs and fix error reporting (#184) * Cleanup docs and fix error reporting --- .gitignore | 4 +- .vscode/settings.json | 35 ++++- BUILD | 1 - MODULE.bazel | 8 +- docs/BUILD | 10 -- docs/how-to-integrate/example/index.rst | 56 -------- .../example/testing/index.rst | 42 ------ docs/how-to-integrate/getting_started.md | 131 ------------------ docs/how-to-integrate/index.rst | 17 --- docs/how-to-use/index.rst | 12 -- docs/how-to/commands.md | 15 ++ docs/{how-to-use => how-to}/faq.md | 30 ++-- docs/how-to/index.rst | 11 ++ docs/how-to/setup.md | 78 +++++++++++ docs/index.rst | 20 +-- docs/internals/benchmark_results.md | 58 ++++++++ .../extensions/data_flow.png | Bin .../extensions/extension_guide.md | 0 .../extensions/header_service.md | 4 +- .../extensions/index.rst | 0 .../extensions/metamodel.md | 0 .../extensions/rst_filebased_testing.md | 0 .../extensions/source_code_linker.md | 0 
docs/internals/index.rst | 10 ++ docs/product/index.rst | 60 -------- .../{product => requirements}/capabilities.md | 2 +- docs/requirements/index.rst | 11 ++ .../process_overview.rst | 0 .../requirements.rst | 8 +- examples/README.md | 70 ---------- examples/linking-both/BUILD | 50 ------- examples/linking-both/conf.py | 54 -------- examples/linking-both/index.rst | 52 ------- examples/linking-both/testing/test.rst | 43 ------ examples/linking-latest/BUILD | 39 ------ examples/linking-latest/conf.py | 54 -------- examples/linking-latest/index.rst | 35 ----- examples/linking-release/BUILD | 40 ------ examples/linking-release/conf.py | 54 -------- examples/linking-release/index.rst | 29 ---- examples/simple/BUILD | 59 -------- examples/simple/conf.py | 54 -------- examples/simple/index.rst | 35 ----- src/README.md | 7 +- src/extensions/score_metamodel/__init__.py | 7 +- .../generate_source_code_links_json.py | 4 - src/incremental.py | 3 +- 47 files changed, 257 insertions(+), 1055 deletions(-) delete mode 100644 docs/how-to-integrate/example/index.rst delete mode 100644 docs/how-to-integrate/example/testing/index.rst delete mode 100644 docs/how-to-integrate/getting_started.md delete mode 100644 docs/how-to-integrate/index.rst delete mode 100644 docs/how-to-use/index.rst create mode 100644 docs/how-to/commands.md rename docs/{how-to-use => how-to}/faq.md (75%) create mode 100644 docs/how-to/index.rst create mode 100644 docs/how-to/setup.md create mode 100644 docs/internals/benchmark_results.md rename docs/{product => internals}/extensions/data_flow.png (100%) rename docs/{product => internals}/extensions/extension_guide.md (100%) rename docs/{product => internals}/extensions/header_service.md (95%) rename docs/{product => internals}/extensions/index.rst (100%) rename docs/{product => internals}/extensions/metamodel.md (100%) rename docs/{product => internals}/extensions/rst_filebased_testing.md (100%) rename docs/{product => 
internals}/extensions/source_code_linker.md (100%) create mode 100644 docs/internals/index.rst delete mode 100644 docs/product/index.rst rename docs/{product => requirements}/capabilities.md (98%) create mode 100644 docs/requirements/index.rst rename docs/{product => requirements}/process_overview.rst (100%) rename docs/{product => requirements}/requirements.rst (99%) delete mode 100644 examples/README.md delete mode 100644 examples/linking-both/BUILD delete mode 100644 examples/linking-both/conf.py delete mode 100644 examples/linking-both/index.rst delete mode 100644 examples/linking-both/testing/test.rst delete mode 100644 examples/linking-latest/BUILD delete mode 100644 examples/linking-latest/conf.py delete mode 100644 examples/linking-latest/index.rst delete mode 100644 examples/linking-release/BUILD delete mode 100644 examples/linking-release/conf.py delete mode 100644 examples/linking-release/index.rst delete mode 100644 examples/simple/BUILD delete mode 100644 examples/simple/conf.py delete mode 100644 examples/simple/index.rst diff --git a/.gitignore b/.gitignore index 9d875ae9..8ec9ef6d 100644 --- a/.gitignore +++ b/.gitignore @@ -9,7 +9,7 @@ user.bazelrc # Ruff .ruff_cache -# docs:incremental and docs:ide_support build artifacts +# docs build artifacts /_build* # Vale - editorial style guide @@ -20,6 +20,6 @@ styles/ .envrc # Python -.venv_docs +.venv* __pycache__/ /.coverage diff --git a/.vscode/settings.json b/.vscode/settings.json index dec63f2f..477fe16e 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -42,5 +42,38 @@ "isosae", "stkh", "workproduct" - ] + ], + + + // Enable port forwarding for preview if working on remote workstation + "remote.autoForwardPorts": true, + "remote.autoForwardPortsSource": "process", + + // Esbonio 0.x (Current) + // see https://github.com/swyddfa/esbonio/blob/0.x/docs/lsp/getting-started.rst + // and https://github.com/swyddfa/esbonio/blob/0.x/docs/lsp/editors/vscode/_configuration.rst + 
"esbonio.server.pythonPath": "${workspaceFolder}/.venv_docs/bin/python", + "esbonio.sphinx.srcDir": "${workspaceFolder}/docs", + "esbonio.sphinx.confDir": "${workspaceFolder}/docs", + "esbonio.sphinx.buildDir": "${workspaceFolder}/_build", + "esbonio.server.logLevel": "info", + // Do not auto-install. We'll use the one in the venv. + "esbonio.server.installBehavior": "nothing", + // + // + // Esbonio 1.x (Preview) + "esbonio.sphinx.pythonCommand": [ + ".venv_docs/bin/python" + ], + "esbonio.sphinx.buildCommand": [ + "docs", + "_build", + "-T", // show details in case of errors in extensions + "--jobs", + "auto", + "--conf-dir", + "docs" + ], + // default is "error", which doesn't show anything. + "esbonio.logging.level": "warning", } diff --git a/BUILD b/BUILD index 951bdbf3..121235d7 100644 --- a/BUILD +++ b/BUILD @@ -18,7 +18,6 @@ package(default_visibility = ["//visibility:public"]) copyright_checker( name = "copyright", srcs = [ - "examples", "src", "//:BUILD", "//:MODULE.bazel", diff --git a/MODULE.bazel b/MODULE.bazel index dc1ad0f1..2661c653 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -88,13 +88,11 @@ http_file( ) # Provides, pytest & venv -bazel_dep(name = "score_python_basics", version = "0.3.3") +bazel_dep(name = "score_python_basics", version = "0.3.4") # Checker rule for CopyRight checks/fixes bazel_dep(name = "score_cr_checker", version = "0.3.1") - -# This is only needed to build the examples. 
- -# Grab dash bazel_dep(name = "score_dash_license_checker", version = "0.1.1") + +# docs dependency bazel_dep(name = "score_process", version = "1.0.4") diff --git a/docs/BUILD b/docs/BUILD index fd68ac80..8da3d561 100644 --- a/docs/BUILD +++ b/docs/BUILD @@ -23,16 +23,6 @@ load("//:docs.bzl", "docs") docs( conf_dir = "docs", docs_targets = [ - { - "suffix": "latest", # latest main branch documentation build - "external_needs_info": [ - { - "base_url": "https://eclipse-score.github.io/process_description/main", - "json_url": "https://eclipse-score.github.io/process_description/main/needs.json", - "id_prefix": "process_", - }, - ], - }, { "suffix": "release", # The version imported from MODULE.bazel "target": [ diff --git a/docs/how-to-integrate/example/index.rst b/docs/how-to-integrate/example/index.rst deleted file mode 100644 index ea417124..00000000 --- a/docs/how-to-integrate/example/index.rst +++ /dev/null @@ -1,56 +0,0 @@ -.. - # ******************************************************************************* - # Copyright (c) 2024 Contributors to the Eclipse Foundation - # - # See the NOTICE file(s) distributed with this work for additional - # information regarding copyright ownership. - # - # This program and the accompanying materials are made available under the - # terms of the Apache License Version 2.0 which is available at - # https://www.apache.org/licenses/LICENSE-2.0 - # - # SPDX-License-Identifier: Apache-2.0 - # ******************************************************************************* - -.. _example: - - -======= -Example -======= - -This is a rendered example of the 'examples/linking-both' folder using the `docs` tool. - -.. 
stkh_req:: TestTitle - :id: stkh_req__index__test_requirement - :status: valid - :safety: QM - :security: YES - :rationale: A simple requirement we need to enable a documentation build - :reqtype: Functional - - Some content to make sure we also can render this - This is a link to an external need inside the 'score' documentation. - :need:`PROCESS_gd_req__req__attr_uid`. - Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ - - - -.. tool_req:: Some Title - :id: tool_req__example__some_title - :security: YES - :safety: ASIL_B - :satisfies: PROCESS_gd_req__req__attr_uid - :status: invalid - - With this requirement we can check if the removal of the prefix is working correctly. - It should remove id_prefix (PROCESS _) as it's defined inside the BUILD file and remove it before it checks the leftover value - against the allowed defined regex in the metamodel - Note: The ID is different here as the 'folder structure' is as well - - -.. toctree:: - :maxdepth: 1 - :titlesonly: - - Subfolder example diff --git a/docs/how-to-integrate/example/testing/index.rst b/docs/how-to-integrate/example/testing/index.rst deleted file mode 100644 index fa023543..00000000 --- a/docs/how-to-integrate/example/testing/index.rst +++ /dev/null @@ -1,42 +0,0 @@ -.. # ******************************************************************************* - # Copyright (c) 2025 Contributors to the Eclipse Foundation - # - # See the NOTICE file(s) distributed with this work for additional - # information regarding copyright ownership. 
- # - # This program and the accompanying materials are made available under the - # terms of the Apache License Version 2.0 which is available at - # https://www.apache.org/licenses/LICENSE-2.0 - # - # SPDX-License-Identifier: Apache-2.0 - # ******************************************************************************* - - -Inside a folder -================= -This example will help catch things and bugs when rst's are defined inside a folder. - -.. stkh_req:: TestTitle - :id: stkh_req__testing__test_requirement - :status: valid - :safety: QM - :security: YES - :rationale: A simple requirement we need to enable a documentation build - :reqtype: Functional - - Some content to make sure we also can render this. - This is a link to an external need inside the 'score' documentation. - :need:`PROCESS_gd_req__req__attr_uid` - Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ - - -.. tool_req:: Some Title - :id: tool_req__testing__some_title - :security: YES - :safety: ASIL_B - :satisfies: PROCESS_gd_req__req__attr_uid - :status: invalid - - With this requirement we can check if the removal of the prefix is working correctly. - It should remove id_prefix (PROCESS _) as it's defined inside the BUILD file and remove it before it checks the leftover value - against the 'allowed' defined regex in the metamodel diff --git a/docs/how-to-integrate/getting_started.md b/docs/how-to-integrate/getting_started.md deleted file mode 100644 index a8ff191d..00000000 --- a/docs/how-to-integrate/getting_started.md +++ /dev/null @@ -1,131 +0,0 @@ -(getting_started)= -# Using Docs-As-Code - - -A Bazel module providing tools and extensions to enable and simplify documentation building via Sphinx - -## Overview - -This module allows you to easily integrate Sphinx documentation generation into your Bazel build system. It provides a collection of utilities and extensions specifically designed to enhance documentation capabilities. 
- -## Features - -- Seamless integration with Bazel build system -- Custom Sphinx extensions for enhanced documentation -- Support for PlantUML diagrams -- Source code linking capabilities -- Metamodel validation and checks -- Custom layouts and themes -- Header service for consistent documentation styling - -## Getting Started - -### Installation - -Add the module to your `MODULE.bazel` file: - -```starlark -bazel_dep(name = "score_docs_as_code", version = "0.2.7") -``` - -And make sure to also add the S-core Bazel registry to your `.bazelrc` file - -```starlark -common --registry=https://raw.githubusercontent.com/eclipse-score/bazel_registry/main/ -common --registry=https://bcr.bazel.build -``` - -______________________________________________________________________ - -### Basic Usage - -#### 1. Import the `docs()` macro in your BUILD file: - -```python -load("@score_docs_as_code//docs.bzl", "docs") - -docs( - conf_dir = "", - source_dir = "", - docs_targets = [ - { - # For more detailed explanation look at the 'docs_targets' section - "suffix": "", # This creates the normal 'incremental' and 'docs' target - }, - ], - source_files_to_scan_for_needs_links = [ - # Note: you can add file groups, globs, or entire targets here. - "" - ], -) -``` - -#### 2. Adapt your conf.py if needed - -```python -# ... -extensions = [ - "sphinx_design", - "sphinx_needs", - "sphinxcontrib.plantuml", - "score_plantuml", - "score_metamodel", - "score_draw_uml_funcs", - "score_source_code_linker", - "score_layout", -] -# ... -``` - -Make sure that your conf.py imports all of the extensions you want to enable. - - -#### 3. Run a documentation build: - -```bash -bazel run //path/to/BUILD-file:incremental_latest # documentation at '_build/' -bazel build //path/to/BUILD-file:docs_latest # documentation at 'bazel-bin/ -``` - -#### 4. Access your documentation at - -- `_build/` for incremental -- `bazel-bin/bazel-bin//docs/_build/html` - -
-
- -> ### *For the full example as well as more complex ones, check out the {doc}`example ` - ---- - -### Available Targets - -Using the `docs` macro enables multiple targets which are now useable. - -| Target Name | What it does | How to execute | -|---------------|-----------------------------------------------------------|-----------------| -| docs | Builds documentation in sandbox | `bazel build` | -| incremental | Builds documentation incrementally (faster) | `bazel run` | -| live_preview | Creates a live_preview of the documentation viewable in a local server | `bazel run` | -| ide_support | Creates virtual environment under '.venv_docs' | `bazel run` | -| `html` | Filegroup that exposes the generated HTML files | `bazel build //docs:html` | -| `html_files` | Prepares a flattened version of the HTML output for packaging | `bazel build //docs:html_files` | -| `github_pages` | Creates a `.tar` archive from the HTML output (ready for deployment) | `bazel build //docs:github_pages` | - - -> For each entry in `docs_targets`, these targets are suffixed accordingly (e.g. `docs_api`, `html_api`, `github_pages_api`). 
-______________________________________________________________________ - -## Configuration Options - -The `docs()` macro accepts the following arguments: - -| Parameter | Description | Required | Default | -|-----------|-------------|----------|---------| -| `conf_dir` | Path to the 'conf.py' containing folder | No | 'docs' | -| `source_dir` | Documentation source files (RST, MD) | No | 'docs' | -| `build_dir_for_incremental` | Output folder for the incremental build | No | '\_build' | -| `docs_targets` | List of dictionaries which allows multi-repo setup | Yes | - | -| `source_files_to_scan_for_needs_links` | List of targets,globs,filegroups that the 'source_code_linker' should parse | No | `[]` | -| `visibility` | Bazel visibility | No | `None` | diff --git a/docs/how-to-integrate/index.rst b/docs/how-to-integrate/index.rst deleted file mode 100644 index d88cd3bc..00000000 --- a/docs/how-to-integrate/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. _how-to-integrate: - -How to Integrate -================== - - -Here we'll document how to integrate the docs-as-code tooling into your S-CORE repository. - -For now here are some :ref:`example ` files to get you started. - -See also :doc:`getting_started`. - -.. toctree:: - :hidden: - - example/index - getting_started diff --git a/docs/how-to-use/index.rst b/docs/how-to-use/index.rst deleted file mode 100644 index 656b3ce1..00000000 --- a/docs/how-to-use/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. _how-to-use: - -How To Use -========== - -Interesting Links: -* https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html - -.. toctree:: - :hidden: - - faq diff --git a/docs/how-to/commands.md b/docs/how-to/commands.md new file mode 100644 index 00000000..b96fced2 --- /dev/null +++ b/docs/how-to/commands.md @@ -0,0 +1,15 @@ +# Commands + +⚠️ Only valid for docs-as-code v1.x.x. 
+ +| Target | What it does | +| --------------------------- | ---------------------------------------------------------------------- | +| `bazel run //:docs` | Builds documentation | +| `bazel run //:live_preview` | Creates a live_preview of the documentation viewable in a local server | +| `bazel run //:ide_support` | Sets up a Python venv for esbonio (Remember to restart VS Code!) | + +## Internal targets (do not use directly) + +| Target | What it does | +| --------------------------- | --------------------------- | +| `bazel build //:needs_json` | Creates a 'needs.json' file | diff --git a/docs/how-to-use/faq.md b/docs/how-to/faq.md similarity index 75% rename from docs/how-to-use/faq.md rename to docs/how-to/faq.md index eb68a69d..11c2f3b4 100644 --- a/docs/how-to-use/faq.md +++ b/docs/how-to/faq.md @@ -11,7 +11,7 @@ https://www.apache.org/licenses/LICENSE-2.0 SPDX-License-Identifier: Apache-2.0 *************************************************************************** --> -# docs-as-code FAQ +# FAQ *docs-as-code is the S-CORE tool for building documentation, defining requirements and verifying compliance.* @@ -22,39 +22,29 @@ docs-as-code and its usage. ## Why is docs-as-code so slow? - -If you are experiencing slow performance, you might be using the deprecated `docs:docs` -target. Please try one of the following solutions: - - `bazel run //docs:incremental` (typically takes 5-15 seconds per iteration and - provides metamodel warnings on the command line) - - `bazel run //docs:live_preview` (runs continuously in the background and provides - metamodel warnings on the command line) - -Note: In some repositories, you may need to append `_release` to the target name, e.g., -`bazel run //docs:incremental_release`. - - +The performance of docs-as-code has significantly improved in version 1.0.0 and later. ## IDE support (auto completion, metamodel checks, preview, LSP capabilities) -Currently, IDE support for docs-as-code is limited. 
Improving this is on our roadmap, -but not a primary focus at the moment. **Which might be a major oversight on our side.** +Since 1.0.0 IDE support works via esbonio. You'll need to install the esbonio extension +for your IDE (e.g., VSCode, PyCharm, etc.). Your repository must also be configured for +esbonio (settings.json). -In the meantime, we recommend using the live preview feature: `bazel run -//docs:live_preview`. This provides immediate metamodel feedback (although only on the -console) and IDE-agnostic preview capabilities. +If this does not work, please use the live preview feature: `bazel run //:live_preview`. +This provides immediate metamodel feedback (although only on the console) and +IDE-agnostic preview capabilities. ### Esbonio Known issues: -* Dependencies are not available. We'll address this by dropping support for "latest" - targets and pinning all dependencies to specific versions via Bazel. * Python is required at startup, which is a problem for any Python-based LSP. We are working to improve this by providing a devcontainer with Python preinstalled. Additionally, we have submitted a feature request for Esbonio to handle Python installation. + For now please run `bazel run //:ide_support` and restart VS Code. + ### uBc diff --git a/docs/how-to/index.rst b/docs/how-to/index.rst new file mode 100644 index 00000000..a7c8236f --- /dev/null +++ b/docs/how-to/index.rst @@ -0,0 +1,11 @@ +.. _how-to: + +How To +====== + +.. toctree:: + :maxdepth: 1 + + faq + commands + setup diff --git a/docs/how-to/setup.md b/docs/how-to/setup.md new file mode 100644 index 00000000..b82fd5b2 --- /dev/null +++ b/docs/how-to/setup.md @@ -0,0 +1,78 @@ +(getting_started)= +# Setup + +⚠️ Only valid for docs-as-code v1.x.x. + +## Overview + +docs-as-code allows you to easily integrate Sphinx documentation generation into your +Bazel build system. It provides a collection of utilities and extensions specifically +designed to enhance documentation capabilities in S-CORE. 
+ +## Features + +- Seamless integration with Bazel build system +- S-CORE process compliance +- Support for PlantUML diagrams +- Source code linking capabilities +- S-CORE layouts and themes + +## Installation + +### 1. /MODULE.bazel file + +Add the module to your `MODULE.bazel` file: + +```starlark +bazel_dep(name = "score_docs_as_code", version = "0.2.7") +``` + +And make sure to also add the S-core Bazel registry to your `.bazelrc` file + +```starlark +common --registry=https://raw.githubusercontent.com/eclipse-score/bazel_registry/main/ +common --registry=https://bcr.bazel.build +``` + +______________________________________________________________________ + +### 2. /BUILD file + + +```starlark +load("@score_docs_as_code//docs.bzl", "docs") + +docs( + source_dir = "", + data = [ + "@other_repo:needs_json", # Optional, if you have dependencies + ], +) +``` + + +#### Configuration Options + +The `docs()` macro accepts the following arguments: + +| Parameter | Description | Required | +|-----------|-------------|----------| +| `source_dir` | Directory of documentation source files (RST, MD) | Yes | +| `data` | List of `needs_json` targets that should be included in the documentation| No | + + +### 3. Copy conf.py + +Copy the `conf.py` file from the `docs-as-code` module to your `source_dir`. + + +#### 4. Run a documentation build: + + +```bash +bazel run //:docs +``` + +#### 5. Access your documentation at + +`/_build/index.html` diff --git a/docs/index.rst b/docs/index.rst index de9ce06c..c00b1d14 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -11,7 +11,7 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -Project Documentation +Docs-As-Code ===================== Welcome to the docs-as-code documentation, it is organized into several key sections. @@ -22,22 +22,22 @@ Welcome to the docs-as-code documentation, it is organized into several key sect .. 
grid-item-card:: - **How to Integrate** + :ref:`How to ` ^^^ - Learn how to integrate this docs-as-code tooling into your S-CORE repository. + Learn how to integrate and use docs-as-code. .. grid-item-card:: - How to Use + :ref:`Internals ` ^^^ - Learn how to write documentation using the docs-as-code approach in S-CORE. + How does docs-as-code work? This section provides an overview of the architecture and design decisions behind the tooling. .. grid-item-card:: - :ref:`Product documentation ` + :ref:`Requirements ` ^^^ - Find the product documentation for docs-as-code, including tool requirements and architecture. + The official tool requirements for docs-as-code. .. dropdown:: Sitemap @@ -46,6 +46,6 @@ Welcome to the docs-as-code documentation, it is organized into several key sect :includehidden: :titlesonly: - how-to-integrate/index - how-to-use/index - product/index + how-to/index + internals/index + requirements/index diff --git a/docs/internals/benchmark_results.md b/docs/internals/benchmark_results.md new file mode 100644 index 00000000..0cb03ffb --- /dev/null +++ b/docs/internals/benchmark_results.md @@ -0,0 +1,58 @@ +# Bazel Mini Benchmark Results +**Runs per Scenario:** 3 + + +## 🖥️ Test Machine Specs + +| Component | Details | +|----------------|----------------------| +| **CPU** | i7-13850HX | +| **RAM** | 65GB | +| **Storage** | 1TB ssd | +| **GPU** | NVIDIA RTX A1000 6GB | +| **OS** | WSL2 (on Windows 11) | +| **Bazel** | 7.4.0 | + +## Executed In Repo + +Repository = [process description](https://github.com/eclipse-score/process_description) (Version 06b3c952b) + +## Explanation of terms + +* Ultra Cold Start = `bazel clean --expunge && rm -r _build` +* Cold Start = `bazel clean && rm -r _build` +* Small Change = `one line change in process/index.rst` +* Cached = `no change` + +--- + +## Benchmark 1: `bazel run //process:incremental_latest` + +| Scenario | Run 1 | Run 2 | Run 3 | Average | 
+|------------------|---------------|---------------|---------------|-----------| +| Ultra Cold Start | 40.285s | 40.226s | 38.568s | **39.693s** | +| Cold Start | 10.760s | 10.317s | 10.573s | **10.550s** | +| Small Change | 6.250s | 5.664s | 5.820s | **5.911s** | +| Cached | 5.403s | 5.396s | 5.348s | **5.382s** | + +--- + +## Benchmark 2: `bazel build //process:docs_needs_latest` + +| Scenario | Run 1 | Run 2 | Run 3 | Average | +|------------------|---------------|---------------|---------------|-----------| +| Ultra Cold Start | 32.961s | 33.461s | 32.613s | **33.012s** | +| Cold Start | 13.704s | 12.909s | 12.902s | **13.172s** | +| Small Change | 3.732s | 3.697s | 3.742s | **3.724s** | +| Cached | 4.916s | 3.686s | 3.678s | **4.093s** | + +--- + +## Benchmark 3: `bazel run //process:live_preview_latest` + +| Scenario | Run 1 | Run 2 | Run 3 | Average | +|------------------|---------------|---------------|---------------|-----------| +| Ultra Cold Start | 29.896s | 31.930s | 28.955s | **30.260s** | +| Cold Start | 8.634s | 8.789s | 8.251s | **8.558s** | +| Small Change | 3.529s | 3.519s | 3.638s | **3.562s** | +| Cached | 3.598s | 3.434s | 3.436s | **3.489s** | diff --git a/docs/product/extensions/data_flow.png b/docs/internals/extensions/data_flow.png similarity index 100% rename from docs/product/extensions/data_flow.png rename to docs/internals/extensions/data_flow.png diff --git a/docs/product/extensions/extension_guide.md b/docs/internals/extensions/extension_guide.md similarity index 100% rename from docs/product/extensions/extension_guide.md rename to docs/internals/extensions/extension_guide.md diff --git a/docs/product/extensions/header_service.md b/docs/internals/extensions/header_service.md similarity index 95% rename from docs/product/extensions/header_service.md rename to docs/internals/extensions/header_service.md index 70f8bbc8..938069de 100644 --- a/docs/product/extensions/header_service.md +++ b/docs/internals/extensions/header_service.md @@ 
-27,13 +27,13 @@ GITHUB_REPOSITORY: Github repository / ## Execution The document generation has to be executed as follows: - GH_TOKEN=$GH_TOKEN bazel run //docs:incremental + GH_TOKEN=$GH_TOKEN bazel run //:docs Sphinx cannot access the environment variables when started via Bazel build. If extraction method **Merge commit info** is used the document generation can be executed as follows: - bazel run //docs:incremental + bazel run //:docs ## Usage diff --git a/docs/product/extensions/index.rst b/docs/internals/extensions/index.rst similarity index 100% rename from docs/product/extensions/index.rst rename to docs/internals/extensions/index.rst diff --git a/docs/product/extensions/metamodel.md b/docs/internals/extensions/metamodel.md similarity index 100% rename from docs/product/extensions/metamodel.md rename to docs/internals/extensions/metamodel.md diff --git a/docs/product/extensions/rst_filebased_testing.md b/docs/internals/extensions/rst_filebased_testing.md similarity index 100% rename from docs/product/extensions/rst_filebased_testing.md rename to docs/internals/extensions/rst_filebased_testing.md diff --git a/docs/product/extensions/source_code_linker.md b/docs/internals/extensions/source_code_linker.md similarity index 100% rename from docs/product/extensions/source_code_linker.md rename to docs/internals/extensions/source_code_linker.md diff --git a/docs/internals/index.rst b/docs/internals/index.rst new file mode 100644 index 00000000..51fd3ba6 --- /dev/null +++ b/docs/internals/index.rst @@ -0,0 +1,10 @@ +.. _internals: + +Internals +========= + +.. toctree:: + :maxdepth: 1 + + extensions/index + benchmark_results diff --git a/docs/product/index.rst b/docs/product/index.rst deleted file mode 100644 index e6f54bfd..00000000 --- a/docs/product/index.rst +++ /dev/null @@ -1,60 +0,0 @@ -.. 
# ******************************************************************************* - # Copyright (c) 2025 Contributors to the Eclipse Foundation - # - # See the NOTICE file(s) distributed with this work for additional - # information regarding copyright ownership. - # - # This program and the accompanying materials are made available under the - # terms of the Apache License Version 2.0 which is available at - # https://www.apache.org/licenses/LICENSE-2.0 - # - # SPDX-License-Identifier: Apache-2.0 - # ******************************************************************************* - -.. _product: - - -============ -Docs-as-Code -============ - -.. grid:: 1 1 3 3 - :class-container: score-grid - - .. grid-item-card:: - - Features - ^^^ - High level view of docs-as-code :ref:`capabilities `. - - - .. grid-item-card:: - - Requirements - ^^^ - Detailed list of docs-as-code tool :ref:`requirements`. - - - .. grid-item-card:: - - Information about Extensions - ^^^ - Head over to our extensions to learn about what we offer and how to configure,extend or integrate them. - :ref:`See our extensions here ` - - .. grid-item-card:: - - Process requirements overview - ^^^ - See the :ref:`process_overview` table for a process requirements overview. - - -.. toctree:: - :maxdepth: 1 - :caption: Contents: - :hidden: - - capabilities - requirements - extensions/index - process_overview diff --git a/docs/product/capabilities.md b/docs/requirements/capabilities.md similarity index 98% rename from docs/product/capabilities.md rename to docs/requirements/capabilities.md index 528fc06d..c4e4fcf9 100644 --- a/docs/product/capabilities.md +++ b/docs/requirements/capabilities.md @@ -1,6 +1,6 @@ (capabilities)= -# 📘 S-CORE Docs-as-Code – Capabilities +# Capabilities - A High Level Overview This document outlines the key capabilities of the S-CORE docs-as-code tooling. 
Core capabilities of [Sphinx](https://www.sphinx-doc.org/) and [sphinx-needs](https://sphinx-needs.readthedocs.io/) are assumed and extended with S-CORE-specific conventions and infrastructure. diff --git a/docs/requirements/index.rst b/docs/requirements/index.rst new file mode 100644 index 00000000..1c730e0e --- /dev/null +++ b/docs/requirements/index.rst @@ -0,0 +1,11 @@ +.. _requirements: + +Requirements +============ + +.. toctree:: + :maxdepth: 1 + + capabilities + process_overview + requirements diff --git a/docs/product/process_overview.rst b/docs/requirements/process_overview.rst similarity index 100% rename from docs/product/process_overview.rst rename to docs/requirements/process_overview.rst diff --git a/docs/product/requirements.rst b/docs/requirements/requirements.rst similarity index 99% rename from docs/product/requirements.rst rename to docs/requirements/requirements.rst index 3035ecc0..3fd79994 100644 --- a/docs/product/requirements.rst +++ b/docs/requirements/requirements.rst @@ -1,7 +1,7 @@ -.. _requirements: +.. _tool_requirements: ================================= -Requirements (Process Compliance) +Tool Requirements ================================= 📈 Status @@ -79,7 +79,7 @@ This section provides an overview of current process requirements and their clar * A prefix indicating the need type (e.g. `feature__`) * A middle part matching the hierarchical structure of the need: * For requirements: a portion of the feature tree or a component acronym - * For architecture elements: the structural element (e.g. some part of the feature tree, component acronym) + * For architecture elements: the structural element (e.g. a part of the feature tree, component acronym) * Additional descriptive text to ensure human readability @@ -89,7 +89,7 @@ This section provides an overview of current process requirements and their clar .. 
tool_req:: Enforces title wording rules :id: tool_req__docs_common_attr_title - :implemented: YES + :implemented: YES :tags: Common Attributes :satisfies: PROCESS_gd_req__req__attr_title :parent_covered: NO: Can not ensure summary diff --git a/examples/README.md b/examples/README.md deleted file mode 100644 index f444bd2c..00000000 --- a/examples/README.md +++ /dev/null @@ -1,70 +0,0 @@ -## Examples - -These examples show how to use the 'docs' macro in order to build without outgoing links, establish links to a latest (main branch) or a (module) release. - - -| Folder | Description | -|-----------|-------------| -| `simple` | Build documentation without links to another sphinx documentations | -| `linking-latest` | Build documentation with links to another documentation via URL | -| `linking-release` | Build documentation with links to another documentation via MODULE import | - ---- -In order to enable linking against an imported Modules needs make sure you have imported it into the MODULE.bazel via -`bazel_dep(...)`. - -Then have a look how the BUILD file is setup, and mimic it with the changes needed for your specific case. -Underneath are some explanations regarding the different key-value pairs and their function. - -Here is a more general overview - -```python -load("@score_docs_as_code//docs.bzl", "docs") - -docs( - conf_dir = "", - source_dir = "", - docs_targets = [ - { - "suffix": "", # 'release' for example - "target": [""], # '@score_platform//docs:docs_needs - "external_needs_info": [ - { - "base_url": "", - "json_path/url": "", # local_path OR a URL - "version": "", - }, - ], - }, - ], - source_files_to_scan_for_needs_links = [ - # Note: you can add file groups, globs, or entire targets here. - "" - ], -) -``` - -`docs_targets` is a list of dictionaries, it accepts the following key-value pairs. 
- -| Parameter | Description | Required | Default | -|-----------|-------------|----------|---------| -| `suffix` | suffix that gets appended to target definitions. E.g. `release` | yes | '' | -| `target` | Target to be build/executed beforehand in order to build 'needs.json'. E.g. `@score_platform//docs:docs_needs` | No | [] | -| `external_needs_info` | List of dictionaries that contains all available builds | yes | - | -| `base_url` | URL of the documentation that external needs of the following json should point to | Yes | - | -| `json_path\json_url` | A local relative path or URL that points to the needs.json file | yes | '' | -| `id_prefix` | prefix that all exeternal ID's from this needs.json will get. Will be in UPPERCASE | No | '' | - -The `external_needs_info` is based on external needs, which can be explored more in detail [here](https://sphinx-needs.readthedocs.io/en/latest/configuration.html#needs-external-needs) - ---- - -The targets available in the examples are -```python -bazel build //examples/linking-release:docs_release -bazel run //examples/linking-release:incremental_release -bazel run //examples/linking-release:livew_preview_release -``` diff --git a/examples/linking-both/BUILD b/examples/linking-both/BUILD deleted file mode 100644 index 43f998b4..00000000 --- a/examples/linking-both/BUILD +++ /dev/null @@ -1,50 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. 
-# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* - -load("@aspect_rules_py//py:defs.bzl", "py_library") -load("//:docs.bzl", "docs") - -# Creates all documentation targets: -# - `docs:incremental` for building docs incrementally at runtime -# - `docs:live_preview` for live preview in the browser without an IDE -# - `docs:ide_support` for creating python virtualenv for IDE support -# - `docs:docs` for building documentation at build-time - -docs( - conf_dir = "examples/linking-both", - docs_targets = [ - { - "suffix": "latest", # latest main branch documentation build - "external_needs_info": [ - { - "base_url": "https://eclipse-score.github.io/process_description/main/", - "json_url": "https://eclipse-score.github.io/process_description/main/needs.json", - "id_prefix": "process_", - }, - ], - }, - { - "suffix": "release", # The version imported from MODULE.bazel - "target": ["@score_process//process:docs_needs_latest"], - "external_needs_info": [ - { - "base_url": "https://eclipse-score.github.io/process_description/main", - "json_path": "/score_process+/process/docs_needs_latest/_build/needs/needs.json", - "id_prefix": "process_", - }, - ], - }, - ], - source_dir = "examples/linking-both", - source_files_to_scan_for_needs_links = [], -) diff --git a/examples/linking-both/conf.py b/examples/linking-both/conf.py deleted file mode 100644 index 5862fb81..00000000 --- a/examples/linking-both/conf.py +++ /dev/null @@ -1,54 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. 
-# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* - -# Configuration file for the Sphinx documentation builder. -# -# For the full list of built-in configuration values, see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - - -# -- Project information ----------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information - -project = "Simple Example Project" -author = "S-CORE" -version = "0.1" - -# -- General configuration --------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration - - -extensions = [ - "sphinx_design", - "sphinx_needs", - "sphinxcontrib.plantuml", - "score_plantuml", - "score_metamodel", - "score_draw_uml_funcs", - "score_source_code_linker", - "score_layout", -] - -exclude_patterns = [ - # The following entries are not required when building the documentation via 'bazel - # build //docs:docs', as that command runs in a sandboxed environment. However, when - # building the documentation via 'bazel run //docs:incremental' or esbonio, these - # entries are required to prevent the build from failing. - "bazel-*", - ".venv_docs", -] - -templates_path = ["templates"] - -# Enable numref -numfig = True diff --git a/examples/linking-both/index.rst b/examples/linking-both/index.rst deleted file mode 100644 index b2dbbb76..00000000 --- a/examples/linking-both/index.rst +++ /dev/null @@ -1,52 +0,0 @@ -.. 
- # ******************************************************************************* - # Copyright (c) 2024 Contributors to the Eclipse Foundation - # - # See the NOTICE file(s) distributed with this work for additional - # information regarding copyright ownership. - # - # This program and the accompanying materials are made available under the - # terms of the Apache License Version 2.0 which is available at - # https://www.apache.org/licenses/LICENSE-2.0 - # - # SPDX-License-Identifier: Apache-2.0 - # ******************************************************************************* - - -.. toctree:: - :maxdepth: 1 - :glob: - - testing/test - - -Hello World -================= -This is a simple example of a documentation page using the `docs` tool. - -.. stkh_req:: TestTitle - :id: stkh_req__index__test_requirement - :status: valid - :safety: QM - :rationale: A simple requirement we need to enable a documentation build - :reqtype: Functional - - Some content to make sure we also can render this - This is a link to an external need inside the 'score' documentation. - :need:`PROCESS_gd_req__req__attr_uid` - Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ - - - -.. tool_req:: Some Title - :id: tool_req__index__some_title - :reqtype: Process - :security: YES - :safety: ASIL_B - :satisfies: PROCESS_gd_req__req__attr_uid - :status: invalid - - With this requirement we can check if the removal of the prefix is working correctly. - It should remove id_prefix (PROCESS _) as it's defined inside the BUILD file and remove it before it checks the leftover value - against the allowed defined regex in the metamodel - diff --git a/examples/linking-both/testing/test.rst b/examples/linking-both/testing/test.rst deleted file mode 100644 index e9d0e63a..00000000 --- a/examples/linking-both/testing/test.rst +++ /dev/null @@ -1,43 +0,0 @@ -.. 
- # ******************************************************************************* - # Copyright (c) 2024 Contributors to the Eclipse Foundation - # - # See the NOTICE file(s) distributed with this work for additional - # information regarding copyright ownership. - # - # This program and the accompanying materials are made available under the - # terms of the Apache License Version 2.0 which is available at - # https://www.apache.org/licenses/LICENSE-2.0 - # - # SPDX-License-Identifier: Apache-2.0 - # ******************************************************************************* - -Inside a folder -================= -This example will help catch things and bugs when rst's are defined inside a folder. - -.. stkh_req:: TestTitle - :id: stkh_req__testing__test_requirement - :status: valid - :safety: QM - :rationale: A simple requirement we need to enable a documentation build - :reqtype: Functional - - Some content to make sure we also can render this. - This is a link to an external need inside the 'score' documentation. - :need:`PROCESS_gd_req__req__attr_uid` - Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ - - -.. tool_req:: Some Title - :id: tool_req__testing__some_title - :reqtype: Process - :security: YES - :safety: ASIL_B - :satisfies: PROCESS_gd_req__req__attr_uid - :status: invalid - - With this requirement we can check if the removal of the prefix is working correctly. 
- It should remove id_prefix (PRCOESS _) as it's defined inside the BUILD file and remove it before it checks the leftover value - against the 'allowed' defined regex in the metamodel - diff --git a/examples/linking-latest/BUILD b/examples/linking-latest/BUILD deleted file mode 100644 index 8866b0a2..00000000 --- a/examples/linking-latest/BUILD +++ /dev/null @@ -1,39 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* - -load("@aspect_rules_py//py:defs.bzl", "py_library") -load("//:docs.bzl", "docs") - -# Creates all documentation targets: -# - `docs:incremental` for building docs incrementally at runtime -# - `docs:live_preview` for live preview in the browser without an IDE -# - `docs:ide_support` for creating python virtualenv for IDE support -# - `docs:docs` for building documentation at build-time - -docs( - conf_dir = "examples/linking-latest", - docs_targets = [ - { - "suffix": "latest", # latest main branch documentation build - "external_needs_info": [ - { - "base_url": "https://eclipse-score.github.io/process_description/main/", - "json_url": "https://eclipse-score.github.io/process_description/main/needs.json", - "id_prefix": "process_", - }, - ], - }, - ], - source_dir = "examples/linking-latest", - source_files_to_scan_for_needs_links = [], -) diff --git a/examples/linking-latest/conf.py b/examples/linking-latest/conf.py deleted file mode 100644 index 5862fb81..00000000 --- a/examples/linking-latest/conf.py +++ /dev/null @@ -1,54 
+0,0 @@ -# ******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* - -# Configuration file for the Sphinx documentation builder. -# -# For the full list of built-in configuration values, see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - - -# -- Project information ----------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information - -project = "Simple Example Project" -author = "S-CORE" -version = "0.1" - -# -- General configuration --------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration - - -extensions = [ - "sphinx_design", - "sphinx_needs", - "sphinxcontrib.plantuml", - "score_plantuml", - "score_metamodel", - "score_draw_uml_funcs", - "score_source_code_linker", - "score_layout", -] - -exclude_patterns = [ - # The following entries are not required when building the documentation via 'bazel - # build //docs:docs', as that command runs in a sandboxed environment. However, when - # building the documentation via 'bazel run //docs:incremental' or esbonio, these - # entries are required to prevent the build from failing. 
- "bazel-*", - ".venv_docs", -] - -templates_path = ["templates"] - -# Enable numref -numfig = True diff --git a/examples/linking-latest/index.rst b/examples/linking-latest/index.rst deleted file mode 100644 index 6213fac5..00000000 --- a/examples/linking-latest/index.rst +++ /dev/null @@ -1,35 +0,0 @@ -.. - # ******************************************************************************* - # Copyright (c) 2024 Contributors to the Eclipse Foundation - # - # See the NOTICE file(s) distributed with this work for additional - # information regarding copyright ownership. - # - # This program and the accompanying materials are made available under the - # terms of the Apache License Version 2.0 which is available at - # https://www.apache.org/licenses/LICENSE-2.0 - # - # SPDX-License-Identifier: Apache-2.0 - # ******************************************************************************* - -Hello World -================= -This is a simple example of a documentation page using the `docs` tool. - - -Hello World -================= -This is a simple example of a documentation page using the `docs` tool. - -.. stkh_req:: TestTitle - :id: stkh_req__docs__test_requirement - :status: valid - :safety: QM - :rationale: A simple requirement we need to enable a documentation build - :reqtype: Functional - - Some content to make sure we also can render this - This is a link to an external need inside the 'score' documentation. - :need:`PROCESS_gd_req__req__attr_uid` - Note how it starts with the defined prefix but in UPPERCASE. 
This comes from sphinx-needs, `see here `_ - diff --git a/examples/linking-release/BUILD b/examples/linking-release/BUILD deleted file mode 100644 index 0d68b578..00000000 --- a/examples/linking-release/BUILD +++ /dev/null @@ -1,40 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2024 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* - -# Creates all documentation targets: -# - `docs:incremental` for building docs incrementally at runtime -# - `docs:live_preview` for live preview in the browser without an IDE -# - `docs:ide_support` for creating python virtualenv for IDE support -# - `docs:docs` for building documentation at build-time - -load("@aspect_rules_py//py:defs.bzl", "py_library") -load("//:docs.bzl", "docs") - -docs( - conf_dir = "examples/linking-release", - docs_targets = [ - { - "suffix": "release", # The version imported from MODULE.bazel - "target": ["@score_process//process:docs_needs_latest"], - "external_needs_info": [ - { - "base_url": "https://eclipse-score.github.io/score/main", - "json_path": "/score_process+/process/docs_needs_latest/_build/needs/needs.json", - "id_prefix": "process_", - }, - ], - }, - ], - source_dir = "examples/linking-release", - source_files_to_scan_for_needs_links = [], -) diff --git a/examples/linking-release/conf.py b/examples/linking-release/conf.py deleted file mode 100644 index 5862fb81..00000000 --- a/examples/linking-release/conf.py +++ /dev/null @@ -1,54 +0,0 @@ -# 
******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* - -# Configuration file for the Sphinx documentation builder. -# -# For the full list of built-in configuration values, see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - - -# -- Project information ----------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information - -project = "Simple Example Project" -author = "S-CORE" -version = "0.1" - -# -- General configuration --------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration - - -extensions = [ - "sphinx_design", - "sphinx_needs", - "sphinxcontrib.plantuml", - "score_plantuml", - "score_metamodel", - "score_draw_uml_funcs", - "score_source_code_linker", - "score_layout", -] - -exclude_patterns = [ - # The following entries are not required when building the documentation via 'bazel - # build //docs:docs', as that command runs in a sandboxed environment. However, when - # building the documentation via 'bazel run //docs:incremental' or esbonio, these - # entries are required to prevent the build from failing. 
- "bazel-*", - ".venv_docs", -] - -templates_path = ["templates"] - -# Enable numref -numfig = True diff --git a/examples/linking-release/index.rst b/examples/linking-release/index.rst deleted file mode 100644 index 3cc89afc..00000000 --- a/examples/linking-release/index.rst +++ /dev/null @@ -1,29 +0,0 @@ -.. - # ******************************************************************************* - # Copyright (c) 2024 Contributors to the Eclipse Foundation - # - # See the NOTICE file(s) distributed with this work for additional - # information regarding copyright ownership. - # - # This program and the accompanying materials are made available under the - # terms of the Apache License Version 2.0 which is available at - # https://www.apache.org/licenses/LICENSE-2.0 - # - # SPDX-License-Identifier: Apache-2.0 - # ******************************************************************************* - -Hello World -================= -This is a simple example of a documentation page using the `docs` tool. - -.. stkh_req:: TestTitle - :id: stkh_req__docs__test_requirement - :status: valid - :safety: QM - :rationale: A simple requirement we need to enable a documentation build - :reqtype: Functional - - Some content to make sure we also can render this - This is a link to an external need inside the 'score' documentation. - :need:`PROCESS_gd_req__req__attr_uid` - Note how it starts with the defined prefix but in UPPERCASE. This comes from sphinx-needs, `see here `_ diff --git a/examples/simple/BUILD b/examples/simple/BUILD deleted file mode 100644 index 651a91cc..00000000 --- a/examples/simple/BUILD +++ /dev/null @@ -1,59 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2024 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. 
-# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* - -load("@aspect_rules_py//py:defs.bzl", "py_library") -load("//:docs.bzl", "docs") - -# Creates all documentation targets: -# - `docs:incremental` for building docs incrementally at runtime -# - `docs:live_preview` for live preview in the browser without an IDE -# - `docs:ide_support` for creating python virtualenv for IDE support -# - `docs:docs` for building documentation at build-time - -docs( - conf_dir = "examples/simple", - docs_targets = [ - { - "suffix": "", # local without external needs - }, - ], - source_dir = "examples/simple", - source_files_to_scan_for_needs_links = [], -) - -# ╭───────────────────────────────────────╮ -# │ This is commented out until local │ -# │ multi-repo testing is implemented │ -# ╰───────────────────────────────────────╯ - -# { -# "suffix": "release", # The version imported from MODULE.bazel -# "target": ["@score_platform//docs:docs"], -# "external_needs_info": [ -# { -# "base_url": "https://eclipse-score.github.io/score/pr-980/", -# "json_path": "/score_platform~/docs/docs/_build/html/needs.json", -# "version": "0.1", -# }, -# ], -# }, -# { -# "suffix": "latest", # latest main branch documentation build -# "external_needs_info": [ -# { -# "base_url": "https://eclipse-score.github.io/score/main/", -# "json_url": "https://maximiliansoerenpollak.github.io/score/needs.json", -# "version": "0.1", -# }, -# ], -# }, diff --git a/examples/simple/conf.py b/examples/simple/conf.py deleted file mode 100644 index fbef18a8..00000000 --- a/examples/simple/conf.py +++ /dev/null @@ -1,54 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2024 Contributors to the Eclipse 
Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* - -# Configuration file for the Sphinx documentation builder. -# -# For the full list of built-in configuration values, see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - - -# -- Project information ----------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information - -project = "Simple Example Project" -author = "S-CORE" -version = "0.1" - -# -- General configuration --------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration - - -extensions = [ - "sphinx_design", - "sphinx_needs", - "sphinxcontrib.plantuml", - "score_plantuml", - "score_metamodel", - "score_draw_uml_funcs", - "score_source_code_linker", - "score_layout", -] - -exclude_patterns = [ - # The following entries are not required when building the documentation via 'bazel - # build //docs:docs', as that command runs in a sandboxed environment. However, when - # building the documentation via 'bazel run //docs:incremental' or esbonio, these - # entries are required to prevent the build from failing. - "bazel-*", - ".venv_docs", -] - -templates_path = ["templates"] - -# Enable numref -numfig = True diff --git a/examples/simple/index.rst b/examples/simple/index.rst deleted file mode 100644 index 7856e574..00000000 --- a/examples/simple/index.rst +++ /dev/null @@ -1,35 +0,0 @@ -.. 
- # ******************************************************************************* - # Copyright (c) 2024 Contributors to the Eclipse Foundation - # - # See the NOTICE file(s) distributed with this work for additional - # information regarding copyright ownership. - # - # This program and the accompanying materials are made available under the - # terms of the Apache License Version 2.0 which is available at - # https://www.apache.org/licenses/LICENSE-2.0 - # - # SPDX-License-Identifier: Apache-2.0 - # ******************************************************************************* - -Hello World -================= -This is a simple example of a documentation page using the `docs` tool. - -.. stkh_req:: TestTitle - :id: stkh_req__docs__test_requirement - :status: valid - :safety: QM - :rationale: A simple requirement we need to enable a documentation build - :reqtype: Functional - - Some content to make sure we also can render this - - - -.. .. std_req:: External Link Test Req -.. :id: std_req__iso26262__testing -.. :status: valid -.. :links: gd_req__dynamic_diagram -.. -.. 
This is some test content diff --git a/src/README.md b/src/README.md index bad1f6af..2a0ba8a6 100644 --- a/src/README.md +++ b/src/README.md @@ -57,15 +57,14 @@ It should be treated as a 'get-started' guide, giving you all needed information ## score_docs_as_code Directory Architecture ``` -examples/ # Shows how to use doc-as-code tooling src/ ├── assets/ # Documentation styling (CSS) ├── decision_records/ # Architecture Decision Records (ADRs) ├── extensions/ # Custom Sphinx extensions │ └── score_metamodel/ -│ ├── checks/ # Sphinx-needs validation -│ └── tests/ # Extension test suite -└── templates/ # Documentation templates +│ ├── checks/ # Sphinx-needs validation +│ └── tests/ # Extension test suite +└── templates/ # HTML templates ``` diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index e52ce16f..565f11aa 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -15,9 +15,8 @@ import os import pkgutil from collections.abc import Callable -from pathlib import Path from dataclasses import dataclass, field -from typing import cast +from pathlib import Path from ruamel.yaml import YAML from sphinx.application import Sphinx @@ -341,7 +340,7 @@ def setup(app: Sphinx) -> dict[str, str | bool]: app.config.needs_reproducible_json = True app.config.needs_json_remove_defaults = True - app.connect("config-inited", parse_external_needs_sources) + _ = app.connect("config-inited", parse_external_needs_sources) discover_checks() @@ -354,7 +353,7 @@ def setup(app: Sphinx) -> dict[str, str | bool]: ), ) - app.connect("build-finished", _run_checks) + _ = app.connect("build-finished", _run_checks) return { "version": "0.1", diff --git a/src/extensions/score_source_code_linker/generate_source_code_links_json.py b/src/extensions/score_source_code_linker/generate_source_code_links_json.py index 90cd607e..347d5f36 100644 --- 
a/src/extensions/score_source_code_linker/generate_source_code_links_json.py +++ b/src/extensions/score_source_code_linker/generate_source_code_links_json.py @@ -176,7 +176,3 @@ def generate_source_code_links_json(search_path: Path, file: Path): # DEBUG: Workspace root is None # DEBUG: Current working directory is /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/sandbox/linux-sandbox/26/execroot/_main # DEBUG: Git root is /home/lla2hi/score/docs-as-code - -# TODO docu: -# docs:docs has no source code links -# external repositories have no source code links (to their code) diff --git a/src/incremental.py b/src/incremental.py index a5e4bdb5..16f18981 100644 --- a/src/incremental.py +++ b/src/incremental.py @@ -14,6 +14,7 @@ import argparse import logging import os +import sys from pathlib import Path import debugpy @@ -106,4 +107,4 @@ def get_env(name: str) -> str: ] ) else: - sphinx_main(base_arguments) + sys.exit(sphinx_main(base_arguments)) From 90bd22fd92403e5622925d153332601985d01dfc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Tue, 29 Jul 2025 16:04:40 +0200 Subject: [PATCH 090/231] remove the last "mitigates" (#185) --- src/extensions/score_metamodel/metamodel.yaml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 833ac332..7f08d439 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -382,8 +382,6 @@ needs_types: # req-Id: tool_req__docs_req_attr_testcov testcovered: ^(YES|NO)$ hash: ^.*$ - optional_links: - mitigates: ^.*$ tags: - requirement - requirement_excl_process @@ -622,11 +620,11 @@ needs_types: status: ^(valid|invalid)$ # DFA (Dependent Failure Analysis) - feat_plat_saf_dfa: + plat_saf_dfa: title: Feature Dependent Failure Analysis - prefix: feat_plat_saf_dfa__ + prefix: plat_saf_dfa__ mandatory_options: 
- id: ^feat_plat_saf_dfa__[0-9a-z_]+$ + id: ^plat_saf_dfa__[0-9a-z_]+$ failure_id: ^.*$ failure_effect: ^.*$ sufficient: ^(yes|no)$ @@ -840,7 +838,7 @@ graph_checks: # as the corresponding ASIL of the Feature or Component that is analyzed. saf_linkage_safety: needs: - include: feat_saf_fmea, comp_saf_fmea, feat_plat_saf_dfa, feat_saf_dfa, comp_saf_dfa + include: feat_saf_fmea, comp_saf_fmea, plat_saf_dfa, feat_saf_dfa, comp_saf_dfa condition: safety == ASIL_B check: mitigated_by: safety != QM From 28ad4eda5754b8416ac0e9b6fd1d9ad847d12916 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Wed, 30 Jul 2025 10:37:20 +0200 Subject: [PATCH 091/231] Upgrade to 1.0.0 (#187) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Major Changes to how docs-as-code is used Co-authored-by: Maximilian Sören Pollak --- .devcontainer/devcontainer.json | 2 +- .github/workflows/consumer_test.yml | 2 +- .github/workflows/docs.yml | 2 +- .github/workflows/format.yml | 2 +- .github/workflows/test.yml | 2 +- .vscode/settings.json | 2 +- BUILD | 11 +- MODULE.bazel | 4 +- README.md | 28 +-- docs.bzl | 196 ++++-------------- docs/BUILD | 41 ---- docs/conf.py | 9 +- docs/internals/benchmark_results.md | 6 +- docs/requirements/requirements.rst | 1 - pyproject.toml | 8 +- src/BUILD | 39 +--- src/extensions/score_layout/__init__.py | 10 +- src/extensions/score_metamodel/__init__.py | 21 +- .../score_metamodel/external_needs.py | 189 +++++++++++++++++ .../tests/test_external_needs.py | 72 +++++++ src/extensions/score_plantuml.py | 11 +- .../score_source_code_linker/__init__.py | 6 +- src/find_runfiles/__init__.py | 2 +- src/incremental.py | 15 +- src/requirements.txt | 12 +- src/tests/README.md | 16 +- src/tests/test_consumer.py | 21 +- 27 files changed, 387 insertions(+), 343 deletions(-) delete mode 100644 docs/BUILD create mode 100644 src/extensions/score_metamodel/external_needs.py create mode 100644 
src/extensions/score_metamodel/tests/test_external_needs.py diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index c3798082..bf388945 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -2,5 +2,5 @@ "name": "eclipse-s-core", "image": "ghcr.io/eclipse-score/devcontainer:latest", "initializeCommand": "mkdir -p ${localEnv:HOME}/.cache/bazel", - "updateContentCommand": "bazel run //docs:ide_support" + "updateContentCommand": "bazel run //:ide_support" } diff --git a/.github/workflows/consumer_test.yml b/.github/workflows/consumer_test.yml index 3295f1b6..08827c75 100644 --- a/.github/workflows/consumer_test.yml +++ b/.github/workflows/consumer_test.yml @@ -61,7 +61,7 @@ jobs: - name: Run Consumer tests id: consumer_tests run: | - bazel run //docs:ide_support + bazel run //:ide_support .venv_docs/bin/python -m pytest -s -v src/tests/ env: FORCE_COLOR: "1" diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 454b37f0..8ffcb83e 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -38,5 +38,5 @@ jobs: id-token: write with: - bazel-target: "//docs:incremental_release -- --github_user=${{ github.repository_owner }} --github_repo=${{ github.event.repository.name }}" + bazel-target: "//:docs -- --github_user=${{ github.repository_owner }} --github_repo=${{ github.event.repository.name }}" retention-days: 3 diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index a2fd8c00..05664bac 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -37,5 +37,5 @@ jobs: bazelisk-cache: true - name: Run formatting checks run: | - bazel run //docs:ide_support + bazel run //:ide_support bazel test //src:format.check diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e696d567..31ea1171 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -37,5 +37,5 @@ jobs: bazelisk-cache: true - name: Run test 
targets run: | - bazel run //docs:ide_support + bazel run //:ide_support bazel test //src/... diff --git a/.vscode/settings.json b/.vscode/settings.json index 477fe16e..9d8c9f45 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -68,7 +68,7 @@ "esbonio.sphinx.buildCommand": [ "docs", "_build", - "-T", // show details in case of errors in extensions + "-T", // show more details in case of errors "--jobs", "auto", "--conf-dir", diff --git a/BUILD b/BUILD index 121235d7..cb382a56 100644 --- a/BUILD +++ b/BUILD @@ -12,6 +12,7 @@ # ******************************************************************************* load("@score_cr_checker//:cr_checker.bzl", "copyright_checker") +load("//:docs.bzl", "docs") package(default_visibility = ["//visibility:public"]) @@ -27,7 +28,9 @@ copyright_checker( visibility = ["//visibility:public"], ) -exports_files([ - "MODULE.bazel", - "BUILD", -]) +docs( + data = [ + "@score_process//:needs_json", + ], + source_dir = "docs", +) diff --git a/MODULE.bazel b/MODULE.bazel index 2661c653..3b7236c9 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,8 +13,8 @@ module( name = "score_docs_as_code", - version = "0.5.0", - compatibility_level = 0, + version = "1.0.0-RC1", + compatibility_level = 1, ) ############################################################################### diff --git a/README.md b/README.md index 74b0c9bc..0f2d5762 100644 --- a/README.md +++ b/README.md @@ -2,44 +2,30 @@ Docs-as-code tooling for Eclipse S-CORE -## Overview - -The S-CORE docs Sphinx configuration and build code. +Full documentation is on [GitHub Pages](https://eclipse-score.github.io/docs-as-code/). > [!NOTE] > This repository offers a [DevContainer](https://containers.dev/). > For setting this up read [eclipse-score/devcontainer/README.md#inside-the-container](https://github.com/eclipse-score/devcontainer/blob/main/README.md#inside-the-container). 
-## Building documentation - -#### Run a documentation build: - -#### Integrate latest score main branch - -```bash -bazel run //docs:incremental_latest -``` - -#### Access your documentation at: - -- `_build/` for incremental +## Development of docs-as-code -#### Getting IDE support +### Getting IDE support for docs-as-code development -Create the virtual environment via `bazel run //docs:ide_support`.\ +Create the virtual environment via `bazel run //:ide_support`. If your IDE does not automatically ask you to activate the newly created environment you can activate it. - In VSCode via `ctrl+p` => `Select Python Interpreter` then select `.venv_docs/bin/python` -- In the terminal via `source .venv_docs/bin/activate` +- In the terminal via `. .venv_docs/bin/activate` -#### Format your documentation with: +### Format your documentation with: ```bash bazel test //src:format.check bazel run //src:format.fix ``` -#### Find & fix missing copyright +### Find & fix missing copyright ```bash bazel run //:copyright-check diff --git a/docs.bzl b/docs.bzl index 8d7cb482..ae2d7205 100644 --- a/docs.bzl +++ b/docs.bzl @@ -46,151 +46,72 @@ load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary", "sphinx_docs load("@rules_python//sphinxdocs:sphinx_docs_library.bzl", "sphinx_docs_library") load("@score_python_basics//:defs.bzl", "score_virtualenv") -sphinx_requirements = all_requirements + [ - "@score_docs_as_code//src:plantuml_for_python", - "@score_docs_as_code//src/extensions:score_plantuml", - "@score_docs_as_code//src/find_runfiles:find_runfiles", - "@score_docs_as_code//src/extensions/score_draw_uml_funcs:score_draw_uml_funcs", - "@score_docs_as_code//src/extensions/score_header_service:score_header_service", - "@score_docs_as_code//src/extensions/score_layout:score_layout", - "@score_docs_as_code//src/extensions/score_metamodel:score_metamodel", - "@score_docs_as_code//src/extensions/score_source_code_linker:score_source_code_linker", -] - -def 
docs(source_files_to_scan_for_needs_links = None, source_dir = "docs", conf_dir = "docs", build_dir_for_incremental = "_build", docs_targets = [], deps = []): +def docs(source_dir = "docs", data = [], deps = []): """ Creates all targets related to documentation. By using this function, you'll get any and all updates for documentation targets in one place. - Current restrictions: - * only callable from 'docs/BUILD' """ - # We are iterating over all provided 'targets' in order to allow for automatic generation of them without - # needing to modify the underlying 'docs.bzl' file. - for target in docs_targets: - suffix = "_" + target["suffix"] if target["suffix"] else "" - external_needs_deps = target.get("target", []) - external_needs_def = target.get("external_needs_info", []) - - sphinx_build_binary( - name = "sphinx_build" + suffix, - visibility = ["//visibility:public"], - data = ["@score_docs_as_code//src:docs_assets", "@score_docs_as_code//src:docs_as_code_py_modules"] + external_needs_deps, - deps = sphinx_requirements + deps, - ) - _incremental( - incremental_name = "incremental" + suffix, - live_name = "live_preview" + suffix, - conf_dir = conf_dir, - source_dir = source_dir, - build_dir = build_dir_for_incremental, - external_needs_deps = external_needs_deps, - external_needs_def = external_needs_def, - extra_dependencies = deps, - ) - _docs( - name = "docs" + suffix, - suffix = suffix, - format = "html", - external_needs_deps = external_needs_deps, - external_needs_def = external_needs_def, - ) - _docs( - name = "docs_needs" + suffix, - suffix = suffix, - format = "needs", - external_needs_deps = external_needs_deps, - external_needs_def = external_needs_def, - ) - - # Virtual python environment for working on the documentation (esbonio). - # incl. python support when working on conf.py and sphinx extensions. 
- # creates :ide_support target for virtualenv - _ide_support(deps) - - # creates 'needs.json' build target - -def _incremental(incremental_name = "incremental", live_name = "live_preview", source_dir = "docs", conf_dir = "docs", build_dir = "_build", extra_dependencies = list(), external_needs_deps = list(), external_needs_def = None): - """ - A target for building docs incrementally at runtime, incl live preview. - Args: - source_code_linker: The source code linker target to be used for linking source code to documentation. - source_code_links: The output from the source code linker. - source_dir: Directory containing the source files for documentation. - conf_dir: Directory containing the Sphinx configuration. - build_dir: Directory to output the built documentation. - extra_dependencies: Additional dependencies besides the centrally maintained "sphinx_requirements". - """ - - dependencies = sphinx_requirements + extra_dependencies + ["@rules_python//python/runfiles"] - - # Create description tags for the incremental targets. - call_path = native.package_name() - incremental_tag = "cli_help=Build documentation incrementally:\nbazel run //" + call_path + ":" + incremental_name - - if incremental_name == "incremental_latest": - incremental_tag = ( - "cli_help=Build documentation incrementally (use current main branch of imported docs repositories " + - "(e.g. 
process_description)):\n" + - "bazel run //" + call_path + ":incremental_latest" - ) - elif incremental_name == "incremental_release": - incremental_tag = ( - "cli_help=Build documentation incrementally (use release version imported in MODULE.bazel):\n" + - "bazel run //" + call_path + ":incremental_release" - ) + data = data + ["@score_docs_as_code//src:docs_assets"] + + deps = deps + all_requirements + [ + "@score_docs_as_code//src:plantuml_for_python", + "@score_docs_as_code//src/extensions:score_plantuml", + "@score_docs_as_code//src/find_runfiles:find_runfiles", + "@score_docs_as_code//src/extensions/score_draw_uml_funcs:score_draw_uml_funcs", + "@score_docs_as_code//src/extensions/score_header_service:score_header_service", + "@score_docs_as_code//src/extensions/score_layout:score_layout", + "@score_docs_as_code//src/extensions/score_metamodel:score_metamodel", + "@score_docs_as_code//src/extensions/score_source_code_linker:score_source_code_linker", + ] + + sphinx_build_binary( + name = "sphinx_build", + visibility = ["//visibility:private"], + data = data, + deps = deps, + ) py_binary( - name = incremental_name, + name = "docs", + tags = ["cli_help=Build documentation [run]"], srcs = ["@score_docs_as_code//src:incremental.py"], - deps = dependencies, - # TODO: Figure out if we need all dependencies as data here or not. 
- data = ["@score_docs_as_code//src:plantuml", "@score_docs_as_code//src:docs_assets"] + dependencies + external_needs_deps, + data = data, + deps = deps, env = { "SOURCE_DIRECTORY": source_dir, - "CONF_DIRECTORY": conf_dir, - "BUILD_DIRECTORY": build_dir, - "EXTERNAL_NEEDS_INFO": json.encode(external_needs_def), + "DATA": str(data), "ACTION": "incremental", }, - tags = [incremental_tag], ) py_binary( - name = live_name, + name = "live_preview", + tags = ["cli_help=Live preview documentation in the browser [run]"], srcs = ["@score_docs_as_code//src:incremental.py"], - deps = dependencies, - data = ["@score_docs_as_code//src:plantuml", "@score_docs_as_code//src:docs_assets"] + dependencies + external_needs_deps, + data = data, + deps = deps, env = { "SOURCE_DIRECTORY": source_dir, - "CONF_DIRECTORY": conf_dir, - "BUILD_DIRECTORY": build_dir, - "EXTERNAL_NEEDS_INFO": json.encode(external_needs_def), + "DATA": str(data), "ACTION": "live_preview", }, ) -def _ide_support(extra_dependencies): - call_path = native.package_name() score_virtualenv( name = "ide_support", + tags = ["cli_help=Create virtual environment (.venv_docs) for documentation support [run]"], venv_name = ".venv_docs", - reqs = sphinx_requirements + extra_dependencies, - tags = [ - "cli_help=Create virtual environment for documentation:\n" + - "bazel run //" + call_path + ":ide_support", - ], + reqs = deps, + # Add dependencies to ide_support, so esbonio has access to them. + data = data, ) -def _docs(name = "docs", suffix = "", format = "html", external_needs_deps = list(), external_needs_def = list()): - ext_needs_arg = "--define=external_needs_source=" + json.encode(external_needs_def) - - # Clean suffix used in all generated target names - target_suffix = "" if name == "docs" else "_" + name[len("docs"):] - + # creates 'needs.json' build target sphinx_docs( - name = name, + name = "needs_json", srcs = native.glob([ + # TODO: we do not need images etc to generate the json file. 
"**/*.png", "**/*.svg", "**/*.md", @@ -206,43 +127,16 @@ def _docs(name = "docs", suffix = "", format = "html", external_needs_deps = lis "**/*.csv", "**/*.inc", ], exclude = ["**/tests/*"], allow_empty = True), - config = ":conf.py", + config = ":" + source_dir + "/conf.py", extra_opts = [ "-W", "--keep-going", - ] + [ext_needs_arg], - formats = [ - format, + "-T", # show more details in case of errors + "--jobs", + "auto", + "--define=external_needs_source=" + str(data), ], - sphinx = ":sphinx_build" + suffix, - tags = [ - "manual", - ], - tools = [ - "@score_docs_as_code//src:plantuml", - "@score_docs_as_code//src:docs_assets", - ] + external_needs_deps, - visibility = ["//visibility:public"], - ) - - native.filegroup( - name = "assets" + target_suffix, - srcs = native.glob(["_assets/**"], allow_empty = True), - visibility = ["//visibility:public"], - ) - - native.filegroup( - name = "html" + target_suffix, - srcs = [":" + name], - visibility = ["//visibility:public"], - ) - - pkg_files( - name = "html_files" + target_suffix, - srcs = [":html" + target_suffix], - ) - - pkg_tar( - name = "github_pages" + target_suffix, - srcs = [":html_files" + target_suffix], + formats = ["needs"], + sphinx = ":sphinx_build", + tools = data, ) diff --git a/docs/BUILD b/docs/BUILD deleted file mode 100644 index 8da3d561..00000000 --- a/docs/BUILD +++ /dev/null @@ -1,41 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. 
-# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* - -load("@aspect_rules_py//py:defs.bzl", "py_library") -load("//:docs.bzl", "docs") - -# Creates all documentation targets: -# - `docs:incremental` for building docs incrementally at runtime -# - `docs:live_preview` for live preview in the browser without an IDE -# - `docs:ide_support` for creating python virtualenv for IDE support -# - `docs:docs` for building documentation at build-time - -docs( - conf_dir = "docs", - docs_targets = [ - { - "suffix": "release", # The version imported from MODULE.bazel - "target": [ - "@score_process//process:docs_needs_latest", - ], - "external_needs_info": [ - { - "base_url": "https://eclipse-score.github.io/process_description/main", - "json_path": "/score_process+/process/docs_needs_latest/_build/needs/needs.json", - "id_prefix": "process_", - }, - ], - }, - ], - source_dir = "docs", -) diff --git a/docs/conf.py b/docs/conf.py index bc919911..96074dce 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -21,6 +21,8 @@ # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information project = "Score Docs-as-Code" +project_url = "https://eclipse-score.github.io/docs-as-code/" +project_prefix = "DOCS_" author = "S-CORE" version = "0.1" @@ -44,11 +46,11 @@ exclude_patterns = [ # The following entries are not required when building the documentation via 'bazel - # build //docs:docs', as that command runs in a sandboxed environment. However, when - # building the documentation via 'bazel run //docs:incremental' or esbonio, these + # build //:docs', as that command runs in a sandboxed environment. 
However, when + # building the documentation via 'bazel run //:docs' or esbonio, these # entries are required to prevent the build from failing. "bazel-*", - ".venv_docs", + ".venv*", ] # Enable markdown rendering @@ -62,3 +64,4 @@ # Enable numref numfig = True +# needs_builder_filter = "" diff --git a/docs/internals/benchmark_results.md b/docs/internals/benchmark_results.md index 0cb03ffb..adcaa265 100644 --- a/docs/internals/benchmark_results.md +++ b/docs/internals/benchmark_results.md @@ -26,7 +26,7 @@ Repository = [process description](https://github.com/eclipse-score/process_desc --- -## Benchmark 1: `bazel run //process:incremental_latest` +## Benchmark 1: `bazel run //:docs` | Scenario | Run 1 | Run 2 | Run 3 | Average | |------------------|---------------|---------------|---------------|-----------| @@ -37,7 +37,7 @@ Repository = [process description](https://github.com/eclipse-score/process_desc --- -## Benchmark 2: `bazel build //process:docs_needs_latest` +## Benchmark 2: `bazel build //:needs_json` | Scenario | Run 1 | Run 2 | Run 3 | Average | |------------------|---------------|---------------|---------------|-----------| @@ -48,7 +48,7 @@ Repository = [process description](https://github.com/eclipse-score/process_desc --- -## Benchmark 3: `bazel run //process:live_preview_latest` +## Benchmark 3: `bazel run //:live_preview` | Scenario | Run 1 | Run 2 | Run 3 | Average | |------------------|---------------|---------------|---------------|-----------| diff --git a/docs/requirements/requirements.rst b/docs/requirements/requirements.rst index 3fd79994..192d46f7 100644 --- a/docs/requirements/requirements.rst +++ b/docs/requirements/requirements.rst @@ -117,7 +117,6 @@ This section provides an overview of current process requirements and their clar :id: tool_req__docs_common_attr_desc_wording :tags: Common Attributes :implemented: YES - :satisfies: PROCESS_gd_req__req__attr_desc_weak :parent_covered: YES diff --git a/pyproject.toml b/pyproject.toml index 
e509ec2e..eae04ccc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,21 +1,21 @@ # This file is at the root level, as it applies to all Python code, # not only to docs or to tools. [tool.pyright] -extends = "bazel-bin/docs/ide_support.runfiles/score_python_basics+/pyproject.toml" +extends = "bazel-bin/ide_support.runfiles/score_python_basics+/pyproject.toml" exclude = [ "**/__pycache__", "**/.*", "**/bazel-*", - "venv/**", + ".venv*/**", ] [tool.ruff] -extend = "bazel-bin/docs/ide_support.runfiles/score_python_basics+/pyproject.toml" +extend = "bazel-bin/ide_support.runfiles/score_python_basics+/pyproject.toml" extend-exclude = [ "**/__pycache__", "/.*", "bazel-*", - "venv/**", + ".venv*/**", ] diff --git a/src/BUILD b/src/BUILD index 4a61e6ad..2446069d 100644 --- a/src/BUILD +++ b/src/BUILD @@ -55,10 +55,10 @@ py_library( ) # In order to update the requirements, change the `requirements.txt` file and run: -# `bazel run //docs:requirements`. +# `bazel run //src:requirements`. # This will update the `requirements_lock.txt` file. # To upgrade all dependencies to their latest versions, run: -# `bazel run //docs:requirements -- --upgrade`. +# `bazel run //src:requirements -- --upgrade`. compile_pip_requirements( name = "requirements", srcs = [ @@ -71,41 +71,6 @@ compile_pip_requirements( ], ) -filegroup( - name = "html", - srcs = [":docs"], - output_group = "html", -) - -pkg_files( - name = "html_files", - srcs = [":html"], - strip_prefix = "html", - #renames={"html": ""}, -) - -pkg_tar( - name = "github-pages", - srcs = [":html_files"], -) - -# 'source_code_linker' needs all targets to be passed to it. 
-# This is a convenient gathering of all the 'python internal modules' to avoid writing them individiually -py_library( - name = "docs_as_code_py_modules", - srcs = [ - "@score_docs_as_code//src:plantuml_for_python", - "@score_docs_as_code//src/extensions:score_plantuml", - "@score_docs_as_code//src/extensions/score_draw_uml_funcs", - "@score_docs_as_code//src/extensions/score_header_service", - "@score_docs_as_code//src/extensions/score_layout", - "@score_docs_as_code//src/extensions/score_metamodel", - "@score_docs_as_code//src/extensions/score_source_code_linker", - "@score_docs_as_code//src/find_runfiles", - ], - visibility = ["//visibility:public"], -) - filegroup( name = "docs_assets", srcs = glob( diff --git a/src/extensions/score_layout/__init__.py b/src/extensions/score_layout/__init__.py index 18e36571..3e534819 100644 --- a/src/extensions/score_layout/__init__.py +++ b/src/extensions/score_layout/__init__.py @@ -10,13 +10,13 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -from typing import Any - -from sphinx.application import Sphinx import os from pathlib import Path +from typing import Any + import html_options import sphinx_options +from sphinx.application import Sphinx def setup(app: Sphinx) -> dict[str, str | bool]: @@ -39,10 +39,10 @@ def update_config(app: Sphinx, _config: Any): # For now this seems the only place this is used / needed. 
# In the future it might be a good idea to make this available in other places, maybe via the 'find_runfiles' lib if r := os.getenv("RUNFILES_DIR"): - dirs = [str(x) for x in Path(r).glob("*score_docs_as_code~")] + dirs = [str(x) for x in Path(r).glob("*score_docs_as_code+")] if dirs: # Happens if 'score_docs_as_code' is used as Module - p = str(r) + "/score_docs_as_code~/src/assets" + p = str(r) + "/score_docs_as_code+/src/assets" else: # Only happens in 'score_docs_as_code' repository p = str(r) + "/_main/src/assets" diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 565f11aa..210cd9fe 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -24,6 +24,7 @@ from sphinx_needs.config import NeedType from sphinx_needs.data import NeedsInfoType, NeedsView, SphinxNeedsData +from .external_needs import connect_external_needs from .log import CheckLogger logger = logging.get_logger(__name__) @@ -299,24 +300,6 @@ def default_options() -> list[str]: ] -def parse_external_needs_sources(app: Sphinx, config): - # HACK: maybe there is a nicer way for this - if app.config.external_needs_source not in ["[]", ""]: - x = None - # NOTE: Due to upgrades in modules, encoding changed. Need to clean string in order to read it right again. - clean_str = app.config.external_needs_source.replace('\\"', "") - x = json.loads(clean_str) - if r := os.getenv("RUNFILES_DIR"): - if x[0].get("json_path", None): - for a in x: - # This is needed to allow for the needs.json to be found locally - if "json_path" in a.keys(): - a["json_path"] = r + a["json_path"] - app.config.needs_external_needs = x - # Making the prefixes uppercase here to match sphinx_needs, as it does this internally too. 
- app.config.allowed_external_prefixes = [z["id_prefix"].upper() for z in x] - - def setup(app: Sphinx) -> dict[str, str | bool]: app.add_config_value("external_needs_source", "", rebuild="env") app.add_config_value("allowed_external_prefixes", [], rebuild="env") @@ -340,7 +323,7 @@ def setup(app: Sphinx) -> dict[str, str | bool]: app.config.needs_reproducible_json = True app.config.needs_json_remove_defaults = True - _ = app.connect("config-inited", parse_external_needs_sources) + _ = app.connect("config-inited", connect_external_needs) discover_checks() diff --git a/src/extensions/score_metamodel/external_needs.py b/src/extensions/score_metamodel/external_needs.py new file mode 100644 index 00000000..d7a9de90 --- /dev/null +++ b/src/extensions/score_metamodel/external_needs.py @@ -0,0 +1,189 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +import json +import os +import subprocess +import sys +from dataclasses import dataclass +from pathlib import Path + +from sphinx.application import Sphinx +from sphinx.config import Config +from sphinx.util import logging +from sphinx_needs.needsfile import NeedsList + +logger = logging.getLogger(__name__) + + +@dataclass +class ExternalNeedsSource: + bazel_module: str + path_to_target: str + target: str + + +def _parse_bazel_external_need(s: str) -> ExternalNeedsSource | None: + if not s.startswith("@"): + # Local need, not external needs + return None + + if "//" not in s or ":" not in s: + raise ValueError( + f"Unsuported external data dependency: '{s}'. Must contain '//' & ':'" + ) + repo_and_path, target = s.split( + ":", 1 + ) # @score_process//:needs_json => [@score_process//, needs_json] + repo, path_to_target = repo_and_path.split("//", 1) + repo = repo.lstrip("@") + + if path_to_target == "" and target == "needs_json": + return ExternalNeedsSource( + bazel_module=repo, path_to_target=path_to_target, target=target + ) + else: + # Unknown data target. Probably not a needs.json file. 
+ return None + + +def parse_external_needs_sources_from_DATA(v: str) -> list[ExternalNeedsSource]: + if v in ["[]", ""]: + return [] + + logger.debug(f"Parsing external needs sources: {v}") + data = json.loads(v) + + res = [res for el in data if (res := _parse_bazel_external_need(el))] + logger.debug(f"Parsed external needs sources: {res}") + return res + + +def parse_external_needs_sources_from_bazel_query() -> list[ExternalNeedsSource]: + """ + This function detects if the Sphinx app is running without Bazel and sets the + `external_needs_source` config value accordingly. + + When running with Bazel, we pass the `external_needs_source` config value + from the bazel config. + """ + + logger.debug("Detected execution without Bazel. Fetching external needs config...") + # Currently dependencies are stored in the top level BUILD file. + # We could parse it or query bazel. + # Parsing would be MUCH faster, but querying bazel would be more robust. + p = subprocess.run( + ["bazel", "query", "labels(data, //:docs)"], + check=True, + capture_output=True, + text=True, + ) + res = [ + res + for line in p.stdout.splitlines() + if line.strip() + if (res := _parse_bazel_external_need(line)) + ] + logger.debug(f"Parsed external needs sources: {res}") + return res + + +def extend_needs_json_exporter(config: Config, params: list[str]) -> None: + """ + This will add each param to app.config as a config value. + Then it will overwrite the needs.json exporter to include these values. + """ + + for p in params: + # Note: we are currently addinig these values to config after config-inited. + # This is wrong. But good enough. + config.add(p, default="", rebuild="env", types=(), description="") + + if not getattr(config, p): + logger.error( + f"Config value '{p}' is not set. " + + "Please set it in your Sphinx config." + ) + + # Patch json exporter to include our custom fields + # Note: yeah, NeedsList is the json exporter! 
+ orig_function = NeedsList._finalise # pyright: ignore[reportPrivateUsage] + + def temp(self: NeedsList): + for p in params: + self.needs_list[p] = getattr(config, p) # pyright: ignore[reportUnknownMemberType] + + orig_function(self) + + NeedsList._finalise = temp # pyright: ignore[reportPrivateUsage] + + +def connect_external_needs(app: Sphinx, config: Config): + extend_needs_json_exporter(config, ["project_url", "project_prefix"]) + + bazel = app.config.external_needs_source or os.getenv("RUNFILES_DIR") + + if bazel: + external_needs = parse_external_needs_sources_from_DATA( + app.config.external_needs_source + ) # pyright: ignore[reportAny] + else: + external_needs = parse_external_needs_sources_from_bazel_query() # pyright: ignore[reportAny] + + for e in external_needs: + assert not e.path_to_target # path_to_target is always empty + json_file = f"{e.bazel_module}+/{e.target}/_build/needs/needs.json" + if r := os.getenv("RUNFILES_DIR"): + logger.debug("Using runfiles to determine external needs JSON file.") + fixed_json_file = Path(r) / json_file + else: + logger.debug( + "Running outside bazel. Determining git root for external needs JSON file." + ) + git_root = Path.cwd().resolve() + while not (git_root / ".git").exists(): + git_root = git_root.parent + if git_root == Path("/"): + sys.exit("Could not find git root.") + logger.debug(f"Git root found: {git_root}") + fixed_json_file = ( + git_root / "bazel-bin" / "ide_support.runfiles" / json_file + ) + + logger.debug(f"Fixed JSON file path: {json_file} -> {fixed_json_file}") + json_file = fixed_json_file + + try: + needs_json_data = json.loads(Path(json_file).read_text(encoding="utf-8")) # pyright: ignore[reportAny] + except FileNotFoundError: + logger.error( + f"Could not find external needs JSON file at {json_file}. " + + "Something went terribly wrong. " + + "Try running `bazel clean --async && rm -rf _build`." 
+ ) + continue + + assert isinstance(app.config.needs_external_needs, list) # pyright: ignore[reportUnknownMemberType] + app.config.needs_external_needs.append( # pyright: ignore[reportUnknownMemberType] + { + "id_prefix": needs_json_data["project_prefix"], + "base_url": needs_json_data["project_url"] + + "/main", # for now always "main" + "json_path": json_file, + } + ) + # Making the prefixes uppercase here to match sphinx_needs, as it does this internally too. + assert isinstance(app.config.allowed_external_prefixes, list) # pyright: ignore[reportAny] + app.config.allowed_external_prefixes.append( # pyright: ignore[reportUnknownMemberType] + needs_json_data["project_prefix"].upper() # pyright: ignore[reportAny] + ) diff --git a/src/extensions/score_metamodel/tests/test_external_needs.py b/src/extensions/score_metamodel/tests/test_external_needs.py new file mode 100644 index 00000000..d94d19a4 --- /dev/null +++ b/src/extensions/score_metamodel/tests/test_external_needs.py @@ -0,0 +1,72 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +import pytest + +from ..external_needs import ExternalNeedsSource, parse_external_needs_sources_from_DATA + + +def test_empty_list(): + assert parse_external_needs_sources_from_DATA("[]") == [] + + +def test_single_entry_with_path(): + result = parse_external_needs_sources_from_DATA('["@repo//foo/bar:baz"]') + # IF a target has a path, it will not be reported as external needs + assert result == [] + + +def test_single_entry_no_path(): + result = parse_external_needs_sources_from_DATA('["@repo//:target"]') + # If a target is not named "needs_json", it will not be reported as external needs + assert result == [] + + +def test_single_entry_json_no_path(): + result = parse_external_needs_sources_from_DATA('["@repo//:needs_json"]') + assert result == [ + ExternalNeedsSource(bazel_module="repo", path_to_target="", target="needs_json") + ] + + +def test_multiple_entries(): + result = parse_external_needs_sources_from_DATA( + '["@repo1//:needs_json", "@repo2//:needs_json"]' + ) + assert result == [ + ExternalNeedsSource( + bazel_module="repo1", path_to_target="", target="needs_json" + ), + ExternalNeedsSource( + bazel_module="repo2", path_to_target="", target="needs_json" + ), + ] + + +def test_multiple_entries_2(): + result = parse_external_needs_sources_from_DATA( + '["@repo1//:needs_json", "@repo2//path:needs_json"]' + ) + + assert result == [ + ExternalNeedsSource( + bazel_module="repo1", path_to_target="", target="needs_json" + ) + ] + + +def test_invalid_entry(): + with pytest.raises(ValueError): + _ = parse_external_needs_sources_from_DATA('["@not_a_valid_string"]') + + +def test_parser(): ... 
diff --git a/src/extensions/score_plantuml.py b/src/extensions/score_plantuml.py index 1690315a..b360dfb2 100644 --- a/src/extensions/score_plantuml.py +++ b/src/extensions/score_plantuml.py @@ -24,7 +24,6 @@ In addition it sets common PlantUML options, like output to svg_obj. """ -from gettext import find import os import sys from pathlib import Path @@ -39,14 +38,14 @@ def get_runfiles_dir() -> Path: if r := os.getenv("RUNFILES_DIR"): # Runfiles are only available when running in Bazel. # bazel build and bazel run are both supported. - # i.e. `bazel build //docs:docs` and `bazel run //docs:incremental`. + # i.e. `bazel build //:docs` and `bazel run //:docs`. logger.debug("Using runfiles to determine plantuml path.") runfiles_dir = Path(r) else: # The only way to land here is when running from within the virtual - # environment created by the `docs:ide_support` rule in the BUILD file. + # environment created by the `:ide_support` rule in the BUILD file. # i.e. esbonio or manual sphinx-build execution within the virtual # environment. # We'll still use the plantuml binary from the bazel build. @@ -59,7 +58,7 @@ def get_runfiles_dir() -> Path: if git_root == Path("/"): sys.exit("Could not find git root.") - runfiles_dir = git_root / "bazel-bin" / "docs" / "ide_support.runfiles" + runfiles_dir = git_root / "bazel-bin" / "ide_support.runfiles" if not runfiles_dir.exists(): sys.exit( @@ -73,10 +72,10 @@ def find_correct_path(runfiles: str) -> str: """ This ensures that the 'plantuml' binary path is found in local 'score_docs_as_code' and module use. 
""" - dirs = [str(x) for x in Path(runfiles).glob("*score_docs_as_code~")] + dirs = [str(x) for x in Path(runfiles).glob("*score_docs_as_code+")] if dirs: # Happens if 'score_docs_as_code' is used as Module - p = runfiles + "/score_docs_as_code~/src/plantuml" + p = runfiles + "/score_docs_as_code+/src/plantuml" else: # Only happens in 'score_docs_as_code' repository p = runfiles + "/../plantuml" diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index 3ea1d3b3..3a4be37a 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -22,8 +22,8 @@ from typing import cast from sphinx.application import Sphinx -from sphinx.environment import BuildEnvironment from sphinx.config import Config +from sphinx.environment import BuildEnvironment from sphinx_needs.data import NeedsInfoType, NeedsMutable, SphinxNeedsData from sphinx_needs.logging import get_logger @@ -33,9 +33,9 @@ generate_source_code_links_json, ) from src.extensions.score_source_code_linker.needlinks import ( + DefaultNeedLink, NeedLink, load_source_code_links_json, - DefaultNeedLink, ) LOGGER = get_logger(__name__) @@ -105,7 +105,7 @@ def setup_once(app: Sphinx, config: Config): def setup(app: Sphinx) -> dict[str, str | bool]: # Esbonio will execute setup() on every iteration. # setup_once will only be called once. - app.connect("config-inited", setup_once) + setup_once(app, app.config) return { "version": "0.1", diff --git a/src/find_runfiles/__init__.py b/src/find_runfiles/__init__.py index a1aec645..c40ea977 100644 --- a/src/find_runfiles/__init__.py +++ b/src/find_runfiles/__init__.py @@ -60,7 +60,7 @@ def get_runfiles_dir_impl( if env_runfiles: # Runfiles are only available when running in Bazel. # bazel build and bazel run are both supported. - # i.e. `bazel build //docs:docs` and `bazel run //docs:incremental`. + # i.e. `bazel build //:docs` and `bazel run //:docs`. 
_log_debug("Using env[runfiles] to find the runfiles...") if env_runfiles.is_absolute(): diff --git a/src/incremental.py b/src/incremental.py index 16f18981..857b6d98 100644 --- a/src/incremental.py +++ b/src/incremental.py @@ -33,8 +33,8 @@ def get_env(name: str) -> str: if __name__ == "__main__": - # Add debuging functionality parser = argparse.ArgumentParser() + # Add debuging functionality parser.add_argument( "-dp", "--debug_port", help="port to listen to debugging client", default=5678 ) @@ -65,8 +65,6 @@ def get_env(name: str) -> str: debugpy.wait_for_client() workspace = os.getenv("BUILD_WORKSPACE_DIRECTORY") - # if workspace: - # os.chdir(workspace) if workspace: workspace += "/" else: @@ -74,15 +72,13 @@ def get_env(name: str) -> str: base_arguments = [ workspace + get_env("SOURCE_DIRECTORY"), - workspace + get_env("BUILD_DIRECTORY"), + workspace + "_build", "-W", # treat warning as errors "--keep-going", # do not abort after one error "-T", # show details in case of errors in extensions "--jobs", "auto", - "--conf-dir", - workspace + get_env("CONF_DIRECTORY"), - f"--define=external_needs_source={get_env('EXTERNAL_NEEDS_INFO')}", + f"--define=external_needs_source={get_env('DATA')}", ] # configure sphinx build with GitHub user and repo from CLI @@ -94,9 +90,8 @@ def get_env(name: str) -> str: action = get_env("ACTION") if action == "live_preview": - build_dir = Path(get_env("BUILD_DIRECTORY")) - (workspace / build_dir / "score_source_code_linker_cache.json").unlink( - missing_ok=False + Path(workspace + "/_build/score_source_code_linker_cache.json").unlink( + missing_ok=True ) sphinx_autobuild_main( base_arguments diff --git a/src/requirements.txt b/src/requirements.txt index 3bfc13bb..a179bd7b 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -37,7 +37,7 @@ babel==2.17.0 \ basedpyright==1.29.2 \ --hash=sha256:12c49186003b9f69a028615da883ef97035ea2119a9e3f93a00091b3a27088a6 \ 
--hash=sha256:f389e2997de33d038c5065fd85bff351fbdc62fa6d6371c7b947fc3bce8d437d - # via -r external/score_python_basics~/requirements.txt + # via -r external/score_python_basics+/requirements.txt beautifulsoup4==4.13.4 \ --hash=sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b \ --hash=sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195 @@ -438,7 +438,7 @@ iniconfig==2.1.0 \ --hash=sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7 \ --hash=sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760 # via - # -r external/score_python_basics~/requirements.txt + # -r external/score_python_basics+/requirements.txt # pytest jinja2==3.1.6 \ --hash=sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d \ @@ -669,7 +669,7 @@ nodejs-wheel-binaries==22.16.0 \ --hash=sha256:d695832f026df3a0cf9a089d222225939de9d1b67f8f0a353b79f015aabbe7e2 \ --hash=sha256:dbfccbcd558d2f142ccf66d8c3a098022bf4436db9525b5b8d32169ce185d99e # via - # -r external/score_python_basics~/requirements.txt + # -r external/score_python_basics+/requirements.txt # basedpyright numpy==2.2.5 \ --hash=sha256:0255732338c4fdd00996c0421884ea8a3651eea555c3a56b84892b66f696eb70 \ @@ -734,7 +734,7 @@ packaging==25.0 \ --hash=sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484 \ --hash=sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f # via - # -r external/score_python_basics~/requirements.txt + # -r external/score_python_basics+/requirements.txt # matplotlib # pytest # sphinx @@ -829,7 +829,7 @@ pluggy==1.6.0 \ --hash=sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3 \ --hash=sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746 # via - # -r external/score_python_basics~/requirements.txt + # -r external/score_python_basics+/requirements.txt # pytest pycparser==2.22 \ --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ 
@@ -882,7 +882,7 @@ pyspellchecker==0.8.2 \ pytest==8.3.5 \ --hash=sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820 \ --hash=sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845 - # via -r external/score_python_basics~/requirements.txt + # via -r external/score_python_basics+/requirements.txt python-dateutil==2.9.0.post0 \ --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 diff --git a/src/tests/README.md b/src/tests/README.md index ef68ce41..66dcfaab 100644 --- a/src/tests/README.md +++ b/src/tests/README.md @@ -1,24 +1,24 @@ # Docs-As-Code Consumer Tests -This test validates that changes to the docs-as-code system don't break downstream consumers. +This test validates that changes to the docs-as-code system don't break downstream consumers. It tests both local changes and git-based overrides against real consumer repositories. ## Use in CI -If you want to start the consumer tests on a PR inside `docs-as-code`, then all you have to do is comment +If you want to start the consumer tests on a PR inside `docs-as-code`, then all you have to do is comment `/consumer-test` on the PR and this should trigger them. ## Quick Start ```bash # Create the virtual environment -bazel run //docs:ide_support +bazel run //:ide_support # Run with std. 
configuration .venv_docs/bin/python -m pytest -s src/tests # Run with more verbose output (up to -vvv) -.venv_docs/bin/python -m pytest -s -v src/tests +.venv_docs/bin/python -m pytest -s -v src/tests # Run specific repositories only .venv_docs/bin/python -m pytest -s src/tests --repo=score @@ -89,16 +89,16 @@ For each repository, the test: ```bash # Create the virtual environment -bazel run //docs:ide_support +bazel run //:ide_support # First run - clones everything fresh -.venv_docs/bin/python -m pytest -s -v src/tests --repo=score +.venv_docs/bin/python -m pytest -s -v src/tests --repo=score # Make changes to docs-as-code... # Subsequent runs - much faster due to caching -.venv_docs/bin/python -m pytest -s -v src/tests --repo=score +.venv_docs/bin/python -m pytest -s -v src/tests --repo=score # Final validation - test all repos without cache -.venv_docs/bin/python -m pytest -s -v src/tests --disable-cache +.venv_docs/bin/python -m pytest -s -v src/tests --disable-cache ``` diff --git a/src/tests/test_consumer.py b/src/tests/test_consumer.py index 4c345655..c0fb091b 100644 --- a/src/tests/test_consumer.py +++ b/src/tests/test_consumer.py @@ -10,10 +10,8 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -import logging import os import re -import argparse import subprocess from collections import defaultdict from dataclasses import dataclass, field @@ -39,11 +37,12 @@ - The script itself takes quiet a bit of time, roughly 5+ min for a full run. 
- If you need more output, enable it via `-v` or `-vv` - Start the script via the following command: - - bazel run //docs:ide_support + - bazel run //:ide_support - .venv_docs/bin/python -m pytest -s src/tests (If you need more verbosity add `-v` or `-vv`) """ # Max width of the printout +# Trial and error has shown that 80 the best value is for GH CI output len_max = 80 CACHE_DIR = Path.home() / ".cache" / "docs_as_code_consumer_tests" @@ -79,18 +78,16 @@ class Result: ConsumerRepo( name="process_description", git_url="https://github.com/eclipse-score/process_description.git", - commands=["bazel run //process:incremental_latest"], + commands=["bazel run //:docs"], test_commands=[], ), ConsumerRepo( name="score", git_url="https://github.com/eclipse-score/score.git", commands=[ - "bazel run //docs:ide_support", - "bazel run //docs:incremental_latest", - "bazel run //docs:incremental_release", - "bazel build //docs:docs_release", - "bazel build //docs:docs_latest", + "bazel run //:ide_support", + "bazel run //:docs", + "bazel build //:needs_json", ], test_commands=[], ), @@ -98,9 +95,9 @@ class Result: name="module_template", git_url="https://github.com/eclipse-score/module_template.git", commands=[ - "bazel run //docs:ide_support", - "bazel run //docs:incremental", - "bazel build //docs:docs", + "bazel run //:ide_support", + "bazel run //:docs", + "bazel build //:needs_json", ], test_commands=[ "bazel test //tests/...", From 89642464f4e7da7cd7f31e155a230c689d77d0c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Wed, 30 Jul 2025 13:48:15 +0200 Subject: [PATCH 092/231] Prepare for release (#190) --- MODULE.bazel | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 3b7236c9..465d4cb0 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "1.0.0-RC1", + version = "1.0.0", compatibility_level = 1, ) @@ -95,4 +95,4 @@ 
bazel_dep(name = "score_cr_checker", version = "0.3.1") bazel_dep(name = "score_dash_license_checker", version = "0.1.1") # docs dependency -bazel_dep(name = "score_process", version = "1.0.4") +bazel_dep(name = "score_process", version = "1.0.5") From 96e5a76e87d401ba0cf29031c1109d0c78840c38 Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Wed, 30 Jul 2025 15:45:37 +0200 Subject: [PATCH 093/231] Enhance validation for graph checks error messages (#142) --- src/extensions/score_metamodel/__init__.py | 1 - .../score_metamodel/checks/graph_checks.py | 47 +++++---- src/extensions/score_metamodel/metamodel.yaml | 6 +- .../tests/rst/graph/test_metamodel_graph.rst | 98 ++++++++++--------- 4 files changed, 86 insertions(+), 66 deletions(-) diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 210cd9fe..ee9f7259 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -170,7 +170,6 @@ def load_metamodel_data(): types_dict = data.get("needs_types", {}) links_dict = data.get("needs_extra_links", {}) graph_check_dict = data.get("graph_checks", {}) - global_base_options = data.get("needs_types_base_options", {}) global_base_options_optional_opts = global_base_options.get("optional_options", {}) diff --git a/src/extensions/score_metamodel/checks/graph_checks.py b/src/extensions/score_metamodel/checks/graph_checks.py index ed914388..728534fd 100644 --- a/src/extensions/score_metamodel/checks/graph_checks.py +++ b/src/extensions/score_metamodel/checks/graph_checks.py @@ -12,15 +12,15 @@ # ******************************************************************************* import operator from collections.abc import Callable -from typing import Any, Literal - -from sphinx.application import Sphinx -from sphinx_needs.data import NeedsInfoType, NeedsView +from functools import reduce +from typing import Any from score_metamodel import ( CheckLogger, graph_check, ) +from 
sphinx.application import Sphinx +from sphinx_needs.data import NeedsInfoType, NeedsView def eval_need_check(need: NeedsInfoType, check: str, log: CheckLogger) -> bool: @@ -70,7 +70,7 @@ def eval_need_condition( oper: dict[str, Any] = { "and": operator.and_, "or": operator.or_, - "not": operator.not_, + "not": lambda x: not x, "xor": operator.xor, } @@ -80,16 +80,17 @@ def eval_need_condition( cond: str = list(condition.keys())[0] vals: list[Any] = list(condition.values())[0] - if cond in ["and", "or", "xor", "not"]: - for i in range(len(vals) - 1): - return oper[cond]( - eval_need_condition(need, vals[i], log), - eval_need_condition(need, vals[i + 1], log), - ) - else: - raise ValueError(f"Binary Operator not defined: {vals}") + if cond == "not": + if not isinstance(vals, list) or len(vals) != 1: + raise ValueError("Operator 'not' requires exactly one operand.") + return oper["not"](eval_need_condition(need, vals[0], log)) - return True + if cond in ["and", "or", "xor"]: + return reduce( + lambda a, b: oper[cond](a, b), + (eval_need_condition(need, val, log) for val in vals), + ) + raise ValueError(f"Unsupported condition operator: {cond}") def get_need_selection( @@ -137,18 +138,25 @@ def check_metamodel_graph( # Convert list to dictionary for easy lookup needs_dict_all = {need["id"]: need for need in all_needs.values()} needs_local = list(all_needs.filter_is_external(False).values()) + # Iterate over all graph checks - for check in graph_checks_global.items(): - apply, eval = check[1].values() - # Get all needs that match the selection criteria + for check_name, check_config in graph_checks_global.items(): + apply = check_config.get("needs") + eval = check_config.get("check") + explanation = check_config.get("explanation", "") + assert explanation != "", ( + f"Explanation for graph check {check_name} is missing. Explanations are mandatory for graph checks." 
+ ) + # Get all needs matching the selection criteria selected_needs = get_need_selection(needs_local, apply, log) for need in selected_needs: for parent_relation in list(eval.keys()): if parent_relation not in need: - msg = f"Attribute not defined: {parent_relation}" + msg = f"Attribute not defined: `{parent_relation}` in need `{need['id']}`." log.warning_for_need(need, msg) continue + parent_ids = need[parent_relation] for parent_id in parent_ids: @@ -160,7 +168,8 @@ def check_metamodel_graph( if not eval_need_condition(parent_need, eval[parent_relation], log): msg = ( - f"parent need `{parent_id}` does not fulfill " + f"Parent need `{parent_id}` does not fulfill " f"condition `{eval[parent_relation]}`." + f" Explanation: {explanation}" ) log.warning_for_need(need, msg) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 7f08d439..a548fb30 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -18,7 +18,7 @@ needs_types_base_options: # Custom semantic validation rules # Prohibited Word Option Checks -# Follow this schema to write new checks +# Follow this schema to write new checks # check_name: # types[optional]: # - List of tag types ('tags' option) @@ -821,6 +821,7 @@ graph_checks: condition: safety == QM check: satisfies: safety == QM + explanation: An ASIL requirement must link at least one parent/upstream ASIL requirement for correct decomposition. Please ensure the parent’s safety level is QM and its status is valid. # If need-req is `ASIL_B`, parent must be `QM` or `ASIL_B`. req_safety_linkage_asil_b: needs: @@ -831,6 +832,7 @@ graph_checks: or: - safety == ASIL_B - safety == QM + explanation: An ASIL requirement must link at least one parent/upstream ASIL requirement for correct decomposition. Please ensure the parent’s safety level is ASIL_B or QM and its status is valid. 
# saf - ID gd_req__saf_linkage_safety # It shall be checked that Safety Analysis (DFA and FMEA) can only be linked via mitigate against # - Requirements with the same ASIL or @@ -842,4 +844,4 @@ graph_checks: condition: safety == ASIL_B check: mitigated_by: safety != QM - + explanation: An ASIL_B safety requirement must link to a ASIL_B requirement. Please ensure that the linked requirements safety level is not QM and it's status is valid. diff --git a/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst b/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst index fd41c82a..10c14113 100644 --- a/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst +++ b/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst @@ -31,7 +31,7 @@ .. Positive Test: Child requirement QM. Parent requirement has the correct related safety level. Parent requirement is `QM`. -#EXPECT-NOT: feat_req__child__1: parent need `feat_req__parent__QM` does not fulfill condition `safety == QM`. +#EXPECT-NOT: feat_req__child__1: Parent need `feat_req__parent__QM` does not fulfill condition `safety == QM`. Explanation: An ASIL requirement must link at least one parent/upstream ASIL requirement for correct decomposition. Please ensure the parent’s safety level is QM and its status is valid. .. feat_req:: Child requirement 1 :id: feat_req__child__1 @@ -39,8 +39,9 @@ :satisfies: feat_req__parent__QM :status: valid + .. Positive Test: Child requirement ASIL B. Parent requirement has the correct related safety level. Parent requirement is `QM`. -#EXPECT-NOT: feat_req__child__2: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety == QM`. +#EXPECT-NOT: feat_req__child__2: Parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety == QM`. Explanation: An ASIL requirement must link at least one parent/upstream ASIL requirement for correct decomposition. 
Please ensure the parent’s safety level is QM and its status is valid. .. feat_req:: Child requirement 2 :id: feat_req__child__2 @@ -49,130 +50,139 @@ :status: valid + .. Negative Test: Child requirement QM. Parent requirement is `ASIL_B`. Child cant fulfill the safety level of the parent. -#EXPECT: feat_req__child__4: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety == QM`. +#EXPECT: feat_req__child__3: Parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety == QM`. Explanation: An ASIL requirement must link at least one parent/upstream ASIL requirement for correct decomposition. Please ensure the parent’s safety level is QM and its status is valid. -.. comp_req:: Child requirement 4 - :id: feat_req__child__4 +.. comp_req:: Child requirement 3 + :id: feat_req__child__3 :safety: QM :satisfies: feat_req__parent__ASIL_B :status: valid - - .. Parent requirement does not exist -#EXPECT: feat_req__child__9: Parent need `feat_req__parent0__abcd` not found in needs_dict. +#EXPECT: feat_req__child__4: Parent need `feat_req__parent0__abcd` not found in needs_dict. -.. feat_req:: Child requirement 9 - :id: feat_req__child__9 +.. feat_req:: Child requirement 4 + :id: feat_req__child__4 :safety: ASIL_B :status: valid :satisfies: feat_req__parent0__abcd + .. Mitigation of Safety Analysis (FMEA and DFA) shall be checked. Mitigation shall have the same or higher safety level than the analysed item. .. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. -#EXPECT: feat_saf_dfa__child__10: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. +#EXPECT: feat_saf_dfa__child__5: Parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. Explanation: An ASIL_B safety requirement must link to a ASIL_B requirement. Please ensure that the linked requirements safety level is not QM and it's status is valid. -.. 
feat_saf_dfa:: Child requirement 10 - :id: feat_saf_dfa__child__10 +.. feat_saf_dfa:: Child requirement 5 + :id: feat_saf_dfa__child__5 :safety: ASIL_B :status: valid :mitigated_by: feat_req__parent__QM + .. Positive Test: Linked to a mitigation that is equal to the safety level of the analysed item. -#EXPECT-NOT: feat_saf_dfa__child__11: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. +#EXPECT-NOT: feat_saf_dfa__child__6: Parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. Explanation: An ASIL_B safety requirement must link to a ASIL_B requirement. Please ensure that the linked requirements safety level is not QM and it's status is valid. -.. feat_saf_dfa:: Child requirement 11 - :id: feat_saf_dfa__child__11 +.. feat_saf_dfa:: Child requirement 6 + :id: feat_saf_dfa__child__6 :safety: ASIL_B :status: valid :mitigated_by: feat_req__parent__ASIL_B + .. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. -#EXPECT: comp_saf_dfa__child__13: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. +#EXPECT: comp_saf_dfa__child__7: Parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. Explanation: An ASIL_B safety requirement must link to a ASIL_B requirement. Please ensure that the linked requirements safety level is not QM and it's status is valid. -.. comp_saf_dfa:: Child requirement 13 - :id: comp_saf_dfa__child__13 +.. comp_saf_dfa:: Child requirement 7 + :id: comp_saf_dfa__child__7 :safety: ASIL_B :status: valid :mitigated_by: feat_req__parent__QM + .. Positive Test: Linked to a mitigation that is equal to the safety level of the analysed item. -#EXPECT-NOT: comp_saf_dfa__child__14: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. +#EXPECT-NOT: comp_saf_dfa__child__8: Parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. 
Explanation: An ASIL_B safety requirement must link to a ASIL_B requirement. Please ensure that the linked requirements safety level is not QM and it's status is valid. -.. comp_saf_dfa:: Child requirement 14 - :id: comp_saf_dfa__child__14 +.. comp_saf_dfa:: Child requirement 8 + :id: comp_saf_dfa__child__8 :safety: ASIL_B :status: valid :mitigated_by: feat_req__parent__ASIL_B + .. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. -#EXPECT: feat_saf_dfa__child__16: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. +#EXPECT: feat_saf_dfa__child__9: Parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. Explanation: An ASIL_B safety requirement must link to a ASIL_B requirement. Please ensure that the linked requirements safety level is not QM and it's status is valid. -.. feat_saf_dfa:: Child requirement 16 - :id: feat_saf_dfa__child__16 +.. feat_saf_dfa:: Child requirement 9 + :id: feat_saf_dfa__child__9 :safety: ASIL_B :status: valid :mitigated_by: feat_req__parent__QM + .. Positive Test: Linked to a mitigation that is equal to the safety level of the analysed item. -#EXPECT-NOT: feat_saf_dfa__child__17: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. +#EXPECT-NOT: feat_saf_dfa__child__10: Parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. Explanation: An ASIL_B safety requirement must link to a ASIL_B requirement. Please ensure that the linked requirements safety level is not QM and it's status is valid. -.. feat_saf_dfa:: Child requirement 17 - :id: feat_saf_dfa__child__17 +.. feat_saf_dfa:: Child requirement 10 + :id: feat_saf_dfa__child__10 :safety: ASIL_B :status: valid :mitigated_by: feat_req__parent__ASIL_B + .. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. 
-#EXPECT: feat_saf_fmea__child__19: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. +#EXPECT: feat_saf_fmea__child__11: Parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. Explanation: An ASIL_B safety requirement must link to a ASIL_B requirement. Please ensure that the linked requirements safety level is not QM and it's status is valid. -.. feat_saf_fmea:: Child requirement 19 - :id: feat_saf_fmea__child__19 +.. feat_saf_fmea:: Child requirement 11 + :id: feat_saf_fmea__child__11 :safety: ASIL_B :status: valid :mitigated_by: feat_req__parent__QM + .. Positive Test: Linked to a mitigation that is equal to the safety level of the analysed item. -#EXPECT-NOT: feat_saf_fmea__child__20: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. +#EXPECT-NOT: feat_saf_fmea__child__12: Parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. Explanation: An ASIL_B safety requirement must link to a ASIL_B requirement. Please ensure that the linked requirements safety level is not QM and it's status is valid. -.. feat_saf_fmea:: Child requirement 20 - :id: feat_saf_fmea__child__20 +.. feat_saf_fmea:: Child requirement 12 + :id: feat_saf_fmea__child__12 :safety: ASIL_B :status: valid :mitigated_by: feat_req__parent__ASIL_B + .. Positive Test: Linked to a mitigation that is higher to the safety level of the analysed item. -#EXPECT-NOT: feat_saf_fmea__child__21: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. +#EXPECT-NOT: feat_saf_fmea__child__13: Parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. Explanation: An ASIL_B safety requirement must link to a ASIL_B requirement. Please ensure that the linked requirements safety level is not QM and it's status is valid. -.. feat_saf_fmea:: Child requirement 21 - :id: feat_saf_fmea__child__21 +.. 
feat_saf_fmea:: Child requirement 13 + :id: feat_saf_fmea__child__13 :safety: QM :status: valid :mitigated_by: feat_req__parent__ASIL_B + .. Negative Test: Linked to a mitigation that is lower than the safety level of the analysed item. -#EXPECT: comp_saf_fmea__child__22: parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. +#EXPECT: comp_saf_fmea__child__14: Parent need `feat_req__parent__QM` does not fulfill condition `safety != QM`. Explanation: An ASIL_B safety requirement must link to a ASIL_B requirement. Please ensure that the linked requirements safety level is not QM and it's status is valid. -.. comp_saf_fmea:: Child requirement 22 - :id: comp_saf_fmea__child__22 +.. comp_saf_fmea:: Child requirement 14 + :id: comp_saf_fmea__child__14 :safety: ASIL_B :status: valid :mitigated_by: feat_req__parent__QM + .. Positive Test: Linked to a mitigation that is equal to the safety level of the analysed item. -#EXPECT-NOT: comp_saf_fmea__child__23: parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. +#EXPECT-NOT: comp_saf_fmea__child__15: Parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety != QM`. Explanation: An ASIL_B safety requirement must link to a ASIL_B requirement. Please ensure that the linked requirements safety level is not QM and it's status is valid. -.. comp_saf_fmea:: Child requirement 23 - :id: comp_saf_fmea__child__23 +.. 
comp_saf_fmea:: Child requirement 15 + :id: comp_saf_fmea__child__15 :safety: ASIL_B :status: valid :mitigated_by: feat_req__parent__ASIL_B - From 446bf0e10e4e27943e775adf3ec7777d1c6d0e31 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Thu, 31 Jul 2025 09:19:14 +0200 Subject: [PATCH 094/231] Update process version (#191) --- MODULE.bazel | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 465d4cb0..49ca9456 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "1.0.0", + version = "1.0.1", compatibility_level = 1, ) @@ -95,4 +95,4 @@ bazel_dep(name = "score_cr_checker", version = "0.3.1") bazel_dep(name = "score_dash_license_checker", version = "0.1.1") # docs dependency -bazel_dep(name = "score_process", version = "1.0.5") +bazel_dep(name = "score_process", version = "1.1.0") From 8459a1064a0c8aca3fb79431a8c379cdc694f70b Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Tue, 5 Aug 2025 23:00:48 +0200 Subject: [PATCH 095/231] update requirements to match process 1.1.0 (#194) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update Tool requirements * Updating Graph Checks * Enhancing Graph Check logic --------- Co-authored-by: Maximilian Sören Pollak --- .../instructions/process_req.instructions.md | 11 + docs/requirements/requirements.rst | 549 ++++++++++++++---- .../score_metamodel/checks/graph_checks.py | 49 +- src/extensions/score_metamodel/metamodel.yaml | 186 ++++-- .../tests/rst/graph/test_metamodel_graph.rst | 8 +- .../tests/test_rules_file_based.py | 24 +- 6 files changed, 650 insertions(+), 177 deletions(-) create mode 100644 .github/instructions/process_req.instructions.md diff --git a/.github/instructions/process_req.instructions.md b/.github/instructions/process_req.instructions.md new file mode 100644 index 00000000..226c9299 --- /dev/null +++ 
b/.github/instructions/process_req.instructions.md @@ -0,0 +1,11 @@ +--- +applyTo: "docs/requirements/requirements.rst" +--- + +This file contains docs-as-code requirements which derived from upstream process requirements. +Those are specified in `bazel-out/k8-fastbuild/bin/external/score_process+/needs_json/_build/needs/needs.json` + +The docs-as-code requirements are implemented in this repository, most notably in `src/extensions/score_metamodel/metamodel.yaml` +The metamodel has references to docs-as-code requirement ids. + +Ensure all of that is consistent. diff --git a/docs/requirements/requirements.rst b/docs/requirements/requirements.rst index 192d46f7..942943af 100644 --- a/docs/requirements/requirements.rst +++ b/docs/requirements/requirements.rst @@ -57,8 +57,9 @@ This section provides an overview of current process requirements and their clar :satisfies: PROCESS_gd_req__req__attr_uid, PROCESS_gd_req__tool__attr_uid, - PROCESS_gd_req__arch__attribute_uid - :parent_covered: YES: together with tool_req__docs_attr_id_scheme + PROCESS_gd_req__arch__attribute_uid, + PROCESS_gd_req__saf_attr_uid, + :parent_covered: NO Docs-as-Code shall enforce that all Need IDs are globally unique across all included documentation instances. 
@@ -71,8 +72,11 @@ This section provides an overview of current process requirements and their clar :id: tool_req__docs_common_attr_id_scheme :implemented: PARTIAL :tags: Common Attributes - :satisfies: PROCESS_gd_req__req__attr_uid, PROCESS_gd_req__arch__attribute_uid - :parent_covered: YES: together with tool_req__docs_attr_id + :satisfies: + PROCESS_gd_req__req__attr_uid, + PROCESS_gd_req__arch__attribute_uid, + PROCESS_gd_req__saf_attr_uid, + :parent_covered: NO: cannot check non-existent "doc__naming_conventions" in PROCESS_gd_req__req__attr_uid Docs-as-Code shall enforce that Need IDs follow the following naming scheme: @@ -80,6 +84,7 @@ This section provides an overview of current process requirements and their clar * A middle part matching the hierarchical structure of the need: * For requirements: a portion of the feature tree or a component acronym * For architecture elements: the structural element (e.g. a part of the feature tree, component acronym) + * For safety analysis (FMEA, DFA): name of analyzed structural element (e.g. Persistency, FEO, etc.) * Additional descriptive text to ensure human readability @@ -91,12 +96,16 @@ This section provides an overview of current process requirements and their clar :id: tool_req__docs_common_attr_title :implemented: YES :tags: Common Attributes - :satisfies: PROCESS_gd_req__req__attr_title + :satisfies: + PROCESS_gd_req__req__attr_title, + PROCESS_gd_req__saf__attr_title, :parent_covered: NO: Can not ensure summary + Docs-as-Code shall enforce that all needs have titles and titles do not contain the following words: - Docs-as-Code shall enforce that needs of type :need:`tool_req__docs_req_types` do not have prohibited words - which can be found in the metamodel. 
+ * shall + * must + * will --------------------------- @@ -117,10 +126,19 @@ This section provides an overview of current process requirements and their clar :id: tool_req__docs_common_attr_desc_wording :tags: Common Attributes :implemented: YES + :satisfies: + PROCESS_gd_req__req__desc_weak, :parent_covered: YES + Docs-as-Code shall enforce that requirement descriptions do not contain the following weak words: + just, about, really, some, thing, absol-utely - Docs-as-Code shall enforce that Need description do not contain the weak words that are defined in the metamodel + This rule applies to: + + * all requirement types defined in :need:`tool_req__docs_req_types`, except process requirements. + + .. note:: + Artificial "-" added to avoid triggering violation of this requirment in this document. ---------------------------- 🔒 Security Classification @@ -141,10 +159,12 @@ This section provides an overview of current process requirements and their clar This rule applies to: - * all requirement types defined in :need:`tool_req__docs_req_types`, except process requirements. + * all requirement types defined in :need:`tool_req__docs_req_types`, except process and tool requirements. * all architecture elements defined in :need:`tool_req__docs_arch_types`. + + --------------------------- 🛡️ Safety Classification --------------------------- @@ -166,9 +186,11 @@ This section provides an overview of current process requirements and their clar This rule applies to: - * all requirement types defined in :need:`tool_req__docs_req_types`, except process requirements. + * all requirement types defined in :need:`tool_req__docs_req_types`, except process and tool requirements. * all architecture elements defined in :need:`tool_req__docs_arch_types`. 
+ + ---------- 🚦 Status ---------- @@ -177,10 +199,11 @@ This section provides an overview of current process requirements and their clar :id: tool_req__docs_common_attr_status :tags: Common Attributes :implemented: YES - :parent_covered: YES + :parent_covered: NO: gd_req__saf_attr_status has additional constraints :satisfies: PROCESS_gd_req__req__attr_status, PROCESS_gd_req__arch__attr_status, + PROCESS_gd_req__saf_attr_status, Docs-as-Code shall enforce that the ``status`` attribute has one of the following values: @@ -189,8 +212,43 @@ This section provides an overview of current process requirements and their clar This rule applies to: - * all requirement types defined in :need:`tool_req__docs_req_types`, except process requirements. + * all requirement types defined in :need:`tool_req__docs_req_types`, except process and tool requirements. * all architecture elements defined in :need:`tool_req__docs_arch_types`. + * all safety analysis elements defined in :need:`tool_req__docs_saf_types`. + + + +---------- +Versioning +---------- + +.. tool_req:: Versioning: enforce attribute + :id: tool_req__docs_common_attr_version + :tags: Common Attributes + :implemented: NO + :parent_covered: NO: to be checked after demo + :satisfies: PROCESS_gd_req__req__attr_version + + Docs-As-Code shall enable and enforce a versioning attribute for all needs. + + .. note:: + Exact nature to be decided, it could e.g. be a number, a string, a semantic version, a date or a hash. + + + +.. tool_req:: Suspicious: Enforce attribute + :id: tool_req__docs_common_attr_suspicious + :tags: Common Attributes + :implemented: NO + :parent_covered: NO: parent talks about setting covered to false, but we want to issue a build error. + :satisfies: PROCESS_gd_req__req__suspicious + :status: invalid + + Docs-as-Code shall check if linked parent needs have different versions, compared to + the version the need was originally linked to. 
+ + + 📚 Documents ############# @@ -199,12 +257,13 @@ This section provides an overview of current process requirements and their clar :id: tool_req__docs_doc_types :tags: Documents :implemented: YES - - .. :satisfies: PROCESS_gd_req__doc_types (next process release) + :parent_covered: YES + :satisfies: PROCESS_gd_req__doc__types Docs-as-Code shall support the following document types: * Generic Document (document) + * Tool Verification Report (doc_tool) .. tool_req:: Mandatory Document attributes @@ -215,7 +274,8 @@ This section provides an overview of current process requirements and their clar PROCESS_gd_req__doc__author, PROCESS_gd_req__doc__approver, PROCESS_gd_req__doc__reviewer, - :parent_covered: NO + :parent_covered: NO, process requirement has changed and we do not understand the new wording. + :status: invalid Docs-as-Code shall enforce that each :need:`tool_req__docs_doc_types` has the following attributes: @@ -230,7 +290,7 @@ This section provides an overview of current process requirements and their clar :tags: Documents :implemented: NO :satisfies: PROCESS_gd_req__doc__author - :parent_covered: YES: Together with tool_req__docs_doc_attr + :parent_covered: NO, process requirement has changed and we do not understand the new wording. :status: invalid Docs-as-Code shall provide an automatic mechanism to determine document authors. @@ -248,7 +308,8 @@ This section provides an overview of current process requirements and their clar :tags: Documents :implemented: NO :satisfies: PROCESS_gd_req__doc__approver - :parent_covered: YES: Together with tool_req__docs_doc_attr + :parent_covered: NO, process requirement has changed and we do not understand the new wording. + :status: invalid Docs-as-Code shall provide an automatic mechanism to determine the document approver. 
@@ -261,7 +322,8 @@ This section provides an overview of current process requirements and their clar :tags: Documents :implemented: NO :satisfies: PROCESS_gd_req__doc__reviewer - :parent_covered: YES: Together with tool_req__docs_doc_attr + :parent_covered: NO, process requirement has changed and we do not understand the new wording. + :status: invalid Docs-as-Code shall provide an automatic mechanism to determine the document reviewers. @@ -269,6 +331,17 @@ This section provides an overview of current process requirements and their clar request of the file containing the document. +------- +Mapping +------- + +.. needtable:: + :style: table + :types: gd_req + :columns: id;satisfies_back as "tool_req" + :filter: "PROCESS_gd_req__doc" in id + + 📋 Requirements ################ @@ -303,7 +376,7 @@ This section provides an overview of current process requirements and their clar :parent_covered: NO: Can not ensure correct reasoning :satisfies: PROCESS_gd_req__req__attr_rationale - Docs-as-Code shall enforce that each stakeholder requirement contains a ``rationale`` attribute. + Docs-as-Code shall enforce that each stakeholder requirement (stkh_req) contains a ``rationale`` attribute. .. tool_req:: Enforces requirement type classification :id: tool_req__docs_req_attr_reqtype @@ -318,19 +391,22 @@ This section provides an overview of current process requirements and their clar * Functional * Interface * Process - * Legal * Non-Functional .. tool_req:: Enables marking requirements as "covered" :id: tool_req__docs_req_attr_reqcov :tags: Requirements - :implemented: NO + :implemented: PARTIAL :satisfies: PROCESS_gd_req__req__attr_req_cov - :status: invalid - .. warning:: - This requirement is not yet specified. The corresponding parent requirement is - unclear and must be clarified before a precise tool requirement can be defined. + Docs as code shall shall enable marking requirements as covered by their linked children. 
+ + Attribute ``reqcov`` must be one of the following values: + * Yes + * No + + .. note:: + No concept yet, as parents are generally not aware of their children. .. tool_req:: Support requirements test coverage :id: tool_req__docs_req_attr_testcov @@ -346,9 +422,9 @@ This section provides an overview of current process requirements and their clar * Yes * No - .. warning:: - This requirement is not yet specified. The corresponding parent requirement is - unclear and must be clarified before a precise tool requirement can be defined. + .. note:: + No concept yet + ------------------------- 🔗 Links @@ -380,8 +456,16 @@ This section provides an overview of current process requirements and their clar ================================ =========================== .. note:: - Some tool requirements do not have a matching process requirement (gap). - And sometimes we need to link to documents and not requirements?! + Some tool requirements do not have a matching process requirement. + +.. tool_req:: Safety: enforce safe linking + :id: tool_req__docs_common_attr_safety_link_check + :tags: Common Attributes + :implemented: YES + :parent_covered: YES + :satisfies: PROCESS_gd_req__req__linkage_safety + + QM requirements (safety == QM) shall not be linked to safety requirements (safety != QM) via the ``satisfies`` attribute. 
🏛️ Architecture ################ @@ -395,41 +479,42 @@ This section provides an overview of current process requirements and their clar :tags: Architecture :satisfies: PROCESS_gd_req__arch__hierarchical_structure, - PROCESS_gd_req__arch__viewpoints, PROCESS_gd_req__arch__build_blocks, - PROCESS_gd_req__arch__build_blocks_corr :implemented: YES :parent_covered: NO :status: invalid - Docs-as-Code shall support the following architecture types: + Docs-as-Code shall support the following architecture element types: - * Feature (Architecture Element) = Feature Architecture Static View (feat_arc_sta) - * Feature Architecture Dynamic View (feat_arc_dyn) - * Feature: Logical Architecture Interface (incl Logical Interface View) (logic_arc_int) - * Feature: Logical Architecture Interface Operation (logic_arc_int_op) - * Component Architecture Static View (comp_arc_sta) - * Component Architecture Dynamic View (comp_arc_dyn) - * Component Architecture Interface = Real Interface (real_arc_int) - * Component Architecture Interface Operation = Real Interface Operation (real_arc_int_op) + * Feature (feat_arc_sta) + * Logical Interface (logic_arc_int) + * Logical Interface Operation (logic_arc_int_op) + * Component (comp_arc_sta) + * Interface (real_arc_int) + * Interface Operation (real_arc_int_op) +-------------------------- +Architecture Attributes +-------------------------- -.. tool_req::Module Views - :id: tool_req__docs_module_views - :tags: Architecture - :satisfies: - PROCESS_gd_req__arch__hierarchical_structure, - PROCESS_gd_req__arch__viewpoints, - PROCESS_gd_req__arch__build_blocks, - PROCESS_gd_req__arch__build_blocks_corr - :implemented: PARTIAL - :parent_covered: NO - :status: invalid +.. 
tool_req:: Architecture Mandatory Attributes + :id: tool_req__docs_arch_attr_mandatory + :tags: Architecture + :satisfies: + PROCESS_gd_req__arch__attr_mandatory, + PROCESS_gd_req__arch__attr_fulfils, + :implemented: PARTIAL + :parent_covered: YES + :parent_has_problem: YES: Metamodel & Process aren't the same. Some definitions are not consistent in Process + + Docs-as-Code shall enforce that the following attributes are present in all needs of type :need:`tool_req__docs_arch_types` - Docs-as-Code shall support the following module view-types: + * Fulfils + * Safety + * Security + * Status + * UID - * Module = Module Architecture Static View = Top Level SW component container (mod_view_sta) - * Module Architecture Dynamic View = Top Level SW component container (mod_view_dyn) ------------------------ @@ -444,8 +529,8 @@ This section provides an overview of current process requirements and their clar PROCESS_gd_req__arch__linkage_requirement_type, PROCESS_gd_req__arch__attr_fulfils, PROCESS_gd_req__arch__traceability, + PROCESS_gd_req__req__linkage_fulfill :parent_covered: YES - :status: invalid Docs-as-Code shall enforce that linking via the ``fulfils`` attribute follows defined rules. 
@@ -455,36 +540,52 @@ This section provides an overview of current process requirements and their clar :widths: auto ==================================== ========================================== - Requirement Type Allowed Link Target + Link Source Allowed Link Target ==================================== ========================================== - Functional feature requirements Static / dynamic feature architecture - Interface feature requirements Interface feature architecture - Functional component requirements Static / dynamic component architecture - Interface component requirements Interface component architecture + feat_arc_sta feat_req + feat_arc_dyn feat_req + logic_arc_int comp_req + comp_arc_sta comp_req + comp_arc_dyn comp_req + real_arc_int comp_req ==================================== ========================================== -.. tool_req:: Mandate links for safety + +.. tool_req:: Ensure safety architecture elements link a safety requirement :id: tool_req__docs_arch_link_safety_to_req :tags: Architecture :implemented: PARTIAL :satisfies: PROCESS_gd_req__arch__linkage_requirement :parent_covered: YES - Docs-as-Code shall enforce that architecture model elements of type - :need:`tool_req__docs_arch_types` with ``safety != QM`` are linked to requirements of - type :need:`tool_req__docs_req_types` that are also safety relevant (``safety != - QM``). + Docs-as-Code shall enforce that architecture elements of type + :need:`tool_req__docs_arch_types` with ``safety != QM`` are linked to at least one + requirements of type :need:`tool_req__docs_req_types` with the exact same ``safety`` + value. + +.. 
tool_req:: Ensure qm architecture elements do not fulfill safety requirements + :id: tool_req__docs_arch_link_qm_to_safety_req + :tags: Architecture + :implemented: PARTIAL + :satisfies: PROCESS_gd_req__arch__linkage_requirement + :parent_covered: YES + + Docs-as-Code shall enforce that architecture elements of type + :need:`tool_req__docs_arch_types` with ``safety == QM`` are not linked to requirements + of type :need:`tool_req__docs_req_types` with ``safety != QM``. + .. tool_req:: Restrict links for safety requirements :id: tool_req__docs_req_arch_link_safety_to_arch :tags: Architecture :implemented: PARTIAL - :satisfies: PROCESS_gd_req__arch__linkage_safety_trace + :satisfies: + PROCESS_gd_req__arch__linkage_safety_trace, + PROCESS_gd_req__req__linkage_safety, :parent_covered: NO - Docs-as-Code shall enforce that architecture model elements of type - :need:`tool_req__docs_arch_types` with ``safety != QM`` can only be linked to other - architecture model elements with ``safety != QM``. + Docs-as-Code shall enforce that valid safety architectural elements (Safety != QM) can + only be linked against valid safety architectural elements. .. tool_req:: Security: Restrict linkage :id: tool_req__docs_arch_link_security @@ -493,29 +594,34 @@ This section provides an overview of current process requirements and their clar :parent_covered: YES :satisfies: PROCESS_gd_req__arch__linkage_security_trace - Docs-as-Code shall enforce that architecture elements with ``security == YES`` are - only linked to other architecture elements with ``security == YES``. + Docs-as-Code shall enforce that security relevant :need:`tool_req__docs_arch_types` (Security == + YES) can only be linked against security relevant :need:`tool_req__docs_arch_types`. ---------------------- 🖼️ Diagram Related ---------------------- .. 
tool_req:: Support Diagram drawing of architecture - :id: tool_req__docs_arch_diag_draw + :id: tool_req__docs_arch_views :tags: Architecture :implemented: YES - :satisfies: PROCESS_doc_concept__arch__process, PROCESS_gd_req__arch__viewpoints + :satisfies: + PROCESS_gd_req__arch__viewpoints, :parent_covered: YES Docs-as-Code shall enable the rendering of diagrams for the following architecture views: - * Feature View & Component View: - * Static View - * Dynamic View - * Interface View - * Software Module View - * Platform View + * Feature Package Diagram (feat_arc_sta) + * Feature Sequence Diagram (feat_arc_dyn) + * Feature Interface View (logic_arc_int) + * Component Package Diagram (comp_arc_sta) + * Component Sequence Diagram (comp_arc_dyn) + * Component Interface (real_arc_int) + * Module View (mod_view_sta) + .. note:: + feat_arc_sta, comp_arc_sta, logic_arc_int, real_arc_int are architecture elements + AND architecture views. 💻 Detailed Design & Code ########################## @@ -528,18 +634,57 @@ This section provides an overview of current process requirements and their clar :tags: Detailed Design & Code :id: tool_req__docs_dd_link_source_code_link :implemented: YES - :parent_covered: YES - :satisfies: PROCESS_gd_req__req__attr_impl + :parent_covered: NO: we only enable linking, we do not link + :satisfies: + PROCESS_gd_req__req__attr_impl, + PROCESS_gd_req__impl__design_code_link, + + Docs-as-Code shall allow source code to link to needs. + + A link to the corresponding source code location in GitHub shall be generated in the + generated documentation within the linked requirement. + + + +.. tool_req:: Feature Flags + :id: tool_req__docs_dd_feature_flag + :tags: Detailed Design & Code + :implemented: NO + :parent_covered: YES + :satisfies: PROCESS_gd_req__req__linkage_architecture_switch + + Docs-as-Code shall allow for a to-be-defined list of checks to be non-fatal for non + release builds. These are typically better suited for metrics than for checks. 
+ + e.g. PROCESS_gd_req__req__linkage_architecture + + +.. tool_req:: Enable Creation of Dependency Graphs + :id: tool_req__docs__dd_dependency_graph + :tags: Detailed Design & Code + :implemented: NO + :parent_covered: YES + :satisfies: PROCESS_gd_req__impl__dependency_analysis + :status: invalid - Docs-as-Code shall allow source code to link to requirements. + Docs-As-Code shall support generation and rendering of dependency graphs for + components. It shall show all dependencies of a component incl transitive + dependencies. + + .. note:: + Components are defined in `comp_arc_sta`. + A component is also a bazel target. We can use bazel dependency graphs. + + +Testing +####### - A backlink to the corresponding source code location in GitHub shall be generated in - the output as an attribute of the linked requirement. .. tool_req:: Supports linking to test cases - :id: tool_req__docs_dd_link_testcase - :tags: Detailed Design & Code + :id: tool_req__docs_test_link_testcase + :tags: Testing :implemented: PARTIAL + :parent_covered: YES :satisfies: PROCESS_gd_req__req__attr_testlink Docs-as-Code shall allow requirements of type :need:`tool_req__docs_req_types` to @@ -547,21 +692,48 @@ This section provides an overview of current process requirements and their clar This attribute shall support linking test cases to requirements. + +.. tool_req:: Extract Metadata from Tests + :id: tool_req__docs_test_metadata_mandatory_1 + :tags: Testing + :implemented: NO + :parent_covered: NO + :satisfies: PROCESS_gd_req__verification__checks + + Docs-as-Code shall ensure that each test case has TestType and DerivationTechnique set. + +.. tool_req:: Extract Metadata from Tests + :id: tool_req__docs_test_metadata_mandatory_2 + :tags: Testing + :implemented: NO + :parent_covered: NO + :satisfies: PROCESS_gd_req__verification__checks + :status: invalid + + Docs-as-Code shall ensure that each test case has a non empty description. + + .. 
note:: this will probably be implemented outside of docs-as-code. + +.. tool_req:: Extract Metadata from Tests + :id: tool_req__docs_test_metadata_link_levels + :tags: Testing + :implemented: NO + :parent_covered: NO + :satisfies: PROCESS_gd_req__verification__checks + :status: invalid + + Docs-as-Code shall ensure that test cases link to requirements on the correct level: + + - If Partially/FullyVerifies are set in Feature Integration Test these shall link to Feature Requirements + - If Partially/FullyVerifies are set in Component Integration Test these shall link to Component Requirements + - If Partially/FullyVerifies are set in Unit Test these shall link to Component Requirements + + 🧪 Tool Verification Reports ############################ .. they are so different, that they need their own section -.. tool_req:: Tool Verification Report - :id: tool_req__docs_tvr - :tags: Tool Verification Reports - :implemented: YES - :parent_covered: NO - :satisfies: PROCESS_gd_req__tool__attr_uid - - Docs-as-Code shall support the definition and management of Tool Verification Reports - (``doc_tool``). - .. 
tool_req:: Enforce safety classification :id: tool_req__docs_tvr_safety :tags: Tool Verification Reports @@ -569,7 +741,7 @@ This section provides an overview of current process requirements and their clar :parent_covered: YES :satisfies: PROCESS_gd_req__tool__attr_safety_affected, PROCESS_gd_req__tool__check_mandatory - Docs-as-Code shall enforce that every Tool Verification Report includes a + Docs-as-Code shall enforce that every Tool Verification Report (`doc_tool`) includes a ``safety_affected`` attribute with one of the following values: * YES @@ -582,8 +754,8 @@ This section provides an overview of current process requirements and their clar :parent_covered: YES :satisfies: PROCESS_gd_req__tool__attr_security_affected, PROCESS_gd_req__tool__check_mandatory - Docs-as-Code shall enforce that every Tool Verification Report includes a - ``security_affected`` attribute with one of the following values: + Docs-as-Code shall enforce that every Tool Verification Report (`doc_tool`) includes a + `security_affected` attribute with one of the following values: * YES * NO @@ -596,8 +768,8 @@ This section provides an overview of current process requirements and their clar :satisfies: PROCESS_gd_req__tool__attr_status, PROCESS_gd_req__tool__check_mandatory :parent_covered: YES - Docs-as-Code shall enforce that every Tool Verification Report includes a ``status`` - attribute with one of the following values: + Docs-as-Code shall enforce that every Tool Verification Report (`doc_tool`) includes a + `status` attribute with one of the following values: * draft * evaluated @@ -627,12 +799,175 @@ This section provides an overview of current process requirements and their clar * Standard requirement (std_req) -🛡️ Safety Analysis -################### +🛡️ Safety Analysis (DFA + FMEA) +############################### -.. note:: - Safety analysis is not yet defined yet. This is just a placeholder for future - requirements. + +.. 
tool_req:: Safety Analysis Need Types + :id: tool_req__docs_saf_types + :implemented: NO + :tags: Safety Analysis + :satisfies: + PROCESS_gd_req__saf_structure, + PROCESS_gd_req__saf_attr_uid, + :parent_covered: YES + + Docs-As-Code shall support the following need types: + + * Feature FMEA (Failure Modes and Effect Analysis) -> feat_saf_fmea + * Component FMEA (Failure Modes and Effect Analysis) -> comp_saf_fmea + * Feature DFA (Dependend Failure Analysis) -> feat_saf_dfa + * Component DFA (Dependent Failure Analysis) -> comp_saf_dfa + + +.. tool_req:: Safety Analysis Mitigation Attribute + :id: tool_req__docs_saf_attrs_mitigated_by + :implemented: NO + :tags: Safety Analysis + :satisfies: + PROCESS_gd_req__saf__attr_mitigated_by, + PROCESS_gd_req__saf_attr_requirements, + PROCESS_gd_req__saf_attr_requirements_check, + :parent_covered: YES + + Docs-As-Code shall enforce valid needs (`status` == `valid`) of type + :need:`tool_req__docs_saf_types` to have at least one `mitigated_by` link to a + requirement on the corresponding level. + + +.. tool_req:: Safety Analysis Mitigation Issue Attribute + :id: tool_req__docs_saf_attrs_mitigation_issue + :implemented: NO + :tags: Safety Analysis + :satisfies: PROCESS_gd_req__saf__attr_mitigation_issue + :parent_covered: NO + + Docs-As-Code shall allow needs of type :need:`tool_req__docs_saf_types` to have a + `mitigation_issue` attribute which links to a GitHub issue. + + +.. tool_req:: Safety Analysis Sufficient Attribute + :id: tool_req__docs_saf_attrs_sufficient + :implemented: NO + :tags: Safety Analysis + :satisfies: PROCESS_gd_req__saf_attr_sufficient + :parent_covered: YES + + Docs-As-Code shall enforce needs of type :need:`tool_req__docs_saf_types` to + have a `sufficient` attribute , which can have one of the following values: + + * yes + * no + +.. 
tool_req:: Safety Analysis Sufficient Check + :id: tool_req__docs_saf_attrs_sufficient_check + :implemented: NO + :tags: Safety Analysis + :satisfies: PROCESS_gd_req__saf_attr_sufficient + :parent_covered: YES + + Docs-As-Code shall ensure needs of type :need:`tool_req__docs_saf_types` with + `sufficient` == `yes` have a `mitigated_by` entry. + + +.. tool_req:: Safety Analysis Mandatory Content + :id: tool_req__docs_saf_attrs_content + :implemented: NO + :tags: Safety Analysis + :satisfies: PROCESS_gd_req__saf_argument + :parent_covered: NO + + Docs-As-Code shall enforce needs of type :need:`tool_req__docs_saf_types` to have a + non empty content. + + + +.. tool_req:: Safety Analysis Linkage Violates + :id: tool_req__docs_saf_attrs_violates + :implemented: NO + :tags: Safety Analysis + :satisfies: + PROCESS_gd_req__saf_linkage_check, + PROCESS_gd_req__saf_linkage, + :parent_covered: YES + + Docs-As-Code shall enforce that needs of type :need:`tool_req__docs_saf_types` have a + `violates` links to at least one dynamic / static diagram according to the table. + + | Source | Target | + | -- | -- | + | feat_saf_dfa | feat_arc_sta | + | comp_saf_dfa | comp_arc_sta | + | feat_saf_fmea | feat_arc_dyn | + | comp_saf_fmea | comp_arc_dyn | + + + +.. tool_req:: FMEA: fault id attribute + :id: tool_req__docs_saf_attr_fmea_fault_id + :implemented: NO + :tags: Safety Analysis + :satisfies: PROCESS_gd_req__saf_attr_fault_id + :parent_covered: NO + + Docs-As-Code shall enforce that needs of type DFA (see + :need:`tool_req__docs_saf_types`) have a `fault_id` attribute. + + Allowed values are listed as ID in tables at :need:`PROCESS_gd_guidl__dfa_failure_initiators`. + + +.. 
tool_req:: DFA: failure id attribute + :id: tool_req__docs_saf_attr_dfa_failure_id + :implemented: NO + :tags: Safety Analysis + :satisfies: PROCESS_gd_req__saf_attr_failure_id + :parent_covered: NO + + Docs-As-Code shall enforce that needs of type DFA (see + :need:`tool_req__docs_saf_types`) have a `fault_id` attribute. + + Allowed values are listed as ID in tables at :need:`PROCESS_gd_guidl__dfa_failure_initiators`. + + +.. tool_req:: Failure Effect + :id: tool_req__docs_saf_attr_fmea_failure_effect + :implemented: NO + :tags: Safety Analysis + :satisfies: PROCESS_gd_req__saf_attr_feffect + :parent_covered: NO + :status: invalid + + Docs-As-Code shall enforce that every Safety Analysis has a short description of the failure effect (e.g. failure lead to an unintended actuation of the analysed element) + +------- +Mapping +------- + +.. needtable:: + :style: table + :types: gd_req + :columns: id;satisfies_back as "tool_req" + :filter: "PROCESS_gd_req__saf" in id + + +🗺️ Full Mapping +################ + +Process to tools: + +.. needtable:: + :style: table + :types: gd_req + :columns: id;satisfies_back as "tool_req" + +Overview of Tool to Process Requirements +######################################## + +.. needtable:: + :types: tool_req + :filter: any(s.startswith("PROCESS_gd_req") for s in satisfies) + :columns: satisfies as "Process Requirement" ;id as "Tool Requirement";implemented;source_code_link + :style: table .. 
diff --git a/src/extensions/score_metamodel/checks/graph_checks.py b/src/extensions/score_metamodel/checks/graph_checks.py index 728534fd..06b7a200 100644 --- a/src/extensions/score_metamodel/checks/graph_checks.py +++ b/src/extensions/score_metamodel/checks/graph_checks.py @@ -20,6 +20,7 @@ graph_check, ) from sphinx.application import Sphinx +from sphinx_needs.config import NeedType from sphinx_needs.data import NeedsInfoType, NeedsView @@ -75,6 +76,11 @@ def eval_need_condition( } if not isinstance(condition, dict): + if not isinstance(condition, str): + raise ValueError( + f"Invalid condition type: condition ({type(condition)})," + + " expected str or dict." + ) return eval_need_check(need, condition, log) cond: str = list(condition.keys())[0] @@ -93,8 +99,11 @@ def eval_need_condition( raise ValueError(f"Unsupported condition operator: {cond}") -def get_need_selection( - needs: list[NeedsInfoType], selection: dict[str, str], log: CheckLogger +def filter_needs_by_criteria( + needs_types: list[NeedType], + needs: list[NeedsInfoType], + needs_selection_criteria: dict[str, str], + log: CheckLogger, ) -> list[NeedsInfoType]: """Create a list of needs that match the selection criteria.: - If it is an include selection add the include to the pattern @@ -102,19 +111,23 @@ def get_need_selection( """ selected_needs: list[NeedsInfoType] = [] - pattern = [] - need_pattern: str = list(selection.keys())[0] + pattern: list[str] = [] + need_pattern: str = list(needs_selection_criteria.keys())[0] # Verify Inputs if need_pattern in ["include", "exclude"]: - for pat in list(selection.values())[0].split(","): + for pat in list(needs_selection_criteria.values())[0].split(","): pattern.append(pat.lstrip()) else: - raise ValueError(f"Invalid need selection: {selection}") + raise ValueError(f"Invalid need selection: {needs_selection_criteria}") - if "condition" in selection: - condition = selection["condition"] + if "condition" in needs_selection_criteria: + condition = 
needs_selection_criteria["condition"] else: - raise ValueError(f"Invalid selection: {selection}") + raise ValueError(f"Invalid selection: {needs_selection_criteria}") + + for pat in pattern: + if not any(need_type["directive"] == pat for need_type in needs_types): + log.warning(f"Unknown need type `{pat}` in graph check.") for need in needs: if need_pattern == "include": @@ -141,23 +154,25 @@ def check_metamodel_graph( # Iterate over all graph checks for check_name, check_config in graph_checks_global.items(): - apply = check_config.get("needs") - eval = check_config.get("check") + needs_selection_criteria: dict[str, str] = check_config.get("needs") + check_to_perform: dict[str, str | dict] = check_config.get("check") explanation = check_config.get("explanation", "") assert explanation != "", ( f"Explanation for graph check {check_name} is missing. Explanations are mandatory for graph checks." ) # Get all needs matching the selection criteria - selected_needs = get_need_selection(needs_local, apply, log) + selected_needs = filter_needs_by_criteria( + app.config.needs_types, needs_local, needs_selection_criteria, log + ) for need in selected_needs: - for parent_relation in list(eval.keys()): + for parent_relation in list(check_to_perform.keys()): if parent_relation not in need: msg = f"Attribute not defined: `{parent_relation}` in need `{need['id']}`." log.warning_for_need(need, msg) continue - parent_ids = need[parent_relation] + parent_ids: list[str] = need[parent_relation] for parent_id in parent_ids: parent_need = needs_dict_all.get(parent_id) @@ -166,10 +181,12 @@ def check_metamodel_graph( log.warning_for_need(need, msg) continue - if not eval_need_condition(parent_need, eval[parent_relation], log): + if not eval_need_condition( + parent_need, check_to_perform[parent_relation], log + ): msg = ( f"Parent need `{parent_id}` does not fulfill " - f"condition `{eval[parent_relation]}`." + f"condition `{check_to_perform[parent_relation]}`." 
f" Explanation: {explanation}" ) log.warning_for_need(need, msg) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index a548fb30..fd11bcc8 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -14,8 +14,8 @@ needs_types_base_options: optional_options: + # req-Id: tool_req__docs_dd_link_source_code_link source_code_link: ^https://github.com/.* - # Custom semantic validation rules # Prohibited Word Option Checks # Follow this schema to write new checks @@ -34,7 +34,7 @@ prohibited_words_checks: - shall - must - will - # req-Id: tool_req__docs_common_attr_description + # req-Id: tool_req__docs_common_attr_desc_wording content_check: types: - requirement_excl_process @@ -78,7 +78,7 @@ needs_types: optional_links: links: "^.*$" - # Standards + # Standard Requirement and Work Product # req-Id: tool_req__docs_stdreq_types std_req: title: Standard Requirement @@ -114,7 +114,7 @@ needs_types: contains: ^gd_(req|temp|chklst|guidl|meth)__.*$ has: ^doc_(getstrt|concept)__.*$ - # Guidances + # req-Id: tool_req__docs_req_types gd_req: title: Process Requirements prefix: gd_req__ @@ -200,7 +200,7 @@ needs_types: id: ^doc_getstrt__[0-9a-z_]*$ status: ^(valid|draft)$ - # Documents, score, and other modules only + # req-Id: tool_req__docs_doc_types document: title: Generic Document prefix: doc__ @@ -210,13 +210,14 @@ needs_types: optional_options: safety: "^(QM|ASIL_B)$" security: "^(YES|NO)$" + # req-Id: tool_req__docs_doc_attr author: ^.*$ approver: ^.*$ reviewer: ^.*$ optional_links: realizes: "^wp__.+$" - # req-Id: tool_req__docs_tvr + # req-Id: tool_req__docs_doc_types doc_tool: title: Tool Verification Report prefix: doc_tool__ @@ -230,6 +231,11 @@ needs_types: # req-Id: tool_req__docs_tvr_security security_affected: "^(YES|NO)$" tcl: "^(LOW|HIGH)$" + optional_options: + # req-Id: tool_req__docs_doc_attr + author: ^.*$ + approver: ^.*$ + reviewer: ^.*$ 
optional_links: realizes: "^wp__.+$" @@ -241,7 +247,7 @@ needs_types: mandatory_options: id: ^stkh_req__[0-9a-z_]*$ # req-Id: tool_req__docs_req_attr_reqtype - reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ + reqtype: ^(Functional|Interface|Process|Non-Functional)$ # req-Id: tool_req__docs_common_attr_safety safety: ^(QM|ASIL_B)$ # req-Id: tool_req__docs_common_attr_status @@ -250,10 +256,9 @@ needs_types: # content: ^[\s\S]+$ # req-Id: tool_req__docs_req_attr_rationale rationale: ^.+$ - optional_options: # req-Id: tool_req__docs_common_attr_security - # TODO: move to mandatory once https://github.com/eclipse-score/process_description/pull/133 is merged security: ^(YES|NO)$ + optional_options: codelink: ^.*$ testlink: ^.*$ # req-Id: tool_req__docs_req_attr_reqcov @@ -273,7 +278,7 @@ needs_types: mandatory_options: id: ^feat_req__[0-9a-z_]*$ # req-Id: tool_req__docs_req_attr_reqtype - reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ + reqtype: ^(Functional|Interface|Process|Non-Functional)$ # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ # req-Id: tool_req__docs_common_attr_safety @@ -303,7 +308,7 @@ needs_types: mandatory_options: id: ^comp_req__[0-9a-z_]*$ # req-Id: tool_req__docs_req_attr_reqtype - reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ + reqtype: ^(Functional|Interface|Process|Non-Functional)$ # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ # req-Id: tool_req__docs_common_attr_safety @@ -361,12 +366,12 @@ needs_types: # req-Id: tool_req__docs_req_types aou_req: - title: Assumption of Use + title: Assumption of Use Requirement prefix: aou_req__ mandatory_options: id: ^aou_req__[0-9a-z_]*$ # req-Id: tool_req__docs_req_attr_reqtype - reqtype: ^(Functional|Interface|Process|Legal|Non-Functional)$ + reqtype: ^(Functional|Interface|Process|Non-Functional)$ # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ # req-Id: tool_req__docs_common_attr_safety @@ -386,10 
+391,13 @@ needs_types: - requirement - requirement_excl_process + # - Architecture - - # Architecture + # Architecture Element & View + # req-Id: tool_req__docs_arch_types + # req-Id: tool_req__docs_arch_views feat_arc_sta: - title: Feature Architecture Static View + title: Feature & Feature Package Diagram prefix: feat_arc_sta__ color: #FEDCD2 style: card @@ -407,9 +415,12 @@ needs_types: fulfils: ^feat_req__.+$ tags: - architecture_element + - architecture_view + # Architecture View + # req-Id: tool_req__docs_arch_views feat_arc_dyn: - title: Feature Architecture Dynamic View + title: Feature Sequence Diagram prefix: feat_arc_dyn__ color: #FEDCD2 style: card @@ -424,10 +435,13 @@ needs_types: mandatory_links: fulfils: ^feat_req__.+$ tags: - - architecture_element + - architecture_view + # Architecture Element & View + # req-Id: tool_req__docs_arch_types + # req-Id: tool_req__docs_arch_views logic_arc_int: - title: Logical Architecture Interfaces + title: Logical Interface & Feature Interface View prefix: logic_arc_int__ color: #FEDCD2 style: card @@ -444,9 +458,12 @@ needs_types: fulfils: ^comp_req__.+$ tags: - architecture_element + - architecture_view + # Architecture Element + # req-Id: tool_req__docs_arch_types logic_arc_int_op: - title: Logical Architecture Interface Operation + title: Logical Interface Operation prefix: logic_arc_int_op__ color: #FEDCD2 style: card @@ -463,6 +480,8 @@ needs_types: tags: - architecture_element + # Architecture View + # req-Id: tool_req__docs_arch_views mod_view_sta: title: Module Architecture Static View prefix: mod_view_sta__ @@ -472,7 +491,10 @@ needs_types: id: ^mod_view_sta__[0-9a-z_]+$ mandatory_links: includes: ^comp_arc_sta__.+$ + tags: + - architecture_view + # No process requirement mod_view_dyn: title: Module Architecture Dynamic View prefix: mod_view_dyn__ @@ -481,8 +503,11 @@ needs_types: mandatory_options: id: ^mod_view_dyn__[0-9a-z_]+$ + # Architecture Element & View + # req-Id: tool_req__docs_arch_types + # 
req-Id: tool_req__docs_arch_views comp_arc_sta: - title: Component Architecture Static View + title: Component & Component Package Diagram prefix: comp_arc_sta__ color: #FEDCD2 style: card @@ -501,9 +526,12 @@ needs_types: fulfils: ^comp_req__.+$ tags: - architecture_element + - architecture_view + # Architecture View + # req-Id: tool_req__docs_arch_views comp_arc_dyn: - title: Component Architecture Dynamic View + title: Component Sequence Diagram prefix: comp_arc_dyn__ color: #FEDCD2 style: card @@ -518,10 +546,13 @@ needs_types: optional_links: fulfils: ^comp_req__.+$ tags: - - architecture_element + - architecture_view + # Architecture Element & View + # req-Id: tool_req__docs_arch_types + # req-Id: tool_req__docs_arch_views real_arc_int: - title: Component Architecture Interfaces + title: Interface & Component Interface prefix: real_arc_int__ color: #FEDCD2 style: card @@ -538,9 +569,12 @@ needs_types: fulfils: ^comp_req__.+$ tags: - architecture_element + - architecture_view + # Architecture Element + # req-Id: tool_req__docs_arch_types real_arc_int_op: - title: Component Architecture Interface Operation + title: Interface Operation prefix: real_arc_int_op__ color: #FEDCD2 style: card @@ -559,6 +593,8 @@ needs_types: tags: - architecture_element + # - architecture end - + review_header: prefix: review__header title: Review Header @@ -620,6 +656,7 @@ needs_types: status: ^(valid|invalid)$ # DFA (Dependent Failure Analysis) + # No requirement!! 
plat_saf_dfa: title: Feature Dependent Failure Analysis prefix: plat_saf_dfa__ @@ -637,74 +674,116 @@ needs_types: optional_links: mitigated_by: ^(feat_req__.*|aou_req__.*)$ + # req-Id: tool_req__docs_saf_types feat_saf_dfa: - title: Feature Dependent Failure Analysis + title: Feature DFA (Dependent Failure Analysis) prefix: feat_saf_dfa__ mandatory_options: id: ^feat_saf_dfa__[0-9a-z_]+$ + # req-Id: tool_req__docs_saf_attr_dfa_failure_id failure_id: ^.*$ failure_effect: ^.*$ + # req-Id: tool_req__docs_saf_attrs_sufficient sufficient: ^(yes|no)$ status: ^(valid|invalid)$ + # req-Id: tool_req__docs_saf_attrs_content content: ^[\s\S]+$ mandatory_links: + # req-Id: tool_req__docs_saf_attrs_violates violates: ^feat_arc_sta__[0-9a-z_]+$ optional_options: + # req-Id: tool_req__docs_saf_attrs_mitigation_issue mitigation_issue: ^https://github.com/.*$ optional_links: + # req-Id: tool_req__docs_saf_attrs_mitigated_by + # (only mandatory once valid status == valid) mitigated_by: ^(feat_req__.*|aou_req__.*)$ + tags: + - dependent_failure_analysis + - safety_analysis + # req-Id: tool_req__docs_saf_types comp_saf_dfa: - title: Component Dependent Failure Analysis + title: Component DFA (Dependent Failure Analysis) prefix: comp_saf_dfa__ mandatory_options: id: ^comp_saf_dfa__[0-9a-z_]+$ + # req-Id: tool_req__docs_saf_attr_dfa_failure_id failure_id: ^.*$ failure_effect: ^.*$ + # req-Id: tool_req__docs_saf_attrs_sufficient sufficient: ^(yes|no)$ status: ^(valid|invalid)$ + # req-Id: tool_req__docs_saf_attrs_content content: ^[\s\S]+$ optional_options: + # req-Id: tool_req__docs_saf_attrs_mitigation_issue mitigation_issue: ^https://github.com/.*$ mandatory_links: + # req-Id: tool_req__docs_saf_attrs_violates violates: ^comp_arc_sta__[0-9a-z_]+$ optional_links: + # req-Id: tool_req__docs_saf_attrs_mitigated_by + # (only mandatory once valid status == valid) mitigated_by: ^(comp_req__.*|aou_req__.*)$ + tags: + - dependent_failure_analysis + - safety_analysis # FMEA (Failure Mode and 
Effects Analysis) + # req-Id: tool_req__docs_saf_types feat_saf_fmea: - title: Feature Failure Mode and Effects Analysis + title: Feature FMEA (Failure Mode and Effects Analysis) prefix: feat_saf_fmea__ mandatory_options: id: ^feat_saf_fmea__[0-9a-z_]+$ + # req-Id: tool_req__docs_saf_attr_fmea_fault_id fault_id: ^.*$ failure_effect: ^.*$ + # req-Id: tool_req__docs_saf_attrs_sufficient sufficient: ^(yes|no)$ status: ^(valid|invalid)$ + # req-Id: tool_req__docs_saf_attrs_content content: ^[\s\S]+$ optional_options: + # req-Id: tool_req__docs_saf_attrs_mitigation_issue mitigation_issue: ^https://github.com/.*$ mandatory_links: + # req-Id: tool_req__docs_saf_attrs_violates violates: ^feat_arc_dyn__[0-9a-z_]+$ optional_links: + # req-Id: tool_req__docs_saf_attrs_mitigated_by + # (only mandatory once valid status == valid) mitigated_by: ^(feat_req__.*|aou_req__.*)$ + tags: + - failure_mode_effects_analysis + - safety_analysis + # req-Id: tool_req__docs_saf_types comp_saf_fmea: - title: Component Failure Mode and Effects Analysis + title: Component FMEA (Failure Mode and Effects Analysis) prefix: comp_saf_fmea__ mandatory_options: id: ^comp_saf_fmea__[0-9a-z_]+$ + # req-Id: tool_req__docs_saf_attr_fmea_fault_id fault_id: ^.*$ failure_effect: ^.*$ + # req-Id: tool_req__docs_saf_attrs_sufficient sufficient: ^(yes|no)$ status: ^(valid|invalid)$ + # req-Id: tool_req__docs_saf_attrs_content content: ^[\s\S]+$ optional_options: + # req-Id: tool_req__docs_saf_attrs_mitigation_issue mitigation_issue: ^https://github.com/.*$ mandatory_links: + # req-Id: tool_req__docs_saf_attrs_violates violates: ^comp_arc_dyn__[0-9a-z_]+$ optional_links: mitigated_by: ^(comp_req__.*|aou_req__.*)$ + tags: + - failure_mode_effects_analysis + - safety_analysis # Extra link types, which shall be available and allow need types to be linked to each other. 
# We use a dedicated linked type for each type of a connection, for instance from @@ -810,29 +889,48 @@ needs_extra_links: # req- Id: gd_req__req__linkage_architecture # req- Id: gd_req__req__linkage_safety -# Checks if the child requirement has the at least the same safety level as the parent requirement. It's allowed to "overfill" the safety level of the parent. -# ASIL decomposition is not foreseen in S-CORE. Therefore it's not allowed to have a child requirement with a lower safety level than the parent requirement as -# it is possible in an decomposition case. -# If need-req is `QM`, parent must be `QM`. graph_checks: - req_safety_linkage_qm: + # req-Id: tool_req__docs_common_attr_safety_link_check + tool_req__docs_common_attr_safety_link_check: needs: - include: comp_req, feat_req + include: stkh_req, feat_req, comp_req, aou_req, gd_req, tool_req condition: safety == QM check: satisfies: safety == QM - explanation: An ASIL requirement must link at least one parent/upstream ASIL requirement for correct decomposition. Please ensure the parent’s safety level is QM and its status is valid. - # If need-req is `ASIL_B`, parent must be `QM` or `ASIL_B`. - req_safety_linkage_asil_b: + explanation: QM requirements cannot satisfy ASIL requirements. + + # req-Id: tool_req__docs_arch_link_qm_to_safety_req + tool_req__docs_arch_link_qm_to_safety_req: needs: - include: comp_req, feat_req - condition: safety == ASIL_B + include: feat_arc_sta, logic_arc_int, logic_arc_int_op, comp_arc_sta, real_arc_int, real_arc_int_op + condition: safety == QM check: - satisfies: - or: - - safety == ASIL_B - - safety == QM - explanation: An ASIL requirement must link at least one parent/upstream ASIL requirement for correct decomposition. Please ensure the parent’s safety level is ASIL_B or QM and its status is valid. + fulfils: safety != QM + explanation: An QM architecture element cannot implement ASIL requirements. 
+ + # req-Id: tool_req__docs_req_arch_link_safety_to_arch + tool_req__docs_req_arch_link_safety_to_arch: + needs: + include: feat_arc_sta, logic_arc_int, logic_arc_int_op, comp_arc_sta, real_arc_int, real_arc_int_op + condition: + and: + - safety != QM + - status == valid + check: + implements: # TODO: which attribute??? + and: + - safety != QM + - status == valid + explanation: An safety architecture element can only link other safety architecture elements. + + tool_req__docs_arch_link_security: + needs: + include: feat_arc_sta, logic_arc_int, logic_arc_int_op, comp_arc_sta, real_arc_int, real_arc_int_op + condition: security == YES + check: + implements: security == YES # Which attribute??? + explanation: An security architecture element can only link other security architecture elements. + # saf - ID gd_req__saf_linkage_safety # It shall be checked that Safety Analysis (DFA and FMEA) can only be linked via mitigate against # - Requirements with the same ASIL or diff --git a/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst b/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst index 10c14113..b93561e0 100644 --- a/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst +++ b/src/extensions/score_metamodel/tests/rst/graph/test_metamodel_graph.rst @@ -52,10 +52,10 @@ .. Negative Test: Child requirement QM. Parent requirement is `ASIL_B`. Child cant fulfill the safety level of the parent. -#EXPECT: feat_req__child__3: Parent need `feat_req__parent__ASIL_B` does not fulfill condition `safety == QM`. Explanation: An ASIL requirement must link at least one parent/upstream ASIL requirement for correct decomposition. Please ensure the parent’s safety level is QM and its status is valid. +#EXPECT: QM requirements cannot satisfy ASIL requirements. .. 
comp_req:: Child requirement 3 - :id: feat_req__child__3 + :id: feat_req__qm_child_with_asil_parent :safety: QM :satisfies: feat_req__parent__ASIL_B :status: valid @@ -63,10 +63,10 @@ .. Parent requirement does not exist -#EXPECT: feat_req__child__4: Parent need `feat_req__parent0__abcd` not found in needs_dict. +#EXPECT: unknown outgoing link .. feat_req:: Child requirement 4 - :id: feat_req__child__4 + :id: feat_req__linking_to_unknown_parent :safety: ASIL_B :status: valid :satisfies: feat_req__parent0__abcd diff --git a/src/extensions/score_metamodel/tests/test_rules_file_based.py b/src/extensions/score_metamodel/tests/test_rules_file_based.py index fdff5fa1..7dbd4c38 100644 --- a/src/extensions/score_metamodel/tests/test_rules_file_based.py +++ b/src/extensions/score_metamodel/tests/test_rules_file_based.py @@ -145,6 +145,18 @@ def extract_test_data(rst_file: Path) -> RstData | None: return rst_data +def filter_warnings_by_position( + rst_data: RstData, + warning_info: WarningInfo, + warnings: list[str], +) -> list[str]: + return [ + warning + for warning in warnings + if (f"{rst_data.filename}:{str(warning_info.lineno)}" in warning) + ] + + def warning_matches( rst_data: RstData, warning_info: WarningInfo, @@ -153,11 +165,8 @@ def warning_matches( ) -> bool: ### Checks if any element of the warning list is includes the given warning info. # It returns True if found otherwise False. 
- for warning in warnings: - if ( - f"{rst_data.filename}:{str(warning_info.lineno)}" in warning - and expected_message in warning - ): + for warning in filter_warnings_by_position(rst_data, warning_info, warnings): + if expected_message in warning: return True return False @@ -190,7 +199,10 @@ def test_rst_files( for warning_info in rst_data.warning_infos: for w in warning_info.expected: if not warning_matches(rst_data, warning_info, w, warnings): - raise AssertionError(f"Expected warning: '{w}' not found") + actual = filter_warnings_by_position(rst_data, warning_info, warnings) + raise AssertionError( + f"Expected warning: '{w}' not found. Received: {actual}" + ) for w in warning_info.not_expected: if warning_matches(rst_data, warning_info, w, warnings): raise AssertionError(f"Unexpected warning: '{w}' found") From ec62bcc34e6dbdd377abe1c83cc8d19c757a7654 Mon Sep 17 00:00:00 2001 From: Nicolae Dicu Date: Thu, 7 Aug 2025 08:00:27 +0200 Subject: [PATCH 096/231] fix needs_json visibility across repositories (#198) --- docs.bzl | 1 + 1 file changed, 1 insertion(+) diff --git a/docs.bzl b/docs.bzl index ae2d7205..3c940e99 100644 --- a/docs.bzl +++ b/docs.bzl @@ -139,4 +139,5 @@ def docs(source_dir = "docs", data = [], deps = []): formats = ["needs"], sphinx = ":sphinx_build", tools = data, + visibility = ["//visibility:public"], ) From e6bf7c2b8e0c6e72e5534b4b8a441d6dab6e7e96 Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Mon, 11 Aug 2025 12:07:30 +0200 Subject: [PATCH 097/231] Improve check_id_format (#192) * Improve check_id_format by adding parts option in the metamodel and removing the hardcoded need in the check, then adapt tests * handle example_feature case in check_id_length * Adding exceptions for example_feature * Fixing IDs --------- Co-authored-by: Maximilian Pollak --- MODULE.bazel | 2 +- docs/requirements/requirements.rst | 122 +++++++++--------- src/extensions/score_metamodel/__init__.py | 4 + .../checks/attributes_format.py | 72 ++++------- 
.../score_metamodel/checks/check_options.py | 10 +- .../checks/id_contains_feature.py | 2 + src/extensions/score_metamodel/metamodel.yaml | 51 +++++++- .../test_attributes_format_id_format.rst | 32 +++-- .../score_source_code_linker/__init__.py | 2 +- 9 files changed, 165 insertions(+), 132 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 49ca9456..7085ecc6 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "1.0.1", + version = "1.0.2-CW1", compatibility_level = 1, ) diff --git a/docs/requirements/requirements.rst b/docs/requirements/requirements.rst index 942943af..e4d8c858 100644 --- a/docs/requirements/requirements.rst +++ b/docs/requirements/requirements.rst @@ -55,9 +55,9 @@ This section provides an overview of current process requirements and their clar :implemented: YES :tags: Common Attributes :satisfies: - PROCESS_gd_req__req__attr_uid, - PROCESS_gd_req__tool__attr_uid, - PROCESS_gd_req__arch__attribute_uid, + PROCESS_gd_req__req_attr_uid, + PROCESS_gd_req__tool_attr_uid, + PROCESS_gd_req__arch_attribute_uid, PROCESS_gd_req__saf_attr_uid, :parent_covered: NO @@ -73,10 +73,10 @@ This section provides an overview of current process requirements and their clar :implemented: PARTIAL :tags: Common Attributes :satisfies: - PROCESS_gd_req__req__attr_uid, - PROCESS_gd_req__arch__attribute_uid, + PROCESS_gd_req__req_attr_uid, + PROCESS_gd_req__arch_attribute_uid, PROCESS_gd_req__saf_attr_uid, - :parent_covered: NO: cannot check non-existent "doc__naming_conventions" in PROCESS_gd_req__req__attr_uid + :parent_covered: NO: cannot check non-existent "doc__naming_conventions" in PROCESS_gd_req__req_attr_uid Docs-as-Code shall enforce that Need IDs follow the following naming scheme: @@ -97,8 +97,8 @@ This section provides an overview of current process requirements and their clar :implemented: YES :tags: Common Attributes :satisfies: - PROCESS_gd_req__req__attr_title, - 
PROCESS_gd_req__saf__attr_title, + PROCESS_gd_req__req_attr_title, + PROCESS_gd_req__saf_attr_title, :parent_covered: NO: Can not ensure summary Docs-as-Code shall enforce that all needs have titles and titles do not contain the following words: @@ -117,7 +117,7 @@ This section provides an overview of current process requirements and their clar :tags: Common Attributes :parent_covered: NO: Can not cover 'ISO/IEC/IEEE/29148' :implemented: YES - :satisfies: PROCESS_gd_req__req__attr_description + :satisfies: PROCESS_gd_req__req_attr_description Docs-as-Code shall enforce that each need of type :need:`tool_req__docs_req_types` has a description (content) @@ -127,7 +127,7 @@ This section provides an overview of current process requirements and their clar :tags: Common Attributes :implemented: YES :satisfies: - PROCESS_gd_req__req__desc_weak, + PROCESS_gd_req__req_desc_weak, :parent_covered: YES Docs-as-Code shall enforce that requirement descriptions do not contain the following weak words: @@ -149,8 +149,8 @@ This section provides an overview of current process requirements and their clar :implemented: YES :tags: Common Attributes :satisfies: - PROCESS_gd_req__req__attr_security, - PROCESS_gd_req__arch__attr_security, + PROCESS_gd_req__req_attr_security, + PROCESS_gd_req__arch_attr_security, Docs-as-Code shall enforce that the ``security`` attribute has one of the following values: @@ -175,8 +175,8 @@ This section provides an overview of current process requirements and their clar :implemented: YES :parent_covered: YES :satisfies: - PROCESS_gd_req__req__attr_safety, - PROCESS_gd_req__arch__attr_safety + PROCESS_gd_req__req_attr_safety, + PROCESS_gd_req__arch_attr_safety Docs-as-Code shall enforce that the ``safety`` attribute has one of the following values: @@ -201,8 +201,8 @@ This section provides an overview of current process requirements and their clar :implemented: YES :parent_covered: NO: gd_req__saf_attr_status has additional constraints :satisfies: - 
PROCESS_gd_req__req__attr_status, - PROCESS_gd_req__arch__attr_status, + PROCESS_gd_req__req_attr_status, + PROCESS_gd_req__arch_attr_status, PROCESS_gd_req__saf_attr_status, Docs-as-Code shall enforce that the ``status`` attribute has one of the following values: @@ -227,7 +227,7 @@ Versioning :tags: Common Attributes :implemented: NO :parent_covered: NO: to be checked after demo - :satisfies: PROCESS_gd_req__req__attr_version + :satisfies: PROCESS_gd_req__req_attr_version Docs-As-Code shall enable and enforce a versioning attribute for all needs. @@ -241,7 +241,7 @@ Versioning :tags: Common Attributes :implemented: NO :parent_covered: NO: parent talks about setting covered to false, but we want to issue a build error. - :satisfies: PROCESS_gd_req__req__suspicious + :satisfies: PROCESS_gd_req__req_suspicious :status: invalid Docs-as-Code shall check if linked parent needs have different versions, compared to @@ -258,7 +258,7 @@ Versioning :tags: Documents :implemented: YES :parent_covered: YES - :satisfies: PROCESS_gd_req__doc__types + :satisfies: PROCESS_gd_req__doc_types Docs-as-Code shall support the following document types: @@ -271,9 +271,9 @@ Versioning :tags: Documents :implemented: NO :satisfies: - PROCESS_gd_req__doc__author, - PROCESS_gd_req__doc__approver, - PROCESS_gd_req__doc__reviewer, + PROCESS_gd_req__doc_author, + PROCESS_gd_req__doc_approver, + PROCESS_gd_req__doc_reviewer, :parent_covered: NO, process requirement has changed and we do not understand the new wording. :status: invalid @@ -289,7 +289,7 @@ Versioning :id: tool_req__docs_doc_attr_author_autofill :tags: Documents :implemented: NO - :satisfies: PROCESS_gd_req__doc__author + :satisfies: PROCESS_gd_req__doc_author :parent_covered: NO, process requirement has changed and we do not understand the new wording. 
:status: invalid @@ -307,7 +307,7 @@ Versioning :id: tool_req__docs_doc_attr_approver_autofill :tags: Documents :implemented: NO - :satisfies: PROCESS_gd_req__doc__approver + :satisfies: PROCESS_gd_req__doc_approver :parent_covered: NO, process requirement has changed and we do not understand the new wording. :status: invalid @@ -321,7 +321,7 @@ Versioning :id: tool_req__docs_doc_attr_reviewer_autofill :tags: Documents :implemented: NO - :satisfies: PROCESS_gd_req__doc__reviewer + :satisfies: PROCESS_gd_req__doc_reviewer :parent_covered: NO, process requirement has changed and we do not understand the new wording. :status: invalid @@ -353,7 +353,7 @@ Mapping :id: tool_req__docs_req_types :tags: Requirements :implemented: YES - :satisfies: PROCESS_gd_req__req__structure + :satisfies: PROCESS_gd_req__req_structure :parent_covered: YES: Together with tool_req__docs_linkage Docs-as-Code shall support the following requirement types: @@ -374,7 +374,7 @@ Mapping :tags: Requirements :implemented: YES :parent_covered: NO: Can not ensure correct reasoning - :satisfies: PROCESS_gd_req__req__attr_rationale + :satisfies: PROCESS_gd_req__req_attr_rationale Docs-as-Code shall enforce that each stakeholder requirement (stkh_req) contains a ``rationale`` attribute. @@ -382,7 +382,7 @@ Mapping :id: tool_req__docs_req_attr_reqtype :tags: Requirements :implemented: YES - :satisfies: PROCESS_gd_req__req__attr_type + :satisfies: PROCESS_gd_req__req_attr_type Docs-as-Code shall enforce that each need of type :need:`tool_req__docs_req_types` except process and tool requirements has a ``reqtype`` attribute with one of the @@ -397,7 +397,7 @@ Mapping :id: tool_req__docs_req_attr_reqcov :tags: Requirements :implemented: PARTIAL - :satisfies: PROCESS_gd_req__req__attr_req_cov + :satisfies: PROCESS_gd_req__req_attr_req_cov Docs as code shall shall enable marking requirements as covered by their linked children. 
@@ -413,7 +413,7 @@ Mapping :tags: Requirements :implemented: PARTIAL :parent_covered: YES - :satisfies: PROCESS_gd_req__req__attr_test_covered + :satisfies: PROCESS_gd_req__req_attr_test_covered :status: invalid Docs-As-Code shall allow for every need of type :need:`tool_req__docs_req_types` to @@ -434,7 +434,7 @@ Mapping :id: tool_req__docs_req_link_satisfies_allowed :tags: Requirements :implemented: PARTIAL - :satisfies: PROCESS_gd_req__req__linkage, PROCESS_gd_req__req__traceability + :satisfies: PROCESS_gd_req__req_linkage, PROCESS_gd_req__req_traceability :parent_covered: YES :status: invalid @@ -463,7 +463,7 @@ Mapping :tags: Common Attributes :implemented: YES :parent_covered: YES - :satisfies: PROCESS_gd_req__req__linkage_safety + :satisfies: PROCESS_gd_req__req_linkage_safety QM requirements (safety == QM) shall not be linked to safety requirements (safety != QM) via the ``satisfies`` attribute. @@ -478,8 +478,8 @@ Mapping :id: tool_req__docs_arch_types :tags: Architecture :satisfies: - PROCESS_gd_req__arch__hierarchical_structure, - PROCESS_gd_req__arch__build_blocks, + PROCESS_gd_req__arch_hierarchical_structure, + PROCESS_gd_req__arch_build_blocks, :implemented: YES :parent_covered: NO :status: invalid @@ -501,8 +501,8 @@ Architecture Attributes :id: tool_req__docs_arch_attr_mandatory :tags: Architecture :satisfies: - PROCESS_gd_req__arch__attr_mandatory, - PROCESS_gd_req__arch__attr_fulfils, + PROCESS_gd_req__arch_attr_mandatory, + PROCESS_gd_req__arch_attr_fulfils, :implemented: PARTIAL :parent_covered: YES :parent_has_problem: YES: Metamodel & Process aren't the same. 
Some definitions are not consistent in Process @@ -526,10 +526,10 @@ Architecture Attributes :tags: Architecture :implemented: PARTIAL :satisfies: - PROCESS_gd_req__arch__linkage_requirement_type, - PROCESS_gd_req__arch__attr_fulfils, - PROCESS_gd_req__arch__traceability, - PROCESS_gd_req__req__linkage_fulfill + PROCESS_gd_req__arch_linkage_requirement_type, + PROCESS_gd_req__arch_attr_fulfils, + PROCESS_gd_req__arch_traceability, + PROCESS_gd_req__req_linkage_fulfill :parent_covered: YES Docs-as-Code shall enforce that linking via the ``fulfils`` attribute follows defined rules. @@ -555,7 +555,7 @@ Architecture Attributes :id: tool_req__docs_arch_link_safety_to_req :tags: Architecture :implemented: PARTIAL - :satisfies: PROCESS_gd_req__arch__linkage_requirement + :satisfies: PROCESS_gd_req__arch_linkage_requirement :parent_covered: YES Docs-as-Code shall enforce that architecture elements of type @@ -567,7 +567,7 @@ Architecture Attributes :id: tool_req__docs_arch_link_qm_to_safety_req :tags: Architecture :implemented: PARTIAL - :satisfies: PROCESS_gd_req__arch__linkage_requirement + :satisfies: PROCESS_gd_req__arch_linkage_requirement :parent_covered: YES Docs-as-Code shall enforce that architecture elements of type @@ -580,8 +580,8 @@ Architecture Attributes :tags: Architecture :implemented: PARTIAL :satisfies: - PROCESS_gd_req__arch__linkage_safety_trace, - PROCESS_gd_req__req__linkage_safety, + PROCESS_gd_req__arch_linkage_safety_trace, + PROCESS_gd_req__req_linkage_safety, :parent_covered: NO Docs-as-Code shall enforce that valid safety architectural elements (Safety != QM) can @@ -592,7 +592,7 @@ Architecture Attributes :tags: Architecture :implemented: NO :parent_covered: YES - :satisfies: PROCESS_gd_req__arch__linkage_security_trace + :satisfies: PROCESS_gd_req__arch_linkage_security_trace Docs-as-Code shall enforce that security relevant :need:`tool_req__docs_arch_types` (Security == YES) can only be linked against security relevant 
:need:`tool_req__docs_arch_types`. @@ -606,7 +606,7 @@ Architecture Attributes :tags: Architecture :implemented: YES :satisfies: - PROCESS_gd_req__arch__viewpoints, + PROCESS_gd_req__arch_viewpoints, :parent_covered: YES Docs-as-Code shall enable the rendering of diagrams for the following architecture views: @@ -636,8 +636,8 @@ Architecture Attributes :implemented: YES :parent_covered: NO: we only enable linking, we do not link :satisfies: - PROCESS_gd_req__req__attr_impl, - PROCESS_gd_req__impl__design_code_link, + PROCESS_gd_req__req_attr_impl, + PROCESS_gd_req__impl_design_code_link, Docs-as-Code shall allow source code to link to needs. @@ -651,20 +651,20 @@ Architecture Attributes :tags: Detailed Design & Code :implemented: NO :parent_covered: YES - :satisfies: PROCESS_gd_req__req__linkage_architecture_switch + :satisfies: PROCESS_gd_req__req_linkage_architecture_switch Docs-as-Code shall allow for a to-be-defined list of checks to be non-fatal for non release builds. These are typically better suited for metrics than for checks. - e.g. PROCESS_gd_req__req__linkage_architecture + e.g. PROCESS_gd_req__req_linkage_architecture .. tool_req:: Enable Creation of Dependency Graphs - :id: tool_req__docs__dd_dependency_graph + :id: tool_req__docs_dd_dependency_graph :tags: Detailed Design & Code :implemented: NO :parent_covered: YES - :satisfies: PROCESS_gd_req__impl__dependency_analysis + :satisfies: PROCESS_gd_req__impl_dependency_analysis :status: invalid Docs-As-Code shall support generation and rendering of dependency graphs for @@ -685,7 +685,7 @@ Testing :tags: Testing :implemented: PARTIAL :parent_covered: YES - :satisfies: PROCESS_gd_req__req__attr_testlink + :satisfies: PROCESS_gd_req__req_attr_testlink Docs-as-Code shall allow requirements of type :need:`tool_req__docs_req_types` to include a ``testlink`` attribute. 
@@ -698,7 +698,7 @@ Testing :tags: Testing :implemented: NO :parent_covered: NO - :satisfies: PROCESS_gd_req__verification__checks + :satisfies: PROCESS_gd_req__verification_checks Docs-as-Code shall ensure that each test case has TestType and DerivationTechnique set. @@ -707,7 +707,7 @@ Testing :tags: Testing :implemented: NO :parent_covered: NO - :satisfies: PROCESS_gd_req__verification__checks + :satisfies: PROCESS_gd_req__verification_checks :status: invalid Docs-as-Code shall ensure that each test case has a non empty description. @@ -719,7 +719,7 @@ Testing :tags: Testing :implemented: NO :parent_covered: NO - :satisfies: PROCESS_gd_req__verification__checks + :satisfies: PROCESS_gd_req__verification_checks :status: invalid Docs-as-Code shall ensure that test cases link to requirements on the correct level: @@ -739,7 +739,7 @@ Testing :tags: Tool Verification Reports :implemented: YES :parent_covered: YES - :satisfies: PROCESS_gd_req__tool__attr_safety_affected, PROCESS_gd_req__tool__check_mandatory + :satisfies: PROCESS_gd_req__tool_attr_safety_affected, PROCESS_gd_req__tool_check_mandatory Docs-as-Code shall enforce that every Tool Verification Report (`doc_tool`) includes a ``safety_affected`` attribute with one of the following values: @@ -752,7 +752,7 @@ Testing :tags: Tool Verification Reports :implemented: YES :parent_covered: YES - :satisfies: PROCESS_gd_req__tool__attr_security_affected, PROCESS_gd_req__tool__check_mandatory + :satisfies: PROCESS_gd_req__tool_attr_security_affected, PROCESS_gd_req__tool_check_mandatory Docs-as-Code shall enforce that every Tool Verification Report (`doc_tool`) includes a `security_affected` attribute with one of the following values: @@ -765,7 +765,7 @@ Testing :id: tool_req__docs_tvr_status :tags: Tool Verification Reports :implemented: YES - :satisfies: PROCESS_gd_req__tool__attr_status, PROCESS_gd_req__tool__check_mandatory + :satisfies: PROCESS_gd_req__tool_attr_status, PROCESS_gd_req__tool_check_mandatory 
:parent_covered: YES Docs-as-Code shall enforce that every Tool Verification Report (`doc_tool`) includes a @@ -825,7 +825,7 @@ Testing :implemented: NO :tags: Safety Analysis :satisfies: - PROCESS_gd_req__saf__attr_mitigated_by, + PROCESS_gd_req__saf_attr_mitigated_by, PROCESS_gd_req__saf_attr_requirements, PROCESS_gd_req__saf_attr_requirements_check, :parent_covered: YES @@ -839,7 +839,7 @@ Testing :id: tool_req__docs_saf_attrs_mitigation_issue :implemented: NO :tags: Safety Analysis - :satisfies: PROCESS_gd_req__saf__attr_mitigation_issue + :satisfies: PROCESS_gd_req__saf_attr_mitigation_issue :parent_covered: NO Docs-As-Code shall allow needs of type :need:`tool_req__docs_saf_types` to have a diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index ee9f7259..efa88b2c 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -39,6 +39,7 @@ @dataclass class ScoreNeedType(NeedType): tags: list[str] + parts: int @dataclass @@ -207,6 +208,8 @@ def load_metamodel_data(): one_type["mandatory_options"] = mandatory_options tags = directive_data.get("tags", []) one_type["tags"] = tags + parts = directive_data.get("parts", 3) + one_type["parts"] = parts optional_options = directive_data.get("optional_options", {}) optional_options.update(global_base_options_optional_opts) @@ -296,6 +299,7 @@ def default_options() -> list[str]: "has_forbidden_dead_links", "tags", "arch", + "parts", ] diff --git a/src/extensions/score_metamodel/checks/attributes_format.py b/src/extensions/score_metamodel/checks/attributes_format.py index 3cd01f98..7fc3d840 100644 --- a/src/extensions/score_metamodel/checks/attributes_format.py +++ b/src/extensions/score_metamodel/checks/attributes_format.py @@ -11,7 +11,7 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -from score_metamodel import CheckLogger, local_check, 
ScoreNeedType, ProhibitedWordCheck +from score_metamodel import CheckLogger, ProhibitedWordCheck, ScoreNeedType, local_check from sphinx.application import Sphinx from sphinx_needs.data import NeedsInfoType @@ -24,7 +24,7 @@ def get_need_type(needs_types: list[ScoreNeedType], directive: str) -> ScoreNeed raise ValueError(f"Need type {directive} not found in needs_types") -# req-#id: gd_req__req__attr_uid +# req-#id: gd_req__req_attr_uid @local_check def check_id_format(app: Sphinx, need: NeedsInfoType, log: CheckLogger): """ @@ -32,51 +32,21 @@ def check_id_format(app: Sphinx, need: NeedsInfoType, log: CheckLogger): the requirement id or not. --- """ - # These folders are taken from 'https://github.com/eclipse-score/process_description/tree/main/process' - # This means, any needs within any of these folders (no matter where they are) will not be required to have 3 parts - process_folder_names = [ - "general_concepts", - "introduction", - "process_areas", - "roles", - "standards", - "workflows", - "workproducts", - "process", - ] - # Split the string by underscores - parts = need["id"].split("__") - if need["type"] in [ - "std_wp", - "document", # This is used in 'platform_managment' in score. - "doc_tool", - "gd_guidl", - "workflow", - "gd_chklst", - "std_req", - "tool_req", - "role", - "doc_concept", - "gd_temp", - "gd_method", - "gd_req", - "workproduct", - "doc_getstrt", - ] or any(prefix in str(need.get("docname", "")) for prefix in process_folder_names): - if len(parts) != 2 and len(parts) != 3: - msg = ( - "expected to consisting of one of these 2 formats:" - "`__` or " - "`____`." 
- ) - log.warning_for_option(need, "id", msg) - else: - if len(parts) != 3: + need_options = get_need_type(app.config.needs_types, need["type"]) + expected_parts = need_options.get("parts", 3) + id_parts = need["id"].split("__") + id_parts_len = len(id_parts) + + if id_parts_len != expected_parts: + msg = "" + if expected_parts == 2: + msg = "expected to consist of this format: `__`. Only one '__' is allowed in this need's id." + elif expected_parts == 3: msg = ( - "expected to consisting of this format: " - "`____`." + "expected to consist of this format: " + "`____`. Only two '__' are allowed in this need's id." ) - log.warning_for_option(need, "id", msg) + log.warning_for_option(need, "id", msg) @local_check @@ -86,12 +56,18 @@ def check_id_length(app: Sphinx, need: NeedsInfoType, log: CheckLogger): While the recommended limit is 30 characters, this check enforces a strict maximum of 45 characters. If the ID exceeds 45 characters, a warning is logged specifying the actual length. + Any examples that are required to have 3 parts (2x'__') have an exception, and get 17 extra characters + to compensate for the lenght of `_example_feature_` that would be replaced by actually feature names. --- """ - if len(need["id"]) > 45: + max_lenght = 45 + parts = need["id"].split("__") + if parts[1] == "example_feature": + max_lenght += 17 # _example_feature_ + if len(need["id"]) > max_lenght: msg = ( f"exceeds the maximum allowed length of 45 characters " - f"(current length: {len(need['id'])})." + f"(current length: {len(need['id']) if 'example_feature' not in need['id'] else len(need['id']) - 17})." 
) log.warning_for_option(need, "id", msg) @@ -110,7 +86,7 @@ def _check_options_for_prohibited_words( log.warning_for_need(need, msg) -# req-#id: gd_req__req__attr_desc_weak +# req-#id: gd_req__req_attr_desc_weak # # req-#id: gd_req__requirements_attr_title @local_check def check_for_prohibited_words(app: Sphinx, need: NeedsInfoType, log: CheckLogger): diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index aae651c6..61bb67b9 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -91,12 +91,12 @@ def remove_prefix(word: str, prefixes: list[str]) -> str: ) -# req-#id: gd_req__req__attr_type +# req-#id: gd_req__req_attr_type # req-#id: gd_req__requirements_attr_security -# req-#id: gd_req__req__attr_safety -# req-#id: gd_req__req__attr_status -# req-#id: gd_req__req__attr_rationale -# req-#id: gd_req__req__attr_mandatory +# req-#id: gd_req__req_attr_safety +# req-#id: gd_req__req_attr_status +# req-#id: gd_req__req_attr_rationale +# req-#id: gd_req__req_attr_mandatory @local_check def check_options( app: Sphinx, diff --git a/src/extensions/score_metamodel/checks/id_contains_feature.py b/src/extensions/score_metamodel/checks/id_contains_feature.py index 2c0712b7..d7115098 100644 --- a/src/extensions/score_metamodel/checks/id_contains_feature.py +++ b/src/extensions/score_metamodel/checks/id_contains_feature.py @@ -38,6 +38,8 @@ def id_contains_feature(app: Sphinx, need: NeedsInfoType, log: CheckLogger): # Get the part of the string after the first two underscores: the path feature = parts[1] + if feature == "example_feature": + return featureparts = re.split(r"[_-]", feature) dir_docname = os.path.dirname(str(need.get("docname", ""))) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index fd11bcc8..6dbcfbab 100644 --- 
a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -59,6 +59,7 @@ needs_types: status: "^(draft|valid)$" optional_links: links: "^.*$" + parts: 3 tenet: title: "Tenet" @@ -68,6 +69,7 @@ needs_types: status: "^(draft|valid)$" optional_links: links: "^.*$" + parts: 3 assertion: title: "Assertion" @@ -77,6 +79,7 @@ needs_types: status: "^(draft|valid)$" optional_links: links: "^.*$" + parts: 3 # Standard Requirement and Work Product # req-Id: tool_req__docs_stdreq_types @@ -88,6 +91,7 @@ needs_types: status: ^(valid)$ optional_links: links: ^.*$ + parts: 3 std_wp: title: Standard Work Product @@ -95,6 +99,7 @@ needs_types: mandatory_options: id: std_wp__(iso26262|isosae21434|isopas8926|aspice_40)__[0-9a-z_]*$ status: ^(valid)$ + parts: 3 # Workflow # req-Id: tool_req__docs_wf_types @@ -113,6 +118,7 @@ needs_types: supported_by: ^rl__.*$ contains: ^gd_(req|temp|chklst|guidl|meth)__.*$ has: ^doc_(getstrt|concept)__.*$ + parts: 2 # req-Id: tool_req__docs_req_types gd_req: @@ -130,6 +136,7 @@ needs_types: complies: ^std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ tags: - requirement + parts: 2 gd_temp: title: Process Template @@ -139,6 +146,7 @@ needs_types: status: ^(valid|draft)$ optional_links: complies: std_req__(iso26262|isodae21434|isopas8926|aspice_40)__.*$ + parts: 2 gd_chklst: title: Process Checklist @@ -148,6 +156,7 @@ needs_types: status: ^(valid|draft)$ optional_links: complies: std_req__(iso26262|isodae21434|isopas8926|aspice_40)__.*$ + parts: 2 gd_guidl: title: Process Guideline @@ -157,6 +166,7 @@ needs_types: status: ^(valid|draft)$ optional_links: complies: std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ + parts: 2 gd_method: title: Process Method @@ -166,6 +176,8 @@ needs_types: status: ^(valid|draft)$ optional_links: complies: std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ + parts: 2 + # S-CORE Workproduct workproduct: title: Workproduct @@ -175,6 +187,7 @@ needs_types: 
status: ^(valid|draft)$ optional_links: complies: std_(wp__iso26262|wp__isosae21434|wp__isopas8926|iic_aspice_40)__.*$ + parts: 2 # Role role: @@ -184,6 +197,7 @@ needs_types: id: ^rl__[0-9a-z_]*$ optional_links: contains: ^rl__.*$ + parts: 2 # Documents, process_description only doc_concept: @@ -192,6 +206,7 @@ needs_types: mandatory_options: id: ^doc_concept__[0-9a-z_]*$ status: ^(valid|draft)$ + parts: 2 doc_getstrt: title: Getting Startet @@ -199,6 +214,7 @@ needs_types: mandatory_options: id: ^doc_getstrt__[0-9a-z_]*$ status: ^(valid|draft)$ + parts: 2 # req-Id: tool_req__docs_doc_types document: @@ -216,6 +232,7 @@ needs_types: reviewer: ^.*$ optional_links: realizes: "^wp__.+$" + parts: 2 # req-Id: tool_req__docs_doc_types doc_tool: @@ -238,6 +255,7 @@ needs_types: reviewer: ^.*$ optional_links: realizes: "^wp__.+$" + parts: 2 # Requirements # req-Id: tool_req__docs_req_types @@ -269,6 +287,7 @@ needs_types: tags: - requirement - requirement_excl_process + parts: 3 # req-Id: tool_req__docs_req_types feat_req: @@ -300,6 +319,7 @@ needs_types: tags: - requirement - requirement_excl_process + parts: 3 # req-Id: tool_req__docs_req_types comp_req: @@ -330,6 +350,7 @@ needs_types: tags: - requirement - requirement_excl_process + parts: 3 # req-Id: tool_req__docs_req_types tool_req: @@ -363,6 +384,7 @@ needs_types: tags: - requirement - requirement_excl_process + parts: 2 # req-Id: tool_req__docs_req_types aou_req: @@ -390,9 +412,9 @@ needs_types: tags: - requirement - requirement_excl_process + parts: 3 # - Architecture - - # Architecture Element & View # req-Id: tool_req__docs_arch_types # req-Id: tool_req__docs_arch_views @@ -416,6 +438,7 @@ needs_types: tags: - architecture_element - architecture_view + parts: 3 # Architecture View # req-Id: tool_req__docs_arch_views @@ -436,6 +459,8 @@ needs_types: fulfils: ^feat_req__.+$ tags: - architecture_view + - architecture_element + parts: 3 # Architecture Element & View # req-Id: tool_req__docs_arch_types @@ -459,6 
+484,7 @@ needs_types: tags: - architecture_element - architecture_view + parts: 3 # Architecture Element # req-Id: tool_req__docs_arch_types @@ -479,6 +505,7 @@ needs_types: included_by: ^logic_arc_int__.+$ tags: - architecture_element + parts: 3 # Architecture View # req-Id: tool_req__docs_arch_views @@ -493,6 +520,7 @@ needs_types: includes: ^comp_arc_sta__.+$ tags: - architecture_view + parts: 3 # No process requirement mod_view_dyn: @@ -502,6 +530,7 @@ needs_types: style: card mandatory_options: id: ^mod_view_dyn__[0-9a-z_]+$ + parts: 3 # Architecture Element & View # req-Id: tool_req__docs_arch_types @@ -527,6 +556,7 @@ needs_types: tags: - architecture_element - architecture_view + parts: 3 # Architecture View # req-Id: tool_req__docs_arch_views @@ -547,6 +577,8 @@ needs_types: fulfils: ^comp_req__.+$ tags: - architecture_view + - architecture_element + parts: 3 # Architecture Element & View # req-Id: tool_req__docs_arch_types @@ -570,6 +602,7 @@ needs_types: tags: - architecture_element - architecture_view + parts: 3 # Architecture Element # req-Id: tool_req__docs_arch_types @@ -592,6 +625,7 @@ needs_types: implements: ^logic_arc_int_op__.+$ tags: - architecture_element + parts: 3 # - architecture end - @@ -604,6 +638,7 @@ needs_types: approvers: ^.*$ hash: ^.*$ template: ^.*$ + parts: 3 # Implementation dd_sta: @@ -621,6 +656,7 @@ needs_types: satisfies: ^comp_arc_sta__.*$ optional_links: includes: ^sw_unit__.*$ + parts: 3 dd_dyn: title: Dynamic detailed design @@ -635,6 +671,7 @@ needs_types: mandatory_links: implements: ^comp_req__.*$ satisfies: ^comp_arc_sta__.*$ + parts: 3 sw_unit: title: Software unit @@ -644,6 +681,8 @@ needs_types: security: ^(YES|NO)$ safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ + parts: 3 + sw_unit_int: title: Software unit interfaces prefix: sw_unit_int__ @@ -654,6 +693,7 @@ needs_types: security: ^(YES|NO)$ safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ + parts: 3 # DFA (Dependent Failure Analysis) # No requirement!! 
@@ -673,6 +713,7 @@ needs_types: mitigation_issue: ^https://github.com/.*$ optional_links: mitigated_by: ^(feat_req__.*|aou_req__.*)$ + parts: 3 # req-Id: tool_req__docs_saf_types feat_saf_dfa: @@ -701,6 +742,7 @@ needs_types: tags: - dependent_failure_analysis - safety_analysis + parts: 3 # req-Id: tool_req__docs_saf_types comp_saf_dfa: @@ -729,6 +771,7 @@ needs_types: tags: - dependent_failure_analysis - safety_analysis + parts: 3 # FMEA (Failure Mode and Effects Analysis) # req-Id: tool_req__docs_saf_types @@ -758,6 +801,7 @@ needs_types: tags: - failure_mode_effects_analysis - safety_analysis + parts: 3 # req-Id: tool_req__docs_saf_types comp_saf_fmea: @@ -784,6 +828,7 @@ needs_types: tags: - failure_mode_effects_analysis - safety_analysis + parts: 3 # Extra link types, which shall be available and allow need types to be linked to each other. # We use a dedicated linked type for each type of a connection, for instance from @@ -886,8 +931,8 @@ needs_extra_links: # - condition: defines the condition that should be checked # - [and / or / xor / not] ############################################################## -# req- Id: gd_req__req__linkage_architecture -# req- Id: gd_req__req__linkage_safety +# req- Id: gd_req__req_linkage_architecture +# req- Id: gd_req__req_linkage_safety graph_checks: # req-Id: tool_req__docs_common_attr_safety_link_check diff --git a/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_format.rst b/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_format.rst index 3c8025c5..79b0d0ca 100644 --- a/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_format.rst +++ b/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_format_id_format.rst @@ -14,25 +14,31 @@ #CHECK: check_id_format .. Id does not consists of 3 parts -#EXPECT: stk_req__test.id (stk_req__test): expected to consisting of this format: `____`. 
+#EXPECT: stkh_req__test.id (stkh_req__test): expected to consist of this format: `____`. .. stkh_req:: This is a test - :id: stk_req__test + :id: stkh_req__test + +.. Id consists of 3 parts +#EXPECT-NOT: stkh_req__test__abcd.id (stkh_req__test__abcd): expected to consist of this format: `____`. + +.. stkh_req:: This is a test + :id: stkh_req__test__abcd .. Id follows pattern -#EXPECT-NOT: expected to consisting of this format: `____`. +#EXPECT: stkh_req__test__test__abcd.id (stkh_req__test__test__abcd): expected to consist of this format: `____`. -.. std_wp:: This is a test - :id: std_wp__test__test__abcd +.. stkh_req:: This is a test + :id: stkh_req__test__test__abcd -.. Id starts with wp and number of parth is neither 2 nor 3 -#EXPECT: wp__test__test__abcd.id (wp__test__test__abcd): expected to consisting of one of these 2 formats:`__` or `____`. +.. Id starts with wp and number of parts is 3 +#EXPECT: wp__test__abcd.id (wp__test__abcd): expected to consist of this format: `__`. -.. std_wp:: This is a test - :id: wp__test__test__abcd +.. workproduct:: This is a test + :id: wp__test__abcd -.. Id is valid, because it starts with wp and contains 3 parts -#EXPECT-NOT: expected to consisting of one of these 2 formats:`__` or `____`. +.. Id is invalid, because it starts with wp and contains 2 parts +#EXPECT-NOT: wp__test.id (wp__test): expected to consist of this format: `__`. -.. std_wp:: This is a test - :id: wp__test__abce +.. 
workproduct:: This is a test + :id: wp__test diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index 3a4be37a..3d56d1b9 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -225,7 +225,7 @@ def get_current_git_hash(ws_root: Path) -> str: raise -# re-qid: gd_req__req__attr_impl +# re-qid: gd_req__req_attr_impl def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: """ 'Main' function that facilitates the running of all other functions From 61be512c28cbc98a12a372996efb022a5d0c152b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Mon, 11 Aug 2025 16:08:59 +0200 Subject: [PATCH 098/231] Increase versions (#203) --- MODULE.bazel | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 7085ecc6..0d023d96 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "1.0.2-CW1", + version = "1.0.2", compatibility_level = 1, ) @@ -95,4 +95,4 @@ bazel_dep(name = "score_cr_checker", version = "0.3.1") bazel_dep(name = "score_dash_license_checker", version = "0.1.1") # docs dependency -bazel_dep(name = "score_process", version = "1.1.0") +bazel_dep(name = "score_process", version = "1.1.1-Beta") From f064cef672b5bbd4b06887cbabb8dfae89cdd350 Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Tue, 12 Aug 2025 10:41:40 +0200 Subject: [PATCH 099/231] linter (#202) --- .github/workflows/lint.yml | 38 ++++++++++++++++++++++++++++++++++++++ MODULE.bazel | 17 +++++++++++++++++ scripts/run-linters.sh | 36 ++++++++++++++++++++++++++++++++++++ 3 files changed, 91 insertions(+) create mode 100644 .github/workflows/lint.yml create mode 100755 scripts/run-linters.sh diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 00000000..8417b6c1 --- /dev/null +++ 
b/.github/workflows/lint.yml @@ -0,0 +1,38 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +name: Linting Checks + +on: + pull_request: + types: [opened, reopened, synchronize] + merge_group: + types: [checks_requested] + +jobs: + lint: + runs-on: ubuntu-latest + continue-on-error: true + steps: + - name: Checkout repository + uses: actions/checkout@v4.2.2 + + - name: Setup Bazel + uses: bazel-contrib/setup-bazel@0.15.0 + with: + disk-cache: true + repository-cache: true + bazelisk-cache: true + + - name: Run Linter Script + run: bash scripts/run-linters.sh diff --git a/MODULE.bazel b/MODULE.bazel index 0d023d96..efe5e633 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -96,3 +96,20 @@ bazel_dep(name = "score_dash_license_checker", version = "0.1.1") # docs dependency bazel_dep(name = "score_process", version = "1.1.1-Beta") + +# Add Linter +bazel_dep(name = "rules_multitool", version = "1.2.0") +bazel_dep( + name = "score_linter", + version = "0.1.0", +) + +multitool_root = use_extension("@rules_multitool//multitool:extension.bzl", "multitool") +use_repo(multitool_root, "actionlint_hub", "multitool", "ruff_hub", "shellcheck_hub", "yamlfmt_hub") + +register_toolchains( + "@ruff_hub//toolchains:all", + "@actionlint_hub//toolchains:all", + "@shellcheck_hub//toolchains:all", + "@yamlfmt_hub//toolchains:all", +) diff --git a/scripts/run-linters.sh b/scripts/run-linters.sh new file mode 100755 index 00000000..bb2dde4a --- /dev/null +++ 
b/scripts/run-linters.sh @@ -0,0 +1,36 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +#!/usr/bin/env bash +set -euo pipefail + +bazel run //:ide_support + +echo "Running Ruff linter..." +bazel run @score_linter//:ruff check + +echo "Running basedpyright..." +.venv/bin/python3 -m basedpyright + +echo "Running Actionlint..." +bazel run @score_linter//:actionlint + +echo "Running Shellcheck..." +find . \ + -type d \( -name .git -o -name .venv -o -name bazel-out -o -name node_modules \) -prune -false \ + -o -type f -exec grep -Il '^#!.*sh' {} \; | \ +xargs bazel run @score_linter//:shellcheck -- + +echo "Running Yamlfmt..." +bazel run @score_linter//:yamlfmt -- $(find . \ + -type d \( -name .git -o -name .venv -o -name bazel-out -o -name node_modules \) -prune -false \ + -o -type f \( -name "*.yaml" -o -name "*.yml" \) | tr '\n' '\0' | xargs -0) From e0d5de6fb7d97c039fdcf40fd527a6956e508c40 Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Wed, 13 Aug 2025 10:52:55 +0200 Subject: [PATCH 100/231] Ensure checks exists if added to filter(#206) * Add assert for existence of high CHECK in RST file tests and make Unit test for that. 
* Improve new and modified functions description * Improve modified function description --- src/extensions/score_metamodel/__init__.py | 18 +++++- .../tests/test_metamodel__init__.py | 61 +++++++++++++++++++ 2 files changed, 76 insertions(+), 3 deletions(-) create mode 100644 src/extensions/score_metamodel/tests/test_metamodel__init__.py diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index efa88b2c..4e2b32ec 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -53,12 +53,24 @@ class ProhibitedWordCheck: def parse_checks_filter(filter: str) -> list[str]: """ - Parse the checks filter string into a list of individual checks. - When empty, an empty list is returned = all checks are enabled. + Parses a comma-separated list of check names. + Returns all names after trimming spaces and ensures + each exists in local_checks or graph_checks. """ if not filter: return [] - return [check.strip() for check in filter.split(",")] + checks = [check.strip() for check in filter.split(",")] + + # Validate all checks exist in either local_checks or graph_checks + all_check_names = {c.__name__ for c in local_checks} | { + c.__name__ for c in graph_checks + } + for check in checks: + assert check in all_check_names, ( + f"Check: '{check}' is not one of the defined local or graph checks" + ) + + return checks def discover_checks(): diff --git a/src/extensions/score_metamodel/tests/test_metamodel__init__.py b/src/extensions/score_metamodel/tests/test_metamodel__init__.py new file mode 100644 index 00000000..1cd7041a --- /dev/null +++ b/src/extensions/score_metamodel/tests/test_metamodel__init__.py @@ -0,0 +1,61 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +import pytest + +from src.extensions.score_metamodel.__init__ import ( + graph_checks, + local_checks, + parse_checks_filter, +) + + +def dummy_local_check(app, need, log): + pass + + +def dummy_graph_check(app, needs_view, log): + pass + + +@pytest.fixture(autouse=True) +def setup_checks(): + """Reset and set test-only local and graph checks before each test.""" + local_checks.clear() + graph_checks.clear() + local_checks.append(dummy_local_check) + graph_checks.append(dummy_graph_check) + + +def test_returns_empty_list_when_filter_is_empty(): + """Return an empty list if no filter string is provided.""" + assert parse_checks_filter("") == [] + + +def test_returns_valid_checks(): + """Return the provided valid check names.""" + result = parse_checks_filter("dummy_local_check,dummy_graph_check") + assert result == ["dummy_local_check", "dummy_graph_check"] + + +def test_strips_whitespace(): + """Remove surrounding spaces from each check name.""" + result = parse_checks_filter(" dummy_local_check , dummy_graph_check ") + assert result == ["dummy_local_check", "dummy_graph_check"] + + +def test_raises_assertion_for_invalid_check(): + """Raise AssertionError if a check name is unknown.""" + with pytest.raises(AssertionError) as exc_info: + parse_checks_filter("non_existing_check") + assert "non_existing_check" in str(exc_info.value) + assert "not one of the defined local or graph checks" in str(exc_info.value) From e95e6629488a1e8c41e921a64bab4a0576103920 Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Wed, 13 Aug 2025 15:58:56 +0200 Subject: [PATCH 101/231] Fix source code links strings in code (#208) * Correct all source code links to start with 
pattern # req-Id * Replace old gd_req source code links to the correct related tool req ----------------------------------------------------- Signed-off-by: Alexander Lanin Co-authored-by: Alexander Lanin --- .../score_metamodel/checks/attributes_format.py | 6 +++--- .../score_metamodel/checks/check_options.py | 14 +++++++------- src/extensions/score_metamodel/metamodel.yaml | 2 -- .../score_source_code_linker/__init__.py | 2 +- 4 files changed, 11 insertions(+), 13 deletions(-) diff --git a/src/extensions/score_metamodel/checks/attributes_format.py b/src/extensions/score_metamodel/checks/attributes_format.py index 7fc3d840..608bd82f 100644 --- a/src/extensions/score_metamodel/checks/attributes_format.py +++ b/src/extensions/score_metamodel/checks/attributes_format.py @@ -24,7 +24,7 @@ def get_need_type(needs_types: list[ScoreNeedType], directive: str) -> ScoreNeed raise ValueError(f"Need type {directive} not found in needs_types") -# req-#id: gd_req__req_attr_uid +# req-Id: tool_req__docs_common_attr_id_scheme @local_check def check_id_format(app: Sphinx, need: NeedsInfoType, log: CheckLogger): """ @@ -86,8 +86,8 @@ def _check_options_for_prohibited_words( log.warning_for_need(need, msg) -# req-#id: gd_req__req_attr_desc_weak -# # req-#id: gd_req__requirements_attr_title +# req-Id: tool_req__docs_common_attr_desc_wording +# req-Id: tool_req__docs_common_attr_title @local_check def check_for_prohibited_words(app: Sphinx, need: NeedsInfoType, log: CheckLogger): need_options = get_need_type(app.config.needs_types, need["type"]) diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index 61bb67b9..1fe7354b 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -14,9 +14,9 @@ from score_metamodel import ( CheckLogger, + ScoreNeedType, default_options, local_check, - ScoreNeedType, ) from sphinx.application import Sphinx 
from sphinx_needs.data import NeedsInfoType @@ -91,12 +91,12 @@ def remove_prefix(word: str, prefixes: list[str]) -> str: ) -# req-#id: gd_req__req_attr_type -# req-#id: gd_req__requirements_attr_security -# req-#id: gd_req__req_attr_safety -# req-#id: gd_req__req_attr_status -# req-#id: gd_req__req_attr_rationale -# req-#id: gd_req__req_attr_mandatory +# req-Id: tool_req__docs_req_attr_reqtype +# req-Id: tool_req__docs_common_attr_security +# req-Id: tool_req__docs_common_attr_safety +# req-Id: tool_req__docs_common_attr_status +# req-Id: tool_req__docs_req_attr_rationale +# req-Id: tool_req__docs_arch_attr_mandatory @local_check def check_options( app: Sphinx, diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 6dbcfbab..f336f415 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -931,8 +931,6 @@ needs_extra_links: # - condition: defines the condition that should be checked # - [and / or / xor / not] ############################################################## -# req- Id: gd_req__req_linkage_architecture -# req- Id: gd_req__req_linkage_safety graph_checks: # req-Id: tool_req__docs_common_attr_safety_link_check diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index 3d56d1b9..8046f168 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -225,7 +225,7 @@ def get_current_git_hash(ws_root: Path) -> str: raise -# re-qid: gd_req__req_attr_impl +# req-Id: tool_req__docs_dd_link_source_code_link def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: """ 'Main' function that facilitates the running of all other functions From 958177e685d3d9c854bac28912c4d0c6a693c5f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Wed, 13 Aug 2025 16:01:12 +0200 Subject: 
[PATCH 102/231] Fix venv (#209) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Maximilian Sören Pollak --- scripts/run-linters.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run-linters.sh b/scripts/run-linters.sh index bb2dde4a..f58d92b8 100755 --- a/scripts/run-linters.sh +++ b/scripts/run-linters.sh @@ -19,7 +19,7 @@ echo "Running Ruff linter..." bazel run @score_linter//:ruff check echo "Running basedpyright..." -.venv/bin/python3 -m basedpyright +.venv_docs/bin/python3 -m basedpyright echo "Running Actionlint..." bazel run @score_linter//:actionlint From fd4b97c78db368602c348c29bd01227dab1b22ee Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Thu, 14 Aug 2025 12:46:41 +0200 Subject: [PATCH 103/231] Fix most of line length errors plus some other errors (#210) --- .../score_draw_uml_funcs/__init__.py | 10 +-- .../score_draw_uml_funcs/helpers.py | 3 +- .../score_header_service/__init__.py | 1 + .../test/test_header_service.py | 3 +- .../test/test_header_service_integration.py | 5 +- src/extensions/score_layout/__init__.py | 3 +- src/extensions/score_layout/html_options.py | 3 +- src/extensions/score_metamodel/__init__.py | 14 ++- .../checks/attributes_format.py | 21 +++-- .../score_metamodel/checks/graph_checks.py | 8 +- .../checks/id_contains_feature.py | 10 ++- .../score_metamodel/checks/standards.py | 8 +- .../score_metamodel/external_needs.py | 12 +-- .../score_metamodel/tests/__init__.py | 8 +- .../tests/test_check_options.py | 3 +- .../score_metamodel/tests/test_standards.py | 4 +- src/extensions/score_plantuml.py | 3 +- .../score_source_code_linker/__init__.py | 18 ++-- .../generate_source_code_links_json.py | 25 +----- .../score_source_code_linker/needlinks.py | 8 +- .../tests/test_requirement_links.py | 22 ++--- .../tests/test_source_link.py | 24 +++-- src/tests/test_consumer.py | 88 ++++++++++++------- 23 files changed, 168 insertions(+), 136 deletions(-) diff 
--git a/src/extensions/score_draw_uml_funcs/__init__.py b/src/extensions/score_draw_uml_funcs/__init__.py index 00712bcb..e3f254ec 100644 --- a/src/extensions/score_draw_uml_funcs/__init__.py +++ b/src/extensions/score_draw_uml_funcs/__init__.py @@ -42,9 +42,7 @@ get_impl_comp_from_logic_iface, get_interface_from_component, get_interface_from_int, - get_logical_interface_real, get_module, - get_real_interface_logical, ) from sphinx.application import Sphinx from sphinx_needs.logging import get_logger @@ -97,10 +95,10 @@ def draw_comp_incl_impl_int( :param dict[str,str] need: Component which should be drawn :param dict all_needs: Dictionary containing all needs - :param dict[str,dict] proc_impl_interfaces: Dictionary containing all implemented interfaces - which were already processed during this cycle - :param dict[str,dict] proc_used_interfaces: Dictionary containing all used interfaces - which were already processed during this cycle + :param dict[str,dict] proc_impl_interfaces: Dictionary containing + all implemented interfaces which were already processed during this cycle + :param dict[str,dict] proc_used_interfaces: Dictionary containing + all used interfaces which were already processed during this cycle """ # Draw outer component structure_text = f"{gen_struct_element('component', need)} {{\n" diff --git a/src/extensions/score_draw_uml_funcs/helpers.py b/src/extensions/score_draw_uml_funcs/helpers.py index ef59ebbe..09594a1d 100644 --- a/src/extensions/score_draw_uml_funcs/helpers.py +++ b/src/extensions/score_draw_uml_funcs/helpers.py @@ -269,7 +269,8 @@ def get_logical_interface_real( logical_ifop_need = all_needs.get(logical_ifop[0]) if not logical_ifop_need: logger.info( - f"{real_ifop}: Logical Interface Operation Need not defined, probably misspelled!" + f"{real_ifop}: Logical Interface Operation Need not defined, " + "probably misspelled!" 
) continue diff --git a/src/extensions/score_header_service/__init__.py b/src/extensions/score_header_service/__init__.py index 829d5154..b82187f5 100644 --- a/src/extensions/score_header_service/__init__.py +++ b/src/extensions/score_header_service/__init__.py @@ -11,6 +11,7 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* from sphinx.application import Sphinx + from src.extensions.score_header_service.header_service import register diff --git a/src/extensions/score_header_service/test/test_header_service.py b/src/extensions/score_header_service/test/test_header_service.py index 3c232642..b02ed58d 100644 --- a/src/extensions/score_header_service/test/test_header_service.py +++ b/src/extensions/score_header_service/test/test_header_service.py @@ -15,9 +15,10 @@ from unittest.mock import ANY, MagicMock, patch import pytest -import src.extensions.score_header_service.header_service as hs from sphinx.util.docutils import SphinxDirective +import src.extensions.score_header_service.header_service as hs + @pytest.fixture(scope="session", autouse=True) def add_metadata(record_testsuite_property: Callable[[str, str | list[str]], None]): diff --git a/src/extensions/score_header_service/test/test_header_service_integration.py b/src/extensions/score_header_service/test/test_header_service_integration.py index ef7d9f83..3caf69e0 100644 --- a/src/extensions/score_header_service/test/test_header_service_integration.py +++ b/src/extensions/score_header_service/test/test_header_service_integration.py @@ -16,7 +16,6 @@ from unittest.mock import MagicMock, patch import pytest -import src.extensions.score_header_service.header_service as hs from pytest import TempPathFactory from sphinx.testing.util import SphinxTestApp @@ -85,8 +84,8 @@ def wrapper(use_github_data: bool = True): "src.extensions.score_header_service", ] needs_types = [ - dict(title = "Review Header", directive = "review_header", 
color="#BFD8D2", style="node", - prefix = "review_header__"), + dict(title = "Review Header", directive = "review_header", + color="#BFD8D2", style="node", prefix = "review_header__"), ] needs_id_regex = ".*" needs_extra_options = [ diff --git a/src/extensions/score_layout/__init__.py b/src/extensions/score_layout/__init__.py index 3e534819..aa1b0761 100644 --- a/src/extensions/score_layout/__init__.py +++ b/src/extensions/score_layout/__init__.py @@ -37,7 +37,8 @@ def update_config(app: Sphinx, _config: Any): # Setting HTML static path # For now this seems the only place this is used / needed. - # In the future it might be a good idea to make this available in other places, maybe via the 'find_runfiles' lib + # In the future it might be a good idea to make this available in other places, + # maybe via the 'find_runfiles' lib if r := os.getenv("RUNFILES_DIR"): dirs = [str(x) for x in Path(r).glob("*score_docs_as_code+")] if dirs: diff --git a/src/extensions/score_layout/html_options.py b/src/extensions/score_layout/html_options.py index 24ada62a..5794a9bd 100644 --- a/src/extensions/score_layout/html_options.py +++ b/src/extensions/score_layout/html_options.py @@ -73,7 +73,8 @@ def return_html_context(app: Sphinx) -> dict[str, str]: and not app.config.html_context.get("github_repo") ): return { - # still required for use_edit_page_button and other elements except version switcher + # still required for use_edit_page_button and other elements + # except version switcher "github_user": "dummy", "github_repo": "dummy", "github_version": "main", diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 4e2b32ec..1127a3b4 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -105,7 +105,8 @@ def _run_checks(app: Sphinx, exception: Exception | None) -> None: if exception: return - # Filter out external needs, as checks are only intended to be run on internal needs. 
+ # Filter out external needs, as checks are only intended to be run + # on internal needs. needs_all_needs = SphinxNeedsData(app.env).get_needs_view() logger.debug(f"Running checks for {len(needs_all_needs)} needs") @@ -145,13 +146,14 @@ def is_check_enabled(check: local_check_function | graph_check_function): if log.has_infos: log.info( - "Some needs have issues related to the new checks. See the log for more information." + "Some needs have issues related to the new checks. " + "See the log for more information." ) # TODO: exit code def convert_checks_to_dataclass(checks_dict) -> list[ProhibitedWordCheck]: - prohibited_words_checks = [ + return [ ProhibitedWordCheck( name=check_name, option_check={k: v for k, v in check_config.items() if k != "types"}, @@ -159,7 +161,6 @@ def convert_checks_to_dataclass(checks_dict) -> list[ProhibitedWordCheck]: ) for check_name, check_config in checks_dict.items() ] - return prohibited_words_checks def load_metamodel_data(): @@ -190,11 +191,6 @@ def load_metamodel_data(): proh_checks_dict = data.get("prohibited_words_checks", {}) prohibited_words_checks = convert_checks_to_dataclass(proh_checks_dict) - # prohibited_words_checks= [ProhibitedWordCheck(**check) for check in pro_checks.values()] - - # stop_words_list = global_base_options.get("prohibited_words", {}).get("title", []) - # weak_words_list = global_base_options.get("prohibited_words", {}).get("content", []) - # Default options by sphinx, sphinx-needs or anything else we need to account for default_options_list = default_options() diff --git a/src/extensions/score_metamodel/checks/attributes_format.py b/src/extensions/score_metamodel/checks/attributes_format.py index 608bd82f..2d3a6cb3 100644 --- a/src/extensions/score_metamodel/checks/attributes_format.py +++ b/src/extensions/score_metamodel/checks/attributes_format.py @@ -40,11 +40,15 @@ def check_id_format(app: Sphinx, need: NeedsInfoType, log: CheckLogger): if id_parts_len != expected_parts: msg = "" if 
expected_parts == 2: - msg = "expected to consist of this format: `__`. Only one '__' is allowed in this need's id." + msg = ( + "expected to consist of this format: `__`. " + "Only one '__' is allowed in this need's id." + ) elif expected_parts == 3: msg = ( "expected to consist of this format: " - "`____`. Only two '__' are allowed in this need's id." + "`____`. " + "Only two '__' are allowed in this need's id." ) log.warning_for_option(need, "id", msg) @@ -56,8 +60,9 @@ def check_id_length(app: Sphinx, need: NeedsInfoType, log: CheckLogger): While the recommended limit is 30 characters, this check enforces a strict maximum of 45 characters. If the ID exceeds 45 characters, a warning is logged specifying the actual length. - Any examples that are required to have 3 parts (2x'__') have an exception, and get 17 extra characters - to compensate for the lenght of `_example_feature_` that would be replaced by actually feature names. + Any examples that are required to have 3 parts (2x'__') have an exception, + and get 17 extra characters to compensate for the lenght of `_example_feature_` + that would be replaced by actually feature names. --- """ max_lenght = 45 @@ -67,7 +72,8 @@ def check_id_length(app: Sphinx, need: NeedsInfoType, log: CheckLogger): if len(need["id"]) > max_lenght: msg = ( f"exceeds the maximum allowed length of 45 characters " - f"(current length: {len(need['id']) if 'example_feature' not in need['id'] else len(need['id']) - 17})." + "(current length: " + f"{len(need['id']) if 'example_feature' not in need['id'] else len(need['id']) - 17})." ) log.warning_for_option(need, "id", msg) @@ -82,7 +88,10 @@ def _check_options_for_prohibited_words( forbidden_words = prohibited_word_checks.option_check[option] for word in need[option].split(): if word in forbidden_words: - msg = f"contains a weak word: `{word}` in option: `{option}`. Please revise the wording." + msg = ( + f"contains a weak word: `{word}` in option: `{option}`. 
" + "Please revise the wording." + ) log.warning_for_need(need, msg) diff --git a/src/extensions/score_metamodel/checks/graph_checks.py b/src/extensions/score_metamodel/checks/graph_checks.py index 06b7a200..fae247ee 100644 --- a/src/extensions/score_metamodel/checks/graph_checks.py +++ b/src/extensions/score_metamodel/checks/graph_checks.py @@ -158,7 +158,8 @@ def check_metamodel_graph( check_to_perform: dict[str, str | dict] = check_config.get("check") explanation = check_config.get("explanation", "") assert explanation != "", ( - f"Explanation for graph check {check_name} is missing. Explanations are mandatory for graph checks." + f"Explanation for graph check {check_name} is missing. " + "Explanations are mandatory for graph checks." ) # Get all needs matching the selection criteria selected_needs = filter_needs_by_criteria( @@ -168,7 +169,10 @@ def check_metamodel_graph( for need in selected_needs: for parent_relation in list(check_to_perform.keys()): if parent_relation not in need: - msg = f"Attribute not defined: `{parent_relation}` in need `{need['id']}`." + msg = ( + f"Attribute not defined: `{parent_relation}` " + f"in need `{need['id']}`." 
+ ) log.warning_for_need(need, msg) continue diff --git a/src/extensions/score_metamodel/checks/id_contains_feature.py b/src/extensions/score_metamodel/checks/id_contains_feature.py index d7115098..f347f7cb 100644 --- a/src/extensions/score_metamodel/checks/id_contains_feature.py +++ b/src/extensions/score_metamodel/checks/id_contains_feature.py @@ -13,13 +13,12 @@ import os import re -from sphinx.application import Sphinx -from sphinx_needs.data import NeedsInfoType - from score_metamodel import ( CheckLogger, local_check, ) +from sphinx.application import Sphinx +from sphinx_needs.data import NeedsInfoType @local_check @@ -66,5 +65,8 @@ def id_contains_feature(app: Sphinx, need: NeedsInfoType, log: CheckLogger): log.warning_for_option( need, "id", - f"Featurepart '{featureparts}' not in path '{docname}' or abbreviation not ok, expected: '{initials}'.", + ( + f"Featurepart '{featureparts}' not in path '{docname}' " + f"or abbreviation not ok, expected: '{initials}'." + ), ) diff --git a/src/extensions/score_metamodel/checks/standards.py b/src/extensions/score_metamodel/checks/standards.py index c0f6c66f..76cf5392 100644 --- a/src/extensions/score_metamodel/checks/standards.py +++ b/src/extensions/score_metamodel/checks/standards.py @@ -224,18 +224,18 @@ def my_pie_linked_standard_requirements_by_tag( :labels: Linked, Not Linked :legend: :colors: LightSeaGreen, lightgray - :filter-func: score_metamodel.checks.standards.my_pie_linked_standard_requirements_by_tag(aspice40_man5) + :filter-func: path.to.function(tag_name) The call: - => score_metamodel.checks.standards.my_pie_linked_standard_requirements_by_tag(aspice40_man5) + => path.to.function(tag_name) would then pass 'aspice40_man5' as the arg1 and you have access to it then that way. NOTE:: There can not be any `.`(dots) in the tag passed into this function Return: - The direct return of this function is None. Sphinx-needs will get the mutated `results` - list, and use this to display/generate the piechart. 
+ The direct return of this function is None. Sphinx-needs will get + the mutated `results`list, and use this to display/generate the piechart. """ count_linked = 0 diff --git a/src/extensions/score_metamodel/external_needs.py b/src/extensions/score_metamodel/external_needs.py index d7a9de90..913392ae 100644 --- a/src/extensions/score_metamodel/external_needs.py +++ b/src/extensions/score_metamodel/external_needs.py @@ -52,9 +52,9 @@ def _parse_bazel_external_need(s: str) -> ExternalNeedsSource | None: return ExternalNeedsSource( bazel_module=repo, path_to_target=path_to_target, target=target ) - else: - # Unknown data target. Probably not a needs.json file. - return None + + # Unknown data target. Probably not a needs.json file. + return None def parse_external_needs_sources_from_DATA(v: str) -> list[ExternalNeedsSource]: @@ -148,7 +148,8 @@ def connect_external_needs(app: Sphinx, config: Config): fixed_json_file = Path(r) / json_file else: logger.debug( - "Running outside bazel. Determining git root for external needs JSON file." + "Running outside bazel. " + "Determining git root for external needs JSON file." ) git_root = Path.cwd().resolve() while not (git_root / ".git").exists(): @@ -182,7 +183,8 @@ def connect_external_needs(app: Sphinx, config: Config): "json_path": json_file, } ) - # Making the prefixes uppercase here to match sphinx_needs, as it does this internally too. + # Making the prefixes uppercase here to match sphinx_needs, + # as it does this internally too. 
assert isinstance(app.config.allowed_external_prefixes, list) # pyright: ignore[reportAny] app.config.allowed_external_prefixes.append( # pyright: ignore[reportUnknownMemberType] needs_json_data["project_prefix"].upper() # pyright: ignore[reportAny] diff --git a/src/extensions/score_metamodel/tests/__init__.py b/src/extensions/score_metamodel/tests/__init__.py index a915b2d1..c675ba4a 100644 --- a/src/extensions/score_metamodel/tests/__init__.py +++ b/src/extensions/score_metamodel/tests/__init__.py @@ -46,8 +46,8 @@ def assert_no_infos(self): def assert_warning(self, expected_substring: str, expect_location: bool = True): """ - Assert that the logger warning was called exactly once with a message containing - a specific substring. + Assert that the logger warning was called exactly once with a message + containing a specific substring. This also verifies that the defaults from need() are used correctly. So you must use need() to create the need object that is passed @@ -71,8 +71,8 @@ def assert_warning(self, expected_substring: str, expect_location: bool = True): def assert_info(self, expected_substring: str, expect_location: bool = True): """ - Assert that the logger info was called exactly once with a message containing - a specific substring. + Assert that the logger info was called exactly once with a message + containing a specific substring. This also verifies that the defaults from need() are used correctly. 
So you must use need() to create the need object that is passed diff --git a/src/extensions/score_metamodel/tests/test_check_options.py b/src/extensions/score_metamodel/tests/test_check_options.py index f2eb5f33..09485048 100644 --- a/src/extensions/score_metamodel/tests/test_check_options.py +++ b/src/extensions/score_metamodel/tests/test_check_options.py @@ -15,13 +15,12 @@ from unittest.mock import Mock import pytest -from sphinx.application import Sphinx - from score_metamodel.checks.check_options import ( check_extra_options, check_options, ) from score_metamodel.tests import fake_check_logger, need +from sphinx.application import Sphinx @pytest.mark.metadata( diff --git a/src/extensions/score_metamodel/tests/test_standards.py b/src/extensions/score_metamodel/tests/test_standards.py index 670f3cb7..bda4e197 100644 --- a/src/extensions/score_metamodel/tests/test_standards.py +++ b/src/extensions/score_metamodel/tests/test_standards.py @@ -15,6 +15,7 @@ # from sphinx.application import Sphinx import pytest + from src.extensions.score_metamodel.checks import standards from src.extensions.score_metamodel.tests import need # ,fake_check_logger @@ -946,7 +947,8 @@ def test_assert_multiple_kwargs(self): # Test if our assert works with pytest.raises( AssertionError, - match="Can only provide one tag to `my_pie_linked_standard_requirements_by_tag`", + match="Can only provide one tag to " + + "`my_pie_linked_standard_requirements_by_tag`", ): standards.my_pie_linked_standard_requirements_by_tag( needs, results, arg1="test_tag", arg2="test_test_tag" diff --git a/src/extensions/score_plantuml.py b/src/extensions/score_plantuml.py index b360dfb2..2669e0cd 100644 --- a/src/extensions/score_plantuml.py +++ b/src/extensions/score_plantuml.py @@ -70,7 +70,8 @@ def get_runfiles_dir() -> Path: def find_correct_path(runfiles: str) -> str: """ - This ensures that the 'plantuml' binary path is found in local 'score_docs_as_code' and module use. 
+ This ensures that the 'plantuml' binary path is found in local 'score_docs_as_code' + and module use. """ dirs = [str(x) for x in Path(runfiles).glob("*score_docs_as_code+")] if dirs: diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index 8046f168..7d047501 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -146,7 +146,9 @@ def group_by_need(source_code_links: list[NeedLink]) -> dict[str, list[NeedLink] def parse_git_output(str_line: str) -> str: if len(str_line.split()) < 2: LOGGER.warning( - f"Got wrong input line from 'get_github_repo_info'. Input: {str_line}. Expected example: 'origin git@github.com:user/repo.git'" + "Got wrong input line from 'get_github_repo_info'. " + f"Input: {str_line}." + "Expected example: 'origin git@github.com:user/repo.git'" ) return "" url = str_line.split()[1] # Get the URL part @@ -170,11 +172,13 @@ def get_github_repo_info(git_root_cwd: Path) -> str: else: # If we do not find 'origin' we just take the first line LOGGER.info( - "Did not find origin remote name. Will now take first result from: 'git remote -v'" + "Did not find origin remote name. " + "Will now take first result from: 'git remote -v'" ) repo = parse_git_output(process.stdout.split("\n")[0]) assert repo != "", ( - "Remote repository is not defined. Make sure you have a remote set. Check this via 'git remote -v'" + "Remote repository is not defined. Make sure you have a remote set. " + "Check this via 'git remote -v'" ) return repo @@ -246,10 +250,11 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: needs ) # TODO: why do we create a copy? Can we also needs_copy = needs[:]? copy(needs)? - for id, need in needs.items(): + for _, need in needs.items(): if need.get("source_code_link"): LOGGER.debug( - f"?? Need {need['id']} already has source_code_link: {need.get('source_code_link')}" + f"?? 
Need {need['id']} already has source_code_link: " + f"{need.get('source_code_link')}" ) source_code_links = load_source_code_links_json(get_cache_filename(app.outdir)) @@ -283,7 +288,8 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: Needs_Data.remove_need(need["id"]) Needs_Data.add_need(need) - # source_code_link of affected needs was overwritten. Make sure it's empty in all others! + # source_code_link of affected needs was overwritten. + # Make sure it's empty in all others! for need in needs.values(): if need["id"] not in source_code_links_by_need: need["source_code_link"] = "" diff --git a/src/extensions/score_source_code_linker/generate_source_code_links_json.py b/src/extensions/score_source_code_linker/generate_source_code_links_json.py index 347d5f36..12b71408 100644 --- a/src/extensions/score_source_code_linker/generate_source_code_links_json.py +++ b/src/extensions/score_source_code_linker/generate_source_code_links_json.py @@ -1,5 +1,5 @@ # ******************************************************************************* -# Copyright (c) 2024 Contributors to the Eclipse Foundation +# Copyright (c) 2025 Contributors to the Eclipse Foundation # # See the NOTICE file(s) distributed with this work for additional # information regarding copyright ownership. 
@@ -18,9 +18,7 @@ """ import os -import sys from pathlib import Path -from pprint import pprint from src.extensions.score_source_code_linker.needlinks import ( NeedLink, @@ -74,7 +72,9 @@ def _extract_references_from_file(root: Path, file_path: Path) -> list[NeedLink] """Scan a single file for template strings and return findings.""" assert root.is_absolute(), "Root path must be absolute" assert not file_path.is_absolute(), "File path must be relative to the root" - # assert file_path.is_relative_to(root), f"File path ({file_path}) must be relative to the root ({root})" + # assert file_path.is_relative_to(root), ( + # f"File path ({file_path}) must be relative to the root ({root})" + # ) assert (root / file_path).exists(), ( f"File {file_path} does not exist in root {root}." ) @@ -159,20 +159,3 @@ def generate_source_code_links_json(search_path: Path, file: Path): """ needlinks = find_all_need_references(search_path) store_source_code_links_json(file, needlinks) - - -# incremental_latest: -# DEBUG: Workspace root is /home/lla2hi/score/docs-as-code -# DEBUG: Current working directory is /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/execroot/_main/bazel-out/k8-fastbuild/bin/docs/incremental_latest.runfiles/_main -# DEBUG: Git root is /home/lla2hi/score/docs-as-code - -# incremental_release: (-> bazel build sandbox of process repository) -# DEBUG: Workspace root is None -# DEBUG: Current working directory is /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/sandbox/linux-sandbox/25/execroot/_main (-> process repo!!) 
-# rst files are in .../bazel-out/k8-fastbuild/bin/external/score_process~/process/_docs_needs_latest/score_process~/* -# DEBUG: Git root is /home/lla2hi/score/docs-as-code - -# docs_latest: -# DEBUG: Workspace root is None -# DEBUG: Current working directory is /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/sandbox/linux-sandbox/26/execroot/_main -# DEBUG: Git root is /home/lla2hi/score/docs-as-code diff --git a/src/extensions/score_source_code_linker/needlinks.py b/src/extensions/score_source_code_linker/needlinks.py index dbb52b38..406ad941 100644 --- a/src/extensions/score_source_code_linker/needlinks.py +++ b/src/extensions/score_source_code_linker/needlinks.py @@ -59,13 +59,13 @@ def needlink_decoder(d: dict[str, Any]) -> NeedLink | dict[str, Any]: need=d["need"], full_line=d["full_line"], ) - else: - # It's something else, pass it on to other decoders - return d + # It's something else, pass it on to other decoders + return d def store_source_code_links_json(file: Path, needlist: list[NeedLink]): - # After `rm -rf _build` or on clean builds the directory does not exist, so we need to create it + # After `rm -rf _build` or on clean builds the directory does not exist, + # so we need to create it file.parent.mkdir(exist_ok=True) with open(file, "w") as f: json.dump( diff --git a/src/extensions/score_source_code_linker/tests/test_requirement_links.py b/src/extensions/score_source_code_linker/tests/test_requirement_links.py index d706884e..4b57f8b3 100644 --- a/src/extensions/score_source_code_linker/tests/test_requirement_links.py +++ b/src/extensions/score_source_code_linker/tests/test_requirement_links.py @@ -14,14 +14,14 @@ import os import subprocess import tempfile -from pathlib import Path -from sphinx_needs.data import NeedsMutable -from src.extensions.score_metamodel.tests import need as test_need from dataclasses import asdict +from pathlib import Path from typing import Any - import pytest +from sphinx_needs.data import 
NeedsMutable + +from src.extensions.score_metamodel.tests import need as test_need # Import the module under test # Note: You'll need to adjust these imports based on your actual module structure @@ -29,7 +29,6 @@ find_need, get_cache_filename, get_current_git_hash, - get_github_base_url, get_github_link, get_github_repo_info, group_by_need, @@ -37,8 +36,8 @@ ) from src.extensions.score_source_code_linker.needlinks import ( NeedLink, - store_source_code_links_json, load_source_code_links_json, + store_source_code_links_json, ) """ @@ -84,9 +83,9 @@ def needlink_test_decoder(d: dict[str, Any]) -> NeedLink | dict[str, Any]: need=d["need"], full_line=decode_comment(d["full_line"]), ) - else: - # It's something else, pass it on to other decoders - return d + + # It's something else, pass it on to other decoders + return d @pytest.fixture @@ -401,7 +400,7 @@ def test_get_github_repo_info_https_remote(git_repo_with_https_remote): def test_get_github_repo_info_multiple_remotes(git_repo_multiple_remotes): - """Test getting GitHub repository information with multiple remotes (should prefer origin).""" + """Test GitHub repo info retrieval with multiple remotes (origin preferred).""" result = get_github_repo_info(git_repo_multiple_remotes) assert result == "test-user/test-repo" @@ -591,7 +590,8 @@ def another_function(): ["git", "commit", "-m", "Add implementation files"], cwd=git_repo, check=True ) - # Create needlinks manually (simulating what generate_source_code_links_json would do) + # Create needlinks manually + # (simulating what generate_source_code_links_json would do) needlinks = [ NeedLink( file=Path("src/implementation1.py"), diff --git a/src/extensions/score_source_code_linker/tests/test_source_link.py b/src/extensions/score_source_code_linker/tests/test_source_link.py index 8771f48d..6c40f5e3 100644 --- a/src/extensions/score_source_code_linker/tests/test_source_link.py +++ b/src/extensions/score_source_code_linker/tests/test_source_link.py @@ -11,32 +11,30 @@ # 
SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* import json +import os +import shutil +import subprocess from collections import Counter from collections.abc import Callable from pathlib import Path +from typing import cast import pytest -import os -import subprocess -import shutil - -from typing import cast from pytest import TempPathFactory from sphinx.testing.util import SphinxTestApp from sphinx_needs.data import SphinxNeedsData - from test_requirement_links import needlink_test_decoder + from src.extensions.score_source_code_linker import get_github_base_url, get_github_link -from src.extensions.score_source_code_linker.needlinks import NeedLink from src.extensions.score_source_code_linker.generate_source_code_links_json import ( find_ws_root, ) +from src.extensions.score_source_code_linker.needlinks import NeedLink @pytest.fixture() def sphinx_base_dir(tmp_path_factory: TempPathFactory) -> Path: - repo_path = tmp_path_factory.mktemp("test_git_repo") - return repo_path + return tmp_path_factory.mktemp("test_git_repo") @pytest.fixture() @@ -222,7 +220,6 @@ def basic_needs(): @pytest.fixture() def example_source_link_text_all_ok(sphinx_base_dir): - repo_path = sphinx_base_dir return { "TREQ_ID_1": [ NeedLink( @@ -254,7 +251,6 @@ def example_source_link_text_all_ok(sphinx_base_dir): @pytest.fixture() def example_source_link_text_non_existent(sphinx_base_dir): - repo_path = sphinx_base_dir return [ { "TREQ_ID_200": [ @@ -282,12 +278,14 @@ def compare_json_files(file1: Path, golden_file: Path): with open(golden_file, "r") as f2: json2 = json.load(f2, object_hook=needlink_test_decoder) assert len(json1) == len(json2), ( - f"{file1}'s lenth are not the same as in the golden file lenght. Len of{file1}: {len(json1)}. Len of Golden File: {len(json2)}" + f"{file1}'s lenth are not the same as in the golden file lenght. " + f"Len of{file1}: {len(json1)}. 
Len of Golden File: {len(json2)}" ) c1 = Counter(n for n in json1) c2 = Counter(n for n in json2) assert c1 == c2, ( - f"Testfile does not have same needs as golden file. Testfile: {c1}\nGoldenFile: {c2}" + "Testfile does not have same needs as golden file. " + f"Testfile: {c1}\nGoldenFile: {c2}" ) diff --git a/src/tests/test_consumer.py b/src/tests/test_consumer.py index c0fb091b..b5e93ae3 100644 --- a/src/tests/test_consumer.py +++ b/src/tests/test_consumer.py @@ -29,16 +29,20 @@ ) """ -This script's main usecase is to test consumers of Docs-As-Code with the new changes made in PR's. -This enables us to find new issues and problems we introduce with changes that we otherwise would only know much later. +This script's main usecase is to test consumers of Docs-As-Code with +the new changes made in PR's. +This enables us to find new issues and problems we introduce with changes +that we otherwise would only know much later. There are several things to note. -- The `print` function has been overwritten by the 'rich' package to allow for richer text output. +- The `print` function has been overwritten by the 'rich' package to allow for richer +text output. - The script itself takes quiet a bit of time, roughly 5+ min for a full run. 
- If you need more output, enable it via `-v` or `-vv` - Start the script via the following command: - bazel run //:ide_support - - .venv_docs/bin/python -m pytest -s src/tests (If you need more verbosity add `-v` or `-vv`) + - .venv_docs/bin/python -m pytest -s src/tests + (If you need more verbosity add `-v` or `-vv`) """ # Max width of the printout @@ -159,7 +163,8 @@ def filter_repos(repo_filter: str | None) -> list[ConsumerRepo]: # This prevents accidentally running zero tests due to typos if not filtered_repos and repo_filter: print( - f"[red]No valid repositories found in filter, running all repositories instead[/red]" + "[red]No valid repositories found in filter, " + "running all repositories instead[/red]" ) return REPOS_TO_TEST @@ -173,15 +178,13 @@ def replace_bazel_dep_with_local_override(module_content: str) -> str: pattern = rf'bazel_dep\(name = "score_docs_as_code", version = "[^"]+"\)' # Replacement with local_path_override - replacement = f"""bazel_dep(name = "score_docs_as_code", version = "0.0.0") + replacement = """bazel_dep(name = "score_docs_as_code", version = "0.0.0") local_path_override( module_name = "score_docs_as_code", path = "../docs_as_code" )""" - modified_content = re.sub(pattern, replacement, module_content) - - return modified_content + return re.sub(pattern, replacement, module_content) def replace_bazel_dep_with_git_override( @@ -196,9 +199,7 @@ def replace_bazel_dep_with_git_override( commit = "{git_hash}" )''' - modified_content = re.sub(pattern, replacement, module_content) - - return modified_content + return re.sub(pattern, replacement, module_content) def strip_ansi_codes(text: str) -> str: @@ -219,7 +220,8 @@ def parse_bazel_output(BR: BuildOutput, pytestconfig) -> BuildOutput: print(f"[DEBUG] Warning {i}: {repr(warning)}") for raw_warning in split_warnings: - # In the CLI we seem to have some ansi codes in the warnings. Need to strip those + # In the CLI we seem to have some ansi codes in the warnings. 
+ # Need to strip those clean_warning = strip_ansi_codes(raw_warning).strip() logger = "[NO SPECIFIC LOGGER]" @@ -239,19 +241,25 @@ def print_overview_logs(BR: BuildOutput): warning_loggers = list(BR.warnings.keys()) len_left_test_result = len_max - len("TEST RESULTS") print( - f"[blue]{'=' * int(len_left_test_result / 2)}TEST RESULTS{'=' * int(len_left_test_result / 2)}[/blue]" + f"[blue]{'=' * int(len_left_test_result / 2)}" + f"TEST RESULTS" + f"{'=' * int(len_left_test_result / 2)}[/blue]" ) print(f"[navy_blue]{'=' * len_max}[/navy_blue]") warning_total_loggers_msg = f"Warning Loggers Total: {len(warning_loggers)}" len_left_loggers = len_max - len(warning_total_loggers_msg) print( - f"[blue]{'=' * int(len_left_loggers / 2)}{warning_total_loggers_msg}{'=' * int(len_left_loggers / 2)}[/blue]" + f"[blue]{'=' * int(len_left_loggers / 2)}" + f"{warning_total_loggers_msg}" + f"{'=' * int(len_left_loggers / 2)}[/blue]" ) warning_loggers = list(BR.warnings.keys()) - warning_total_msg = f"Logger Warnings Accumulated" + warning_total_msg = "Logger Warnings Accumulated" len_left_loggers_total = len_max - len(warning_total_msg) print( - f"[blue]{'=' * int(len_left_loggers_total / 2)}{warning_total_msg}{'=' * int(len_left_loggers_total / 2)}[/blue]" + f"[blue]{'=' * int(len_left_loggers_total / 2)}" + f"{warning_total_msg}" + f"{'=' * int(len_left_loggers_total / 2)}[/blue]" ) for logger in warning_loggers: if len(BR.warnings[logger]) == 0: @@ -260,7 +268,9 @@ def print_overview_logs(BR: BuildOutput): warning_logger_msg = f"{logger} has {len(BR.warnings[logger])} warnings" len_left_logger = len_max - len(warning_logger_msg) print( - f"[{color}]{'=' * int(len_left_logger / 2)}{warning_logger_msg}{'=' * int(len_left_logger / 2)}[/{color}]" + f"[{color}]{'=' * int(len_left_logger / 2)}" + f"{warning_logger_msg}" + f"{'=' * int(len_left_logger / 2)}[/{color}]" ) print(f"[blue]{'=' * len_max}[/blue]") @@ -271,7 +281,9 @@ def verbose_printout(BR: BuildOutput): for logger in 
warning_loggers: len_left_logger = len_max - len(logger) print( - f"[cornflower_blue]{'=' * int(len_left_logger / 2)}{logger}{'=' * int(len_left_logger / 2)}[/cornflower_blue]" + f"[cornflower_blue]{'=' * int(len_left_logger / 2)}" + f"{logger}" + f"{'=' * int(len_left_logger / 2)}[/cornflower_blue]" ) warnings = BR.warnings[logger] len_left_warnings = len_max - len(f"Warnings Found: {len(warnings)}\n") @@ -279,7 +291,9 @@ def verbose_printout(BR: BuildOutput): if logger == "[NO SPECIFIC LOGGER]": color = "orange1" print( - f"[{color}]{'=' * int(len_left_warnings / 2)}{f'Warnings Found: {len(warnings)}'}{'=' * int(len_left_warnings / 2)}[/{color}]" + f"[{color}]{'=' * int(len_left_warnings / 2)}" + f"{f'Warnings Found: {len(warnings)}'}" + f"{'=' * int(len_left_warnings / 2)}[/{color}]" ) print("\n".join(f"[{color}]{x}[/{color}]" for x in warnings)) @@ -291,13 +305,19 @@ def print_running_cmd(repo: str, cmd: str, local_or_git: str): len_left_local = len_max - len(local_or_git) print(f"\n[cyan]{'=' * len_max}[/cyan]") print( - f"[cornflower_blue]{'=' * int(len_left_repo / 2)}{repo}{'=' * int(len_left_repo / 2)}[/cornflower_blue]" + f"[cornflower_blue]{'=' * int(len_left_repo / 2)}" + f"{repo}" + f"{'=' * int(len_left_repo / 2)}[/cornflower_blue]" ) print( - f"[cornflower_blue]{'=' * int(len_left_local / 2)}{local_or_git}{'=' * int(len_left_local / 2)}[/cornflower_blue]" + f"[cornflower_blue]{'=' * int(len_left_local / 2)}" + f"{local_or_git}" + f"{'=' * int(len_left_local / 2)}[/cornflower_blue]" ) print( - f"[cornflower_blue]{'=' * int(len_left_cmd / 2)}{cmd}{'=' * int(len_left_cmd / 2)}[/cornflower_blue]" + f"[cornflower_blue]{'=' * int(len_left_cmd / 2)}" + f"{cmd}" + f"{'=' * int(len_left_cmd / 2)}[/cornflower_blue]" ) print(f"[cyan]{'=' * len_max}[/cyan]") @@ -310,7 +330,8 @@ def analyze_build_success(BR: BuildOutput) -> tuple[bool, str]: - '[NO SPECIFIC LOGGER]' warnings are always ignored """ - # Unsure if this is good, as sometimes the returncode is 1 but it 
should still go through? + # Unsure if this is good, as sometimes the returncode is 1 + # but it should still go through? # Logging for feedback here if BR.returncode != 0: return False, f"Build failed with return code {BR.returncode}" @@ -354,7 +375,9 @@ def print_final_result(BR: BuildOutput, repo_name: str, cmd: str, pytestconfig): result_msg = f"{repo_name} - {cmd}: {status}" len_left = len_max - len(result_msg) print( - f"[{color}]{'=' * int(len_left / 2)}{result_msg}{'=' * int(len_left / 2)}[/{color}]" + f"[{color}]{'=' * int(len_left / 2)}" + f"{result_msg}" + f"{'=' * int(len_left / 2)}[/{color}]" ) print(f"[{color}]Reason: {reason}[/{color}]") print(f"[{color}]{'=' * len_max}[/{color}]") @@ -479,7 +502,8 @@ def setup_test_environment(sphinx_base_dir, pytestconfig): print(f"[DEBUG] gh_url: {gh_url}") print(f"[DEBUG] current_hash: {current_hash}") print( - f"[DEBUG] Working directory has uncommitted changes: {has_uncommitted_changes(curr_path)}" + "[DEBUG] Working directory has uncommitted changes: " + f"{has_uncommitted_changes(curr_path)}" ) # Create symlink for local docs-as-code @@ -564,9 +588,11 @@ def test_and_clone_repos_updated(sphinx_base_dir, pytestconfig): return print( - f"[green]Testing {len(repos_to_test)} repositories: {[r.name for r in repos_to_test]}[/green]" + f"[green]Testing {len(repos_to_test)} repositories: " + f"{[r.name for r in repos_to_test]}[/green]" ) - # This might be hacky, but currently the best way I could solve the issue of going to the right place. + # This might be hacky, but currently the best way I could solve the issue + # of going to the right place. 
gh_url, current_hash = setup_test_environment(sphinx_base_dir, pytestconfig) overall_success = True @@ -615,11 +641,13 @@ def test_and_clone_repos_updated(sphinx_base_dir, pytestconfig): if not is_success: overall_success = False - # NOTE: We have to change directories back to the parent, otherwise the cloning & override will not be correct + # NOTE: We have to change directories back to the parent + # otherwise the cloning & override will not be correct os.chdir(Path.cwd().parent) # Printing a 'overview' table as a result print_result_table(results) assert overall_success, ( - "Consumer Tests failed, see table for which commands specifically. Enable verbosity for warning/error printouts" + "Consumer Tests failed, see table for which commands specifically. " + "Enable verbosity for warning/error printouts" ) From 4e272bd7516a26e64c890e2f028bdcd2491e426c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Thu, 14 Aug 2025 23:52:25 +0200 Subject: [PATCH 104/231] Adding helper_lib (#207) Adding a helper_lib which adds some common used functionality. 
Added following functions: - find workspace root (based on bazel workspace environment var) - find git root - get & parse git remote repo - get git hash --- docs/conf.py | 1 - src/extensions/score_metamodel/BUILD | 2 +- src/extensions/score_metamodel/__init__.py | 7 + .../score_metamodel/external_needs.py | 1 - src/extensions/score_source_code_linker/BUILD | 1 + .../score_source_code_linker/__init__.py | 102 ++--------- .../generate_source_code_links_json.py | 25 --- .../tests/test_requirement_links.py | 100 ++--------- .../tests/test_source_link.py | 18 +- src/helper_lib/BUILD | 31 ++++ src/helper_lib/__init__.py | 158 ++++++++++++++++++ src/helper_lib/test_helper_lib.py | 70 ++++++++ src/tests/test_consumer.py | 40 ++--- 13 files changed, 323 insertions(+), 233 deletions(-) create mode 100644 src/helper_lib/BUILD create mode 100644 src/helper_lib/__init__.py create mode 100644 src/helper_lib/test_helper_lib.py diff --git a/docs/conf.py b/docs/conf.py index 96074dce..fc646026 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -29,7 +29,6 @@ # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration - extensions = [ "sphinx_design", "sphinx_needs", diff --git a/src/extensions/score_metamodel/BUILD b/src/extensions/score_metamodel/BUILD index fa6b976e..014b6ca4 100644 --- a/src/extensions/score_metamodel/BUILD +++ b/src/extensions/score_metamodel/BUILD @@ -25,7 +25,7 @@ py_library( ], visibility = ["//visibility:public"], # TODO: Figure out if all requirements are needed or if we can break it down a bit - deps = all_requirements, + deps = all_requirements + ["@score_docs_as_code//src/helper_lib"], ) score_py_pytest( diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 1127a3b4..7f5cbdd2 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -24,6 +24,13 @@ 
from sphinx_needs.config import NeedType from sphinx_needs.data import NeedsInfoType, NeedsView, SphinxNeedsData +from src.helper_lib import ( + find_git_root, + find_ws_root, + get_current_git_hash, + get_github_repo_info, +) + from .external_needs import connect_external_needs from .log import CheckLogger diff --git a/src/extensions/score_metamodel/external_needs.py b/src/extensions/score_metamodel/external_needs.py index 913392ae..1e94388e 100644 --- a/src/extensions/score_metamodel/external_needs.py +++ b/src/extensions/score_metamodel/external_needs.py @@ -52,7 +52,6 @@ def _parse_bazel_external_need(s: str) -> ExternalNeedsSource | None: return ExternalNeedsSource( bazel_module=repo, path_to_target=path_to_target, target=target ) - # Unknown data target. Probably not a needs.json file. return None diff --git a/src/extensions/score_source_code_linker/BUILD b/src/extensions/score_source_code_linker/BUILD index 9828b22b..08735e00 100644 --- a/src/extensions/score_source_code_linker/BUILD +++ b/src/extensions/score_source_code_linker/BUILD @@ -24,6 +24,7 @@ py_library( ), imports = ["."], visibility = ["//visibility:public"], + deps = ["@score_docs_as_code//src/helper_lib"], ) score_py_pytest( diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index 7d047501..ce6fe246 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -15,7 +15,6 @@ source code links from a JSON file and add them to the needs. 
""" -import subprocess from collections import defaultdict from copy import deepcopy from pathlib import Path @@ -28,8 +27,6 @@ from sphinx_needs.logging import get_logger from src.extensions.score_source_code_linker.generate_source_code_links_json import ( - find_git_root, - find_ws_root, generate_source_code_links_json, ) from src.extensions.score_source_code_linker.needlinks import ( @@ -37,6 +34,12 @@ NeedLink, load_source_code_links_json, ) +from src.helper_lib import ( + find_git_root, + find_ws_root, + get_current_git_hash, + get_github_base_url, +) LOGGER = get_logger(__name__) # Outcomment this to enable more verbose logging @@ -62,13 +65,13 @@ def setup_once(app: Sphinx, config: Config): LOGGER.debug(f"DEBUG: Git root is {find_git_root()}") # Run only for local files! - # ws_root is not set when running on external repositories (dependencies). + # ws_root is not set when running on any on bazel run command repositories (dependencies) ws_root = find_ws_root() if not ws_root: return # When BUILD_WORKSPACE_DIRECTORY is set, we are inside a git repository. - assert find_git_root(ws_root) + assert find_git_root() # Extension: score_source_code_linker app.add_config_value( @@ -143,92 +146,15 @@ def group_by_need(source_code_links: list[NeedLink]) -> dict[str, list[NeedLink] return source_code_links_by_need -def parse_git_output(str_line: str) -> str: - if len(str_line.split()) < 2: - LOGGER.warning( - "Got wrong input line from 'get_github_repo_info'. " - f"Input: {str_line}." 
- "Expected example: 'origin git@github.com:user/repo.git'" - ) - return "" - url = str_line.split()[1] # Get the URL part - # Handle SSH format (git@github.com:user/repo.git) - if url.startswith("git@"): - path = url.split(":")[1] - else: - path = "/".join(url.split("/")[3:]) # Get part after github.com/ - return path.replace(".git", "") - - -def get_github_repo_info(git_root_cwd: Path) -> str: - process = subprocess.run( - ["git", "remote", "-v"], capture_output=True, text=True, cwd=git_root_cwd - ) - repo = "" - for line in process.stdout.split("\n"): - if "origin" in line and "(fetch)" in line: - repo = parse_git_output(line) - break - else: - # If we do not find 'origin' we just take the first line - LOGGER.info( - "Did not find origin remote name. " - "Will now take first result from: 'git remote -v'" - ) - repo = parse_git_output(process.stdout.split("\n")[0]) - assert repo != "", ( - "Remote repository is not defined. Make sure you have a remote set. " - "Check this via 'git remote -v'" - ) - return repo - - -def get_git_root(git_root: Path = Path()) -> Path: - # This is kinda ugly, doing this to reduce type errors. 
- # There might be a nicer way to do this - if git_root == Path(): - passed_git_root = find_git_root() - if passed_git_root is None: - return Path() - else: - passed_git_root = git_root - return passed_git_root - - -def get_github_base_url(git_root: Path = Path()) -> str: - passed_git_root = get_git_root(git_root) - repo_info = get_github_repo_info(passed_git_root) - return f"https://github.com/{repo_info}" - - -def get_github_link( - git_root: Path = Path(), needlink: NeedLink = DefaultNeedLink() -) -> str: - passed_git_root = get_git_root(git_root) - base_url = get_github_base_url( - passed_git_root - ) # Pass git_root to avoid double lookup +def get_github_link(needlink: NeedLink = DefaultNeedLink()) -> str: + passed_git_root = find_git_root() + if passed_git_root is None: + passed_git_root = Path() + base_url = get_github_base_url() current_hash = get_current_git_hash(passed_git_root) return f"{base_url}/blob/{current_hash}/{needlink.file}#L{needlink.line}" -def get_current_git_hash(ws_root: Path) -> str: - try: - result = subprocess.run( - ["git", "log", "-n", "1", "--pretty=format:%H"], - cwd=ws_root, - capture_output=True, - check=True, - ) - decoded_result = result.stdout.strip().decode() - - assert all(c in "0123456789abcdef" for c in decoded_result) - return decoded_result - except Exception as e: - LOGGER.warning(f"Unexpected error: {ws_root}", exc_info=e) - raise - - # req-Id: tool_req__docs_dd_link_source_code_link def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: """ @@ -280,7 +206,7 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: need_as_dict = cast(dict[str, object], need) need_as_dict["source_code_link"] = ", ".join( - f"{get_github_link(ws_root, n)}<>{n.file}:{n.line}" for n in needlinks + f"{get_github_link(n)}<>{n.file}:{n.line}" for n in needlinks ) # NOTE: Removing & adding the need is important to make sure diff --git 
a/src/extensions/score_source_code_linker/generate_source_code_links_json.py b/src/extensions/score_source_code_linker/generate_source_code_links_json.py index 12b71408..18f5ee28 100644 --- a/src/extensions/score_source_code_linker/generate_source_code_links_json.py +++ b/src/extensions/score_source_code_linker/generate_source_code_links_json.py @@ -25,31 +25,6 @@ store_source_code_links_json, ) - -def find_ws_root() -> Path | None: - """Find the current MODULE.bazel file""" - ws_dir = os.environ.get("BUILD_WORKSPACE_DIRECTORY", None) - return Path(ws_dir) if ws_dir else None - - -def find_git_root(start_path: str | Path = "") -> Path | None: - """Find the git root directory starting from the given path or __file__.""" - if start_path == "": - start_path = __file__ - - git_root = Path(start_path).resolve() - esbonio_search = False - while not (git_root / ".git").exists(): - git_root = git_root.parent - if git_root == Path("/"): - # fallback to cwd when building with python -m sphinx docs _build -T - if esbonio_search: - return None - git_root = Path.cwd().resolve() - esbonio_search = True - return git_root - - TAGS = [ "# " + "req-traceability:", "# " + "req-Id:", diff --git a/src/extensions/score_source_code_linker/tests/test_requirement_links.py b/src/extensions/score_source_code_linker/tests/test_requirement_links.py index 4b57f8b3..1eb748ec 100644 --- a/src/extensions/score_source_code_linker/tests/test_requirement_links.py +++ b/src/extensions/score_source_code_linker/tests/test_requirement_links.py @@ -28,17 +28,19 @@ from src.extensions.score_source_code_linker import ( find_need, get_cache_filename, - get_current_git_hash, get_github_link, - get_github_repo_info, group_by_need, - parse_git_output, ) from src.extensions.score_source_code_linker.needlinks import ( NeedLink, load_source_code_links_json, store_source_code_links_json, ) +from src.helper_lib import ( + get_current_git_hash, + get_github_repo_info, + parse_remote_git_output, +) """ # 
────────────────ATTENTION─────────────── @@ -83,7 +85,6 @@ def needlink_test_decoder(d: dict[str, Any]) -> NeedLink | dict[str, Any]: need=d["need"], full_line=decode_comment(d["full_line"]), ) - # It's something else, pass it on to other decoders return d @@ -120,7 +121,6 @@ def git_repo(temp_dir): cwd=git_dir, check=True, ) - return git_dir @@ -355,35 +355,35 @@ def test_group_by_need_empty_list(): def test_parse_git_output_ssh_format(): """Test parsing git remote output in SSH format.""" git_line = "origin git@github.com:test-user/test-repo.git (fetch)" - result = parse_git_output(git_line) + result = parse_remote_git_output(git_line) assert result == "test-user/test-repo" def test_parse_git_output_https_format(): """Test parsing git remote output in HTTPS format.""" git_line = "origin https://github.com/test-user/test-repo.git (fetch)" - result = parse_git_output(git_line) + result = parse_remote_git_output(git_line) assert result == "test-user/test-repo" def test_parse_git_output_ssh_format_without_git_suffix(): """Test parsing git remote output in SSH format without .git suffix.""" git_line = "origin git@github.com:test-user/test-repo (fetch)" - result = parse_git_output(git_line) + result = parse_remote_git_output(git_line) assert result == "test-user/test-repo" def test_parse_git_output_invalid_format(): """Test parsing invalid git remote output.""" git_line = "invalid" - result = parse_git_output(git_line) + result = parse_remote_git_output(git_line) assert result == "" def test_parse_git_output_empty_string(): """Test parsing empty git remote output.""" git_line = "" - result = parse_git_output(git_line) + result = parse_remote_git_output(git_line) assert result == "" @@ -407,9 +407,6 @@ def test_get_github_repo_info_multiple_remotes(git_repo_multiple_remotes): def test_get_current_git_hash(git_repo): """Test getting current git hash.""" - print("==== GIt REPO====") - a = git_repo - print(a) result = get_current_git_hash(git_repo) # Verify it's a valid 
git hash (40 hex characters) @@ -423,28 +420,6 @@ def test_get_current_git_hash_invalid_repo(temp_dir): get_current_git_hash(temp_dir) -# def test_get_github_base_url_with_real_repo(git_repo): -# """Test getting GitHub base URL with real repository.""" -# # Temporarily set the git repo as the current directory context -# original_cwd = os.getcwd() -# os.chdir(git_repo) -# -# try: -# # We need to temporarily patch find_git_root to return our test repo -# import src.extensions.score_source_code_linker as module -# -# original_find_git_root = module.find_git_root -# module.find_git_root = lambda: git_repo -# -# result = get_github_base_url() -# expected = "https://github.com/test-user/test-repo" -# assert result == expected -# -# finally: -# module.find_git_root = original_find_git_root -# os.chdir(original_cwd) - - def test_get_github_link_with_real_repo(git_repo): """Test generating GitHub link with real repository.""" # Create a needlink @@ -456,7 +431,9 @@ def test_get_github_link_with_real_repo(git_repo): full_line="#" + " req-Id: REQ_001", ) - result = get_github_link(git_repo, needlink) + # Have to change directories in order to ensure that we get the right/any .git file + os.chdir(Path(git_repo).absolute()) + result = get_github_link(needlink) # Should contain the base URL, hash, file path, and line number assert "https://github.com/test-user/test-repo/blob/" in result @@ -511,7 +488,7 @@ def test_cache_file_with_encoded_comments(temp_dir): store_source_code_links_json(cache_file, needlinks) # Check the raw JSON to verify encoding - with open(cache_file, "r") as f: + with open(cache_file) as f: raw_content = f.read() assert "#" + " req-Id:" in raw_content # Should be encoded assert "#-----req-Id:" not in raw_content # Original should not be present @@ -629,10 +606,10 @@ def another_function(): assert len(grouped["TREQ_ID_2"]) == 1 # Test GitHub link generation - + # Have to change directories in order to ensure that we get the right/any .git file 
os.chdir(Path(git_repo).absolute()) for needlink in loaded_links: - github_link = get_github_link(git_repo, needlink) + github_link = get_github_link(needlink) assert "https://github.com/test-user/test-repo/blob/" in github_link assert f"src/{needlink.file.name}#L{needlink.line}" in github_link @@ -665,48 +642,5 @@ def test_multiple_commits_hash_consistency(git_repo): ) os.chdir(Path(git_repo).absolute()) - github_link = get_github_link(git_repo, needlink) + github_link = get_github_link(needlink) assert new_hash in github_link - - -# Test error handling -def test_git_operations_with_no_commits(temp_dir): - """Test git operations on repo with no commits.""" - git_dir = temp_dir / "empty_repo" - git_dir.mkdir() - - # Initialize git repo but don't commit anything - subprocess.run(["git", "init"], cwd=git_dir, check=True, capture_output=True) - subprocess.run( - ["git", "config", "user.email", "test@example.com"], cwd=git_dir, check=True - ) - subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) - - os.chdir(Path(git_dir).absolute()) - # Should raise an exception when trying to get hash - with pytest.raises(Exception): - a = get_current_git_hash(git_dir) - - -def test_git_repo_with_no_remotes(temp_dir): - """Test git repository with no remotes.""" - git_dir = temp_dir / "no_remote_repo" - git_dir.mkdir() - - # Initialize git repo - subprocess.run(["git", "init"], cwd=git_dir, check=True, capture_output=True) - subprocess.run( - ["git", "config", "user.email", "test@example.com"], cwd=git_dir, check=True - ) - subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) - - # Create a test file and commit - test_file = git_dir / "test_file.py" - test_file.write_text("# Test file\nprint('hello')\n") - subprocess.run(["git", "add", "."], cwd=git_dir, check=True) - subprocess.run(["git", "commit", "-m", "Initial commit"], cwd=git_dir, check=True) - os.chdir(git_dir) - - # Should raise an exception when trying to get 
repo info - with pytest.raises(AssertionError): - get_github_repo_info(git_dir) diff --git a/src/extensions/score_source_code_linker/tests/test_source_link.py b/src/extensions/score_source_code_linker/tests/test_source_link.py index 6c40f5e3..3e0b0ed6 100644 --- a/src/extensions/score_source_code_linker/tests/test_source_link.py +++ b/src/extensions/score_source_code_linker/tests/test_source_link.py @@ -26,10 +26,8 @@ from test_requirement_links import needlink_test_decoder from src.extensions.score_source_code_linker import get_github_base_url, get_github_link -from src.extensions.score_source_code_linker.generate_source_code_links_json import ( - find_ws_root, -) from src.extensions.score_source_code_linker.needlinks import NeedLink +from src.helper_lib import find_ws_root @pytest.fixture() @@ -255,7 +253,7 @@ def example_source_link_text_non_existent(sphinx_base_dir): { "TREQ_ID_200": [ NeedLink( - file=Path(f"src/bad_implementation.py"), + file=Path("src/bad_implementation.py"), line=2, tag="#" + " req-Id:", need="TREQ_ID_200", @@ -266,16 +264,14 @@ def example_source_link_text_non_existent(sphinx_base_dir): ] -def make_source_link(ws_root: Path, needlinks): - return ", ".join( - f"{get_github_link(ws_root, n)}<>{n.file}:{n.line}" for n in needlinks - ) +def make_source_link(needlinks): + return ", ".join(f"{get_github_link(n)}<>{n.file}:{n.line}" for n in needlinks) def compare_json_files(file1: Path, golden_file: Path): - with open(file1, "r") as f1: + with open(file1) as f1: json1 = json.load(f1, object_hook=needlink_test_decoder) - with open(golden_file, "r") as f2: + with open(golden_file) as f2: json2 = json.load(f2, object_hook=needlink_test_decoder) assert len(json1) == len(json2), ( f"{file1}'s lenth are not the same as in the golden file lenght. 
" @@ -315,7 +311,7 @@ def test_source_link_integration_ok( assert f"TREQ_ID_{i}" in needs_data need_as_dict = cast(dict[str, object], needs_data[f"TREQ_ID_{i}"]) expected_link = make_source_link( - ws_root, example_source_link_text_all_ok[f"TREQ_ID_{i}"] + example_source_link_text_all_ok[f"TREQ_ID_{i}"] ) # extra_options are only available at runtime # Compare contents, regardless of order. diff --git a/src/helper_lib/BUILD b/src/helper_lib/BUILD new file mode 100644 index 00000000..8e0e16b2 --- /dev/null +++ b/src/helper_lib/BUILD @@ -0,0 +1,31 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +load("@aspect_rules_py//py:defs.bzl", "py_library") +load("@pip_process//:requirements.bzl", "all_requirements") +load("@score_python_basics//:defs.bzl", "score_py_pytest") + +py_library( + name = "helper_lib", + srcs = ["__init__.py"], + imports = ["."], + visibility = ["//visibility:public"], +) + +score_py_pytest( + name = "helper_lib_tests", + size = "small", + srcs = ["test_helper_lib.py"], + deps = [ + ":helper_lib", + ] + all_requirements, +) diff --git a/src/helper_lib/__init__.py b/src/helper_lib/__init__.py new file mode 100644 index 00000000..08d366ef --- /dev/null +++ b/src/helper_lib/__init__.py @@ -0,0 +1,158 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information 
regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +import os +import subprocess +from pathlib import Path + +from sphinx_needs.logging import get_logger + +LOGGER = get_logger(__name__) + + +def find_ws_root() -> Path | None: + """ + Find the current MODULE.bazel workspace root directory. + + Execution context behavior: + - 'bazel run' => ✅ Full workspace path + - 'bazel build' => ❌ None (sandbox isolation) + - 'direct sphinx' => ❌ None (no Bazel environment) + """ + ws_dir = os.environ.get("BUILD_WORKSPACE_DIRECTORY", None) + return Path(ws_dir) if ws_dir else None + + +def find_git_root() -> Path | None: + """ + Find the git root directory, starting from workspace root or current directory. + + Execution context behavior: + - 'bazel run' => ✅ Git root path (starts from workspace) + - 'bazel build' => ❌ None (sandbox has no .git) + - 'direct sphinx' => ✅ Git root path (fallback to cwd) + """ + start_path = find_ws_root() + if start_path is None: + start_path = Path.cwd() + git_root = Path(start_path).resolve() + while not (git_root / ".git").exists(): + git_root = git_root.parent + if git_root == Path("/"): + return None + return git_root + + +def parse_remote_git_output(str_line: str) -> str: + """ + Parse git remote output and extract / format. + + Example: + Input: 'origin git@github.com:MaximilianSoerenPollak/docs-as-code.git' + Output: 'MaximilianSoerenPollak/docs-as-code' + """ + if len(str_line.split()) < 2: + LOGGER.warning( + f"Got wrong input line from 'get_github_repo_info'. Input: {str_line}. 
" + + "Expected example: 'origin git@github.com:user/repo.git'" + ) + return "" + url = str_line.split()[1] # Get the URL part + # Handle SSH format (git@github.com:user/repo.git) Get part after github.com/ + path = url.split(":")[1] if url.startswith("git@") else "/".join(url.split("/")[3:]) + return path.replace(".git", "") + + +def get_github_repo_info(git_root_cwd: Path) -> str: + """ + Extract GitHub repository info from git remotes. + + Execution context behavior: + - Works consistently across all contexts when given valid git directory + - Fails only when input path has no git repository + + Args: + git_root_cwd: Path to directory containing .git folder + + Returns: + Repository in format 'user/repo' or 'org/repo' + """ + process = subprocess.run( + ["git", "remote", "-v"], capture_output=True, text=True, cwd=git_root_cwd + ) + repo = "" + for line in process.stdout.split("\n"): + if "origin" in line and "(fetch)" in line: + repo = parse_remote_git_output(line) + break + else: + # If we do not find 'origin' we just take the first line + LOGGER.info( + "Did not find origin remote name. Will now take first result from:" + + "'git remote -v'" + ) + repo = parse_remote_git_output(process.stdout.split("\n")[0]) + assert repo != "", ( + "Remote repository is not defined. Make sure you have a remote set. " + + "Check this via 'git remote -v'" + ) + return repo + + +def get_github_base_url() -> str: + """ + Generate GitHub base URL for the current repository. 
+ + Execution context behavior: + - 'bazel run' => ✅ Correct GitHub URL + - 'bazel build' => ⚠️ Uses Path() fallback when git_root is None + - 'direct sphinx' => ✅ Correct GitHub URL + + Returns: + GitHub URL in format 'https://github.com/user/repo' + """ + passed_git_root = find_git_root() + if passed_git_root is None: + passed_git_root = Path() + repo_info = get_github_repo_info(passed_git_root) + return f"https://github.com/{repo_info}" + + +def get_current_git_hash(git_root: Path) -> str: + """ + Get the current git commit hash. + + Execution context behavior: + - Works consistently across all contexts when given valid git directory + - Fails only when input path has no git repository + + Args: + git_root: Path to directory containing .git folder + + Returns: + Full commit hash (40 character hex string) + """ + try: + result = subprocess.run( + ["git", "log", "-n", "1", "--pretty=format:%H"], + cwd=git_root, + capture_output=True, + check=True, + ) + decoded_result = result.stdout.strip().decode() + + assert all(c in "0123456789abcdef" for c in decoded_result) + return decoded_result + except Exception as e: + LOGGER.warning(f"Unexpected error: {git_root}", exc_info=e) + raise diff --git a/src/helper_lib/test_helper_lib.py b/src/helper_lib/test_helper_lib.py new file mode 100644 index 00000000..78042cb0 --- /dev/null +++ b/src/helper_lib/test_helper_lib.py @@ -0,0 +1,70 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +import os +import subprocess +import tempfile +from pathlib import Path + +import pytest + +from src.helper_lib import get_current_git_hash, get_github_repo_info + + +@pytest.fixture +def temp_dir(): + """Create a temporary directory for tests.""" + with tempfile.TemporaryDirectory() as temp_dir: + yield Path(temp_dir) + + +# Test error handling +def test_git_operations_with_no_commits(temp_dir): + """Test git operations on repo with no commits.""" + git_dir = temp_dir / "empty_repo" + git_dir.mkdir() + + # Initialize git repo but don't commit anything + subprocess.run(["git", "init"], cwd=git_dir, check=True, capture_output=True) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=git_dir, check=True + ) + subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) + + os.chdir(Path(git_dir).absolute()) + # Should raise an exception when trying to get hash + with pytest.raises(Exception): + get_current_git_hash(git_dir) + + +def test_git_repo_with_no_remotes(temp_dir): + """Test git repository with no remotes.""" + git_dir = temp_dir / "no_remote_repo" + git_dir.mkdir() + + # Initialize git repo + subprocess.run(["git", "init"], cwd=git_dir, check=True, capture_output=True) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=git_dir, check=True + ) + subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) + + # Create a test file and commit + test_file = git_dir / "test_file.py" + test_file.write_text("# Test file\nprint('hello')\n") + subprocess.run(["git", "add", "."], cwd=git_dir, check=True) + subprocess.run(["git", "commit", "-m", "Initial 
commit"], cwd=git_dir, check=True) + os.chdir(git_dir) + + # Should raise an exception when trying to get repo info + with pytest.raises(AssertionError): + get_github_repo_info(git_dir) diff --git a/src/tests/test_consumer.py b/src/tests/test_consumer.py index b5e93ae3..6c722d25 100644 --- a/src/tests/test_consumer.py +++ b/src/tests/test_consumer.py @@ -24,9 +24,7 @@ from rich.table import Table from src.extensions.score_source_code_linker import get_github_base_url -from src.extensions.score_source_code_linker.generate_source_code_links_json import ( - find_git_root, -) +from src.helper_lib import find_git_root """ This script's main usecase is to test consumers of Docs-As-Code with @@ -120,10 +118,9 @@ def sphinx_base_dir(tmp_path_factory: TempPathFactory, pytestconfig) -> Path: temp_dir = tmp_path_factory.mktemp("testing_dir") print(f"[blue]Using temporary directory: {temp_dir}[/blue]") return temp_dir - else: - CACHE_DIR.mkdir(parents=True, exist_ok=True) - print(f"[green]Using persistent cache directory: {CACHE_DIR}[/green]") - return CACHE_DIR + CACHE_DIR.mkdir(parents=True, exist_ok=True) + print(f"[green]Using persistent cache directory: {CACHE_DIR}[/green]") + return CACHE_DIR def get_current_git_commit(curr_path: Path): @@ -175,7 +172,7 @@ def replace_bazel_dep_with_local_override(module_content: str) -> str: """ """ # Pattern to match the bazel_dep line - pattern = rf'bazel_dep\(name = "score_docs_as_code", version = "[^"]+"\)' + pattern = r'bazel_dep\(name = "score_docs_as_code", version = "[^"]+"\)' # Replacement with local_path_override replacement = """bazel_dep(name = "score_docs_as_code", version = "0.0.0") @@ -190,7 +187,7 @@ def replace_bazel_dep_with_local_override(module_content: str) -> str: def replace_bazel_dep_with_git_override( module_content: str, git_hash: str, gh_url: str ) -> str: - pattern = rf'bazel_dep\(name = "score_docs_as_code", version = "[^"]+"\)' + pattern = r'bazel_dep\(name = "score_docs_as_code", version = "[^"]+"\)' 
replacement = f'''bazel_dep(name = "score_docs_as_code", version = "0.0.0") git_override( @@ -343,9 +340,8 @@ def analyze_build_success(BR: BuildOutput) -> tuple[bool, str]: if logger == "[NO SPECIFIC LOGGER]": # Always ignore these continue - else: - # Any other logger is critical/not ignored - critical_warnings.extend(warnings) + # Any other logger is critical/not ignored + critical_warnings.extend(warnings) if critical_warnings: return False, f"Found {len(critical_warnings)} critical warnings" @@ -481,29 +477,27 @@ def run_test_commands(): def setup_test_environment(sphinx_base_dir, pytestconfig): """Set up the test environment and return necessary paths and metadata.""" - os.chdir(sphinx_base_dir) - curr_path = Path(__file__).parent - git_root = find_git_root(curr_path) + git_root = find_git_root() + if git_root is None: + assert False, "Git root was none" + gh_url = get_github_base_url() + current_hash = get_current_git_commit(git_root) + + os.chdir(Path(sphinx_base_dir).absolute()) verbosity = pytestconfig.get_verbosity() if verbosity >= 2: - print(f"[DEBUG] curr_path: {curr_path}") print(f"[DEBUG] git_root: {git_root}") - if git_root is None: - assert False, "Git root was none" - # Get GitHub URL and current hash for git override - gh_url = get_github_base_url(git_root) - current_hash = get_current_git_commit(curr_path) if verbosity >= 2: print(f"[DEBUG] gh_url: {gh_url}") print(f"[DEBUG] current_hash: {current_hash}") print( "[DEBUG] Working directory has uncommitted changes: " - f"{has_uncommitted_changes(curr_path)}" + f"{has_uncommitted_changes(git_root)}" ) # Create symlink for local docs-as-code @@ -562,7 +556,7 @@ def prepare_repo_overrides(repo_name, git_url, current_hash, gh_url, use_cache=T os.chdir(repo_name) # Read original MODULE.bazel - with open("MODULE.bazel", "r") as f: + with open("MODULE.bazel") as f: module_orig = f.read() # Prepare override versions From 66dc723cbf30ce86ee16598505c99e3fce8a3fe0 Mon Sep 17 00:00:00 2001 From: 
Aymen-Soussi-01 Date: Fri, 15 Aug 2025 15:36:32 +0200 Subject: [PATCH 105/231] Fix linting errors part 2 (#211) * Solve most of the linting errors * Reduce complexities of functions * Remove unused and repeated function from source code linker and fix tests --- .../score_draw_uml_funcs/__init__.py | 316 ++++++++++++------ src/extensions/score_metamodel/__init__.py | 8 - .../checks/attributes_format.py | 9 +- .../score_metamodel/checks/check_options.py | 54 +-- .../tests/test_check_options.py | 2 +- .../score_source_code_linker/__init__.py | 7 +- .../tests/test_requirement_links.py | 4 +- .../tests/test_source_link.py | 14 +- src/find_runfiles/test_find_runfiles.py | 47 ++- src/helper_lib/test_helper_lib.py | 2 +- src/tests/test_consumer.py | 70 ++-- 11 files changed, 321 insertions(+), 212 deletions(-) diff --git a/src/extensions/score_draw_uml_funcs/__init__.py b/src/extensions/score_draw_uml_funcs/__init__.py index e3f254ec..96e0b93c 100644 --- a/src/extensions/score_draw_uml_funcs/__init__.py +++ b/src/extensions/score_draw_uml_funcs/__init__.py @@ -83,6 +83,36 @@ def scripts_directory_hash(): # ╰──────────────────────────────────────────────────────────────────────────────╯ +def _process_interfaces( + iface_list: list[str], + relation: str, + need: dict[str, str], + all_needs: dict[str, dict[str, str]], + proc_dict: dict[str, str] | dict[str, list[str]], + linkage_text: str, +) -> str: + """Helper to process either implemented or used interfaces.""" + for iface in iface_list: + # check for misspelled interface + if not all_needs.get(iface, []): + logger.info(f"{need}: {relation} {iface} could not be found") + continue + + if relation == "implements": + if not proc_dict.get(iface, []): + linkage_text += ( + f"{gen_link_text(need, '-u->', all_needs[iface], 'implements')} \n" + ) + proc_dict[iface] = need["id"] + else: # "uses" + if not proc_dict.get(iface, []): + proc_dict[iface] = [need["id"]] + else: + proc_dict[iface].append(need["id"]) + + return linkage_text 
+ + def draw_comp_incl_impl_int( need: dict[str, str], all_needs: dict[str, dict[str, str]], @@ -133,36 +163,25 @@ def draw_comp_incl_impl_int( local_impl_interfaces = get_interface_from_component(need, "implements", all_needs) local_used_interfaces = get_interface_from_component(need, "uses", all_needs) - # Add all interfaces which are implemented by component to global list - # and provide implementation - for iface in local_impl_interfaces: - # check for misspelled implements - if not all_needs.get(iface, []): - logger.info(f"{need}: implements {iface} could not be found") - continue - - if not proc_impl_interfaces.get(iface, []): - linkage_text += f"{ - gen_link_text( - need, - '-u->', - all_needs[iface], - 'implements', - ) - } \n" - proc_impl_interfaces[iface] = need["id"] - - # Add all elements which are used by component to global list - for iface in local_used_interfaces: - # check for misspelled used - if not all_needs.get(iface, []): - logger.info(f"{need}: uses {iface} could not be found") - continue + # Process implemented interfaces + linkage_text = _process_interfaces( + local_impl_interfaces, + "implements", + need, + all_needs, + proc_impl_interfaces, + linkage_text, + ) - if not proc_used_interfaces.get(iface, []): - proc_used_interfaces[iface] = [need["id"]] - else: - proc_used_interfaces[iface].append(need["id"]) + # Process used interfaces + linkage_text = _process_interfaces( + local_used_interfaces, + "uses", + need, + all_needs, + proc_used_interfaces, + linkage_text, + ) return structure_text, linkage_text, proc_impl_interfaces, proc_used_interfaces @@ -191,6 +210,68 @@ def draw_impl_interface( return local_impl_interfaces +def _process_impl_interfaces( + need: dict[str, str], + all_needs: dict[str, dict[str, str]], + proc_impl_interfaces: dict[str, str], + structure_text: str, +) -> str: + """Handle implemented interfaces outside the boxes.""" + local_impl_interfaces = draw_impl_interface(need, all_needs, set()) + # Add all interfaces 
which are implemented by component to global list + # and provide implementation + for iface in local_impl_interfaces: + # check for misspelled implements + if not all_needs.get(iface, []): + logger.info(f"{need}: implements {iface} could not be found") + continue + if not proc_impl_interfaces.get(iface, []): + structure_text += gen_interface_element(iface, all_needs, True) + return structure_text + + +def _process_used_interfaces( + need: dict[str, str], + all_needs: dict[str, dict[str, str]], + proc_impl_interfaces: dict[str, str], + proc_used_interfaces: dict[str, list[str]], + local_impl_interfaces: list[str], + structure_text: str, + linkage_text: str, +) -> tuple[str, str]: + """Handle all interfaces which are used by component.""" + for iface, comps in proc_used_interfaces.items(): + if iface not in proc_impl_interfaces: + # Add implementing components and modules + impl_comp_str = get_impl_comp_from_logic_iface(iface, all_needs) + impl_comp = all_needs.get(impl_comp_str[0], {}) if impl_comp_str else "" + + if impl_comp: + retval = get_hierarchy_text(impl_comp_str[0], all_needs) + structure_text += retval[2] # module open + structure_text += retval[0] # rest open + structure_text += retval[1] # rest close + structure_text += retval[3] # module close + if iface not in local_impl_interfaces: + structure_text += gen_interface_element(iface, all_needs, True) + # Draw connection between implementing components and interface + linkage_text += f"{ + gen_link_text(impl_comp, '-u->', all_needs[iface], 'implements') + } \n" + else: + # Add only interface if component not defined + print(f"{iface}: No implementing component defined") + structure_text += gen_interface_element(iface, all_needs, True) + + # Interface can be used by multiple components + for comp in comps: + linkage_text += f"{ + gen_link_text(all_needs[comp], '-d[#green]->', all_needs[iface], 'uses') + } \n" + + return structure_text, linkage_text + + def draw_module( need: dict[str, str], all_needs: 
dict[str, dict[str, str]], @@ -254,17 +335,9 @@ def draw_module( # Draw all implemented interfaces outside the boxes local_impl_interfaces = draw_impl_interface(need, all_needs, set()) - - # Add all interfaces which are implemented by component to global list - # and provide implementation - for iface in local_impl_interfaces: - # check for misspelled implements - if not all_needs.get(iface, []): - logger.info(f"{need}: implements {iface} could not be found") - continue - - if not proc_impl_interfaces.get(iface, []): - structure_text += gen_interface_element(iface, all_needs, True) + structure_text = _process_impl_interfaces( + need, all_needs, proc_impl_interfaces, structure_text + ) # Draw outer module structure_text += f"{gen_struct_element('package', need)} {{\n" @@ -272,21 +345,17 @@ def draw_module( # Draw inner components recursively for need_inc in need.get("includes", []): curr_need = all_needs.get(need_inc, {}) - # check for misspelled include if not curr_need: logger.info(f"{need}: include with id {need_inc} could not be found") continue - if curr_need["type"] not in ["comp_arc_sta", "mod_view_sta"]: continue - sub_structure, sub_linkage, proc_impl_interfaces, proc_used_interfaces = ( draw_comp_incl_impl_int( curr_need, all_needs, proc_impl_interfaces, proc_used_interfaces ) ) - structure_text += sub_structure linkage_text += sub_linkage @@ -294,35 +363,15 @@ def draw_module( structure_text += f"}} /' {need['title']} '/ \n\n" # Add all interfaces which are used by component - for iface, comps in proc_used_interfaces.items(): - if iface not in proc_impl_interfaces: - # Add implementing components and modules - impl_comp_str = get_impl_comp_from_logic_iface(iface, all_needs) - - impl_comp = all_needs.get(impl_comp_str[0], {}) if impl_comp_str else "" - - if impl_comp: - retval = get_hierarchy_text(impl_comp_str[0], all_needs) - structure_text += retval[2] # module open - structure_text += retval[0] # rest open - - structure_text += retval[1] # rest close - 
structure_text += retval[3] # module close - if iface not in local_impl_interfaces: - structure_text += gen_interface_element(iface, all_needs, True) - - # Draw connection between implementing components and interface - linkage_text += f"{gen_link_text(impl_comp, '-u->', all_needs[iface], 'implements')} \n" - - else: - # Add only interface if component not defined - print(f"{iface}: No implementing component defined") - structure_text += gen_interface_element(iface, all_needs, True) - - # Interface can be used by multiple components - for comp in comps: - # Draw connection between used interfaces and components - linkage_text += f"{gen_link_text(all_needs[comp], '-d[#green]->', all_needs[iface], 'uses')} \n" + structure_text, linkage_text = _process_used_interfaces( + need, + all_needs, + proc_impl_interfaces, + proc_used_interfaces, + local_impl_interfaces, + structure_text, + linkage_text, + ) # Remove duplicate links linkage_text = "\n".join(set(linkage_text.split("\n"))) + "\n" @@ -339,43 +388,30 @@ class draw_full_feature: def __repr__(self): return "draw_full_feature" + " in " + scripts_directory_hash() - def __call__( - self, need: dict[str, str], all_needs: dict[str, dict[str, str]] - ) -> str: - interfacelist: list[str] = [] - impl_comp: dict[str, str] = dict() - # Store all Elements which have already been processed - proc_impl_interfaces: dict[str, str] = dict() - proc_used_interfaces: dict[str, list[str]] = dict() - proc_modules: list[str] = list() - - link_text = "" - structure_text = ( - f'actor "Feature User" as {get_alias({"id": "Feature_User"})} \n' - ) - - # Define Feature as a package - # structure_text += f"{gen_struct_element('package', need)} {{\n" - - # Add logical Interfaces / Interface Operations (aka includes) - for need_inc in need.get("includes", []): - # Generate list of interfaces since both interfaces - # and interface operations can be included - iface = get_interface_from_int(need_inc, all_needs) - if iface not in interfacelist: - 
interfacelist.append(iface) - + def _collect_interfaces_and_modules( + self, + need: dict[str, str], + all_needs: dict[str, dict[str, str]], + interfacelist: list[str], + impl_comp: dict[str, str], + proc_modules: list[str], + proc_impl_interfaces: dict[str, str], + proc_used_interfaces: dict[str, list[str]], + structure_text: str, + link_text: str, + ) -> tuple[ + str, str, dict[str, str], dict[str, list[str]], dict[str, str], list[str] + ]: + """Process interfaces and load modules for implementation.""" for iface in interfacelist: - if iface_need := all_needs.get(iface): + if all_needs.get(iface): if iface: comps = get_impl_comp_from_logic_iface(iface, all_needs) - if comps: impl_comp[iface] = comps[0] if imcomp := impl_comp.get(iface, {}): module = get_module(imcomp, all_needs) - # FIXME: sometimes module is empty, then the following code fails if not module: logger.info( @@ -395,14 +431,27 @@ def __call__( ) structure_text += tmp proc_modules.append(module) - else: logger.info(f"{need}: Interface {iface} could not be found") continue + return ( + structure_text, + link_text, + proc_impl_interfaces, + proc_used_interfaces, + impl_comp, + proc_modules, + ) - # Close Package - # structure_text += f"}} /' {need['title']} '/ \n\n" - + def _build_links( + self, + need: dict[str, str], + all_needs: dict[str, dict[str, str]], + interfacelist: list[str], + impl_comp: dict[str, str], + link_text: str, + ) -> str: + """Add actor-interface and interface-component relations.""" for iface in interfacelist: if imcomp := impl_comp.get(iface): # Add relation between Actor and Interfaces @@ -429,6 +478,61 @@ def __call__( else: logger.info(f"{need}: Interface {iface} could not be found") continue + return link_text + + def __call__( + self, need: dict[str, str], all_needs: dict[str, dict[str, str]] + ) -> str: + interfacelist: list[str] = [] + impl_comp: dict[str, str] = dict() + # Store all Elements which have already been processed + proc_impl_interfaces: dict[str, str] = 
dict() + proc_used_interfaces: dict[str, list[str]] = dict() + proc_modules: list[str] = list() + + link_text = "" + structure_text = ( + f'actor "Feature User" as {get_alias({"id": "Feature_User"})} \n' + ) + + # Define Feature as a package + # structure_text += f"{gen_struct_element('package', need)} {{\n" + + # Add logical Interfaces / Interface Operations (aka includes) + for need_inc in need.get("includes", []): + # Generate list of interfaces since both interfaces + # and interface operations can be included + iface = get_interface_from_int(need_inc, all_needs) + if iface not in interfacelist: + interfacelist.append(iface) + + # Process interfaces and collect required modules + ( + structure_text, + link_text, + proc_impl_interfaces, + proc_used_interfaces, + impl_comp, + proc_modules, + ) = self._collect_interfaces_and_modules( + need, + all_needs, + interfacelist, + impl_comp, + proc_modules, + proc_impl_interfaces, + proc_used_interfaces, + structure_text, + link_text, + ) + + # Close Package + # structure_text += f"}} /' {need['title']} '/ \n\n" + + # Build all links between actor, interfaces, and components + link_text = self._build_links( + need, all_needs, interfacelist, impl_comp, link_text + ) # Remove duplicate links link_text = "\n".join(set(link_text.split("\n"))) + "\n" diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 7f5cbdd2..c6e57680 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -11,7 +11,6 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* import importlib -import json import os import pkgutil from collections.abc import Callable @@ -24,13 +23,6 @@ from sphinx_needs.config import NeedType from sphinx_needs.data import NeedsInfoType, NeedsView, SphinxNeedsData -from src.helper_lib import ( - find_git_root, - find_ws_root, - get_current_git_hash, - 
get_github_repo_info, -) - from .external_needs import connect_external_needs from .log import CheckLogger diff --git a/src/extensions/score_metamodel/checks/attributes_format.py b/src/extensions/score_metamodel/checks/attributes_format.py index 2d3a6cb3..ebf07157 100644 --- a/src/extensions/score_metamodel/checks/attributes_format.py +++ b/src/extensions/score_metamodel/checks/attributes_format.py @@ -70,10 +70,15 @@ def check_id_length(app: Sphinx, need: NeedsInfoType, log: CheckLogger): if parts[1] == "example_feature": max_lenght += 17 # _example_feature_ if len(need["id"]) > max_lenght: + length = 0 + if "example_feature" not in need["id"]: + length = len(need["id"]) + else: + length = len(need["id"]) - 17 msg = ( f"exceeds the maximum allowed length of 45 characters " "(current length: " - f"{len(need['id']) if 'example_feature' not in need['id'] else len(need['id']) - 17})." + f"{length})." ) log.warning_for_option(need, "id", msg) @@ -82,7 +87,7 @@ def _check_options_for_prohibited_words( prohibited_word_checks: ProhibitedWordCheck, need: NeedsInfoType, log: CheckLogger ): options: list[str] = [ - x for x in prohibited_word_checks.option_check.keys() if x != "types" + x for x in prohibited_word_checks.option_check if x != "types" ] for option in options: forbidden_words = prohibited_word_checks.option_check[option] diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index 1fe7354b..aee3b18a 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -11,6 +11,7 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* import re +from os import error from score_metamodel import ( CheckLogger, @@ -33,6 +34,30 @@ def get_need_type(needs_types: list[ScoreNeedType], directive: str) -> ScoreNeed raise ValueError(f"Need type {directive} not found in 
needs_types") +def _normalize_values(raw_value: str | list[str] | None) -> list[str]: + """Normalize a raw value into a list of strings.""" + if raw_value is None: + return [] + if isinstance(raw_value, str): + return [raw_value] + if isinstance(raw_value, list) and all(isinstance(v, str) for v in raw_value): + return raw_value + raise ValueError + + +def _validate_value_pattern( + value: str, pattern: str, need: NeedsInfoType, field: str, log: CheckLogger +) -> None: + """Check if a value matches the given pattern, log warnings if not.""" + try: + if not re.match(pattern, value): + log.warning_for_option(need, field, f"does not follow pattern `{pattern}`.") + except TypeError: + log.warning_for_option( + need, field, f"pattern `{pattern}` is not a valid regex pattern." + ) + + def validate_fields( need: NeedsInfoType, log: CheckLogger, @@ -64,31 +89,18 @@ def remove_prefix(word: str, prefixes: list[str]) -> str: need, f"is missing required {field_type}: `{field}`." ) continue # Skip empty optional fields - - values: list[str] - - if isinstance(raw_value, str): - values = [raw_value] - elif isinstance(raw_value, list) and all(isinstance(v, str) for v in raw_value): - values = raw_value - else: - values = [str(raw_value)] - + try: + values = _normalize_values(raw_value) + except ValueError as err: + raise ValueError( + f"An Attribute inside need {need['id']} is " + "not of type str. Only Strings are allowed" + ) from err # The filter ensures that the function is only called when needed. for value in values: if allowed_prefixes: value = remove_prefix(value, allowed_prefixes) - try: - if not re.match(pattern, value): - log.warning_for_option( - need, field, f"does not follow pattern `{pattern}`." 
- ) - except TypeError: - log.warning_for_option( - need, - field, - f"pattern `{pattern}` is not a valid regex pattern.", - ) + _validate_value_pattern(value, pattern, need, field, log) # req-Id: tool_req__docs_req_attr_reqtype diff --git a/src/extensions/score_metamodel/tests/test_check_options.py b/src/extensions/score_metamodel/tests/test_check_options.py index 09485048..438105a6 100644 --- a/src/extensions/score_metamodel/tests/test_check_options.py +++ b/src/extensions/score_metamodel/tests/test_check_options.py @@ -156,7 +156,7 @@ def test_invalid_option_type(self): target_id="wf_req__001", id="wf_req__001", type="workflow", - some_invalid_option=42, + some_invalid_option="42", docname=None, lineno=None, ) diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index ce6fe246..b8184c4e 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -65,7 +65,8 @@ def setup_once(app: Sphinx, config: Config): LOGGER.debug(f"DEBUG: Git root is {find_git_root()}") # Run only for local files! 
- # ws_root is not set when running on any on bazel run command repositories (dependencies) + # ws_root is not set when running on any on bazel run + # command repositories (dependencies) ws_root = find_ws_root() if not ws_root: return @@ -146,7 +147,9 @@ def group_by_need(source_code_links: list[NeedLink]) -> dict[str, list[NeedLink] return source_code_links_by_need -def get_github_link(needlink: NeedLink = DefaultNeedLink()) -> str: +def get_github_link(needlink: NeedLink | None = None) -> str: + if needlink is None: + needlink = DefaultNeedLink() passed_git_root = find_git_root() if passed_git_root is None: passed_git_root = Path() diff --git a/src/extensions/score_source_code_linker/tests/test_requirement_links.py b/src/extensions/score_source_code_linker/tests/test_requirement_links.py index 1eb748ec..a6460c1d 100644 --- a/src/extensions/score_source_code_linker/tests/test_requirement_links.py +++ b/src/extensions/score_source_code_linker/tests/test_requirement_links.py @@ -416,7 +416,7 @@ def test_get_current_git_hash(git_repo): def test_get_current_git_hash_invalid_repo(temp_dir): """Test getting git hash from invalid repository.""" - with pytest.raises(Exception): + with pytest.raises(subprocess.CalledProcessError): get_current_git_hash(temp_dir) @@ -519,7 +519,7 @@ def test_group_by_need_and_find_need_integration(sample_needlinks): ) # Test finding needs for each group - for need_id, links in grouped.items(): + for need_id in grouped: found_need = find_need(all_needs, need_id, ["PREFIX_"]) if need_id in ["TREQ_ID_1", "TREQ_ID_2"]: assert found_need is not None diff --git a/src/extensions/score_source_code_linker/tests/test_source_link.py b/src/extensions/score_source_code_linker/tests/test_source_link.py index 3e0b0ed6..32c022f0 100644 --- a/src/extensions/score_source_code_linker/tests/test_source_link.py +++ b/src/extensions/score_source_code_linker/tests/test_source_link.py @@ -10,6 +10,7 @@ # # SPDX-License-Identifier: Apache-2.0 # 
******************************************************************************* +import contextlib import json import os import shutil @@ -150,14 +151,11 @@ def _create_app(): base_dir = sphinx_base_dir docs_dir = base_dir / "docs" + original_cwd = None # CRITICAL: Change to a directory that exists and is accessible # This fixes the "no such file or directory" error in Bazel - original_cwd = None - try: + with contextlib.suppress(FileNotFoundError): original_cwd = os.getcwd() - except FileNotFoundError: - # Current working directory doesn't exist, which is the problem - pass # Change to the base_dir before creating SphinxTestApp os.chdir(base_dir) @@ -173,11 +171,9 @@ def _create_app(): finally: # Try to restore original directory, but don't fail if it doesn't exist if original_cwd is not None: - try: + # Original directory might not exist anymore in Bazel sandbox + with contextlib.suppress(FileNotFoundError, OSError): os.chdir(original_cwd) - except (FileNotFoundError, OSError): - # Original directory might not exist anymore in Bazel sandbox - pass return _create_app diff --git a/src/find_runfiles/test_find_runfiles.py b/src/find_runfiles/test_find_runfiles.py index 3ed2fc9d..97d73d84 100644 --- a/src/find_runfiles/test_find_runfiles.py +++ b/src/find_runfiles/test_find_runfiles.py @@ -32,25 +32,25 @@ def get_runfiles_dir_impl( def test_run_incremental(): """bazel run //process-docs:incremental""" # in incremental.py: - assert ( - get_runfiles_dir_impl( - cwd="/home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles/_main", - conf_dir="process-docs", - env_runfiles="/home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles", - git_root="/workspaces/process", - ) - == "/workspaces/process/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles" + assert get_runfiles_dir_impl( + 
cwd="/home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles/_main", + conf_dir="process-docs", + env_runfiles="/home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles", + git_root="/workspaces/process", + ) == ( + "/workspaces/process/bazel-out/k8-fastbuild/bin/process-docs/" + "incremental.runfiles" ) # in conf.py: - assert ( - get_runfiles_dir_impl( - cwd="/workspaces/process/process-docs", - conf_dir="process-docs", - env_runfiles="/home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles", - git_root="/workspaces/process", - ) - == "/workspaces/process/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles" + assert get_runfiles_dir_impl( + cwd="/workspaces/process/process-docs", + conf_dir="process-docs", + env_runfiles="/home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles", + git_root="/workspaces/process", + ) == ( + "/workspaces/process/bazel-out/k8-fastbuild/bin/process-docs/" + "incremental.runfiles" ) @@ -83,12 +83,11 @@ def test_esbonio_old(): def test3(): # docs named differently, just to make sure nothing is hardcoded # bazel run //other-docs:incremental - assert ( - get_runfiles_dir_impl( - cwd="/workspaces/process/other-docs", - conf_dir="other-docs", - env_runfiles="/home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/other-docs/incremental.runfiles", - git_root="/workspaces/process", - ) - == "/workspaces/process/bazel-out/k8-fastbuild/bin/other-docs/incremental.runfiles" + assert get_runfiles_dir_impl( + cwd="/workspaces/process/other-docs", + conf_dir="other-docs", + 
env_runfiles="/home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/other-docs/incremental.runfiles", + git_root="/workspaces/process", + ) == ( + "/workspaces/process/bazel-out/k8-fastbuild/bin/other-docs/incremental.runfiles" ) diff --git a/src/helper_lib/test_helper_lib.py b/src/helper_lib/test_helper_lib.py index 78042cb0..e3ca45d4 100644 --- a/src/helper_lib/test_helper_lib.py +++ b/src/helper_lib/test_helper_lib.py @@ -42,7 +42,7 @@ def test_git_operations_with_no_commits(temp_dir): os.chdir(Path(git_dir).absolute()) # Should raise an exception when trying to get hash - with pytest.raises(Exception): + with pytest.raises(subprocess.CalledProcessError): get_current_git_hash(git_dir) diff --git a/src/tests/test_consumer.py b/src/tests/test_consumer.py index 6c722d25..36ce3365 100644 --- a/src/tests/test_consumer.py +++ b/src/tests/test_consumer.py @@ -118,6 +118,7 @@ def sphinx_base_dir(tmp_path_factory: TempPathFactory, pytestconfig) -> Path: temp_dir = tmp_path_factory.mktemp("testing_dir") print(f"[blue]Using temporary directory: {temp_dir}[/blue]") return temp_dir + CACHE_DIR.mkdir(parents=True, exist_ok=True) print(f"[green]Using persistent cache directory: {CACHE_DIR}[/green]") return CACHE_DIR @@ -210,11 +211,10 @@ def parse_bazel_output(BR: BuildOutput, pytestconfig) -> BuildOutput: split_warnings = [x for x in err_lines if "WARNING: " in x] warning_dict: dict[str, list[str]] = defaultdict(list) - if pytestconfig.get_verbosity() >= 2: - if os.getenv("CI"): - print("[DEBUG] Raw warnings in CI:") - for i, warning in enumerate(split_warnings): - print(f"[DEBUG] Warning {i}: {repr(warning)}") + if pytestconfig.get_verbosity() >= 2 and os.getenv("CI"): + print("[DEBUG] Raw warnings in CI:") + for i, warning in enumerate(split_warnings): + print(f"[DEBUG] Warning {i}: {repr(warning)}") for raw_warning in split_warnings: # In the CLI we seem to have some ansi codes in the warnings. 
@@ -479,46 +479,44 @@ def setup_test_environment(sphinx_base_dir, pytestconfig): """Set up the test environment and return necessary paths and metadata.""" git_root = find_git_root() if git_root is None: - assert False, "Git root was none" + raise RuntimeError("Git root was not found") + gh_url = get_github_base_url() current_hash = get_current_git_commit(git_root) os.chdir(Path(sphinx_base_dir).absolute()) - verbosity = pytestconfig.get_verbosity() - if verbosity >= 2: - print(f"[DEBUG] git_root: {git_root}") + def debug_print(message): + if verbosity >= 2: + print(f"[DEBUG] {message}") - # Get GitHub URL and current hash for git override + debug_print(f"git_root: {git_root}") - if verbosity >= 2: - print(f"[DEBUG] gh_url: {gh_url}") - print(f"[DEBUG] current_hash: {current_hash}") - print( - "[DEBUG] Working directory has uncommitted changes: " - f"{has_uncommitted_changes(git_root)}" - ) - - # Create symlink for local docs-as-code - docs_as_code_dest = sphinx_base_dir / "docs_as_code" - if docs_as_code_dest.exists() or docs_as_code_dest.is_symlink(): - # Remove existing symlink/directory to recreate it - if docs_as_code_dest.is_symlink(): - docs_as_code_dest.unlink() - if verbosity >= 2: - print(f"[DEBUG] Removed existing symlink: {docs_as_code_dest}") - elif docs_as_code_dest.is_dir(): - import shutil - - shutil.rmtree(docs_as_code_dest) - if verbosity >= 2: - print(f"[DEBUG] Removed existing directory: {docs_as_code_dest}") - - docs_as_code_dest.symlink_to(git_root) + # Get GitHub URL and current hash for git override + debug_print(f"gh_url: {gh_url}") + debug_print(f"current_hash: {current_hash}") + debug_print( + "Working directory has uncommitted changes: " + f"{has_uncommitted_changes(git_root)}" + ) - if verbosity >= 2: - print(f"[DEBUG] Symlink created: {docs_as_code_dest} -> {git_root}") + def recreate_symlink(dest, target): + # Create symlink for local docs-as-code + if dest.exists() or dest.is_symlink(): + # Remove existing symlink/directory to 
recreate it + if dest.is_symlink(): + dest.unlink() + debug_print(f"Removed existing symlink: {dest}") + elif dest.is_dir(): + import shutil + + shutil.rmtree(dest) + debug_print(f"Removed existing directory: {dest}") + dest.symlink_to(target) + debug_print(f"Symlink created: {dest} -> {target}") + + recreate_symlink(sphinx_base_dir / "docs_as_code", git_root) return gh_url, current_hash From cc3b0fa170a749ccd5241ccfb9c2941f4f2a5499 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Mon, 18 Aug 2025 13:02:41 +0200 Subject: [PATCH 106/231] Test parsing & testlinks (#197) * Add Test.xml parsing * Add testlink's to requirements (where found) * Added unit- & integration-tests * Splitted helper library to avoid circular imports * Remove junitparser Thanks for the infra Team for help in working out the concept & finding the issues here. --- docs/conf.py | 2 +- .../extensions/source_code_linker.md | 3 + docs/requirements/index.rst | 1 + docs/requirements/requirements.rst | 1 + docs/requirements/test_overview.rst | 55 ++ src/extensions/score_layout/sphinx_options.py | 4 +- .../score_metamodel/checks/check_options.py | 1 - src/extensions/score_metamodel/metamodel.yaml | 28 +- src/extensions/score_source_code_linker/BUILD | 25 +- .../score_source_code_linker/__init__.py | 336 ++++++++--- .../need_source_links.py | 104 ++++ .../score_source_code_linker/needlinks.py | 2 +- .../score_source_code_linker/testlink.py | 248 ++++++++ ...olden_file.json => expected_codelink.json} | 2 +- .../tests/expected_grouped.json | 113 ++++ .../tests/expected_testlink.json | 47 ++ ..._requirement_links.py => test_codelink.py} | 48 +- .../tests/test_need_source_links.py | 143 +++++ .../test_source_code_link_integration.py | 553 ++++++++++++++++++ .../tests/test_source_link.py | 336 ----------- .../tests/test_testlink.py | 118 ++++ .../tests/test_xml_parser.py | 146 +++++ .../score_source_code_linker/xml_parser.py | 241 ++++++++ src/helper_lib/BUILD | 6 +- 
src/helper_lib/additional_functions.py | 38 ++ src/tests/test_consumer.py | 3 +- 26 files changed, 2153 insertions(+), 451 deletions(-) create mode 100644 docs/requirements/test_overview.rst create mode 100644 src/extensions/score_source_code_linker/need_source_links.py create mode 100644 src/extensions/score_source_code_linker/testlink.py rename src/extensions/score_source_code_linker/tests/{scl_golden_file.json => expected_codelink.json} (97%) create mode 100644 src/extensions/score_source_code_linker/tests/expected_grouped.json create mode 100644 src/extensions/score_source_code_linker/tests/expected_testlink.json rename src/extensions/score_source_code_linker/tests/{test_requirement_links.py => test_codelink.py} (92%) create mode 100644 src/extensions/score_source_code_linker/tests/test_need_source_links.py create mode 100644 src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py delete mode 100644 src/extensions/score_source_code_linker/tests/test_source_link.py create mode 100644 src/extensions/score_source_code_linker/tests/test_testlink.py create mode 100644 src/extensions/score_source_code_linker/tests/test_xml_parser.py create mode 100644 src/extensions/score_source_code_linker/xml_parser.py create mode 100644 src/helper_lib/additional_functions.py diff --git a/docs/conf.py b/docs/conf.py index fc646026..027311a4 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -35,9 +35,9 @@ "myst_parser", "sphinxcontrib.plantuml", "score_plantuml", + "score_source_code_linker", "score_metamodel", "score_draw_uml_funcs", - "score_source_code_linker", "score_layout", ] diff --git a/docs/internals/extensions/source_code_linker.md b/docs/internals/extensions/source_code_linker.md index 2e7085d5..4d8f5310 100644 --- a/docs/internals/extensions/source_code_linker.md +++ b/docs/internals/extensions/source_code_linker.md @@ -5,6 +5,9 @@ A Sphinx extension for source code traceability for requirements. 
This extension In a first step it parses the source code for requirement tags. All discovered tags including their file and line numbers are written in an intermediary file before the sphinx build. In a second step this intermediary file is parsed during sphinx build. If a requirement Id is found in the intermediary file a link to the source is added. +** Please note that the 'test parsing & linking' has been added to the source-code-linker. ** +* The documentation for this part will follow soon * + ## Implementation Components ### Bazel Integration diff --git a/docs/requirements/index.rst b/docs/requirements/index.rst index 1c730e0e..fde7eb58 100644 --- a/docs/requirements/index.rst +++ b/docs/requirements/index.rst @@ -9,3 +9,4 @@ Requirements capabilities process_overview requirements + test_overview diff --git a/docs/requirements/requirements.rst b/docs/requirements/requirements.rst index e4d8c858..7fe086be 100644 --- a/docs/requirements/requirements.rst +++ b/docs/requirements/requirements.rst @@ -4,6 +4,7 @@ Tool Requirements ================================= + 📈 Status ########## diff --git a/docs/requirements/test_overview.rst b/docs/requirements/test_overview.rst new file mode 100644 index 00000000..c87b78fb --- /dev/null +++ b/docs/requirements/test_overview.rst @@ -0,0 +1,55 @@ +.. _testing_stats: + +TESTING STATISTICS +================== + + +.. needtable:: SUCCESSFUL TEST + :filter: result == "passed" + :tags: TEST + :columns: external_url as "source_link"; name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique + + +.. needtable:: FAILED TEST + :filter: result == "failed" + :tags: TEST + :columns: external_url as "source_link"; name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique + + +.. 
needtable:: OTHER TEST + :filter: result != "failed" and result != "passed" + :tags: TEST + :columns: external_url as "source_link"; name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique + + +.. needpie:: Test Results + :labels: passed, failed, skipped + :colors: green, red, orange + :legend: + + type == 'testcase' and result == 'passed' + type == 'testcase' and result == 'failed' + type == 'testcase' and result == 'skipped' + + +.. needpie:: Test Types Used In Testcases + :labels: fault-injection, interface-test, requirements-based, resource-usage + :legend: + + type == 'testcase' and test_type == 'fault-injection' + type == 'testcase' and test_type == 'interface-test' + type == 'testcase' and test_type == 'requirements-based' + type == 'testcase' and test_type == 'resource-usage' + + +.. needpie:: Derivation Techniques Used In Testcases + :labels: requirements-analysis, design-analysis, boundary-values, equivalence-classes, fuzz-testing, error-guessing, explorative-testing + :legend: + + type == 'testcase' and derivation_technique == 'requirements-analysis' + type == 'testcase' and derivation_technique == 'design-analysis' + type == 'testcase' and derivation_technique == 'boundary-values' + type == 'testcase' and derivation_technique == 'equivalence-classes' + type == 'testcase' and derivation_technique == 'fuzz-testing' + type == 'testcase' and derivation_technique == 'error-guessing' + type == 'testcase' and derivation_technique == 'explorative-testing' diff --git a/src/extensions/score_layout/sphinx_options.py b/src/extensions/score_layout/sphinx_options.py index 663f3104..cc9e402a 100644 --- a/src/extensions/score_layout/sphinx_options.py +++ b/src/extensions/score_layout/sphinx_options.py @@ -39,8 +39,8 @@ class SingleLayout(TypedDict): "initial=False)>>", ], "meta_left": [ - '<>', - "<>", + '<>', + '<>', ], "meta_right": [], "footer_left": ["<>"], diff --git a/src/extensions/score_metamodel/checks/check_options.py 
b/src/extensions/score_metamodel/checks/check_options.py index aee3b18a..1df4da0f 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -11,7 +11,6 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* import re -from os import error from score_metamodel import ( CheckLogger, diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index f336f415..59ac8041 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -16,6 +16,8 @@ needs_types_base_options: optional_options: # req-Id: tool_req__docs_dd_link_source_code_link source_code_link: ^https://github.com/.* + testlink: ^https://github.com/.* + # Custom semantic validation rules # Prohibited Word Option Checks # Follow this schema to write new checks @@ -372,7 +374,6 @@ needs_types: optional_options: codelink: ^.*$ tags: ^.*$ - testlink: ^.*$ # req-Id: tool_req__docs_req_attr_reqcov reqcovered: ^(YES|NO)$ # req-Id: tool_req__docs_req_attr_testcov @@ -830,6 +831,23 @@ needs_types: - safety_analysis parts: 3 + testcase: + title: Testcase Needs parsed from test.xml files + prefix: testcase__ + mandatory_options: + id: ^testcase__ + optional_options: + name: ^.*$ + file: ^.*$ + line: ^.*$ + test_type: ^.*$ + derivation_technique: ^.*$ + result: ^.*$ + result_text: ^.*$ + optional_links: + fully_verifies: ^.*$ + partially_verifies: ^.*$ + # Extra link types, which shall be available and allow need types to be linked to each other. # We use a dedicated linked type for each type of a connection, for instance from # a specification to a requirement. 
This makes filtering and visualization of such connections @@ -915,6 +933,14 @@ needs_extra_links: violates: incoming: violated_by outgoing: violates + + fully_verifies: + incoming: fully_verified_by + outgoing: fully_verifies + + partially_verifies: + incoming: partially_verified_by + outgoing: partially_verifies ############################################################## # Graph Checks # The graph checks focus on the relation of the needs and their attributes. diff --git a/src/extensions/score_source_code_linker/BUILD b/src/extensions/score_source_code_linker/BUILD index 08735e00..a8fd36cd 100644 --- a/src/extensions/score_source_code_linker/BUILD +++ b/src/extensions/score_source_code_linker/BUILD @@ -10,6 +10,18 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* +#******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") load("@pip_process//:requirements.bzl", "all_requirements") load("@score_python_basics//:defs.bzl", "score_py_pytest") @@ -27,6 +39,17 @@ py_library( deps = ["@score_docs_as_code//src/helper_lib"], ) +py_library( + name = "source_code_linker_helpers", + srcs = [ + "needlinks.py", + "testlink.py", + "xml_parser.py", + ], + imports = ["."], + visibility = ["//visibility:public"], +) + score_py_pytest( name = "score_source_code_linker_test", size = "small", @@ -42,5 +65,5 @@ score_py_pytest( deps = [ ":score_source_code_linker", "@score_docs_as_code//src/extensions/score_metamodel", - ] + all_requirements, + ], ) diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index b8184c4e..0f0274a1 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -11,7 +11,8 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -"""In this file the actual sphinx extension is defined. It will read pre-generated +""" +In this file the actual sphinx extension is defined. It will read pre-generated source code links from a JSON file and add them to the needs. 
""" @@ -21,7 +22,6 @@ from typing import cast from sphinx.application import Sphinx -from sphinx.config import Config from sphinx.environment import BuildEnvironment from sphinx_needs.data import NeedsInfoType, NeedsMutable, SphinxNeedsData from sphinx_needs.logging import get_logger @@ -29,52 +29,125 @@ from src.extensions.score_source_code_linker.generate_source_code_links_json import ( generate_source_code_links_json, ) +from src.extensions.score_source_code_linker.need_source_links import ( + NeedSourceLinks, + SourceCodeLinks, + load_source_code_links_combined_json, + store_source_code_links_combined_json, +) from src.extensions.score_source_code_linker.needlinks import ( - DefaultNeedLink, NeedLink, load_source_code_links_json, ) +from src.extensions.score_source_code_linker.testlink import ( + DataForTestLink, + load_data_of_test_case_json, + load_test_xml_parsed_json, +) +from src.extensions.score_source_code_linker.xml_parser import ( + construct_and_add_need, + run_xml_parser, +) from src.helper_lib import ( find_git_root, find_ws_root, - get_current_git_hash, - get_github_base_url, ) +from src.helper_lib.additional_functions import get_github_link LOGGER = get_logger(__name__) -# Outcomment this to enable more verbose logging +# Uncomment this to enable more verbose logging # LOGGER.setLevel("DEBUG") -def get_cache_filename(build_dir: Path) -> Path: +# re-qid: gd_req__req_attr_impl +# ╭──────────────────────────────────────╮ +# │ JSON FILE RELATED FUNCS │ +# ╰──────────────────────────────────────╯ + + +def group_by_need( + source_code_links: list[NeedLink], + test_case_links: list[DataForTestLink] | None = None, +) -> list[SourceCodeLinks]: + """ + Groups the given need links and test case links by their need ID. + Returns a nested dictionary structure with 'CodeLink' and 'TestLink' categories. + Example output: + + + { + "need": "", + "links": { + "CodeLinks": [NeedLink, NeedLink, ...], + "TestLinks": [testlink, testlink, ...] 
+ } + } + """ + # TODO: I wonder if there is a more efficent way to do this + grouped_by_need: dict[str, NeedSourceLinks] = defaultdict( + lambda: NeedSourceLinks(TestLinks=[], CodeLinks=[]) + ) + + # Group source code links + for needlink in source_code_links: + grouped_by_need[needlink.need].CodeLinks.append(needlink) + + # Group test case links + if test_case_links is not None: + for testlink in test_case_links: + grouped_by_need[testlink.need].TestLinks.append(testlink) + + # Build final list of SourceCodeLinks + result: list[SourceCodeLinks] = [ + SourceCodeLinks( + need=need, + links=NeedSourceLinks( + CodeLinks=need_links.CodeLinks, + TestLinks=need_links.TestLinks, + ), + ) + for need, need_links in grouped_by_need.items() + ] + + return result + + +def get_cache_filename(build_dir: Path, filename: str) -> Path: """ Returns the path to the cache file for the source code linker. This is used to store the generated source code links. """ - return build_dir / "score_source_code_linker_cache.json" + return build_dir / filename -def setup_once(app: Sphinx, config: Config): - # might be the only way to solve this? - if "skip_rescanning_via_source_code_linker" in app.config: - return - LOGGER.debug(f"DEBUG: Workspace root is {find_ws_root()}") - LOGGER.debug( - f"DEBUG: Current working directory is {Path('.')} = {Path('.').resolve()}" +def build_and_save_combined_file(outdir: Path): + """ + Reads the saved partial caches of codelink & testlink + Builds the combined JSON cache & saves it + """ + source_code_links = load_source_code_links_json( + get_cache_filename(outdir, "score_source_code_linker_cache.json") + ) + test_code_links = load_test_xml_parsed_json( + get_cache_filename(outdir, "score_xml_parser_cache.json") ) - LOGGER.debug(f"DEBUG: Git root is {find_git_root()}") - # Run only for local files! 
- # ws_root is not set when running on any on bazel run - # command repositories (dependencies) - ws_root = find_ws_root() - if not ws_root: - return + store_source_code_links_combined_json( + outdir / "score_scl_grouped_cache.json", + group_by_need(source_code_links, test_code_links), + ) + + +# ╭──────────────────────────────────────╮ +# │ ONE TIME SETUP FUNCS │ +# ╰──────────────────────────────────────╯ - # When BUILD_WORKSPACE_DIRECTORY is set, we are inside a git repository. - assert find_git_root() - # Extension: score_source_code_linker +def setup_source_code_linker(app: Sphinx, ws_root: Path): + """ + Setting up source_code_linker with all needed options. + Allows us to only have this run once during live_preview & esbonio + """ app.add_config_value( "skip_rescanning_via_source_code_linker", False, @@ -84,32 +157,129 @@ def setup_once(app: Sphinx, config: Config): ) # Define need_string_links here to not have it in conf.py + # source_code_link and testlinks have the same schema app.config.needs_string_links = { "source_code_linker": { "regex": r"(?P.+)<>(?P.+)", "link_url": "{{url}}", "link_name": "{{name}}", - "options": ["source_code_link"], + "options": ["source_code_link", "testlink"], }, } - cache_json = get_cache_filename(Path(app.outdir)) + scl_cache_json = get_cache_filename( + app.outdir, "score_source_code_linker_cache.json" + ) - if not cache_json.exists() or not app.config.skip_rescanning_via_source_code_linker: + if ( + not scl_cache_json.exists() + or not app.config.skip_rescanning_via_source_code_linker + ): LOGGER.debug( "INFO: Generating source code links JSON file.", type="score_source_code_linker", ) - generate_source_code_links_json(ws_root, cache_json) + generate_source_code_links_json(ws_root, scl_cache_json) + + +def register_test_code_linker(app: Sphinx): + # Connects function to sphinx to ensure correct execution order + # priority is set to make sure it is called in the right order. 
+ # Before the combining action + app.connect("env-updated", setup_test_code_linker, priority=505) + + +def setup_test_code_linker(app: Sphinx, env: BuildEnvironment): + tl_cache_json = get_cache_filename(app.outdir, "score_xml_parser_cache.json") + if ( + not tl_cache_json.exists() + or not app.config.skip_rescanning_via_source_code_linker + ): + ws_root = find_ws_root() + if not ws_root: + return + LOGGER.debug( + "INFO: Generating score_xml_parser JSON file.", + type="score_source_code_linker", + ) + # sanity check if extension is enabled + bazel_testlogs = ws_root / "bazel-testlogs" + if not bazel_testlogs.exists(): + LOGGER.info(f"{'=' * 80}", type="score_source_code_linker") + LOGGER.info( + f"{'=' * 32}SCORE XML PARSER{'=' * 32}", type="score_source_code_linker" + ) + LOGGER.info( + "'bazel-testlogs' was not found. If test data should be parsed," + + "please run tests before building the documentation", + type="score_source_code_linker", + ) + LOGGER.info(f"{'=' * 80}", type="score_source_code_linker") + return + + run_xml_parser(app, env) + return + tcn_cache = get_cache_filename(app.outdir, "score_testcaseneeds_cache.json") + assert tcn_cache.exists(), ( + f"TestCaseNeed Cache file does not exist.Checked Path: {tcn_cache}" + ) + # TODO: Make this more efficent, idk how though. + test_case_needs = load_data_of_test_case_json(tcn_cache) + for tcn in test_case_needs: + construct_and_add_need(app, tcn) - app.connect("env-updated", inject_links_into_needs) + +def register_combined_linker(app: Sphinx): + # Registering the combined linker to Sphinx + # priority is set to make sure it is called in the right order. 
+ # Needs to be called after xml parsing & codelink + app.connect("env-updated", setup_combined_linker, priority=507) + + +def setup_combined_linker(app: Sphinx, _: BuildEnvironment): + grouped_cache = get_cache_filename(app.outdir, "score_scl_grouped_cache.json") + gruped_cache_exists = grouped_cache.exists() + if not gruped_cache_exists or not app.config.skip_rescanning_via_source_code_linker: + LOGGER.debug( + "Did not find combined json 'score_scl_grouped_cache.json' in _build." + "Generating new one" + ) + build_and_save_combined_file(app.outdir) + + +def setup_once(app: Sphinx): + # might be the only way to solve this? + if "skip_rescanning_via_source_code_linker" in app.config: + return + LOGGER.debug(f"DEBUG: Workspace root is {find_ws_root()}") + LOGGER.debug( + f"DEBUG: Current working directory is {Path('.')} = {Path('.').resolve()}" + ) + LOGGER.debug(f"DEBUG: Git root is {find_git_root()}") + + # Run only for local files! + # ws_root is not set when running on external repositories (dependencies). + ws_root = find_ws_root() + if not ws_root: + return + + # When BUILD_WORKSPACE_DIRECTORY is set, we are inside a git repository. + assert find_git_root() + + # Register & Run (if needed) parsing & saving of JSON caches + setup_source_code_linker(app, ws_root) + register_test_code_linker(app) + register_combined_linker(app) + + # Priorty=510 to ensure it's called after the test code linker & combined connection + app.connect("env-updated", inject_links_into_needs, priority=510) def setup(app: Sphinx) -> dict[str, str | bool]: # Esbonio will execute setup() on every iteration. # setup_once will only be called once. 
- setup_once(app, app.config) + setup_once(app) return { "version": "0.1", @@ -132,33 +302,13 @@ def find_need( for prefix in prefixes: prefixed_id = f"{prefix}{id}" if prefixed_id in all_needs: + LOGGER.warning("linking to external needs is not supported!") return all_needs[prefixed_id] return None -def group_by_need(source_code_links: list[NeedLink]) -> dict[str, list[NeedLink]]: - """ - Groups the given need links by their need ID. - """ - source_code_links_by_need: dict[str, list[NeedLink]] = defaultdict(list) - for needlink in source_code_links: - source_code_links_by_need[needlink.need].append(needlink) - return source_code_links_by_need - - -def get_github_link(needlink: NeedLink | None = None) -> str: - if needlink is None: - needlink = DefaultNeedLink() - passed_git_root = find_git_root() - if passed_git_root is None: - passed_git_root = Path() - base_url = get_github_base_url() - current_hash = get_current_git_hash(passed_git_root) - return f"{base_url}/blob/{current_hash}/{needlink.file}#L{needlink.line}" - - -# req-Id: tool_req__docs_dd_link_source_code_link +# re-qid: gd_req__req__attr_impl def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: """ 'Main' function that facilitates the running of all other functions @@ -169,7 +319,6 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: env: Buildenvironment, this is filled automatically app: Sphinx app application, this is filled automatically """ - ws_root = find_ws_root() assert ws_root @@ -179,46 +328,65 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: needs ) # TODO: why do we create a copy? Can we also needs_copy = needs[:]? copy(needs)? - for _, need in needs.items(): - if need.get("source_code_link"): - LOGGER.debug( - f"?? 
Need {need['id']} already has source_code_link: " - f"{need.get('source_code_link')}" - ) - - source_code_links = load_source_code_links_json(get_cache_filename(app.outdir)) - - # group source_code_links by need - # groupby requires the input to be sorted by the key + # Enabled automatically for DEBUGGING + if LOGGER.getEffectiveLevel() >= 10: + for id, need in needs.items(): + if need.get("source_code_link"): + LOGGER.debug( + f"?? Need {id} already has source_code_link: {need.get('source_code_link')}" + ) + if need.get("testlink"): + LOGGER.debug( + f"?? Need {id} already has testlink: {need.get('testlink')}" + ) - source_code_links_by_need = group_by_need(source_code_links) + source_code_links_by_need = load_source_code_links_combined_json( + get_cache_filename(app.outdir, "score_scl_grouped_cache.json") + ) # For some reason the prefix 'sphinx_needs internally' is CAPSLOCKED. # So we have to make sure we uppercase the prefixes prefixes = [x["id_prefix"].upper() for x in app.config.needs_external_needs] - for need_id, needlinks in source_code_links_by_need.items(): - need = find_need(needs_copy, need_id, prefixes) + for source_code_links in source_code_links_by_need: + need = find_need(needs_copy, source_code_links.need, prefixes) if need is None: # TODO: print github annotations as in https://github.com/eclipse-score/bazel_registry/blob/7423b9996a45dd0a9ec868e06a970330ee71cf4f/tools/verify_semver_compatibility_level.py#L126-L129 - for n in needlinks: + for n in source_code_links.links.CodeLinks: + LOGGER.warning( + f"{n.file}:{n.line}: Could not find {source_code_links.need} in documentation [CODE LINK]", + type="score_source_code_linker", + ) + for n in source_code_links.links.TestLinks: LOGGER.warning( - f"{n.file}:{n.line}: Could not find {need_id} in documentation", + f"{n.file}:{n.line}: Could not find {source_code_links.need} in documentation [TEST LINK]", type="score_source_code_linker", ) - else: - need_as_dict = cast(dict[str, object], need) + continue 
+ + need_as_dict = cast(dict[str, object], need) + + need_as_dict["source_code_link"] = ", ".join( + f"{get_github_link(n)}<>{n.file}:{n.line}" + for n in source_code_links.links.CodeLinks + ) + need_as_dict["testlink"] = ", ".join( + f"{get_github_link(n)}<>{n.name}" for n in source_code_links.links.TestLinks + ) + + # NOTE: Removing & adding the need is important to make sure + # the needs gets 're-evaluated'. + Needs_Data.remove_need(need["id"]) + Needs_Data.add_need(need) - need_as_dict["source_code_link"] = ", ".join( - f"{get_github_link(n)}<>{n.file}:{n.line}" for n in needlinks - ) - # NOTE: Removing & adding the need is important to make sure - # the needs gets 're-evaluated'. - Needs_Data.remove_need(need["id"]) - Needs_Data.add_need(need) +# ╭──────────────────────────────────────╮ +# │ WARNING: This somehow screws up the │ +# │ integration test? What?? │ +# │ Commented out for now │ +# ╰──────────────────────────────────────╯ - # source_code_link of affected needs was overwritten. - # Make sure it's empty in all others! - for need in needs.values(): - if need["id"] not in source_code_links_by_need: - need["source_code_link"] = "" +# source_code_link of affected needs was overwritten. Make sure it's empty in all others! +# for need in needs.values(): +# if need["id"] not in source_code_links_by_need: +# need["source_code_link"] = "" # type: ignore +# need["testlink"] = "" # type: ignore diff --git a/src/extensions/score_source_code_linker/need_source_links.py b/src/extensions/score_source_code_linker/need_source_links.py new file mode 100644 index 00000000..f0310c0c --- /dev/null +++ b/src/extensions/score_source_code_linker/need_source_links.py @@ -0,0 +1,104 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +""" +This file defines NeedSourceLinks as well as SourceCodeLinks. +Both datatypes are used in the 'grouped cache' JSON that contains 'CodeLinks' and 'TestLinks' +It also defines a decoder and encoder for SourceCodeLinks to enable JSON read/write +""" + +import json +from dataclasses import asdict, dataclass, field +from pathlib import Path +from typing import Any + +from src.extensions.score_source_code_linker.needlinks import ( + NeedLink, +) +from src.extensions.score_source_code_linker.testlink import ( + DataForTestLink, +) + + +@dataclass +class NeedSourceLinks: + CodeLinks: list[NeedLink] = field(default_factory=list) + TestLinks: list[DataForTestLink] = field(default_factory=list) + + +@dataclass +class SourceCodeLinks: + # TODO: Find a good key name for this + need: str + links: NeedSourceLinks + # Example: + # + # need: + # links: + # { + # "CodeLinks: + # [{needlink},{needlink}...], + # "TestLinks": + # [{testlink},{testlink},...] 
+ + +class SourceCodeLinks_JSON_Encoder(json.JSONEncoder): + def default(self, o: object): + if isinstance(o, (SourceCodeLinks, NeedSourceLinks)): + return asdict(o) + if isinstance(o, (NeedLink, DataForTestLink)): + return asdict(o) + if isinstance(o, Path): + return str(o) + return super().default(o) + + +def SourceCodeLinks_JSON_Decoder(d: dict[str, Any]) -> SourceCodeLinks | dict[str, Any]: + if "need" in d and "links" in d: + links = d["links"] + return SourceCodeLinks( + need=d["need"], + links=NeedSourceLinks( + CodeLinks=[NeedLink(**cl) for cl in links.get("CodeLinks", [])], + TestLinks=[DataForTestLink(**tl) for tl in links.get("TestLinks", [])], + ), + ) + return d + + +def store_source_code_links_combined_json( + file: Path, source_code_links: list[SourceCodeLinks] +): + # After `rm -rf _build` or on clean builds the directory does not exist, so we need to create it + file.parent.mkdir(exist_ok=True) + with open(file, "w") as f: + json.dump( + source_code_links, + f, + cls=SourceCodeLinks_JSON_Encoder, + indent=2, + ensure_ascii=False, + ) + + +def load_source_code_links_combined_json(file: Path) -> list[SourceCodeLinks]: + links: list[SourceCodeLinks] = json.loads( + file.read_text(encoding="utf-8"), + object_hook=SourceCodeLinks_JSON_Decoder, + ) + assert isinstance(links, list), ( + "The combined source code linker links should be a list of SourceCodeLinks objects." + ) + assert all(isinstance(link, SourceCodeLinks) for link in links), ( + "All items in combined_source_code_linker_cache should be SourceCodeLinks objects." 
+ ) + return links diff --git a/src/extensions/score_source_code_linker/needlinks.py b/src/extensions/score_source_code_linker/needlinks.py index 406ad941..2d3ca246 100644 --- a/src/extensions/score_source_code_linker/needlinks.py +++ b/src/extensions/score_source_code_linker/needlinks.py @@ -16,7 +16,7 @@ from typing import Any -@dataclass(frozen=True) +@dataclass(frozen=True, order=True) class NeedLink: """Represents a single template string finding in a file.""" diff --git a/src/extensions/score_source_code_linker/testlink.py b/src/extensions/score_source_code_linker/testlink.py new file mode 100644 index 00000000..30416e5d --- /dev/null +++ b/src/extensions/score_source_code_linker/testlink.py @@ -0,0 +1,248 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +""" +This file defines the TestLink and TestCaseNeed. 
+It also defines encoding & decoding for JSON write/reading of TestLinks + +TestCaseNeed => The datatype that testcases from the test.xml get parsed into +TestLink => The datatype that is ultimately saved inside of the JSON +""" + +import html +import json +import re +from dataclasses import asdict, dataclass +from itertools import chain +from pathlib import Path +from typing import Any + +from sphinx_needs import logging + +LOGGER = logging.get_logger(__name__) + + +@dataclass(frozen=True, order=True) +class DataForTestLink: + name: str + file: Path + line: int + need: str + verify_type: str + result: str + result_text: str = "" + + +class DataForTestLink_JSON_Encoder(json.JSONEncoder): + def default(self, o: object): + if isinstance(o, DataForTestLink): + return asdict(o) + if isinstance(o, Path): + return str(o) + return super().default(o) + + +def DataForTestLink_JSON_Decoder(d: dict[str, Any]) -> DataForTestLink | dict[str, Any]: + if { + "name", + "file", + "line", + "need", + "verify_type", + "result", + "result_text", + } <= d.keys(): + return DataForTestLink( + name=d["name"], + file=Path(d["file"]), + line=d["line"], + need=d["need"], + verify_type=d["verify_type"], + result=d["result"], + result_text=d["result_text"], + ) + # It's something else, pass it on to other decoders + return d + + +# We will have everything as string here as that mirrors the xml file +@dataclass +class DataOfTestCase: + name: str + file: str + line: str + result: str # passed | falied | skipped | disabled + # Intentionally not snakecase to make dict parsing simple + TestType: str + DerivationTechnique: str + result_text: str = "" # Can be None on anything but failed + # Either or HAVE to be filled. 
+ PartiallyVerifies: str | None = None + FullyVerifies: str | None = None + + @classmethod + def from_dict(cls, data: dict[str, Any]): # type-ignore + return cls(**data) # type-ignore + + @classmethod + def clean_text(cls, text: str): + # This might not be the right thing in all circumstances + + # Designed to find ansi terminal codes (formatting&color) and santize the text + # '\x1b[0m' => '' # Reset formatting code + # '\x1b[31m' => '' # Red text + ansi_regex = re.compile(r"\x1b\[[0-9;]*m") + ansi_clean = ansi_regex.sub("", text) + # Will turn HTML things back into 'symbols'. E.g. '<' => '<' + decoded = html.unescape(ansi_clean) + return str(decoded.replace("\n", " ")).strip() + + def __post_init__(self): + # Cleaning text + if self.result_text: + self.result_text = self.clean_text(self.result_text) + # Self assertion to double check some mandatory options + # For now this is disabled + + # It's mandatory that the test either partially or fully verifies a requirement + # if self.PartiallyVerifies is None and self.FullyVerifies is None: + # raise ValueError( + # f"TestCase: {self.id} Error. Either 'PartiallyVerifies' or 'FullyVerifies' must be provided." + # ) + # Skipped tests should always have a reason associated with them + # if "skipped" in self.result.keys() and not list(self.result.values())[0]: + # raise ValueError( + # f"TestCase: {self.id} Error. Test was skipped without provided reason, reason is mandatory for skipped tests." 
+ # ) + + def get_test_links(self) -> list[DataForTestLink]: + """Convert TestCaseNeed to list of TestLink objects.""" + + def parse_attributes(self, verify_field: str | None, verify_type: str): + """Process a verification field and yield TestLink objects.""" + if not verify_field: + return + + LOGGER.debug( + f"{verify_type.upper()} VERIFIES: {verify_field}", + type="score_source_code_linker", + ) + + for need in verify_field.split(","): + yield DataForTestLink( + name=self.name, + file=Path(self.file), + line=int(self.line), + need=need.strip(), + verify_type=verify_type, + result=self.result, + result_text=self.result_text, + ) + + return list( + chain( + parse_attributes(self, self.PartiallyVerifies, "partially"), + parse_attributes(self, self.FullyVerifies, "fully"), + ) + ) + + +class DataOfTestCase_JSON_Encoder(json.JSONEncoder): + def default(self, o: object): + if isinstance(o, DataOfTestCase): + return asdict(o) + return super().default(o) + + +def DataOfTestCase_JSON_Decoder(d: dict[str, Any]) -> DataOfTestCase | dict[str, Any]: + if { + "name", + "file", + "line", + "result", + "TestType", + "DerivationTechnique", + "result_text", + "PartiallyVerifies", + "FullyVerifies", + } <= d.keys(): + return DataOfTestCase( + name=d["name"], + file=d["file"], + line=d["line"], + result=d["result"], + TestType=d["TestType"], + DerivationTechnique=d["DerivationTechnique"], + result_text=d["result_text"], + PartiallyVerifies=d["PartiallyVerifies"], + FullyVerifies=d["FullyVerifies"], + ) + # It's something else, pass it on to other decoders + return d + + +def store_test_xml_parsed_json(file: Path, testlist: list[DataForTestLink]): + """ + TestCases that are 'skipped' do not have properties, therefore they will NOT be saved/transformed + to TestLinks. 
+ """ + # After `rm -rf _build` or on clean builds the directory does not exist, so we need to create it + file.parent.mkdir(exist_ok=True) + with open(file, "w") as f: + json.dump( + testlist, + f, + cls=DataForTestLink_JSON_Encoder, + indent=2, + ensure_ascii=False, + ) + + +def load_test_xml_parsed_json(file: Path) -> list[DataForTestLink]: + links: list[DataForTestLink] = json.loads( + file.read_text(encoding="utf-8"), + object_hook=DataForTestLink_JSON_Decoder, + ) + assert isinstance(links, list), ( + "The source xml parser links should be a list of TestLink objects." + ) + assert all(isinstance(link, DataForTestLink) for link in links), ( + "All items in source_xml_parser should be TestLink objects." + ) + return links + + +def store_data_of_test_case_json(file: Path, testneeds: list[DataOfTestCase]): + # After `rm -rf _build` or on clean builds the directory does not exist, so we need to create it + file.parent.mkdir(exist_ok=True) + with open(file, "w") as f: + json.dump( + testneeds, + f, + cls=DataOfTestCase_JSON_Encoder, + indent=2, + ensure_ascii=False, + ) + + +def load_data_of_test_case_json(file: Path) -> list[DataOfTestCase]: + links: list[DataOfTestCase] = json.loads( + file.read_text(encoding="utf-8"), + object_hook=DataOfTestCase_JSON_Decoder, + ) + assert isinstance(links, list), ( + "The test_case_need json should be a list of TestCaseNeed objects." + ) + assert all(isinstance(link, DataOfTestCase) for link in links), ( + "All items in source_xml_parser should be TestCaseNeed objects." 
+ ) + return links diff --git a/src/extensions/score_source_code_linker/tests/scl_golden_file.json b/src/extensions/score_source_code_linker/tests/expected_codelink.json similarity index 97% rename from src/extensions/score_source_code_linker/tests/scl_golden_file.json rename to src/extensions/score_source_code_linker/tests/expected_codelink.json index e5584a12..457d6a61 100644 --- a/src/extensions/score_source_code_linker/tests/scl_golden_file.json +++ b/src/extensions/score_source_code_linker/tests/expected_codelink.json @@ -8,7 +8,7 @@ }, { "file": "src/implementation2.py", - "line": 3, + "line": 5, "tag":"#-----req-Id:", "need": "TREQ_ID_1", "full_line": "#-----req-Id: TREQ_ID_1" diff --git a/src/extensions/score_source_code_linker/tests/expected_grouped.json b/src/extensions/score_source_code_linker/tests/expected_grouped.json new file mode 100644 index 00000000..da05343c --- /dev/null +++ b/src/extensions/score_source_code_linker/tests/expected_grouped.json @@ -0,0 +1,113 @@ +[ + { + "need": "TREQ_ID_1", + "links": { + "CodeLinks": [ + { + "file": "src/implementation1.py", + "line": 3, + "tag":"#-----req-Id:", + "need": "TREQ_ID_1", + "full_line": "#-----req-Id: TREQ_ID_1" + }, + { + "file": "src/implementation2.py", + "line": 5, + "tag":"#-----req-Id:", + "need": "TREQ_ID_1", + "full_line": "#-----req-Id: TREQ_ID_1" + } + + ], + "TestLinks": [ + { + "name": "test_system_startup_time", + "file": "src/tests/testfile_2.py", + "line": 25, + "need": "TREQ_ID_1", + "verify_type": "fully", + "result": "passed", + "result_text": "" + } + ] + } + }, + { + "need": "TREQ_ID_2", + "links": { + "CodeLinks": [ + { + "file": "src/implementation1.py", + "line": 9, + "tag":"#-----req-Id:", + "need": "TREQ_ID_2", + "full_line":"#-----req-Id: TREQ_ID_2" + } + ], + "TestLinks": [ + + { + "name": "test_api_response_format", + "file": "src/testfile_1.py", + "line": 10, + "need": "TREQ_ID_2", + "verify_type": "partially", + "result": "passed", + "result_text": "" + }, + { + 
"name": "test_error_handling", + "file": "src/testfile_1.py", + "line": 38, + "need": "TREQ_ID_2", + "verify_type": "partially", + "result": "passed", + "result_text": "" + } + + ] + } + }, + { + "need": "TREQ_ID_3", + "links": { + "CodeLinks": [], + "TestLinks": [ + { + "name": "test_api_response_format", + "file": "src/testfile_1.py", + "line": 10, + "need": "TREQ_ID_3", + "verify_type": "partially", + "result": "passed", + "result_text": "" + }, + { + "name": "test_error_handling", + "file": "src/testfile_1.py", + "line": 38, + "need": "TREQ_ID_3", + "verify_type": "partially", + "result": "passed", + "result_text": "" + } + ] + } + }, + { + "need": "TREQ_ID_200", + "links": { + "CodeLinks": [ + { + "file": "src/bad_implementation.py", + "line":2, + "tag":"#-----req-Id:", + "need": "TREQ_ID_200", + "full_line":"#-----req-Id: TREQ_ID_200" + } + + ], + "TestLinks": [] + } + } +] diff --git a/src/extensions/score_source_code_linker/tests/expected_testlink.json b/src/extensions/score_source_code_linker/tests/expected_testlink.json new file mode 100644 index 00000000..9dc32210 --- /dev/null +++ b/src/extensions/score_source_code_linker/tests/expected_testlink.json @@ -0,0 +1,47 @@ +[ + { + "name": "test_api_response_format", + "file": "src/testfile_1.py", + "line": 10, + "need": "TREQ_ID_2", + "verify_type": "partially", + "result": "passed", + "result_text": "" + }, + { + "name": "test_api_response_format", + "file": "src/testfile_1.py", + "line": 10, + "need": "TREQ_ID_3", + "verify_type": "partially", + "result": "passed", + "result_text": "" + }, + { + "name": "test_error_handling", + "file": "src/testfile_1.py", + "line": 38, + "need": "TREQ_ID_2", + "verify_type": "partially", + "result": "passed", + "result_text": "" + }, + { + "name": "test_error_handling", + "file": "src/testfile_1.py", + "line": 38, + "need": "TREQ_ID_3", + "verify_type": "partially", + "result": "passed", + "result_text": "" + }, + { + "name": "test_system_startup_time", + "file": 
"src/tests/testfile_2.py", + "line": 25, + "need": "TREQ_ID_1", + "verify_type": "fully", + "result": "passed", + "result_text": "" + } +] diff --git a/src/extensions/score_source_code_linker/tests/test_requirement_links.py b/src/extensions/score_source_code_linker/tests/test_codelink.py similarity index 92% rename from src/extensions/score_source_code_linker/tests/test_requirement_links.py rename to src/extensions/score_source_code_linker/tests/test_codelink.py index a6460c1d..633c0125 100644 --- a/src/extensions/score_source_code_linker/tests/test_requirement_links.py +++ b/src/extensions/score_source_code_linker/tests/test_codelink.py @@ -28,7 +28,6 @@ from src.extensions.score_source_code_linker import ( find_need, get_cache_filename, - get_github_link, group_by_need, ) from src.extensions.score_source_code_linker.needlinks import ( @@ -41,6 +40,7 @@ get_github_repo_info, parse_remote_git_output, ) +from src.helper_lib.additional_functions import get_github_link """ # ────────────────ATTENTION─────────────── @@ -263,7 +263,7 @@ def test_get_cache_filename(): """Test cache filename generation.""" build_dir = Path("/tmp/build") expected = build_dir / "score_source_code_linker_cache.json" - result = get_cache_filename(build_dir) + result = get_cache_filename(build_dir, "score_source_code_linker_cache.json") assert result == expected @@ -333,21 +333,24 @@ def test_group_by_need(sample_needlinks): """Test grouping source code links by need ID.""" result = group_by_need(sample_needlinks) - assert len(result) == 3 - assert len(result["TREQ_ID_1"]) == 2 - assert len(result["TREQ_ID_2"]) == 1 - assert len(result["TREQ_ID_200"]) == 1 - # Check that the grouping is correct - assert result["TREQ_ID_1"][0].file == Path("src/implementation1.py") - assert result["TREQ_ID_1"][1].file == Path("src/implementation2.py") - assert result["TREQ_ID_2"][0].file == Path("src/implementation1.py") - assert result["TREQ_ID_2"][0].line == 9 + assert len(result) == 3 + for found_link in 
result: + if found_link.need == "TREQ_ID_1": + assert len(found_link.links.CodeLinks) == 2 + assert found_link.links.CodeLinks[0].file == Path("src/implementation1.py") + assert found_link.links.CodeLinks[1].file == Path("src/implementation2.py") + elif found_link.need == "TREQ_ID_2": + assert len(found_link.links.CodeLinks) == 1 + assert found_link.links.CodeLinks[0].file == Path("src/implementation1.py") + assert found_link.links.CodeLinks[0].line == 9 + elif found_link.need == "TREQ_ID_200": + assert len(found_link.links.CodeLinks) == 1 def test_group_by_need_empty_list(): """Test grouping empty list of needlinks.""" - result = group_by_need([]) + result = group_by_need([], []) assert len(result) == 0 @@ -519,17 +522,17 @@ def test_group_by_need_and_find_need_integration(sample_needlinks): ) # Test finding needs for each group - for need_id in grouped: - found_need = find_need(all_needs, need_id, ["PREFIX_"]) - if need_id in ["TREQ_ID_1", "TREQ_ID_2"]: + for found_link in grouped: + found_need = find_need(all_needs, found_link.need, ["PREFIX_"]) + if found_link.need in ["TREQ_ID_1", "TREQ_ID_2"]: assert found_need is not None - assert found_need["id"] == need_id - elif need_id == "TREQ_ID_200": + assert found_need["id"] == found_link.need + elif found_link.need == "TREQ_ID_200": assert found_need is not None assert found_need["id"] == "PREFIX_TREQ_ID_200" -def test_end_to_end_with_real_files(temp_dir, git_repo): +def test_source_linker_end_to_end_with_real_files(temp_dir, git_repo): """Test end-to-end workflow with real files and git repo.""" # Create source files with requirement IDs src_dir = git_repo / "src" @@ -602,8 +605,13 @@ def another_function(): # Test grouping grouped = group_by_need(loaded_links) - assert len(grouped["TREQ_ID_1"]) == 2 - assert len(grouped["TREQ_ID_2"]) == 1 + for found_links in grouped: + if found_links.need == "TREQ_ID_1": + assert len(found_links.links.CodeLinks) == 2 + assert len(found_links.links.TestLinks) == 0 + if 
found_links.need == "TREQ_ID_2": + assert len(found_links.links.CodeLinks) == 1 + assert len(found_links.links.TestLinks) == 0 # Test GitHub link generation # Have to change directories in order to ensure that we get the right/any .git file diff --git a/src/extensions/score_source_code_linker/tests/test_need_source_links.py b/src/extensions/score_source_code_linker/tests/test_need_source_links.py new file mode 100644 index 00000000..4e1c052c --- /dev/null +++ b/src/extensions/score_source_code_linker/tests/test_need_source_links.py @@ -0,0 +1,143 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +import json +from dataclasses import asdict +from pathlib import Path +from typing import Any + +import pytest + +from src.extensions.score_source_code_linker.need_source_links import ( + NeedSourceLinks, + SourceCodeLinks, + SourceCodeLinks_JSON_Decoder, + SourceCodeLinks_JSON_Encoder, + load_source_code_links_combined_json, + store_source_code_links_combined_json, +) +from src.extensions.score_source_code_linker.needlinks import NeedLink +from src.extensions.score_source_code_linker.testlink import DataForTestLink +from src.extensions.score_source_code_linker.tests.test_codelink import ( + NeedLinkTestEncoder, + needlink_test_decoder, +) + + +def SourceCodeLinks_TEST_JSON_Decoder( + d: dict[str, Any], +) -> SourceCodeLinks | dict[str, Any]: + if "need" in d and "links" in d: + links = d["links"] + return SourceCodeLinks( + need=d["need"], + 
links=NeedSourceLinks( + CodeLinks=[ + needlink_test_decoder(cl) for cl in links.get("CodeLinks", []) + ], + TestLinks=[DataForTestLink(**tl) for tl in links.get("TestLinks", [])], + ), + ) + return d + + +class SourceCodeLinks_TEST_JSON_Encoder(json.JSONEncoder): + def default(self, o: object): + if isinstance(o, SourceCodeLinks): + return { + "need": o.need, + "links": self.default(o.links), + } + if isinstance(o, NeedSourceLinks): + return { + "CodeLinks": [NeedLinkTestEncoder().default(cl) for cl in o.CodeLinks], + "TestLinks": [asdict(tl) for tl in o.TestLinks], + } + if isinstance(o, Path): + return str(o) + return super().default(o) + + +@pytest.fixture +def sample_needlink() -> NeedLink: + return NeedLink( + file=Path("src/example.py"), + line=10, + tag="# req:", + need="REQ_001", + full_line="# req: REQ_001", + ) + + +@pytest.fixture +def sample_testlink() -> DataForTestLink: + return DataForTestLink( + name="test_example", + file=Path("tests/test_example.py"), + need="REQ_001", + line=5, + verify_type="partially", + result="passed", + result_text="", + ) + + +@pytest.fixture +def sample_source_code_links(sample_needlink, sample_testlink) -> SourceCodeLinks: + return SourceCodeLinks( + need="REQ_001", + links=NeedSourceLinks(CodeLinks=[sample_needlink], TestLinks=[sample_testlink]), + ) + + +def test_encoder_outputs_serializable_dict(sample_source_code_links): + encoded = json.dumps(sample_source_code_links, cls=SourceCodeLinks_JSON_Encoder) + assert isinstance(encoded, str) + assert "REQ_001" in encoded + + +def test_decoder_reconstructs_object(sample_source_code_links): + encoded = json.dumps(sample_source_code_links, cls=SourceCodeLinks_JSON_Encoder) + decoded = json.loads(encoded, object_hook=SourceCodeLinks_JSON_Decoder) + assert isinstance(decoded, SourceCodeLinks) + assert decoded.need == "REQ_001" + assert isinstance(decoded.links, NeedSourceLinks) + assert decoded.links.CodeLinks[0].need == "REQ_001" + + +def test_store_and_load_json(tmp_path: 
Path, sample_source_code_links): + test_file = tmp_path / "combined_links.json" + store_source_code_links_combined_json(test_file, [sample_source_code_links]) + assert test_file.exists() + + loaded = load_source_code_links_combined_json(test_file) + assert isinstance(loaded, list) + assert len(loaded) == 1 + assert isinstance(loaded[0], SourceCodeLinks) + assert loaded[0].need == sample_source_code_links.need + + +def test_load_invalid_json_type(tmp_path: Path): + test_file = tmp_path / "invalid.json" + test_file.write_text('{"not_a_list": true}', encoding="utf-8") + + with pytest.raises(AssertionError, match="should be a list of SourceCodeLinks"): + _ = load_source_code_links_combined_json(test_file) + + +def test_load_invalid_json_items(tmp_path: Path): + test_file = tmp_path / "bad_items.json" + # This is a list but doesn't contain SourceCodeLinks + test_file.write_text('[{"some": "thing"}]', encoding="utf-8") + + with pytest.raises(AssertionError, match="should be SourceCodeLinks objects"): + _ = load_source_code_links_combined_json(test_file) diff --git a/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py b/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py new file mode 100644 index 00000000..9f7d2578 --- /dev/null +++ b/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py @@ -0,0 +1,553 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +import json +import os +import shutil +import subprocess +from collections import Counter +from collections.abc import Callable +from pathlib import Path +from typing import cast + +import pytest +from pytest import TempPathFactory +from sphinx.testing.util import SphinxTestApp +from sphinx_needs.data import SphinxNeedsData + +from src.extensions.score_source_code_linker.needlinks import NeedLink +from src.extensions.score_source_code_linker.testlink import ( + DataForTestLink, + DataForTestLink_JSON_Decoder, +) +from src.extensions.score_source_code_linker.tests.test_codelink import ( + needlink_test_decoder, +) +from src.extensions.score_source_code_linker.tests.test_need_source_links import ( + SourceCodeLinks_TEST_JSON_Decoder, +) +from src.helper_lib import find_ws_root, get_github_base_url +from src.helper_lib.additional_functions import get_github_link + + +@pytest.fixture() +def sphinx_base_dir(tmp_path_factory: TempPathFactory) -> Path: + repo_path = tmp_path_factory.mktemp("test_git_repo") + return repo_path + + +@pytest.fixture() +def git_repo_setup(sphinx_base_dir) -> Path: + """Creating git repo, to make testing possible""" + + repo_path = sphinx_base_dir + subprocess.run(["git", "init"], cwd=repo_path, check=True) + subprocess.run( + ["git", "config", "user.name", "Test User"], cwd=repo_path, check=True + ) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=repo_path, check=True + ) + + subprocess.run( + ["git", "remote", "add", "origin", "https://github.com/testorg/testrepo.git"], + cwd=repo_path, + check=True, + ) + os.environ["BUILD_WORKSPACE_DIRECTORY"] = str(repo_path) + return repo_path + + +@pytest.fixture() 
+def create_demo_files(sphinx_base_dir, git_repo_setup): + repo_path = sphinx_base_dir + + # Create some source files with requirement IDs + source_dir = repo_path / "src" + source_dir.mkdir() + + # Create source files that contain requirement references + (source_dir / "implementation1.py").write_text(make_codelink_source_1()) + + (source_dir / "implementation2.py").write_text(make_codelink_source_2()) + (source_dir / "bad_implementation.py").write_text(make_codelink_bad_source()) + # Create a docs directory for Sphinx + docs_dir = repo_path / "docs" + docs_dir.mkdir() + (docs_dir / "index.rst").write_text(basic_needs()) + (docs_dir / "conf.py").write_text(basic_conf()) + + # Create test.xml files + bazel_testdir1 = repo_path / "bazel-testlogs" + bazel_testdir1.mkdir() + bazel_testdir2 = bazel_testdir1 / "src" + bazel_testdir2.mkdir() + + (bazel_testdir2 / "test.xml").write_text(make_test_xml_1()) + testsdir = bazel_testdir2 / "tests" + testsdir.mkdir() + (testsdir / "test.xml").write_text(make_test_xml_2()) + + curr_dir = Path(__file__).absolute().parent + # print("CURR_dir", curr_dir) + shutil.copyfile( + curr_dir / "expected_codelink.json", repo_path / ".expected_codelink.json" + ) + shutil.copyfile( + curr_dir / "expected_testlink.json", repo_path / ".expected_testlink.json" + ) + shutil.copyfile( + curr_dir / "expected_grouped.json", repo_path / ".expected_grouped.json" + ) + + # Add files to git and commit + subprocess.run(["git", "add", "."], cwd=repo_path, check=True) + subprocess.run( + ["git", "commit", "-m", "Initial commit with test files"], + cwd=repo_path, + check=True, + ) + + +def make_codelink_source_1(): + return ( + """ +# This is a test implementation file +#""" + + """ req-Id: TREQ_ID_1 +def some_function(): + pass + +# Some other code here +# More code... 
+#""" + """ req-Id: TREQ_ID_2 +def another_function(): + pass +""" + ) + + +def make_codelink_source_2(): + return ( + """ +# Another implementation file +# Though we should make sure this +# is at a different line than the other ID +#""" + + """ req-Id: TREQ_ID_1 +class SomeClass: + def method(self): + pass + +""" + ) + + +def make_codelink_bad_source(): + return ( + """ +#""" + + """ req-Id: TREQ_ID_200 +def This_Should_Error(self): + pass + +""" + ) + + +def make_test_xml_1(): + return """ + + + + + + + + + + + + + + + + + + +""" + + +def make_test_xml_2(): + return """ + + + + + + + + + + + This is a message that shouldn't show up + + + +""" + + +def construct_gh_url() -> str: + gh = get_github_base_url() + return f"{gh}/blob/" + + +@pytest.fixture() +def sphinx_app_setup( + sphinx_base_dir, create_demo_files, git_repo_setup +) -> Callable[[], SphinxTestApp]: + def _create_app(): + base_dir = sphinx_base_dir + docs_dir = base_dir / "docs" + + # CRITICAL: Change to a directory that exists and is accessible + # This fixes the "no such file or directory" error in Bazel + original_cwd = None + try: + original_cwd = os.getcwd() + except FileNotFoundError: + # Current working directory doesn't exist, which is the problem + pass + + # Change to the base_dir before creating SphinxTestApp + os.chdir(base_dir) + try: + return SphinxTestApp( + freshenv=True, + srcdir=docs_dir, + confdir=docs_dir, + outdir=sphinx_base_dir / "out", + buildername="html", + warningiserror=True, + ) + finally: + # Try to restore original directory, but don't fail if it doesn't exist + if original_cwd is not None: + try: + os.chdir(original_cwd) + except (FileNotFoundError, OSError): + # Original directory might not exist anymore in Bazel sandbox + pass + + return _create_app + + +def basic_conf(): + return """ +extensions = [ + "sphinx_needs", + "score_source_code_linker", +] +needs_types = [ + dict( + directive="test_req", + title="Testing Requirement", + prefix="TREQ_", + color="#BFD8D2", + 
style="node", + ), +] +needs_extra_options = ["source_code_link", "testlink"] +needs_extra_links = [{ + "option": "partially_verifies", + "incoming": "paritally_verified_by", + "outgoing": "paritally_verifies", + }, + { + "option": "fully_verifies", + "incoming": "fully_verified_by", + "outgoing": "fully_verifies", + }] + +""" + + +def basic_needs(): + return """ +TESTING SOURCE LINK +=================== + +.. test_req:: TestReq1 + :id: TREQ_ID_1 + :status: valid + +.. test_req:: TestReq2 + :id: TREQ_ID_2 + :status: open + +.. test_req:: TestReq3 + :id: TREQ_ID_3 + :status: open +""" + + +@pytest.fixture() +def example_source_link_text_all_ok(sphinx_base_dir): + repo_path = sphinx_base_dir + return { + "TREQ_ID_1": [ + NeedLink( + file=Path("src/implementation1.py"), + line=3, + tag="#" + " req-Id:", + need="TREQ_ID_1", + full_line="#" + " req-Id: TREQ_ID_1", + ), + NeedLink( + file=Path("src/implementation2.py"), + line=3, + tag="#" + " req-Id:", + need="TREQ_ID_1", + full_line="#" + " req-Id: TREQ_ID_1", + ), + ], + "TREQ_ID_2": [ + NeedLink( + file=Path("src/implementation1.py"), + line=9, + tag="#" + " req-Id:", + need="TREQ_ID_2", + full_line="#" + " req-Id: TREQ_ID_2", + ) + ], + } + + +@pytest.fixture() +def example_test_link_text_all_ok(sphinx_base_dir): + repo_path = sphinx_base_dir + return { + "TREQ_ID_1": [ + DataForTestLink( + name="test_system_startup_time", + file=Path("src/tests/testfile_2.py"), + need="TREQ_ID_1", + line=25, + verify_type="fully", + result="passed", + result_text="", + ), + ], + "TREQ_ID_2": [ + DataForTestLink( + name="test_api_response_format", + file=Path("src/testfile_1.py"), + need="TREQ_ID_2", + line=10, + verify_type="partially", + result="passed", + result_text="", + ), + DataForTestLink( + name="test_error_handling", + file=Path("src/tests/testfile_2.py"), + need="TREQ_ID_2", + line=33, + verify_type="partially", + result="passed", + result_text="", + ), + ], + "TREQ_ID_3": [ + DataForTestLink( + 
name="test_api_response_format", + file=Path("src/testfile_1.py"), + need="TREQ_ID_3", + line=10, + verify_type="partially", + result="passed", + result_text="", + ), + DataForTestLink( + name="test_error_handling", + file=Path("src/test/testfile_2.py"), + need="TREQ_ID_3", + line=38, + verify_type="partially", + result="passed", + result_text="", + ), + ], + } + + +@pytest.fixture() +def example_source_link_text_non_existent(sphinx_base_dir): + repo_path = sphinx_base_dir + return [ + { + "TREQ_ID_200": [ + NeedLink( + file=Path("src/bad_implementation.py"), + line=2, + tag="#" + " req-Id:", + need="TREQ_ID_200", + full_line="#" + " req-Id: TREQ_ID_200", + ) + ] + } + ] + + +def make_source_link(needlinks): + return ", ".join(f"{get_github_link(n)}<>{n.file}:{n.line}" for n in needlinks) + + +def make_test_link(testlinks): + return ", ".join(f"{get_github_link(n)}<>{n.name}" for n in testlinks) + + +def compare_json_files(file1: Path, expected_file: Path, object_hook): + """Golden File tests with a known good file and the one created""" + with open(file1) as f1: + json1 = json.load(f1, object_hook=object_hook) + with open(expected_file) as f2: + json2 = json.load(f2, object_hook=object_hook) + assert len(json1) == len(json2), ( + f"{file1}'s lenth are not the same as in the golden file lenght. Len of{file1}: {len(json1)}. Len of Golden File: {len(json2)}" + ) + c1 = Counter(n for n in json1) + c2 = Counter(n for n in json2) + assert c1 == c2, ( + f"Testfile does not have same needs as golden file. Testfile: {c1}\nGoldenFile: {c2}" + ) + + +def compare_grouped_json_files(file1: Path, golden_file: Path): + """Golden File tests with a known good file and the one created""" + with open(file1) as f1: + json1 = json.load(f1, object_hook=SourceCodeLinks_TEST_JSON_Decoder) + with open(golden_file) as f2: + json2 = json.load(f2, object_hook=SourceCodeLinks_TEST_JSON_Decoder) + + assert len(json1) == len(json2), ( + f"Input & Expected have different Lenghts. 
Input: {file1}: {len(json1)}, Expected: {golden_file}: {len(json2)}" + ) + + json1_sorted = sorted(json1, key=lambda x: x.need) + json2_sorted = sorted(json2, key=lambda x: x.need) + + for item1, item2 in zip(json1_sorted, json2_sorted, strict=False): + assert item1.need == item2.need, ( + f"Needs don't match: {item1.need} vs {item2.need}" + ) + + # Need to sort it to make sure we compare content not order + codelinks1_sorted = sorted(item1.links.CodeLinks) + codelinks2_sorted = sorted(item2.links.CodeLinks) + assert codelinks1_sorted == codelinks2_sorted, ( + f"CodeLinks don't match for {item1.need}. " + f"{file1}: {item1.links.CodeLinks}, {golden_file}: {item2.links.CodeLinks}" + ) + + testlinks1_sorted = sorted(item1.links.TestLinks) + testlinks2_sorted = sorted(item2.links.TestLinks) + assert testlinks1_sorted == testlinks2_sorted, ( + f"TestLinks don't match for {item1.need}. " + f"{file1}: {item1.links.TestLinks}, {golden_file}: {item2.links.TestLinks}" + ) + + +def test_source_link_integration_ok( + sphinx_app_setup: Callable[[], SphinxTestApp], + example_source_link_text_all_ok: dict[str, list[str]], + example_test_link_text_all_ok: dict[str, list[str]], + sphinx_base_dir, + git_repo_setup, + create_demo_files, +): + """This is a test description""" + app = sphinx_app_setup() + try: + os.environ["BUILD_WORKSPACE_DIRECTORY"] = str(sphinx_base_dir) + app.build() + ws_root = find_ws_root() + if ws_root is None: + # This should never happen + pytest.fail(f"WS_root is none. 
WS_root: {ws_root}") + Needs_Data = SphinxNeedsData(app.env) + needs_data = {x["id"]: x for x in Needs_Data.get_needs_view().values()} + compare_json_files( + app.outdir / "score_source_code_linker_cache.json", + sphinx_base_dir / ".expected_codelink.json", + needlink_test_decoder, + ) + compare_json_files( + app.outdir / "score_xml_parser_cache.json", + sphinx_base_dir / ".expected_testlink.json", + DataForTestLink_JSON_Decoder, + ) + compare_grouped_json_files( + app.outdir / "score_scl_grouped_cache.json", + sphinx_base_dir / ".expected_grouped.json", + ) + # Testing TREQ_ID_1, TREQ_ID_2, TREQ_ID_3 + + # TODO: Is this actually a good test, or just a weird mock? + for i in range(1, 4): + # extra_options are only available at runtime + assert f"TREQ_ID_{i}" in needs_data + need_as_dict = cast(dict[str, object], needs_data[f"TREQ_ID_{i}"]) + # TODO: This probably isn't great. Should make this better. + if i != 3: + # Excluding 3 as this is a keyerror here + expected_code_link = make_source_link( + example_source_link_text_all_ok[f"TREQ_ID_{i}"] + ) + print(f"EXPECTED LINK CODE: {expected_code_link}") + actual_source_code_link = cast( + list[str], need_as_dict["source_code_link"] + ) + print(f"ACTUALL CODE LINK: {actual_source_code_link}") + assert set(expected_code_link) == set(actual_source_code_link) + expected_test_link = make_test_link( + example_test_link_text_all_ok[f"TREQ_ID_{i}"] + ) + # Compare contents, regardless of order. 
+ print(f"NEED AS DICT: {need_as_dict}") + print(f"EXPECTED LINK TEST: {expected_test_link}") + actual_test_code_link = cast(list[str], need_as_dict["testlink"]) + print(f"ACTUALL TEST LINK: {actual_test_code_link}") + assert set(expected_test_link) == set(actual_test_code_link) + finally: + app.cleanup() + + +def test_source_link_integration_non_existent_id( + sphinx_app_setup: Callable[[], SphinxTestApp], + example_source_link_text_non_existent: dict[str, list[str]], + sphinx_base_dir, + git_repo_setup, + create_demo_files, +): + """Asserting warning if need not found""" + app = sphinx_app_setup() + try: + app.build() + warnings = app.warning.getvalue() + assert ( + "src/bad_implementation.py:2: Could not find TREQ_ID_200 in documentation" + in warnings + ) + finally: + app.cleanup() diff --git a/src/extensions/score_source_code_linker/tests/test_source_link.py b/src/extensions/score_source_code_linker/tests/test_source_link.py deleted file mode 100644 index 32c022f0..00000000 --- a/src/extensions/score_source_code_linker/tests/test_source_link.py +++ /dev/null @@ -1,336 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. 
-# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* -import contextlib -import json -import os -import shutil -import subprocess -from collections import Counter -from collections.abc import Callable -from pathlib import Path -from typing import cast - -import pytest -from pytest import TempPathFactory -from sphinx.testing.util import SphinxTestApp -from sphinx_needs.data import SphinxNeedsData -from test_requirement_links import needlink_test_decoder - -from src.extensions.score_source_code_linker import get_github_base_url, get_github_link -from src.extensions.score_source_code_linker.needlinks import NeedLink -from src.helper_lib import find_ws_root - - -@pytest.fixture() -def sphinx_base_dir(tmp_path_factory: TempPathFactory) -> Path: - return tmp_path_factory.mktemp("test_git_repo") - - -@pytest.fixture() -def git_repo_setup(sphinx_base_dir) -> Path: - """Creating git repo, to make testing possible""" - - repo_path = sphinx_base_dir - subprocess.run(["git", "init"], cwd=repo_path, check=True) - subprocess.run( - ["git", "config", "user.name", "Test User"], cwd=repo_path, check=True - ) - subprocess.run( - ["git", "config", "user.email", "test@example.com"], cwd=repo_path, check=True - ) - - subprocess.run( - ["git", "remote", "add", "origin", "https://github.com/testorg/testrepo.git"], - cwd=repo_path, - check=True, - ) - os.environ["BUILD_WORKSPACE_DIRECTORY"] = str(repo_path) - return repo_path - - -@pytest.fixture() -def create_demo_files(sphinx_base_dir, git_repo_setup): - repo_path = sphinx_base_dir - - # Create some source files with requirement IDs - source_dir = repo_path / "src" - source_dir.mkdir() - - # Create source files that contain requirement references - (source_dir / 
"implementation1.py").write_text(make_source_1()) - - (source_dir / "implementation2.py").write_text(make_source_2()) - (source_dir / "bad_implementation.py").write_text(make_bad_source()) - # Create a docs directory for Sphinx - docs_dir = repo_path / "docs" - docs_dir.mkdir() - (docs_dir / "index.rst").write_text(basic_needs()) - (docs_dir / "conf.py").write_text(basic_conf()) - curr_dir = Path(__file__).absolute().parent - # print("CURR_dir", curr_dir) - shutil.copyfile(curr_dir / "scl_golden_file.json", repo_path / ".golden_file.json") - - # Add files to git and commit - subprocess.run(["git", "add", "."], cwd=repo_path, check=True) - subprocess.run( - ["git", "commit", "-m", "Initial commit with test files"], - cwd=repo_path, - check=True, - ) - - # Cleanup - # Don't know if we need this? - # os.environ.pop("BUILD_WORKSPACE_DIRECTORY", None) - - -def make_source_1(): - return ( - """ -# This is a test implementation file -#""" - + """ req-Id: TREQ_ID_1 -def some_function(): - pass - -# Some other code here -# More code... 
-#""" - """ req-Id: TREQ_ID_2 -def another_function(): - pass -""" - ) - - -def make_source_2(): - return ( - """ -# Another implementation file -#""" - + """ req-Id: TREQ_ID_1 -class SomeClass: - def method(self): - pass - -""" - ) - - -def make_bad_source(): - return ( - """ -#""" - + """ req-Id: TREQ_ID_200 -def This_Should_Error(self): - pass - -""" - ) - - -def construct_gh_url() -> str: - gh = get_github_base_url() - return f"{gh}/blob/" - - -@pytest.fixture() -def sphinx_app_setup( - sphinx_base_dir, create_demo_files, git_repo_setup -) -> Callable[[], SphinxTestApp]: - def _create_app(): - base_dir = sphinx_base_dir - docs_dir = base_dir / "docs" - - original_cwd = None - # CRITICAL: Change to a directory that exists and is accessible - # This fixes the "no such file or directory" error in Bazel - with contextlib.suppress(FileNotFoundError): - original_cwd = os.getcwd() - - # Change to the base_dir before creating SphinxTestApp - os.chdir(base_dir) - try: - return SphinxTestApp( - freshenv=True, - srcdir=docs_dir, - confdir=docs_dir, - outdir=sphinx_base_dir / "out", - buildername="html", - warningiserror=True, - ) - finally: - # Try to restore original directory, but don't fail if it doesn't exist - if original_cwd is not None: - # Original directory might not exist anymore in Bazel sandbox - with contextlib.suppress(FileNotFoundError, OSError): - os.chdir(original_cwd) - - return _create_app - - -def basic_conf(): - return """ -extensions = [ - "sphinx_needs", - "score_source_code_linker", -] -needs_types = [ - dict( - directive="test_req", - title="Testing Requirement", - prefix="TREQ_", - color="#BFD8D2", - style="node", - ), -] -needs_extra_options = ["source_code_link"] -""" - - -def basic_needs(): - return """ -TESTING SOURCE LINK -=================== - -.. test_req:: TestReq1 - :id: TREQ_ID_1 - :status: valid - -.. 
test_req:: TestReq2 - :id: TREQ_ID_2 - :status: open -""" - - -@pytest.fixture() -def example_source_link_text_all_ok(sphinx_base_dir): - return { - "TREQ_ID_1": [ - NeedLink( - file=Path("src/implementation1.py"), - line=3, - tag="#" + " req-Id:", - need="TREQ_ID_1", - full_line="#" + " req-Id: TREQ_ID_1", - ), - NeedLink( - file=Path("src/implementation2.py"), - line=3, - tag="#" + " req-Id:", - need="TREQ_ID_1", - full_line="#" + " req-Id: TREQ_ID_1", - ), - ], - "TREQ_ID_2": [ - NeedLink( - file=Path("src/implementation1.py"), - line=9, - tag="#" + " req-Id:", - need="TREQ_ID_2", - full_line="#" + " req-Id: TREQ_ID_2", - ) - ], - } - - -@pytest.fixture() -def example_source_link_text_non_existent(sphinx_base_dir): - return [ - { - "TREQ_ID_200": [ - NeedLink( - file=Path("src/bad_implementation.py"), - line=2, - tag="#" + " req-Id:", - need="TREQ_ID_200", - full_line="#" + " req-Id: TREQ_ID_200", - ) - ] - } - ] - - -def make_source_link(needlinks): - return ", ".join(f"{get_github_link(n)}<>{n.file}:{n.line}" for n in needlinks) - - -def compare_json_files(file1: Path, golden_file: Path): - with open(file1) as f1: - json1 = json.load(f1, object_hook=needlink_test_decoder) - with open(golden_file) as f2: - json2 = json.load(f2, object_hook=needlink_test_decoder) - assert len(json1) == len(json2), ( - f"{file1}'s lenth are not the same as in the golden file lenght. " - f"Len of{file1}: {len(json1)}. Len of Golden File: {len(json2)}" - ) - c1 = Counter(n for n in json1) - c2 = Counter(n for n in json2) - assert c1 == c2, ( - "Testfile does not have same needs as golden file. 
" - f"Testfile: {c1}\nGoldenFile: {c2}" - ) - - -def test_source_link_integration_ok( - sphinx_app_setup: Callable[[], SphinxTestApp], - example_source_link_text_all_ok: dict[str, list[str]], - sphinx_base_dir, - git_repo_setup, - create_demo_files, -): - app = sphinx_app_setup() - try: - os.environ["BUILD_WORKSPACE_DIRECTORY"] = str(sphinx_base_dir) - app.build() - ws_root = find_ws_root() - if ws_root is None: - # This should never happen - pytest.fail(f"WS_root is none. WS_root: {ws_root}") - Needs_Data = SphinxNeedsData(app.env) - needs_data = {x["id"]: x for x in Needs_Data.get_needs_view().values()} - compare_json_files( - app.outdir / "score_source_code_linker_cache.json", - sphinx_base_dir / ".golden_file.json", - ) - # Testing TREQ_ID_1 & TREQ_ID_2 - for i in range(1, 3): - assert f"TREQ_ID_{i}" in needs_data - need_as_dict = cast(dict[str, object], needs_data[f"TREQ_ID_{i}"]) - expected_link = make_source_link( - example_source_link_text_all_ok[f"TREQ_ID_{i}"] - ) - # extra_options are only available at runtime - # Compare contents, regardless of order. 
- actual_source_code_link = cast(list[str], need_as_dict["source_code_link"]) - assert set(expected_link) == set(actual_source_code_link) - finally: - app.cleanup() - - -def test_source_link_integration_non_existent_id( - sphinx_app_setup: Callable[[], SphinxTestApp], - example_source_link_text_non_existent: dict[str, list[str]], - sphinx_base_dir, - git_repo_setup, - create_demo_files, -): - app = sphinx_app_setup() - try: - app.build() - warnings = app.warning.getvalue() - assert ( - "src/bad_implementation.py:2: Could not find TREQ_ID_200 in documentation" - in warnings - ) - finally: - app.cleanup() diff --git a/src/extensions/score_source_code_linker/tests/test_testlink.py b/src/extensions/score_source_code_linker/tests/test_testlink.py new file mode 100644 index 00000000..09e08d25 --- /dev/null +++ b/src/extensions/score_source_code_linker/tests/test_testlink.py @@ -0,0 +1,118 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +import json +from pathlib import Path + +from src.extensions.score_source_code_linker.testlink import ( + DataForTestLink, + DataForTestLink_JSON_Decoder, + DataForTestLink_JSON_Encoder, + DataOfTestCase, + load_test_xml_parsed_json, + store_test_xml_parsed_json, +) + + +def test_testlink_serialization_roundtrip(): + link = DataForTestLink( + name="my_test", + file=Path("some/file.py"), + line=123, + need="REQ_001", + verify_type="fully", + result="passed", + result_text="All good", + ) + dumped = json.dumps(link, cls=DataForTestLink_JSON_Encoder) + loaded = json.loads(dumped, object_hook=DataForTestLink_JSON_Decoder) + + assert isinstance(loaded, DataForTestLink) + assert loaded == link + + +def test_testlink_encoder_handles_path(): + data = {"file": Path("some/thing.py")} + encoded = json.dumps(data, cls=DataForTestLink_JSON_Encoder) + assert '"file": "some/thing.py"' in encoded + + +def test_decoder_ignores_irrelevant_dicts(): + input_data = {"foo": "bar"} + result = DataForTestLink_JSON_Decoder(input_data) + assert result == input_data + + +def test_clean_text_removes_ansi_and_html_unescapes(): + raw = "\x1b[31m<b>Warning</b>\x1b[0m\nExtra line" + cleaned = DataOfTestCase.clean_text(raw) + assert cleaned == "Warning Extra line" + + +def test_testcaseneed_to_dict_multiple_links(): + case = DataOfTestCase( + name="TC_01", + file="src/test.py", + line="10", + result="failed", + TestType="unit", + DerivationTechnique="manual", + result_text="Something went wrong", + PartiallyVerifies="REQ-1, REQ-2", + FullyVerifies="REQ-3", + ) + + links = case.get_test_links() + + assert len(links) == 3 + need_ids = [link.need for link in links] + assert set(need_ids) 
== {"REQ-1", "REQ-2", "REQ-3"} + + for link in links: + assert link.file == Path("src/test.py") + assert link.line == 10 + assert link.name == "TC_01" + assert link.result == "failed" + + +def test_store_and_load_testlinks_roundtrip(tmp_path): + file = tmp_path / "testlinks.json" + + links = [ + DataForTestLink( + name="L1", + file=Path("abc.py"), + line=1, + need="REQ_A", + verify_type="partially", + result="passed", + result_text="Looks good", + ), + DataForTestLink( + name="L2", + file=Path("def.py"), + line=2, + need="REQ_B", + verify_type="fully", + result="failed", + result_text="Needs work", + ), + ] + + store_test_xml_parsed_json(file, links) + assert file.exists() + + reloaded = load_test_xml_parsed_json(file) + + assert reloaded == links + for link in reloaded: + assert isinstance(link, DataForTestLink) diff --git a/src/extensions/score_source_code_linker/tests/test_xml_parser.py b/src/extensions/score_source_code_linker/tests/test_xml_parser.py new file mode 100644 index 00000000..c87e7947 --- /dev/null +++ b/src/extensions/score_source_code_linker/tests/test_xml_parser.py @@ -0,0 +1,146 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +""" +Tests for the xml_parser.py file. +Keep in mind that this is with the 'assertions' inside xml_parser disabled so far. 
+Once we enable those we will need to change the tests +""" + +import xml.etree.ElementTree as ET +from pathlib import Path +from typing import Any + +import pytest + +import src.extensions.score_source_code_linker.xml_parser as xml_parser +from src.extensions.score_source_code_linker.testlink import DataOfTestCase + + +# Unsure if I should make these last a session or not +@pytest.fixture +def tmp_xml_dirs(tmp_path): + root = tmp_path / "bazel-testlogs" + dir1 = root / "with_props" + dir2 = root / "no_props" + dir1.mkdir(parents=True) + dir2.mkdir(parents=True) + + def write(file_path: Path, testcases: list[ET.Element]): + ts = ET.Element("testsuites") + suite = ET.SubElement(ts, "testsuite") + for tc in testcases: + suite.append(tc) + tree = ET.ElementTree(ts) + tree.write(file_path, encoding="utf-8", xml_declaration=True) + + def make_tc( + name: str, + result: str = "", + props: dict[str, str] = dict(), + file: str = "", + line: int = 0, + ): + tc = ET.Element("testcase", {"name": name}) + if file: + tc.set("file", file) + if line: + tc.set("line", str(line)) + if result == "failed": + ET.SubElement(tc, "failure", {"message": "failmsg"}) + elif result == "skipped": + ET.SubElement(tc, "skipped", {"message": "skipmsg"}) + if props: + props_el = ET.SubElement(tc, "properties") + for k, v in props.items(): + ET.SubElement(props_el, "property", {"name": k, "value": v}) + return tc + + # File with properties + tc1 = make_tc( + "tc_with_props", + result="failed", + props={ + "PartiallyVerifies": "REQ1", + "FullyVerifies": "", + "TestType": "type", + "DerivationTechnique": "tech", + "Description": "desc", + }, + file="path1", + line=10, + ) + write(dir1 / "test.xml", [tc1]) + + # File without properties + # HINT: Once the assertions in xml_parser are back and active, this should allow us to catch that the tests + # Need to be changed too. 
+ tc2 = make_tc("tc_no_props", file="path2", line=20) + write(dir2 / "test.xml", [tc2]) + + return root, dir1, dir2 + + +def test_find_xml_files(tmp_xml_dirs): + root, dir1, dir2 = tmp_xml_dirs + found = xml_parser.find_xml_files(root) + expected = {dir1 / "test.xml", dir2 / "test.xml"} + assert set(found) == expected + + +def test_parse_testcase_result(): + tc = ET.Element("testcase", {"name": "a"}) + assert xml_parser.parse_testcase_result(tc) == ("passed", "") + + tc2 = ET.Element("testcase", {"name": "b", "status": "notrun"}) + assert xml_parser.parse_testcase_result(tc2) == ("disabled", "") + + tc3 = ET.Element("testcase", {"name": "c"}) + ET.SubElement(tc3, "failure", {"message": "err"}) + assert xml_parser.parse_testcase_result(tc3) == ("failed", "err") + + tc4 = ET.Element("testcase", {"name": "d"}) + ET.SubElement(tc4, "skipped", {"message": "skp"}) + assert xml_parser.parse_testcase_result(tc4) == ("skipped", "skp") + + +def test_parse_properties(): + cp: dict[str, Any] = {} + props_el = ET.Element("properties") + ET.SubElement(props_el, "property", {"name": "A", "value": "1"}) + ET.SubElement(props_el, "property", {"name": "Description", "value": "ignored"}) + res = xml_parser.parse_properties(cp, props_el) + assert res["A"] == "1" + assert "Description" not in res + + +def test_read_test_xml_file(tmp_xml_dirs): + root, dir1, dir2 = tmp_xml_dirs + + needs1, no_props1 = xml_parser.read_test_xml_file(dir1 / "test.xml") + assert isinstance(needs1, list) and len(needs1) == 1 + tcneed = needs1[0] + assert isinstance(tcneed, DataOfTestCase) + assert tcneed.result == "failed" + assert no_props1 == [] + + needs2, no_props2 = xml_parser.read_test_xml_file(dir2 / "test.xml") + assert needs2 == [] + assert no_props2 == ["tc_no_props"] + + +def test_short_hash_consistency_and_format(): + h1 = xml_parser.short_hash("foo") + h2 = xml_parser.short_hash("foo") + assert h1 == h2 + assert h1.isalpha() + assert len(h1) == 5 diff --git 
a/src/extensions/score_source_code_linker/xml_parser.py b/src/extensions/score_source_code_linker/xml_parser.py new file mode 100644 index 00000000..9ef65bc4 --- /dev/null +++ b/src/extensions/score_source_code_linker/xml_parser.py @@ -0,0 +1,241 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +""" +This file deals with finding and parsing of test.xml files that get created during `bazel test`. +It also generates external needs out of the parsed testcases to enable linking to requirements &gathering statistics +""" + +import contextlib +import base64 +import hashlib +import itertools +import os +import xml.etree.ElementTree as ET +from pathlib import Path +from typing import Any +from xml.etree.ElementTree import Element + +from sphinx.application import Sphinx +from sphinx.environment import BuildEnvironment +from sphinx_needs import logging +from sphinx_needs.api import add_external_need + +from src.extensions.score_source_code_linker.testlink import ( + DataOfTestCase, + store_data_of_test_case_json, + store_test_xml_parsed_json, +) +from src.helper_lib import find_ws_root +from src.helper_lib.additional_functions import get_github_link + +logger = logging.get_logger(__name__) +logger.setLevel("DEBUG") + + +def parse_testcase_result(testcase: ET.Element) -> tuple[str, str]: + """ + Returns 'result' and 'result_text' found in the 'message' + attribute of the result. 
+ Example: + => + + Returns: + ("skipped", "Test skip message") + """ + skipped = testcase.find("skipped") + failed = testcase.find("failure") + status = testcase.get("status") + # NOTE: Special CPP case of 'disabled' + if status is not None and status == "notrun": + return "disabled", "" + if skipped is None and failed is None: + return "passed", "" + if failed is not None: + return "failed", failed.get("message", "") + if skipped is not None: + return "skipped", skipped.get("message", "") + # TODO: Test all possible permuations of this to find if this is unreachable + raise ValueError( + f"Testcase: {testcase.get('name')}. Did not find 'failed', 'skipped' or 'passed' in test" + ) + + +def parse_properties(case_properties: dict[str, Any], properties: Element): + for prop in properties: + prop_name = prop.get("name", "") + prop_value = prop.get("value", "") + # We ignore the Description of the test as a 'property'. + # Every language just needs to ensure each test does have a description. No matter where this resides. + if prop_name == "Description": + continue + case_properties[prop_name] = prop_value + return case_properties + + +def read_test_xml_file(file: Path) -> tuple[list[DataOfTestCase], list[str]]: + """ + Reading & parsing the test.xml files into TestCaseNeeds + + Returns: + tuple consisting of: + - list[TestCaseNeed] + - list[str] => Testcase Names that did not have the required properties. + """ + test_case_needs: list[DataOfTestCase] = [] + non_prop_tests: list[str] = [] + tree = ET.parse(file) + root = tree.getroot() + + for testsuite in root.findall("testsuite"): + for testcase in testsuite.findall("testcase"): + case_properties = {} + testname = testcase.get("name") + assert testname is not None, ( + f"Testcase: {testcase} does not have a 'name' attribute. This is mandatory. This should not happen, something is wrong." 
+ ) + test_file = testcase.get("file") + line = testcase.get("line") + + # ╭──────────────────────────────────────╮ + # │ Assert worldview that mandatory │ + # │ things are actually there │ + # │ Disabled temporarily │ + # ╰──────────────────────────────────────╯ + + # assert test_file is not None, ( + # f"Testcase: {testname} does not have a 'file' attribute. This is mandatory" + # ) + # assert lineNr is not None, ( + # f"Testcase: {testname} located in {test_file} does not have a 'lineNr' attribute. This is mandator" + # ) + case_properties["name"] = testname + case_properties["file"] = test_file + case_properties["line"] = line + case_properties["result"], case_properties["result_text"] = ( + parse_testcase_result(testcase) + ) + + properties_element = testcase.find("properties") + # HINT: This list is hard coded here, might not be ideal to have that in the long run. + if properties_element is None: + non_prop_tests.append(testname) + continue + + # ╓ ╖ + # ║ Disabled Temporarily ║ + # ╙ ╜ + # assert properties_element is not None, ( + # f"Testcase: {testname} located in {test_file}:{lineNr}, does not have any properties. Properties 'TestType', 'DerivationTechnique' and either 'PartiallyVerifies' or 'FullyVerifies' are mandatory." + # ) + + case_properties = parse_properties(case_properties, properties_element) + test_case_needs.append(DataOfTestCase.from_dict(case_properties)) + return test_case_needs, non_prop_tests + + +def find_xml_files(dir: Path) -> list[Path]: + """ + Recursively search all test.xml files inside 'bazel-testlogs' + + Returns: + - list[Path] => Paths to all found 'test.xml' files. 
+ """ + + test_file_name = "test.xml" + + xml_paths: list[Path] = [] + for root, _, files in os.walk(dir): + if test_file_name in files: + xml_paths.append(Path(os.path.join(root, test_file_name))) + return xml_paths + + +def run_xml_parser(app: Sphinx, env: BuildEnvironment): + """ + This is the 'main' function for parsing test.xml's and + building testcase needs. + It gets called from the source_code_linker __init__ + """ + ws_root = find_ws_root() + assert ws_root is not None + bazel_testlogs = ws_root / "bazel-testlogs" + xml_file_paths = find_xml_files(bazel_testlogs) + test_case_needs = build_test_needs_from_files(app, env, xml_file_paths) + # Saving the test case needs for cache + store_data_of_test_case_json( + app.outdir / "score_testcaseneeds_cache.json", test_case_needs + ) + output = list( + itertools.chain.from_iterable(tcn.get_test_links() for tcn in test_case_needs) + ) + # This is not ideal, due to duplication, but I can't think of a better solution right now + store_test_xml_parsed_json(app.outdir / "score_xml_parser_cache.json", output) + + +def build_test_needs_from_files( + app: Sphinx, env: BuildEnvironment, xml_paths: list[Path] +) -> list[DataOfTestCase]: + """ + Reading in all test.xml files, and building 'testcase' external need objects out of them. + + Returns: + - list[TestCaseNeed] + """ + tcns: list[DataOfTestCase] = [] + for f in xml_paths: + b, z = read_test_xml_file(f) + for non_prop_test in z: + # We probably do not want to do this as a warning yet + logger.info( + f"Test: {non_prop_test} has no properties. 
Could not create need" + ) + # Now we build the needs from it + tcns.extend(b) + for c in b: + construct_and_add_need(app, c) + return tcns + + +def short_hash(input_str: str, length: int = 5) -> str: + # Get a stable hash + sha256 = hashlib.sha256(input_str.encode()).digest() + # Encode to base32 (A-Z + 2-7), decode to str, remove padding + b32 = base64.b32encode(sha256).decode("utf-8").rstrip("=") + # Keep only alphabetic characters + letters_only = "".join(filter(str.isalpha, b32)) + # Return the first `length` letters + return letters_only[:length].lower() + + +def construct_and_add_need(app: Sphinx, tn: DataOfTestCase): + # IDK if this is ideal or not + with contextlib.suppress(BaseException): + _ = add_external_need( + app=app, + need_type="testcase", + title=tn.name, + tags="TEST", + id=f"testcase__{tn.name}_{short_hash(tn.file + tn.name).upper()}", + name=tn.name, + external_url=get_github_link(tn), + fully_verifies=tn.FullyVerifies if tn.FullyVerifies is not None else "", + partially_verifies=tn.PartiallyVerifies + if tn.PartiallyVerifies is not None + else "", + test_type=tn.TestType, + derivation_technique=tn.DerivationTechnique, + file=tn.file, + line=tn.line, + result=tn.result, # We just want the 'failed' or whatever + result_text=tn.result_text if tn.result_text else "", + ) diff --git a/src/helper_lib/BUILD b/src/helper_lib/BUILD index 8e0e16b2..ac51fc2f 100644 --- a/src/helper_lib/BUILD +++ b/src/helper_lib/BUILD @@ -16,9 +16,13 @@ load("@score_python_basics//:defs.bzl", "score_py_pytest") py_library( name = "helper_lib", - srcs = ["__init__.py"], + srcs = [ + "__init__.py", + "additional_functions.py", + ], imports = ["."], visibility = ["//visibility:public"], + deps = ["@score_docs_as_code//src/extensions/score_source_code_linker:source_code_linker_helpers"], ) score_py_pytest( diff --git a/src/helper_lib/additional_functions.py b/src/helper_lib/additional_functions.py new file mode 100644 index 00000000..5b1ce6d9 --- /dev/null +++ 
b/src/helper_lib/additional_functions.py @@ -0,0 +1,38 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +from pathlib import Path + +# Import types that depend on score_source_code_linker +from src.extensions.score_source_code_linker.needlinks import DefaultNeedLink, NeedLink +from src.extensions.score_source_code_linker.testlink import ( + DataForTestLink, + DataOfTestCase, +) +from src.helper_lib import ( + find_git_root, + get_current_git_hash, + get_github_base_url, +) + + +def get_github_link( + link: NeedLink | DataForTestLink | DataOfTestCase | None = None, +) -> str: + if link is None: + link = DefaultNeedLink() + passed_git_root = find_git_root() + if passed_git_root is None: + passed_git_root = Path() + base_url = get_github_base_url() + current_hash = get_current_git_hash(passed_git_root) + return f"{base_url}/blob/{current_hash}/{link.file}#L{link.line}" diff --git a/src/tests/test_consumer.py b/src/tests/test_consumer.py index 36ce3365..ef1dbda8 100644 --- a/src/tests/test_consumer.py +++ b/src/tests/test_consumer.py @@ -23,8 +23,7 @@ from rich.console import Console from rich.table import Table -from src.extensions.score_source_code_linker import get_github_base_url -from src.helper_lib import find_git_root +from src.helper_lib import find_git_root, get_github_base_url """ This script's main usecase is to test consumers of Docs-As-Code with From aae98cab79ae41fd7109746afabb3083980e44df Mon Sep 17 00:00:00 2001 From: 
Aymen-Soussi-01 Date: Mon, 18 Aug 2025 15:03:31 +0200 Subject: [PATCH 107/231] Fix new needs weak content not detected (#214) * Fix and simplify complexity for weak content check * handle edge cases to remove matching between "something" and "thing" * Correct PR #211 remaining comments --- docs/requirements/requirements.rst | 4 ++-- .../score_draw_uml_funcs/__init__.py | 13 ++++++++--- .../checks/attributes_format.py | 15 +++++++------ .../score_metamodel/checks/check_options.py | 2 ++ .../rst/attributes/test_prohibited_words.rst | 22 +++++++++++++++++-- .../tests/test_check_options.py | 20 +++++++++++++++++ src/tests/test_consumer.py | 4 ++-- 7 files changed, 64 insertions(+), 16 deletions(-) diff --git a/docs/requirements/requirements.rst b/docs/requirements/requirements.rst index 7fe086be..26bb3e85 100644 --- a/docs/requirements/requirements.rst +++ b/docs/requirements/requirements.rst @@ -132,7 +132,7 @@ This section provides an overview of current process requirements and their clar :parent_covered: YES Docs-as-Code shall enforce that requirement descriptions do not contain the following weak words: - just, about, really, some, thing, absol-utely + ju-st, ab-out, rea-lly, so-me, th-ing, absol-utely This rule applies to: @@ -457,7 +457,7 @@ Mapping ================================ =========================== .. note:: - Some tool requirements do not have a matching process requirement. + Certain tool requirements do not have a matching process requirement. .. 
tool_req:: Safety: enforce safe linking :id: tool_req__docs_common_attr_safety_link_check diff --git a/src/extensions/score_draw_uml_funcs/__init__.py b/src/extensions/score_draw_uml_funcs/__init__.py index 96e0b93c..da29e6cc 100644 --- a/src/extensions/score_draw_uml_funcs/__init__.py +++ b/src/extensions/score_draw_uml_funcs/__init__.py @@ -47,6 +47,15 @@ from sphinx.application import Sphinx from sphinx_needs.logging import get_logger +CollectResult = tuple[ + str, # structure_text + str, # link_text + dict[str, str], # proc_impl_interfaces + dict[str, list[str]], # proc_used_interfaces + dict[str, str], # impl_comp + list[str], # proc_modules +] + logger = get_logger(__file__) @@ -399,9 +408,7 @@ def _collect_interfaces_and_modules( proc_used_interfaces: dict[str, list[str]], structure_text: str, link_text: str, - ) -> tuple[ - str, str, dict[str, str], dict[str, list[str]], dict[str, str], list[str] - ]: + ) -> CollectResult: """Process interfaces and load modules for implementation.""" for iface in interfacelist: if all_needs.get(iface): diff --git a/src/extensions/score_metamodel/checks/attributes_format.py b/src/extensions/score_metamodel/checks/attributes_format.py index ebf07157..95c75ec2 100644 --- a/src/extensions/score_metamodel/checks/attributes_format.py +++ b/src/extensions/score_metamodel/checks/attributes_format.py @@ -11,6 +11,8 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* +import string + from score_metamodel import CheckLogger, ProhibitedWordCheck, ScoreNeedType, local_check from sphinx.application import Sphinx from sphinx_needs.data import NeedsInfoType @@ -70,11 +72,9 @@ def check_id_length(app: Sphinx, need: NeedsInfoType, log: CheckLogger): if parts[1] == "example_feature": max_lenght += 17 # _example_feature_ if len(need["id"]) > max_lenght: - length = 0 - if "example_feature" not in need["id"]: - length = len(need["id"]) - else: - length = len(need["id"]) - 17 
+ length = len(need["id"]) + if "example_feature" in need["id"]: + length -= 17 msg = ( f"exceeds the maximum allowed length of 45 characters " "(current length: " @@ -92,9 +92,10 @@ def _check_options_for_prohibited_words( for option in options: forbidden_words = prohibited_word_checks.option_check[option] for word in need[option].split(): - if word in forbidden_words: + normalized = word.strip(string.punctuation).lower() + if normalized in forbidden_words: msg = ( - f"contains a weak word: `{word}` in option: `{option}`. " + f"contains a weak word: `{normalized}` in option: `{option}`. " "Please revise the wording." ) log.warning_for_need(need, msg) diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index 1df4da0f..f715ea2b 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -88,6 +88,8 @@ def remove_prefix(word: str, prefixes: list[str]) -> str: need, f"is missing required {field_type}: `{field}`." ) continue # Skip empty optional fields + # Try except used to add more context to Error without passing variables + # just for that to function try: values = _normalize_values(raw_value) except ValueError as err: diff --git a/src/extensions/score_metamodel/tests/rst/attributes/test_prohibited_words.rst b/src/extensions/score_metamodel/tests/rst/attributes/test_prohibited_words.rst index 0c7af6a0..240de822 100644 --- a/src/extensions/score_metamodel/tests/rst/attributes/test_prohibited_words.rst +++ b/src/extensions/score_metamodel/tests/rst/attributes/test_prohibited_words.rst @@ -30,7 +30,7 @@ -.. Title of an architecture element contains a stop word +.. Title of an architecture element contains a stop word #EXPECT: stkh_req__test_title_bad: contains a weak word: `must` in option: `title`. Please revise the wording. .. 
stkh_req:: This must work @@ -40,7 +40,7 @@ #EXPECT-NOT: stkh_req__test_title_good: contains a weak word: `must` in option: `title`. Please revise the wording. -.. stkh_req:: This is a teset +.. stkh_req:: This is a test :id: stkh_req__test_title_good @@ -73,3 +73,21 @@ :id: feat_arc_sta_desc_good This should really work + + +#EXPECT: tool_req__docs_common_attr_desc_wording: contains a weak word: `just` in option: `content`. Please revise the wording. + +.. tool_req:: Enforces description wording rules + :id: tool_req__docs_common_attr_desc_wording + :tags: Common Attributes + :implemented: YES + :satisfies: + PROCESS_gd_req__req_desc_weak, + :parent_covered: YES + + Docs-as-Code shall enforce that requirement descriptions do not contain the following weak words: + just, about, really, some, thing, absolut-ely + + This rule applies to: + + * all requirement types defined in :need:`tool_req__docs_req_types`, except process requirements. diff --git a/src/extensions/score_metamodel/tests/test_check_options.py b/src/extensions/score_metamodel/tests/test_check_options.py index 438105a6..85ba06dd 100644 --- a/src/extensions/score_metamodel/tests/test_check_options.py +++ b/src/extensions/score_metamodel/tests/test_check_options.py @@ -199,3 +199,23 @@ def test_unknown_option_present_in_neither_req_opt_neither_opt_opt(self): "has these extra options: `other_option`.", expect_location=False, ) + + def test_invalid_option_value_type_raises_value_error(self): + # Given a need with an option of wrong type (list with non-str) + need_1 = need( + target_id="tool_req__002", + id="tool_req__002", + type="tool_req", + some_required_option=123, + docname=None, + lineno=None, + ) + + logger = fake_check_logger() + app = Mock(spec=Sphinx) + app.config = Mock() + app.config.needs_types = self.NEED_TYPE_INFO + app.config.allowed_external_prefixes = [] + + with pytest.raises(ValueError, match="Only Strings are allowed"): + check_options(app, need_1, logger) diff --git 
a/src/tests/test_consumer.py b/src/tests/test_consumer.py index ef1dbda8..db315e05 100644 --- a/src/tests/test_consumer.py +++ b/src/tests/test_consumer.py @@ -477,8 +477,8 @@ def run_test_commands(): def setup_test_environment(sphinx_base_dir, pytestconfig): """Set up the test environment and return necessary paths and metadata.""" git_root = find_git_root() - if git_root is None: - raise RuntimeError("Git root was not found") + + assert git_root is None, "Git root was not found" gh_url = get_github_base_url() current_hash = get_current_git_commit(git_root) From e48a1fc1acb0142380502d9ed5eff5588a68d397 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Mon, 18 Aug 2025 16:59:53 +0200 Subject: [PATCH 108/231] Followup PR to integrate nits of #207 (#212) * Followup PR to integrate nits of #207 --- .../tests/test_codelink.py | 138 -------------- src/helper_lib/__init__.py | 7 +- src/helper_lib/test_helper_lib.py | 172 +++++++++++++++++- 3 files changed, 176 insertions(+), 141 deletions(-) diff --git a/src/extensions/score_source_code_linker/tests/test_codelink.py b/src/extensions/score_source_code_linker/tests/test_codelink.py index 633c0125..03730217 100644 --- a/src/extensions/score_source_code_linker/tests/test_codelink.py +++ b/src/extensions/score_source_code_linker/tests/test_codelink.py @@ -124,75 +124,6 @@ def git_repo(temp_dir): return git_dir -@pytest.fixture -def git_repo_with_https_remote(temp_dir): - """Create a git repository with HTTPS remote for testing.""" - git_dir = temp_dir / "test_repo_https" - git_dir.mkdir() - - # Initialize git repo - subprocess.run(["git", "init"], cwd=git_dir, check=True, capture_output=True) - subprocess.run( - ["git", "config", "user.email", "test@example.com"], cwd=git_dir, check=True - ) - subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) - - # Create a test file and commit - test_file = git_dir / "test_file.py" - test_file.write_text("# Test 
file\nprint('hello')\n") - subprocess.run(["git", "add", "."], cwd=git_dir, check=True) - subprocess.run(["git", "commit", "-m", "Initial commit"], cwd=git_dir, check=True) - - # Add HTTPS remote - subprocess.run( - [ - "git", - "remote", - "add", - "origin", - "https://github.com/test-user/test-repo.git", - ], - cwd=git_dir, - check=True, - ) - - return git_dir - - -@pytest.fixture -def git_repo_multiple_remotes(temp_dir): - """Create a git repository with multiple remotes for testing.""" - git_dir = temp_dir / "test_repo_multiple" - git_dir.mkdir() - - # Initialize git repo - subprocess.run(["git", "init"], cwd=git_dir, check=True, capture_output=True) - subprocess.run( - ["git", "config", "user.email", "test@example.com"], cwd=git_dir, check=True - ) - subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) - - # Create a test file and commit - test_file = git_dir / "test_file.py" - test_file.write_text("# Test file\nprint('hello')\n") - subprocess.run(["git", "add", "."], cwd=git_dir, check=True) - subprocess.run(["git", "commit", "-m", "Initial commit"], cwd=git_dir, check=True) - - # Add multiple remotes - subprocess.run( - ["git", "remote", "add", "upstream", "git@github.com:upstream/test-repo.git"], - cwd=git_dir, - check=True, - ) - subprocess.run( - ["git", "remote", "add", "origin", "git@github.com:test-user/test-repo.git"], - cwd=git_dir, - check=True, - ) - - return git_dir - - @pytest.fixture def sample_needlinks(): """Create sample NeedLink objects for testing.""" @@ -354,75 +285,6 @@ def test_group_by_need_empty_list(): assert len(result) == 0 -# Test git-related functions -def test_parse_git_output_ssh_format(): - """Test parsing git remote output in SSH format.""" - git_line = "origin git@github.com:test-user/test-repo.git (fetch)" - result = parse_remote_git_output(git_line) - assert result == "test-user/test-repo" - - -def test_parse_git_output_https_format(): - """Test parsing git remote output in HTTPS format.""" - 
git_line = "origin https://github.com/test-user/test-repo.git (fetch)" - result = parse_remote_git_output(git_line) - assert result == "test-user/test-repo" - - -def test_parse_git_output_ssh_format_without_git_suffix(): - """Test parsing git remote output in SSH format without .git suffix.""" - git_line = "origin git@github.com:test-user/test-repo (fetch)" - result = parse_remote_git_output(git_line) - assert result == "test-user/test-repo" - - -def test_parse_git_output_invalid_format(): - """Test parsing invalid git remote output.""" - git_line = "invalid" - result = parse_remote_git_output(git_line) - assert result == "" - - -def test_parse_git_output_empty_string(): - """Test parsing empty git remote output.""" - git_line = "" - result = parse_remote_git_output(git_line) - assert result == "" - - -def test_get_github_repo_info_ssh_remote(git_repo): - """Test getting GitHub repository information with SSH remote.""" - result = get_github_repo_info(git_repo) - assert result == "test-user/test-repo" - - -def test_get_github_repo_info_https_remote(git_repo_with_https_remote): - """Test getting GitHub repository information with HTTPS remote.""" - result = get_github_repo_info(git_repo_with_https_remote) - assert result == "test-user/test-repo" - - -def test_get_github_repo_info_multiple_remotes(git_repo_multiple_remotes): - """Test GitHub repo info retrieval with multiple remotes (origin preferred).""" - result = get_github_repo_info(git_repo_multiple_remotes) - assert result == "test-user/test-repo" - - -def test_get_current_git_hash(git_repo): - """Test getting current git hash.""" - result = get_current_git_hash(git_repo) - - # Verify it's a valid git hash (40 hex characters) - assert len(result) == 40 - assert all(c in "0123456789abcdef" for c in result) - - -def test_get_current_git_hash_invalid_repo(temp_dir): - """Test getting git hash from invalid repository.""" - with pytest.raises(subprocess.CalledProcessError): - get_current_git_hash(temp_dir) - - def 
test_get_github_link_with_real_repo(git_repo): """Test generating GitHub link with real repository.""" # Create a needlink diff --git a/src/helper_lib/__init__.py b/src/helper_lib/__init__.py index 08d366ef..ff201eae 100644 --- a/src/helper_lib/__init__.py +++ b/src/helper_lib/__init__.py @@ -75,7 +75,7 @@ def parse_remote_git_output(str_line: str) -> str: def get_github_repo_info(git_root_cwd: Path) -> str: """ - Extract GitHub repository info from git remotes. + Query git for the github remote repository (based on heuristic). Execution context behavior: - Works consistently across all contexts when given valid git directory @@ -154,5 +154,8 @@ def get_current_git_hash(git_root: Path) -> str: assert all(c in "0123456789abcdef" for c in decoded_result) return decoded_result except Exception as e: - LOGGER.warning(f"Unexpected error: {git_root}", exc_info=e) + LOGGER.warning( + f"Unexpected error while trying to get git_hash. Exceuted in: {git_root}", + exc_info=e, + ) raise diff --git a/src/helper_lib/test_helper_lib.py b/src/helper_lib/test_helper_lib.py index e3ca45d4..d1d0a2f8 100644 --- a/src/helper_lib/test_helper_lib.py +++ b/src/helper_lib/test_helper_lib.py @@ -17,7 +17,11 @@ import pytest -from src.helper_lib import get_current_git_hash, get_github_repo_info +from src.helper_lib import ( + get_current_git_hash, + get_github_repo_info, + parse_remote_git_output, +) @pytest.fixture @@ -27,6 +31,103 @@ def temp_dir(): yield Path(temp_dir) +@pytest.fixture +def git_repo(temp_dir): + """Create a real git repository for testing.""" + git_dir = temp_dir / "test_repo" + git_dir.mkdir() + + # Initialize git repo + subprocess.run(["git", "init"], cwd=git_dir, check=True, capture_output=True) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=git_dir, check=True + ) + subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) + + # Create a test file and commit + test_file = git_dir / "test_file.py" + 
test_file.write_text("# Test file\nprint('hello')\n") + subprocess.run(["git", "add", "."], cwd=git_dir, check=True) + subprocess.run(["git", "commit", "-m", "Initial commit"], cwd=git_dir, check=True) + + # Add a remote + subprocess.run( + ["git", "remote", "add", "origin", "git@github.com:test-user/test-repo.git"], + cwd=git_dir, + check=True, + ) + return git_dir + + +@pytest.fixture +def git_repo_multiple_remotes(temp_dir): + """Create a git repository with multiple remotes for testing.""" + git_dir = temp_dir / "test_repo_multiple" + git_dir.mkdir() + + # Initialize git repo + subprocess.run(["git", "init"], cwd=git_dir, check=True, capture_output=True) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=git_dir, check=True + ) + subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) + + # Create a test file and commit + test_file = git_dir / "test_file.py" + test_file.write_text("# Test file\nprint('hello')\n") + subprocess.run(["git", "add", "."], cwd=git_dir, check=True) + subprocess.run(["git", "commit", "-m", "Initial commit"], cwd=git_dir, check=True) + + # Add multiple remotes + subprocess.run( + ["git", "remote", "add", "upstream", "git@github.com:upstream/test-repo.git"], + cwd=git_dir, + check=True, + ) + subprocess.run( + ["git", "remote", "add", "origin", "git@github.com:test-user/test-repo.git"], + cwd=git_dir, + check=True, + ) + + return git_dir + + +@pytest.fixture +def git_repo_with_https_remote(temp_dir): + """Create a git repository with HTTPS remote for testing.""" + git_dir = temp_dir / "test_repo_https" + git_dir.mkdir() + + # Initialize git repo + subprocess.run(["git", "init"], cwd=git_dir, check=True, capture_output=True) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=git_dir, check=True + ) + subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) + + # Create a test file and commit + test_file = git_dir / "test_file.py" + 
test_file.write_text("# Test file\nprint('hello')\n") + subprocess.run(["git", "add", "."], cwd=git_dir, check=True) + subprocess.run(["git", "commit", "-m", "Initial commit"], cwd=git_dir, check=True) + + # Add HTTPS remote + subprocess.run( + [ + "git", + "remote", + "add", + "origin", + "https://github.com/test-user/test-repo.git", + ], + cwd=git_dir, + check=True, + ) + + return git_dir + + # Test error handling def test_git_operations_with_no_commits(temp_dir): """Test git operations on repo with no commits.""" @@ -68,3 +169,72 @@ def test_git_repo_with_no_remotes(temp_dir): # Should raise an exception when trying to get repo info with pytest.raises(AssertionError): get_github_repo_info(git_dir) + + +# Test git-related functions +def test_parse_git_output_ssh_format(): + """Test parsing git remote output in SSH format.""" + git_line = "origin git@github.com:test-user/test-repo.git (fetch)" + result = parse_remote_git_output(git_line) + assert result == "test-user/test-repo" + + +def test_parse_git_output_https_format(): + """Test parsing git remote output in HTTPS format.""" + git_line = "origin https://github.com/test-user/test-repo.git (fetch)" + result = parse_remote_git_output(git_line) + assert result == "test-user/test-repo" + + +def test_parse_git_output_ssh_format_without_git_suffix(): + """Test parsing git remote output in SSH format without .git suffix.""" + git_line = "origin git@github.com:test-user/test-repo (fetch)" + result = parse_remote_git_output(git_line) + assert result == "test-user/test-repo" + + +def test_parse_git_output_invalid_format(): + """Test parsing invalid git remote output.""" + git_line = "invalid" + result = parse_remote_git_output(git_line) + assert result == "" + + +def test_parse_git_output_empty_string(): + """Test parsing empty git remote output.""" + git_line = "" + result = parse_remote_git_output(git_line) + assert result == "" + + +def test_get_github_repo_info_ssh_remote(git_repo): + """Test getting GitHub 
repository information with SSH remote.""" + result = get_github_repo_info(git_repo) + assert result == "test-user/test-repo" + + +def test_get_github_repo_info_https_remote(git_repo_with_https_remote): + """Test getting GitHub repository information with HTTPS remote.""" + result = get_github_repo_info(git_repo_with_https_remote) + assert result == "test-user/test-repo" + + +def test_get_github_repo_info_multiple_remotes(git_repo_multiple_remotes): + """Test GitHub repo info retrieval with multiple remotes (origin preferred).""" + result = get_github_repo_info(git_repo_multiple_remotes) + assert result == "test-user/test-repo" + + +def test_get_current_git_hash(git_repo): + """Test getting current git hash.""" + result = get_current_git_hash(git_repo) + + # Verify it's a valid git hash (40 hex characters) + assert len(result) == 40 + assert all(c in "0123456789abcdef" for c in result) + + +def test_get_current_git_hash_invalid_repo(temp_dir): + """Test getting git hash from invalid repository.""" + with pytest.raises(Exception): + get_current_git_hash(temp_dir) From 49bde21eef407413b13a40d1fef20996e7b56d01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Tue, 19 Aug 2025 23:47:21 +0200 Subject: [PATCH 109/231] Update Metamodel README (#217) --- docs/internals/extensions/metamodel.md | 240 +++++++++++++++++++++---- 1 file changed, 209 insertions(+), 31 deletions(-) diff --git a/docs/internals/extensions/metamodel.md b/docs/internals/extensions/metamodel.md index 055f0a4a..c14a91dc 100644 --- a/docs/internals/extensions/metamodel.md +++ b/docs/internals/extensions/metamodel.md @@ -1,30 +1,40 @@ (metamodel)= # score_metamodel -This extension provides the Metamodel and corresponding checks of the SCORE -project as a Sphinx extension. +The `score_metamodel` extension is a core extension/component of the Docs-As-Code. +It provides metamodel definitions, validation checks, and project layout management for Sphinx documentation. 
-## Naming +## Overview -* check: A check is a function that checks compliance to a specific rule. - (Note: sphinx-needs calls this a 'warning') -* Need-Local checks: checks which can be checked file-local, without a graph of - other needs. -* Graph-Based checks: These warnings require a graph of all other needs to be - checked. +This extension serves multiple critical functions: -## Creating new checks +- **Metamodel Definition**: Houses the project's metamodel schema and configuration +- **Validation System**: Implements comprehensive checks to ensure documentation compliance +- **External Needs Management**: Imports external needs from dependencies +- **Project Layout**: Manages the rendering, look and feel of documentation +- **Integration Testing**: Provides RST-based tests to validate metamodel behavior -In order to create a new check, you need to create a file in the `checks` -directory. It will be picked up automatically. -Then you need to write a local or graph based check function. -You need to use @local_check or @graph_check decorators to mark your function -accordingly. -Have a look at a simple example like `id_contains_feature`. 
+## Core Components -## Usage +### Metamodel Definition +The extension contains: +- `metamodel.yaml`: The main metamodel definition +- `metamodel-schema.json`: JSON schema for validation +- Setting configuration parameters based on input that get passed on to sphinx-needs -Add score_metamodel to your extensions in `conf.py`: +### Validation System +The extension implements a multi-tier checking system: + +**Local Checks**: Validate individual needs using only their own data +- Run faster as they don't require the full needs graph +- Examples: ID format validation, prohibited words, attribute formatting + +**Graph-Based Checks**: Validate needs in the context of their relationships +- Require access to the complete needs graph +- Examples: Link validation, dependency checking, cross-reference verification + +This extension comes with Docs-As-Code. +Add `score_metamodel` to your extensions in `conf.py`: ```python extensions = [ @@ -34,22 +44,190 @@ extensions = [ ] ``` -Make sure score_metamodel is installed in your environment or added to your -sys.path. +## Creating New Validation Checks + +The extension automatically discovers checks from the `checks/` directory and the metamodel.yaml config. There are several types of checks you can implement: + +### 1. Need-Local Checks (Configuration-Based) +These checks validate individual needs using regex patterns. They're defined in `metamodel.yaml` and are the easiest to create. + +All definitions are parsed as regex and evaluated as such, keep that in mind. +They can be found inside the metamodel.yml and are how we define needs. 
See an example here: + +```yaml +needs_types: + gd_req: + title: Process Requirements + prefix: gd_req__ + # All options&links defined below are checked if they follow the defined regex in the need itself + mandatory_options: + id: ^gd_req__[0-9a-z_]*$ + status: ^(valid|draft)$ + content: ^[\s\S]+$ # multiline non empty matching + optional_links: + satisfies: ^wf__.*$ + complies: ^std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ #can only be one of these + tags: + - requirement # grouping of similar needs together in order to make some other checks easier to write + parts: 2 # has to have exactly one `__` inside the ID +``` + +### 2. Generic Graph Checks (Configuration-Based) +Generic graph checks are defined in the metamodel.yaml under `graph_checks`. +These checks all follow the same structure: + +```yaml +: + needs: + include: , #list of your needs + condition: + check: + : + explanation: +``` + +> *Note:* You can also use multiple conditions or negate conditions in either the needs or check part. + +A complete example might look like so: + +```yaml +graph_checks: + tool_req__docs_req_arch_link_safety_to_arch: + needs: + include: feat_arc_sta, logic_arc_int, logic_arc_int_op, comp_arc_sta, real_arc_int, real_arc_int_op + condition: + and: + - safety != QM + - status == valid + check: + implements: + and: + - safety != QM + - status == valid + explanation: An safety architecture element can only link other safety architecture elements. +``` + +What does this check do? +This check will go through each of the needs mentioned in 'include' that match the condition, and then for every single one of them check the needs that are linked inside the 'implements' attribute. Go inside those needs and check if they also fulfill the condition described. +If one of them does not fulfill the condition the check fails and will let you know with a warning that it did so. + +### 3. 
Prohibited Word Checks (Configuration-Based) +For preventing specific words for specific needs in certain attributes. +This is also defined in metamodel and follows the following schema: + +```yaml +prohibited_words_checks: + : + types[OPTIONAL]: # If you skip this, all needs will be checked. + - < you can specify here that only needs with this tag will get checked for this check> + : + - < word to forbid > + - < word to forbid > +``` + +An example might look like this: +```yaml +prohibited_words_checks: + content_check: + types: + - requirement_excl_process + content: + - just + - about + - really + - some + - thing + - absolutely +``` + +For all needs that have the `tag` 'requirement_excl_process' **inside the metamodel.yaml** this check will now verify that the `content` or the `description` does not contain any of the mentioned words in the list. + +### 4. Custom Local Checks (Python Code) +If you need something that the generic local or graph checks can not fulfill, then you can also add a custom check. +Ensure this check is inside a python file that is placed in the `check` folder in this extension. +Do not forget to add the applicable decorator to the function. + +This means all validations can be done with only the information in this need itself, and you do not need access to any of the linked needs or other needs inside the documentation. + +Your function will receive the Sphinx `app`, the current `need` to check and a `log` to log messages. + +```python +from score_metamodel.checks import local_check + +@local_check +def my_local_check(app, need, log): + # Validate individual need properties + # Example: If option_a == '2' then option_b is required to be not empty + pass +``` + +> Check existing files in the `checks/` folder for real examples. + +### 5. Custom Graph Checks (Python Code) +These checks need to access linked needs in order to fully verify the specified behavior. 
+The signature is similar to that of local_check, but instead of one need you will get `all_needs`. + +```python +from score_metamodel.checks import graph_check + +@graph_check +def my_custom_graph_check(app, all_needs, log): + # Complex validation with full graph access + # Example: if option_a == '2' then each linked requirement needs to also have option_a == '2' + pass +``` + +> Check existing files in the `checks/` folder for real examples. + +## File Structure Reference + +``` +score_metamodel/ +├── BUILD +├── __init__.py +├── checks +│ ├── __init__.py +│ ├── attributes_format.py +│ ├── check_options.py +│ ├── graph_checks.py +│ ├── id_contains_feature.py +│ └── standards.py +├── external_needs.py +├── log.py +├── metamodel-schema.json +├── metamodel.yaml +└── tests + ├── __init__.py + ├── rst + │ ├── attributes + │ │ └── ... + │ ├── conf.py + │ ├── graph + │ │ └── test_metamodel_graph.rst + │ ├── id_contains_feature + │ │ └── test_id_contains_feature.rst + │ └── options + │ └── test_options_options.rst + └── ... +``` + +## Testing Your Changes +The extension is setup for comprehensive testing: -## Decision Record: split of need-local and graph-based checks +To add tests for new checks: +- **Unit Tests**: Test individual additions, if they are not covered by the present unit-tests +- **Integration Tests**: Add RST-based tests in `tests/rst/` that validate the metamodel in realistic scenarios. Make sure to have at least one good and one bad case. -While sphinx-needs' own check/warning mechanism is very powerful, it only works -for need-local checks. This means, that it is not possible to warn about -graph-based issues like wrong links etc. +1. Add unit tests in the appropriate `test_*.py` file +2. Create RST integration tests in `tests/rst/` to verify behavior in Sphinx. 
Ensure thematically it's in the right folder -There are multiple ways to solve this issue, for example via -https://github.com/useblocks/sphinx-needs/pull/1248 +## Architecture Decision: Local vs Graph-Based Checks -However we chose to implement a custom warning mechanism, as we needed a -more elaborate solution for our use case anyway. Calling the checks ourselves -seems to be the most flexible solution. +While sphinx-needs provides a powerful warning mechanism, it's limited to need-local checks. This extension implements a custom multi-tier system because: -Technically local checks will be called with a single need, while graph-based -checks will be called with all needs. +- **Performance**: Local checks run faster and can provide immediate feedback +- **Flexibility**: Graph-based checks enable complex relationship validation +- **Control**: Custom implementation allows for more elaborate validation logic +- **Future-proofing**: Provides foundation for advanced validation features From f69b9c028cc438880eb37b6fb0230670855f80bc Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Wed, 20 Aug 2025 09:41:33 +0200 Subject: [PATCH 110/231] Improve new checks logs visibility (#215) * Improve new checks logs * Change implementation to display all new checks warnings separately in the end --- src/extensions/score_metamodel/__init__.py | 10 ++-- src/extensions/score_metamodel/log.py | 60 ++++++++++++++++------ 2 files changed, 50 insertions(+), 20 deletions(-) diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index c6e57680..c045192d 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -141,12 +141,14 @@ def is_check_enabled(check: local_check_function | graph_check_function): check(app, needs_all_needs, log) if log.has_warnings: - log.warning("Some needs have issues. See the log for more information.") + logger.warning("Some needs have issues. 
See the log for more information.") if log.has_infos: - log.info( - "Some needs have issues related to the new checks. " - "See the log for more information." + log.flush_new_checks() + logger.info( + "\n\nThese next warnings are displayed as info statements for now. " + "They will become real warnings in the future. " + "Please fix them as soon as possible.\n" ) # TODO: exit code diff --git a/src/extensions/score_metamodel/log.py b/src/extensions/score_metamodel/log.py index 5f61c9ed..8c101cbb 100644 --- a/src/extensions/score_metamodel/log.py +++ b/src/extensions/score_metamodel/log.py @@ -14,9 +14,14 @@ from typing import Any from docutils.nodes import Node +from sphinx_needs import logging from sphinx_needs.data import NeedsInfoType from sphinx_needs.logging import SphinxLoggerAdapter +Location = str | tuple[str | None, int | None] | Node | None +NewCheck = tuple[str, Location] +logger = logging.get_logger(__name__) + class CheckLogger: def __init__(self, log: SphinxLoggerAdapter, prefix: str): @@ -24,6 +29,7 @@ def __init__(self, log: SphinxLoggerAdapter, prefix: str): self._info_count = 0 self._warning_count = 0 self._prefix = prefix + self._new_checks: list[NewCheck] = [] @staticmethod def _location(need: NeedsInfoType, prefix: str): @@ -43,47 +49,45 @@ def get(key: str) -> Any: return None def warning_for_option( - self, need: NeedsInfoType, option: str, msg: str, new_check: bool = False + self, need: NeedsInfoType, option: str, msg: str, is_new_check: bool = False ): full_msg = f"{need['id']}.{option} ({need.get(option, None)}): {msg}" location = CheckLogger._location(need, self._prefix) - self._log_message(full_msg, location, new_check) + self._log_message(full_msg, location, is_new_check) - def warning_for_need(self, need: NeedsInfoType, msg: str, new_check: bool = False): + def warning_for_need( + self, need: NeedsInfoType, msg: str, is_new_check: bool = False + ): full_msg = f"{need['id']}: {msg}" location = CheckLogger._location(need, self._prefix) - 
self._log_message(full_msg, location, new_check) + self._log_message(full_msg, location, is_new_check) def _log_message( self, msg: str, - location: None | str | tuple[str | None, int | None] | Node = None, - is_info: bool = False, + location: Location, + is_new_check: bool = False, ): - if is_info: - msg += ( - "\nPlease fix this warning related to the new check " - "before the release of the next version of Docs-As-Code." - ) - self.info(msg, location) + if is_new_check: + self._new_checks.append((msg, location)) + self._info_count += 1 else: self.warning(msg, location) + self._warning_count += 1 def info( self, msg: str, - location: None | str | tuple[str | None, int | None] | Node = None, + location: Location, ): self._log.info(msg, type="score_metamodel", location=location) - self._info_count += 1 def warning( self, msg: str, - location: None | str | tuple[str | None, int | None] | Node = None, + location: Location, ): self._log.warning(msg, type="score_metamodel", location=location) - self._warning_count += 1 @property def has_warnings(self): @@ -92,3 +96,27 @@ def has_warnings(self): @property def has_infos(self): return self._info_count > 0 + + def flush_new_checks(self): + """Log all new-check messages together at once.""" + + def make_header_line(text: str, width: int = 80) -> str: + """Center a header inside '=' padding so line length stays fixed.""" + text = f" {text} " + return text.center(width, "=") + + if not self._new_checks: + return + + info_header = make_header_line("[INFO MESSAGE]") + separator = "=" * 80 + warning_header = make_header_line( + f"[New Checks] has {len(self._new_checks)} warnings" + ) + + logger.info(info_header) + logger.info(separator) + logger.info(warning_header) + + for msg, location in self._new_checks: + self.info(msg, location) From fd90a1779b87735095fccab069085d3d52ef25d1 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Wed, 20 Aug 2025 12:01:15 +0200 Subject: [PATCH 111/231] add documentation check command and time 
tracking (#218) --- docs.bzl | 13 +++++++++++++ src/incremental.py | 23 ++++++++++++++++++++++- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/docs.bzl b/docs.bzl index 3c940e99..04f3b49d 100644 --- a/docs.bzl +++ b/docs.bzl @@ -85,6 +85,19 @@ def docs(source_dir = "docs", data = [], deps = []): }, ) + py_binary( + name = "docs_check", + tags = ["cli_help=Verify documentation [run]"], + srcs = ["@score_docs_as_code//src:incremental.py"], + data = data, + deps = deps, + env = { + "SOURCE_DIRECTORY": source_dir, + "DATA": str(data), + "ACTION": "check", + }, + ) + py_binary( name = "live_preview", tags = ["cli_help=Live preview documentation in the browser [run]"], diff --git a/src/incremental.py b/src/incremental.py index 857b6d98..10513181 100644 --- a/src/incremental.py +++ b/src/incremental.py @@ -15,6 +15,7 @@ import logging import os import sys +import time from pathlib import Path import debugpy @@ -102,4 +103,24 @@ def get_env(name: str) -> str: ] ) else: - sys.exit(sphinx_main(base_arguments)) + if action == "incremental": + builder = "html" + elif action == "check": + builder = "needs" + else: + raise ValueError(f"Unknown action: {action}") + + base_arguments.extend( + [ + "-b", + builder, + ] + ) + + start_time = time.perf_counter() + exit_code = sphinx_main(base_arguments) + end_time = time.perf_counter() + duration = end_time - start_time + print(f"docs ({action}) finished in {duration:.1f} seconds") + + sys.exit(exit_code) From 014857a184d7329c7e0cc2fdb14f2e47ba9d1176 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Thu, 21 Aug 2025 11:22:10 +0200 Subject: [PATCH 112/231] Integrate pytest plugin & new tooling structure (#220) Added first testlink decorator Improved / added testlinking to source code linker README Changed imports to adapt new score_tooling structure --- BUILD | 7 +- MODULE.bazel | 16 +- docs.bzl | 4 +- docs/conf.py | 1 + .../extensions/source_code_linker.md | 389 +++++++++++------- 
docs/requirements/test_overview.rst | 2 +- pyproject.toml | 4 +- src/BUILD | 5 +- src/extensions/BUILD | 2 +- src/extensions/score_header_service/BUILD | 2 +- src/extensions/score_metamodel/BUILD | 2 +- src/extensions/score_source_code_linker/BUILD | 4 +- .../tests/test_codelink.py | 6 + .../score_source_code_linker/xml_parser.py | 10 +- src/find_runfiles/BUILD | 2 +- src/helper_lib/BUILD | 2 +- src/requirements.in | 1 + src/requirements.txt | 21 +- 18 files changed, 305 insertions(+), 175 deletions(-) diff --git a/BUILD b/BUILD index cb382a56..a624d0a1 100644 --- a/BUILD +++ b/BUILD @@ -11,7 +11,8 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -load("@score_cr_checker//:cr_checker.bzl", "copyright_checker") +load("@aspect_rules_py//py:defs.bzl", "py_library") +load("@score_tooling//:defs.bzl", "copyright_checker") load("//:docs.bzl", "docs") package(default_visibility = ["//visibility:public"]) @@ -23,8 +24,8 @@ copyright_checker( "//:BUILD", "//:MODULE.bazel", ], - config = "@score_cr_checker//resources:config", - template = "@score_cr_checker//resources:templates", + config = "@score_tooling//cr_checker/resources:config", + template = "@score_tooling//cr_checker/resources:templates", visibility = ["//visibility:public"], ) diff --git a/MODULE.bazel b/MODULE.bazel index efe5e633..d8abb3d0 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -88,20 +88,24 @@ http_file( ) # Provides, pytest & venv -bazel_dep(name = "score_python_basics", version = "0.3.4") +# bazel_dep(name = "score_python_basics", version = "0.3.4") +# local_path_override( +# module_name = "score_python_basics", +# path="../tooling" +# ) # Checker rule for CopyRight checks/fixes -bazel_dep(name = "score_cr_checker", version = "0.3.1") -bazel_dep(name = "score_dash_license_checker", version = "0.1.1") # docs dependency bazel_dep(name = "score_process", version = "1.1.1-Beta") # Add Linter bazel_dep(name = "rules_multitool", 
version = "1.2.0") -bazel_dep( - name = "score_linter", - version = "0.1.0", +bazel_dep(name = "score_tooling", version = "0.0.0") +git_override( + module_name = "score_tooling", + commit = "0fc82801df8356571582527e907662e4875fef36", + remote = "https://github.com/eclipse-score/tooling", ) multitool_root = use_extension("@rules_multitool//multitool:extension.bzl", "multitool") diff --git a/docs.bzl b/docs.bzl index 04f3b49d..df23235b 100644 --- a/docs.bzl +++ b/docs.bzl @@ -44,7 +44,7 @@ load("@rules_pkg//pkg:mappings.bzl", "pkg_files") load("@rules_pkg//pkg:tar.bzl", "pkg_tar") load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary", "sphinx_docs") load("@rules_python//sphinxdocs:sphinx_docs_library.bzl", "sphinx_docs_library") -load("@score_python_basics//:defs.bzl", "score_virtualenv") +load("@score_tooling//:defs.bzl", "score_virtualenv") def docs(source_dir = "docs", data = [], deps = []): """ @@ -63,6 +63,8 @@ def docs(source_dir = "docs", data = [], deps = []): "@score_docs_as_code//src/extensions/score_layout:score_layout", "@score_docs_as_code//src/extensions/score_metamodel:score_metamodel", "@score_docs_as_code//src/extensions/score_source_code_linker:score_source_code_linker", + # NOTE: Do not comment this in, can only be enabled once tooling is released & process upgraded + #"@score_tooling//python_basics/score_pytest:attribute_plugin" ] sphinx_build_binary( diff --git a/docs/conf.py b/docs/conf.py index 027311a4..4e98b780 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -39,6 +39,7 @@ "score_metamodel", "score_draw_uml_funcs", "score_layout", + "sphinxcontrib.mermaid", ] myst_enable_extensions = ["colon_fence"] diff --git a/docs/internals/extensions/source_code_linker.md b/docs/internals/extensions/source_code_linker.md index 4d8f5310..7ef81e59 100644 --- a/docs/internals/extensions/source_code_linker.md +++ b/docs/internals/extensions/source_code_linker.md @@ -1,187 +1,300 @@ (source-code-linker)= -# Source Link Extension Details +# Score 
Source Code Linker -A Sphinx extension for source code traceability for requirements. This extension works with the Bazel system and Sphinx-needs to provide automatic source code traceability. -In a first step it parses the source code for requirement tags. All discovered tags including their file and line numbers are written in an intermediary file before the sphinx build. -In a second step this intermediary file is parsed during sphinx build. If a requirement Id is found in the intermediary file a link to the source is added. +A Sphinx extension for enabling **source code and test traceability** for requirements. +This extension integrates with **Bazel** and **sphinx-needs** to automatically generate traceability links between implementation, tests, and documentation. -** Please note that the 'test parsing & linking' has been added to the source-code-linker. ** -* The documentation for this part will follow soon * +--- -## Implementation Components +## Overview -### Bazel Integration -The extension uses two main components to integrate with Bazel: +The extension is split into two main components: -1. `collect_source_files` - - Processes all files from provided deps - - Passes files as `--input` arguments to `parse_source_files.py` - - Handles dependency tracking for incremental builds +- **CodeLink** – Parses source files for template strings and links them to needs. +- **TestLink** – Parses test.xml outputs inside `bazel-testlogs` to link test cases to requirements. -2. `parse_source_files.py` - - Scans input files for template tags (e.g., "# req-traceability:") - - Retrieves git information (hash, file location) - - Generates mapping file with requirement IDs and links +Each component stores intermediate data in JSON caches under `_build/` to optimize performance and speed up incremental builds. -### Link Generation Process +--- -1. 
File Discovery: - - Takes deps from Bazel rule - - Filters for relevant file types - - Creates file list for processing +## How It Works -
+### ✅ CodeLink: Source Code Integration -2. Tag Processing: - - Scans files for template strings - - Extracts requirement IDs - - Maps IDs to file locations - - *Git Integration*: - - Gets current git hash for each file - - Constructs GitHub URLs with format: - `{base_url}/{repo}/blob/{hash}/{file}#L{line_nr}` - **Note:** The base_url is defined in `parse_source_files.py`. Currently set to: `https://github.com/eclipse-score/score/blob/` +CodeLink scans repository files (excluding `_`, `.`, and binary formats) for requirement tags such as: -Produces JSON mapping file: -The strings are split here to not enable tracking by the source code linker. ```python -[ - { - "file": "src/implementation1.py", - "line": 3, - "tag":"#" + " req-Id:", - "need": "TREQ_ID_1", - "full_line": "#"+" req-Id: TREQ_ID_1" - }, - { - "file": "src/implementation2.py", - "line": 3, - "tag":"#" + " req-Id:", - "need": "TREQ_ID_1", - "full_line": "#"+" req-Id: TREQ_ID_1" - }, -] +# Choose one or the other, both mentioned here to avoid detection +# req-Id/req-traceability: ``` -
- -3. Sphinx extension & Sphinx-needs: - - Parses JSON file - - Adds 'url-string' to needs - - Converts 'url-string' to clickable link - -
- -> An overview of the data flow inside the extension and it's parts - -![Data flow inside extension](data_flow.png) - ---- +These tags are extracted and matched to Sphinx needs via the `source_code_link` attribute. If a need ID does not exist, a build warning will be raised. +#### Data Flow -### Sphinx Integration -The extension hooks into Sphinx's build process. It attaches to the `env-updated` event. +1. **File Scanning** (`generate_source_code_links_json.py`) + - Filters out files starting with `_`, `.`, or ending in `.pyc`, `.so`, `.exe`, `.bin`. + - Searches for template tags: `# req-Id:` and `# req-traceability:`. + - Extracts: + - File path + - Line number + - Tag and full line + - Associated need ID + - Saves data as JSON via `needlinks.py`. -1. Configuration Phase: - - Registers as Sphinx extension - - Reads mapping file - - Sets up sphinx-needs integration +2. **Link Creation** + - Git info (file hash) is used to build a GitHub URL to the line in the source file. + - Links are injected into needs via the `source_code_link` attribute during the Sphinx build process. -2. Build Phase: - - Processes each need - - Adds source_code_link option to matching needs - - Handles needs_string_links conversion +#### Example JSON Cache (CodeLinks) -3. Error Handling: - - Validates requirement IDs - - Provides descriptive warnings for missing IDs +``` +[ + { + "file": "src/extensions/score_metamodel/metamodel.yaml", + "line": 17, + "tag": "#--req-Id:", # added `--` to avoid detection + "need": "tool_req__docs_dd_link_source_code_link", + "full_line": "#--req-Id: tool_req__docs_dd_link_source_code_link" # added `--` to avoid detection + } +] +``` -## Usage Guide +--- -### Adding Places to Search +### ✅ TestLink: Test Result Integration -You can easily add files to be searched by adding targets / files to the deps inside the -`collect_source_files_for_score_source_code_linker` in `docs/BUILD`. 
-See here: +TestLink scans test result XMLs from Bazel and converts each test case with metadata into Sphinx external needs, allowing links from tests to requirements. +This depends on the `attribute_plugin` in our tooling repository, find it [here](https://github.com/eclipse-score/tooling/tree/main/python_basics/score_pytest) +#### Test Tagging Options -```starlark -collect_source_files_for_score_source_code_linker( - name = "collected_files_for_score_source_code_linker", - deps = [ - ":score_metamodel", - ":score_source_code_linker", - # Add targets to be parsed here - ], +```python +# Import the decorator +from attribute_plugin import add_test_properties + +# Add the decorator to your test +@add_test_properties( + partially_verifies=["tool_req__docs_common_attr_title", "tool_req__docs_common_attr_description"], + test_type="interface-test", + derivation_technique="boundary-values" ) -``` +def test_feature(): + """ + Mandatory docstring that contains a description of the test + """ + ... -### Adding Tags to Source Files +``` +> Note: If you use the decorator, it will check that you have specified a docstring inside the function. + +#### Data Flow + +1. **XML Parsing** (`xml_parser.py`) + - Scans `bazel-testlogs/` for `test.xml` files. + - Parses test cases and extracts: + - Name + - File path + - Line + - Result (e.g. passed, failed, skipped) + - Result text (if failed/skipped will check if message was attached to it) + - Verifications (`PartiallyVerifies`, `FullyVerifies`) + + - Cases without metadata are logged out as info (not errors). + - Test cases with metadata are converted into: + - `DataFromTestCase` (used for external needs) + - `DataForTestLink` (used for linking tests to requirements) + +2. **Need Linking** + - Generates external Sphinx needs from `DataFromTestCase`. + - Creates `testlink` attributes on linked requirements. + - Warns on missing need IDs. 
+ +#### Example JSON Cache (DataFromTestCase) +The DataFromTestCase depicts the information gathered about one testcase. +```json +[ + { + "name": "test_cache_file_with_encoded_comments", + "file": "src/extensions/score_source_code_linker/tests/test_codelink.py", + "line": "340", + "result": "passed", + "TestType": "interface-test", + "DerivationTechnique": "boundary-values", + "result_text": "", + "PartiallyVerifies": "tool_req__docs_common_attr_title, tool_req__docs_common_attr_description", + "FullyVerifies": null + } +] +``` -In order for a source_code_link to be generated there needs to be a **tag** inside the parsed file. -Tags are defined inside `parse_source_files.py` +--- -You can use them like this: +## 🔗 Combined Links -```python -# req-#traceability: -def dummy_function(): - pass -``` +During the Sphinx build process, both CodeLink and TestLink data are combined and applied to needs. -This will then add a link to this source file to the need you specified. +This is handled in `__init__.py` using the `NeedSourceLinks` and `SourceCodeLinks` dataclasses from `need_source_links.py`. -**Warning:** If the need-id you specified in the tag, does **not** exist in the needs, the extension will give an error message. -Therefore stopping the sphinx-build. -The error message looks similar to this: +### Combined JSON Example ``` -WARNING: Could not find TREQ_ID_200 in the needs id's. 
Found in file(s):['_tooling/score_metamodel/bad_implementation.py'] +[ + { + "need": "tool_req__docs_common_attr_title", + "links": { + "CodeLinks": [ + { + "file": "src/extensions/score_metamodel/metamodel.yaml", + "line": 33, + "tag": "#--req-Id:",# added `--` to avoid detection + "need": "tool_req__docs_common_attr_title", + "full_line": "#--req-Id: tool_req__docs_common_attr_title" # added `--` to avoid detection + } + ], + "TestLinks": [ + { + "name": "test_cache_file_with_encoded_comments", + "file": "src/extensions/score_source_code_linker/tests/test_codelink.py", + "line": 340, + "need": "tool_req__docs_common_attr_title", + "verify_type": "partially", + "result": "passed", + "result_text": "" + } + ] + } + } +] ``` -### Quickly Finding Source Links -The easiest and quickest way to find source_code_link options is to just search for the option `source_code_link`. It should give you all rst files -where the option is not empty. +--- -### Executing Tests +## ⚠️ Known Limitations -If you want to specifically execute the test suite for the extension please use the following command: -```bash -bazel test //docs:score_source_code_link_tests -``` +### CodeLink -The test suite should also run if you run all tests via `bazel test //...` +- ❌ Not compatible with **Esbonio/Live_preview** +- 🔗 GitHub links may 404 if the commit isn’t pushed +- 🧪 Tags must match exactly (e.g. # req-Id) +- 👀 `source_code_link` isn’t visible until the full Sphinx build is completed -## Known Limitations +### TestLink -1. Esbonio Compatibility: - - Does not work with Language Server - - Skipped for performance reasons in instant feedback mode +- ❌ Not compatible with **Esbonio/Live_preview** +- 🔗 GitHub links may 404 if the commit isn’t pushed +- 🧪 XML structure must be followed exactly (e.g. `properties & attributes`) +- 🗂 Relies on test to be executed first -2. Local Development: - - Links to unpushed commits return 404 - - Links still generated but non-functional until push -3. 
Build Process: - - source_code_link not visible in raw RST - - Requires full build cycle for link generation - - Dependent on GitHub repository structure -## Internal Architecture +--- +## 🏗️ Internal Module Overview -1. `collect_source_files`: - - Dependency management - - File filtering - - Build rule definition +``` +score_source_code_linker/ +├── __init__.py # Main Sphinx extension; combines CodeLinks + TestLinks +├── generate_source_code_links_json.py # Parses source files for tags +├── need_source_links.py # Data model for combined links +├── needlinks.py # CodeLink dataclass & JSON encoder/decoder +├── testlink.py # DataForTestLink definition & logic +├── xml_parser.py # Parses XML files into test case data +├── tests/ # Testsuite, containing unit & integration tests +│ └── ... +``` -2. `parse_source_files.py`: - - File parsing - - Git integration - - Link generation +--- +## Clearing Cache Manually -3. `source_link` extension: - - Sphinx integration - - Need modification +To clear the build cache, run: -Tests are inside the `source_link/tests` +```bash +rm -rf _build/ +``` + +## Examples: +To see working examples for CodeLinks & TestLinks, take a look at the Docs-As-Code documentation. + +[Example CodeLink](https://eclipse-score.github.io/docs-as-code/main/requirements/requirements.html#tool_req__docs_common_attr_id_scheme) +[Example CodeLink](https://eclipse-score.github.io/docs-as-code/main/requirements/requirements.html#tool_req__docs_common_attr_status) + +[Example TestLink](https://eclipse-score.github.io/docs-as-code/main/requirements/requirements.html#tool_req__docs_dd_link_source_code_link) + +## Flow-Overview +```{mermaid} +flowchart TD + %% Entry Point + A[source_code_linker] --> B{Check for Grouped JSON Cache} + + %% If cache exists + B --> |✅| C[Load Grouped JSON Cache] + B --> |🔴| N9[Proceed Without Cache] + + %% --- NeedLink Path --- + N9 --> D1[needslink.py
NeedLink] + D1 --> E1{Check for CodeLink JSON Cache} + + E1 --> |✅| F1[Load CodeLink JSON Cache] + F1 --> Z[Grouped JSON Cache] + + E1 --> |🔴| G1[Parse all files in repository] + G1 --> H1[Build & Save
CodeLink JSON Cache] + H1 --> Z + + %% --- TestLink Path --- + N9 --> D2[testlink.py
DFTL] + D2 --> E2{Check for DFTL JSON Cache} + + E2 --> |✅| F2[Load DFTL JSON Cache] + F2 --> J2[Load DOTC JSON Cache] + J2 --> K2[Add as External Needs] + + E2 --> |🔴| G2[Parse test.xml Files] + G2 --> H2[Convert TestCases
to DOTC] + H2 --> I2[Build & Save
DOTC JSON Cache] + I2 --> K2 + + H2 --> M2[Convert to DFTL] + M2 --> N2[Build & Save
DFTL JSON Cache] + N2 --> Z + + %% Final step + Z --> FINAL[Add links to needs] + + %% Legend + subgraph Legend["Legend"] + direction TB + L1[NeedLink Operations] + L2[TestLink Operations] + L4[DTFL = DataForTestLink] + L3[TestCaseNeed Operations] + L5[DOTC = DataOfTestCase] + L1 ~~~ L2 + L2 ~~~ L4 + L4 ~~~ L3 + L3 ~~~ L5 + end + + %% Node Styling + classDef needlink fill:#3b82f6,color:#ffffff,stroke:#1d4ed8,stroke-width:2px + classDef testlink fill:#8b5cf6,color:#ffffff,stroke:#6d28d9,stroke-width:2px + classDef dotc fill:#f59e0b,color:#ffffff,stroke:#b45309,stroke-width:2px + classDef grouped fill:#10b981,color:#ffffff,stroke:#047857,stroke-width:2px + classDef final fill:#f43f5e,color:#ffffff,stroke:#be123c,stroke-width:2px + + %% Class assignments + class D1,E1,F1,G1,H1 needlink + class D2,E2,F2,G2,M2,N2 testlink + class J2,H2,I2,K2 dotc + class Z grouped + class FINAL final + class L1 needlink + class L2,L4 testlink + class L3,L5 dotc + + %% Edge/Arrow Styling + linkStyle default stroke:#22d3ee,stroke-width:2px,color:#22d3ee + %% Ensure links in the Legend do not show up + linkStyle 23,24,25,26 opacity:0 +``` diff --git a/docs/requirements/test_overview.rst b/docs/requirements/test_overview.rst index c87b78fb..9b0dc20a 100644 --- a/docs/requirements/test_overview.rst +++ b/docs/requirements/test_overview.rst @@ -1,6 +1,6 @@ .. _testing_stats: -TESTING STATISTICS +Testing Statistics ================== diff --git a/pyproject.toml b/pyproject.toml index eae04ccc..1b74cd11 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ # This file is at the root level, as it applies to all Python code, # not only to docs or to tools. 
[tool.pyright] -extends = "bazel-bin/ide_support.runfiles/score_python_basics+/pyproject.toml" +extends = "bazel-bin/ide_support.runfiles/score_tooling+/python_basics/pyproject.toml" exclude = [ "**/__pycache__", @@ -11,7 +11,7 @@ exclude = [ ] [tool.ruff] -extend = "bazel-bin/ide_support.runfiles/score_python_basics+/pyproject.toml" +extend = "bazel-bin/ide_support.runfiles/score_tooling+/python_basics/pyproject.toml" extend-exclude = [ "**/__pycache__", diff --git a/src/BUILD b/src/BUILD index 2446069d..15a8b3f5 100644 --- a/src/BUILD +++ b/src/BUILD @@ -17,8 +17,7 @@ load("@rules_pkg//pkg:mappings.bzl", "pkg_files") load("@rules_pkg//pkg:tar.bzl", "pkg_tar") load("@rules_python//python:pip.bzl", "compile_pip_requirements") load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary") -load("@score_dash_license_checker//:dash.bzl", "dash_license_checker") -load("@score_python_basics//:defs.bzl", "score_virtualenv") +load("@score_tooling//:defs.bzl", "dash_license_checker", "score_virtualenv") # These are only exported because they're passed as files to the //docs.bzl # macros, and thus must be visible to other packages. 
They should only be @@ -63,7 +62,7 @@ compile_pip_requirements( name = "requirements", srcs = [ "requirements.in", - "@score_python_basics//:requirements.txt", + "@score_tooling//python_basics:requirements.txt", ], requirements_txt = "requirements.txt", tags = [ diff --git a/src/extensions/BUILD b/src/extensions/BUILD index d8ad70e2..d4db4293 100644 --- a/src/extensions/BUILD +++ b/src/extensions/BUILD @@ -13,7 +13,7 @@ load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") load("@pip_process//:requirements.bzl", "all_requirements") -load("@score_python_basics//:defs.bzl", "score_py_pytest", "score_virtualenv") +load("@score_tooling//:defs.bzl", "score_py_pytest", "score_virtualenv") py_library( name = "score_plantuml", diff --git a/src/extensions/score_header_service/BUILD b/src/extensions/score_header_service/BUILD index c9e136ef..d7a7b65c 100644 --- a/src/extensions/score_header_service/BUILD +++ b/src/extensions/score_header_service/BUILD @@ -12,7 +12,7 @@ # ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_library") load("@pip_process//:requirements.bzl", "all_requirements") -load("@score_python_basics//:defs.bzl", "score_py_pytest") +load("@score_tooling//:defs.bzl", "score_py_pytest") py_library( name = "score_header_service", diff --git a/src/extensions/score_metamodel/BUILD b/src/extensions/score_metamodel/BUILD index 014b6ca4..7bf0d602 100644 --- a/src/extensions/score_metamodel/BUILD +++ b/src/extensions/score_metamodel/BUILD @@ -12,7 +12,7 @@ # ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_library") load("@pip_process//:requirements.bzl", "all_requirements") -load("@score_python_basics//:defs.bzl", "score_py_pytest") +load("@score_tooling//:defs.bzl", "score_py_pytest") py_library( name = "score_metamodel", diff --git a/src/extensions/score_source_code_linker/BUILD 
b/src/extensions/score_source_code_linker/BUILD index a8fd36cd..7a662df3 100644 --- a/src/extensions/score_source_code_linker/BUILD +++ b/src/extensions/score_source_code_linker/BUILD @@ -24,9 +24,7 @@ # ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") load("@pip_process//:requirements.bzl", "all_requirements") -load("@score_python_basics//:defs.bzl", "score_py_pytest") - -#exports_files(["parse_source_files.py"]) +load("@score_tooling//:defs.bzl", "score_py_pytest") py_library( name = "score_source_code_linker", diff --git a/src/extensions/score_source_code_linker/tests/test_codelink.py b/src/extensions/score_source_code_linker/tests/test_codelink.py index 03730217..dc196a17 100644 --- a/src/extensions/score_source_code_linker/tests/test_codelink.py +++ b/src/extensions/score_source_code_linker/tests/test_codelink.py @@ -22,6 +22,7 @@ from sphinx_needs.data import NeedsMutable from src.extensions.score_metamodel.tests import need as test_need +from attribute_plugin import add_test_properties # Import the module under test # Note: You'll need to adjust these imports based on your actual module structure @@ -394,6 +395,11 @@ def test_group_by_need_and_find_need_integration(sample_needlinks): assert found_need["id"] == "PREFIX_TREQ_ID_200" +@add_test_properties( + partially_verifies=["tool_req__docs_dd_link_source_code_link"], + test_type="interface-test", + derivation_technique="design-analysis", +) def test_source_linker_end_to_end_with_real_files(temp_dir, git_repo): """Test end-to-end workflow with real files and git repo.""" # Create source files with requirement IDs diff --git a/src/extensions/score_source_code_linker/xml_parser.py b/src/extensions/score_source_code_linker/xml_parser.py index 9ef65bc4..82ca0bbe 100644 --- a/src/extensions/score_source_code_linker/xml_parser.py +++ b/src/extensions/score_source_code_linker/xml_parser.py @@ -194,12 +194,10 @@ def 
build_test_needs_from_files( tcns: list[DataOfTestCase] = [] for f in xml_paths: b, z = read_test_xml_file(f) - for non_prop_test in z: - # We probably do not want to do this as a warning yet - logger.info( - f"Test: {non_prop_test} has no properties. Could not create need" - ) - # Now we build the needs from it + non_prop_tests = ", ".join(n for n in z) + if non_prop_tests: + logger.info("The following tests do not have properties.") + logger.info(non_prop_tests + "\n") tcns.extend(b) for c in b: construct_and_add_need(app, c) diff --git a/src/find_runfiles/BUILD b/src/find_runfiles/BUILD index 9acb8406..a286c57f 100644 --- a/src/find_runfiles/BUILD +++ b/src/find_runfiles/BUILD @@ -12,7 +12,7 @@ # ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_library") load("@pip_process//:requirements.bzl", "all_requirements") -load("@score_python_basics//:defs.bzl", "score_py_pytest") +load("@score_tooling//:defs.bzl", "score_py_pytest") py_library( name = "find_runfiles", diff --git a/src/helper_lib/BUILD b/src/helper_lib/BUILD index ac51fc2f..61d94175 100644 --- a/src/helper_lib/BUILD +++ b/src/helper_lib/BUILD @@ -12,7 +12,7 @@ # ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_library") load("@pip_process//:requirements.bzl", "all_requirements") -load("@score_python_basics//:defs.bzl", "score_py_pytest") +load("@score_tooling//:defs.bzl", "score_py_pytest") py_library( name = "helper_lib", diff --git a/src/requirements.in b/src/requirements.in index d798d7f5..31e43bd6 100644 --- a/src/requirements.in +++ b/src/requirements.in @@ -14,6 +14,7 @@ ruamel.yaml myst-parser PyGithub sphinx-needs[plotting] +sphinxcontrib.mermaid # Until release of esbonio 1.x, we need to install it ourselves so the VS Code esbonio-extension # can find it. # esbonio >= 1 comes bundled with the esbonio-extension >= 1. 
diff --git a/src/requirements.txt b/src/requirements.txt index a179bd7b..58932ad8 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -37,7 +37,7 @@ babel==2.17.0 \ basedpyright==1.29.2 \ --hash=sha256:12c49186003b9f69a028615da883ef97035ea2119a9e3f93a00091b3a27088a6 \ --hash=sha256:f389e2997de33d038c5065fd85bff351fbdc62fa6d6371c7b947fc3bce8d437d - # via -r external/score_python_basics+/requirements.txt + # via -r external/score_tooling+/python_basics/requirements.txt beautifulsoup4==4.13.4 \ --hash=sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b \ --hash=sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195 @@ -438,7 +438,7 @@ iniconfig==2.1.0 \ --hash=sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7 \ --hash=sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760 # via - # -r external/score_python_basics+/requirements.txt + # -r external/score_tooling+/python_basics/requirements.txt # pytest jinja2==3.1.6 \ --hash=sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d \ @@ -669,7 +669,7 @@ nodejs-wheel-binaries==22.16.0 \ --hash=sha256:d695832f026df3a0cf9a089d222225939de9d1b67f8f0a353b79f015aabbe7e2 \ --hash=sha256:dbfccbcd558d2f142ccf66d8c3a098022bf4436db9525b5b8d32169ce185d99e # via - # -r external/score_python_basics+/requirements.txt + # -r external/score_tooling+/python_basics/requirements.txt # basedpyright numpy==2.2.5 \ --hash=sha256:0255732338c4fdd00996c0421884ea8a3651eea555c3a56b84892b66f696eb70 \ @@ -734,7 +734,7 @@ packaging==25.0 \ --hash=sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484 \ --hash=sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f # via - # -r external/score_python_basics+/requirements.txt + # -r external/score_tooling+/python_basics/requirements.txt # matplotlib # pytest # sphinx @@ -829,7 +829,7 @@ pluggy==1.6.0 \ 
--hash=sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3 \ --hash=sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746 # via - # -r external/score_python_basics+/requirements.txt + # -r external/score_tooling+/python_basics/requirements.txt # pytest pycparser==2.22 \ --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ @@ -882,7 +882,7 @@ pyspellchecker==0.8.2 \ pytest==8.3.5 \ --hash=sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820 \ --hash=sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845 - # via -r external/score_python_basics+/requirements.txt + # via -r external/score_tooling+/python_basics/requirements.txt python-dateutil==2.9.0.post0 \ --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 @@ -941,7 +941,9 @@ pyyaml==6.0.2 \ --hash=sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba \ --hash=sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12 \ --hash=sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4 - # via myst-parser + # via + # myst-parser + # sphinxcontrib-mermaid referencing==0.36.2 \ --hash=sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa \ --hash=sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0 @@ -1167,6 +1169,7 @@ sphinx==8.2.3 \ # sphinx-design # sphinx-needs # sphinxcontrib-jquery + # sphinxcontrib-mermaid # sphinxcontrib-plantuml sphinx-autobuild==2024.10.3 \ --hash=sha256:158e16c36f9d633e613c9aaf81c19b0fc458ca78b112533b20dafcda430d60fa \ @@ -1204,6 +1207,10 @@ sphinxcontrib-jsmath==1.0.1 \ --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 # via sphinx +sphinxcontrib-mermaid==1.0.0 \ + 
--hash=sha256:2e8ab67d3e1e2816663f9347d026a8dee4a858acdd4ad32dd1c808893db88146 \ + --hash=sha256:60b72710ea02087f212028feb09711225fbc2e343a10d34822fe787510e1caa3 + # via -r src/requirements.in sphinxcontrib-plantuml==0.30 \ --hash=sha256:2a1266ca43bddf44640ae44107003df4490de2b3c3154a0d627cfb63e9a169bf # via -r src/requirements.in From cab4dfdc1a7a0f047852305e243b76c49255d1d7 Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Thu, 21 Aug 2025 12:42:09 +0200 Subject: [PATCH 113/231] Update all dependencies (#219) * Update dependancies & libraries --- MODULE.bazel | 12 +- src/requirements.txt | 1781 ++++++++++++++++++++++-------------------- 2 files changed, 942 insertions(+), 851 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index d8abb3d0..16bdae07 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -57,7 +57,7 @@ pip.parse( use_repo(pip, "pip_process") # Additional Python rules provided by aspect, e.g. an improved version of -bazel_dep(name = "aspect_rules_py", version = "1.4.0") +bazel_dep(name = "aspect_rules_py", version = "1.6.3") bazel_dep(name = "buildifier_prebuilt", version = "8.2.0.2") ############################################################################### @@ -65,10 +65,10 @@ bazel_dep(name = "buildifier_prebuilt", version = "8.2.0.2") # Generic linting and formatting rules # ############################################################################### -bazel_dep(name = "aspect_rules_lint", version = "1.4.4") +bazel_dep(name = "aspect_rules_lint", version = "1.5.3") # PlantUML for docs -bazel_dep(name = "rules_java", version = "8.13.0") +bazel_dep(name = "rules_java", version = "8.15.1") http_jar = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_jar") @@ -97,14 +97,14 @@ http_file( # Checker rule for CopyRight checks/fixes # docs dependency -bazel_dep(name = "score_process", version = "1.1.1-Beta") +bazel_dep(name = "score_process", version = "1.1.1") # Add Linter -bazel_dep(name = "rules_multitool", version = "1.2.0") 
+bazel_dep(name = "rules_multitool", version = "1.9.0") bazel_dep(name = "score_tooling", version = "0.0.0") git_override( module_name = "score_tooling", - commit = "0fc82801df8356571582527e907662e4875fef36", + commit = "07bb8bce1dc5ed806b934bbb7bb49f6b796e0387", remote = "https://github.com/eclipse-score/tooling", ) diff --git a/src/requirements.txt b/src/requirements.txt index 58932ad8..d0b8d5a0 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -14,9 +14,9 @@ alabaster==1.0.0 \ --hash=sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e \ --hash=sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b # via sphinx -anyio==4.9.0 \ - --hash=sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028 \ - --hash=sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c +anyio==4.10.0 \ + --hash=sha256:3f3fae35c96039744587aa5b8371e7e8e603c0702999535961dd336026973ba6 \ + --hash=sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1 # via # starlette # watchfiles @@ -42,15 +42,15 @@ beautifulsoup4==4.13.4 \ --hash=sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b \ --hash=sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195 # via pydata-sphinx-theme -cattrs==24.1.3 \ - --hash=sha256:981a6ef05875b5bb0c7fb68885546186d306f10f0f6718fe9b96c226e68821ff \ - --hash=sha256:adf957dddd26840f27ffbd060a6c4dd3b2192c5b7c2c0525ef1bd8131d8a83f5 +cattrs==25.1.1 \ + --hash=sha256:1b40b2d3402af7be79a7e7e097a9b4cd16d4c06e6d526644b0b26a063a1cc064 \ + --hash=sha256:c914b734e0f2d59e5b720d145ee010f1fd9a13ee93900922a2f3f9d593b8382c # via # lsprotocol # pygls -certifi==2025.4.26 \ - --hash=sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6 \ - --hash=sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3 +certifi==2025.8.3 \ + --hash=sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407 \ + 
--hash=sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5 # via requests cffi==1.17.1 \ --hash=sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8 \ @@ -123,240 +123,240 @@ cffi==1.17.1 \ # via # cryptography # pynacl -charset-normalizer==3.4.1 \ - --hash=sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537 \ - --hash=sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa \ - --hash=sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a \ - --hash=sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294 \ - --hash=sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b \ - --hash=sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd \ - --hash=sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601 \ - --hash=sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd \ - --hash=sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4 \ - --hash=sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d \ - --hash=sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2 \ - --hash=sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313 \ - --hash=sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd \ - --hash=sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa \ - --hash=sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8 \ - --hash=sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1 \ - --hash=sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2 \ - --hash=sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496 \ - --hash=sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d \ - --hash=sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b \ - 
--hash=sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e \ - --hash=sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a \ - --hash=sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4 \ - --hash=sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca \ - --hash=sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78 \ - --hash=sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408 \ - --hash=sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5 \ - --hash=sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3 \ - --hash=sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f \ - --hash=sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a \ - --hash=sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765 \ - --hash=sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6 \ - --hash=sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146 \ - --hash=sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6 \ - --hash=sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9 \ - --hash=sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd \ - --hash=sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c \ - --hash=sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f \ - --hash=sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545 \ - --hash=sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176 \ - --hash=sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770 \ - --hash=sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824 \ - --hash=sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f \ - --hash=sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf \ - 
--hash=sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487 \ - --hash=sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d \ - --hash=sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd \ - --hash=sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b \ - --hash=sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534 \ - --hash=sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f \ - --hash=sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b \ - --hash=sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9 \ - --hash=sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd \ - --hash=sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125 \ - --hash=sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9 \ - --hash=sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de \ - --hash=sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11 \ - --hash=sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d \ - --hash=sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35 \ - --hash=sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f \ - --hash=sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda \ - --hash=sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7 \ - --hash=sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a \ - --hash=sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971 \ - --hash=sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8 \ - --hash=sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41 \ - --hash=sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d \ - --hash=sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f \ - 
--hash=sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757 \ - --hash=sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a \ - --hash=sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886 \ - --hash=sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77 \ - --hash=sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76 \ - --hash=sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247 \ - --hash=sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85 \ - --hash=sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb \ - --hash=sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7 \ - --hash=sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e \ - --hash=sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6 \ - --hash=sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037 \ - --hash=sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1 \ - --hash=sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e \ - --hash=sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807 \ - --hash=sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407 \ - --hash=sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c \ - --hash=sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12 \ - --hash=sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3 \ - --hash=sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089 \ - --hash=sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd \ - --hash=sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e \ - --hash=sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00 \ - --hash=sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616 
+charset-normalizer==3.4.3 \ + --hash=sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91 \ + --hash=sha256:02425242e96bcf29a49711b0ca9f37e451da7c70562bc10e8ed992a5a7a25cc0 \ + --hash=sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154 \ + --hash=sha256:07a0eae9e2787b586e129fdcbe1af6997f8d0e5abaa0bc98c0e20e124d67e601 \ + --hash=sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884 \ + --hash=sha256:0e78314bdc32fa80696f72fa16dc61168fda4d6a0c014e0380f9d02f0e5d8a07 \ + --hash=sha256:0f2be7e0cf7754b9a30eb01f4295cc3d4358a479843b31f328afd210e2c7598c \ + --hash=sha256:13faeacfe61784e2559e690fc53fa4c5ae97c6fcedb8eb6fb8d0a15b475d2c64 \ + --hash=sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe \ + --hash=sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f \ + --hash=sha256:16a8770207946ac75703458e2c743631c79c59c5890c80011d536248f8eaa432 \ + --hash=sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc \ + --hash=sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa \ + --hash=sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9 \ + --hash=sha256:1e8ac75d72fa3775e0b7cb7e4629cec13b7514d928d15ef8ea06bca03ef01cae \ + --hash=sha256:1ef99f0456d3d46a50945c98de1774da86f8e992ab5c77865ea8b8195341fc19 \ + --hash=sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d \ + --hash=sha256:23b6b24d74478dc833444cbd927c338349d6ae852ba53a0d02a2de1fce45b96e \ + --hash=sha256:252098c8c7a873e17dd696ed98bbe91dbacd571da4b87df3736768efa7a792e4 \ + --hash=sha256:257f26fed7d7ff59921b78244f3cd93ed2af1800ff048c33f624c87475819dd7 \ + --hash=sha256:2c322db9c8c89009a990ef07c3bcc9f011a3269bc06782f916cd3d9eed7c9312 \ + --hash=sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92 \ + --hash=sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31 \ + 
--hash=sha256:31a9a6f775f9bcd865d88ee350f0ffb0e25936a7f930ca98995c05abf1faf21c \ + --hash=sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f \ + --hash=sha256:34a7f768e3f985abdb42841e20e17b330ad3aaf4bb7e7aeeb73db2e70f077b99 \ + --hash=sha256:3653fad4fe3ed447a596ae8638b437f827234f01a8cd801842e43f3d0a6b281b \ + --hash=sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15 \ + --hash=sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392 \ + --hash=sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f \ + --hash=sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8 \ + --hash=sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491 \ + --hash=sha256:4ca4c094de7771a98d7fbd67d9e5dbf1eb73efa4f744a730437d8a3a5cf994f0 \ + --hash=sha256:511729f456829ef86ac41ca78c63a5cb55240ed23b4b737faca0eb1abb1c41bc \ + --hash=sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0 \ + --hash=sha256:585f3b2a80fbd26b048a0be90c5aae8f06605d3c92615911c3a2b03a8a3b796f \ + --hash=sha256:5b413b0b1bfd94dbf4023ad6945889f374cd24e3f62de58d6bb102c4d9ae534a \ + --hash=sha256:5d8d01eac18c423815ed4f4a2ec3b439d654e55ee4ad610e153cf02faf67ea40 \ + --hash=sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927 \ + --hash=sha256:6cf8fd4c04756b6b60146d98cd8a77d0cdae0e1ca20329da2ac85eed779b6849 \ + --hash=sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce \ + --hash=sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14 \ + --hash=sha256:70bfc5f2c318afece2f5838ea5e4c3febada0be750fcf4775641052bbba14d05 \ + --hash=sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c \ + --hash=sha256:74d77e25adda8581ffc1c720f1c81ca082921329452eba58b16233ab1842141c \ + --hash=sha256:78deba4d8f9590fe4dae384aeff04082510a709957e968753ff3c48399f6f92a \ + --hash=sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc \ + 
--hash=sha256:88ab34806dea0671532d3f82d82b85e8fc23d7b2dd12fa837978dad9bb392a34 \ + --hash=sha256:8999f965f922ae054125286faf9f11bc6932184b93011d138925a1773830bbe9 \ + --hash=sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096 \ + --hash=sha256:939578d9d8fd4299220161fdd76e86c6a251987476f5243e8864a7844476ba14 \ + --hash=sha256:96b2b3d1a83ad55310de8c7b4a2d04d9277d5591f40761274856635acc5fcb30 \ + --hash=sha256:a2d08ac246bb48479170408d6c19f6385fa743e7157d716e144cad849b2dd94b \ + --hash=sha256:b256ee2e749283ef3ddcff51a675ff43798d92d746d1a6e4631bf8c707d22d0b \ + --hash=sha256:b5e3b2d152e74e100a9e9573837aba24aab611d39428ded46f4e4022ea7d1942 \ + --hash=sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db \ + --hash=sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5 \ + --hash=sha256:c60e092517a73c632ec38e290eba714e9627abe9d301c8c8a12ec32c314a2a4b \ + --hash=sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce \ + --hash=sha256:c6e490913a46fa054e03699c70019ab869e990270597018cef1d8562132c2669 \ + --hash=sha256:c6f162aabe9a91a309510d74eeb6507fab5fff92337a15acbe77753d88d9dcf0 \ + --hash=sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018 \ + --hash=sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93 \ + --hash=sha256:cc9370a2da1ac13f0153780040f465839e6cccb4a1e44810124b4e22483c93fe \ + --hash=sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049 \ + --hash=sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a \ + --hash=sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef \ + --hash=sha256:d0e909868420b7049dafd3a31d45125b31143eec59235311fc4c57ea26a4acd2 \ + --hash=sha256:d22dbedd33326a4a5190dd4fe9e9e693ef12160c77382d9e87919bce54f3d4ca \ + --hash=sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16 \ + --hash=sha256:d79c198e27580c8e958906f803e63cddb77653731be08851c7df0b1a14a8fc0f \ + 
--hash=sha256:d95bfb53c211b57198bb91c46dd5a2d8018b3af446583aab40074bf7988401cb \ + --hash=sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1 \ + --hash=sha256:ec557499516fc90fd374bf2e32349a2887a876fbf162c160e3c01b6849eaf557 \ + --hash=sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37 \ + --hash=sha256:fb731e5deb0c7ef82d698b0f4c5bb724633ee2a489401594c5c88b02e6cb15f7 \ + --hash=sha256:fb7f67a1bfa6e40b438170ebdc8158b78dc465a5a67b6dde178a46987b244a72 \ + --hash=sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c \ + --hash=sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9 # via requests -click==8.1.8 \ - --hash=sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2 \ - --hash=sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a +click==8.2.1 \ + --hash=sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202 \ + --hash=sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b # via uvicorn colorama==0.4.6 \ --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 # via sphinx-autobuild -contourpy==1.3.2 \ - --hash=sha256:0475b1f6604896bc7c53bb070e355e9321e1bc0d381735421a2d2068ec56531f \ - --hash=sha256:106fab697af11456fcba3e352ad50effe493a90f893fca6c2ca5c033820cea92 \ - --hash=sha256:107ba8a6a7eec58bb475329e6d3b95deba9440667c4d62b9b6063942b61d7f16 \ - --hash=sha256:15ce6ab60957ca74cff444fe66d9045c1fd3e92c8936894ebd1f3eef2fff075f \ - --hash=sha256:1c48188778d4d2f3d48e4643fb15d8608b1d01e4b4d6b0548d9b336c28fc9b6f \ - --hash=sha256:3859783aefa2b8355697f16642695a5b9792e7a46ab86da1118a4a23a51a33d7 \ - --hash=sha256:3d80b2c0300583228ac98d0a927a1ba6a2ba6b8a742463c564f1d419ee5b211e \ - --hash=sha256:3f9e896f447c5c8618f1edb2bafa9a4030f22a575ec418ad70611450720b5b08 \ - 
--hash=sha256:434f0adf84911c924519d2b08fc10491dd282b20bdd3fa8f60fd816ea0b48841 \ - --hash=sha256:49b65a95d642d4efa8f64ba12558fcb83407e58a2dfba9d796d77b63ccfcaff5 \ - --hash=sha256:4caf2bcd2969402bf77edc4cb6034c7dd7c0803213b3523f111eb7460a51b8d2 \ - --hash=sha256:532fd26e715560721bb0d5fc7610fce279b3699b018600ab999d1be895b09415 \ - --hash=sha256:5ebac872ba09cb8f2131c46b8739a7ff71de28a24c869bcad554477eb089a878 \ - --hash=sha256:5f5964cdad279256c084b69c3f412b7801e15356b16efa9d78aa974041903da0 \ - --hash=sha256:65a887a6e8c4cd0897507d814b14c54a8c2e2aa4ac9f7686292f9769fcf9a6ab \ - --hash=sha256:6a37a2fb93d4df3fc4c0e363ea4d16f83195fc09c891bc8ce072b9d084853445 \ - --hash=sha256:70771a461aaeb335df14deb6c97439973d253ae70660ca085eec25241137ef43 \ - --hash=sha256:71e2bd4a1c4188f5c2b8d274da78faab884b59df20df63c34f74aa1813c4427c \ - --hash=sha256:745b57db7758f3ffc05a10254edd3182a2a83402a89c00957a8e8a22f5582823 \ - --hash=sha256:78e9253c3de756b3f6a5174d024c4835acd59eb3f8e2ca13e775dbffe1558f69 \ - --hash=sha256:82199cb78276249796419fe36b7386bd8d2cc3f28b3bc19fe2454fe2e26c4c15 \ - --hash=sha256:8b7fc0cd78ba2f4695fd0a6ad81a19e7e3ab825c31b577f384aa9d7817dc3bef \ - --hash=sha256:8c5acb8dddb0752bf252e01a3035b21443158910ac16a3b0d20e7fed7d534ce5 \ - --hash=sha256:8c942a01d9163e2e5cfb05cb66110121b8d07ad438a17f9e766317bcb62abf73 \ - --hash=sha256:8d2e74acbcba3bfdb6d9d8384cdc4f9260cae86ed9beee8bd5f54fee49a430b9 \ - --hash=sha256:90df94c89a91b7362e1142cbee7568f86514412ab8a2c0d0fca72d7e91b62912 \ - --hash=sha256:970e9173dbd7eba9b4e01aab19215a48ee5dd3f43cef736eebde064a171f89a5 \ - --hash=sha256:977e98a0e0480d3fe292246417239d2d45435904afd6d7332d8455981c408b85 \ - --hash=sha256:9be002b31c558d1ddf1b9b415b162c603405414bacd6932d031c5b5a8b757f0d \ - --hash=sha256:ad687a04bc802cbe8b9c399c07162a3c35e227e2daccf1668eb1f278cb698631 \ - --hash=sha256:b4f54d6a2defe9f257327b0f243612dd051cc43825587520b1bf74a31e2f6ef2 \ - --hash=sha256:b6945942715a034c671b7fc54f9588126b0b8bf23db2696e3ca8328f3ff0ab54 \ - 
--hash=sha256:b7cd50c38f500bbcc9b6a46643a40e0913673f869315d8e70de0438817cb7773 \ - --hash=sha256:ba38e3f9f330af820c4b27ceb4b9c7feee5fe0493ea53a8720f4792667465934 \ - --hash=sha256:c440093bbc8fc21c637c03bafcbef95ccd963bc6e0514ad887932c18ca2a759a \ - --hash=sha256:c49f73e61f1f774650a55d221803b101d966ca0c5a2d6d5e4320ec3997489441 \ - --hash=sha256:c66c4906cdbc50e9cba65978823e6e00b45682eb09adbb78c9775b74eb222422 \ - --hash=sha256:c6c4639a9c22230276b7bffb6a850dfc8258a2521305e1faefe804d006b2e532 \ - --hash=sha256:c85bb486e9be652314bb5b9e2e3b0d1b2e643d5eec4992c0fbe8ac71775da739 \ - --hash=sha256:cc829960f34ba36aad4302e78eabf3ef16a3a100863f0d4eeddf30e8a485a03b \ - --hash=sha256:cdd22595308f53ef2f891040ab2b93d79192513ffccbd7fe19be7aa773a5e09f \ - --hash=sha256:d0e589ae0d55204991450bb5c23f571c64fe43adaa53f93fc902a84c96f52fe1 \ - --hash=sha256:d14f12932a8d620e307f715857107b1d1845cc44fdb5da2bc8e850f5ceba9f87 \ - --hash=sha256:d32530b534e986374fc19eaa77fcb87e8a99e5431499949b828312bdcd20ac52 \ - --hash=sha256:d6658ccc7251a4433eebd89ed2672c2ed96fba367fd25ca9512aa92a4b46c4f1 \ - --hash=sha256:d91a3ccc7fea94ca0acab82ceb77f396d50a1f67412efe4c526f5d20264e6ecd \ - --hash=sha256:dc41ba0714aa2968d1f8674ec97504a8f7e334f48eeacebcaa6256213acb0989 \ - --hash=sha256:de39db2604ae755316cb5967728f4bea92685884b1e767b7c24e983ef5f771cb \ - --hash=sha256:de425af81b6cea33101ae95ece1f696af39446db9682a0b56daaa48cfc29f38f \ - --hash=sha256:ded1706ed0c1049224531b81128efbd5084598f18d8a2d9efae833edbd2b40ad \ - --hash=sha256:e1578f7eafce927b168752ed7e22646dad6cd9bca673c60bff55889fa236ebf9 \ - --hash=sha256:e259bced5549ac64410162adc973c5e2fb77f04df4a439d00b478e57a0e65512 \ - --hash=sha256:e298e7e70cf4eb179cc1077be1c725b5fd131ebc81181bf0c03525c8abc297fd \ - --hash=sha256:eab0f6db315fa4d70f1d8ab514e527f0366ec021ff853d7ed6a2d33605cf4b83 \ - --hash=sha256:f26b383144cf2d2c29f01a1e8170f50dacf0eac02d64139dcd709a8ac4eb3cfe \ - --hash=sha256:f939a054192ddc596e031e50bb13b657ce318cf13d264f095ce9db7dc6ae81c0 \ - 
--hash=sha256:fd93cc7f3139b6dd7aab2f26a90dde0aa9fc264dbf70f6740d498a70b860b82c +contourpy==1.3.3 \ + --hash=sha256:023b44101dfe49d7d53932be418477dba359649246075c996866106da069af69 \ + --hash=sha256:07ce5ed73ecdc4a03ffe3e1b3e3c1166db35ae7584be76f65dbbe28a7791b0cc \ + --hash=sha256:083e12155b210502d0bca491432bb04d56dc3432f95a979b429f2848c3dbe880 \ + --hash=sha256:0bf67e0e3f482cb69779dd3061b534eb35ac9b17f163d851e2a547d56dba0a3a \ + --hash=sha256:0c1fc238306b35f246d61a1d416a627348b5cf0648648a031e14bb8705fcdfe8 \ + --hash=sha256:13b68d6a62db8eafaebb8039218921399baf6e47bf85006fd8529f2a08ef33fc \ + --hash=sha256:15ff10bfada4bf92ec8b31c62bf7c1834c244019b4a33095a68000d7075df470 \ + --hash=sha256:177fb367556747a686509d6fef71d221a4b198a3905fe824430e5ea0fda54eb5 \ + --hash=sha256:1cadd8b8969f060ba45ed7c1b714fe69185812ab43bd6b86a9123fe8f99c3263 \ + --hash=sha256:1fd43c3be4c8e5fd6e4f2baeae35ae18176cf2e5cced681cca908addf1cdd53b \ + --hash=sha256:22e9b1bd7a9b1d652cd77388465dc358dafcd2e217d35552424aa4f996f524f5 \ + --hash=sha256:23416f38bfd74d5d28ab8429cc4d63fa67d5068bd711a85edb1c3fb0c3e2f381 \ + --hash=sha256:283edd842a01e3dcd435b1c5116798d661378d83d36d337b8dde1d16a5fc9ba3 \ + --hash=sha256:2a2a8b627d5cc6b7c41a4beff6c5ad5eb848c88255fda4a8745f7e901b32d8e4 \ + --hash=sha256:2b7e9480ffe2b0cd2e787e4df64270e3a0440d9db8dc823312e2c940c167df7e \ + --hash=sha256:322ab1c99b008dad206d406bb61d014cf0174df491ae9d9d0fac6a6fda4f977f \ + --hash=sha256:33c82d0138c0a062380332c861387650c82e4cf1747aaa6938b9b6516762e772 \ + --hash=sha256:348ac1f5d4f1d66d3322420f01d42e43122f43616e0f194fc1c9f5d830c5b286 \ + --hash=sha256:3519428f6be58431c56581f1694ba8e50626f2dd550af225f82fb5f5814d2a42 \ + --hash=sha256:3c30273eb2a55024ff31ba7d052dde990d7d8e5450f4bbb6e913558b3d6c2301 \ + --hash=sha256:3d1a3799d62d45c18bafd41c5fa05120b96a28079f2393af559b843d1a966a77 \ + --hash=sha256:451e71b5a7d597379ef572de31eeb909a87246974d960049a9848c3bc6c41bf7 \ + 
--hash=sha256:459c1f020cd59fcfe6650180678a9993932d80d44ccde1fa1868977438f0b411 \ + --hash=sha256:4d00e655fcef08aba35ec9610536bfe90267d7ab5ba944f7032549c55a146da1 \ + --hash=sha256:4debd64f124ca62069f313a9cb86656ff087786016d76927ae2cf37846b006c9 \ + --hash=sha256:4feffb6537d64b84877da813a5c30f1422ea5739566abf0bd18065ac040e120a \ + --hash=sha256:50ed930df7289ff2a8d7afeb9603f8289e5704755c7e5c3bbd929c90c817164b \ + --hash=sha256:51e79c1f7470158e838808d4a996fa9bac72c498e93d8ebe5119bc1e6becb0db \ + --hash=sha256:556dba8fb6f5d8742f2923fe9457dbdd51e1049c4a43fd3986a0b14a1d815fc6 \ + --hash=sha256:598c3aaece21c503615fd59c92a3598b428b2f01bfb4b8ca9c4edeecc2438620 \ + --hash=sha256:5ed3657edf08512fc3fe81b510e35c2012fbd3081d2e26160f27ca28affec989 \ + --hash=sha256:626d60935cf668e70a5ce6ff184fd713e9683fb458898e4249b63be9e28286ea \ + --hash=sha256:644a6853d15b2512d67881586bd03f462c7ab755db95f16f14d7e238f2852c67 \ + --hash=sha256:655456777ff65c2c548b7c454af9c6f33f16c8884f11083244b5819cc214f1b5 \ + --hash=sha256:66c8a43a4f7b8df8b71ee1840e4211a3c8d93b214b213f590e18a1beca458f7d \ + --hash=sha256:6afc576f7b33cf00996e5c1102dc2a8f7cc89e39c0b55df93a0b78c1bd992b36 \ + --hash=sha256:6c3d53c796f8647d6deb1abe867daeb66dcc8a97e8455efa729516b997b8ed99 \ + --hash=sha256:709a48ef9a690e1343202916450bc48b9e51c049b089c7f79a267b46cffcdaa1 \ + --hash=sha256:70f9aad7de812d6541d29d2bbf8feb22ff7e1c299523db288004e3157ff4674e \ + --hash=sha256:8153b8bfc11e1e4d75bcb0bff1db232f9e10b274e0929de9d608027e0d34ff8b \ + --hash=sha256:87acf5963fc2b34825e5b6b048f40e3635dd547f590b04d2ab317c2619ef7ae8 \ + --hash=sha256:88df9880d507169449d434c293467418b9f6cbe82edd19284aa0409e7fdb933d \ + --hash=sha256:929ddf8c4c7f348e4c0a5a3a714b5c8542ffaa8c22954862a46ca1813b667ee7 \ + --hash=sha256:92d9abc807cf7d0e047b95ca5d957cf4792fcd04e920ca70d48add15c1a90ea7 \ + --hash=sha256:95b181891b4c71de4bb404c6621e7e2390745f887f2a026b2d99e92c17892339 \ + --hash=sha256:9e999574eddae35f1312c2b4b717b7885d4edd6cb46700e04f7f02db454e67c1 \ + 
--hash=sha256:a15459b0f4615b00bbd1e91f1b9e19b7e63aea7483d03d804186f278c0af2659 \ + --hash=sha256:a22738912262aa3e254e4f3cb079a95a67132fc5a063890e224393596902f5a4 \ + --hash=sha256:ab2fd90904c503739a75b7c8c5c01160130ba67944a7b77bbf36ef8054576e7f \ + --hash=sha256:ab3074b48c4e2cf1a960e6bbeb7f04566bf36b1861d5c9d4d8ac04b82e38ba20 \ + --hash=sha256:afe5a512f31ee6bd7d0dda52ec9864c984ca3d66664444f2d72e0dc4eb832e36 \ + --hash=sha256:b08a32ea2f8e42cf1d4be3169a98dd4be32bafe4f22b6c4cb4ba810fa9e5d2cb \ + --hash=sha256:b20c7c9a3bf701366556e1b1984ed2d0cedf999903c51311417cf5f591d8c78d \ + --hash=sha256:b2e8faa0ed68cb29af51edd8e24798bb661eac3bd9f65420c1887b6ca89987c8 \ + --hash=sha256:b7301b89040075c30e5768810bc96a8e8d78085b47d8be6e4c3f5a0b4ed478a0 \ + --hash=sha256:b7448cb5a725bb1e35ce88771b86fba35ef418952474492cf7c764059933ff8b \ + --hash=sha256:ca0fdcd73925568ca027e0b17ab07aad764be4706d0a925b89227e447d9737b7 \ + --hash=sha256:ca658cd1a680a5c9ea96dc61cdbae1e85c8f25849843aa799dfd3cb370ad4fbe \ + --hash=sha256:cbedb772ed74ff5be440fa8eee9bd49f64f6e3fc09436d9c7d8f1c287b121d77 \ + --hash=sha256:cd5dfcaeb10f7b7f9dc8941717c6c2ade08f587be2226222c12b25f0483ed497 \ + --hash=sha256:cf9022ef053f2694e31d630feaacb21ea24224be1c3ad0520b13d844274614fd \ + --hash=sha256:d002b6f00d73d69333dac9d0b8d5e84d9724ff9ef044fd63c5986e62b7c9e1b1 \ + --hash=sha256:d06bb1f751ba5d417047db62bca3c8fde202b8c11fb50742ab3ab962c81e8216 \ + --hash=sha256:d304906ecc71672e9c89e87c4675dc5c2645e1f4269a5063b99b0bb29f232d13 \ + --hash=sha256:e4e6b05a45525357e382909a4c1600444e2a45b4795163d3b22669285591c1ae \ + --hash=sha256:e74a9a0f5e3fff48fb5a7f2fd2b9b70a3fe014a67522f79b7cca4c0c7e43c9ae \ + --hash=sha256:ea37e7b45949df430fe649e5de8351c423430046a2af20b1c1961cae3afcda77 \ + --hash=sha256:f64836de09927cba6f79dcd00fdd7d5329f3fccc633468507079c829ca4db4e3 \ + --hash=sha256:fd6ec6be509c787f1caf6b247f0b1ca598bef13f4ddeaa126b7658215529ba0f \ + --hash=sha256:fd907ae12cd483cd83e414b12941c632a969171bf90fc937d0c9f268a31cafff \ + 
--hash=sha256:fd914713266421b7536de2bfa8181aa8c699432b6763a0ea64195ebe28bff6a9 \ + --hash=sha256:fde6c716d51c04b1c25d0b90364d0be954624a0ee9d60e23e850e8d48353d07a # via matplotlib -cryptography==44.0.2 \ - --hash=sha256:04abd71114848aa25edb28e225ab5f268096f44cf0127f3d36975bdf1bdf3390 \ - --hash=sha256:0529b1d5a0105dd3731fa65680b45ce49da4d8115ea76e9da77a875396727b41 \ - --hash=sha256:1bc312dfb7a6e5d66082c87c34c8a62176e684b6fe3d90fcfe1568de675e6688 \ - --hash=sha256:268e4e9b177c76d569e8a145a6939eca9a5fec658c932348598818acf31ae9a5 \ - --hash=sha256:29ecec49f3ba3f3849362854b7253a9f59799e3763b0c9d0826259a88efa02f1 \ - --hash=sha256:2bf7bf75f7df9715f810d1b038870309342bff3069c5bd8c6b96128cb158668d \ - --hash=sha256:3b721b8b4d948b218c88cb8c45a01793483821e709afe5f622861fc6182b20a7 \ - --hash=sha256:3c00b6b757b32ce0f62c574b78b939afab9eecaf597c4d624caca4f9e71e7843 \ - --hash=sha256:3dc62975e31617badc19a906481deacdeb80b4bb454394b4098e3f2525a488c5 \ - --hash=sha256:4973da6ca3db4405c54cd0b26d328be54c7747e89e284fcff166132eb7bccc9c \ - --hash=sha256:4e389622b6927d8133f314949a9812972711a111d577a5d1f4bee5e58736b80a \ - --hash=sha256:51e4de3af4ec3899d6d178a8c005226491c27c4ba84101bfb59c901e10ca9f79 \ - --hash=sha256:5f6f90b72d8ccadb9c6e311c775c8305381db88374c65fa1a68250aa8a9cb3a6 \ - --hash=sha256:6210c05941994290f3f7f175a4a57dbbb2afd9273657614c506d5976db061181 \ - --hash=sha256:6f101b1f780f7fc613d040ca4bdf835c6ef3b00e9bd7125a4255ec574c7916e4 \ - --hash=sha256:7bdcd82189759aba3816d1f729ce42ffded1ac304c151d0a8e89b9996ab863d5 \ - --hash=sha256:7ca25849404be2f8e4b3c59483d9d3c51298a22c1c61a0e84415104dacaf5562 \ - --hash=sha256:81276f0ea79a208d961c433a947029e1a15948966658cf6710bbabb60fcc2639 \ - --hash=sha256:8cadc6e3b5a1f144a039ea08a0bdb03a2a92e19c46be3285123d32029f40a922 \ - --hash=sha256:8e0ddd63e6bf1161800592c71ac794d3fb8001f2caebe0966e77c5234fa9efc3 \ - --hash=sha256:909c97ab43a9c0c0b0ada7a1281430e4e5ec0458e6d9244c0e821bbf152f061d \ - 
--hash=sha256:96e7a5e9d6e71f9f4fca8eebfd603f8e86c5225bb18eb621b2c1e50b290a9471 \ - --hash=sha256:9a1e657c0f4ea2a23304ee3f964db058c9e9e635cc7019c4aa21c330755ef6fd \ - --hash=sha256:9eb9d22b0a5d8fd9925a7764a054dca914000607dff201a24c791ff5c799e1fa \ - --hash=sha256:af4ff3e388f2fa7bff9f7f2b31b87d5651c45731d3e8cfa0944be43dff5cfbdb \ - --hash=sha256:b042d2a275c8cee83a4b7ae30c45a15e6a4baa65a179a0ec2d78ebb90e4f6699 \ - --hash=sha256:bc821e161ae88bfe8088d11bb39caf2916562e0a2dc7b6d56714a48b784ef0bb \ - --hash=sha256:c505d61b6176aaf982c5717ce04e87da5abc9a36a5b39ac03905c4aafe8de7aa \ - --hash=sha256:c63454aa261a0cf0c5b4718349629793e9e634993538db841165b3df74f37ec0 \ - --hash=sha256:c7362add18b416b69d58c910caa217f980c5ef39b23a38a0880dfd87bdf8cd23 \ - --hash=sha256:d03806036b4f89e3b13b6218fefea8d5312e450935b1a2d55f0524e2ed7c59d9 \ - --hash=sha256:d1b3031093a366ac767b3feb8bcddb596671b3aaff82d4050f984da0c248b615 \ - --hash=sha256:d1c3572526997b36f245a96a2b1713bf79ce99b271bbcf084beb6b9b075f29ea \ - --hash=sha256:efcfe97d1b3c79e486554efddeb8f6f53a4cdd4cf6086642784fa31fc384e1d7 \ - --hash=sha256:f514ef4cd14bb6fb484b4a60203e912cfcb64f2ab139e88c2274511514bf7308 +cryptography==45.0.6 \ + --hash=sha256:00e8724bdad672d75e6f069b27970883179bd472cd24a63f6e620ca7e41cc0c5 \ + --hash=sha256:048e7ad9e08cf4c0ab07ff7f36cc3115924e22e2266e034450a890d9e312dd74 \ + --hash=sha256:0d9ef57b6768d9fa58e92f4947cea96ade1233c0e236db22ba44748ffedca394 \ + --hash=sha256:18f878a34b90d688982e43f4b700408b478102dd58b3e39de21b5ebf6509c301 \ + --hash=sha256:1b7fa6a1c1188c7ee32e47590d16a5a0646270921f8020efc9a511648e1b2e08 \ + --hash=sha256:20ae4906a13716139d6d762ceb3e0e7e110f7955f3bc3876e3a07f5daadec5f3 \ + --hash=sha256:20d15aed3ee522faac1a39fbfdfee25d17b1284bafd808e1640a74846d7c4d1b \ + --hash=sha256:2384f2ab18d9be88a6e4f8972923405e2dbb8d3e16c6b43f15ca491d7831bd18 \ + --hash=sha256:275ba5cc0d9e320cd70f8e7b96d9e59903c815ca579ab96c1e37278d231fc402 \ + 
--hash=sha256:2dac5ec199038b8e131365e2324c03d20e97fe214af051d20c49db129844e8b3 \ + --hash=sha256:31a2b9a10530a1cb04ffd6aa1cd4d3be9ed49f7d77a4dafe198f3b382f41545c \ + --hash=sha256:3436128a60a5e5490603ab2adbabc8763613f638513ffa7d311c900a8349a2a0 \ + --hash=sha256:3b5bf5267e98661b9b888a9250d05b063220dfa917a8203744454573c7eb79db \ + --hash=sha256:3de77e4df42ac8d4e4d6cdb342d989803ad37707cf8f3fbf7b088c9cbdd46427 \ + --hash=sha256:44647c5d796f5fc042bbc6d61307d04bf29bccb74d188f18051b635f20a9c75f \ + --hash=sha256:550ae02148206beb722cfe4ef0933f9352bab26b087af00e48fdfb9ade35c5b3 \ + --hash=sha256:599c8d7df950aa68baa7e98f7b73f4f414c9f02d0e8104a30c0182a07732638b \ + --hash=sha256:5b64e668fc3528e77efa51ca70fadcd6610e8ab231e3e06ae2bab3b31c2b8ed9 \ + --hash=sha256:5bd6020c80c5b2b2242d6c48487d7b85700f5e0038e67b29d706f98440d66eb5 \ + --hash=sha256:5c966c732cf6e4a276ce83b6e4c729edda2df6929083a952cc7da973c539c719 \ + --hash=sha256:629127cfdcdc6806dfe234734d7cb8ac54edaf572148274fa377a7d3405b0043 \ + --hash=sha256:705bb7c7ecc3d79a50f236adda12ca331c8e7ecfbea51edd931ce5a7a7c4f012 \ + --hash=sha256:780c40fb751c7d2b0c6786ceee6b6f871e86e8718a8ff4bc35073ac353c7cd02 \ + --hash=sha256:7a3085d1b319d35296176af31c90338eeb2ddac8104661df79f80e1d9787b8b2 \ + --hash=sha256:826b46dae41a1155a0c0e66fafba43d0ede1dc16570b95e40c4d83bfcf0a451d \ + --hash=sha256:833dc32dfc1e39b7376a87b9a6a4288a10aae234631268486558920029b086ec \ + --hash=sha256:cc4d66f5dc4dc37b89cfef1bd5044387f7a1f6f0abb490815628501909332d5d \ + --hash=sha256:d063341378d7ee9c91f9d23b431a3502fc8bfacd54ef0a27baa72a0843b29159 \ + --hash=sha256:e2a21a8eda2d86bb604934b6b37691585bd095c1f788530c1fcefc53a82b3453 \ + --hash=sha256:e40b80ecf35ec265c452eea0ba94c9587ca763e739b8e559c128d23bff7ebbbf \ + --hash=sha256:e5b3dda1b00fb41da3af4c5ef3f922a200e33ee5ba0f0bc9ecf0b0c173958385 \ + --hash=sha256:ea3c42f2016a5bbf71825537c2ad753f2870191134933196bee408aac397b3d9 \ + --hash=sha256:eccddbd986e43014263eda489abbddfbc287af5cddfd690477993dbb31e31016 \ + 
--hash=sha256:ee411a1b977f40bd075392c80c10b58025ee5c6b47a822a33c1198598a7a5f05 \ + --hash=sha256:f4028f29a9f38a2025abedb2e409973709c660d44319c61762202206ed577c42 \ + --hash=sha256:f68f833a9d445cc49f01097d95c83a850795921b3f7cc6488731e69bde3288da \ + --hash=sha256:fc022c1fa5acff6def2fc6d7819bbbd31ccddfe67d075331a65d9cfb28a20983 # via pyjwt cycler==0.12.1 \ --hash=sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30 \ --hash=sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c # via matplotlib -debugpy==1.8.14 \ - --hash=sha256:0f920c7f9af409d90f5fd26e313e119d908b0dd2952c2393cd3247a462331f15 \ - --hash=sha256:1b2ac8c13b2645e0b1eaf30e816404990fbdb168e193322be8f545e8c01644a9 \ - --hash=sha256:281d44d248a0e1791ad0eafdbbd2912ff0de9eec48022a5bfbc332957487ed3f \ - --hash=sha256:329a15d0660ee09fec6786acdb6e0443d595f64f5d096fc3e3ccf09a4259033f \ - --hash=sha256:3784ec6e8600c66cbdd4ca2726c72d8ca781e94bce2f396cc606d458146f8f4e \ - --hash=sha256:3d937d93ae4fa51cdc94d3e865f535f185d5f9748efb41d0d49e33bf3365bd79 \ - --hash=sha256:413512d35ff52c2fb0fd2d65e69f373ffd24f0ecb1fac514c04a668599c5ce7f \ - --hash=sha256:4c9156f7524a0d70b7a7e22b2e311d8ba76a15496fb00730e46dcdeedb9e1eea \ - --hash=sha256:5349b7c3735b766a281873fbe32ca9cca343d4cc11ba4a743f84cb854339ff35 \ - --hash=sha256:5aa56ef8538893e4502a7d79047fe39b1dae08d9ae257074c6464a7b290b806f \ - --hash=sha256:5cd9a579d553b6cb9759a7908a41988ee6280b961f24f63336835d9418216a20 \ - --hash=sha256:684eaf43c95a3ec39a96f1f5195a7ff3d4144e4a18d69bb66beeb1a6de605d6e \ - --hash=sha256:7118d462fe9724c887d355eef395fae68bc764fd862cdca94e70dcb9ade8a23d \ - --hash=sha256:7816acea4a46d7e4e50ad8d09d963a680ecc814ae31cdef3622eb05ccacf7b01 \ - --hash=sha256:7cd287184318416850aa8b60ac90105837bb1e59531898c07569d197d2ed5322 \ - --hash=sha256:8899c17920d089cfa23e6005ad9f22582fd86f144b23acb9feeda59e84405b84 \ - --hash=sha256:93fee753097e85623cab1c0e6a68c76308cd9f13ffdf44127e6fab4fbf024339 \ - 
--hash=sha256:b1528cfee6c1b1c698eb10b6b096c598738a8238822d218173d21c3086de8123 \ - --hash=sha256:b44985f97cc3dd9d52c42eb59ee9d7ee0c4e7ecd62bca704891f997de4cef23d \ - --hash=sha256:c442f20577b38cc7a9aafecffe1094f78f07fb8423c3dddb384e6b8f49fd2987 \ - --hash=sha256:c99295c76161ad8d507b413cd33422d7c542889fbb73035889420ac1fad354f2 \ - --hash=sha256:cf431c343a99384ac7eab2f763980724834f933a271e90496944195318c619e2 \ - --hash=sha256:d235e4fa78af2de4e5609073972700523e372cf5601742449970110d565ca28c \ - --hash=sha256:d5582bcbe42917bc6bbe5c12db1bffdf21f6bfc28d4554b738bf08d50dc0c8c3 \ - --hash=sha256:f117dedda6d969c5c9483e23f573b38f4e39412845c7bc487b6f2648df30fe84 \ - --hash=sha256:f6bb5c0dcf80ad5dbc7b7d6eac484e2af34bdacdf81df09b6a3e62792b722826 +debugpy==1.8.16 \ + --hash=sha256:135ccd2b1161bade72a7a099c9208811c137a150839e970aeaf121c2467debe8 \ + --hash=sha256:19c9521962475b87da6f673514f7fd610328757ec993bf7ec0d8c96f9a325f9e \ + --hash=sha256:211238306331a9089e253fd997213bc4a4c65f949271057d6695953254095376 \ + --hash=sha256:2801329c38f77c47976d341d18040a9ac09d0c71bf2c8b484ad27c74f83dc36f \ + --hash=sha256:2a3958fb9c2f40ed8ea48a0d34895b461de57a1f9862e7478716c35d76f56c65 \ + --hash=sha256:31e69a1feb1cf6b51efbed3f6c9b0ef03bc46ff050679c4be7ea6d2e23540870 \ + --hash=sha256:64473c4a306ba11a99fe0bb14622ba4fbd943eb004847d9b69b107bde45aa9ea \ + --hash=sha256:67371b28b79a6a12bcc027d94a06158f2fde223e35b5c4e0783b6f9d3b39274a \ + --hash=sha256:687c7ab47948697c03b8f81424aa6dc3f923e6ebab1294732df1ca9773cc67bc \ + --hash=sha256:70f5fcd6d4d0c150a878d2aa37391c52de788c3dc680b97bdb5e529cb80df87a \ + --hash=sha256:75f204684581e9ef3dc2f67687c3c8c183fde2d6675ab131d94084baf8084121 \ + --hash=sha256:833a61ed446426e38b0dd8be3e9d45ae285d424f5bf6cd5b2b559c8f12305508 \ + --hash=sha256:85df3adb1de5258dca910ae0bb185e48c98801ec15018a263a92bb06be1c8787 \ + --hash=sha256:8624a6111dc312ed8c363347a0b59c5acc6210d897e41a7c069de3c53235c9a6 \ + 
--hash=sha256:88eb9ffdfb59bf63835d146c183d6dba1f722b3ae2a5f4b9fc03e925b3358922 \ + --hash=sha256:a2ba6fc5d7c4bc84bcae6c5f8edf5988146e55ae654b1bb36fecee9e5e77e9e2 \ + --hash=sha256:b202e2843e32e80b3b584bcebfe0e65e0392920dc70df11b2bfe1afcb7a085e4 \ + --hash=sha256:b2abae6dd02523bec2dee16bd6b0781cccb53fd4995e5c71cc659b5f45581898 \ + --hash=sha256:b5aea1083f6f50023e8509399d7dc6535a351cc9f2e8827d1e093175e4d9fa4c \ + --hash=sha256:bee89e948bc236a5c43c4214ac62d28b29388453f5fd328d739035e205365f0b \ + --hash=sha256:c2c47c2e52b40449552843b913786499efcc3dbc21d6c49287d939cd0dbc49fd \ + --hash=sha256:cf358066650439847ec5ff3dae1da98b5461ea5da0173d93d5e10f477c94609a \ + --hash=sha256:d58c48d8dbbbf48a3a3a638714a2d16de537b0dace1e3432b8e92c57d43707f8 \ + --hash=sha256:e5ca7314042e8a614cc2574cd71f6ccd7e13a9708ce3c6d8436959eae56f2378 \ + --hash=sha256:f8340a3ac2ed4f5da59e064aa92e39edd52729a88fbde7bbaa54e08249a04493 \ + --hash=sha256:fee6db83ea5c978baf042440cfe29695e1a5d48a30147abf4c3be87513609817 # via -r src/requirements.in -deprecated==1.2.18 \ - --hash=sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d \ - --hash=sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec - # via pygithub docutils==0.21.2 \ --hash=sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f \ --hash=sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2 @@ -368,57 +368,65 @@ esbonio==0.16.5 \ --hash=sha256:04ba926e3603f7b1fde1abc690b47afd60749b64b1029b6bce8e1de0bb284921 \ --hash=sha256:acab2e16c6cf8f7232fb04e0d48514ce50566516b1f6fcf669ccf2f247e8b10f # via -r src/requirements.in -fonttools==4.57.0 \ - --hash=sha256:03290e818782e7edb159474144fca11e36a8ed6663d1fcbd5268eb550594fd8e \ - --hash=sha256:0425c2e052a5f1516c94e5855dbda706ae5a768631e9fcc34e57d074d1b65b92 \ - --hash=sha256:05efceb2cb5f6ec92a4180fcb7a64aa8d3385fd49cfbbe459350229d1974f0b1 \ - --hash=sha256:17168a4670bbe3775f3f3f72d23ee786bd965395381dfbb70111e25e81505b9d \ - 
--hash=sha256:3122c604a675513c68bd24c6a8f9091f1c2376d18e8f5fe5a101746c81b3e98f \ - --hash=sha256:34687a5d21f1d688d7d8d416cb4c5b9c87fca8a1797ec0d74b9fdebfa55c09ab \ - --hash=sha256:3871349303bdec958360eedb619169a779956503ffb4543bb3e6211e09b647c4 \ - --hash=sha256:39acf68abdfc74e19de7485f8f7396fa4d2418efea239b7061d6ed6a2510c746 \ - --hash=sha256:3cf97236b192a50a4bf200dc5ba405aa78d4f537a2c6e4c624bb60466d5b03bd \ - --hash=sha256:408ce299696012d503b714778d89aa476f032414ae57e57b42e4b92363e0b8ef \ - --hash=sha256:44c26a311be2ac130f40a96769264809d3b0cb297518669db437d1cc82974888 \ - --hash=sha256:46370ac47a1e91895d40e9ad48effbe8e9d9db1a4b80888095bc00e7beaa042f \ - --hash=sha256:4dea5893b58d4637ffa925536462ba626f8a1b9ffbe2f5c272cdf2c6ebadb817 \ - --hash=sha256:51d8482e96b28fb28aa8e50b5706f3cee06de85cbe2dce80dbd1917ae22ec5a6 \ - --hash=sha256:541cb48191a19ceb1a2a4b90c1fcebd22a1ff7491010d3cf840dd3a68aebd654 \ - --hash=sha256:579ba873d7f2a96f78b2e11028f7472146ae181cae0e4d814a37a09e93d5c5cc \ - --hash=sha256:57e30241524879ea10cdf79c737037221f77cc126a8cdc8ff2c94d4a522504b9 \ - --hash=sha256:69ab81b66ebaa8d430ba56c7a5f9abe0183afefd3a2d6e483060343398b13fb1 \ - --hash=sha256:6e3e1ec10c29bae0ea826b61f265ec5c858c5ba2ce2e69a71a62f285cf8e4595 \ - --hash=sha256:727ece10e065be2f9dd239d15dd5d60a66e17eac11aea47d447f9f03fdbc42de \ - --hash=sha256:7339e6a3283e4b0ade99cade51e97cde3d54cd6d1c3744459e886b66d630c8b3 \ - --hash=sha256:767604f244dc17c68d3e2dbf98e038d11a18abc078f2d0f84b6c24571d9c0b13 \ - --hash=sha256:7a64edd3ff6a7f711a15bd70b4458611fb240176ec11ad8845ccbab4fe6745db \ - --hash=sha256:81aa97669cd726349eb7bd43ca540cf418b279ee3caba5e2e295fb4e8f841c02 \ - --hash=sha256:84c41ba992df5b8d680b89fd84c6a1f2aca2b9f1ae8a67400c8930cd4ea115f6 \ - --hash=sha256:84fd56c78d431606332a0627c16e2a63d243d0d8b05521257d77c6529abe14d8 \ - --hash=sha256:889e45e976c74abc7256d3064aa7c1295aa283c6bb19810b9f8b604dfe5c7f31 \ - --hash=sha256:8e2e12d0d862f43d51e5afb8b9751c77e6bec7d2dc00aad80641364e9df5b199 \ - 
--hash=sha256:967b65232e104f4b0f6370a62eb33089e00024f2ce143aecbf9755649421c683 \ - --hash=sha256:9d077f909f2343daf4495ba22bb0e23b62886e8ec7c109ee8234bdbd678cf344 \ - --hash=sha256:9d57b4e23ebbe985125d3f0cabbf286efa191ab60bbadb9326091050d88e8213 \ - --hash=sha256:a1968f2a2003c97c4ce6308dc2498d5fd4364ad309900930aa5a503c9851aec8 \ - --hash=sha256:a2a722c0e4bfd9966a11ff55c895c817158fcce1b2b6700205a376403b546ad9 \ - --hash=sha256:a97bb05eb24637714a04dee85bdf0ad1941df64fe3b802ee4ac1c284a5f97b7c \ - --hash=sha256:aff40f8ac6763d05c2c8f6d240c6dac4bb92640a86d9b0c3f3fff4404f34095c \ - --hash=sha256:babe8d1eb059a53e560e7bf29f8e8f4accc8b6cfb9b5fd10e485bde77e71ef41 \ - --hash=sha256:bbceffc80aa02d9e8b99f2a7491ed8c4a783b2fc4020119dc405ca14fb5c758c \ - --hash=sha256:c59375e85126b15a90fcba3443eaac58f3073ba091f02410eaa286da9ad80ed8 \ - --hash=sha256:ca2aed95855506b7ae94e8f1f6217b7673c929e4f4f1217bcaa236253055cb36 \ - --hash=sha256:cc066cb98b912f525ae901a24cd381a656f024f76203bc85f78fcc9e66ae5aec \ - --hash=sha256:cdef9a056c222d0479a1fdb721430f9efd68268014c54e8166133d2643cb05d9 \ - --hash=sha256:d07f1b64008e39fceae7aa99e38df8385d7d24a474a8c9872645c4397b674481 \ - --hash=sha256:d639397de852f2ccfb3134b152c741406752640a266d9c1365b0f23d7b88077f \ - --hash=sha256:dff02c5c8423a657c550b48231d0a48d7e2b2e131088e55983cfe74ccc2c7cc9 \ - --hash=sha256:e952c684274a7714b3160f57ec1d78309f955c6335c04433f07d36c5eb27b1f9 \ - --hash=sha256:ea1e9e43ca56b0c12440a7c689b1350066595bebcaa83baad05b8b2675129d98 \ - --hash=sha256:f022601f3ee9e1f6658ed6d184ce27fa5216cee5b82d279e0f0bde5deebece72 \ - --hash=sha256:f0e9618630edd1910ad4f07f60d77c184b2f572c8ee43305ea3265675cbbfe7e \ - --hash=sha256:f1d6bc9c23356908db712d282acb3eebd4ae5ec6d8b696aa40342b1d84f8e9e3 \ - --hash=sha256:f4376819c1c778d59e0a31db5dc6ede854e9edf28bbfa5b756604727f7f800ac +fonttools==4.59.1 \ + --hash=sha256:01158376b8a418a0bae9625c476cebfcfcb5e6761e9d243b219cd58341e7afbb \ + 
--hash=sha256:02e4fdf27c550dded10fe038a5981c29f81cb9bc649ff2eaa48e80dab8998f97 \ + --hash=sha256:075f745d539a998cd92cb84c339a82e53e49114ec62aaea8307c80d3ad3aef3a \ + --hash=sha256:0b9e4fa7eaf046ed6ac470f6033d52c052481ff7a6e0a92373d14f556f298dc0 \ + --hash=sha256:1017413cdc8555dce7ee23720da490282ab7ec1cf022af90a241f33f9a49afc4 \ + --hash=sha256:1ab4c1fb45f2984b8b4a3face7cff0f67f9766e9414cbb6fd061e9d77819de98 \ + --hash=sha256:2a2d0d33307f6ad3a2086a95dd607c202ea8852fa9fb52af9b48811154d1428a \ + --hash=sha256:2aeb829ad9d41a2ef17cab8bb5d186049ba38a840f10352e654aa9062ec32dc1 \ + --hash=sha256:2beb5bfc4887a3130f8625349605a3a45fe345655ce6031d1bac11017454b943 \ + --hash=sha256:39dfd42cc2dc647b2c5469bc7a5b234d9a49e72565b96dd14ae6f11c2c59ef15 \ + --hash=sha256:412a5fd6345872a7c249dac5bcce380393f40c1c316ac07f447bc17d51900922 \ + --hash=sha256:419f16d750d78e6d704bfe97b48bba2f73b15c9418f817d0cb8a9ca87a5b94bf \ + --hash=sha256:42052b56d176f8b315fbc09259439c013c0cb2109df72447148aeda677599612 \ + --hash=sha256:43ab814bbba5f02a93a152ee61a04182bb5809bd2bc3609f7822e12c53ae2c91 \ + --hash=sha256:43d177cd0e847ea026fedd9f099dc917da136ed8792d142298a252836390c478 \ + --hash=sha256:4909cce2e35706f3d18c54d3dcce0414ba5e0fb436a454dffec459c61653b513 \ + --hash=sha256:4f04c3ffbfa0baafcbc550657cf83657034eb63304d27b05cff1653b448ccff6 \ + --hash=sha256:5265bc52ed447187d39891b5f21d7217722735d0de9fe81326566570d12851a9 \ + --hash=sha256:57a3708ca6bfccb790f585fa6d8f29432ec329618a09ff94c16bcb3c55994643 \ + --hash=sha256:58a8844f96cff35860647a65345bfca87f47a2494bfb4bef754e58c082511443 \ + --hash=sha256:5b9b4c35b3be45e5bc774d3fc9608bbf4f9a8d371103b858c80edbeed31dd5aa \ + --hash=sha256:5c6d8d773470a5107052874341ed3c487c16ecd179976d81afed89dea5cd7406 \ + --hash=sha256:5d29ab70658d2ec19422b25e6ace00a0b0ae4181ee31e03335eaef53907d2d83 \ + --hash=sha256:5f3f021cea6e36410874763f4a517a5e2d6ac36ca8f95521f3a9fdaad0fe73dc \ + --hash=sha256:6065fdec8ff44c32a483fd44abe5bcdb40dd5e2571a5034b555348f2b3a52cea \ + 
--hash=sha256:647db657073672a8330608970a984d51573557f328030566521bc03415535042 \ + --hash=sha256:652159e8214eb4856e8387ebcd6b6bd336ee258cbeb639c8be52005b122b9609 \ + --hash=sha256:729367c91eb1ee84e61a733acc485065a00590618ca31c438e7dd4d600c01486 \ + --hash=sha256:74995b402ad09822a4c8002438e54940d9f1ecda898d2bb057729d7da983e4cb \ + --hash=sha256:8156b11c0d5405810d216f53907bd0f8b982aa5f1e7e3127ab3be1a4062154ff \ + --hash=sha256:8387876a8011caec52d327d5e5bca705d9399ec4b17afb8b431ec50d47c17d23 \ + --hash=sha256:89d9957b54246c6251345297dddf77a84d2c19df96af30d2de24093bbdf0528b \ + --hash=sha256:8c8758a7d97848fc8b514b3d9b4cb95243714b2f838dde5e1e3c007375de6214 \ + --hash=sha256:8ee39da0227950f88626c91e219659e6cd725ede826b1c13edd85fc4cec9bbe6 \ + --hash=sha256:8f8ef66ac6db450193ed150e10b3b45dde7aded10c5d279968bc63368027f62b \ + --hash=sha256:94f9721a564978a10d5c12927f99170d18e9a32e5a727c61eae56f956a4d118b \ + --hash=sha256:a960b09ff50c2e87864e83f352e5a90bcf1ad5233df579b1124660e1643de272 \ + --hash=sha256:ac216a2980a2d2b3b88c68a24f8a9bfb203e2490e991b3238502ad8f1e7bfed0 \ + --hash=sha256:b11bc177a0d428b37890825d7d025040d591aa833f85f8d8878ed183354f47df \ + --hash=sha256:bcd52eaa5c4c593ae9f447c1d13e7e4a00ca21d755645efa660b6999425b3c88 \ + --hash=sha256:bf5fb864f80061a40c1747e0dbc4f6e738de58dd6675b07eb80bd06a93b063c4 \ + --hash=sha256:c29ea087843e27a7cffc78406d32a5abf166d92afde7890394e9e079c9b4dbe9 \ + --hash=sha256:c2b0597522d4c5bb18aa5cf258746a2d4a90f25878cbe865e4d35526abd1b9fc \ + --hash=sha256:c536f8a852e8d3fa71dde1ec03892aee50be59f7154b533f0bf3c1174cfd5126 \ + --hash=sha256:c735e385e30278c54f43a0d056736942023c9043f84ee1021eff9fd616d17693 \ + --hash=sha256:c866eef7a0ba320486ade6c32bfc12813d1a5db8567e6904fb56d3d40acc5116 \ + --hash=sha256:cf7c5089d37787387123f1cb8f1793a47c5e1e3d1e4e7bfbc1cc96e0f925eabe \ + --hash=sha256:d31dc137ed8ec71dbc446949eba9035926e6e967b90378805dcf667ff57cabb1 \ + --hash=sha256:d5c3bfdc9663f3d4b565f9cb3b8c1efb3e178186435b45105bde7328cfddd7fe \ + 
--hash=sha256:d601b153e51a5a6221f0d4ec077b6bfc6ac35bfe6c19aeaa233d8990b2b71726 \ + --hash=sha256:e1ca10da138c300f768bb68e40e5b20b6ecfbd95f91aac4cc15010b6b9d65455 \ + --hash=sha256:e3680884189e2b7c3549f6d304376e64711fd15118e4b1ae81940cb6b1eaa267 \ + --hash=sha256:e54437651e1440ee53a95e6ceb6ee440b67a3d348c76f45f4f48de1a5ecab019 \ + --hash=sha256:e90a89e52deb56b928e761bb5b5f65f13f669bfd96ed5962975debea09776a23 \ + --hash=sha256:e9ad4ce044e3236f0814c906ccce8647046cc557539661e35211faadf76f283b \ + --hash=sha256:ea03f1da0d722fe3c2278a05957e6550175571a4894fbf9d178ceef4a3783d2b \ + --hash=sha256:efbec204fa9f877641747f2d9612b2b656071390d7a7ef07a9dbf0ecf9c7195c \ + --hash=sha256:fb13823a74b3a9204a8ed76d3d6d5ec12e64cc5bc44914eb9ff1cdac04facd43 # via matplotlib h11==0.16.0 \ --hash=sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1 \ @@ -446,95 +454,116 @@ jinja2==3.1.6 \ # via # myst-parser # sphinx -jsonschema==4.23.0 \ - --hash=sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4 \ - --hash=sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566 +jsonschema==4.25.1 \ + --hash=sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63 \ + --hash=sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85 # via sphinx-needs jsonschema-specifications==2025.4.1 \ --hash=sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af \ --hash=sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608 # via jsonschema -kiwisolver==1.4.8 \ - --hash=sha256:01c3d31902c7db5fb6182832713d3b4122ad9317c2c5877d0539227d96bb2e50 \ - --hash=sha256:034d2c891f76bd3edbdb3ea11140d8510dca675443da7304205a2eaa45d8334c \ - --hash=sha256:085940635c62697391baafaaeabdf3dd7a6c3643577dde337f4d66eba021b2b8 \ - --hash=sha256:08e77738ed7538f036cd1170cbed942ef749137b1311fa2bbe2a7fda2f6bf3cc \ - --hash=sha256:111793b232842991be367ed828076b03d96202c19221b5ebab421ce8bcad016f \ - 
--hash=sha256:11e1022b524bd48ae56c9b4f9296bce77e15a2e42a502cceba602f804b32bb79 \ - --hash=sha256:151dffc4865e5fe6dafce5480fab84f950d14566c480c08a53c663a0020504b6 \ - --hash=sha256:16523b40aab60426ffdebe33ac374457cf62863e330a90a0383639ce14bf44b2 \ - --hash=sha256:1732e065704b47c9afca7ffa272f845300a4eb959276bf6970dc07265e73b605 \ - --hash=sha256:1c8ceb754339793c24aee1c9fb2485b5b1f5bb1c2c214ff13368431e51fc9a09 \ - --hash=sha256:23454ff084b07ac54ca8be535f4174170c1094a4cff78fbae4f73a4bcc0d4dab \ - --hash=sha256:23d5f023bdc8c7e54eb65f03ca5d5bb25b601eac4d7f1a042888a1f45237987e \ - --hash=sha256:257af1622860e51b1a9d0ce387bf5c2c4f36a90594cb9514f55b074bcc787cfc \ - --hash=sha256:286b18e86682fd2217a48fc6be6b0f20c1d0ed10958d8dc53453ad58d7be0bf8 \ - --hash=sha256:291331973c64bb9cce50bbe871fb2e675c4331dab4f31abe89f175ad7679a4d7 \ - --hash=sha256:2f0121b07b356a22fb0414cec4666bbe36fd6d0d759db3d37228f496ed67c880 \ - --hash=sha256:3452046c37c7692bd52b0e752b87954ef86ee2224e624ef7ce6cb21e8c41cc1b \ - --hash=sha256:34d142fba9c464bc3bbfeff15c96eab0e7310343d6aefb62a79d51421fcc5f1b \ - --hash=sha256:369b75d40abedc1da2c1f4de13f3482cb99e3237b38726710f4a793432b1c5ff \ - --hash=sha256:36dbbfd34838500a31f52c9786990d00150860e46cd5041386f217101350f0d3 \ - --hash=sha256:370fd2df41660ed4e26b8c9d6bbcad668fbe2560462cba151a721d49e5b6628c \ - --hash=sha256:3a96c0e790ee875d65e340ab383700e2b4891677b7fcd30a699146f9384a2bb0 \ - --hash=sha256:3b9b4d2892fefc886f30301cdd80debd8bb01ecdf165a449eb6e78f79f0fabd6 \ - --hash=sha256:3cd3bc628b25f74aedc6d374d5babf0166a92ff1317f46267f12d2ed54bc1d30 \ - --hash=sha256:3ddc373e0eef45b59197de815b1b28ef89ae3955e7722cc9710fb91cd77b7f47 \ - --hash=sha256:4191ee8dfd0be1c3666ccbac178c5a05d5f8d689bbe3fc92f3c4abec817f8fe0 \ - --hash=sha256:54a62808ac74b5e55a04a408cda6156f986cefbcf0ada13572696b507cc92fa1 \ - --hash=sha256:577facaa411c10421314598b50413aa1ebcf5126f704f1e5d72d7e4e9f020d90 \ - --hash=sha256:641f2ddf9358c80faa22e22eb4c9f54bd3f0e442e038728f500e3b978d00aa7d \ - 
--hash=sha256:65ea09a5a3faadd59c2ce96dc7bf0f364986a315949dc6374f04396b0d60e09b \ - --hash=sha256:68269e60ee4929893aad82666821aaacbd455284124817af45c11e50a4b42e3c \ - --hash=sha256:69b5637c3f316cab1ec1c9a12b8c5f4750a4c4b71af9157645bf32830e39c03a \ - --hash=sha256:7506488470f41169b86d8c9aeff587293f530a23a23a49d6bc64dab66bedc71e \ - --hash=sha256:768cade2c2df13db52475bd28d3a3fac8c9eff04b0e9e2fda0f3760f20b3f7fc \ - --hash=sha256:77e6f57a20b9bd4e1e2cedda4d0b986ebd0216236f0106e55c28aea3d3d69b16 \ - --hash=sha256:782bb86f245ec18009890e7cb8d13a5ef54dcf2ebe18ed65f795e635a96a1c6a \ - --hash=sha256:7a3ad337add5148cf51ce0b55642dc551c0b9d6248458a757f98796ca7348712 \ - --hash=sha256:7cd2785b9391f2873ad46088ed7599a6a71e762e1ea33e87514b1a441ed1da1c \ - --hash=sha256:7e9a60b50fe8b2ec6f448fe8d81b07e40141bfced7f896309df271a0b92f80f3 \ - --hash=sha256:84a2f830d42707de1d191b9490ac186bf7997a9495d4e9072210a1296345f7dc \ - --hash=sha256:856b269c4d28a5c0d5e6c1955ec36ebfd1651ac00e1ce0afa3e28da95293b561 \ - --hash=sha256:858416b7fb777a53f0c59ca08190ce24e9abbd3cffa18886a5781b8e3e26f65d \ - --hash=sha256:87b287251ad6488e95b4f0b4a79a6d04d3ea35fde6340eb38fbd1ca9cd35bbbc \ - --hash=sha256:88c6f252f6816a73b1f8c904f7bbe02fd67c09a69f7cb8a0eecdbf5ce78e63db \ - --hash=sha256:893f5525bb92d3d735878ec00f781b2de998333659507d29ea4466208df37bed \ - --hash=sha256:89c107041f7b27844179ea9c85d6da275aa55ecf28413e87624d033cf1f6b751 \ - --hash=sha256:918139571133f366e8362fa4a297aeba86c7816b7ecf0bc79168080e2bd79957 \ - --hash=sha256:99cea8b9dd34ff80c521aef46a1dddb0dcc0283cf18bde6d756f1e6f31772165 \ - --hash=sha256:a17b7c4f5b2c51bb68ed379defd608a03954a1845dfed7cc0117f1cc8a9b7fd2 \ - --hash=sha256:a3c44cb68861de93f0c4a8175fbaa691f0aa22550c331fefef02b618a9dcb476 \ - --hash=sha256:a4d3601908c560bdf880f07d94f31d734afd1bb71e96585cace0e38ef44c6d84 \ - --hash=sha256:a5ce1e481a74b44dd5e92ff03ea0cb371ae7a0268318e202be06c8f04f4f1246 \ - --hash=sha256:a66f60f8d0c87ab7f59b6fb80e642ebb29fec354a4dfad687ca4092ae69d04f4 \ - 
--hash=sha256:b21dbe165081142b1232a240fc6383fd32cdd877ca6cc89eab93e5f5883e1c25 \ - --hash=sha256:b47a465040146981dc9db8647981b8cb96366fbc8d452b031e4f8fdffec3f26d \ - --hash=sha256:b5773efa2be9eb9fcf5415ea3ab70fc785d598729fd6057bea38d539ead28271 \ - --hash=sha256:b83dc6769ddbc57613280118fb4ce3cd08899cc3369f7d0e0fab518a7cf37fdb \ - --hash=sha256:bade438f86e21d91e0cf5dd7c0ed00cda0f77c8c1616bd83f9fc157fa6760d31 \ - --hash=sha256:bcb1ebc3547619c3b58a39e2448af089ea2ef44b37988caf432447374941574e \ - --hash=sha256:be4816dc51c8a471749d664161b434912eee82f2ea66bd7628bd14583a833e85 \ - --hash=sha256:c07b29089b7ba090b6f1a669f1411f27221c3662b3a1b7010e67b59bb5a6f10b \ - --hash=sha256:c2b9a96e0f326205af81a15718a9073328df1173a2619a68553decb7097fd5d7 \ - --hash=sha256:c5020c83e8553f770cb3b5fc13faac40f17e0b205bd237aebd21d53d733adb03 \ - --hash=sha256:c72941acb7b67138f35b879bbe85be0f6c6a70cab78fe3ef6db9c024d9223e5b \ - --hash=sha256:c8bf637892dc6e6aad2bc6d4d69d08764166e5e3f69d469e55427b6ac001b19d \ - --hash=sha256:cc978a80a0db3a66d25767b03688f1147a69e6237175c0f4ffffaaedf744055a \ - --hash=sha256:ce2cf1e5688edcb727fdf7cd1bbd0b6416758996826a8be1d958f91880d0809d \ - --hash=sha256:d47b28d1dfe0793d5e96bce90835e17edf9a499b53969b03c6c47ea5985844c3 \ - --hash=sha256:d47cfb2650f0e103d4bf68b0b5804c68da97272c84bb12850d877a95c056bd67 \ - --hash=sha256:d5536185fce131780ebd809f8e623bf4030ce1b161353166c49a3c74c287897f \ - --hash=sha256:d561d2d8883e0819445cfe58d7ddd673e4015c3c57261d7bdcd3710d0d14005c \ - --hash=sha256:d6af5e8815fd02997cb6ad9bbed0ee1e60014438ee1a5c2444c96f87b8843502 \ - --hash=sha256:d6d6bd87df62c27d4185de7c511c6248040afae67028a8a22012b010bc7ad062 \ - --hash=sha256:dace81d28c787956bfbfbbfd72fdcef014f37d9b48830829e488fdb32b49d954 \ - --hash=sha256:e063ef9f89885a1d68dd8b2e18f5ead48653176d10a0e324e3b0030e3a69adeb \ - --hash=sha256:e7a019419b7b510f0f7c9dceff8c5eae2392037eae483a7f9162625233802b0a \ - --hash=sha256:eaa973f1e05131de5ff3569bbba7f5fd07ea0595d3870ed4a526d486fe57fa1b \ - 
--hash=sha256:eb158fe28ca0c29f2260cca8c43005329ad58452c36f0edf298204de32a9a3ed \ - --hash=sha256:ed33ca2002a779a2e20eeb06aea7721b6e47f2d4b8a8ece979d8ba9e2a167e34 \ - --hash=sha256:fc2ace710ba7c1dfd1a3b42530b62b9ceed115f19a1656adefce7b1782a37794 +kiwisolver==1.4.9 \ + --hash=sha256:0749fd8f4218ad2e851e11cc4dc05c7cbc0cbc4267bdfdb31782e65aace4ee9c \ + --hash=sha256:0763515d4df10edf6d06a3c19734e2566368980d21ebec439f33f9eb936c07b7 \ + --hash=sha256:0856e241c2d3df4efef7c04a1e46b1936b6120c9bcf36dd216e3acd84bc4fb21 \ + --hash=sha256:0a590506f303f512dff6b7f75fd2fd18e16943efee932008fe7140e5fa91d80e \ + --hash=sha256:0ab74e19f6a2b027ea4f845a78827969af45ce790e6cb3e1ebab71bdf9f215ff \ + --hash=sha256:0ae37737256ba2de764ddc12aed4956460277f00c4996d51a197e72f62f5eec7 \ + --hash=sha256:0e4e2bf29574a6a7b7f6cb5fa69293b9f96c928949ac4a53ba3f525dffb87f9c \ + --hash=sha256:15163165efc2f627eb9687ea5f3a28137217d217ac4024893d753f46bce9de26 \ + --hash=sha256:17680d737d5335b552994a2008fab4c851bcd7de33094a82067ef3a576ff02fa \ + --hash=sha256:1a12cf6398e8a0a001a059747a1cbf24705e18fe413bc22de7b3d15c67cffe3f \ + --hash=sha256:1b11d6a633e4ed84fc0ddafd4ebfd8ea49b3f25082c04ad12b8315c11d504dc1 \ + --hash=sha256:1fa333e8b2ce4d9660f2cda9c0e1b6bafcfb2457a9d259faa82289e73ec24891 \ + --hash=sha256:2327a4a30d3ee07d2fbe2e7933e8a37c591663b96ce42a00bc67461a87d7df77 \ + --hash=sha256:2405a7d98604b87f3fc28b1716783534b1b4b8510d8142adca34ee0bc3c87543 \ + --hash=sha256:2489e4e5d7ef9a1c300a5e0196e43d9c739f066ef23270607d45aba368b91f2d \ + --hash=sha256:24c175051354f4a28c5d6a31c93906dc653e2bf234e8a4bbfb964892078898ce \ + --hash=sha256:2635d352d67458b66fd0667c14cb1d4145e9560d503219034a18a87e971ce4f3 \ + --hash=sha256:2c1a4f57df73965f3f14df20b80ee29e6a7930a57d2d9e8491a25f676e197c60 \ + --hash=sha256:2c93f00dcba2eea70af2be5f11a830a742fe6b579a1d4e00f47760ef13be247a \ + --hash=sha256:39a219e1c81ae3b103643d2aedb90f1ef22650deb266ff12a19e7773f3e5f089 \ + 
--hash=sha256:3b3115b2581ea35bb6d1f24a4c90af37e5d9b49dcff267eeed14c3893c5b86ab \ + --hash=sha256:40092754720b174e6ccf9e845d0d8c7d8e12c3d71e7fc35f55f3813e96376f78 \ + --hash=sha256:412f287c55a6f54b0650bd9b6dce5aceddb95864a1a90c87af16979d37c89771 \ + --hash=sha256:464415881e4801295659462c49461a24fb107c140de781d55518c4b80cb6790f \ + --hash=sha256:497d05f29a1300d14e02e6441cf0f5ee81c1ff5a304b0d9fb77423974684e08b \ + --hash=sha256:4a2899935e724dd1074cb568ce7ac0dce28b2cd6ab539c8e001a8578eb106d14 \ + --hash=sha256:4a48a2ce79d65d363597ef7b567ce3d14d68783d2b2263d98db3d9477805ba32 \ + --hash=sha256:4d1d9e582ad4d63062d34077a9a1e9f3c34088a2ec5135b1f7190c07cf366527 \ + --hash=sha256:52a15b0f35dad39862d376df10c5230155243a2c1a436e39eb55623ccbd68185 \ + --hash=sha256:540c7c72324d864406a009d72f5d6856f49693db95d1fbb46cf86febef873634 \ + --hash=sha256:5656aa670507437af0207645273ccdfee4f14bacd7f7c67a4306d0dcaeaf6eed \ + --hash=sha256:5a0f2724dfd4e3b3ac5a82436a8e6fd16baa7d507117e4279b660fe8ca38a3a1 \ + --hash=sha256:60c439763a969a6af93b4881db0eed8fadf93ee98e18cbc35bc8da868d0c4f0c \ + --hash=sha256:61874cdb0a36016354853593cffc38e56fc9ca5aa97d2c05d3dcf6922cd55a11 \ + --hash=sha256:67bb8b474b4181770f926f7b7d2f8c0248cbcb78b660fdd41a47054b28d2a752 \ + --hash=sha256:720e05574713db64c356e86732c0f3c5252818d05f9df320f0ad8380641acea5 \ + --hash=sha256:72d0eb9fba308b8311685c2268cf7d0a0639a6cd027d8128659f72bdd8a024b4 \ + --hash=sha256:767c23ad1c58c9e827b649a9ab7809fd5fd9db266a9cf02b0e926ddc2c680d58 \ + --hash=sha256:77937e5e2a38a7b48eef0585114fe7930346993a88060d0bf886086d2aa49ef5 \ + --hash=sha256:7a08b491ec91b1d5053ac177afe5290adacf1f0f6307d771ccac5de30592d198 \ + --hash=sha256:7b4da0d01ac866a57dd61ac258c5607b4cd677f63abaec7b148354d2b2cdd536 \ + --hash=sha256:7cf974dd4e35fa315563ac99d6287a1024e4dc2077b8a7d7cd3d2fb65d283134 \ + --hash=sha256:84fd60810829c27ae375114cd379da1fa65e6918e1da405f356a775d49a62bcf \ + --hash=sha256:858e4c22fb075920b96a291928cb7dea5644e94c0ee4fcd5af7e865655e4ccf2 \ + 
--hash=sha256:85b5352f94e490c028926ea567fc569c52ec79ce131dadb968d3853e809518c2 \ + --hash=sha256:85bd218b5ecfbee8c8a82e121802dcb519a86044c9c3b2e4aef02fa05c6da370 \ + --hash=sha256:8a1f570ce4d62d718dce3f179ee78dac3b545ac16c0c04bb363b7607a949c0d1 \ + --hash=sha256:8fdca1def57a2e88ef339de1737a1449d6dbf5fab184c54a1fca01d541317154 \ + --hash=sha256:90f47e70293fc3688b71271100a1a5453aa9944a81d27ff779c108372cf5567b \ + --hash=sha256:92a2f997387a1b79a75e7803aa7ded2cfbe2823852ccf1ba3bcf613b62ae3197 \ + --hash=sha256:9928fe1eb816d11ae170885a74d074f57af3a0d65777ca47e9aeb854a1fba386 \ + --hash=sha256:9af39d6551f97d31a4deebeac6f45b156f9755ddc59c07b402c148f5dbb6482a \ + --hash=sha256:9cf554f21be770f5111a1690d42313e140355e687e05cf82cb23d0a721a64a48 \ + --hash=sha256:a30fd6fdef1430fd9e1ba7b3398b5ee4e2887783917a687d86ba69985fb08748 \ + --hash=sha256:a31d512c812daea6d8b3be3b2bfcbeb091dbb09177706569bcfc6240dcf8b41c \ + --hash=sha256:a5d0432ccf1c7ab14f9949eec60c5d1f924f17c037e9f8b33352fa05799359b8 \ + --hash=sha256:a60ea74330b91bd22a29638940d115df9dc00af5035a9a2a6ad9399ffb4ceca5 \ + --hash=sha256:ac5a486ac389dddcc5bef4f365b6ae3ffff2c433324fb38dd35e3fab7c957999 \ + --hash=sha256:aedff62918805fb62d43a4aa2ecd4482c380dc76cd31bd7c8878588a61bd0369 \ + --hash=sha256:b34e51affded8faee0dfdb705416153819d8ea9250bbbf7ea1b249bdeb5f1122 \ + --hash=sha256:b4b4d74bda2b8ebf4da5bd42af11d02d04428b2c32846e4c2c93219df8a7987b \ + --hash=sha256:b67e6efbf68e077dd71d1a6b37e43e1a99d0bff1a3d51867d45ee8908b931098 \ + --hash=sha256:b78efa4c6e804ecdf727e580dbb9cba85624d2e1c6b5cb059c66290063bd99a9 \ + --hash=sha256:bb4ae2b57fc1d8cbd1cf7b1d9913803681ffa903e7488012be5b76dedf49297f \ + --hash=sha256:bdd1a81a1860476eb41ac4bc1e07b3f07259e6d55bbf739b79c8aaedcf512799 \ + --hash=sha256:bdee92c56a71d2b24c33a7d4c2856bd6419d017e08caa7802d2963870e315028 \ + --hash=sha256:be6a04e6c79819c9a8c2373317d19a96048e5a3f90bec587787e86a1153883c2 \ + --hash=sha256:bfc08add558155345129c7803b3671cf195e6a56e7a12f3dde7c57d9b417f525 \ + 
--hash=sha256:c3b22c26c6fd6811b0ae8363b95ca8ce4ea3c202d3d0975b2914310ceb1bcc4d \ + --hash=sha256:c9e7cdf45d594ee04d5be1b24dd9d49f3d1590959b2271fb30b5ca2b262c00fb \ + --hash=sha256:cb27e7b78d716c591e88e0a09a2139c6577865d7f2e152488c2cc6257f460872 \ + --hash=sha256:cc9617b46837c6468197b5945e196ee9ca43057bb7d9d1ae688101e4e1dddf64 \ + --hash=sha256:ccd09f20ccdbbd341b21a67ab50a119b64a403b09288c27481575105283c1586 \ + --hash=sha256:ce6a3a4e106cf35c2d9c4fa17c05ce0b180db622736845d4315519397a77beaf \ + --hash=sha256:d0005b053977e7b43388ddec89fa567f43d4f6d5c2c0affe57de5ebf290dc552 \ + --hash=sha256:d4188e73af84ca82468f09cadc5ac4db578109e52acb4518d8154698d3a87ca2 \ + --hash=sha256:d4efec7bcf21671db6a3294ff301d2fc861c31faa3c8740d1a94689234d1b415 \ + --hash=sha256:d75aa530ccfaa593da12834b86a0724f58bff12706659baa9227c2ccaa06264c \ + --hash=sha256:d84cd4061ae292d8ac367b2c3fa3aad11cb8625a95d135fe93f286f914f3f5a6 \ + --hash=sha256:d8aacd3d4b33b772542b2e01beb50187536967b514b00003bdda7589722d2a64 \ + --hash=sha256:d8fc5c867c22b828001b6a38d2eaeb88160bf5783c6cb4a5e440efc981ce286d \ + --hash=sha256:d976bbb382b202f71c67f77b0ac11244021cfa3f7dfd9e562eefcea2df711548 \ + --hash=sha256:dba5ee5d3981160c28d5490f0d1b7ed730c22470ff7f6cc26cfcfaacb9896a07 \ + --hash=sha256:dc1ae486f9abcef254b5618dfb4113dd49f94c68e3e027d03cf0143f3f772b61 \ + --hash=sha256:dd0a578400839256df88c16abddf9ba14813ec5f21362e1fe65022e00c883d4d \ + --hash=sha256:deed0c7258ceb4c44ad5ec7d9918f9f14fd05b2be86378d86cf50e63d1e7b771 \ + --hash=sha256:e09c2279a4d01f099f52d5c4b3d9e208e91edcbd1a175c9662a8b16e000fece9 \ + --hash=sha256:e2ea9f7ab7fbf18fffb1b5434ce7c69a07582f7acc7717720f1d69f3e806f90c \ + --hash=sha256:e6b93f13371d341afee3be9f7c5964e3fe61d5fa30f6a30eb49856935dfe4fc3 \ + --hash=sha256:eb14a5da6dc7642b0f3a18f13654847cd8b7a2550e2645a5bda677862b03ba16 \ + --hash=sha256:ed0fecd28cc62c54b262e3736f8bb2512d8dcfdc2bcf08be5f47f96bf405b145 \ + --hash=sha256:ede8c6d533bc6601a47ad4046080d36b8fc99f81e6f1c17b0ac3c2dc91ac7611 \ + 
--hash=sha256:efb3a45b35622bb6c16dbfab491a8f5a391fe0e9d45ef32f4df85658232ca0e2 \ + --hash=sha256:f117e1a089d9411663a3207ba874f31be9ac8eaa5b533787024dc07aeb74f464 \ + --hash=sha256:f2ba92255faa7309d06fe44c3a4a97efe1c8d640c2a79a5ef728b685762a6fd2 \ + --hash=sha256:f6008a4919fdbc0b0097089f67a1eb55d950ed7e90ce2cc3e640abadd2757a04 \ + --hash=sha256:f68208a520c3d86ea51acf688a3e3002615a7f0238002cccc17affecc86a8a54 \ + --hash=sha256:f68e4f3eeca8fb22cc3d731f9715a13b652795ef657a13df1ad0c7dc0e9731df \ + --hash=sha256:fb3b8132019ea572f4611d770991000d7f58127560c4889729248eb5852a102f \ + --hash=sha256:fb940820c63a9590d31d88b815e7a3aa5915cad3ce735ab45f0c730b39547de1 \ + --hash=sha256:fc1795ac5cd0510207482c3d1d3ed781143383b8cfd36f5c645f3897ce066220 # via matplotlib lsprotocol==2023.0.1 \ --hash=sha256:c75223c9e4af2f24272b14c6375787438279369236cd568f596d4951052a60f2 \ @@ -610,45 +639,66 @@ markupsafe==3.0.2 \ --hash=sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430 \ --hash=sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50 # via jinja2 -matplotlib==3.10.1 \ - --hash=sha256:01e63101ebb3014e6e9f80d9cf9ee361a8599ddca2c3e166c563628b39305dbb \ - --hash=sha256:02582304e352f40520727984a5a18f37e8187861f954fea9be7ef06569cf85b4 \ - --hash=sha256:057206ff2d6ab82ff3e94ebd94463d084760ca682ed5f150817b859372ec4401 \ - --hash=sha256:0721a3fd3d5756ed593220a8b86808a36c5031fce489adb5b31ee6dbb47dd5b2 \ - --hash=sha256:0f69dc9713e4ad2fb21a1c30e37bd445d496524257dfda40ff4a8efb3604ab5c \ - --hash=sha256:11b65088c6f3dae784bc72e8d039a2580186285f87448babb9ddb2ad0082993a \ - --hash=sha256:1985ad3d97f51307a2cbfc801a930f120def19ba22864182dacef55277102ba6 \ - --hash=sha256:19b06241ad89c3ae9469e07d77efa87041eac65d78df4fcf9cac318028009b01 \ - --hash=sha256:2589659ea30726284c6c91037216f64a506a9822f8e50592d48ac16a2f29e044 \ - --hash=sha256:35e87384ee9e488d8dd5a2dd7baf471178d38b90618d8ea147aced4ab59c9bea \ - 
--hash=sha256:3f06bad951eea6422ac4e8bdebcf3a70c59ea0a03338c5d2b109f57b64eb3972 \ - --hash=sha256:4c59af3e8aca75d7744b68e8e78a669e91ccbcf1ac35d0102a7b1b46883f1dd7 \ - --hash=sha256:4f0647b17b667ae745c13721602b540f7aadb2a32c5b96e924cd4fea5dcb90f1 \ - --hash=sha256:56c5d9fcd9879aa8040f196a235e2dcbdf7dd03ab5b07c0696f80bc6cf04bedd \ - --hash=sha256:5d45d3f5245be5b469843450617dcad9af75ca50568acf59997bed9311131a0b \ - --hash=sha256:648406f1899f9a818cef8c0231b44dcfc4ff36f167101c3fd1c9151f24220fdc \ - --hash=sha256:66e907a06e68cb6cfd652c193311d61a12b54f56809cafbed9736ce5ad92f107 \ - --hash=sha256:7e496c01441be4c7d5f96d4e40f7fca06e20dcb40e44c8daa2e740e1757ad9e6 \ - --hash=sha256:8e875b95ac59a7908978fe307ecdbdd9a26af7fa0f33f474a27fcf8c99f64a19 \ - --hash=sha256:8e8e25b1209161d20dfe93037c8a7f7ca796ec9aa326e6e4588d8c4a5dd1e473 \ - --hash=sha256:a144867dd6bf8ba8cb5fc81a158b645037e11b3e5cf8a50bd5f9917cb863adfe \ - --hash=sha256:a3dfb036f34873b46978f55e240cff7a239f6c4409eac62d8145bad3fc6ba5a3 \ - --hash=sha256:a97ff127f295817bc34517255c9db6e71de8eddaab7f837b7d341dee9f2f587f \ - --hash=sha256:aa3854b5f9473564ef40a41bc922be978fab217776e9ae1545c9b3a5cf2092a3 \ - --hash=sha256:bc411ebd5889a78dabbc457b3fa153203e22248bfa6eedc6797be5df0164dbf9 \ - --hash=sha256:c42eee41e1b60fd83ee3292ed83a97a5f2a8239b10c26715d8a6172226988d7b \ - --hash=sha256:c96f2c2f825d1257e437a1482c5a2cf4fee15db4261bd6fc0750f81ba2b4ba3d \ - --hash=sha256:cfd414bce89cc78a7e1d25202e979b3f1af799e416010a20ab2b5ebb3a02425c \ - --hash=sha256:d0673b4b8f131890eb3a1ad058d6e065fb3c6e71f160089b65f8515373394698 \ - --hash=sha256:d3809916157ba871bcdd33d3493acd7fe3037db5daa917ca6e77975a94cef779 \ - --hash=sha256:dc6ab14a7ab3b4d813b88ba957fc05c79493a037f54e246162033591e770de6f \ - --hash=sha256:e8d2d0e3881b129268585bf4765ad3ee73a4591d77b9a18c214ac7e3a79fb2ba \ - --hash=sha256:e9b4bb156abb8fa5e5b2b460196f7db7264fc6d62678c03457979e7d5254b7be \ - --hash=sha256:ff2ae14910be903f4a24afdbb6d7d3a6c44da210fc7d42790b87aeac92238a16 
+matplotlib==3.10.5 \ + --hash=sha256:00b6feadc28a08bd3c65b2894f56cf3c94fc8f7adcbc6ab4516ae1e8ed8f62e2 \ + --hash=sha256:07442d2692c9bd1cceaa4afb4bbe5b57b98a7599de4dabfcca92d3eea70f9ebe \ + --hash=sha256:080c3676a56b8ee1c762bcf8fca3fe709daa1ee23e6ef06ad9f3fc17332f2d2a \ + --hash=sha256:160e125da27a749481eaddc0627962990f6029811dbeae23881833a011a0907f \ + --hash=sha256:1f5f3ec4c191253c5f2b7c07096a142c6a1c024d9f738247bfc8e3f9643fc975 \ + --hash=sha256:1fc0d2a3241cdcb9daaca279204a3351ce9df3c0e7e621c7e04ec28aaacaca30 \ + --hash=sha256:1ff10ea43288f0c8bab608a305dc6c918cc729d429c31dcbbecde3b9f4d5b569 \ + --hash=sha256:21a95b9bf408178d372814de7baacd61c712a62cae560b5e6f35d791776f6516 \ + --hash=sha256:27f52634315e96b1debbfdc5c416592edcd9c4221bc2f520fd39c33db5d9f202 \ + --hash=sha256:2efaf97d72629e74252e0b5e3c46813e9eeaa94e011ecf8084a971a31a97f40b \ + --hash=sha256:33775bbeb75528555a15ac29396940128ef5613cf9a2d31fb1bfd18b3c0c0903 \ + --hash=sha256:352ed6ccfb7998a00881692f38b4ca083c691d3e275b4145423704c34c909076 \ + --hash=sha256:354204db3f7d5caaa10e5de74549ef6a05a4550fdd1c8f831ab9bca81efd39ed \ + --hash=sha256:3967424121d3a46705c9fa9bdb0931de3228f13f73d7bb03c999c88343a89d89 \ + --hash=sha256:3b80eb8621331449fc519541a7461987f10afa4f9cfd91afcd2276ebe19bd56c \ + --hash=sha256:47a388908e469d6ca2a6015858fa924e0e8a2345a37125948d8e93a91c47933e \ + --hash=sha256:48fe6d47380b68a37ccfcc94f009530e84d41f71f5dae7eda7c4a5a84aa0a674 \ + --hash=sha256:4b4984d5064a35b6f66d2c11d668565f4389b1119cc64db7a4c1725bc11adffc \ + --hash=sha256:4fa40a8f98428f789a9dcacd625f59b7bc4e3ef6c8c7c80187a7a709475cf592 \ + --hash=sha256:525f6e28c485c769d1f07935b660c864de41c37fd716bfa64158ea646f7084bb \ + --hash=sha256:52c6573dfcb7726a9907b482cd5b92e6b5499b284ffacb04ffbfe06b3e568124 \ + --hash=sha256:56da3b102cf6da2776fef3e71cd96fcf22103a13594a18ac9a9b31314e0be154 \ + --hash=sha256:5d4773a6d1c106ca05cb5a5515d277a6bb96ed09e5c8fab6b7741b8fcaa62c8f \ + 
--hash=sha256:64c4535419d5617f7363dad171a5a59963308e0f3f813c4bed6c9e6e2c131512 \ + --hash=sha256:6c49465bf689c4d59d174d0c7795fb42a21d4244d11d70e52b8011987367ac61 \ + --hash=sha256:707f9c292c4cd4716f19ab8a1f93f26598222cd931e0cd98fbbb1c5994bf7667 \ + --hash=sha256:77fab633e94b9da60512d4fa0213daeb76d5a7b05156840c4fd0399b4b818837 \ + --hash=sha256:7e44cada61bec8833c106547786814dd4a266c1b2964fd25daa3804f1b8d4467 \ + --hash=sha256:8a8da0453a7fd8e3da114234ba70c5ba9ef0e98f190309ddfde0f089accd46ea \ + --hash=sha256:8b6b49167d208358983ce26e43aa4196073b4702858670f2eb111f9a10652b4b \ + --hash=sha256:8dee65cb1424b7dc982fe87895b5613d4e691cc57117e8af840da0148ca6c1d7 \ + --hash=sha256:903352681b59f3efbf4546985142a9686ea1d616bb054b09a537a06e4b892ccf \ + --hash=sha256:94986a242747a0605cb3ff1cb98691c736f28a59f8ffe5175acaeb7397c49a5a \ + --hash=sha256:95672a5d628b44207aab91ec20bf59c26da99de12b88f7e0b1fb0a84a86ff959 \ + --hash=sha256:96ef8f5a3696f20f55597ffa91c28e2e73088df25c555f8d4754931515512715 \ + --hash=sha256:97b9d6443419085950ee4a5b1ee08c363e5c43d7176e55513479e53669e88468 \ + --hash=sha256:a17e57e33de901d221a07af32c08870ed4528db0b6059dce7d7e65c1122d4bea \ + --hash=sha256:a23193db2e9d64ece69cac0c8231849db7dd77ce59c7b89948cf9d0ce655a3ce \ + --hash=sha256:a277033048ab22d34f88a3c5243938cef776493f6201a8742ed5f8b553201343 \ + --hash=sha256:a41bcb6e2c8e79dc99c5511ae6f7787d2fb52efd3d805fff06d5d4f667db16b2 \ + --hash=sha256:a6b310f95e1102a8c7c817ef17b60ee5d1851b8c71b63d9286b66b177963039e \ + --hash=sha256:ac3d50760394d78a3c9be6b28318fe22b494c4fcf6407e8fd4794b538251899b \ + --hash=sha256:b072aac0c3ad563a2b3318124756cb6112157017f7431626600ecbe890df57a1 \ + --hash=sha256:b5fa2e941f77eb579005fb804026f9d0a1082276118d01cc6051d0d9626eaa7f \ + --hash=sha256:ba6c3c9c067b83481d647af88b4e441d532acdb5ef22178a14935b0b881188f4 \ + --hash=sha256:c04cba0f93d40e45b3c187c6c52c17f24535b27d545f757a2fffebc06c12b98b \ + --hash=sha256:c61333a8e5e6240e73769d5826b9a31d8b22df76c0778f8480baf1b4b01c9420 \ + 
--hash=sha256:ceefe5d40807d29a66ae916c6a3915d60ef9f028ce1927b84e727be91d884369 \ + --hash=sha256:d52fd5b684d541b5a51fb276b2b97b010c75bee9aa392f96b4a07aeb491e33c7 \ + --hash=sha256:dc88af74e7ba27de6cbe6faee916024ea35d895ed3d61ef6f58c4ce97da7185a \ + --hash=sha256:dcfc39c452c6a9f9028d3e44d2d721484f665304857188124b505b2c95e1eecf \ + --hash=sha256:e4a6470a118a2e93022ecc7d3bd16b3114b2004ea2bf014fff875b3bc99b70c6 \ + --hash=sha256:ee7a09ae2f4676276f5a65bd9f2bd91b4f9fbaedf49f40267ce3f9b448de501f \ + --hash=sha256:ee98a5c5344dc7f48dc261b6ba5d9900c008fc12beb3fa6ebda81273602cc389 \ + --hash=sha256:f6adb644c9d040ffb0d3434e440490a66cf73dbfa118a6f79cd7568431f7a012 # via sphinx-needs -mdit-py-plugins==0.4.2 \ - --hash=sha256:0c673c3f889399a33b95e88d2f0d111b4447bdfea7f237dab2d488f459835636 \ - --hash=sha256:5f2cd1fdb606ddf152d37ec30e46101a60512bc0e5fa1a7002c36647b09e26b5 +mdit-py-plugins==0.5.0 \ + --hash=sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f \ + --hash=sha256:f4918cb50119f50446560513a8e311d574ff6aaed72606ddae6d35716fe809c6 # via myst-parser mdurl==0.1.2 \ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ @@ -671,62 +721,81 @@ nodejs-wheel-binaries==22.16.0 \ # via # -r external/score_tooling+/python_basics/requirements.txt # basedpyright -numpy==2.2.5 \ - --hash=sha256:0255732338c4fdd00996c0421884ea8a3651eea555c3a56b84892b66f696eb70 \ - --hash=sha256:02f226baeefa68f7d579e213d0f3493496397d8f1cff5e2b222af274c86a552a \ - --hash=sha256:059b51b658f4414fff78c6d7b1b4e18283ab5fa56d270ff212d5ba0c561846f4 \ - --hash=sha256:0bcb1d057b7571334139129b7f941588f69ce7c4ed15a9d6162b2ea54ded700c \ - --hash=sha256:0cd48122a6b7eab8f06404805b1bd5856200e3ed6f8a1b9a194f9d9054631beb \ - --hash=sha256:19f4718c9012e3baea91a7dba661dcab2451cda2550678dc30d53acb91a7290f \ - --hash=sha256:1a161c2c79ab30fe4501d5a2bbfe8b162490757cf90b7f05be8b80bc02f7bb8e \ - --hash=sha256:1f4a922da1729f4c40932b2af4fe84909c7a6e167e6e99f71838ce3a29f3fe26 \ - 
--hash=sha256:261a1ef047751bb02f29dfe337230b5882b54521ca121fc7f62668133cb119c9 \ - --hash=sha256:262d23f383170f99cd9191a7c85b9a50970fe9069b2f8ab5d786eca8a675d60b \ - --hash=sha256:2ba321813a00e508d5421104464510cc962a6f791aa2fca1c97b1e65027da80d \ - --hash=sha256:2c1a1c6ccce4022383583a6ded7bbcda22fc635eb4eb1e0a053336425ed36dfa \ - --hash=sha256:352d330048c055ea6db701130abc48a21bec690a8d38f8284e00fab256dc1376 \ - --hash=sha256:369e0d4647c17c9363244f3468f2227d557a74b6781cb62ce57cf3ef5cc7c610 \ - --hash=sha256:36ab5b23915887543441efd0417e6a3baa08634308894316f446027611b53bf1 \ - --hash=sha256:37e32e985f03c06206582a7323ef926b4e78bdaa6915095ef08070471865b906 \ - --hash=sha256:3a801fef99668f309b88640e28d261991bfad9617c27beda4a3aec4f217ea073 \ - --hash=sha256:3d14b17b9be5f9c9301f43d2e2a4886a33b53f4e6fdf9ca2f4cc60aeeee76372 \ - --hash=sha256:422cc684f17bc963da5f59a31530b3936f57c95a29743056ef7a7903a5dbdf88 \ - --hash=sha256:4520caa3807c1ceb005d125a75e715567806fed67e315cea619d5ec6e75a4191 \ - --hash=sha256:47834cde750d3c9f4e52c6ca28a7361859fcaf52695c7dc3cc1a720b8922683e \ - --hash=sha256:47f9ed103af0bc63182609044b0490747e03bd20a67e391192dde119bf43d52f \ - --hash=sha256:498815b96f67dc347e03b719ef49c772589fb74b8ee9ea2c37feae915ad6ebda \ - --hash=sha256:54088a5a147ab71a8e7fdfd8c3601972751ded0739c6b696ad9cb0343e21ab73 \ - --hash=sha256:55f09e00d4dccd76b179c0f18a44f041e5332fd0e022886ba1c0bbf3ea4a18d0 \ - --hash=sha256:5a0ac90e46fdb5649ab6369d1ab6104bfe5854ab19b645bf5cda0127a13034ae \ - --hash=sha256:6411f744f7f20081b1b4e7112e0f4c9c5b08f94b9f086e6f0adf3645f85d3a4d \ - --hash=sha256:6413d48a9be53e183eb06495d8e3b006ef8f87c324af68241bbe7a39e8ff54c3 \ - --hash=sha256:7451f92eddf8503c9b8aa4fe6aa7e87fd51a29c2cfc5f7dbd72efde6c65acf57 \ - --hash=sha256:8b4c0773b6ada798f51f0f8e30c054d32304ccc6e9c5d93d46cb26f3d385ab19 \ - --hash=sha256:8dfa94b6a4374e7851bbb6f35e6ded2120b752b063e6acdd3157e4d2bb922eba \ - --hash=sha256:97c8425d4e26437e65e1d189d22dff4a079b747ff9c2788057bfb8114ce1e133 \ - 
--hash=sha256:9d75f338f5f79ee23548b03d801d28a505198297534f62416391857ea0479571 \ - --hash=sha256:9de6832228f617c9ef45d948ec1cd8949c482238d68b2477e6f642c33a7b0a54 \ - --hash=sha256:a4cbdef3ddf777423060c6f81b5694bad2dc9675f110c4b2a60dc0181543fac7 \ - --hash=sha256:a9c0d994680cd991b1cb772e8b297340085466a6fe964bc9d4e80f5e2f43c291 \ - --hash=sha256:aa70fdbdc3b169d69e8c59e65c07a1c9351ceb438e627f0fdcd471015cd956be \ - --hash=sha256:abe38cd8381245a7f49967a6010e77dbf3680bd3627c0fe4362dd693b404c7f8 \ - --hash=sha256:b13f04968b46ad705f7c8a80122a42ae8f620536ea38cf4bdd374302926424dd \ - --hash=sha256:b4ea7e1cff6784e58fe281ce7e7f05036b3e1c89c6f922a6bfbc0a7e8768adbe \ - --hash=sha256:b6f91524d31b34f4a5fee24f5bc16dcd1491b668798b6d85585d836c1e633a6a \ - --hash=sha256:c26843fd58f65da9491165072da2cccc372530681de481ef670dcc8e27cfb066 \ - --hash=sha256:c42365005c7a6c42436a54d28c43fe0e01ca11eb2ac3cefe796c25a5f98e5e9b \ - --hash=sha256:c8b82a55ef86a2d8e81b63da85e55f5537d2157165be1cb2ce7cfa57b6aef38b \ - --hash=sha256:ced69262a8278547e63409b2653b372bf4baff0870c57efa76c5703fd6543282 \ - --hash=sha256:d2e3bdadaba0e040d1e7ab39db73e0afe2c74ae277f5614dad53eadbecbbb169 \ - --hash=sha256:d403c84991b5ad291d3809bace5e85f4bbf44a04bdc9a88ed2bb1807b3360bb8 \ - --hash=sha256:d7543263084a85fbc09c704b515395398d31d6395518446237eac219eab9e55e \ - --hash=sha256:d8882a829fd779f0f43998e931c466802a77ca1ee0fe25a3abe50278616b1471 \ - --hash=sha256:e4f0b035d9d0ed519c813ee23e0a733db81ec37d2e9503afbb6e54ccfdee0fa7 \ - --hash=sha256:e8b025c351b9f0e8b5436cf28a07fa4ac0204d67b38f01433ac7f9b870fa38c6 \ - --hash=sha256:eb7fd5b184e5d277afa9ec0ad5e4eb562ecff541e7f60e69ee69c8d59e9aeaba \ - --hash=sha256:ec31367fd6a255dc8de4772bd1658c3e926d8e860a0b6e922b615e532d320ddc \ - --hash=sha256:ee461a4eaab4f165b68780a6a1af95fb23a29932be7569b9fab666c407969051 \ - --hash=sha256:f5045039100ed58fa817a6227a356240ea1b9a1bc141018864c306c1a16d4175 +numpy==2.3.2 \ + 
--hash=sha256:07b62978075b67eee4065b166d000d457c82a1efe726cce608b9db9dd66a73a5 \ + --hash=sha256:087ffc25890d89a43536f75c5fe8770922008758e8eeeef61733957041ed2f9b \ + --hash=sha256:092aeb3449833ea9c0bf0089d70c29ae480685dd2377ec9cdbbb620257f84631 \ + --hash=sha256:095737ed986e00393ec18ec0b21b47c22889ae4b0cd2d5e88342e08b01141f58 \ + --hash=sha256:0a4f2021a6da53a0d580d6ef5db29947025ae8b35b3250141805ea9a32bbe86b \ + --hash=sha256:103ea7063fa624af04a791c39f97070bf93b96d7af7eb23530cd087dc8dbe9dc \ + --hash=sha256:11e58218c0c46c80509186e460d79fbdc9ca1eb8d8aee39d8f2dc768eb781089 \ + --hash=sha256:122bf5ed9a0221b3419672493878ba4967121514b1d7d4656a7580cd11dddcbf \ + --hash=sha256:14a91ebac98813a49bc6aa1a0dfc09513dcec1d97eaf31ca21a87221a1cdcb15 \ + --hash=sha256:1f91e5c028504660d606340a084db4b216567ded1056ea2b4be4f9d10b67197f \ + --hash=sha256:20b8200721840f5621b7bd03f8dcd78de33ec522fc40dc2641aa09537df010c3 \ + --hash=sha256:240259d6564f1c65424bcd10f435145a7644a65a6811cfc3201c4a429ba79170 \ + --hash=sha256:2738534837c6a1d0c39340a190177d7d66fdf432894f469728da901f8f6dc910 \ + --hash=sha256:27c9f90e7481275c7800dc9c24b7cc40ace3fdb970ae4d21eaff983a32f70c91 \ + --hash=sha256:293b2192c6bcce487dbc6326de5853787f870aeb6c43f8f9c6496db5b1781e45 \ + --hash=sha256:2c3271cc4097beb5a60f010bcc1cc204b300bb3eafb4399376418a83a1c6373c \ + --hash=sha256:2f4f0215edb189048a3c03bd5b19345bdfa7b45a7a6f72ae5945d2a28272727f \ + --hash=sha256:3dcf02866b977a38ba3ec10215220609ab9667378a9e2150615673f3ffd6c73b \ + --hash=sha256:4209f874d45f921bde2cff1ffcd8a3695f545ad2ffbef6d3d3c6768162efab89 \ + --hash=sha256:448a66d052d0cf14ce9865d159bfc403282c9bc7bb2a31b03cc18b651eca8b1a \ + --hash=sha256:4ae6863868aaee2f57503c7a5052b3a2807cf7a3914475e637a0ecd366ced220 \ + --hash=sha256:4d002ecf7c9b53240be3bb69d80f86ddbd34078bae04d87be81c1f58466f264e \ + --hash=sha256:4e6ecfeddfa83b02318f4d84acf15fbdbf9ded18e46989a15a8b6995dfbf85ab \ + --hash=sha256:508b0eada3eded10a3b55725b40806a4b855961040180028f52580c4729916a2 \ + 
--hash=sha256:546aaf78e81b4081b2eba1d105c3b34064783027a06b3ab20b6eba21fb64132b \ + --hash=sha256:572d5512df5470f50ada8d1972c5f1082d9a0b7aa5944db8084077570cf98370 \ + --hash=sha256:5ad4ebcb683a1f99f4f392cc522ee20a18b2bb12a2c1c42c3d48d5a1adc9d3d2 \ + --hash=sha256:66459dccc65d8ec98cc7df61307b64bf9e08101f9598755d42d8ae65d9a7a6ee \ + --hash=sha256:6936aff90dda378c09bea075af0d9c675fe3a977a9d2402f95a87f440f59f619 \ + --hash=sha256:69779198d9caee6e547adb933941ed7520f896fd9656834c300bdf4dd8642712 \ + --hash=sha256:6f1ae3dcb840edccc45af496f312528c15b1f79ac318169d094e85e4bb35fdf1 \ + --hash=sha256:71669b5daae692189540cffc4c439468d35a3f84f0c88b078ecd94337f6cb0ec \ + --hash=sha256:72c6df2267e926a6d5286b0a6d556ebe49eae261062059317837fda12ddf0c1a \ + --hash=sha256:72dbebb2dcc8305c431b2836bcc66af967df91be793d63a24e3d9b741374c450 \ + --hash=sha256:754d6755d9a7588bdc6ac47dc4ee97867271b17cee39cb87aef079574366db0a \ + --hash=sha256:76c3e9501ceb50b2ff3824c3589d5d1ab4ac857b0ee3f8f49629d0de55ecf7c2 \ + --hash=sha256:7a0e27186e781a69959d0230dd9909b5e26024f8da10683bd6344baea1885168 \ + --hash=sha256:7d6e390423cc1f76e1b8108c9b6889d20a7a1f59d9a60cac4a050fa734d6c1e2 \ + --hash=sha256:8145dd6d10df13c559d1e4314df29695613575183fa2e2d11fac4c208c8a1f73 \ + --hash=sha256:8446acd11fe3dc1830568c941d44449fd5cb83068e5c70bd5a470d323d448296 \ + --hash=sha256:852ae5bed3478b92f093e30f785c98e0cb62fa0a939ed057c31716e18a7a22b9 \ + --hash=sha256:87c930d52f45df092f7578889711a0768094debf73cfcde105e2d66954358125 \ + --hash=sha256:8b1224a734cd509f70816455c3cffe13a4f599b1bf7130f913ba0e2c0b2006c0 \ + --hash=sha256:8dc082ea901a62edb8f59713c6a7e28a85daddcb67454c839de57656478f5b19 \ + --hash=sha256:906a30249315f9c8e17b085cc5f87d3f369b35fedd0051d4a84686967bdbbd0b \ + --hash=sha256:938065908d1d869c7d75d8ec45f735a034771c6ea07088867f713d1cd3bbbe4f \ + --hash=sha256:9c144440db4bf3bb6372d2c3e49834cc0ff7bb4c24975ab33e01199e645416f2 \ + --hash=sha256:9e196ade2400c0c737d93465327d1ae7c06c7cb8a1756121ebf54b06ca183c7f \ + 
--hash=sha256:a3ef07ec8cbc8fc9e369c8dcd52019510c12da4de81367d8b20bc692aa07573a \ + --hash=sha256:a7af9ed2aa9ec5950daf05bb11abc4076a108bd3c7db9aa7251d5f107079b6a6 \ + --hash=sha256:a9f66e7d2b2d7712410d3bc5684149040ef5f19856f20277cd17ea83e5006286 \ + --hash=sha256:aa098a5ab53fa407fded5870865c6275a5cd4101cfdef8d6fafc48286a96e981 \ + --hash=sha256:af58de8745f7fa9ca1c0c7c943616c6fe28e75d0c81f5c295810e3c83b5be92f \ + --hash=sha256:b05a89f2fb84d21235f93de47129dd4f11c16f64c87c33f5e284e6a3a54e43f2 \ + --hash=sha256:b5e40e80299607f597e1a8a247ff8d71d79c5b52baa11cc1cce30aa92d2da6e0 \ + --hash=sha256:b9d0878b21e3918d76d2209c924ebb272340da1fb51abc00f986c258cd5e957b \ + --hash=sha256:bc3186bea41fae9d8e90c2b4fb5f0a1f5a690682da79b92574d63f56b529080b \ + --hash=sha256:c63d95dc9d67b676e9108fe0d2182987ccb0f11933c1e8959f42fa0da8d4fa56 \ + --hash=sha256:c771cfac34a4f2c0de8e8c97312d07d64fd8f8ed45bc9f5726a7e947270152b5 \ + --hash=sha256:c8d9727f5316a256425892b043736d63e89ed15bbfe6556c5ff4d9d4448ff3b3 \ + --hash=sha256:cbc95b3813920145032412f7e33d12080f11dc776262df1712e1638207dde9e8 \ + --hash=sha256:cefc2219baa48e468e3db7e706305fcd0c095534a192a08f31e98d83a7d45fb0 \ + --hash=sha256:d95f59afe7f808c103be692175008bab926b59309ade3e6d25009e9a171f7036 \ + --hash=sha256:dd937f088a2df683cbb79dda9a772b62a3e5a8a7e76690612c2737f38c6ef1b6 \ + --hash=sha256:de6ea4e5a65d5a90c7d286ddff2b87f3f4ad61faa3db8dabe936b34c2275b6f8 \ + --hash=sha256:e0486a11ec30cdecb53f184d496d1c6a20786c81e55e41640270130056f8ee48 \ + --hash=sha256:ee807923782faaf60d0d7331f5e86da7d5e3079e28b291973c545476c2b00d07 \ + --hash=sha256:efc81393f25f14d11c9d161e46e6ee348637c0a1e8a54bf9dedc472a3fae993b \ + --hash=sha256:f0a1a8476ad77a228e41619af2fa9505cf69df928e9aaa165746584ea17fed2b \ + --hash=sha256:f75018be4980a7324edc5930fe39aa391d5734531b1926968605416ff58c332d \ + --hash=sha256:f92d6c2a8535dc4fe4419562294ff957f83a16ebdec66df0805e473ffaad8bd0 \ + --hash=sha256:fb1752a3bb9a3ad2d6b090b88a9a0ae1cd6f004ef95f75825e2f382c183b2097 \ + 
--hash=sha256:fc927d7f289d14f5e037be917539620603294454130b6de200091e23d27dc9be \ + --hash=sha256:fed5527c4cf10f16c6d0b6bee1f89958bccb0ad2522c8cadc2efd318bcd545f5 # via # contourpy # matplotlib @@ -738,92 +807,117 @@ packaging==25.0 \ # matplotlib # pytest # sphinx -pillow==11.2.1 \ - --hash=sha256:014ca0050c85003620526b0ac1ac53f56fc93af128f7546623cc8e31875ab928 \ - --hash=sha256:036e53f4170e270ddb8797d4c590e6dd14d28e15c7da375c18978045f7e6c37b \ - --hash=sha256:062b7a42d672c45a70fa1f8b43d1d38ff76b63421cbbe7f88146b39e8a558d91 \ - --hash=sha256:0c3e6d0f59171dfa2e25d7116217543310908dfa2770aa64b8f87605f8cacc97 \ - --hash=sha256:0c7b29dbd4281923a2bfe562acb734cee96bbb129e96e6972d315ed9f232bef4 \ - --hash=sha256:0f5c7eda47bf8e3c8a283762cab94e496ba977a420868cb819159980b6709193 \ - --hash=sha256:127bf6ac4a5b58b3d32fc8289656f77f80567d65660bc46f72c0d77e6600cc95 \ - --hash=sha256:14e33b28bf17c7a38eede290f77db7c664e4eb01f7869e37fa98a5aa95978941 \ - --hash=sha256:14f73f7c291279bd65fda51ee87affd7c1e097709f7fdd0188957a16c264601f \ - --hash=sha256:191955c55d8a712fab8934a42bfefbf99dd0b5875078240943f913bb66d46d9f \ - --hash=sha256:1d535df14716e7f8776b9e7fee118576d65572b4aad3ed639be9e4fa88a1cad3 \ - --hash=sha256:208653868d5c9ecc2b327f9b9ef34e0e42a4cdd172c2988fd81d62d2bc9bc044 \ - --hash=sha256:21e1470ac9e5739ff880c211fc3af01e3ae505859392bf65458c224d0bf283eb \ - --hash=sha256:225c832a13326e34f212d2072982bb1adb210e0cc0b153e688743018c94a2681 \ - --hash=sha256:25a5f306095c6780c52e6bbb6109624b95c5b18e40aab1c3041da3e9e0cd3e2d \ - --hash=sha256:2728567e249cdd939f6cc3d1f049595c66e4187f3c34078cbc0a7d21c47482d2 \ - --hash=sha256:2b490402c96f907a166615e9a5afacf2519e28295f157ec3a2bb9bd57de638cb \ - --hash=sha256:312c77b7f07ab2139924d2639860e084ec2a13e72af54d4f08ac843a5fc9c79d \ - --hash=sha256:31df6e2d3d8fc99f993fd253e97fae451a8db2e7207acf97859732273e108406 \ - --hash=sha256:35ca289f712ccfc699508c4658a1d14652e8033e9b69839edf83cbdd0ba39e70 \ - 
--hash=sha256:3692b68c87096ac6308296d96354eddd25f98740c9d2ab54e1549d6c8aea9d79 \ - --hash=sha256:36d6b82164c39ce5482f649b437382c0fb2395eabc1e2b1702a6deb8ad647d6e \ - --hash=sha256:39ad2e0f424394e3aebc40168845fee52df1394a4673a6ee512d840d14ab3013 \ - --hash=sha256:3e645b020f3209a0181a418bffe7b4a93171eef6c4ef6cc20980b30bebf17b7d \ - --hash=sha256:3fe735ced9a607fee4f481423a9c36701a39719252a9bb251679635f99d0f7d2 \ - --hash=sha256:4b835d89c08a6c2ee7781b8dd0a30209a8012b5f09c0a665b65b0eb3560b6f36 \ - --hash=sha256:4d375eb838755f2528ac8cbc926c3e31cc49ca4ad0cf79cff48b20e30634a4a7 \ - --hash=sha256:4eb92eca2711ef8be42fd3f67533765d9fd043b8c80db204f16c8ea62ee1a751 \ - --hash=sha256:5119225c622403afb4b44bad4c1ca6c1f98eed79db8d3bc6e4e160fc6339d66c \ - --hash=sha256:562d11134c97a62fe3af29581f083033179f7ff435f78392565a1ad2d1c2c45c \ - --hash=sha256:598174aef4589af795f66f9caab87ba4ff860ce08cd5bb447c6fc553ffee603c \ - --hash=sha256:63b5dff3a68f371ea06025a1a6966c9a1e1ee452fc8020c2cd0ea41b83e9037b \ - --hash=sha256:6ebce70c3f486acf7591a3d73431fa504a4e18a9b97ff27f5f47b7368e4b9dd1 \ - --hash=sha256:738db0e0941ca0376804d4de6a782c005245264edaa253ffce24e5a15cbdc7bd \ - --hash=sha256:7491cf8a79b8eb867d419648fff2f83cb0b3891c8b36da92cc7f1931d46108c8 \ - --hash=sha256:74ee3d7ecb3f3c05459ba95eed5efa28d6092d751ce9bf20e3e253a4e497e691 \ - --hash=sha256:750f96efe0597382660d8b53e90dd1dd44568a8edb51cb7f9d5d918b80d4de14 \ - --hash=sha256:78092232a4ab376a35d68c4e6d5e00dfd73454bd12b230420025fbe178ee3b0b \ - --hash=sha256:78afba22027b4accef10dbd5eed84425930ba41b3ea0a86fa8d20baaf19d807f \ - --hash=sha256:7bdb5e09068332578214cadd9c05e3d64d99e0e87591be22a324bdbc18925be0 \ - --hash=sha256:80f1df8dbe9572b4b7abdfa17eb5d78dd620b1d55d9e25f834efdbee872d3aed \ - --hash=sha256:85d27ea4c889342f7e35f6d56e7e1cb345632ad592e8c51b693d7b7556043ce0 \ - --hash=sha256:8b02d8f9cb83c52578a0b4beadba92e37d83a4ef11570a8688bbf43f4ca50909 \ - --hash=sha256:8ce2e8411c7aaef53e6bb29fe98f28cd4fbd9a1d9be2eeea434331aac0536b22 \ - 
--hash=sha256:8f4f3724c068be008c08257207210c138d5f3731af6c155a81c2b09a9eb3a788 \ - --hash=sha256:9622e3b6c1d8b551b6e6f21873bdcc55762b4b2126633014cea1803368a9aa16 \ - --hash=sha256:9b7b0d4fd2635f54ad82785d56bc0d94f147096493a79985d0ab57aedd563156 \ - --hash=sha256:9bc7ae48b8057a611e5fe9f853baa88093b9a76303937449397899385da06fad \ - --hash=sha256:9db98ab6565c69082ec9b0d4e40dd9f6181dab0dd236d26f7a50b8b9bfbd5076 \ - --hash=sha256:9ee66787e095127116d91dea2143db65c7bb1e232f617aa5957c0d9d2a3f23a7 \ - --hash=sha256:a0a6709b47019dff32e678bc12c63008311b82b9327613f534e496dacaefb71e \ - --hash=sha256:a64dd61998416367b7ef979b73d3a85853ba9bec4c2925f74e588879a58716b6 \ - --hash=sha256:aa442755e31c64037aa7c1cb186e0b369f8416c567381852c63444dd666fb772 \ - --hash=sha256:ad275964d52e2243430472fc5d2c2334b4fc3ff9c16cb0a19254e25efa03a155 \ - --hash=sha256:b0e130705d568e2f43a17bcbe74d90958e8a16263868a12c3e0d9c8162690830 \ - --hash=sha256:b10428b3416d4f9c61f94b494681280be7686bda15898a3a9e08eb66a6d92d67 \ - --hash=sha256:b2dbea1012ccb784a65349f57bbc93730b96e85b42e9bf7b01ef40443db720b4 \ - --hash=sha256:b4ba4be812c7a40280629e55ae0b14a0aafa150dd6451297562e1764808bbe61 \ - --hash=sha256:b93a07e76d13bff9444f1a029e0af2964e654bfc2e2c2d46bfd080df5ad5f3d8 \ - --hash=sha256:bf2c33d6791c598142f00c9c4c7d47f6476731c31081331664eb26d6ab583e01 \ - --hash=sha256:c27476257b2fdcd7872d54cfd119b3a9ce4610fb85c8e32b70b42e3680a29a1e \ - --hash=sha256:c8bd62331e5032bc396a93609982a9ab6b411c05078a52f5fe3cc59234a3abd1 \ - --hash=sha256:c97209e85b5be259994eb5b69ff50c5d20cca0f458ef9abd835e262d9d88b39d \ - --hash=sha256:cc1c3bc53befb6096b84165956e886b1729634a799e9d6329a0c512ab651e579 \ - --hash=sha256:cc5d875d56e49f112b6def6813c4e3d3036d269c008bf8aef72cd08d20ca6df6 \ - --hash=sha256:d189ba1bebfbc0c0e529159631ec72bb9e9bc041f01ec6d3233d6d82eb823bc1 \ - --hash=sha256:d4e5c5edee874dce4f653dbe59db7c73a600119fbea8d31f53423586ee2aafd7 \ - --hash=sha256:d57a75d53922fc20c165016a20d9c44f73305e67c351bbc60d1adaf662e74047 \ - 
--hash=sha256:da3104c57bbd72948d75f6a9389e6727d2ab6333c3617f0a89d72d4940aa0443 \ - --hash=sha256:dd6b20b93b3ccc9c1b597999209e4bc5cf2853f9ee66e3fc9a400a78733ffc9a \ - --hash=sha256:e0409af9f829f87a2dfb7e259f78f317a5351f2045158be321fd135973fff7bf \ - --hash=sha256:e0b55f27f584ed623221cfe995c912c61606be8513bfa0e07d2c674b4516d9dd \ - --hash=sha256:e616e7154c37669fc1dfc14584f11e284e05d1c650e1c0f972f281c4ccc53193 \ - --hash=sha256:e6def7eed9e7fa90fde255afaf08060dc4b343bbe524a8f69bdd2a2f0018f600 \ - --hash=sha256:ea926cfbc3957090becbcbbb65ad177161a2ff2ad578b5a6ec9bb1e1cd78753c \ - --hash=sha256:f0d3348c95b766f54b76116d53d4cb171b52992a1027e7ca50c81b43b9d9e363 \ - --hash=sha256:f6b0c664ccb879109ee3ca702a9272d877f4fcd21e5eb63c26422fd6e415365e \ - --hash=sha256:f781dcb0bc9929adc77bad571b8621ecb1e4cdef86e940fe2e5b5ee24fd33b35 \ - --hash=sha256:f91ebf30830a48c825590aede79376cb40f110b387c17ee9bd59932c961044f9 \ - --hash=sha256:fdec757fea0b793056419bca3e9932eb2b0ceec90ef4813ea4c1e072c389eb28 \ - --hash=sha256:fe15238d3798788d00716637b3d4e7bb6bde18b26e5d08335a96e88564a36b6b +pillow==11.3.0 \ + --hash=sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2 \ + --hash=sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214 \ + --hash=sha256:040a5b691b0713e1f6cbe222e0f4f74cd233421e105850ae3b3c0ceda520f42e \ + --hash=sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59 \ + --hash=sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50 \ + --hash=sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632 \ + --hash=sha256:092c80c76635f5ecb10f3f83d76716165c96f5229addbd1ec2bdbbda7d496e06 \ + --hash=sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a \ + --hash=sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51 \ + --hash=sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced \ + 
--hash=sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f \ + --hash=sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12 \ + --hash=sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8 \ + --hash=sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6 \ + --hash=sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580 \ + --hash=sha256:19d2ff547c75b8e3ff46f4d9ef969a06c30ab2d4263a9e287733aa8b2429ce8f \ + --hash=sha256:1a992e86b0dd7aeb1f053cd506508c0999d710a8f07b4c791c63843fc6a807ac \ + --hash=sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860 \ + --hash=sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd \ + --hash=sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722 \ + --hash=sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8 \ + --hash=sha256:23cff760a9049c502721bdb743a7cb3e03365fafcdfc2ef9784610714166e5a4 \ + --hash=sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673 \ + --hash=sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788 \ + --hash=sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542 \ + --hash=sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e \ + --hash=sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd \ + --hash=sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8 \ + --hash=sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523 \ + --hash=sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967 \ + --hash=sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809 \ + --hash=sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477 \ + --hash=sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027 \ + --hash=sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae \ + 
--hash=sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b \ + --hash=sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c \ + --hash=sha256:48d254f8a4c776de343051023eb61ffe818299eeac478da55227d96e241de53f \ + --hash=sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e \ + --hash=sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b \ + --hash=sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7 \ + --hash=sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27 \ + --hash=sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361 \ + --hash=sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae \ + --hash=sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d \ + --hash=sha256:6359a3bc43f57d5b375d1ad54a0074318a0844d11b76abccf478c37c986d3cfc \ + --hash=sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58 \ + --hash=sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad \ + --hash=sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6 \ + --hash=sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024 \ + --hash=sha256:6a418691000f2a418c9135a7cf0d797c1bb7d9a485e61fe8e7722845b95ef978 \ + --hash=sha256:6abdbfd3aea42be05702a8dd98832329c167ee84400a1d1f61ab11437f1717eb \ + --hash=sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d \ + --hash=sha256:7107195ddc914f656c7fc8e4a5e1c25f32e9236ea3ea860f257b0436011fddd0 \ + --hash=sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9 \ + --hash=sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f \ + --hash=sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874 \ + --hash=sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa \ + --hash=sha256:7aee118e30a4cf54fdd873bd3a29de51e29105ab11f9aad8c32123f58c8f8081 \ + 
--hash=sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149 \ + --hash=sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6 \ + --hash=sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d \ + --hash=sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd \ + --hash=sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f \ + --hash=sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c \ + --hash=sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31 \ + --hash=sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e \ + --hash=sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db \ + --hash=sha256:89bd777bc6624fe4115e9fac3352c79ed60f3bb18651420635f26e643e3dd1f6 \ + --hash=sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f \ + --hash=sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494 \ + --hash=sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69 \ + --hash=sha256:932c754c2d51ad2b2271fd01c3d121daaa35e27efae2a616f77bf164bc0b3e94 \ + --hash=sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77 \ + --hash=sha256:97afb3a00b65cc0804d1c7abddbf090a81eaac02768af58cbdcaaa0a931e0b6d \ + --hash=sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7 \ + --hash=sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a \ + --hash=sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438 \ + --hash=sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288 \ + --hash=sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b \ + --hash=sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635 \ + --hash=sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3 \ + --hash=sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d \ + 
--hash=sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe \ + --hash=sha256:b4b8f3efc8d530a1544e5962bd6b403d5f7fe8b9e08227c6b255f98ad82b4ba0 \ + --hash=sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe \ + --hash=sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a \ + --hash=sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805 \ + --hash=sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8 \ + --hash=sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36 \ + --hash=sha256:cadc9e0ea0a2431124cde7e1697106471fc4c1da01530e679b2391c37d3fbb3a \ + --hash=sha256:cc3e831b563b3114baac7ec2ee86819eb03caa1a2cef0b481a5675b59c4fe23b \ + --hash=sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e \ + --hash=sha256:d000f46e2917c705e9fb93a3606ee4a819d1e3aa7a9b442f6444f07e77cf5e25 \ + --hash=sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12 \ + --hash=sha256:e5c5858ad8ec655450a7c7df532e9842cf8df7cc349df7225c60d5d348c8aada \ + --hash=sha256:e67d793d180c9df62f1f40aee3accca4829d3794c95098887edc18af4b8b780c \ + --hash=sha256:ea944117a7974ae78059fcc1800e5d3295172bb97035c0c1d9345fca1419da71 \ + --hash=sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d \ + --hash=sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c \ + --hash=sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6 \ + --hash=sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1 \ + --hash=sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50 \ + --hash=sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653 \ + --hash=sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c \ + --hash=sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4 \ + --hash=sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3 # via 
matplotlib -platformdirs==4.3.7 \ - --hash=sha256:a03875334331946f13c549dbd8f4bac7a13a50a895a0eb1e8c6a8ace80d40a94 \ - --hash=sha256:eb437d586b6a0986388f0d6f74aa0cde27b48d0e3d66843640bfb6bdcdb6e351 +platformdirs==4.3.8 \ + --hash=sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc \ + --hash=sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4 # via esbonio pluggy==1.6.0 \ --hash=sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3 \ @@ -839,17 +933,17 @@ pydata-sphinx-theme==0.16.1 \ --hash=sha256:225331e8ac4b32682c18fcac5a57a6f717c4e632cea5dd0e247b55155faeccde \ --hash=sha256:a08b7f0b7f70387219dc659bff0893a7554d5eb39b59d3b8ef37b8401b7642d7 # via -r src/requirements.in -pygithub==2.6.1 \ - --hash=sha256:6f2fa6d076ccae475f9fc392cc6cdbd54db985d4f69b8833a28397de75ed6ca3 \ - --hash=sha256:b5c035392991cca63959e9453286b41b54d83bf2de2daa7d7ff7e4312cebf3bf +pygithub==2.7.0 \ + --hash=sha256:40ecbfe26dc55cc34ab4b0ffa1d455e6f816ef9a2bc8d6f5ad18ce572f163700 \ + --hash=sha256:7cd6eafabb09b5369afba3586d86b1f1ad6f1326d2ff01bc47bb26615dce4cbb # via -r src/requirements.in pygls==1.3.1 \ --hash=sha256:140edceefa0da0e9b3c533547c892a42a7d2fd9217ae848c330c53d266a55018 \ --hash=sha256:6e00f11efc56321bdeb6eac04f6d86131f654c7d49124344a9ebb968da3dd91e # via esbonio -pygments==2.19.1 \ - --hash=sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f \ - --hash=sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c +pygments==2.19.2 \ + --hash=sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887 \ + --hash=sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b # via # accessible-pygments # pydata-sphinx-theme @@ -875,9 +969,9 @@ pyparsing==3.2.3 \ --hash=sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf \ --hash=sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be # via matplotlib -pyspellchecker==0.8.2 \ - 
--hash=sha256:2b026be14a162ba810bdda8e5454c56e364f42d3b9e14aeff31706e5ebcdc78f \ - --hash=sha256:4fee22e1859c5153c3bc3953ac3041bf07d4541520b7e01901e955062022290a +pyspellchecker==0.8.3 \ + --hash=sha256:cb06eeafe124837f321e0d02f8e21deab713e966e28e0360319a28a089c43978 \ + --hash=sha256:e993076e98b0da5a99b7cc31085c3022c77a9dc37c5e95f5cf6304b5dbb8b9d2 # via esbonio pytest==8.3.5 \ --hash=sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820 \ @@ -950,9 +1044,9 @@ referencing==0.36.2 \ # via # jsonschema # jsonschema-specifications -requests==2.32.3 \ - --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ - --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 +requests==2.32.5 \ + --hash=sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6 \ + --hash=sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf # via # pygithub # requests-file @@ -962,135 +1056,176 @@ requests-file==2.1.0 \ --hash=sha256:0f549a3f3b0699415ac04d167e9cb39bccfb730cb832b4d20be3d9867356e658 \ --hash=sha256:cf270de5a4c5874e84599fc5778303d496c10ae5e870bfa378818f35d21bda5c # via sphinx-needs -rich==14.0.0 \ - --hash=sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0 \ - --hash=sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725 +rich==14.1.0 \ + --hash=sha256:536f5f1785986d6dbdea3c75205c473f970777b4a0d6c6dd1b696aa05a3fa04f \ + --hash=sha256:e497a48b844b0320d45007cdebfeaeed8db2a4f4bcf49f15e455cfc4af11eaa8 # via -r src/requirements.in roman-numerals-py==3.1.0 \ --hash=sha256:9da2ad2fb670bcf24e81070ceb3be72f6c11c440d73bd579fbeca1e9f330954c \ --hash=sha256:be4bf804f083a4ce001b5eb7e3c0862479d10f94c936f6c4e5f250aa5ff5bd2d # via sphinx -rpds-py==0.24.0 \ - --hash=sha256:0047638c3aa0dbcd0ab99ed1e549bbf0e142c9ecc173b6492868432d8989a046 \ - --hash=sha256:006f4342fe729a368c6df36578d7a348c7c716be1da0a1a0f86e3021f8e98724 \ - 
--hash=sha256:041f00419e1da7a03c46042453598479f45be3d787eb837af382bfc169c0db33 \ - --hash=sha256:04ecf5c1ff4d589987b4d9882872f80ba13da7d42427234fce8f22efb43133bc \ - --hash=sha256:04f2b712a2206e13800a8136b07aaedc23af3facab84918e7aa89e4be0260032 \ - --hash=sha256:0aeb3329c1721c43c58cae274d7d2ca85c1690d89485d9c63a006cb79a85771a \ - --hash=sha256:0e374c0ce0ca82e5b67cd61fb964077d40ec177dd2c4eda67dba130de09085c7 \ - --hash=sha256:0f00c16e089282ad68a3820fd0c831c35d3194b7cdc31d6e469511d9bffc535c \ - --hash=sha256:174e46569968ddbbeb8a806d9922f17cd2b524aa753b468f35b97ff9c19cb718 \ - --hash=sha256:1b221c2457d92a1fb3c97bee9095c874144d196f47c038462ae6e4a14436f7bc \ - --hash=sha256:208b3a70a98cf3710e97cabdc308a51cd4f28aa6e7bb11de3d56cd8b74bab98d \ - --hash=sha256:20f2712bd1cc26a3cc16c5a1bfee9ed1abc33d4cdf1aabd297fe0eb724df4272 \ - --hash=sha256:24795c099453e3721fda5d8ddd45f5dfcc8e5a547ce7b8e9da06fecc3832e26f \ - --hash=sha256:2a0f156e9509cee987283abd2296ec816225145a13ed0391df8f71bf1d789e2d \ - --hash=sha256:2b2356688e5d958c4d5cb964af865bea84db29971d3e563fb78e46e20fe1848b \ - --hash=sha256:2c13777ecdbbba2077670285dd1fe50828c8742f6a4119dbef6f83ea13ad10fb \ - --hash=sha256:2d3ee4615df36ab8eb16c2507b11e764dcc11fd350bbf4da16d09cda11fcedef \ - --hash=sha256:2d53747da70a4e4b17f559569d5f9506420966083a31c5fbd84e764461c4444b \ - --hash=sha256:32bab0a56eac685828e00cc2f5d1200c548f8bc11f2e44abf311d6b548ce2e45 \ - --hash=sha256:34d90ad8c045df9a4259c47d2e16a3f21fdb396665c94520dbfe8766e62187a4 \ - --hash=sha256:369d9c6d4c714e36d4a03957b4783217a3ccd1e222cdd67d464a3a479fc17796 \ - --hash=sha256:3a55fc10fdcbf1a4bd3c018eea422c52cf08700cf99c28b5cb10fe97ab77a0d3 \ - --hash=sha256:3d2d8e4508e15fc05b31285c4b00ddf2e0eb94259c2dc896771966a163122a0c \ - --hash=sha256:3fab5f4a2c64a8fb64fc13b3d139848817a64d467dd6ed60dcdd6b479e7febc9 \ - --hash=sha256:43dba99f00f1d37b2a0265a259592d05fcc8e7c19d140fe51c6e6f16faabeb1f \ - --hash=sha256:44d51febb7a114293ffd56c6cf4736cb31cd68c0fddd6aa303ed09ea5a48e029 \ - 
--hash=sha256:493fe54318bed7d124ce272fc36adbf59d46729659b2c792e87c3b95649cdee9 \ - --hash=sha256:4b28e5122829181de1898c2c97f81c0b3246d49f585f22743a1246420bb8d399 \ - --hash=sha256:4cd031e63bc5f05bdcda120646a0d32f6d729486d0067f09d79c8db5368f4586 \ - --hash=sha256:528927e63a70b4d5f3f5ccc1fa988a35456eb5d15f804d276709c33fc2f19bda \ - --hash=sha256:564c96b6076a98215af52f55efa90d8419cc2ef45d99e314fddefe816bc24f91 \ - --hash=sha256:5db385bacd0c43f24be92b60c857cf760b7f10d8234f4bd4be67b5b20a7c0b6b \ - --hash=sha256:5ef877fa3bbfb40b388a5ae1cb00636a624690dcb9a29a65267054c9ea86d88a \ - --hash=sha256:5f6e3cec44ba05ee5cbdebe92d052f69b63ae792e7d05f1020ac5e964394080c \ - --hash=sha256:5fc13b44de6419d1e7a7e592a4885b323fbc2f46e1f22151e3a8ed3b8b920405 \ - --hash=sha256:60748789e028d2a46fc1c70750454f83c6bdd0d05db50f5ae83e2db500b34da5 \ - --hash=sha256:60d9b630c8025b9458a9d114e3af579a2c54bd32df601c4581bd054e85258143 \ - --hash=sha256:619ca56a5468f933d940e1bf431c6f4e13bef8e688698b067ae68eb4f9b30e3a \ - --hash=sha256:630d3d8ea77eabd6cbcd2ea712e1c5cecb5b558d39547ac988351195db433f6c \ - --hash=sha256:63981feca3f110ed132fd217bf7768ee8ed738a55549883628ee3da75bb9cb78 \ - --hash=sha256:66420986c9afff67ef0c5d1e4cdc2d0e5262f53ad11e4f90e5e22448df485bf0 \ - --hash=sha256:675269d407a257b8c00a6b58205b72eec8231656506c56fd429d924ca00bb350 \ - --hash=sha256:6a4a535013aeeef13c5532f802708cecae8d66c282babb5cd916379b72110cf7 \ - --hash=sha256:6a727fd083009bc83eb83d6950f0c32b3c94c8b80a9b667c87f4bd1274ca30ba \ - --hash=sha256:6e1daf5bf6c2be39654beae83ee6b9a12347cb5aced9a29eecf12a2d25fff664 \ - --hash=sha256:6eea559077d29486c68218178ea946263b87f1c41ae7f996b1f30a983c476a5a \ - --hash=sha256:75a810b7664c17f24bf2ffd7f92416c00ec84b49bb68e6a0d93e542406336b56 \ - --hash=sha256:772cc1b2cd963e7e17e6cc55fe0371fb9c704d63e44cacec7b9b7f523b78919e \ - --hash=sha256:78884d155fd15d9f64f5d6124b486f3d3f7fd7cd71a78e9670a0f6f6ca06fb2d \ - --hash=sha256:79e8d804c2ccd618417e96720ad5cd076a86fa3f8cb310ea386a3e6229bae7d1 \ - 
--hash=sha256:7e80d375134ddb04231a53800503752093dbb65dad8dabacce2c84cccc78e964 \ - --hash=sha256:8097b3422d020ff1c44effc40ae58e67d93e60d540a65649d2cdaf9466030791 \ - --hash=sha256:8205ee14463248d3349131bb8099efe15cd3ce83b8ef3ace63c7e976998e7124 \ - --hash=sha256:8212ff58ac6dfde49946bea57474a386cca3f7706fc72c25b772b9ca4af6b79e \ - --hash=sha256:823e74ab6fbaa028ec89615ff6acb409e90ff45580c45920d4dfdddb069f2120 \ - --hash=sha256:84e0566f15cf4d769dade9b366b7b87c959be472c92dffb70462dd0844d7cbad \ - --hash=sha256:896c41007931217a343eff197c34513c154267636c8056fb409eafd494c3dcdc \ - --hash=sha256:8aa362811ccdc1f8dadcc916c6d47e554169ab79559319ae9fae7d7752d0d60c \ - --hash=sha256:8b3b397eefecec8e8e39fa65c630ef70a24b09141a6f9fc17b3c3a50bed6b50e \ - --hash=sha256:8ebc7e65ca4b111d928b669713865f021b7773350eeac4a31d3e70144297baba \ - --hash=sha256:9168764133fd919f8dcca2ead66de0105f4ef5659cbb4fa044f7014bed9a1797 \ - --hash=sha256:921ae54f9ecba3b6325df425cf72c074cd469dea843fb5743a26ca7fb2ccb149 \ - --hash=sha256:92558d37d872e808944c3c96d0423b8604879a3d1c86fdad508d7ed91ea547d5 \ - --hash=sha256:951cc481c0c395c4a08639a469d53b7d4afa252529a085418b82a6b43c45c240 \ - --hash=sha256:998c01b8e71cf051c28f5d6f1187abbdf5cf45fc0efce5da6c06447cba997034 \ - --hash=sha256:9abc80fe8c1f87218db116016de575a7998ab1629078c90840e8d11ab423ee25 \ - --hash=sha256:9be4f99bee42ac107870c61dfdb294d912bf81c3c6d45538aad7aecab468b6b7 \ - --hash=sha256:9c39438c55983d48f4bb3487734d040e22dad200dab22c41e331cee145e7a50d \ - --hash=sha256:9d7e8ce990ae17dda686f7e82fd41a055c668e13ddcf058e7fb5e9da20b57793 \ - --hash=sha256:9ea7f4174d2e4194289cb0c4e172d83e79a6404297ff95f2875cf9ac9bced8ba \ - --hash=sha256:a18fc371e900a21d7392517c6f60fe859e802547309e94313cd8181ad9db004d \ - --hash=sha256:a36b452abbf29f68527cf52e181fced56685731c86b52e852053e38d8b60bc8d \ - --hash=sha256:a5b66d1b201cc71bc3081bc2f1fc36b0c1f268b773e03bbc39066651b9e18391 \ - --hash=sha256:a824d2c7a703ba6daaca848f9c3d5cb93af0505be505de70e7e66829affd676e \ - 
--hash=sha256:a88c0d17d039333a41d9bf4616bd062f0bd7aa0edeb6cafe00a2fc2a804e944f \ - --hash=sha256:aa6800adc8204ce898c8a424303969b7aa6a5e4ad2789c13f8648739830323b7 \ - --hash=sha256:aad911555286884be1e427ef0dc0ba3929e6821cbeca2194b13dc415a462c7fd \ - --hash=sha256:afc6e35f344490faa8276b5f2f7cbf71f88bc2cda4328e00553bd451728c571f \ - --hash=sha256:b9a4df06c35465ef4d81799999bba810c68d29972bf1c31db61bfdb81dd9d5bb \ - --hash=sha256:bb2954155bb8f63bb19d56d80e5e5320b61d71084617ed89efedb861a684baea \ - --hash=sha256:bbc4362e06f950c62cad3d4abf1191021b2ffaf0b31ac230fbf0526453eee75e \ - --hash=sha256:c0145295ca415668420ad142ee42189f78d27af806fcf1f32a18e51d47dd2052 \ - --hash=sha256:c30ff468163a48535ee7e9bf21bd14c7a81147c0e58a36c1078289a8ca7af0bd \ - --hash=sha256:c347a20d79cedc0a7bd51c4d4b7dbc613ca4e65a756b5c3e57ec84bd43505b47 \ - --hash=sha256:c43583ea8517ed2e780a345dd9960896afc1327e8cf3ac8239c167530397440d \ - --hash=sha256:c61a2cb0085c8783906b2f8b1f16a7e65777823c7f4d0a6aaffe26dc0d358dd9 \ - --hash=sha256:c9ca89938dff18828a328af41ffdf3902405a19f4131c88e22e776a8e228c5a8 \ - --hash=sha256:cc31e13ce212e14a539d430428cd365e74f8b2d534f8bc22dd4c9c55b277b875 \ - --hash=sha256:cdabcd3beb2a6dca7027007473d8ef1c3b053347c76f685f5f060a00327b8b65 \ - --hash=sha256:cf86f72d705fc2ef776bb7dd9e5fbba79d7e1f3e258bf9377f8204ad0fc1c51e \ - --hash=sha256:d09dc82af2d3c17e7dd17120b202a79b578d79f2b5424bda209d9966efeed114 \ - --hash=sha256:d3aa13bdf38630da298f2e0d77aca967b200b8cc1473ea05248f6c5e9c9bdb44 \ - --hash=sha256:d69d003296df4840bd445a5d15fa5b6ff6ac40496f956a221c4d1f6f7b4bc4d9 \ - --hash=sha256:d6e109a454412ab82979c5b1b3aee0604eca4bbf9a02693bb9df027af2bfa91a \ - --hash=sha256:d8551e733626afec514b5d15befabea0dd70a343a9f23322860c4f16a9430205 \ - --hash=sha256:d8754d872a5dfc3c5bf9c0e059e8107451364a30d9fd50f1f1a85c4fb9481164 \ - --hash=sha256:d8f9a6e7fd5434817526815f09ea27f2746c4a51ee11bb3439065f5fc754db58 \ - --hash=sha256:dbcbb6db5582ea33ce46a5d20a5793134b5365110d84df4e30b9d37c6fd40ad3 \ - 
--hash=sha256:e0f3ef95795efcd3b2ec3fe0a5bcfb5dadf5e3996ea2117427e524d4fbf309c6 \ - --hash=sha256:e13ae74a8a3a0c2f22f450f773e35f893484fcfacb00bb4344a7e0f4f48e1f97 \ - --hash=sha256:e274f62cbd274359eff63e5c7e7274c913e8e09620f6a57aae66744b3df046d6 \ - --hash=sha256:e838bf2bb0b91ee67bf2b889a1a841e5ecac06dd7a2b1ef4e6151e2ce155c7ae \ - --hash=sha256:e8acd55bd5b071156bae57b555f5d33697998752673b9de554dd82f5b5352727 \ - --hash=sha256:e8e5ab32cf9eb3647450bc74eb201b27c185d3857276162c101c0f8c6374e098 \ - --hash=sha256:ebcb786b9ff30b994d5969213a8430cbb984cdd7ea9fd6df06663194bd3c450c \ - --hash=sha256:ebea2821cdb5f9fef44933617be76185b80150632736f3d76e54829ab4a3b4d1 \ - --hash=sha256:ed0ef550042a8dbcd657dfb284a8ee00f0ba269d3f2286b0493b15a5694f9fe8 \ - --hash=sha256:eda5c1e2a715a4cbbca2d6d304988460942551e4e5e3b7457b50943cd741626d \ - --hash=sha256:f5c0ed12926dec1dfe7d645333ea59cf93f4d07750986a586f511c0bc61fe103 \ - --hash=sha256:f6016bd950be4dcd047b7475fdf55fb1e1f59fc7403f387be0e8123e4a576d30 \ - --hash=sha256:f9e0057a509e096e47c87f753136c9b10d7a91842d8042c2ee6866899a717c0d \ - --hash=sha256:fc1c892b1ec1f8cbd5da8de287577b455e388d9c328ad592eabbdcb6fc93bee5 \ - --hash=sha256:fc2c1e1b00f88317d9de6b2c2b39b012ebbfe35fe5e7bef980fd2a91f6100a07 \ - --hash=sha256:fd822f019ccccd75c832deb7aa040bb02d70a92eb15a2f16c7987b7ad4ee8d83 +rpds-py==0.27.0 \ + --hash=sha256:010c4843a3b92b54373e3d2291a7447d6c3fc29f591772cc2ea0e9f5c1da434b \ + --hash=sha256:05284439ebe7d9f5f5a668d4d8a0a1d851d16f7d47c78e1fab968c8ad30cab04 \ + --hash=sha256:0665be515767dc727ffa5f74bd2ef60b0ff85dad6bb8f50d91eaa6b5fb226f51 \ + --hash=sha256:069e0384a54f427bd65d7fda83b68a90606a3835901aaff42185fcd94f5a9295 \ + --hash=sha256:08680820d23df1df0a0260f714d12966bc6c42d02e8055a91d61e03f0c47dda0 \ + --hash=sha256:0954e3a92e1d62e83a54ea7b3fdc9efa5d61acef8488a8a3d31fdafbfb00460d \ + --hash=sha256:09965b314091829b378b60607022048953e25f0b396c2b70e7c4c81bcecf932e \ + 
--hash=sha256:0c431bfb91478d7cbe368d0a699978050d3b112d7f1d440a41e90faa325557fd \ + --hash=sha256:0f401c369186a5743694dd9fc08cba66cf70908757552e1f714bfc5219c655b5 \ + --hash=sha256:0f4f69d7a4300fbf91efb1fb4916421bd57804c01ab938ab50ac9c4aa2212f03 \ + --hash=sha256:11e8e28c0ba0373d052818b600474cfee2fafa6c9f36c8587d217b13ee28ca7d \ + --hash=sha256:130c1ffa5039a333f5926b09e346ab335f0d4ec393b030a18549a7c7e7c2cea4 \ + --hash=sha256:1321bce595ad70e80f97f998db37356b2e22cf98094eba6fe91782e626da2f71 \ + --hash=sha256:13bbc4846ae4c993f07c93feb21a24d8ec637573d567a924b1001e81c8ae80f9 \ + --hash=sha256:14f028eb47f59e9169bfdf9f7ceafd29dd64902141840633683d0bad5b04ff34 \ + --hash=sha256:15ea4d2e182345dd1b4286593601d766411b43f868924afe297570658c31a62b \ + --hash=sha256:181bc29e59e5e5e6e9d63b143ff4d5191224d355e246b5a48c88ce6b35c4e466 \ + --hash=sha256:183f5e221ba3e283cd36fdfbe311d95cd87699a083330b4f792543987167eff1 \ + --hash=sha256:184f0d7b342967f6cda94a07d0e1fae177d11d0b8f17d73e06e36ac02889f303 \ + --hash=sha256:190d7285cd3bb6d31d37a0534d7359c1ee191eb194c511c301f32a4afa5a1dd4 \ + --hash=sha256:19c990fdf5acecbf0623e906ae2e09ce1c58947197f9bced6bbd7482662231c4 \ + --hash=sha256:1d66f45b9399036e890fb9c04e9f70c33857fd8f58ac8db9f3278cfa835440c3 \ + --hash=sha256:203f581accef67300a942e49a37d74c12ceeef4514874c7cede21b012613ca2c \ + --hash=sha256:20e222a44ae9f507d0f2678ee3dd0c45ec1e930f6875d99b8459631c24058aec \ + --hash=sha256:2406d034635d1497c596c40c85f86ecf2bf9611c1df73d14078af8444fe48031 \ + --hash=sha256:249ab91ceaa6b41abc5f19513cb95b45c6f956f6b89f1fe3d99c81255a849f9e \ + --hash=sha256:25a4aebf8ca02bbb90a9b3e7a463bbf3bee02ab1c446840ca07b1695a68ce424 \ + --hash=sha256:27bac29bbbf39601b2aab474daf99dbc8e7176ca3389237a23944b17f8913d97 \ + --hash=sha256:299a245537e697f28a7511d01038c310ac74e8ea213c0019e1fc65f52c0dcb23 \ + --hash=sha256:2cff9bdd6c7b906cc562a505c04a57d92e82d37200027e8d362518df427f96cd \ + --hash=sha256:2e307cb5f66c59ede95c00e93cd84190a5b7f3533d7953690b2036780622ba81 \ + 
--hash=sha256:2e39169ac6aae06dd79c07c8a69d9da867cef6a6d7883a0186b46bb46ccfb0c3 \ + --hash=sha256:2fe6e18e5c8581f0361b35ae575043c7029d0a92cb3429e6e596c2cdde251432 \ + --hash=sha256:3001013dae10f806380ba739d40dee11db1ecb91684febb8406a87c2ded23dae \ + --hash=sha256:32196b5a99821476537b3f7732432d64d93a58d680a52c5e12a190ee0135d8b5 \ + --hash=sha256:33ba649a6e55ae3808e4c39e01580dc9a9b0d5b02e77b66bb86ef117922b1264 \ + --hash=sha256:341d8acb6724c0c17bdf714319c393bb27f6d23d39bc74f94221b3e59fc31828 \ + --hash=sha256:343cf24de9ed6c728abefc5d5c851d5de06497caa7ac37e5e65dd572921ed1b5 \ + --hash=sha256:36184b44bf60a480863e51021c26aca3dfe8dd2f5eeabb33622b132b9d8b8b54 \ + --hash=sha256:3841f66c1ffdc6cebce8aed64e36db71466f1dc23c0d9a5592e2a782a3042c79 \ + --hash=sha256:4045e2fc4b37ec4b48e8907a5819bdd3380708c139d7cc358f03a3653abedb89 \ + --hash=sha256:419dd9c98bcc9fb0242be89e0c6e922df333b975d4268faa90d58499fd9c9ebe \ + --hash=sha256:42894616da0fc0dcb2ec08a77896c3f56e9cb2f4b66acd76fc8992c3557ceb1c \ + --hash=sha256:42ccc57ff99166a55a59d8c7d14f1a357b7749f9ed3584df74053fd098243451 \ + --hash=sha256:4300e15e7d03660f04be84a125d1bdd0e6b2f674bc0723bc0fd0122f1a4585dc \ + --hash=sha256:443d239d02d9ae55b74015234f2cd8eb09e59fbba30bf60baeb3123ad4c6d5ff \ + --hash=sha256:44524b96481a4c9b8e6c46d6afe43fa1fb485c261e359fbe32b63ff60e3884d8 \ + --hash=sha256:45d04a73c54b6a5fd2bab91a4b5bc8b426949586e61340e212a8484919183859 \ + --hash=sha256:46f48482c1a4748ab2773f75fffbdd1951eb59794e32788834b945da857c47a8 \ + --hash=sha256:4790c9d5dd565ddb3e9f656092f57268951398cef52e364c405ed3112dc7c7c1 \ + --hash=sha256:4bc262ace5a1a7dc3e2eac2fa97b8257ae795389f688b5adf22c5db1e2431c43 \ + --hash=sha256:4c3f8a0d4802df34fcdbeb3dfe3a4d8c9a530baea8fafdf80816fcaac5379d83 \ + --hash=sha256:5355527adaa713ab693cbce7c1e0ec71682f599f61b128cf19d07e5c13c9b1f1 \ + --hash=sha256:555ed147cbe8c8f76e72a4c6cd3b7b761cbf9987891b9448808148204aed74a5 \ + --hash=sha256:55d42a0ef2bdf6bc81e1cc2d49d12460f63c6ae1423c4f4851b828e454ccf6f1 \ + 
--hash=sha256:59195dc244fc183209cf8a93406889cadde47dfd2f0a6b137783aa9c56d67c85 \ + --hash=sha256:59714ab0a5af25d723d8e9816638faf7f4254234decb7d212715c1aa71eee7be \ + --hash=sha256:5b3a5c8089eed498a3af23ce87a80805ff98f6ef8f7bdb70bd1b7dae5105f6ac \ + --hash=sha256:5d6790ff400254137b81b8053b34417e2c46921e302d655181d55ea46df58cf7 \ + --hash=sha256:5df559e9e7644d9042f626f2c3997b555f347d7a855a15f170b253f6c5bfe358 \ + --hash=sha256:5fa01b3d5e3b7d97efab65bd3d88f164e289ec323a8c033c5c38e53ee25c007e \ + --hash=sha256:61490d57e82e23b45c66f96184237994bfafa914433b8cd1a9bb57fecfced59d \ + --hash=sha256:6168af0be75bba990a39f9431cdfae5f0ad501f4af32ae62e8856307200517b8 \ + --hash=sha256:64a0fe3f334a40b989812de70160de6b0ec7e3c9e4a04c0bbc48d97c5d3600ae \ + --hash=sha256:64f689ab822f9b5eb6dfc69893b4b9366db1d2420f7db1f6a2adf2a9ca15ad64 \ + --hash=sha256:699c346abc73993962cac7bb4f02f58e438840fa5458a048d3a178a7a670ba86 \ + --hash=sha256:6b96b0b784fe5fd03beffff2b1533dc0d85e92bab8d1b2c24ef3a5dc8fac5669 \ + --hash=sha256:6bde37765564cd22a676dd8101b657839a1854cfaa9c382c5abf6ff7accfd4ae \ + --hash=sha256:6c135708e987f46053e0a1246a206f53717f9fadfba27174a9769ad4befba5c3 \ + --hash=sha256:6c27a7054b5224710fcfb1a626ec3ff4f28bcb89b899148c72873b18210e446b \ + --hash=sha256:6de6a7f622860af0146cb9ee148682ff4d0cea0b8fd3ad51ce4d40efb2f061d0 \ + --hash=sha256:737005088449ddd3b3df5a95476ee1c2c5c669f5c30eed909548a92939c0e12d \ + --hash=sha256:7451ede3560086abe1aa27dcdcf55cd15c96b56f543fb12e5826eee6f721f858 \ + --hash=sha256:7873b65686a6471c0037139aa000d23fe94628e0daaa27b6e40607c90e3f5ec4 \ + --hash=sha256:79af163a4b40bbd8cfd7ca86ec8b54b81121d3b213b4435ea27d6568bcba3e9d \ + --hash=sha256:7aed8118ae20515974650d08eb724150dc2e20c2814bcc307089569995e88a14 \ + --hash=sha256:7cf9bc4508efb18d8dff6934b602324eb9f8c6644749627ce001d6f38a490889 \ + --hash=sha256:7e57906e38583a2cba67046a09c2637e23297618dc1f3caddbc493f2be97c93f \ + --hash=sha256:7ec85994f96a58cf7ed288caa344b7fe31fd1d503bdf13d7331ead5f70ab60d5 \ + 
--hash=sha256:81f81bbd7cdb4bdc418c09a73809abeda8f263a6bf8f9c7f93ed98b5597af39d \ + --hash=sha256:86aca1616922b40d8ac1b3073a1ead4255a2f13405e5700c01f7c8d29a03972d \ + --hash=sha256:88051c3b7d5325409f433c5a40328fcb0685fc04e5db49ff936e910901d10114 \ + --hash=sha256:887ab1f12b0d227e9260558a4a2320024b20102207ada65c43e1ffc4546df72e \ + --hash=sha256:8a06aa1197ec0281eb1d7daf6073e199eb832fe591ffa329b88bae28f25f5fe5 \ + --hash=sha256:8a1dca5507fa1337f75dcd5070218b20bc68cf8844271c923c1b79dfcbc20391 \ + --hash=sha256:8b23cf252f180cda89220b378d917180f29d313cd6a07b2431c0d3b776aae86f \ + --hash=sha256:8d0e09cf4863c74106b5265c2c310f36146e2b445ff7b3018a56799f28f39f6f \ + --hash=sha256:8de567dec6d451649a781633d36f5c7501711adee329d76c095be2178855b042 \ + --hash=sha256:90fb790138c1a89a2e58c9282fe1089638401f2f3b8dddd758499041bc6e0774 \ + --hash=sha256:92f3b3ec3e6008a1fe00b7c0946a170f161ac00645cde35e3c9a68c2475e8156 \ + --hash=sha256:935afcdea4751b0ac918047a2df3f720212892347767aea28f5b3bf7be4f27c0 \ + --hash=sha256:9a0ff7ee28583ab30a52f371b40f54e7138c52ca67f8ca17ccb7ccf0b383cb5f \ + --hash=sha256:9ad08547995a57e74fea6abaf5940d399447935faebbd2612b3b0ca6f987946b \ + --hash=sha256:9b2a4e17bfd68536c3b801800941c95a1d4a06e3cada11c146093ba939d9638d \ + --hash=sha256:9b78430703cfcf5f5e86eb74027a1ed03a93509273d7c705babb547f03e60016 \ + --hash=sha256:9d0f92b78cfc3b74a42239fdd8c1266f4715b573204c234d2f9fc3fc7a24f185 \ + --hash=sha256:9da162b718b12c4219eeeeb68a5b7552fbc7aadedf2efee440f88b9c0e54b45d \ + --hash=sha256:a00c91104c173c9043bc46f7b30ee5e6d2f6b1149f11f545580f5d6fdff42c0b \ + --hash=sha256:a029be818059870664157194e46ce0e995082ac49926f1423c1f058534d2aaa9 \ + --hash=sha256:a1b3db5fae5cbce2131b7420a3f83553d4d89514c03d67804ced36161fe8b6b2 \ + --hash=sha256:a4cf32a26fa744101b67bfd28c55d992cd19438aff611a46cac7f066afca8fd4 \ + --hash=sha256:aa0bf113d15e8abdfee92aa4db86761b709a09954083afcb5bf0f952d6065fdb \ + --hash=sha256:ab47fe727c13c09d0e6f508e3a49e545008e23bf762a245b020391b621f5b726 \ + 
--hash=sha256:af22763a0a1eff106426a6e1f13c4582e0d0ad89c1493ab6c058236174cd6c6a \ + --hash=sha256:af9d4fd79ee1cc8e7caf693ee02737daabfc0fcf2773ca0a4735b356c8ad6f7c \ + --hash=sha256:b1fef1f13c842a39a03409e30ca0bf87b39a1e2a305a9924deadb75a43105d23 \ + --hash=sha256:b2eff8ee57c5996b0d2a07c3601fb4ce5fbc37547344a26945dd9e5cbd1ed27a \ + --hash=sha256:b4c4fbbcff474e1e5f38be1bf04511c03d492d42eec0babda5d03af3b5589374 \ + --hash=sha256:b8a4131698b6992b2a56015f51646711ec5d893a0b314a4b985477868e240c87 \ + --hash=sha256:b8a7acf04fda1f30f1007f3cc96d29d8cf0a53e626e4e1655fdf4eabc082d367 \ + --hash=sha256:ba783541be46f27c8faea5a6645e193943c17ea2f0ffe593639d906a327a9bcc \ + --hash=sha256:be0744661afbc4099fef7f4e604e7f1ea1be1dd7284f357924af12a705cc7d5c \ + --hash=sha256:be3964f7312ea05ed283b20f87cb533fdc555b2e428cc7be64612c0b2124f08c \ + --hash=sha256:be806e2961cd390a89d6c3ce8c2ae34271cfcd05660f716257838bb560f1c3b6 \ + --hash=sha256:bec77545d188f8bdd29d42bccb9191682a46fb2e655e3d1fb446d47c55ac3b8d \ + --hash=sha256:c10d92fb6d7fd827e44055fcd932ad93dac6a11e832d51534d77b97d1d85400f \ + --hash=sha256:c3782fb753aa825b4ccabc04292e07897e2fd941448eabf666856c5530277626 \ + --hash=sha256:c9ce7a9e967afc0a2af7caa0d15a3e9c1054815f73d6a8cb9225b61921b419bd \ + --hash=sha256:cb0702c12983be3b2fab98ead349ac63a98216d28dda6f518f52da5498a27a1b \ + --hash=sha256:cbc619e84a5e3ab2d452de831c88bdcad824414e9c2d28cd101f94dbdf26329c \ + --hash=sha256:ce4ed8e0c7dbc5b19352b9c2c6131dd23b95fa8698b5cdd076307a33626b72dc \ + --hash=sha256:ce96ab0bdfcef1b8c371ada2100767ace6804ea35aacce0aef3aeb4f3f499ca8 \ + --hash=sha256:cf824aceaeffff029ccfba0da637d432ca71ab21f13e7f6f5179cd88ebc77a8a \ + --hash=sha256:d2a81bdcfde4245468f7030a75a37d50400ac2455c3a4819d9d550c937f90ab5 \ + --hash=sha256:d2cc2b34f9e1d31ce255174da82902ad75bd7c0d88a33df54a77a22f2ef421ee \ + --hash=sha256:d2f184336bc1d6abfaaa1262ed42739c3789b1e3a65a29916a615307d22ffd2e \ + --hash=sha256:d3c622c39f04d5751408f5b801ecb527e6e0a471b367f420a877f7a660d583f6 \ + 
--hash=sha256:d7cf5e726b6fa977e428a61880fb108a62f28b6d0c7ef675b117eaff7076df49 \ + --hash=sha256:d85d784c619370d9329bbd670f41ff5f2ae62ea4519761b679d0f57f0f0ee267 \ + --hash=sha256:d93ebdb82363d2e7bec64eecdc3632b59e84bd270d74fe5be1659f7787052f9b \ + --hash=sha256:db8a6313dbac934193fc17fe7610f70cd8181c542a91382531bef5ed785e5615 \ + --hash=sha256:dbc2ab5d10544eb485baa76c63c501303b716a5c405ff2469a1d8ceffaabf622 \ + --hash=sha256:dbd749cff1defbde270ca346b69b3baf5f1297213ef322254bf2a28537f0b046 \ + --hash=sha256:dc662bc9375a6a394b62dfd331874c434819f10ee3902123200dbcf116963f89 \ + --hash=sha256:dc6b0d5a1ea0318ef2def2b6a55dccf1dcaf77d605672347271ed7b829860765 \ + --hash=sha256:dc79d192fb76fc0c84f2c58672c17bbbc383fd26c3cdc29daae16ce3d927e8b2 \ + --hash=sha256:dd2c1d27ebfe6a015cfa2005b7fe8c52d5019f7bbdd801bc6f7499aab9ae739e \ + --hash=sha256:dea0808153f1fbbad772669d906cddd92100277533a03845de6893cadeffc8be \ + --hash=sha256:e0d7151a1bd5d0a203a5008fc4ae51a159a610cb82ab0a9b2c4d80241745582e \ + --hash=sha256:e14aab02258cb776a108107bd15f5b5e4a1bbaa61ef33b36693dfab6f89d54f9 \ + --hash=sha256:e24d8031a2c62f34853756d9208eeafa6b940a1efcbfe36e8f57d99d52bb7261 \ + --hash=sha256:e36c80c49853b3ffda7aa1831bf175c13356b210c73128c861f3aa93c3cc4015 \ + --hash=sha256:e377e4cf8795cdbdff75b8f0223d7b6c68ff4fef36799d88ccf3a995a91c0112 \ + --hash=sha256:e3acb9c16530362aeaef4e84d57db357002dc5cbfac9a23414c3e73c08301ab2 \ + --hash=sha256:e3dc8d4ede2dbae6c0fc2b6c958bf51ce9fd7e9b40c0f5b8835c3fde44f5807d \ + --hash=sha256:e6491658dd2569f05860bad645569145c8626ac231877b0fb2d5f9bcb7054089 \ + --hash=sha256:eb91d252b35004a84670dfeafadb042528b19842a0080d8b53e5ec1128e8f433 \ + --hash=sha256:f0396e894bd1e66c74ecbc08b4f6a03dc331140942c4b1d345dd131b68574a60 \ + --hash=sha256:f09c9d4c26fa79c1bad927efb05aca2391350b8e61c38cbc0d7d3c814e463124 \ + --hash=sha256:f3cd110e02c5bf17d8fb562f6c9df5c20e73029d587cf8602a2da6c5ef1e32cb \ + --hash=sha256:f7a37dd208f0d658e0487522078b1ed68cd6bce20ef4b5a915d2809b9094b410 \ + 
--hash=sha256:fae4a01ef8c4cb2bbe92ef2063149596907dc4a881a8d26743b3f6b304713171 \ + --hash=sha256:fc327f4497b7087d06204235199daf208fd01c82d80465dc5efa4ec9df1c5b4e \ + --hash=sha256:fcc01c57ce6e70b728af02b2401c5bc853a9e14eb07deda30624374f0aebfe42 \ + --hash=sha256:fde355b02934cc6b07200cc3b27ab0c15870a757d1a72fd401aa92e2ea3c6bfe # via # jsonschema # referencing -ruamel-yaml==0.18.10 \ - --hash=sha256:20c86ab29ac2153f80a428e1254a8adf686d3383df04490514ca3b79a362db58 \ - --hash=sha256:30f22513ab2301b3d2b577adc121c6471f28734d3d9728581245f1e76468b4f1 +ruamel-yaml==0.18.15 \ + --hash=sha256:148f6488d698b7a5eded5ea793a025308b25eca97208181b6a026037f391f701 \ + --hash=sha256:dbfca74b018c4c3fba0b9cc9ee33e53c371194a9000e694995e620490fd40700 # via -r src/requirements.in ruamel-yaml-clib==0.2.12 \ --hash=sha256:040ae85536960525ea62868b642bdb0c2cc6021c9f9d507810c0c604e66f5a7b \ @@ -1148,9 +1283,9 @@ sniffio==1.3.1 \ --hash=sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2 \ --hash=sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc # via anyio -snowballstemmer==2.2.0 \ - --hash=sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1 \ - --hash=sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a +snowballstemmer==3.0.1 \ + --hash=sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064 \ + --hash=sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895 # via sphinx soupsieve==2.7 \ --hash=sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4 \ @@ -1222,101 +1357,138 @@ sphinxcontrib-serializinghtml==2.0.0 \ --hash=sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331 \ --hash=sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d # via sphinx -starlette==0.46.2 \ - --hash=sha256:595633ce89f8ffa71a015caed34a5b2dc1c0cdb3f0f1fbd1e69339cf2abeec35 \ - --hash=sha256:7f7361f34eed179294600af672f565727419830b54b7b084efe44bb82d2fccd5 
+starlette==0.47.2 \ + --hash=sha256:6ae9aa5db235e4846decc1e7b79c4f346adf41e9777aebeb49dfd09bbd7023d8 \ + --hash=sha256:c5847e96134e5c5371ee9fac6fdf1a67336d5815e09eb2a01fdb57a351ef915b # via sphinx-autobuild -typing-extensions==4.13.2 \ - --hash=sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c \ - --hash=sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef +typing-extensions==4.14.1 \ + --hash=sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36 \ + --hash=sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76 # via # anyio # beautifulsoup4 + # cattrs # pydata-sphinx-theme # pygithub # referencing -urllib3==2.4.0 \ - --hash=sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466 \ - --hash=sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813 + # starlette +urllib3==2.5.0 \ + --hash=sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760 \ + --hash=sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc # via # pygithub # requests -uvicorn==0.34.2 \ - --hash=sha256:0e929828f6186353a80b58ea719861d2629d766293b6d19baf086ba31d4f3328 \ - --hash=sha256:deb49af569084536d269fe0a6d67e3754f104cf03aba7c11c40f01aadf33c403 +uvicorn==0.35.0 \ + --hash=sha256:197535216b25ff9b785e29a0b79199f55222193d47f820816e7da751e9bc8d4a \ + --hash=sha256:bc662f087f7cf2ce11a1d7fd70b90c9f98ef2e2831556dd078d131b96cc94a01 # via sphinx-autobuild -watchfiles==1.0.5 \ - --hash=sha256:0125f91f70e0732a9f8ee01e49515c35d38ba48db507a50c5bdcad9503af5827 \ - --hash=sha256:0a04059f4923ce4e856b4b4e5e783a70f49d9663d22a4c3b3298165996d1377f \ - --hash=sha256:0b289572c33a0deae62daa57e44a25b99b783e5f7aed81b314232b3d3c81a11d \ - --hash=sha256:10f6ae86d5cb647bf58f9f655fcf577f713915a5d69057a0371bc257e2553234 \ - --hash=sha256:13bb21f8ba3248386337c9fa51c528868e6c34a707f729ab041c846d52a0c69a \ - --hash=sha256:15ac96dd567ad6c71c71f7b2c658cb22b7734901546cd50a475128ab557593ca \ - 
--hash=sha256:18b3bd29954bc4abeeb4e9d9cf0b30227f0f206c86657674f544cb032296acd5 \ - --hash=sha256:1909e0a9cd95251b15bff4261de5dd7550885bd172e3536824bf1cf6b121e200 \ - --hash=sha256:1a2902ede862969077b97523987c38db28abbe09fb19866e711485d9fbf0d417 \ - --hash=sha256:1a7bac2bde1d661fb31f4d4e8e539e178774b76db3c2c17c4bb3e960a5de07a2 \ - --hash=sha256:237f9be419e977a0f8f6b2e7b0475ababe78ff1ab06822df95d914a945eac827 \ - --hash=sha256:266710eb6fddc1f5e51843c70e3bebfb0f5e77cf4f27129278c70554104d19ed \ - --hash=sha256:29c7fd632ccaf5517c16a5188e36f6612d6472ccf55382db6c7fe3fcccb7f59f \ - --hash=sha256:2b7a21715fb12274a71d335cff6c71fe7f676b293d322722fe708a9ec81d91f5 \ - --hash=sha256:2cfb371be97d4db374cba381b9f911dd35bb5f4c58faa7b8b7106c8853e5d225 \ - --hash=sha256:2cfcb3952350e95603f232a7a15f6c5f86c5375e46f0bd4ae70d43e3e063c13d \ - --hash=sha256:2f1fefb2e90e89959447bc0420fddd1e76f625784340d64a2f7d5983ef9ad246 \ - --hash=sha256:360a398c3a19672cf93527f7e8d8b60d8275119c5d900f2e184d32483117a705 \ - --hash=sha256:3e380c89983ce6e6fe2dd1e1921b9952fb4e6da882931abd1824c092ed495dec \ - --hash=sha256:4a8ec1e4e16e2d5bafc9ba82f7aaecfeec990ca7cd27e84fb6f191804ed2fcfc \ - --hash=sha256:4ab626da2fc1ac277bbf752446470b367f84b50295264d2d313e28dc4405d663 \ - --hash=sha256:4b6227351e11c57ae997d222e13f5b6f1f0700d84b8c52304e8675d33a808382 \ - --hash=sha256:554389562c29c2c182e3908b149095051f81d28c2fec79ad6c8997d7d63e0009 \ - --hash=sha256:5c40fe7dd9e5f81e0847b1ea64e1f5dd79dd61afbedb57759df06767ac719b40 \ - --hash=sha256:68b2dddba7a4e6151384e252a5632efcaa9bc5d1c4b567f3cb621306b2ca9f63 \ - --hash=sha256:7ee32c9a9bee4d0b7bd7cbeb53cb185cf0b622ac761efaa2eba84006c3b3a614 \ - --hash=sha256:830aa432ba5c491d52a15b51526c29e4a4b92bf4f92253787f9726fe01519487 \ - --hash=sha256:832ccc221927c860e7286c55c9b6ebcc0265d5e072f49c7f6456c7798d2b39aa \ - --hash=sha256:839ebd0df4a18c5b3c1b890145b5a3f5f64063c2a0d02b13c76d78fe5de34936 \ - --hash=sha256:852de68acd6212cd6d33edf21e6f9e56e5d98c6add46f48244bd479d97c967c6 \ - 
--hash=sha256:85fbb6102b3296926d0c62cfc9347f6237fb9400aecd0ba6bbda94cae15f2b3b \ - --hash=sha256:86c0df05b47a79d80351cd179893f2f9c1b1cae49d96e8b3290c7f4bd0ca0a92 \ - --hash=sha256:894342d61d355446d02cd3988a7326af344143eb33a2fd5d38482a92072d9563 \ - --hash=sha256:8c0db396e6003d99bb2d7232c957b5f0b5634bbd1b24e381a5afcc880f7373fb \ - --hash=sha256:8e637810586e6fe380c8bc1b3910accd7f1d3a9a7262c8a78d4c8fb3ba6a2b3d \ - --hash=sha256:9475b0093767e1475095f2aeb1d219fb9664081d403d1dff81342df8cd707034 \ - --hash=sha256:95cf944fcfc394c5f9de794ce581914900f82ff1f855326f25ebcf24d5397418 \ - --hash=sha256:974866e0db748ebf1eccab17862bc0f0303807ed9cda465d1324625b81293a18 \ - --hash=sha256:9848b21ae152fe79c10dd0197304ada8f7b586d3ebc3f27f43c506e5a52a863c \ - --hash=sha256:9f4571a783914feda92018ef3901dab8caf5b029325b5fe4558c074582815249 \ - --hash=sha256:a056c2f692d65bf1e99c41045e3bdcaea3cb9e6b5a53dcaf60a5f3bd95fc9763 \ - --hash=sha256:a0dbcb1c2d8f2ab6e0a81c6699b236932bd264d4cef1ac475858d16c403de74d \ - --hash=sha256:a16512051a822a416b0d477d5f8c0e67b67c1a20d9acecb0aafa3aa4d6e7d256 \ - --hash=sha256:a2014a2b18ad3ca53b1f6c23f8cd94a18ce930c1837bd891262c182640eb40a6 \ - --hash=sha256:a3904d88955fda461ea2531fcf6ef73584ca921415d5cfa44457a225f4a42bc1 \ - --hash=sha256:a74add8d7727e6404d5dc4dcd7fac65d4d82f95928bbee0cf5414c900e86773e \ - --hash=sha256:ab44e1580924d1ffd7b3938e02716d5ad190441965138b4aa1d1f31ea0877f04 \ - --hash=sha256:b551d4fb482fc57d852b4541f911ba28957d051c8776e79c3b4a51eb5e2a1b11 \ - --hash=sha256:b5eb568c2aa6018e26da9e6c86f3ec3fd958cee7f0311b35c2630fa4217d17f2 \ - --hash=sha256:b659576b950865fdad31fa491d31d37cf78b27113a7671d39f919828587b429b \ - --hash=sha256:b6e76ceb1dd18c8e29c73f47d41866972e891fc4cc7ba014f487def72c1cf096 \ - --hash=sha256:b7529b5dcc114679d43827d8c35a07c493ad6f083633d573d81c660abc5979e9 \ - --hash=sha256:b9dca99744991fc9850d18015c4f0438865414e50069670f5f7eee08340d8b40 \ - --hash=sha256:ba5552a1b07c8edbf197055bc9d518b8f0d98a1c6a73a293bc0726dce068ed01 \ - 
--hash=sha256:bfe0cbc787770e52a96c6fda6726ace75be7f840cb327e1b08d7d54eadc3bc85 \ - --hash=sha256:c0901429650652d3f0da90bad42bdafc1f9143ff3605633c455c999a2d786cac \ - --hash=sha256:cb1489f25b051a89fae574505cc26360c8e95e227a9500182a7fe0afcc500ce0 \ - --hash=sha256:cd47d063fbeabd4c6cae1d4bcaa38f0902f8dc5ed168072874ea11d0c7afc1ff \ - --hash=sha256:d363152c5e16b29d66cbde8fa614f9e313e6f94a8204eaab268db52231fe5358 \ - --hash=sha256:d5730f3aa35e646103b53389d5bc77edfbf578ab6dab2e005142b5b80a35ef25 \ - --hash=sha256:d6f9367b132078b2ceb8d066ff6c93a970a18c3029cea37bfd7b2d3dd2e5db8f \ - --hash=sha256:dfd6ae1c385ab481766b3c61c44aca2b3cd775f6f7c0fa93d979ddec853d29d5 \ - --hash=sha256:e0da39ff917af8b27a4bdc5a97ac577552a38aac0d260a859c1517ea3dc1a7c4 \ - --hash=sha256:ecf6cd9f83d7c023b1aba15d13f705ca7b7d38675c121f3cc4a6e25bd0857ee9 \ - --hash=sha256:ee0822ce1b8a14fe5a066f93edd20aada932acfe348bede8aa2149f1a4489512 \ - --hash=sha256:f2e55a9b162e06e3f862fb61e399fe9f05d908d019d87bf5b496a04ef18a970a \ - --hash=sha256:f436601594f15bf406518af922a89dcaab416568edb6f65c4e5bbbad1ea45c11 \ - --hash=sha256:f59b870db1f1ae5a9ac28245707d955c8721dd6565e7f411024fa374b5362d1d \ - --hash=sha256:fc533aa50664ebd6c628b2f30591956519462f5d27f951ed03d6c82b2dfd9965 \ - --hash=sha256:fe43139b2c0fdc4a14d4f8d5b5d967f7a2777fd3d38ecf5b1ec669b0d7e43c21 \ - --hash=sha256:fed1cd825158dcaae36acce7b2db33dcbfd12b30c34317a88b8ed80f0541cc57 +watchfiles==1.1.0 \ + --hash=sha256:00645eb79a3faa70d9cb15c8d4187bb72970b2470e938670240c7998dad9f13a \ + --hash=sha256:04e4ed5d1cd3eae68c89bcc1a485a109f39f2fd8de05f705e98af6b5f1861f1f \ + --hash=sha256:0a7d40b77f07be87c6faa93d0951a0fcd8cbca1ddff60a1b65d741bac6f3a9f6 \ + --hash=sha256:0ece16b563b17ab26eaa2d52230c9a7ae46cf01759621f4fbbca280e438267b3 \ + --hash=sha256:11ee4444250fcbeb47459a877e5e80ed994ce8e8d20283857fc128be1715dac7 \ + --hash=sha256:12b0a02a91762c08f7264e2e79542f76870c3040bbc847fb67410ab81474932a \ + 
--hash=sha256:12fe8eaffaf0faa7906895b4f8bb88264035b3f0243275e0bf24af0436b27259 \ + --hash=sha256:130fc497b8ee68dce163e4254d9b0356411d1490e868bd8790028bc46c5cc297 \ + --hash=sha256:17ab167cca6339c2b830b744eaf10803d2a5b6683be4d79d8475d88b4a8a4be1 \ + --hash=sha256:199207b2d3eeaeb80ef4411875a6243d9ad8bc35b07fc42daa6b801cc39cc41c \ + --hash=sha256:20ecc8abbd957046f1fe9562757903f5eaf57c3bce70929fda6c7711bb58074a \ + --hash=sha256:239736577e848678e13b201bba14e89718f5c2133dfd6b1f7846fa1b58a8532b \ + --hash=sha256:249590eb75ccc117f488e2fabd1bfa33c580e24b96f00658ad88e38844a040bb \ + --hash=sha256:27f30e14aa1c1e91cb653f03a63445739919aef84c8d2517997a83155e7a2fcc \ + --hash=sha256:29e7bc2eee15cbb339c68445959108803dc14ee0c7b4eea556400131a8de462b \ + --hash=sha256:328dbc9bff7205c215a7807da7c18dce37da7da718e798356212d22696404339 \ + --hash=sha256:32d6d4e583593cb8576e129879ea0991660b935177c0f93c6681359b3654bfa9 \ + --hash=sha256:3366f56c272232860ab45c77c3ca7b74ee819c8e1f6f35a7125556b198bbc6df \ + --hash=sha256:3434e401f3ce0ed6b42569128b3d1e3af773d7ec18751b918b89cd49c14eaafb \ + --hash=sha256:37d3d3f7defb13f62ece99e9be912afe9dd8a0077b7c45ee5a57c74811d581a4 \ + --hash=sha256:3a6fd40bbb50d24976eb275ccb55cd1951dfb63dbc27cae3066a6ca5f4beabd5 \ + --hash=sha256:3aba215958d88182e8d2acba0fdaf687745180974946609119953c0e112397dc \ + --hash=sha256:406520216186b99374cdb58bc48e34bb74535adec160c8459894884c983a149c \ + --hash=sha256:4281cd9fce9fc0a9dbf0fc1217f39bf9cf2b4d315d9626ef1d4e87b84699e7e8 \ + --hash=sha256:42f92befc848bb7a19658f21f3e7bae80d7d005d13891c62c2cd4d4d0abb3433 \ + --hash=sha256:48aa25e5992b61debc908a61ab4d3f216b64f44fdaa71eb082d8b2de846b7d12 \ + --hash=sha256:5007f860c7f1f8df471e4e04aaa8c43673429047d63205d1630880f7637bca30 \ + --hash=sha256:50a51a90610d0845a5931a780d8e51d7bd7f309ebc25132ba975aca016b576a0 \ + --hash=sha256:51556d5004887045dba3acdd1fdf61dddea2be0a7e18048b5e853dcd37149b86 \ + --hash=sha256:51b81e55d40c4b4aa8658427a3ee7ea847c591ae9e8b81ef94a90b668999353c \ + 
--hash=sha256:5366164391873ed76bfdf618818c82084c9db7fac82b64a20c44d335eec9ced5 \ + --hash=sha256:54062ef956807ba806559b3c3d52105ae1827a0d4ab47b621b31132b6b7e2866 \ + --hash=sha256:60022527e71d1d1fda67a33150ee42869042bce3d0fcc9cc49be009a9cded3fb \ + --hash=sha256:622d6b2c06be19f6e89b1d951485a232e3b59618def88dbeda575ed8f0d8dbf2 \ + --hash=sha256:62cc7a30eeb0e20ecc5f4bd113cd69dcdb745a07c68c0370cea919f373f65d9e \ + --hash=sha256:693ed7ec72cbfcee399e92c895362b6e66d63dac6b91e2c11ae03d10d503e575 \ + --hash=sha256:6d2404af8db1329f9a3c9b79ff63e0ae7131986446901582067d9304ae8aaf7f \ + --hash=sha256:7049e52167fc75fc3cc418fc13d39a8e520cbb60ca08b47f6cedb85e181d2f2a \ + --hash=sha256:7080c4bb3efd70a07b1cc2df99a7aa51d98685be56be6038c3169199d0a1c69f \ + --hash=sha256:7738027989881e70e3723c75921f1efa45225084228788fc59ea8c6d732eb30d \ + --hash=sha256:7a7bd57a1bb02f9d5c398c0c1675384e7ab1dd39da0ca50b7f09af45fa435277 \ + --hash=sha256:7b3443f4ec3ba5aa00b0e9fa90cf31d98321cbff8b925a7c7b84161619870bc9 \ + --hash=sha256:7c55b0f9f68590115c25272b06e63f0824f03d4fc7d6deed43d8ad5660cabdbf \ + --hash=sha256:7fd1b3879a578a8ec2076c7961076df540b9af317123f84569f5a9ddee64ce92 \ + --hash=sha256:8076a5769d6bdf5f673a19d51da05fc79e2bbf25e9fe755c47595785c06a8c72 \ + --hash=sha256:80f811146831c8c86ab17b640801c25dc0a88c630e855e2bef3568f30434d52b \ + --hash=sha256:8412eacef34cae2836d891836a7fff7b754d6bcac61f6c12ba5ca9bc7e427b68 \ + --hash=sha256:865c8e95713744cf5ae261f3067861e9da5f1370ba91fc536431e29b418676fa \ + --hash=sha256:86b1e28d4c37e89220e924305cd9f82866bb0ace666943a6e4196c5df4d58dcc \ + --hash=sha256:891c69e027748b4a73847335d208e374ce54ca3c335907d381fde4e41661b13b \ + --hash=sha256:8ac164e20d17cc285f2b94dc31c384bc3aa3dd5e7490473b3db043dd70fbccfd \ + --hash=sha256:8c5701dc474b041e2934a26d31d39f90fac8a3dee2322b39f7729867f932b1d4 \ + --hash=sha256:90ebb429e933645f3da534c89b29b665e285048973b4d2b6946526888c3eb2c7 \ + --hash=sha256:923fec6e5461c42bd7e3fd5ec37492c6f3468be0499bc0707b4bbbc16ac21792 \ + 
--hash=sha256:935f9edd022ec13e447e5723a7d14456c8af254544cefbc533f6dd276c9aa0d9 \ + --hash=sha256:95ab1594377effac17110e1352989bdd7bdfca9ff0e5eeccd8c69c5389b826d0 \ + --hash=sha256:9974d2f7dc561cce3bb88dfa8eb309dab64c729de85fba32e98d75cf24b66297 \ + --hash=sha256:9c733cda03b6d636b4219625a4acb5c6ffb10803338e437fb614fef9516825ef \ + --hash=sha256:9dc001c3e10de4725c749d4c2f2bdc6ae24de5a88a339c4bce32300a31ede179 \ + --hash=sha256:9f811079d2f9795b5d48b55a37aa7773680a5659afe34b54cc1d86590a51507d \ + --hash=sha256:a2726d7bfd9f76158c84c10a409b77a320426540df8c35be172444394b17f7ea \ + --hash=sha256:a479466da6db5c1e8754caee6c262cd373e6e6c363172d74394f4bff3d84d7b5 \ + --hash=sha256:a543492513a93b001975ae283a51f4b67973662a375a403ae82f420d2c7205ee \ + --hash=sha256:a89c75a5b9bc329131115a409d0acc16e8da8dfd5867ba59f1dd66ae7ea8fa82 \ + --hash=sha256:a8f6f72974a19efead54195bc9bed4d850fc047bb7aa971268fd9a8387c89011 \ + --hash=sha256:a9ccbf1f129480ed3044f540c0fdbc4ee556f7175e5ab40fe077ff6baf286d4e \ + --hash=sha256:aa0cc8365ab29487eb4f9979fd41b22549853389e22d5de3f134a6796e1b05a4 \ + --hash=sha256:adb4167043d3a78280d5d05ce0ba22055c266cf8655ce942f2fb881262ff3cdf \ + --hash=sha256:af06c863f152005c7592df1d6a7009c836a247c9d8adb78fef8575a5a98699db \ + --hash=sha256:b067915e3c3936966a8607f6fe5487df0c9c4afb85226613b520890049deea20 \ + --hash=sha256:b7c5f6fe273291f4d414d55b2c80d33c457b8a42677ad14b4b47ff025d0893e4 \ + --hash=sha256:b915daeb2d8c1f5cee4b970f2e2c988ce6514aace3c9296e58dd64dc9aa5d575 \ + --hash=sha256:ba0e3255b0396cac3cc7bbace76404dd72b5438bf0d8e7cefa2f79a7f3649caa \ + --hash=sha256:bda8136e6a80bdea23e5e74e09df0362744d24ffb8cd59c4a95a6ce3d142f79c \ + --hash=sha256:bfe3c517c283e484843cb2e357dd57ba009cff351edf45fb455b5fbd1f45b15f \ + --hash=sha256:c588c45da9b08ab3da81d08d7987dae6d2a3badd63acdb3e206a42dbfa7cb76f \ + --hash=sha256:c600e85f2ffd9f1035222b1a312aff85fd11ea39baff1d705b9b047aad2ce267 \ + --hash=sha256:c68e9f1fcb4d43798ad8814c4c1b61547b014b667216cb754e606bfade587018 \ + 
--hash=sha256:c9649dfc57cc1f9835551deb17689e8d44666315f2e82d337b9f07bd76ae3aa2 \ + --hash=sha256:cb45350fd1dc75cd68d3d72c47f5b513cb0578da716df5fba02fff31c69d5f2d \ + --hash=sha256:cbcf8630ef4afb05dc30107bfa17f16c0896bb30ee48fc24bf64c1f970f3b1fd \ + --hash=sha256:cbd949bdd87567b0ad183d7676feb98136cde5bb9025403794a4c0db28ed3a47 \ + --hash=sha256:cc08ef8b90d78bfac66f0def80240b0197008e4852c9f285907377b2947ffdcb \ + --hash=sha256:cd17a1e489f02ce9117b0de3c0b1fab1c3e2eedc82311b299ee6b6faf6c23a29 \ + --hash=sha256:d05686b5487cfa2e2c28ff1aa370ea3e6c5accfe6435944ddea1e10d93872147 \ + --hash=sha256:d0e10e6f8f6dc5762adee7dece33b722282e1f59aa6a55da5d493a97282fedd8 \ + --hash=sha256:d181ef50923c29cf0450c3cd47e2f0557b62218c50b2ab8ce2ecaa02bd97e670 \ + --hash=sha256:d1caf40c1c657b27858f9774d5c0e232089bca9cb8ee17ce7478c6e9264d2587 \ + --hash=sha256:d7642b9bc4827b5518ebdb3b82698ada8c14c7661ddec5fe719f3e56ccd13c97 \ + --hash=sha256:d9481174d3ed982e269c090f780122fb59cee6c3796f74efe74e70f7780ed94c \ + --hash=sha256:d9ba68ec283153dead62cbe81872d28e053745f12335d037de9cbd14bd1877f5 \ + --hash=sha256:da71945c9ace018d8634822f16cbc2a78323ef6c876b1d34bbf5d5222fd6a72e \ + --hash=sha256:dc44678a72ac0910bac46fa6a0de6af9ba1355669b3dfaf1ce5f05ca7a74364e \ + --hash=sha256:df32d59cb9780f66d165a9a7a26f19df2c7d24e3bd58713108b41d0ff4f929c6 \ + --hash=sha256:df670918eb7dd719642e05979fc84704af913d563fd17ed636f7c4783003fdcc \ + --hash=sha256:e78b6ed8165996013165eeabd875c5dfc19d41b54f94b40e9fff0eb3193e5e8e \ + --hash=sha256:ed8fc66786de8d0376f9f913c09e963c66e90ced9aa11997f93bdb30f7c872a8 \ + --hash=sha256:eff4b8d89f444f7e49136dc695599a591ff769300734446c0a86cba2eb2f9895 \ + --hash=sha256:f21af781a4a6fbad54f03c598ab620e3a77032c5878f3d780448421a6e1818c7 \ + --hash=sha256:f2bcdc54ea267fe72bfc7d83c041e4eb58d7d8dc6f578dfddb52f037ce62f432 \ + --hash=sha256:f2f0498b7d2a3c072766dba3274fe22a183dbea1f99d188f1c6c72209a1063dc \ + --hash=sha256:f7208ab6e009c627b7557ce55c465c98967e8caa8b11833531fdf95799372633 \ + 
--hash=sha256:f7590d5a455321e53857892ab8879dce62d1f4b04748769f5adf2e707afb9d4f \ + --hash=sha256:fa257a4d0d21fcbca5b5fcba9dca5a78011cb93c0323fb8855c6d2dfbc76eb77 \ + --hash=sha256:fba9b62da882c1be1280a7584ec4515d0a6006a94d6e5819730ec2eab60ffe12 \ + --hash=sha256:fe4371595edf78c41ef8ac8df20df3943e13defd0efcb732b2e393b5a8a7a71f # via sphinx-autobuild websockets==15.0.1 \ --hash=sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2 \ @@ -1389,84 +1561,3 @@ websockets==15.0.1 \ --hash=sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f \ --hash=sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7 # via sphinx-autobuild -wrapt==1.17.2 \ - --hash=sha256:08e7ce672e35efa54c5024936e559469436f8b8096253404faeb54d2a878416f \ - --hash=sha256:0a6e821770cf99cc586d33833b2ff32faebdbe886bd6322395606cf55153246c \ - --hash=sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a \ - --hash=sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b \ - --hash=sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555 \ - --hash=sha256:1473400e5b2733e58b396a04eb7f35f541e1fb976d0c0724d0223dd607e0f74c \ - --hash=sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b \ - --hash=sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6 \ - --hash=sha256:1e1fe0e6ab7775fd842bc39e86f6dcfc4507ab0ffe206093e76d61cde37225c8 \ - --hash=sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662 \ - --hash=sha256:2696993ee1eebd20b8e4ee4356483c4cb696066ddc24bd70bcbb80fa56ff9061 \ - --hash=sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998 \ - --hash=sha256:36ccae62f64235cf8ddb682073a60519426fdd4725524ae38874adf72b5f2aeb \ - --hash=sha256:3cedbfa9c940fdad3e6e941db7138e26ce8aad38ab5fe9dcfadfed9db7a54e62 \ - --hash=sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984 \ - 
--hash=sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392 \ - --hash=sha256:4011d137b9955791f9084749cba9a367c68d50ab8d11d64c50ba1688c9b457f2 \ - --hash=sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306 \ - --hash=sha256:410a92fefd2e0e10d26210e1dfb4a876ddaf8439ef60d6434f21ef8d87efc5b7 \ - --hash=sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3 \ - --hash=sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9 \ - --hash=sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6 \ - --hash=sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192 \ - --hash=sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317 \ - --hash=sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f \ - --hash=sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda \ - --hash=sha256:582530701bff1dec6779efa00c516496968edd851fba224fbd86e46cc6b73563 \ - --hash=sha256:58455b79ec2661c3600e65c0a716955adc2410f7383755d537584b0de41b1d8a \ - --hash=sha256:58705da316756681ad3c9c73fd15499aa4d8c69f9fd38dc8a35e06c12468582f \ - --hash=sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d \ - --hash=sha256:5c803c401ea1c1c18de70a06a6f79fcc9c5acfc79133e9869e730ad7f8ad8ef9 \ - --hash=sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8 \ - --hash=sha256:612dff5db80beef9e649c6d803a8d50c409082f1fedc9dbcdfde2983b2025b82 \ - --hash=sha256:62c2caa1585c82b3f7a7ab56afef7b3602021d6da34fbc1cf234ff139fed3cd9 \ - --hash=sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845 \ - --hash=sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82 \ - --hash=sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125 \ - --hash=sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504 \ - --hash=sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b \ - 
--hash=sha256:80dd7db6a7cb57ffbc279c4394246414ec99537ae81ffd702443335a61dbf3a7 \ - --hash=sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc \ - --hash=sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6 \ - --hash=sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40 \ - --hash=sha256:91bd7d1773e64019f9288b7a5101f3ae50d3d8e6b1de7edee9c2ccc1d32f0c0a \ - --hash=sha256:95c658736ec15602da0ed73f312d410117723914a5c91a14ee4cdd72f1d790b3 \ - --hash=sha256:99039fa9e6306880572915728d7f6c24a86ec57b0a83f6b2491e1d8ab0235b9a \ - --hash=sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72 \ - --hash=sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681 \ - --hash=sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438 \ - --hash=sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae \ - --hash=sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2 \ - --hash=sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb \ - --hash=sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5 \ - --hash=sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a \ - --hash=sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3 \ - --hash=sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8 \ - --hash=sha256:b4e42a40a5e164cbfdb7b386c966a588b1047558a990981ace551ed7e12ca9c2 \ - --hash=sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22 \ - --hash=sha256:b60fb58b90c6d63779cb0c0c54eeb38941bae3ecf7a73c764c52c88c2dcb9d72 \ - --hash=sha256:b870b5df5b71d8c3359d21be8f0d6c485fa0ebdb6477dda51a1ea54a9b558061 \ - --hash=sha256:ba0f0eb61ef00ea10e00eb53a9129501f52385c44853dbd6c4ad3f403603083f \ - --hash=sha256:bb87745b2e6dc56361bfde481d5a378dc314b252a98d7dd19a651a3fa58f24a9 \ - --hash=sha256:bb90fb8bda722a1b9d48ac1e6c38f923ea757b3baf8ebd0c82e09c5c1a0e7a04 \ - 
--hash=sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98 \ - --hash=sha256:c86563182421896d73858e08e1db93afdd2b947a70064b813d515d66549e15f9 \ - --hash=sha256:c958bcfd59bacc2d0249dcfe575e71da54f9dcf4a8bdf89c4cb9a68a1170d73f \ - --hash=sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b \ - --hash=sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925 \ - --hash=sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6 \ - --hash=sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0 \ - --hash=sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9 \ - --hash=sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c \ - --hash=sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991 \ - --hash=sha256:ecc840861360ba9d176d413a5489b9a0aff6d6303d7e733e2c4623cfa26904a6 \ - --hash=sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000 \ - --hash=sha256:f393cda562f79828f38a819f4788641ac7c4085f30f1ce1a68672baa686482bb \ - --hash=sha256:f917c1180fdb8623c2b75a99192f4025e412597c50b2ac870f156de8fb101119 \ - --hash=sha256:fc78a84e2dfbc27afe4b2bd7c80c8db9bca75cc5b85df52bfe634596a1da846b \ - --hash=sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58 - # via deprecated From 9f410b3b0eb0043e13b197cb1473f2ce3a369ca7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Fri, 22 Aug 2025 08:10:52 +0200 Subject: [PATCH 114/231] Upgrade Versions (#223) --- MODULE.bazel | 9 ++------- docs/requirements/test_overview.rst | 6 +++--- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 16bdae07..ac79e51c 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "1.0.2", + version = "1.1.0", compatibility_level = 1, ) @@ -101,12 +101,7 @@ bazel_dep(name = "score_process", version = "1.1.1") # Add 
Linter bazel_dep(name = "rules_multitool", version = "1.9.0") -bazel_dep(name = "score_tooling", version = "0.0.0") -git_override( - module_name = "score_tooling", - commit = "07bb8bce1dc5ed806b934bbb7bb49f6b796e0387", - remote = "https://github.com/eclipse-score/tooling", -) +bazel_dep(name = "score_tooling", version = "1.0.0") multitool_root = use_extension("@rules_multitool//multitool:extension.bzl", "multitool") use_repo(multitool_root, "actionlint_hub", "multitool", "ruff_hub", "shellcheck_hub", "yamlfmt_hub") diff --git a/docs/requirements/test_overview.rst b/docs/requirements/test_overview.rst index 9b0dc20a..b3658bc1 100644 --- a/docs/requirements/test_overview.rst +++ b/docs/requirements/test_overview.rst @@ -7,19 +7,19 @@ Testing Statistics .. needtable:: SUCCESSFUL TEST :filter: result == "passed" :tags: TEST - :columns: external_url as "source_link"; name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique + :columns: name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique;id as "link" .. needtable:: FAILED TEST :filter: result == "failed" :tags: TEST - :columns: external_url as "source_link"; name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique + :columns: name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique;id as "link" .. needtable:: OTHER TEST :filter: result != "failed" and result != "passed" :tags: TEST - :columns: external_url as "source_link"; name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique + :columns: name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique;id as "link" .. 
needpie:: Test Results From 753dbe79b40f347af05ea1b13602109d89389ac7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Fri, 22 Aug 2025 12:27:29 +0200 Subject: [PATCH 115/231] Added some test decorators & changed test rst (#224) --- docs/requirements/requirements.rst | 17 +++++ docs/requirements/test_overview.rst | 17 +++-- .../tests/test_check_options.py | 68 +++++++++++++------ .../tests/test_metamodel__init__.py | 22 ++++++ .../score_source_code_linker/__init__.py | 4 ++ .../need_source_links.py | 3 + .../score_source_code_linker/needlinks.py | 2 + .../score_source_code_linker/testlink.py | 2 + .../tests/test_codelink.py | 66 +++++++++++++++++- .../tests/test_testlink.py | 36 ++++++++++ .../tests/test_xml_parser.py | 34 +++++++++- .../score_source_code_linker/xml_parser.py | 2 + 12 files changed, 245 insertions(+), 28 deletions(-) diff --git a/docs/requirements/requirements.rst b/docs/requirements/requirements.rst index 26bb3e85..3c142971 100644 --- a/docs/requirements/requirements.rst +++ b/docs/requirements/requirements.rst @@ -975,9 +975,26 @@ Overview of Tool to Process Requirements .. ------------------------------------------------------------------------ .. +Grouped Requirements +#################### + +.. tool_req:: Metamodel + :id: tool_req__docs_metamodel + :tags: metamodel + :implemented: YES + + Docs-as-Code shall provide a metamodel for definining config in a `metamodel.yaml` in the source code repository. + + .. note:: "satisfied by" is something like "used by" or "required by". + + .. needextend:: c.this_doc() and type == 'tool_req' :safety: ASIL_B :security: NO .. needextend:: c.this_doc() and type == 'tool_req' and not status :status: valid + +.. 
needextend:: "metamodel.yaml" in source_code_link + :+satisfies: tool_req__docs_metamodel + :+tags: config diff --git a/docs/requirements/test_overview.rst b/docs/requirements/test_overview.rst index b3658bc1..c03d24c4 100644 --- a/docs/requirements/test_overview.rst +++ b/docs/requirements/test_overview.rst @@ -4,24 +4,33 @@ Testing Statistics ================== -.. needtable:: SUCCESSFUL TEST +.. needtable:: SUCCESSFUL TESTS :filter: result == "passed" :tags: TEST :columns: name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique;id as "link" - -.. needtable:: FAILED TEST +.. needtable:: FAILED TESTS :filter: result == "failed" :tags: TEST :columns: name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique;id as "link" -.. needtable:: OTHER TEST +.. needtable:: SKIPPED/DISABLED TESTS :filter: result != "failed" and result != "passed" :tags: TEST :columns: name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique;id as "link" +.. needpie:: Requirements That Have A Linked Test + :labels: requirement not implemeted, not tested, tested + :colors: red,yellow, green + :legend: + + type == 'tool_req' and implemented == 'NO' + type == 'tool_req' and testlink == '' and (implemented == 'YES' or implemented == 'PARTIAL') + type == 'tool_req' and testlink != '' and (implemented == 'YES' or implemented == 'PARTIAL') + + .. 
needpie:: Test Results :labels: passed, failed, skipped :colors: green, red, orange diff --git a/src/extensions/score_metamodel/tests/test_check_options.py b/src/extensions/score_metamodel/tests/test_check_options.py index 85ba06dd..a24759c0 100644 --- a/src/extensions/score_metamodel/tests/test_check_options.py +++ b/src/extensions/score_metamodel/tests/test_check_options.py @@ -22,17 +22,9 @@ from score_metamodel.tests import fake_check_logger, need from sphinx.application import Sphinx +from attribute_plugin import add_test_properties + -@pytest.mark.metadata( - Verifies=["tool_req__toolchain_sphinx_needs_build__options"], - Description=( - "It should check if directives have required options and required values." - ), - ASIL="ASIL_B", - Priority="1", - TestType="Requirements-based test", - DerivationTechnique="Analysis of requirements", -) class NeedTypeDict(TypedDict, total=False): directive: str mandatory_options: dict[str, str | int] | None @@ -79,8 +71,13 @@ class TestCheckOptions: } ] + @add_test_properties( + partially_verifies=["tool_req__docs_metamodel"], + test_type="requirements-based", + derivation_technique="requirements-analysis", + ) def test_unknown_directive(self): - # Given a need with an unknown type, should raise an error + """Given a need with an unknown type, should raise an error""" need_1 = need( target_id="tool_req__001", id="tool_req__001", @@ -101,8 +98,13 @@ def test_unknown_directive(self): expect_location=False, ) + @add_test_properties( + partially_verifies=["tool_req__docs_metamodel"], + test_type="requirements-based", + derivation_technique="requirements-analysis", + ) def test_unknown_directive_extra_option(self): - # Given a need an unknown/undefined type, should raise an error + """Given a need an unknown/undefined type, should raise an error""" need_1 = need( target_id="tool_req__001", type="unknown_type", @@ -123,10 +125,16 @@ def test_unknown_directive_extra_option(self): expect_location=False, ) + @add_test_properties( + 
partially_verifies=["tool_req__docs_metamodel"], + test_type="requirements-based", + derivation_technique="requirements-analysis", + ) def test_missing_mandatory_options_info(self): - # Given any need of known type - # with missing mandatory options info - # it should raise an error + """ + Given any need of known type with missing mandatory options info + it should raise an error + """ need_1 = need( target_id="wf_req__001", id="wf_req__001", @@ -148,10 +156,16 @@ def test_missing_mandatory_options_info(self): expect_location=False, ) + @add_test_properties( + partially_verifies=["tool_req__docs_metamodel"], + test_type="requirements-based", + derivation_technique="requirements-analysis", + ) def test_invalid_option_type(self): - # Given any need of known type - # with missing mandatory options info - # it should raise an error + """ + Given any need of known type with missing mandatory options info + it should raise an error + """ need_1 = need( target_id="wf_req__001", id="wf_req__001", @@ -173,9 +187,16 @@ def test_invalid_option_type(self): expect_location=False, ) + @add_test_properties( + partially_verifies=["tool_req__docs_metamodel"], + test_type="requirements-based", + derivation_technique="requirements-analysis", + ) def test_unknown_option_present_in_neither_req_opt_neither_opt_opt(self): - # Given a need with an option that is not listed - # in the required and optional options + """ + Given a need with an option that is not listed + in the required and optional options + """ need_1 = need( target_id="tool_req__001", id="tool_req__0011", @@ -200,8 +221,13 @@ def test_unknown_option_present_in_neither_req_opt_neither_opt_opt(self): expect_location=False, ) + @add_test_properties( + partially_verifies=["tool_req__docs_metamodel"], + test_type="requirements-based", + derivation_technique="requirements-analysis", + ) def test_invalid_option_value_type_raises_value_error(self): - # Given a need with an option of wrong type (list with non-str) + """Given a 
need with an option of wrong type (list with non-str)""" need_1 = need( target_id="tool_req__002", id="tool_req__002", diff --git a/src/extensions/score_metamodel/tests/test_metamodel__init__.py b/src/extensions/score_metamodel/tests/test_metamodel__init__.py index 1cd7041a..ea88aa7f 100644 --- a/src/extensions/score_metamodel/tests/test_metamodel__init__.py +++ b/src/extensions/score_metamodel/tests/test_metamodel__init__.py @@ -18,6 +18,8 @@ parse_checks_filter, ) +from attribute_plugin import add_test_properties + def dummy_local_check(app, need, log): pass @@ -36,23 +38,43 @@ def setup_checks(): graph_checks.append(dummy_graph_check) +@add_test_properties( + partially_verifies=["tool_req__docs_metamodel"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_returns_empty_list_when_filter_is_empty(): """Return an empty list if no filter string is provided.""" assert parse_checks_filter("") == [] +@add_test_properties( + partially_verifies=["tool_req__docs_metamodel"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_returns_valid_checks(): """Return the provided valid check names.""" result = parse_checks_filter("dummy_local_check,dummy_graph_check") assert result == ["dummy_local_check", "dummy_graph_check"] +@add_test_properties( + partially_verifies=["tool_req__docs_metamodel"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_strips_whitespace(): """Remove surrounding spaces from each check name.""" result = parse_checks_filter(" dummy_local_check , dummy_graph_check ") assert result == ["dummy_local_check", "dummy_graph_check"] +@add_test_properties( + partially_verifies=["tool_req__docs_metamodel"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_raises_assertion_for_invalid_check(): """Raise AssertionError if a check name is unknown.""" with pytest.raises(AssertionError) as 
exc_info: diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index 0f0274a1..d499a022 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -16,6 +16,10 @@ source code links from a JSON file and add them to the needs. """ +# req-Id: tool_req__docs_test_link_testcase +# req-Id: tool_req__docs_dd_link_source_code_link +# This whole directory implements the above mentioned tool requirements + from collections import defaultdict from copy import deepcopy from pathlib import Path diff --git a/src/extensions/score_source_code_linker/need_source_links.py b/src/extensions/score_source_code_linker/need_source_links.py index f0310c0c..d5e10a10 100644 --- a/src/extensions/score_source_code_linker/need_source_links.py +++ b/src/extensions/score_source_code_linker/need_source_links.py @@ -16,6 +16,9 @@ It also defines a decoder and encoder for SourceCodeLinks to enable JSON read/write """ +# req-Id: tool_req__docs_test_link_testcase +# req-Id: tool_req__docs_dd_link_source_code_link + import json from dataclasses import asdict, dataclass, field from pathlib import Path diff --git a/src/extensions/score_source_code_linker/needlinks.py b/src/extensions/score_source_code_linker/needlinks.py index 2d3ca246..c890b13e 100644 --- a/src/extensions/score_source_code_linker/needlinks.py +++ b/src/extensions/score_source_code_linker/needlinks.py @@ -10,6 +10,8 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* +# req-Id: tool_req__docs_dd_link_source_code_link + import json from dataclasses import asdict, dataclass from pathlib import Path diff --git a/src/extensions/score_source_code_linker/testlink.py b/src/extensions/score_source_code_linker/testlink.py index 30416e5d..ccf31b10 100644 --- a/src/extensions/score_source_code_linker/testlink.py +++ 
b/src/extensions/score_source_code_linker/testlink.py @@ -18,6 +18,8 @@ TestLink => The datatype that is ultimately saved inside of the JSON """ +# req-Id: tool_req__docs_test_link_testcase + import html import json import re diff --git a/src/extensions/score_source_code_linker/tests/test_codelink.py b/src/extensions/score_source_code_linker/tests/test_codelink.py index dc196a17..703a986e 100644 --- a/src/extensions/score_source_code_linker/tests/test_codelink.py +++ b/src/extensions/score_source_code_linker/tests/test_codelink.py @@ -191,6 +191,11 @@ def sample_needs(): # Test utility functions +@add_test_properties( + partially_verifies=["tool_req__docs_dd_link_source_code_link"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_get_cache_filename(): """Test cache filename generation.""" build_dir = Path("/tmp/build") @@ -205,6 +210,11 @@ def make_needs(needs_dict): ) +@add_test_properties( + partially_verifies=["tool_req__docs_dd_link_source_code_link"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_find_need_direct_match(): """Test finding a need with direct ID match.""" all_needs = make_needs( @@ -218,6 +228,11 @@ def test_find_need_direct_match(): assert result["id"] == "REQ_001" +@add_test_properties( + partially_verifies=["tool_req__docs_dd_link_source_code_link"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_find_need_with_prefix(): """Test finding a need with prefix matching.""" @@ -232,6 +247,11 @@ def test_find_need_with_prefix(): assert result["id"] == "PREFIX_REQ_001" +@add_test_properties( + partially_verifies=["tool_req__docs_dd_link_source_code_link"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_find_need_multiple_prefixes(): """Test finding a need with multiple prefixes.""" all_needs = make_needs( @@ -249,6 +269,11 @@ def test_find_need_multiple_prefixes(): 
assert result["id"] == "SECOND_REQ_001" +@add_test_properties( + partially_verifies=["tool_req__docs_dd_link_source_code_link"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_find_need_not_found(): """Test finding a need that doesn't exist.""" all_needs = make_needs( @@ -261,6 +286,11 @@ def test_find_need_not_found(): assert result is None +@add_test_properties( + partially_verifies=["tool_req__docs_dd_link_source_code_link"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_group_by_need(sample_needlinks): """Test grouping source code links by need ID.""" result = group_by_need(sample_needlinks) @@ -280,12 +310,22 @@ def test_group_by_need(sample_needlinks): assert len(found_link.links.CodeLinks) == 1 +@add_test_properties( + partially_verifies=["tool_req__docs_dd_link_source_code_link"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_group_by_need_empty_list(): """Test grouping empty list of needlinks.""" result = group_by_need([], []) assert len(result) == 0 +@add_test_properties( + partially_verifies=["tool_req__docs_dd_link_source_code_link"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_get_github_link_with_real_repo(git_repo): """Test generating GitHub link with real repository.""" # Create a needlink @@ -313,6 +353,11 @@ def test_get_github_link_with_real_repo(git_repo): # Test cache file operations +@add_test_properties( + partially_verifies=["tool_req__docs_dd_link_source_code_link"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_cache_file_operations(temp_dir, sample_needlinks): """Test storing and loading cache files.""" cache_file = temp_dir / "test_cache.json" @@ -337,6 +382,11 @@ def test_cache_file_operations(temp_dir, sample_needlinks): assert loaded_links[3].line == 2 +@add_test_properties( + 
partially_verifies=["tool_req__docs_dd_link_source_code_link"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_cache_file_with_encoded_comments(temp_dir): """Test that cache file properly handles encoded comments.""" # Create needlinks with spaces in tags and full_line @@ -367,6 +417,13 @@ def test_cache_file_with_encoded_comments(temp_dir): # Integration tests + + +@add_test_properties( + partially_verifies=["tool_req__docs_dd_link_source_code_link"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_group_by_need_and_find_need_integration(sample_needlinks): """Test grouping links and finding needs together.""" # Group the test links @@ -397,8 +454,8 @@ def test_group_by_need_and_find_need_integration(sample_needlinks): @add_test_properties( partially_verifies=["tool_req__docs_dd_link_source_code_link"], - test_type="interface-test", - derivation_technique="design-analysis", + test_type="requirements-based", + derivation_technique="requirements-analysis", ) def test_source_linker_end_to_end_with_real_files(temp_dir, git_repo): """Test end-to-end workflow with real files and git repo.""" @@ -490,6 +547,11 @@ def another_function(): assert f"src/{needlink.file.name}#L{needlink.line}" in github_link +@add_test_properties( + partially_verifies=["tool_req__docs_dd_link_source_code_link"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_multiple_commits_hash_consistency(git_repo): """Test that git hash remains consistent and links update properly.""" # Get initial hash diff --git a/src/extensions/score_source_code_linker/tests/test_testlink.py b/src/extensions/score_source_code_linker/tests/test_testlink.py index 09e08d25..2f35f8a3 100644 --- a/src/extensions/score_source_code_linker/tests/test_testlink.py +++ b/src/extensions/score_source_code_linker/tests/test_testlink.py @@ -21,9 +21,16 @@ load_test_xml_parsed_json, 
store_test_xml_parsed_json, ) +from attribute_plugin import add_test_properties +@add_test_properties( + partially_verifies=["tool_req__docs_test_link_testcase"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_testlink_serialization_roundtrip(): + """Ensure that Encode/Decode is reversible""" link = DataForTestLink( name="my_test", file=Path("some/file.py"), @@ -46,19 +53,42 @@ def test_testlink_encoder_handles_path(): assert '"file": "some/thing.py"' in encoded +@add_test_properties( + partially_verifies=["tool_req__docs_test_link_testcase"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_decoder_ignores_irrelevant_dicts(): + """Ensure Decoder ignores data it doesn't understand""" input_data = {"foo": "bar"} result = DataForTestLink_JSON_Decoder(input_data) assert result == input_data +@add_test_properties( + partially_verifies=["tool_req__docs_test_link_testcase"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_clean_text_removes_ansi_and_html_unescapes(): + """ + Test if text clean works as intended. + It should remove ANSI color & text styles, as well as convert HTML things back to Chars + """ raw = "\x1b[31m<b>Warning</b>\x1b[0m\nExtra line" cleaned = DataOfTestCase.clean_text(raw) assert cleaned == "Warning Extra line" +@add_test_properties( + partially_verifies=["tool_req__docs_test_link_testcase"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_testcaseneed_to_dict_multiple_links(): + """ + Ensure that a DataOfTestCase can transform itself into a list of DataForTestLink. 
+ """ case = DataOfTestCase( name="TC_01", file="src/test.py", @@ -84,7 +114,13 @@ def test_testcaseneed_to_dict_multiple_links(): assert link.result == "failed" +@add_test_properties( + partially_verifies=["tool_req__docs_test_link_testcase"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_store_and_load_testlinks_roundtrip(tmp_path): + """Ensure that Encode/Decode is reversible""" file = tmp_path / "testlinks.json" links = [ diff --git a/src/extensions/score_source_code_linker/tests/test_xml_parser.py b/src/extensions/score_source_code_linker/tests/test_xml_parser.py index c87e7947..2464a992 100644 --- a/src/extensions/score_source_code_linker/tests/test_xml_parser.py +++ b/src/extensions/score_source_code_linker/tests/test_xml_parser.py @@ -25,6 +25,8 @@ import src.extensions.score_source_code_linker.xml_parser as xml_parser from src.extensions.score_source_code_linker.testlink import DataOfTestCase +from attribute_plugin import add_test_properties + # Unsure if I should make these last a session or not @pytest.fixture @@ -90,14 +92,26 @@ def make_tc( return root, dir1, dir2 +@add_test_properties( + partially_verifies=["tool_req__docs_test_link_testcase"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_find_xml_files(tmp_xml_dirs): + """Ensure xml files are found as expected""" root, dir1, dir2 = tmp_xml_dirs found = xml_parser.find_xml_files(root) expected = {dir1 / "test.xml", dir2 / "test.xml"} assert set(found) == expected +@add_test_properties( + partially_verifies=["tool_req__docs_test_link_testcase"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_parse_testcase_result(): + """Ensure Testcase results are parsed as intended""" tc = ET.Element("testcase", {"name": "a"}) assert xml_parser.parse_testcase_result(tc) == ("passed", "") @@ -113,7 +127,13 @@ def test_parse_testcase_result(): assert 
xml_parser.parse_testcase_result(tc4) == ("skipped", "skp") +@add_test_properties( + partially_verifies=["tool_req__docs_test_link_testcase"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_parse_properties(): + """Ensure properties of testcases are parsed as intended""" cp: dict[str, Any] = {} props_el = ET.Element("properties") ET.SubElement(props_el, "property", {"name": "A", "value": "1"}) @@ -123,8 +143,14 @@ def test_parse_properties(): assert "Description" not in res +@add_test_properties( + partially_verifies=["tool_req__docs_test_link_testcase"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_read_test_xml_file(tmp_xml_dirs): - root, dir1, dir2 = tmp_xml_dirs + """Ensure a whole pre-defined xml file is parsed correctly""" + _, dir1, dir2 = tmp_xml_dirs needs1, no_props1 = xml_parser.read_test_xml_file(dir1 / "test.xml") assert isinstance(needs1, list) and len(needs1) == 1 @@ -138,7 +164,13 @@ def test_read_test_xml_file(tmp_xml_dirs): assert no_props2 == ["tc_no_props"] +@add_test_properties( + partially_verifies=["tool_req__docs_test_link_testcase"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) def test_short_hash_consistency_and_format(): + """Ensure shorthash has the intended format""" h1 = xml_parser.short_hash("foo") h2 = xml_parser.short_hash("foo") assert h1 == h2 diff --git a/src/extensions/score_source_code_linker/xml_parser.py b/src/extensions/score_source_code_linker/xml_parser.py index 82ca0bbe..20849c38 100644 --- a/src/extensions/score_source_code_linker/xml_parser.py +++ b/src/extensions/score_source_code_linker/xml_parser.py @@ -15,6 +15,8 @@ It also generates external needs out of the parsed testcases to enable linking to requirements &gathering statistics """ +# req-Id: tool_req__docs_test_link_testcase + import contextlib import base64 import hashlib From bb83e99a9960b18ac07c89f14b99d6d314f7d42e Mon Sep 17 
00:00:00 2001 From: Alexander Lanin Date: Fri, 22 Aug 2025 13:05:20 +0200 Subject: [PATCH 116/231] simplify static path configuration and add bazel 7 support (#228) --- src/extensions/score_layout/__init__.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/extensions/score_layout/__init__.py b/src/extensions/score_layout/__init__.py index aa1b0761..1485e568 100644 --- a/src/extensions/score_layout/__init__.py +++ b/src/extensions/score_layout/__init__.py @@ -36,18 +36,17 @@ def update_config(app: Sphinx, _config: Any): app.config.html_theme_options = html_options.return_html_theme_options(app) # Setting HTML static path - # For now this seems the only place this is used / needed. - # In the future it might be a good idea to make this available in other places, - # maybe via the 'find_runfiles' lib if r := os.getenv("RUNFILES_DIR"): - dirs = [str(x) for x in Path(r).glob("*score_docs_as_code+")] - if dirs: - # Happens if 'score_docs_as_code' is used as Module - p = str(r) + "/score_docs_as_code+/src/assets" + if (Path(r) / "score_docs_as_code+").exists(): + # Docs-as-code used as a module with bazel 8 + module = "score_docs_as_code+" + elif (Path(r) / "score_docs_as_code~").exists(): + # Docs-as-code used as a module with bazel 7 + module = "score_docs_as_code~" else: - # Only happens in 'score_docs_as_code' repository - p = str(r) + "/_main/src/assets" - app.config.html_static_path = app.config.html_static_path + [p] + # Docs-as-code is the current module + module = "_main" + app.config.html_static_path.append(str(Path(r) / module / "src/assets")) app.add_css_file("css/score.css", priority=500) app.add_css_file("css/score_needs.css", priority=500) From 92a4806f56917b4469a095c6bc8327d9731d03aa Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Fri, 22 Aug 2025 13:11:41 +0200 Subject: [PATCH 117/231] dr-001 (#227) * Fix artefact * switch mermaid version --- docs/conf.py | 3 + .../001-test-results-in-workflow.md | 110 
++++++++++++++++++ .../decisions_and_concepts/index.rst | 8 ++ docs/internals/index.rst | 1 + 4 files changed, 122 insertions(+) create mode 100644 docs/internals/decisions_and_concepts/001-test-results-in-workflow.md create mode 100644 docs/internals/decisions_and_concepts/index.rst diff --git a/docs/conf.py b/docs/conf.py index 4e98b780..263776cc 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -42,6 +42,9 @@ "sphinxcontrib.mermaid", ] +# Same as current VS Code extension +mermaid_version = "11.6.0" + myst_enable_extensions = ["colon_fence"] exclude_patterns = [ diff --git a/docs/internals/decisions_and_concepts/001-test-results-in-workflow.md b/docs/internals/decisions_and_concepts/001-test-results-in-workflow.md new file mode 100644 index 00000000..a2e240fe --- /dev/null +++ b/docs/internals/decisions_and_concepts/001-test-results-in-workflow.md @@ -0,0 +1,110 @@ +--- +id: Docs-As-Code-DR-001 +status: "Draft" +owner: Infrastructure Community +--- + +# Decision Record 001: Test results in Docs-As-Code Workflows + +## Goals + +* PR + * Early detection of issues + * Generate website-preview +* Post-Merge + * Generate website + +## Problem Statement + +Some parts of generating docs are rather slow: +1) Embedding test results, implies that we need test results +2) Generating HTML output, especially generating diagrams (PlantUML) + +Note: the used tools are irrelevant for the problem statement. + +## Current Situation + +```{mermaid} +flowchart TD + subgraph subGraph0["Pre-Merge Workflow"] + parallel["parallel"] + PR["Pull Request
"] + DOCS1["HTML Build
<slow>"] + TESTS["tests
<slow>"] + WP1["Website Preview"] + TF2["PR Feedback"] + end + subgraph subGraph1["Post-Merge Workflow"] + DOCS2["HTML Build
<slow>"] + PM["Post-Merge
"] + W["Website"] + end + PR --> parallel + parallel --> DOCS1 & TESTS + DOCS1 --> WP1 + TESTS --> TF2 + PM --> DOCS2 + DOCS2 --> W + + parallel@{ shape: fork} + PR@{ shape: event} + DOCS1@{ shape: lean-l} + TESTS@{ shape: out-in} + WP1@{ shape: stored-data} + TF2@{ shape: stored-data} + DOCS2@{ shape: lean-l} + PM@{ shape: event} + W@{ shape: stored-data} +``` + + +## Solution + +A combination of test and docs workflows: + +```{mermaid} +flowchart TD + subgraph subGraph0["Pre-Merge Workflow"] + DC["Docs Verification
<fast>"] + DCF["PR Feedback"] + PR["Pull Request"] + T1["tests
<slow>"] + TF["PR Feedback"] + HB1["HTML Build
<slow>"] + WP["Website Preview"] + parallel["parallel"] + end + subgraph subGraph1["Post-Merge Workflow"] + T2["tests"] + PM["Post-Merge"] + HB2["HTML Build"] + W["Website"] + end + +DC --> DCF +DC -- cache --> HB1 +T1 -- results --> HB1 +T1 --> TF +HB1 --> WP +PR --> parallel +parallel --> DC & T1 + +PM --> T2 +T2 --> HB2 +HB2 --> W + +PR@{ shape: event} + +DC@{ shape: out-in} +T1@{ shape: out-in} +HB1@{ shape: out-in} +parallel@{ shape: fork} +TF@{ shape: stored-data} +WP@{ shape: stored-data} +DCF@{ shape: stored-data} + +PM@{ shape: event} +W@{ shape: stored-data} +T2@{ shape: out-in} +HB2@{ shape: out-in} +``` diff --git a/docs/internals/decisions_and_concepts/index.rst b/docs/internals/decisions_and_concepts/index.rst new file mode 100644 index 00000000..09678b30 --- /dev/null +++ b/docs/internals/decisions_and_concepts/index.rst @@ -0,0 +1,8 @@ +Decisions And Concepts +====================== + +.. toctree:: + :maxdepth: 1 + :glob: + + * diff --git a/docs/internals/index.rst b/docs/internals/index.rst index 51fd3ba6..8df6d8cd 100644 --- a/docs/internals/index.rst +++ b/docs/internals/index.rst @@ -8,3 +8,4 @@ Internals extensions/index benchmark_results + decisions_and_concepts/index From 039abec73e1ec2f64405b3977772e500faf00553 Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Mon, 25 Aug 2025 21:25:24 +0200 Subject: [PATCH 118/231] Improve docs as code performance (#229) --- .../score_draw_uml_funcs/helpers.py | 8 ++- .../score_header_service/header_service.py | 2 +- .../checks/attributes_format.py | 6 +- .../score_metamodel/checks/check_options.py | 2 +- .../score_metamodel/checks/graph_checks.py | 2 +- .../checks/id_contains_feature.py | 6 +- .../score_metamodel/checks/standards.py | 6 +- .../score_source_code_linker/xml_parser.py | 5 +- src/find_runfiles/__init__.py | 69 +++++++++---------- src/helper_lib/__init__.py | 29 +++++--- 10 files changed, 72 insertions(+), 63 deletions(-) diff --git a/src/extensions/score_draw_uml_funcs/helpers.py 
b/src/extensions/score_draw_uml_funcs/helpers.py index 09594a1d..6cb609f0 100644 --- a/src/extensions/score_draw_uml_funcs/helpers.py +++ b/src/extensions/score_draw_uml_funcs/helpers.py @@ -236,16 +236,18 @@ def get_real_interface_logical( logger.info(f"{logical_op}: not implemented by any logical operation") continue - real_ifops.extend(real_ifop) if real_ifop not in real_ifops else None + if real_ifop[0] not in real_ifops: + real_ifops.extend(real_ifop) - # Per definition a operation can only be included by one interface + # Per definition an operation can only be included by one interface real_iface = all_needs[real_ifop[0]].get("included_by") if not real_iface: logger.info(f"{real_ifop[0]}: not included in any interface") continue - real_ifaces.extend(real_iface) if real_iface not in real_ifaces else None + if real_iface[0] not in real_ifaces: + real_ifaces.extend(real_iface) return real_ifaces diff --git a/src/extensions/score_header_service/header_service.py b/src/extensions/score_header_service/header_service.py index 8529cf5d..2324422d 100644 --- a/src/extensions/score_header_service/header_service.py +++ b/src/extensions/score_header_service/header_service.py @@ -89,7 +89,7 @@ def __init__( :param config: The configuration dictionary. :param kwargs: Additional keyword arguments. """ - super(BaseService, self).__init__() + super().__init__() def request_from_directive( self, directive: SphinxDirective diff --git a/src/extensions/score_metamodel/checks/attributes_format.py b/src/extensions/score_metamodel/checks/attributes_format.py index 95c75ec2..37ee3c61 100644 --- a/src/extensions/score_metamodel/checks/attributes_format.py +++ b/src/extensions/score_metamodel/checks/attributes_format.py @@ -67,11 +67,11 @@ def check_id_length(app: Sphinx, need: NeedsInfoType, log: CheckLogger): that would be replaced by actually feature names. 
--- """ - max_lenght = 45 + max_length = 45 parts = need["id"].split("__") if parts[1] == "example_feature": - max_lenght += 17 # _example_feature_ - if len(need["id"]) > max_lenght: + max_length += 17 # _example_feature_ + if len(need["id"]) > max_length: length = len(need["id"]) if "example_feature" in need["id"]: length -= 17 diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index f715ea2b..97d466b7 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -195,7 +195,7 @@ def check_extra_options( extra_options = [ option - for option in list(need.keys()) + for option in need if option not in allowed_options and need[option] not in [None, {}, "", []] and not option.endswith("_back") diff --git a/src/extensions/score_metamodel/checks/graph_checks.py b/src/extensions/score_metamodel/checks/graph_checks.py index fae247ee..cdd51e23 100644 --- a/src/extensions/score_metamodel/checks/graph_checks.py +++ b/src/extensions/score_metamodel/checks/graph_checks.py @@ -79,7 +79,7 @@ def eval_need_condition( if not isinstance(condition, str): raise ValueError( f"Invalid condition type: condition ({type(condition)})," - + " expected str or dict." + " expected str or dict." 
) return eval_need_check(need, condition, log) diff --git a/src/extensions/score_metamodel/checks/id_contains_feature.py b/src/extensions/score_metamodel/checks/id_contains_feature.py index f347f7cb..cd1b2ee4 100644 --- a/src/extensions/score_metamodel/checks/id_contains_feature.py +++ b/src/extensions/score_metamodel/checks/id_contains_feature.py @@ -56,10 +56,10 @@ def id_contains_feature(app: Sphinx, need: NeedsInfoType, log: CheckLogger): ) # allow abbreviation of the feature - initials = "".join( - featurepart[0].lower() for featurepart in featureparts if len(featureparts) > 1 + initials = ( + "".join(fp[0].lower() for fp in featureparts) if len(featureparts) > 1 else "" ) - foundinitials = initials in docname.lower() + foundinitials = bool(initials) and initials in docname.lower() if not (foundfeatpart or foundinitials): log.warning_for_option( diff --git a/src/extensions/score_metamodel/checks/standards.py b/src/extensions/score_metamodel/checks/standards.py index 76cf5392..2c029e6f 100644 --- a/src/extensions/score_metamodel/checks/standards.py +++ b/src/extensions/score_metamodel/checks/standards.py @@ -312,9 +312,9 @@ def my_pie_workproducts_contained_in_exactly_one_workflow( if output in workproduct_analysis: workproduct_analysis[output]["count"] += 1 - nb_wp_connected_to_one_workflow = nb_wp_connected_to_more_than_one_workflow = ( - not_connected_wp - ) = 0 + not_connected_wp = 0 + nb_wp_connected_to_one_workflow = 0 + nb_wp_connected_to_more_than_one_workflow = 0 for analysis in workproduct_analysis.values(): count = analysis["count"] diff --git a/src/extensions/score_source_code_linker/xml_parser.py b/src/extensions/score_source_code_linker/xml_parser.py index 20849c38..c742cc2b 100644 --- a/src/extensions/score_source_code_linker/xml_parser.py +++ b/src/extensions/score_source_code_linker/xml_parser.py @@ -17,8 +17,8 @@ # req-Id: tool_req__docs_test_link_testcase -import contextlib import base64 +import contextlib import hashlib import itertools 
import os @@ -198,8 +198,7 @@ def build_test_needs_from_files( b, z = read_test_xml_file(f) non_prop_tests = ", ".join(n for n in z) if non_prop_tests: - logger.info("The following tests do not have properties.") - logger.info(non_prop_tests + "\n") + logger.info(f"Tests missing properties: {non_prop_tests}") tcns.extend(b) for c in b: construct_and_add_need(app, c) diff --git a/src/find_runfiles/__init__.py b/src/find_runfiles/__init__.py index c40ea977..e4662004 100644 --- a/src/find_runfiles/__init__.py +++ b/src/find_runfiles/__init__.py @@ -21,23 +21,27 @@ def _log_debug(message: str): # TODO: why does logger not print anything? - logger.debug(message) - print(message) + if logger.hasHandlers(): + logger.debug(message) + else: + print(message) -def find_git_root(): +def find_git_root() -> Path: # TODO: is __file__ ever resolved into the bazel cache directories? # Then this function will not work! - # TODO: use os.getenv("BUILD_WORKSPACE_DIRECTORY")? - git_root = Path(__file__).resolve() - while not (git_root / ".git").exists(): - git_root = git_root.parent - if git_root == Path("/"): - sys.exit( - "Could not find git root. Please run this script from the " - "root of the repository." - ) - return git_root + workspace = os.getenv("BUILD_WORKSPACE_DIRECTORY") + if workspace: + return Path(workspace) + + for parent in Path(__file__).resolve().parents: + if (parent / ".git").exists(): + return parent + + sys.exit( + "Could not find git root. " + "Please run this script from the root of the repository." 
+ ) def get_runfiles_dir_impl( @@ -49,31 +53,26 @@ def get_runfiles_dir_impl( """Functional (and therefore testable) logic to determine the runfiles directory.""" _log_debug( - "get_runfiles_dir_impl(\n" - f" {cwd=},\n" - f" {conf_dir=},\n" - f" {env_runfiles=},\n" - f" {git_root=}\n" - ")" + f"get_runfiles_dir_impl(\n cwd={cwd},\n conf_dir={conf_dir},\n" + f" env_runfiles={env_runfiles},\n git_root={git_root}\n)" ) if env_runfiles: # Runfiles are only available when running in Bazel. - # bazel build and bazel run are both supported. + # Both `bazel build` and `bazel run` are supported. # i.e. `bazel build //:docs` and `bazel run //:docs`. - _log_debug("Using env[runfiles] to find the runfiles...") - - if env_runfiles.is_absolute(): - # In case of `bazel run` it will point to the global cache directory, which - # has a new hash every time. And it's not pretty. - # However `bazel-out` is a symlink to that same cache directory! - parts = str(env_runfiles).split("/bazel-out/") - if len(parts) != 2: - # This will intentionally also fail if "bazel-out" appears multiple - # times in the path. Will be fixed on demand only. + _log_debug("Using env[RUNFILES_DIR] to find the runfiles...") + + if env_runfiles.is_absolute() and "bazel-out" in env_runfiles.parts: + # In case of `bazel run` it will point to the global cache directory, + # which has a new hash every time. And it's not pretty. + # However, `bazel-out` is a symlink to that same cache directory! + try: + idx = env_runfiles.parts.index("bazel-out") + runfiles_dir = git_root.joinpath(*env_runfiles.parts[idx:]) + _log_debug(f"Made runfiles dir pretty: {runfiles_dir}") + except ValueError: sys.exit("Could not find bazel-out in runfiles path.") - runfiles_dir = git_root / Path("bazel-out") / parts[1] - _log_debug(f"Made runfiles dir pretty: {runfiles_dir}") else: runfiles_dir = git_root / env_runfiles @@ -84,12 +83,8 @@ def get_runfiles_dir_impl( # environment. 
_log_debug("Running outside bazel.") - print(f"{git_root=}") - # TODO: "process-docs" is in SOURCE_DIR!! - runfiles_dir = ( - Path(git_root) / "bazel-bin" / "process-docs" / "ide_support.runfiles" - ) + runfiles_dir = git_root / "bazel-bin" / "process-docs" / "ide_support.runfiles" return runfiles_dir diff --git a/src/helper_lib/__init__.py b/src/helper_lib/__init__.py index ff201eae..1ca80ca0 100644 --- a/src/helper_lib/__init__.py +++ b/src/helper_lib/__init__.py @@ -61,16 +61,23 @@ def parse_remote_git_output(str_line: str) -> str: Input: 'origin git@github.com:MaximilianSoerenPollak/docs-as-code.git' Output: 'MaximilianSoerenPollak/docs-as-code' """ - if len(str_line.split()) < 2: + parts = str_line.split(maxsplit=2) # split into up to three parts [remote, url, ...] + if len(parts) < 2: LOGGER.warning( f"Got wrong input line from 'get_github_repo_info'. Input: {str_line}. " + "Expected example: 'origin git@github.com:user/repo.git'" ) return "" - url = str_line.split()[1] # Get the URL part - # Handle SSH format (git@github.com:user/repo.git) Get part after github.com/ - path = url.split(":")[1] if url.startswith("git@") else "/".join(url.split("/")[3:]) - return path.replace(".git", "") + + url = parts[1] # Get the URL part + + # Handle SSH vs HTTPS formats directly + if url.startswith("git@"): + path = url.split(":", 1)[-1] + else: + path = "/".join(url.split("/")[3:]) + + return path.removesuffix(".git") def get_github_repo_info(git_root_cwd: Path) -> str: @@ -146,16 +153,22 @@ def get_current_git_hash(git_root: Path) -> str: result = subprocess.run( ["git", "log", "-n", "1", "--pretty=format:%H"], cwd=git_root, + text=True, # ✅ decode automatically capture_output=True, check=True, ) - decoded_result = result.stdout.strip().decode() + decoded_result = result.stdout.strip() + + if len(decoded_result) != 40: + raise ValueError(f"Unexpected git hash length: {decoded_result}") + + if not all(c in "0123456789abcdef" for c in decoded_result): + raise 
ValueError(f"Invalid characters in git hash: {decoded_result}") - assert all(c in "0123456789abcdef" for c in decoded_result) return decoded_result except Exception as e: LOGGER.warning( - f"Unexpected error while trying to get git_hash. Exceuted in: {git_root}", + f"Unexpected error while trying to get git_hash. Executed in: {git_root}", exc_info=e, ) raise From 525909319f9eb03c66dbc4bc69b9bb2a8f54ad40 Mon Sep 17 00:00:00 2001 From: Aymen-Soussi-01 Date: Tue, 26 Aug 2025 14:07:53 +0200 Subject: [PATCH 119/231] Add Pull request template (#230) --- .github/pull_request_template.md | 33 ++++++++++++++++++++++++++++++++ src/helper_lib/__init__.py | 4 +++- 2 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 .github/pull_request_template.md diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 00000000..30e23e01 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,33 @@ + +# Pull Request Template – docs-as-code + +## 📌 Description +- What does this PR change? +- Why is it needed? +- Which task it's related to? + +## Impact Analysis + +- [ ] This change does not violate any tool requirements and is covered by existing tool requirements +- [ ] This change does not violate any design decisions +- [ ] Otherwise I have created a ticket for new tool qualification + +## ✅ Checklist +Before requesting a review, please confirm that you have: + +- [ ] Added/updated documentation for new or changed features +- [ ] Added/updated tests to cover the changes +- [ ] Verified that existing tests pass locally +- [ ] Followed project coding standards and guidelines + + +## 📖 Documentation +- Does this PR update docs? +- If not, explain why documentation is not needed. + +--- + +⚠️ **Note:** Pull requests with missing tests or documentation will not be merged. 
diff --git a/src/helper_lib/__init__.py b/src/helper_lib/__init__.py index 1ca80ca0..001977d2 100644 --- a/src/helper_lib/__init__.py +++ b/src/helper_lib/__init__.py @@ -61,7 +61,9 @@ def parse_remote_git_output(str_line: str) -> str: Input: 'origin git@github.com:MaximilianSoerenPollak/docs-as-code.git' Output: 'MaximilianSoerenPollak/docs-as-code' """ - parts = str_line.split(maxsplit=2) # split into up to three parts [remote, url, ...] + parts = str_line.split( + maxsplit=2 + ) # split into up to three parts [remote, url, ...] if len(parts) < 2: LOGGER.warning( f"Got wrong input line from 'get_github_repo_info'. Input: {str_line}. " From 1a7657e370f794992c4d39fa61fe9d7c7dc4de1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20M=C3=BCller?= <42868757+mmr1909@users.noreply.github.com> Date: Tue, 26 Aug 2025 16:58:02 +0200 Subject: [PATCH 120/231] add test to verify the model loading (#231) --- src/extensions/score_metamodel/BUILD | 5 +- .../tests/model/simple_model.yaml | 44 ++++++++++ .../tests/test_metamodel_load.py | 81 +++++++++++++++++++ 3 files changed, 129 insertions(+), 1 deletion(-) create mode 100644 src/extensions/score_metamodel/tests/model/simple_model.yaml create mode 100644 src/extensions/score_metamodel/tests/test_metamodel_load.py diff --git a/src/extensions/score_metamodel/BUILD b/src/extensions/score_metamodel/BUILD index 7bf0d602..40cb645f 100644 --- a/src/extensions/score_metamodel/BUILD +++ b/src/extensions/score_metamodel/BUILD @@ -34,7 +34,10 @@ score_py_pytest( srcs = glob(["tests/*.py"]), # All requirements already in the library so no need to have it double data = glob( - ["tests/**/*.rst"], + [ + "tests/**/*.rst", + "tests/**/*.yaml", + ], ), deps = [":score_metamodel"], ) diff --git a/src/extensions/score_metamodel/tests/model/simple_model.yaml b/src/extensions/score_metamodel/tests/model/simple_model.yaml new file mode 100644 index 00000000..a78e97ba --- /dev/null +++ b/src/extensions/score_metamodel/tests/model/simple_model.yaml 
@@ -0,0 +1,44 @@ +needs_types_base_options: + optional_options: + global_opt: "global_value" + +prohibited_words_checks: + title_check: + title: + - stop_word1 + content_check: + types: + - req_type + content: + - weak_word1 + +needs_types: + type1: + title: "Type 1" + prefix: "T1" + color: "blue" + style: "bold" + mandatory_options: + opt1: "value1" + optional_options: + opt2: "value2" + opt3: "value3" + mandatory_links: + link1: "value1" + optional_links: + link2: "value2" + tags: + - req_type + +needs_extra_links: + link_option1: + incoming: "incoming1" + outgoing: "outgoing1" + +graph_checks: + needs_graph_check: + needs: + include: type1 + condition: opt1 == test + check: + link1: opt1 == test diff --git a/src/extensions/score_metamodel/tests/test_metamodel_load.py b/src/extensions/score_metamodel/tests/test_metamodel_load.py new file mode 100644 index 00000000..3411735f --- /dev/null +++ b/src/extensions/score_metamodel/tests/test_metamodel_load.py @@ -0,0 +1,81 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +from pathlib import Path +from unittest.mock import mock_open, patch + +from score_metamodel import ProhibitedWordCheck, load_metamodel_data + +MODEL_DIR = Path(__file__).absolute().parent / "model" + + +def load_model_data(model_file: str) -> str: + print(f"Loading model data from {model_file}") + model_path = Path(MODEL_DIR) / model_file + with open(model_path) as f: + return f.read() + + +def test_load_metamodel_data(): + model_data: str = load_model_data("simple_model.yaml") + + with patch("builtins.open", mock_open(read_data=model_data)): + # Call the function + result = load_metamodel_data() + + # Assertions + assert "needs_types" in result + assert len(result["needs_types"]) == 1 + assert result["needs_types"][0]["directive"] == "type1" + assert result["needs_types"][0]["title"] == "Type 1" + assert result["needs_types"][0]["prefix"] == "T1" + assert result["needs_types"][0]["color"] == "blue" + assert result["needs_types"][0]["style"] == "bold" + assert result["needs_types"][0]["mandatory_options"] == {"opt1": "value1"} + assert result["needs_types"][0]["opt_opt"] == { + "opt2": "value2", + "opt3": "value3", + "global_opt": "global_value", + } + assert result["needs_types"][0]["req_link"] == [("link1", "value1")] + assert result["needs_types"][0]["opt_link"] == [("link2", "value2")] + + assert "needs_extra_links" in result + assert len(result["needs_extra_links"]) == 1 + assert result["needs_extra_links"][0] == { + "option": "link_option1", + "incoming": "incoming1", + "outgoing": "outgoing1", + } + + assert "needs_extra_options" in result + assert result["needs_extra_options"] == ["global_opt", "opt1", "opt2", "opt3"] + + assert 
"prohibited_words_checks" in result + assert result["prohibited_words_checks"][0] == ProhibitedWordCheck( + name="title_check", option_check={"title": ["stop_word1"]} + ) + + assert result["prohibited_words_checks"][1] == ProhibitedWordCheck( + name="content_check", + option_check={"content": ["weak_word1"]}, + types=["req_type"], + ) + + assert "needs_graph_check" in result + assert result["needs_graph_check"]["needs_graph_check"]["needs"] == { + "include": "type1", + "condition": "opt1 == test", + } + assert result["needs_graph_check"]["needs_graph_check"]["check"] == { + "link1": "opt1 == test", + } From 0272f9e478f50bc7fa2288e4ee16a6d293500a16 Mon Sep 17 00:00:00 2001 From: Andreas Kaluza Date: Thu, 28 Aug 2025 08:21:55 +0200 Subject: [PATCH 121/231] add puml-theme-score.puml (#232) --- src/assets/puml-theme-score.puml | 578 ++++++++++++++++++++++++ src/extensions/score_layout/__init__.py | 3 + 2 files changed, 581 insertions(+) create mode 100644 src/assets/puml-theme-score.puml diff --git a/src/assets/puml-theme-score.puml b/src/assets/puml-theme-score.puml new file mode 100644 index 00000000..c0317fed --- /dev/null +++ b/src/assets/puml-theme-score.puml @@ -0,0 +1,578 @@ +'' ****************************************************************************** +'' Copyright (c) 2025 Contributors to the Eclipse Foundation +'' +'' See the NOTICE file(s) distributed with this work for additional +'' information regarding copyright ownership. 
+'' +'' This program and the accompanying materials are made available under the +'' terms of the Apache License Version 2.0 which is available at +'' https://www.apache.org/licenses/LICENSE-2.0 +'' +'' SPDX-License-Identifier: Apache-2.0 AND MIT +'' +'' Portions of this file are derived from the "materia" PlantUML theme, +'' originally published under the MIT License: +'' Copyright (c) 2019 Brett Schwarz +'' https://github.com/bschwarz/puml-themes +'' +'' The materia theme itself is based on the "Bootstrap Materia" theme, +'' also published under the MIT License: +'' https://bootswatch.com/materia/ +'' +'' Full text of the MIT license is available in LICENSES/MIT.txt +'' ****************************************************************************** + + +!$THEME = "score" + +!if %not(%variable_exists("$BGCOLOR")) +!$BGCOLOR = "transparent" +!endif + +skinparam backgroundColor $BGCOLOR +skinparam useBetaStyle true + + + +!$SCORE = "#45ADA8" +!$BLUE = "#2196F3" +!$INDIGO = "#6610f2" +!$PURPLE = "#6f42c1" +!$PINK = "#e83e8c" +!$RED = "#e51c23" +!$ORANGE = "#fd7e14" +!$YELLOW = "#ff9800" +!$GREEN = "#4CAF50" +!$TEAL = "#20c997" +!$CYAN = "#9C27B0" +!$WHITE = "#FFF" +!$GRAY_DARK = "#222" +!$GRAY = "#666" +!$PRIMARY = $SCORE +!$SECONDARY = %lighten($SCORE, 80) +!$SUCCESS = "#4CAF50" +!$INFO = %lighten($SCORE, 40) +!$WARNING = "#ff9800" +!$DANGER = "#e51c23" +!$LIGHT = "#fff" +!$DARK = "#222" + +'' *_LIGHT = tint (lighter) of the main color of 80% +'' *_DARK = shade (darker) of the main color of 80% +'' +!$FGCOLOR = %darken($SCORE, 80) +!$PRIMARY_LIGHT = %lighten($SCORE, 40) +!$PRIMARY_DARK = %darken($SCORE, 40) +!$PRIMARY_TEXT = %darken($SCORE, 80) +!$SECONDARY_LIGHT = "#fff" +!$SECONDARY_DARK = "#cccccc" +!$SECONDARY_TEXT = %darken($SCORE, 80) +!$INFO_LIGHT = %lighten($INFO, 40) +!$INFO_DARK = %darken($INFO, 40) +!$INFO_TEXT = %darken($SCORE, 80) +!$SUCCESS_LIGHT = "#70bf73" +!$SUCCESS_DARK = "#3D8C40" +!$SUCCESS_TEXT = %darken($SCORE, 80) +!$WARNING_LIGHT = "#ffad33" 
+!$WARNING_DARK = "#CC7A00" +!$WARNING_TEXT = %darken($SCORE, 80) +!$DANGER_DARK = "#b7161c" +!$DANGER_LIGHT = "#B7161C" +!$DANGER_TEXT = %darken($SCORE, 80) + +!procedure $success($msg) + $msg +!endprocedure + +!procedure $failure($msg) + $msg +!endprocedure + +!procedure $warning($msg) + $msg +!endprocedure + +!procedure $primary_scheme() + FontColor $PRIMARY_TEXT + BorderColor $PRIMARY + BackgroundColor $PRIMARY_LIGHT-$PRIMARY +!endprocedure +'' +'' Style settings +'' + +'' +'' Global Default Values +'' +skinparam defaultFontName "Arial" +skinparam defaultFontSize 12 +skinparam dpi 100 +skinparam shadowing true +skinparam roundcorner 8 +skinparam ParticipantPadding 1 +skinparam BoxPadding 1 +skinparam Padding 1 +skinparam ArrowColor $GRAY +skinparam stereotype { + CBackgroundColor $SECONDARY_LIGHT + CBorderColor $SECONDARY_DARK + ABackgroundColor $SUCCESS_LIGHT + ABorderColor $SUCCESS_DARK + IBackgroundColor $DANGER_LIGHT + IBorderColor $DANGER_DARK + EBackgroundColor $WARNING_LIGHT + EBorderColor $WARNING_DARK + NBackgroundColor $INFO_LIGHT + NBorderColor $INFO_DARK +} +skinparam title { + FontColor $PRIMARY + BorderColor $SECONDARY_DARK + FontSize 20 + BorderRoundCorner 8 + BorderThickness 1 + BackgroundColor $SECONDARY_LIGHT-$SECONDARY +} + +skinparam legend { + BackgroundColor $SECONDARY + BorderColor $SECONDARY_DARK + FontColor $DARK +} + +!startsub swimlane +skinparam swimlane { + BorderColor $PRIMARY + BorderThickness 2 + TitleBackgroundColor $SECONDARY_LIGHT-$SECONDARY + TitleFontColor $PRIMARY +} +!endsub + +!startsub activity + +skinparam activity { + $primary_scheme() + BarColor $PRIMARY_DARK + StartColor $PRIMARY + EndColor $PRIMARY + '' + DiamondBackgroundColor $PRIMARY_LIGHT + DiamondBorderColor $PRIMARY_DARK + DiamondFontColor $SECONDARY_TEXT + NoteFontColor %darken($SCORE, 40) +} +!endsub + +!startsub participant + +skinparam participant { + $primary_scheme() + ParticipantBorderThickness 2 +} +!endsub + +!startsub actor + +skinparam actor { + 
$primary_scheme() + FontColor $DARK +} +!endsub + +!startsub arrow + +skinparam arrow { + Thickness 1 + Color $PRIMARY + FontColor $FGCOLOR +} +!endsub + +!startsub sequence + +skinparam sequence { + BorderColor $PRIMARY + ' For some reason sequence title font color does not pick up from global + TitleFontColor $PRIMARY + BackgroundColor transparent + StartColor $PRIMARY + EndColor $PRIMARY + '' + BoxBackgroundColor transparent + BoxBorderColor $GRAY + BoxFontColor $DARK + '' + DelayFontColor $DARK + '' + LifeLineBorderColor $PRIMARY_DARK + LifeLineBorderThickness 2 + LifeLineBackgroundColor $PRIMARY_LIGHT + '' + GroupBorderColor $GRAY + GroupFontColor $DARK + GroupHeaderFontColor $INFO + '' + DividerBackgroundColor $WHITE-$LIGHT + DividerBorderColor $GRAY + DividerBorderThickness 2 + DividerFontColor $DARK + '' + ReferenceBackgroundColor transparent + ReferenceBorderColor $GRAY + ReferenceFontColor $DARK + ReferenceHeaderFontColor $INFO + '' + StereotypeFontColor $PRIMARY_TEXT + '' + ParticipantBorderThickness 0 + GroupBodyBackgroundColor transparent +} +!endsub + +!startsub partition + +skinparam partition { + BorderColor $PRIMARY + FontColor $PRIMARY + BackgroundColor transparent +} +!endsub + +!startsub collections + +skinparam collections { + $primary_scheme() +} +!endsub + +!startsub control + +skinparam control { + $primary_scheme() + FontColor $DARK +} +!endsub + +!startsub entity + +skinparam entity { + $primary_scheme() + FontColor $DARK +} +!endsub + +!startsub boundary + +skinparam boundary { + $primary_scheme() + FontColor $DARK +} +!endsub + +!startsub agent + +skinparam agent { + $primary_scheme() + FontColor $DARK +} +!endsub + +!startsub note + +skinparam note { + BorderThickness 1 + BackgroundColor $INFO_LIGHT-$INFO + BorderColor $INFO + FontColor %darken($SCORE, 40) +} +!endsub + +!startsub artifact + +skinparam artifact { + $primary_scheme() +} +!endsub + +!startsub component + +skinparam component { + $primary_scheme() +} +!endsub + +!startsub 
interface + +skinparam interface { + BackgroundColor $DANGER_LIGHT + BorderColor $DANGER + FontColor $DARK +} +!endsub + +!startsub storage + +skinparam storage { + $primary_scheme() + FontColor $DARK +} +!endsub + +!startsub node + +skinparam node { + BackgroundColor $SECONDARY_LIGHT-$SECONDARY + BorderColor $PRIMARY + FontColor $DARK +} +!endsub + +!startsub cloud + +skinparam cloud { + BackgroundColor transparent + BorderColor $PRIMARY + FontColor $DARK +} +!endsub + +!startsub database + +skinparam database { + $primary_scheme() + FontColor $DARK +} +!endsub + +!startsub class + +skinparam class { + $primary_scheme() + HeaderBackgroundColor $PRIMARY-$PRIMARY_DARK + StereotypeFontColor $DARK + BorderThickness 1 + AttributeFontColor $LIGHT + AttributeFontSize 11 +} +!endsub + +!startsub object + +skinparam object { + $primary_scheme() + StereotypeFontColor $DARK + BorderThickness 1 + AttributeFontColor $SECONDARY_TEXT + AttributeFontSize 11 +} +!endsub + +!startsub usecase + +skinparam usecase { + $primary_scheme() + BorderThickness 2 + StereotypeFontColor $PRIMARY +} +!endsub + +!startsub rectangle + +skinparam rectangle { + FontColor $PRIMARY + BackgroundColor $SECONDARY_LIGHT-$SECONDARY + BorderThickness 2 + StereotypeFontColor $PRIMARY +} +!endsub + +!startsub package + +skinparam package { + $primary_scheme() + BackgroundColor $SECONDARY_LIGHT-$SECONDARY + BorderThickness 2 +} +!endsub + +!startsub folder + +skinparam folder { + BackgroundColor $WHITE-$SECONDARY_LIGHT + BorderColor $PRIMARY + FontColor $PRIMARY + BorderThickness 2 +} +!endsub + +!startsub frame + +skinparam frame { + BackgroundColor $WHITE-$SECONDARY_LIGHT + BorderColor $INFO + FontColor $INFO + BorderThickness 2 +} +!endsub + +!startsub state + +skinparam state { + $primary_scheme() + BorderColor $PRIMARY_DARK + StartColor $INFO + EndColor $INFO + AttributeFontColor $SECONDARY_TEXT + AttributeFontSize 11 +} +!endsub + +!startsub queue + +skinparam queue { + $primary_scheme() +} +!endsub + 
+!startsub card + +skinparam card { + BackgroundColor $INFO_LIGHT-$INFO + BorderColor $INFO + FontColor $INFO_TEXT +} +!endsub + +!startsub file + +skinparam file { + BackgroundColor $SECONDARY_LIGHT-$SECONDARY + BorderColor $GRAY + FontColor $GRAY + +} +!endsub + +!startsub stack + +skinparam stack { + $primary_scheme() +} +!endsub diff --git a/src/extensions/score_layout/__init__.py b/src/extensions/score_layout/__init__.py index 1485e568..9b373759 100644 --- a/src/extensions/score_layout/__init__.py +++ b/src/extensions/score_layout/__init__.py @@ -47,6 +47,9 @@ def update_config(app: Sphinx, _config: Any): # Docs-as-code is the current module module = "_main" app.config.html_static_path.append(str(Path(r) / module / "src/assets")) + app.config.needs_flow_configs = { + "score_config": f"!include {Path(r) / module / 'src/assets/puml-theme-score.puml'}" + } app.add_css_file("css/score.css", priority=500) app.add_css_file("css/score_needs.css", priority=500) From 47a80da4fcefff4ea669cda489353d044ec0bc62 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Fri, 29 Aug 2025 13:58:04 +0200 Subject: [PATCH 122/231] Simplify-consumer-tests (#237) --- .github/workflows/consumer_test.yml | 81 +++++++---------------------- MODULE.bazel | 4 +- scripts/run-linters.sh | 8 +-- src/tests/test_consumer.py | 2 +- 4 files changed, 25 insertions(+), 70 deletions(-) diff --git a/.github/workflows/consumer_test.yml b/.github/workflows/consumer_test.yml index 08827c75..7a07e726 100644 --- a/.github/workflows/consumer_test.yml +++ b/.github/workflows/consumer_test.yml @@ -11,80 +11,35 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -name: Run Consumer Tests on Comment -on: issue_comment - -permissions: - statuses: write - contents: read - pull-requests: read +name: Consumer Tests +on: + pull_request: + types: [opened, reopened, synchronize] + merge_group: + types: [checks_requested] jobs: - consumer_test: - if: 
${{ github.event.issue.pull_request && contains(github.event.comment.body, '/consumer-test') }} + test: runs-on: ubuntu-latest - steps: - - name: Get PR details - id: pr_details - uses: actions/github-script@v7 - with: - script: | - const { data: pr } = await github.rest.pulls.get({ - owner: context.repo.owner, - repo: context.repo.repo, - pull_number: context.issue.number - }); - core.setOutput('head_sha', pr.head.sha); - core.setOutput('head_ref', pr.head.ref); + strategy: + fail-fast: false + matrix: + consumer: ["process_description", "score", "module_template"] + steps: - name: Checkout PR uses: actions/checkout@v4.2.2 - with: - ref: refs/pull/${{ github.event.issue.number }}/head - - name: Set Consumer Tests Status - Pending - uses: actions/github-script@v7 - with: - script: | - await github.rest.repos.createCommitStatus({ - owner: context.repo.owner, - repo: context.repo.repo, - sha: '${{ steps.pr_details.outputs.head_sha }}', - state: 'pending', - context: 'Consumer Tests (Manual)', - description: 'Running consumer tests (manually triggered by @${{ github.event.comment.user.login }})', - target_url: `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${{ github.run_id }}` - }); - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Prepare Python + run: | + bazel run //:ide_support - name: Run Consumer tests - id: consumer_tests run: | - bazel run //:ide_support - .venv_docs/bin/python -m pytest -s -v src/tests/ + .venv_docs/bin/python -m pytest -s -v src/tests/ --repo=$CONSUMER + env: FORCE_COLOR: "1" TERM: xterm-256color PYTHONUNBUFFERED: "1" - - - name: Report Consumer Tests Status - if: always() - uses: actions/github-script@v7 - with: - script: | - const outcome = '${{ steps.consumer_tests.outcome }}'; - const state = outcome === 'success' ? 
'success' : 'failure'; - console.log(`Test outcome: ${outcome}, state: ${state}`); - console.log(`Head SHA: ${{ steps.pr_details.outputs.head_sha }}`); - await github.rest.repos.createCommitStatus({ - owner: context.repo.owner, - repo: context.repo.repo, - sha: '${{ steps.pr_details.outputs.head_sha }}', - state: state, - context: 'Consumer Tests (Manual)', - description: `Consumer tests ${outcome} (manually triggered by @${{ github.event.comment.user.login }})`, - target_url: `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${{ github.run_id }}` - }); - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CONSUMER: ${{ matrix.consumer }} diff --git a/MODULE.bazel b/MODULE.bazel index ac79e51c..e93202ea 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -57,7 +57,7 @@ pip.parse( use_repo(pip, "pip_process") # Additional Python rules provided by aspect, e.g. an improved version of -bazel_dep(name = "aspect_rules_py", version = "1.6.3") +bazel_dep(name = "aspect_rules_py", version = "1.4.0") bazel_dep(name = "buildifier_prebuilt", version = "8.2.0.2") ############################################################################### @@ -101,7 +101,7 @@ bazel_dep(name = "score_process", version = "1.1.1") # Add Linter bazel_dep(name = "rules_multitool", version = "1.9.0") -bazel_dep(name = "score_tooling", version = "1.0.0") +bazel_dep(name = "score_tooling", version = "1.0.2") multitool_root = use_extension("@rules_multitool//multitool:extension.bzl", "multitool") use_repo(multitool_root, "actionlint_hub", "multitool", "ruff_hub", "shellcheck_hub", "yamlfmt_hub") diff --git a/scripts/run-linters.sh b/scripts/run-linters.sh index f58d92b8..17ff20a5 100755 --- a/scripts/run-linters.sh +++ b/scripts/run-linters.sh @@ -16,21 +16,21 @@ set -euo pipefail bazel run //:ide_support echo "Running Ruff linter..." -bazel run @score_linter//:ruff check +bazel run @score_tooling//tools:ruff check echo "Running basedpyright..." 
.venv_docs/bin/python3 -m basedpyright echo "Running Actionlint..." -bazel run @score_linter//:actionlint +bazel run @score_tooling//tools:actionlint echo "Running Shellcheck..." find . \ -type d \( -name .git -o -name .venv -o -name bazel-out -o -name node_modules \) -prune -false \ -o -type f -exec grep -Il '^#!.*sh' {} \; | \ -xargs bazel run @score_linter//:shellcheck -- +xargs bazel run @score_tooling//tools:shellcheck -- echo "Running Yamlfmt..." -bazel run @score_linter//:yamlfmt -- $(find . \ +bazel run @score_tooling//tools:yamlfmt -- $(find . \ -type d \( -name .git -o -name .venv -o -name bazel-out -o -name node_modules \) -prune -false \ -o -type f \( -name "*.yaml" -o -name "*.yml" \) | tr '\n' '\0' | xargs -0) diff --git a/src/tests/test_consumer.py b/src/tests/test_consumer.py index db315e05..e5da97de 100644 --- a/src/tests/test_consumer.py +++ b/src/tests/test_consumer.py @@ -478,7 +478,7 @@ def setup_test_environment(sphinx_base_dir, pytestconfig): """Set up the test environment and return necessary paths and metadata.""" git_root = find_git_root() - assert git_root is None, "Git root was not found" + assert git_root, "Git root was not found" gh_url = get_github_base_url() current_hash = get_current_git_commit(git_root) From b3617ae0f684bc6518332d9f804f684c6496199f Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Fri, 29 Aug 2025 14:26:17 +0200 Subject: [PATCH 123/231] strip down conf.py to the bare minimum (#233) --- .github/pull_request_template.md | 23 ++---- MODULE.bazel | 2 +- docs.bzl | 10 +-- docs/conf.py | 53 +------------ src/extensions/score_sphinx_bundle/BUILD | 30 ++++++++ .../score_sphinx_bundle/__init__.py | 74 +++++++++++++++++++ src/tests/test_consumer.py | 9 ++- 7 files changed, 122 insertions(+), 79 deletions(-) create mode 100644 src/extensions/score_sphinx_bundle/BUILD create mode 100644 src/extensions/score_sphinx_bundle/__init__.py diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 
30e23e01..9e67f6c3 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -2,32 +2,23 @@ Thank you for your contribution! Please fill out this template to help us review your PR effectively. --> -# Pull Request Template – docs-as-code ## 📌 Description -- What does this PR change? -- Why is it needed? -- Which task it's related to? + -## Impact Analysis - +## 🚨 Impact Analysis + + - [ ] This change does not violate any tool requirements and is covered by existing tool requirements - [ ] This change does not violate any design decisions - [ ] Otherwise I have created a ticket for new tool qualification ## ✅ Checklist -Before requesting a review, please confirm that you have: + + - [ ] Added/updated documentation for new or changed features - [ ] Added/updated tests to cover the changes -- [ ] Verified that existing tests pass locally - [ ] Followed project coding standards and guidelines - -## 📖 Documentation -- Does this PR update docs? -- If not, explain why documentation is not needed. - ---- - -⚠️ **Note:** Pull requests with missing tests or documentation will not be merged. 
+ diff --git a/MODULE.bazel b/MODULE.bazel index e93202ea..bf432a73 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "1.1.0", + version = "1.2.0", compatibility_level = 1, ) diff --git a/docs.bzl b/docs.bzl index df23235b..23f10052 100644 --- a/docs.bzl +++ b/docs.bzl @@ -56,15 +56,7 @@ def docs(source_dir = "docs", data = [], deps = []): deps = deps + all_requirements + [ "@score_docs_as_code//src:plantuml_for_python", - "@score_docs_as_code//src/extensions:score_plantuml", - "@score_docs_as_code//src/find_runfiles:find_runfiles", - "@score_docs_as_code//src/extensions/score_draw_uml_funcs:score_draw_uml_funcs", - "@score_docs_as_code//src/extensions/score_header_service:score_header_service", - "@score_docs_as_code//src/extensions/score_layout:score_layout", - "@score_docs_as_code//src/extensions/score_metamodel:score_metamodel", - "@score_docs_as_code//src/extensions/score_source_code_linker:score_source_code_linker", - # NOTE: Do not comment this in, can only be enabled once tooling is released & process upgraded - #"@score_tooling//python_basics/score_pytest:attribute_plugin" + "@score_docs_as_code//src/extensions/score_sphinx_bundle:score_sphinx_bundle", ] sphinx_build_binary( diff --git a/docs/conf.py b/docs/conf.py index 263776cc..8051d9f2 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -11,60 +11,9 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -# Configuration file for the Sphinx documentation builder. 
-# -# For the full list of built-in configuration values, see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - - -# -- Project information ----------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information - project = "Score Docs-as-Code" project_url = "https://eclipse-score.github.io/docs-as-code/" project_prefix = "DOCS_" -author = "S-CORE" version = "0.1" -# -- General configuration --------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration - -extensions = [ - "sphinx_design", - "sphinx_needs", - "myst_parser", - "sphinxcontrib.plantuml", - "score_plantuml", - "score_source_code_linker", - "score_metamodel", - "score_draw_uml_funcs", - "score_layout", - "sphinxcontrib.mermaid", -] - -# Same as current VS Code extension -mermaid_version = "11.6.0" - -myst_enable_extensions = ["colon_fence"] - -exclude_patterns = [ - # The following entries are not required when building the documentation via 'bazel - # build //:docs', as that command runs in a sandboxed environment. However, when - # building the documentation via 'bazel run //:docs' or esbonio, these - # entries are required to prevent the build from failing. 
- "bazel-*", - ".venv*", -] - -# Enable markdown rendering -source_suffix = { - ".rst": "restructuredtext", - ".md": "markdown", -} - -templates_path = ["templates"] - - -# Enable numref -numfig = True -# needs_builder_filter = "" +extensions = ["score_sphinx_bundle"] diff --git a/src/extensions/score_sphinx_bundle/BUILD b/src/extensions/score_sphinx_bundle/BUILD new file mode 100644 index 00000000..c7f96d0d --- /dev/null +++ b/src/extensions/score_sphinx_bundle/BUILD @@ -0,0 +1,30 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +load("@aspect_rules_py//py:defs.bzl", "py_library") +load("@pip_process//:requirements.bzl", "all_requirements") + +py_library( + name = "score_sphinx_bundle", + srcs = ["__init__.py"], + visibility = ["//visibility:public"], + deps = all_requirements + [ + "@score_docs_as_code//src/extensions:score_plantuml", + "@score_docs_as_code//src/extensions/score_draw_uml_funcs", + "@score_docs_as_code//src/extensions/score_header_service", + "@score_docs_as_code//src/extensions/score_layout", + "@score_docs_as_code//src/extensions/score_metamodel", + "@score_docs_as_code//src/extensions/score_source_code_linker", + "@score_docs_as_code//src/find_runfiles", + "@score_docs_as_code//src/helper_lib", + ], +) diff --git a/src/extensions/score_sphinx_bundle/__init__.py b/src/extensions/score_sphinx_bundle/__init__.py new file mode 100644 index 00000000..4617f1fc --- /dev/null +++ b/src/extensions/score_sphinx_bundle/__init__.py @@ 
-0,0 +1,74 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +from typing import Any + + +from sphinx.application import Sphinx + +# Note: order matters! +# Extensions are loaded in this order. +# (Although not sure what has to be loaded first) +score_extensions = [ + "score_metamodel", + "sphinx_design", + "sphinx_needs", + "myst_parser", + "sphinxcontrib.plantuml", + "score_plantuml", + "score_source_code_linker", + "score_draw_uml_funcs", + "score_layout", + "sphinxcontrib.mermaid", +] + + +def setup(app: Sphinx) -> dict[str, object]: + # Global settings + # Note: the "sub-extensions" also set their own config values + + # Same as current VS Code extension + app.config.mermaid_version = "11.6.0" + + # enable "..."-syntax in markdown + app.config.myst_enable_extensions = ["colon_fence"] + + app.config.exclude_patterns = [ + # The following entries are not required when building the documentation via 'bazel + # build //:docs', as that command runs in a sandboxed environment. However, when + # building the documentation via 'bazel run //:docs' or esbonio, these + # entries are required to prevent the build from failing. 
+ "bazel-*", + ".venv*", + ] + + # Enable markdown rendering + app.config.source_suffix = { + ".rst": "restructuredtext", + ".md": "markdown", + } + + app.config.templates_path = ["templates"] + + app.config.numfig = True + + app.config.author = "S-CORE" + + # Load the actual extensions list + for e in score_extensions: + app.setup_extension(e) + + return { + "version": "0.1", + "parallel_read_safe": True, + "parallel_write_safe": True, + } diff --git a/src/tests/test_consumer.py b/src/tests/test_consumer.py index e5da97de..e2dddaee 100644 --- a/src/tests/test_consumer.py +++ b/src/tests/test_consumer.py @@ -79,7 +79,12 @@ class Result: ConsumerRepo( name="process_description", git_url="https://github.com/eclipse-score/process_description.git", - commands=["bazel run //:docs"], + commands=[ + "bazel run //:ide_support", + "bazel run //:docs_check", + "bazel run //:docs", + "bazel build //:needs_json", + ], test_commands=[], ), ConsumerRepo( @@ -87,6 +92,7 @@ class Result: git_url="https://github.com/eclipse-score/score.git", commands=[ "bazel run //:ide_support", + "bazel run //:docs_check", "bazel run //:docs", "bazel build //:needs_json", ], @@ -97,6 +103,7 @@ class Result: git_url="https://github.com/eclipse-score/module_template.git", commands=[ "bazel run //:ide_support", + "bazel run //:docs_check", "bazel run //:docs", "bazel build //:needs_json", ], From 5b52558baf28a5b19ea6473ca61715e3855ac5ef Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Fri, 29 Aug 2025 16:22:40 +0200 Subject: [PATCH 124/231] fix some of linter warnings (#238) --- src/extensions/score_layout/__init__.py | 6 +-- .../tests/test_check_options.py | 3 +- .../tests/test_metamodel__init__.py | 3 +- .../score_source_code_linker/__init__.py | 6 ++- .../need_source_links.py | 19 ++++--- .../score_source_code_linker/testlink.py | 10 ++-- .../tests/test_codelink.py | 4 +- .../test_source_code_link_integration.py | 54 +++++++++---------- .../tests/test_testlink.py | 9 ++-- 
.../tests/test_xml_parser.py | 11 ++-- .../score_source_code_linker/xml_parser.py | 23 +++++--- .../score_sphinx_bundle/__init__.py | 11 ++-- 12 files changed, 83 insertions(+), 76 deletions(-) diff --git a/src/extensions/score_layout/__init__.py b/src/extensions/score_layout/__init__.py index 9b373759..914b5951 100644 --- a/src/extensions/score_layout/__init__.py +++ b/src/extensions/score_layout/__init__.py @@ -47,9 +47,9 @@ def update_config(app: Sphinx, _config: Any): # Docs-as-code is the current module module = "_main" app.config.html_static_path.append(str(Path(r) / module / "src/assets")) - app.config.needs_flow_configs = { - "score_config": f"!include {Path(r) / module / 'src/assets/puml-theme-score.puml'}" - } + + puml = Path(r) / module / "src/assets/puml-theme-score.puml" + app.config.needs_flow_configs = {"score_config": f"!include {puml}"} app.add_css_file("css/score.css", priority=500) app.add_css_file("css/score_needs.css", priority=500) diff --git a/src/extensions/score_metamodel/tests/test_check_options.py b/src/extensions/score_metamodel/tests/test_check_options.py index a24759c0..881e5f9a 100644 --- a/src/extensions/score_metamodel/tests/test_check_options.py +++ b/src/extensions/score_metamodel/tests/test_check_options.py @@ -15,6 +15,7 @@ from unittest.mock import Mock import pytest +from attribute_plugin import add_test_properties from score_metamodel.checks.check_options import ( check_extra_options, check_options, @@ -22,8 +23,6 @@ from score_metamodel.tests import fake_check_logger, need from sphinx.application import Sphinx -from attribute_plugin import add_test_properties - class NeedTypeDict(TypedDict, total=False): directive: str diff --git a/src/extensions/score_metamodel/tests/test_metamodel__init__.py b/src/extensions/score_metamodel/tests/test_metamodel__init__.py index ea88aa7f..b2d967f3 100644 --- a/src/extensions/score_metamodel/tests/test_metamodel__init__.py +++ b/src/extensions/score_metamodel/tests/test_metamodel__init__.py 
@@ -11,6 +11,7 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* import pytest +from attribute_plugin import add_test_properties from src.extensions.score_metamodel.__init__ import ( graph_checks, @@ -18,8 +19,6 @@ parse_checks_filter, ) -from attribute_plugin import add_test_properties - def dummy_local_check(app, need, log): pass diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index d499a022..415e6e88 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -357,12 +357,14 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: # TODO: print github annotations as in https://github.com/eclipse-score/bazel_registry/blob/7423b9996a45dd0a9ec868e06a970330ee71cf4f/tools/verify_semver_compatibility_level.py#L126-L129 for n in source_code_links.links.CodeLinks: LOGGER.warning( - f"{n.file}:{n.line}: Could not find {source_code_links.need} in documentation [CODE LINK]", + f"{n.file}:{n.line}: Could not find {source_code_links.need} " + "in documentation [CODE LINK]", type="score_source_code_linker", ) for n in source_code_links.links.TestLinks: LOGGER.warning( - f"{n.file}:{n.line}: Could not find {source_code_links.need} in documentation [TEST LINK]", + f"{n.file}:{n.line}: Could not find {source_code_links.need} " + "in documentation [TEST LINK]", type="score_source_code_linker", ) continue diff --git a/src/extensions/score_source_code_linker/need_source_links.py b/src/extensions/score_source_code_linker/need_source_links.py index d5e10a10..6c738da8 100644 --- a/src/extensions/score_source_code_linker/need_source_links.py +++ b/src/extensions/score_source_code_linker/need_source_links.py @@ -11,9 +11,9 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* """ -This file 
defines NeedSourceLinks as well as SourceCodeLinks. -Both datatypes are used in the 'grouped cache' JSON that contains 'CodeLinks' and 'TestLinks' -It also defines a decoder and encoder for SourceCodeLinks to enable JSON read/write +This file defines NeedSourceLinks as well as SourceCodeLinks. Both datatypes are used in +the 'grouped cache' JSON that contains 'CodeLinks' and 'TestLinks' It also defines a +decoder and encoder for SourceCodeLinks to enable JSON read/write """ # req-Id: tool_req__docs_test_link_testcase @@ -56,9 +56,9 @@ class SourceCodeLinks: class SourceCodeLinks_JSON_Encoder(json.JSONEncoder): def default(self, o: object): - if isinstance(o, (SourceCodeLinks, NeedSourceLinks)): + if isinstance(o, SourceCodeLinks | NeedSourceLinks): return asdict(o) - if isinstance(o, (NeedLink, DataForTestLink)): + if isinstance(o, NeedLink | DataForTestLink): return asdict(o) if isinstance(o, Path): return str(o) @@ -81,7 +81,8 @@ def SourceCodeLinks_JSON_Decoder(d: dict[str, Any]) -> SourceCodeLinks | dict[st def store_source_code_links_combined_json( file: Path, source_code_links: list[SourceCodeLinks] ): - # After `rm -rf _build` or on clean builds the directory does not exist, so we need to create it + # After `rm -rf _build` or on clean builds the directory does not exist, so we need + # to create it file.parent.mkdir(exist_ok=True) with open(file, "w") as f: json.dump( @@ -99,9 +100,11 @@ def load_source_code_links_combined_json(file: Path) -> list[SourceCodeLinks]: object_hook=SourceCodeLinks_JSON_Decoder, ) assert isinstance(links, list), ( - "The combined source code linker links should be a list of SourceCodeLinks objects." + "The combined source code linker links should be " + "a list of SourceCodeLinks objects." ) assert all(isinstance(link, SourceCodeLinks) for link in links), ( - "All items in combined_source_code_linker_cache should be SourceCodeLinks objects." 
+ "All items in combined_source_code_linker_cache should be " + "SourceCodeLinks objects." ) return links diff --git a/src/extensions/score_source_code_linker/testlink.py b/src/extensions/score_source_code_linker/testlink.py index ccf31b10..3640201e 100644 --- a/src/extensions/score_source_code_linker/testlink.py +++ b/src/extensions/score_source_code_linker/testlink.py @@ -194,10 +194,11 @@ def DataOfTestCase_JSON_Decoder(d: dict[str, Any]) -> DataOfTestCase | dict[str, def store_test_xml_parsed_json(file: Path, testlist: list[DataForTestLink]): """ - TestCases that are 'skipped' do not have properties, therefore they will NOT be saved/transformed - to TestLinks. + TestCases that are 'skipped' do not have properties, therefore they will NOT be + saved/transformed to TestLinks. """ - # After `rm -rf _build` or on clean builds the directory does not exist, so we need to create it + # After `rm -rf _build` or on clean builds the directory does not exist, so we need + # to create it file.parent.mkdir(exist_ok=True) with open(file, "w") as f: json.dump( @@ -224,7 +225,8 @@ def load_test_xml_parsed_json(file: Path) -> list[DataForTestLink]: def store_data_of_test_case_json(file: Path, testneeds: list[DataOfTestCase]): - # After `rm -rf _build` or on clean builds the directory does not exist, so we need to create it + # After `rm -rf _build` or on clean builds the directory does not exist, so we need + # to create it file.parent.mkdir(exist_ok=True) with open(file, "w") as f: json.dump( diff --git a/src/extensions/score_source_code_linker/tests/test_codelink.py b/src/extensions/score_source_code_linker/tests/test_codelink.py index 703a986e..a7fe0a01 100644 --- a/src/extensions/score_source_code_linker/tests/test_codelink.py +++ b/src/extensions/score_source_code_linker/tests/test_codelink.py @@ -19,10 +19,10 @@ from typing import Any import pytest +from attribute_plugin import add_test_properties from sphinx_needs.data import NeedsMutable from 
src.extensions.score_metamodel.tests import need as test_need -from attribute_plugin import add_test_properties # Import the module under test # Note: You'll need to adjust these imports based on your actual module structure @@ -38,8 +38,6 @@ ) from src.helper_lib import ( get_current_git_hash, - get_github_repo_info, - parse_remote_git_output, ) from src.helper_lib.additional_functions import get_github_link diff --git a/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py b/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py index 9f7d2578..d5c6af0c 100644 --- a/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py +++ b/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py @@ -10,6 +10,7 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* +import contextlib import json import os import shutil @@ -46,7 +47,7 @@ def sphinx_base_dir(tmp_path_factory: TempPathFactory) -> Path: @pytest.fixture() -def git_repo_setup(sphinx_base_dir) -> Path: +def git_repo_setup(sphinx_base_dir: Path) -> Path: """Creating git repo, to make testing possible""" repo_path = sphinx_base_dir @@ -68,7 +69,7 @@ def git_repo_setup(sphinx_base_dir) -> Path: @pytest.fixture() -def create_demo_files(sphinx_base_dir, git_repo_setup): +def create_demo_files(sphinx_base_dir: Path, git_repo_setup): repo_path = sphinx_base_dir # Create some source files with requirement IDs @@ -141,7 +142,7 @@ def make_codelink_source_2(): return ( """ # Another implementation file -# Though we should make sure this +# Though we should make sure this # is at a different line than the other ID #""" + """ req-Id: TREQ_ID_1 @@ -214,7 +215,7 @@ def construct_gh_url() -> str: @pytest.fixture() def sphinx_app_setup( - sphinx_base_dir, create_demo_files, git_repo_setup + sphinx_base_dir: Path, create_demo_files, git_repo_setup ) -> 
Callable[[], SphinxTestApp]: def _create_app(): base_dir = sphinx_base_dir @@ -223,11 +224,9 @@ def _create_app(): # CRITICAL: Change to a directory that exists and is accessible # This fixes the "no such file or directory" error in Bazel original_cwd = None - try: + # Current working directory doesn't exist, which is the problem + with contextlib.suppress(FileNotFoundError): original_cwd = os.getcwd() - except FileNotFoundError: - # Current working directory doesn't exist, which is the problem - pass # Change to the base_dir before creating SphinxTestApp os.chdir(base_dir) @@ -243,11 +242,9 @@ def _create_app(): finally: # Try to restore original directory, but don't fail if it doesn't exist if original_cwd is not None: - try: + # Original directory might not exist anymore in Bazel sandbox + with contextlib.suppress(FileNotFoundError, OSError): os.chdir(original_cwd) - except (FileNotFoundError, OSError): - # Original directory might not exist anymore in Bazel sandbox - pass return _create_app @@ -302,8 +299,7 @@ def basic_needs(): @pytest.fixture() -def example_source_link_text_all_ok(sphinx_base_dir): - repo_path = sphinx_base_dir +def example_source_link_text_all_ok(sphinx_base_dir: Path): return { "TREQ_ID_1": [ NeedLink( @@ -334,8 +330,7 @@ def example_source_link_text_all_ok(sphinx_base_dir): @pytest.fixture() -def example_test_link_text_all_ok(sphinx_base_dir): - repo_path = sphinx_base_dir +def example_test_link_text_all_ok(sphinx_base_dir: Path): return { "TREQ_ID_1": [ DataForTestLink( @@ -392,8 +387,7 @@ def example_test_link_text_all_ok(sphinx_base_dir): @pytest.fixture() -def example_source_link_text_non_existent(sphinx_base_dir): - repo_path = sphinx_base_dir +def example_source_link_text_non_existent(sphinx_base_dir: Path): return [ { "TREQ_ID_200": [ @@ -409,11 +403,11 @@ def example_source_link_text_non_existent(sphinx_base_dir): ] -def make_source_link(needlinks): +def make_source_link(needlinks: list[NeedLink]): return ", 
".join(f"{get_github_link(n)}<>{n.file}:{n.line}" for n in needlinks) -def make_test_link(testlinks): +def make_test_link(testlinks: list[DataForTestLink]): return ", ".join(f"{get_github_link(n)}<>{n.name}" for n in testlinks) @@ -424,12 +418,14 @@ def compare_json_files(file1: Path, expected_file: Path, object_hook): with open(expected_file) as f2: json2 = json.load(f2, object_hook=object_hook) assert len(json1) == len(json2), ( - f"{file1}'s lenth are not the same as in the golden file lenght. Len of{file1}: {len(json1)}. Len of Golden File: {len(json2)}" + f"{file1}'s lenth are not the same as in the golden file lenght. " + f"Len of{file1}: {len(json1)}. Len of Golden File: {len(json2)}" ) c1 = Counter(n for n in json1) c2 = Counter(n for n in json2) assert c1 == c2, ( - f"Testfile does not have same needs as golden file. Testfile: {c1}\nGoldenFile: {c2}" + f"Testfile does not have same needs as golden file. " + f"Testfile: {c1}\nGoldenFile: {c2}" ) @@ -441,7 +437,8 @@ def compare_grouped_json_files(file1: Path, golden_file: Path): json2 = json.load(f2, object_hook=SourceCodeLinks_TEST_JSON_Decoder) assert len(json1) == len(json2), ( - f"Input & Expected have different Lenghts. Input: {file1}: {len(json1)}, Expected: {golden_file}: {len(json2)}" + "Input & Expected have different Lenghts. 
" + f"Input: {file1}: {len(json1)}, Expected: {golden_file}: {len(json2)}" ) json1_sorted = sorted(json1, key=lambda x: x.need) @@ -468,11 +465,14 @@ def compare_grouped_json_files(file1: Path, golden_file: Path): ) +@pytest.mark.skip( + "Flaky test, see https://github.com/eclipse-score/docs-as-code/issues/226" +) def test_source_link_integration_ok( sphinx_app_setup: Callable[[], SphinxTestApp], - example_source_link_text_all_ok: dict[str, list[str]], - example_test_link_text_all_ok: dict[str, list[str]], - sphinx_base_dir, + example_source_link_text_all_ok: dict[str, list[NeedLink]], + example_test_link_text_all_ok: dict[str, list[DataForTestLink]], + sphinx_base_dir: Path, git_repo_setup, create_demo_files, ): @@ -536,7 +536,7 @@ def test_source_link_integration_ok( def test_source_link_integration_non_existent_id( sphinx_app_setup: Callable[[], SphinxTestApp], example_source_link_text_non_existent: dict[str, list[str]], - sphinx_base_dir, + sphinx_base_dir: Path, git_repo_setup, create_demo_files, ): diff --git a/src/extensions/score_source_code_linker/tests/test_testlink.py b/src/extensions/score_source_code_linker/tests/test_testlink.py index 2f35f8a3..452faff3 100644 --- a/src/extensions/score_source_code_linker/tests/test_testlink.py +++ b/src/extensions/score_source_code_linker/tests/test_testlink.py @@ -13,6 +13,8 @@ import json from pathlib import Path +from attribute_plugin import add_test_properties + from src.extensions.score_source_code_linker.testlink import ( DataForTestLink, DataForTestLink_JSON_Decoder, @@ -21,7 +23,6 @@ load_test_xml_parsed_json, store_test_xml_parsed_json, ) -from attribute_plugin import add_test_properties @add_test_properties( @@ -72,8 +73,8 @@ def test_decoder_ignores_irrelevant_dicts(): ) def test_clean_text_removes_ansi_and_html_unescapes(): """ - Test if text clean works as intended. - It should remove ANSI color & text styles, as well as convert HTML things back to Chars + Test if text clean works as intended. 
It should remove ANSI color & text styles, as + well as convert HTML things back to Chars """ raw = "\x1b[31m<b>Warning</b>\x1b[0m\nExtra line" cleaned = DataOfTestCase.clean_text(raw) @@ -119,7 +120,7 @@ def test_testcaseneed_to_dict_multiple_links(): test_type="requirements-based", derivation_technique="requirements-analysis", ) -def test_store_and_load_testlinks_roundtrip(tmp_path): +def test_store_and_load_testlinks_roundtrip(tmp_path: Path): """Ensure that Encode/Decode is reversible""" file = tmp_path / "testlinks.json" diff --git a/src/extensions/score_source_code_linker/tests/test_xml_parser.py b/src/extensions/score_source_code_linker/tests/test_xml_parser.py index 2464a992..0119a83e 100644 --- a/src/extensions/score_source_code_linker/tests/test_xml_parser.py +++ b/src/extensions/score_source_code_linker/tests/test_xml_parser.py @@ -21,16 +21,15 @@ from typing import Any import pytest +from attribute_plugin import add_test_properties import src.extensions.score_source_code_linker.xml_parser as xml_parser from src.extensions.score_source_code_linker.testlink import DataOfTestCase -from attribute_plugin import add_test_properties - # Unsure if I should make these last a session or not @pytest.fixture -def tmp_xml_dirs(tmp_path): +def tmp_xml_dirs(tmp_path: Path): root = tmp_path / "bazel-testlogs" dir1 = root / "with_props" dir2 = root / "no_props" @@ -48,7 +47,7 @@ def write(file_path: Path, testcases: list[ET.Element]): def make_tc( name: str, result: str = "", - props: dict[str, str] = dict(), + props: dict[str, str] | None = None, file: str = "", line: int = 0, ): @@ -84,8 +83,8 @@ def make_tc( write(dir1 / "test.xml", [tc1]) # File without properties - # HINT: Once the assertions in xml_parser are back and active, this should allow us to catch that the tests - # Need to be changed too. + # HINT: Once the assertions in xml_parser are back and active, this should allow us + # to catch that the tests Need to be changed too. 
tc2 = make_tc("tc_no_props", file="path2", line=20) write(dir2 / "test.xml", [tc2]) diff --git a/src/extensions/score_source_code_linker/xml_parser.py b/src/extensions/score_source_code_linker/xml_parser.py index c742cc2b..075a9ef8 100644 --- a/src/extensions/score_source_code_linker/xml_parser.py +++ b/src/extensions/score_source_code_linker/xml_parser.py @@ -11,8 +11,9 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* """ -This file deals with finding and parsing of test.xml files that get created during `bazel test`. -It also generates external needs out of the parsed testcases to enable linking to requirements &gathering statistics +This file deals with finding and parsing of test.xml files that get created during +`bazel test`. It also generates external needs out of the parsed testcases to enable +linking to requirements &gathering statistics """ # req-Id: tool_req__docs_test_link_testcase @@ -68,7 +69,8 @@ def parse_testcase_result(testcase: ET.Element) -> tuple[str, str]: return "skipped", skipped.get("message", "") # TODO: Test all possible permuations of this to find if this is unreachable raise ValueError( - f"Testcase: {testcase.get('name')}. Did not find 'failed', 'skipped' or 'passed' in test" + f"Testcase: {testcase.get('name')}. " + "Did not find 'failed', 'skipped' or 'passed' in test" ) @@ -77,7 +79,8 @@ def parse_properties(case_properties: dict[str, Any], properties: Element): prop_name = prop.get("name", "") prop_value = prop.get("value", "") # We ignore the Description of the test as a 'property'. - # Every language just needs to ensure each test does have a description. No matter where this resides. + # Every language just needs to ensure each test does have a description. + # No matter where this resides. 
if prop_name == "Description": continue case_properties[prop_name] = prop_value @@ -103,7 +106,8 @@ def read_test_xml_file(file: Path) -> tuple[list[DataOfTestCase], list[str]]: case_properties = {} testname = testcase.get("name") assert testname is not None, ( - f"Testcase: {testcase} does not have a 'name' attribute. This is mandatory. This should not happen, something is wrong." + f"Testcase: {testcase} does not have a 'name' attribute. " + "This is mandatory. This should not happen, something is wrong." ) test_file = testcase.get("file") line = testcase.get("line") @@ -128,7 +132,8 @@ def read_test_xml_file(file: Path) -> tuple[list[DataOfTestCase], list[str]]: ) properties_element = testcase.find("properties") - # HINT: This list is hard coded here, might not be ideal to have that in the long run. + # HINT: This list is hard coded here, might not be ideal to have that in the + # long run. if properties_element is None: non_prop_tests.append(testname) continue @@ -180,7 +185,8 @@ def run_xml_parser(app: Sphinx, env: BuildEnvironment): output = list( itertools.chain.from_iterable(tcn.get_test_links() for tcn in test_case_needs) ) - # This is not ideal, due to duplication, but I can't think of a better solution right now + # This is not ideal, due to duplication, but I can't think of a better solution + # right now store_test_xml_parsed_json(app.outdir / "score_xml_parser_cache.json", output) @@ -188,7 +194,8 @@ def build_test_needs_from_files( app: Sphinx, env: BuildEnvironment, xml_paths: list[Path] ) -> list[DataOfTestCase]: """ - Reading in all test.xml files, and building 'testcase' external need objects out of them. + Reading in all test.xml files, and building 'testcase' external need objects out of + them. 
Returns: - list[TestCaseNeed] diff --git a/src/extensions/score_sphinx_bundle/__init__.py b/src/extensions/score_sphinx_bundle/__init__.py index 4617f1fc..8056b325 100644 --- a/src/extensions/score_sphinx_bundle/__init__.py +++ b/src/extensions/score_sphinx_bundle/__init__.py @@ -10,9 +10,6 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -from typing import Any - - from sphinx.application import Sphinx # Note: order matters! @@ -43,10 +40,10 @@ def setup(app: Sphinx) -> dict[str, object]: app.config.myst_enable_extensions = ["colon_fence"] app.config.exclude_patterns = [ - # The following entries are not required when building the documentation via 'bazel - # build //:docs', as that command runs in a sandboxed environment. However, when - # building the documentation via 'bazel run //:docs' or esbonio, these - # entries are required to prevent the build from failing. + # The following entries are not required when building the documentation via + # 'bazel build //:docs', as that command runs in a sandboxed environment. + # However, when building the documentation via 'bazel run //:docs' or esbonio, + # these entries are required to prevent the build from failing. 
"bazel-*", ".venv*", ] From df9dc12a4ebf6d197e1f6bbb4d32ee1698179075 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Mon, 1 Sep 2025 12:13:01 +0200 Subject: [PATCH 125/231] fix: score_sphinx_bundle must load plantuml before sphinx-needs (#240) --- src/extensions/score_plantuml.py | 21 +++++++++++-------- .../score_sphinx_bundle/__init__.py | 8 +++---- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/src/extensions/score_plantuml.py b/src/extensions/score_plantuml.py index 2669e0cd..c452315e 100644 --- a/src/extensions/score_plantuml.py +++ b/src/extensions/score_plantuml.py @@ -68,23 +68,26 @@ def get_runfiles_dir() -> Path: return runfiles_dir -def find_correct_path(runfiles: str) -> str: +def find_correct_path(runfiles: Path) -> Path: """ This ensures that the 'plantuml' binary path is found in local 'score_docs_as_code' and module use. """ - dirs = [str(x) for x in Path(runfiles).glob("*score_docs_as_code+")] - if dirs: - # Happens if 'score_docs_as_code' is used as Module - p = runfiles + "/score_docs_as_code+/src/plantuml" + if (Path(runfiles) / "score_docs_as_code+").exists(): + # Docs-as-code used as a module with bazel 8 + module = "score_docs_as_code+" + elif (Path(runfiles) / "score_docs_as_code~").exists(): + # Docs-as-code used as a module with bazel 7 + module = "score_docs_as_code~" else: - # Only happens in 'score_docs_as_code' repository - p = runfiles + "/../plantuml" - return p + # Docs-as-code is the current module + module = "_main" + + return runfiles / module / "src" / "plantuml" def setup(app: Sphinx): - app.config.plantuml = find_correct_path(str(get_runfiles_dir())) + app.config.plantuml = str(find_correct_path(get_runfiles_dir())) app.config.plantuml_output_format = "svg_obj" app.config.plantuml_syntax_error_image = True app.config.needs_build_needumls = "_plantuml_sources" diff --git a/src/extensions/score_sphinx_bundle/__init__.py b/src/extensions/score_sphinx_bundle/__init__.py index 8056b325..815062d5 100644 --- 
a/src/extensions/score_sphinx_bundle/__init__.py +++ b/src/extensions/score_sphinx_bundle/__init__.py @@ -14,14 +14,14 @@ # Note: order matters! # Extensions are loaded in this order. -# (Although not sure what has to be loaded first) +# e.g. plantuml MUST be loaded before sphinx-needs score_extensions = [ + "sphinxcontrib.plantuml", + "score_plantuml", + "sphinx_needs", "score_metamodel", "sphinx_design", - "sphinx_needs", "myst_parser", - "sphinxcontrib.plantuml", - "score_plantuml", "score_source_code_linker", "score_draw_uml_funcs", "score_layout", From b7ac1d5d54ec364118d1e8074638d22839f053db Mon Sep 17 00:00:00 2001 From: Nicolae Dicu Date: Mon, 8 Sep 2025 09:33:46 +0200 Subject: [PATCH 126/231] lint: Fix lint errors (#245) --- .github/workflows/consumer_test.yml | 2 +- pyproject.toml | 3 + scripts/run-linters.sh | 11 ++- .../score_draw_uml_funcs/__init__.py | 19 ++--- .../score_draw_uml_funcs/helpers.py | 5 +- src/extensions/score_layout/html_options.py | 6 +- src/extensions/score_layout/sphinx_options.py | 6 +- src/extensions/score_metamodel/__init__.py | 63 ++++++++++------ .../checks/attributes_format.py | 7 +- .../score_metamodel/checks/graph_checks.py | 13 ++-- .../checks/id_contains_feature.py | 4 +- .../tests/test_check_options.py | 21 +++--- .../tests/test_metamodel__init__.py | 9 ++- .../score_metamodel/tests/test_standards.py | 72 +++++++++---------- .../score_source_code_linker/__init__.py | 6 +- .../score_source_code_linker/testlink.py | 12 ++-- .../tests/test_codelink.py | 51 +++++++------ .../tests/test_need_source_links.py | 16 +++-- .../test_source_code_link_integration.py | 26 ++++--- .../tests/test_testlink.py | 3 +- .../tests/test_xml_parser.py | 24 ++++--- .../score_source_code_linker/xml_parser.py | 10 ++- src/helper_lib/test_helper_lib.py | 40 +++++------ src/incremental.py | 4 +- src/tests/conftest.py | 5 +- src/tests/test_consumer.py | 49 +++++++------ 26 files changed, 293 insertions(+), 194 deletions(-) diff --git 
a/.github/workflows/consumer_test.yml b/.github/workflows/consumer_test.yml index 7a07e726..38ed7562 100644 --- a/.github/workflows/consumer_test.yml +++ b/.github/workflows/consumer_test.yml @@ -36,7 +36,7 @@ jobs: - name: Run Consumer tests run: | - .venv_docs/bin/python -m pytest -s -v src/tests/ --repo=$CONSUMER + .venv_docs/bin/python -m pytest -s -v src/tests/ --repo="$CONSUMER" env: FORCE_COLOR: "1" diff --git a/pyproject.toml b/pyproject.toml index 1b74cd11..41a0a876 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,6 +10,9 @@ exclude = [ ".venv*/**", ] +venvPath = "." +venv = ".venv_docs" + [tool.ruff] extend = "bazel-bin/ide_support.runfiles/score_tooling+/python_basics/pyproject.toml" diff --git a/scripts/run-linters.sh b/scripts/run-linters.sh index 17ff20a5..83b0436d 100755 --- a/scripts/run-linters.sh +++ b/scripts/run-linters.sh @@ -25,12 +25,17 @@ echo "Running Actionlint..." bazel run @score_tooling//tools:actionlint echo "Running Shellcheck..." +# SC2038: find/xargs pattern works fine here despite non-alphanumeric filename warning +# The recommended -print0 | xargs -0 solution causes: +# "openBinaryFile: does not exist" with bazel run +# shellcheck disable=SC2038 find . \ -type d \( -name .git -o -name .venv -o -name bazel-out -o -name node_modules \) -prune -false \ -o -type f -exec grep -Il '^#!.*sh' {} \; | \ -xargs bazel run @score_tooling//tools:shellcheck -- +# SC1128: Shebang after copyright header is intentional +xargs bazel run @score_tooling//tools:shellcheck -- --exclude=SC1128 echo "Running Yamlfmt..." -bazel run @score_tooling//tools:yamlfmt -- $(find . \ +bazel run @score_tooling//tools:yamlfmt -- "$(find . 
\ -type d \( -name .git -o -name .venv -o -name bazel-out -o -name node_modules \) -prune -false \ - -o -type f \( -name "*.yaml" -o -name "*.yml" \) | tr '\n' '\0' | xargs -0) + -o -type f \( -name "*.yaml" -o -name "*.yml" \) | tr '\n' '\0' | xargs -0)" diff --git a/src/extensions/score_draw_uml_funcs/__init__.py b/src/extensions/score_draw_uml_funcs/__init__.py index da29e6cc..500ded74 100644 --- a/src/extensions/score_draw_uml_funcs/__init__.py +++ b/src/extensions/score_draw_uml_funcs/__init__.py @@ -30,7 +30,7 @@ from collections.abc import Callable from functools import cache from pathlib import Path -from typing import Any +from typing import Any, cast from score_draw_uml_funcs.helpers import ( gen_header, @@ -101,6 +101,7 @@ def _process_interfaces( linkage_text: str, ) -> str: """Helper to process either implemented or used interfaces.""" + for iface in iface_list: # check for misspelled interface if not all_needs.get(iface, []): @@ -108,16 +109,18 @@ def _process_interfaces( continue if relation == "implements": - if not proc_dict.get(iface, []): + proc_impl_dict = cast(dict[str, str], proc_dict) + if not proc_impl_dict.get(iface, []): linkage_text += ( f"{gen_link_text(need, '-u->', all_needs[iface], 'implements')} \n" ) - proc_dict[iface] = need["id"] + proc_impl_dict[iface] = need["id"] else: # "uses" - if not proc_dict.get(iface, []): - proc_dict[iface] = [need["id"]] + proc_used_dict = cast(dict[str, list[str]], proc_dict) + if not proc_used_dict.get(iface, []): + proc_used_dict[iface] = [need["id"]] else: - proc_dict[iface].append(need["id"]) + proc_used_dict[iface].append(need["id"]) return linkage_text @@ -377,7 +380,7 @@ def draw_module( all_needs, proc_impl_interfaces, proc_used_interfaces, - local_impl_interfaces, + list(local_impl_interfaces), structure_text, linkage_text, ) @@ -417,7 +420,7 @@ def _collect_interfaces_and_modules( if comps: impl_comp[iface] = comps[0] - if imcomp := impl_comp.get(iface, {}): + if imcomp := 
impl_comp.get(iface): module = get_module(imcomp, all_needs) # FIXME: sometimes module is empty, then the following code fails if not module: diff --git a/src/extensions/score_draw_uml_funcs/helpers.py b/src/extensions/score_draw_uml_funcs/helpers.py index 6cb609f0..1ef552dc 100644 --- a/src/extensions/score_draw_uml_funcs/helpers.py +++ b/src/extensions/score_draw_uml_funcs/helpers.py @@ -293,7 +293,10 @@ def get_impl_comp_from_logic_iface( real_iface: str, all_needs: dict[str, dict[str, str]] ) -> list[str]: """Get implementing component of the interface""" - implcomp: list[str] = all_needs[real_iface].get("implements_back", []) + implements_back = all_needs[real_iface].get("implements_back", []) + implcomp: list[str] = ( + [implements_back] if isinstance(implements_back, str) else implements_back + ) if not implcomp: logger.info( diff --git a/src/extensions/score_layout/html_options.py b/src/extensions/score_layout/html_options.py index 5794a9bd..aa2cc549 100644 --- a/src/extensions/score_layout/html_options.py +++ b/src/extensions/score_layout/html_options.py @@ -10,11 +10,13 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* +from typing import Any + from sphinx.application import Sphinx -def return_html_theme_options(app: Sphinx) -> dict[str, object]: - theme_options = { +def return_html_theme_options(app: Sphinx) -> dict[str, Any]: + theme_options: dict[str, Any] = { "navbar_align": "content", "header_links_before_dropdown": 5, "icon_links": [ diff --git a/src/extensions/score_layout/sphinx_options.py b/src/extensions/score_layout/sphinx_options.py index cc9e402a..141e075a 100644 --- a/src/extensions/score_layout/sphinx_options.py +++ b/src/extensions/score_layout/sphinx_options.py @@ -39,8 +39,10 @@ class SingleLayout(TypedDict): "initial=False)>>", ], "meta_left": [ - '<>', - '<>', + '<>', + '<>', ], "meta_right": [], "footer_left": ["<>"], diff --git 
a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index c045192d..672bc4cd 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -16,6 +16,7 @@ from collections.abc import Callable from dataclasses import dataclass, field from pathlib import Path +from typing import Any, cast from ruamel.yaml import YAML from sphinx.application import Sphinx @@ -153,7 +154,9 @@ def is_check_enabled(check: local_check_function | graph_check_function): # TODO: exit code -def convert_checks_to_dataclass(checks_dict) -> list[ProhibitedWordCheck]: +def convert_checks_to_dataclass( + checks_dict: dict[str, dict[str, Any]], +) -> list[ProhibitedWordCheck]: return [ ProhibitedWordCheck( name=check_name, @@ -164,7 +167,7 @@ def convert_checks_to_dataclass(checks_dict) -> list[ProhibitedWordCheck]: ] -def load_metamodel_data(): +def load_metamodel_data() -> dict[str, Any]: """ Load and process metamodel.yaml. @@ -178,18 +181,22 @@ def load_metamodel_data(): yaml = YAML() with open(yaml_path, encoding="utf-8") as f: - data = yaml.load(f) + data = cast(dict[str, Any], yaml.load(f)) # Access the custom validation block - types_dict = data.get("needs_types", {}) - links_dict = data.get("needs_extra_links", {}) - graph_check_dict = data.get("graph_checks", {}) - global_base_options = data.get("needs_types_base_options", {}) - global_base_options_optional_opts = global_base_options.get("optional_options", {}) + types_dict = cast(dict[str, Any], data.get("needs_types", {})) + links_dict = cast(dict[str, Any], data.get("needs_extra_links", {})) + graph_check_dict = cast(dict[str, Any], data.get("graph_checks", {})) + global_base_options = cast(dict[str, Any], data.get("needs_types_base_options", {})) + global_base_options_optional_opts = cast( + dict[str, Any], global_base_options.get("optional_options", {}) + ) # Get the stop_words and weak_words as separate lists - proh_checks_dict = 
data.get("prohibited_words_checks", {}) + proh_checks_dict = cast( + dict[str, dict[str, Any]], data.get("prohibited_words_checks", {}) + ) prohibited_words_checks = convert_checks_to_dataclass(proh_checks_dict) # Default options by sphinx, sphinx-needs or anything else we need to account for @@ -198,10 +205,12 @@ def load_metamodel_data(): # Convert "types" from {directive_name: {...}, ...} to a list of dicts needs_types_list = [] - all_options = set() + all_options: set[str] = set() for directive_name, directive_data in types_dict.items(): + directive_name = cast(str, directive_name) + directive_data = cast(dict[str, Any], directive_data) # Build up a single "needs_types" item - one_type = { + one_type: dict[str, Any] = { "directive": directive_name, "title": directive_data.get("title", ""), "prefix": directive_data.get("prefix", ""), @@ -213,14 +222,18 @@ def load_metamodel_data(): one_type["style"] = directive_data["style"] # Store mandatory_options and optional_options directly as a dict - mandatory_options = directive_data.get("mandatory_options", {}) + mandatory_options = cast( + dict[str, Any], directive_data.get("mandatory_options", {}) + ) one_type["mandatory_options"] = mandatory_options - tags = directive_data.get("tags", []) + tags = cast(list[str], directive_data.get("tags", [])) one_type["tags"] = tags - parts = directive_data.get("parts", 3) + parts = cast(int, directive_data.get("parts", 3)) one_type["parts"] = parts - optional_options = directive_data.get("optional_options", {}) + optional_options = cast( + dict[str, Any], directive_data.get("optional_options", {}) + ) optional_options.update(global_base_options_optional_opts) one_type["opt_opt"] = optional_options @@ -228,20 +241,28 @@ def load_metamodel_data(): all_options.update(list(optional_options.keys())) # mandatory_links => "req_link" - mand_links_yaml = directive_data.get("mandatory_links", {}) + mand_links_yaml = cast( + dict[str, Any], directive_data.get("mandatory_links", {}) + ) if 
mand_links_yaml: - one_type["req_link"] = [(k, v) for k, v in mand_links_yaml.items()] + one_type["req_link"] = [ + (cast(str, k), cast(Any, v)) for k, v in mand_links_yaml.items() + ] # optional_links => "opt_link" - opt_links_yaml = directive_data.get("optional_links", {}) + opt_links_yaml = cast(dict[str, Any], directive_data.get("optional_links", {})) if opt_links_yaml: - one_type["opt_link"] = [(k, v) for k, v in opt_links_yaml.items()] + one_type["opt_link"] = [ + (cast(str, k), cast(Any, v)) for k, v in opt_links_yaml.items() + ] needs_types_list.append(one_type) # Convert "links" dict -> list of {"option", "incoming", "outgoing"} - needs_extra_links_list = [] + needs_extra_links_list: list[dict[str, str]] = [] for link_option, link_data in links_dict.items(): + link_option = cast(str, link_option) + link_data = cast(dict[str, Any], link_data) needs_extra_links_list.append( { "option": link_option, @@ -254,7 +275,7 @@ def load_metamodel_data(): # As otherwise sphinx errors, due to an option being registered twice. 
# They are still inside the extra options we extract to enable # constraint checking via regex - needs_extra_options = sorted(all_options - set(default_options_list)) + needs_extra_options: list[str] = sorted(all_options - set(default_options_list)) return { "prohibited_words_checks": prohibited_words_checks, diff --git a/src/extensions/score_metamodel/checks/attributes_format.py b/src/extensions/score_metamodel/checks/attributes_format.py index 37ee3c61..ed05ae3f 100644 --- a/src/extensions/score_metamodel/checks/attributes_format.py +++ b/src/extensions/score_metamodel/checks/attributes_format.py @@ -12,6 +12,7 @@ # ******************************************************************************* import string +from typing import cast from score_metamodel import CheckLogger, ProhibitedWordCheck, ScoreNeedType, local_check from sphinx.application import Sphinx @@ -91,7 +92,11 @@ def _check_options_for_prohibited_words( ] for option in options: forbidden_words = prohibited_word_checks.option_check[option] - for word in need[option].split(): + option_value = need.get(option) + if not isinstance(option_value, str): + continue + option_text = cast(str, option_value) + for word in option_text.split(): normalized = word.strip(string.punctuation).lower() if normalized in forbidden_words: msg = ( diff --git a/src/extensions/score_metamodel/checks/graph_checks.py b/src/extensions/score_metamodel/checks/graph_checks.py index cdd51e23..e697419e 100644 --- a/src/extensions/score_metamodel/checks/graph_checks.py +++ b/src/extensions/score_metamodel/checks/graph_checks.py @@ -13,7 +13,7 @@ import operator from collections.abc import Callable from functools import reduce -from typing import Any +from typing import Any, cast from score_metamodel import ( CheckLogger, @@ -127,7 +127,7 @@ def filter_needs_by_criteria( for pat in pattern: if not any(need_type["directive"] == pat for need_type in needs_types): - log.warning(f"Unknown need type `{pat}` in graph check.") + 
log.warning(f"Unknown need type `{pat}` in graph check.", location="") for need in needs: if need_pattern == "include": @@ -155,7 +155,7 @@ def check_metamodel_graph( # Iterate over all graph checks for check_name, check_config in graph_checks_global.items(): needs_selection_criteria: dict[str, str] = check_config.get("needs") - check_to_perform: dict[str, str | dict] = check_config.get("check") + check_to_perform: dict[str, str | dict[str, Any]] = check_config.get("check") explanation = check_config.get("explanation", "") assert explanation != "", ( f"Explanation for graph check {check_name} is missing. " @@ -176,9 +176,12 @@ def check_metamodel_graph( log.warning_for_need(need, msg) continue - parent_ids: list[str] = need[parent_relation] + parent_ids = cast(list[str] | Any, need[parent_relation]) + if not isinstance(parent_ids, list): + continue - for parent_id in parent_ids: + parent_ids_list = cast(list[str], parent_ids) + for parent_id in parent_ids_list: parent_need = needs_dict_all.get(parent_id) if parent_need is None: msg = f"Parent need `{parent_id}` not found in needs_dict." 
diff --git a/src/extensions/score_metamodel/checks/id_contains_feature.py b/src/extensions/score_metamodel/checks/id_contains_feature.py index cd1b2ee4..55deeeed 100644 --- a/src/extensions/score_metamodel/checks/id_contains_feature.py +++ b/src/extensions/score_metamodel/checks/id_contains_feature.py @@ -52,14 +52,14 @@ def id_contains_feature(app: Sphinx, need: NeedsInfoType, log: CheckLogger): foundfeatpart = any( featurepart.lower() in docname.lower() for featurepart in featureparts - if featureparts + if featureparts and featurepart and docname ) # allow abbreviation of the feature initials = ( "".join(fp[0].lower() for fp in featureparts) if len(featureparts) > 1 else "" ) - foundinitials = bool(initials) and initials in docname.lower() + foundinitials = bool(initials) and docname and initials in docname.lower() if not (foundfeatpart or foundinitials): log.warning_for_option( diff --git a/src/extensions/score_metamodel/tests/test_check_options.py b/src/extensions/score_metamodel/tests/test_check_options.py index 881e5f9a..dba8e015 100644 --- a/src/extensions/score_metamodel/tests/test_check_options.py +++ b/src/extensions/score_metamodel/tests/test_check_options.py @@ -11,17 +11,18 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -from typing import TypedDict +from typing import TypedDict, cast from unittest.mock import Mock import pytest -from attribute_plugin import add_test_properties +from attribute_plugin import add_test_properties # type: ignore[import-untyped] +from score_metamodel import CheckLogger from score_metamodel.checks.check_options import ( check_extra_options, check_options, ) from score_metamodel.tests import fake_check_logger, need -from sphinx.application import Sphinx +from sphinx.application import Sphinx # type: ignore[import-untyped] class NeedTypeDict(TypedDict, total=False): @@ -91,7 +92,7 @@ def test_unknown_directive(self): app.config = Mock() 
app.config.needs_types = self.NEED_TYPE_INFO # Expect that the checks pass - check_options(app, need_1, logger) + check_options(app, need_1, cast(CheckLogger, logger)) logger.assert_warning( "no type info defined for semantic check.", expect_location=False, @@ -118,7 +119,7 @@ def test_unknown_directive_extra_option(self): app.config = Mock() app.config.needs_types = self.NEED_TYPE_INFO # Expect that the checks pass - check_extra_options(app, need_1, logger) + check_extra_options(app, need_1, cast(CheckLogger, logger)) logger.assert_warning( "no type info defined for semantic check.", expect_location=False, @@ -149,7 +150,7 @@ def test_missing_mandatory_options_info(self): app.config.needs_types = self.NEED_TYPE_INFO_WITHOUT_MANDATORY_OPTIONS app.config.allowed_external_prefixes = [] # Expect that the checks pass - check_options(app, need_1, logger) + check_options(app, need_1, cast(CheckLogger, logger)) logger.assert_warning( "no type info defined for semantic check.", expect_location=False, @@ -180,7 +181,7 @@ def test_invalid_option_type(self): app.config.needs_types = self.NEED_TYPE_INFO_WITH_INVALID_OPTION_TYPE app.config.allowed_external_prefixes = [] # Expect that the checks pass - check_options(app, need_1, logger) + check_options(app, need_1, cast(CheckLogger, logger)) logger.assert_warning( "pattern `42` is not a valid regex pattern.", expect_location=False, @@ -213,7 +214,7 @@ def test_unknown_option_present_in_neither_req_opt_neither_opt_opt(self): app.config.needs_types = self.NEED_TYPE_INFO_WITH_OPT_OPT app.config.allowed_external_prefixes = [] # Expect that the checks pass - check_extra_options(app, need_1, logger) + check_extra_options(app, need_1, cast(CheckLogger, logger)) logger.assert_warning( "has these extra options: `other_option`.", @@ -242,5 +243,5 @@ def test_invalid_option_value_type_raises_value_error(self): app.config.needs_types = self.NEED_TYPE_INFO app.config.allowed_external_prefixes = [] - with pytest.raises(ValueError, match="Only 
Strings are allowed"): - check_options(app, need_1, logger) + with pytest.raises(ValueError, match="Only Strings are allowed"): # type: ignore[attr-defined] + check_options(app, need_1, cast(CheckLogger, logger)) diff --git a/src/extensions/score_metamodel/tests/test_metamodel__init__.py b/src/extensions/score_metamodel/tests/test_metamodel__init__.py index b2d967f3..6e7c773b 100644 --- a/src/extensions/score_metamodel/tests/test_metamodel__init__.py +++ b/src/extensions/score_metamodel/tests/test_metamodel__init__.py @@ -11,8 +11,11 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* import pytest -from attribute_plugin import add_test_properties +from attribute_plugin import add_test_properties # type: ignore[import-untyped] +from sphinx.application import Sphinx +from sphinx_needs.data import NeedsInfoType, NeedsView +from src.extensions.score_metamodel import CheckLogger from src.extensions.score_metamodel.__init__ import ( graph_checks, local_checks, @@ -20,11 +23,11 @@ ) -def dummy_local_check(app, need, log): +def dummy_local_check(app: Sphinx, need: NeedsInfoType, log: CheckLogger) -> None: pass -def dummy_graph_check(app, needs_view, log): +def dummy_graph_check(app: Sphinx, needs_view: NeedsView, log: CheckLogger) -> None: pass diff --git a/src/extensions/score_metamodel/tests/test_standards.py b/src/extensions/score_metamodel/tests/test_standards.py index bda4e197..91b19790 100644 --- a/src/extensions/score_metamodel/tests/test_standards.py +++ b/src/extensions/score_metamodel/tests/test_standards.py @@ -878,35 +878,35 @@ def test_positive_case_mixed_linked_and_unlinked(self): """Test case where some needs with the tag are linked and some are not.""" # Setup mock needs data needs = [ - { - "id": "std_req__aspice40_MAN-5_REQ_001", - "tags": ["aspice40_man5", "other_tag"], - "type": "gd_requirement", - }, - { - "id": "std_req__aspice40_MAN-5_REQ_002", - "tags": ["aspice40_man5"], - 
"type": "std_req", - }, - { - "id": "std_req__REQ_003_test", - "tags": ["different_tag"], - "type": "std_req", - }, - { - "id": "stkh_req__aspice40_MAN-5_REQ_004", - "tags": ["aspice40_man5"], - "type": "stkh_req", - }, - { - "id": "COMP_001", - "tags": [], - "type": "gd_req", - "complies": [ + need( + id="std_req__aspice40_MAN-5_REQ_001", + tags=["aspice40_man5", "other_tag"], + type="gd_requirement", + ), + need( + id="std_req__aspice40_MAN-5_REQ_002", + tags=["aspice40_man5"], + type="std_req", + ), + need( + id="std_req__REQ_003_test", + tags=["different_tag"], + type="std_req", + ), + need( + id="stkh_req__aspice40_MAN-5_REQ_004", + tags=["aspice40_man5"], + type="stkh_req", + ), + need( + id="COMP_001", + tags=[], + type="gd_req", + complies=[ "std_req__aspice40_MAN-5_REQ_002", "std_req__aspice40_MAN-5_REQ_001", ], - }, + ), ] results = [] @@ -921,14 +921,14 @@ def test_positive_case_mixed_linked_and_unlinked(self): def test_negative_case_no_needs_with_tag(self): """Test case where no needs have the specified tag.""" needs = [ - {"id": "REQ_001", "tags": ["other_tag"], "type": "gd_requirement"}, - {"id": "REQ_002", "tags": ["different_tag"], "type": "gd_process"}, - { - "id": "COMP_001", - "tags": [], - "type": "gd_compliance", - "complies": ["REQ_001"], - }, + need(id="REQ_001", tags=["other_tag"], type="gd_requirement"), + need(id="REQ_002", tags=["different_tag"], type="gd_process"), + need( + id="COMP_001", + tags=[], + type="gd_compliance", + complies=["REQ_001"], + ), ] results = [] @@ -941,11 +941,11 @@ def test_negative_case_no_needs_with_tag(self): def test_assert_multiple_kwargs(self): """Test case that triggers the assertion error for multiple kwargs.""" - needs = [{"id": "REQ_001", "tags": ["test_tag"], "type": "gd_requirement"}] + needs = [need(id="REQ_001", tags=["test_tag"], type="gd_requirement")] results = [] # Test if our assert works - with pytest.raises( + with pytest.raises( # type: ignore[attr-defined] AssertionError, match="Can only 
provide one tag to " + "`my_pie_linked_standard_requirements_by_tag`", diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index 415e6e88..d8693500 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -337,7 +337,8 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: for id, need in needs.items(): if need.get("source_code_link"): LOGGER.debug( - f"?? Need {id} already has source_code_link: {need.get('source_code_link')}" + f"?? Need {id} already has source_code_link: " + f"{need.get('source_code_link')}" ) if need.get("testlink"): LOGGER.debug( @@ -391,7 +392,8 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: # │ Commented out for now │ # ╰──────────────────────────────────────╯ -# source_code_link of affected needs was overwritten. Make sure it's empty in all others! +# source_code_link of affected needs was overwritten. +# Make sure it's empty in all others! # for need in needs.values(): # if need["id"] not in source_code_links_by_need: # need["source_code_link"] = "" # type: ignore diff --git a/src/extensions/score_source_code_linker/testlink.py b/src/extensions/score_source_code_linker/testlink.py index 3640201e..50066fe3 100644 --- a/src/extensions/score_source_code_linker/testlink.py +++ b/src/extensions/score_source_code_linker/testlink.py @@ -118,18 +118,20 @@ def __post_init__(self): # It's mandatory that the test either partially or fully verifies a requirement # if self.PartiallyVerifies is None and self.FullyVerifies is None: # raise ValueError( - # f"TestCase: {self.id} Error. Either 'PartiallyVerifies' or 'FullyVerifies' must be provided." + # f"TestCase: {self.id} Error. Either 'PartiallyVerifies' or " + # "'FullyVerifies' must be provided." 
# ) # Skipped tests should always have a reason associated with them # if "skipped" in self.result.keys() and not list(self.result.values())[0]: # raise ValueError( - # f"TestCase: {self.id} Error. Test was skipped without provided reason, reason is mandatory for skipped tests." + # f"TestCase: {self.id} Error. Test was skipped without provided " + # "reason, reason is mandatory for skipped tests." # ) def get_test_links(self) -> list[DataForTestLink]: """Convert TestCaseNeed to list of TestLink objects.""" - def parse_attributes(self, verify_field: str | None, verify_type: str): + def parse_attributes(verify_field: str | None, verify_type: str): """Process a verification field and yield TestLink objects.""" if not verify_field: return @@ -152,8 +154,8 @@ def parse_attributes(self, verify_field: str | None, verify_type: str): return list( chain( - parse_attributes(self, self.PartiallyVerifies, "partially"), - parse_attributes(self, self.FullyVerifies, "fully"), + parse_attributes(self.PartiallyVerifies, "partially"), + parse_attributes(self.FullyVerifies, "fully"), ) ) diff --git a/src/extensions/score_source_code_linker/tests/test_codelink.py b/src/extensions/score_source_code_linker/tests/test_codelink.py index a7fe0a01..7bb3897c 100644 --- a/src/extensions/score_source_code_linker/tests/test_codelink.py +++ b/src/extensions/score_source_code_linker/tests/test_codelink.py @@ -14,12 +14,13 @@ import os import subprocess import tempfile +from collections.abc import Generator from dataclasses import asdict from pathlib import Path from typing import Any import pytest -from attribute_plugin import add_test_properties +from attribute_plugin import add_test_properties # type: ignore[import-untyped] from sphinx_needs.data import NeedsMutable from src.extensions.score_metamodel.tests import need as test_need @@ -89,16 +90,16 @@ def needlink_test_decoder(d: dict[str, Any]) -> NeedLink | dict[str, Any]: @pytest.fixture -def temp_dir(): +def temp_dir() -> Generator[Path, 
None, None]: """Create a temporary directory for tests.""" with tempfile.TemporaryDirectory() as temp_dir: yield Path(temp_dir) @pytest.fixture -def git_repo(temp_dir): +def git_repo(temp_dir: Path) -> Path: """Create a real git repository for testing.""" - git_dir = temp_dir / "test_repo" + git_dir: Path = temp_dir / "test_repo" git_dir.mkdir() # Initialize git repo @@ -109,7 +110,7 @@ def git_repo(temp_dir): subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) # Create a test file and commit - test_file = git_dir / "test_file.py" + test_file: Path = git_dir / "test_file.py" test_file.write_text("# Test file\nprint('hello')\n") subprocess.run(["git", "add", "."], cwd=git_dir, check=True) subprocess.run(["git", "commit", "-m", "Initial commit"], cwd=git_dir, check=True) @@ -124,7 +125,7 @@ def git_repo(temp_dir): @pytest.fixture -def sample_needlinks(): +def sample_needlinks() -> list[NeedLink]: """Create sample NeedLink objects for testing.""" return [ NeedLink( @@ -159,15 +160,15 @@ def sample_needlinks(): @pytest.fixture -def cache_file_with_links(temp_dir, sample_needlinks): +def cache_file_with_links(temp_dir: Path, sample_needlinks: list[NeedLink]) -> Path: """Create a cache file with sample needlinks.""" - cache_file = temp_dir / "cache.json" + cache_file: Path = temp_dir / "cache.json" store_source_code_links_json(cache_file, sample_needlinks) return cache_file @pytest.fixture -def sample_needs(): +def sample_needs() -> dict[str, dict[str, str]]: """Create sample needs data for testing.""" return { "TREQ_ID_1": { @@ -202,7 +203,7 @@ def test_get_cache_filename(): assert result == expected -def make_needs(needs_dict): +def make_needs(needs_dict: dict[str, dict[str, Any]]) -> NeedsMutable: return NeedsMutable( {need_id: test_need(**params) for need_id, params in needs_dict.items()} ) @@ -289,7 +290,7 @@ def test_find_need_not_found(): test_type="requirements-based", derivation_technique="requirements-analysis", ) -def 
test_group_by_need(sample_needlinks): +def test_group_by_need(sample_needlinks: list[NeedLink]) -> None: """Test grouping source code links by need ID.""" result = group_by_need(sample_needlinks) @@ -324,7 +325,7 @@ def test_group_by_need_empty_list(): test_type="requirements-based", derivation_technique="requirements-analysis", ) -def test_get_github_link_with_real_repo(git_repo): +def test_get_github_link_with_real_repo(git_repo: Path) -> None: """Test generating GitHub link with real repository.""" # Create a needlink needlink = NeedLink( @@ -356,9 +357,11 @@ def test_get_github_link_with_real_repo(git_repo): test_type="requirements-based", derivation_technique="requirements-analysis", ) -def test_cache_file_operations(temp_dir, sample_needlinks): +def test_cache_file_operations( + temp_dir: Path, sample_needlinks: list[NeedLink] +) -> None: """Test storing and loading cache files.""" - cache_file = temp_dir / "test_cache.json" + cache_file: Path = temp_dir / "test_cache.json" # Store links store_source_code_links_json(cache_file, sample_needlinks) @@ -385,7 +388,7 @@ def test_cache_file_operations(temp_dir, sample_needlinks): test_type="requirements-based", derivation_technique="requirements-analysis", ) -def test_cache_file_with_encoded_comments(temp_dir): +def test_cache_file_with_encoded_comments(temp_dir: Path) -> None: """Test that cache file properly handles encoded comments.""" # Create needlinks with spaces in tags and full_line needlinks = [ @@ -398,7 +401,7 @@ def test_cache_file_with_encoded_comments(temp_dir): ) ] - cache_file = temp_dir / "encoded_cache.json" + cache_file: Path = temp_dir / "encoded_cache.json" store_source_code_links_json(cache_file, needlinks) # Check the raw JSON to verify encoding @@ -422,7 +425,9 @@ def test_cache_file_with_encoded_comments(temp_dir): test_type="requirements-based", derivation_technique="requirements-analysis", ) -def test_group_by_need_and_find_need_integration(sample_needlinks): +def 
test_group_by_need_and_find_need_integration( + sample_needlinks: list[NeedLink], +) -> None: """Test grouping links and finding needs together.""" # Group the test links grouped = group_by_need(sample_needlinks) @@ -455,10 +460,12 @@ def test_group_by_need_and_find_need_integration(sample_needlinks): test_type="requirements-based", derivation_technique="requirements-analysis", ) -def test_source_linker_end_to_end_with_real_files(temp_dir, git_repo): +def test_source_linker_end_to_end_with_real_files( + temp_dir: Path, git_repo: Path +) -> None: """Test end-to-end workflow with real files and git repo.""" # Create source files with requirement IDs - src_dir = git_repo / "src" + src_dir: Path = git_repo / "src" src_dir.mkdir() (src_dir / "implementation1.py").write_text( @@ -520,7 +527,7 @@ def another_function(): ] # Test cache operations - cache_file = temp_dir / "cache.json" + cache_file: Path = temp_dir / "cache.json" store_source_code_links_json(cache_file, needlinks) loaded_links = load_source_code_links_json(cache_file) @@ -550,13 +557,13 @@ def another_function(): test_type="requirements-based", derivation_technique="requirements-analysis", ) -def test_multiple_commits_hash_consistency(git_repo): +def test_multiple_commits_hash_consistency(git_repo: Path) -> None: """Test that git hash remains consistent and links update properly.""" # Get initial hash initial_hash = get_current_git_hash(git_repo) # Create and commit a new file - new_file = git_repo / "new_file.py" + new_file: Path = git_repo / "new_file.py" new_file.write_text("# New file\nprint('new')") subprocess.run(["git", "add", "."], cwd=git_repo, check=True) subprocess.run(["git", "commit", "-m", "Add new file"], cwd=git_repo, check=True) diff --git a/src/extensions/score_source_code_linker/tests/test_need_source_links.py b/src/extensions/score_source_code_linker/tests/test_need_source_links.py index 4e1c052c..df234a0b 100644 --- 
a/src/extensions/score_source_code_linker/tests/test_need_source_links.py +++ b/src/extensions/score_source_code_linker/tests/test_need_source_links.py @@ -42,7 +42,9 @@ def SourceCodeLinks_TEST_JSON_Decoder( need=d["need"], links=NeedSourceLinks( CodeLinks=[ - needlink_test_decoder(cl) for cl in links.get("CodeLinks", []) + link + for cl in links.get("CodeLinks", []) + if isinstance(link := needlink_test_decoder(cl), NeedLink) ], TestLinks=[DataForTestLink(**tl) for tl in links.get("TestLinks", [])], ), @@ -51,7 +53,7 @@ def SourceCodeLinks_TEST_JSON_Decoder( class SourceCodeLinks_TEST_JSON_Encoder(json.JSONEncoder): - def default(self, o: object): + def default(self, o: object) -> Any: if isinstance(o, SourceCodeLinks): return { "need": o.need, @@ -92,20 +94,22 @@ def sample_testlink() -> DataForTestLink: @pytest.fixture -def sample_source_code_links(sample_needlink, sample_testlink) -> SourceCodeLinks: +def sample_source_code_links( + sample_needlink: NeedLink, sample_testlink: DataForTestLink +) -> SourceCodeLinks: return SourceCodeLinks( need="REQ_001", links=NeedSourceLinks(CodeLinks=[sample_needlink], TestLinks=[sample_testlink]), ) -def test_encoder_outputs_serializable_dict(sample_source_code_links): +def test_encoder_outputs_serializable_dict(sample_source_code_links: SourceCodeLinks): encoded = json.dumps(sample_source_code_links, cls=SourceCodeLinks_JSON_Encoder) assert isinstance(encoded, str) assert "REQ_001" in encoded -def test_decoder_reconstructs_object(sample_source_code_links): +def test_decoder_reconstructs_object(sample_source_code_links: SourceCodeLinks): encoded = json.dumps(sample_source_code_links, cls=SourceCodeLinks_JSON_Encoder) decoded = json.loads(encoded, object_hook=SourceCodeLinks_JSON_Decoder) assert isinstance(decoded, SourceCodeLinks) @@ -114,7 +118,7 @@ def test_decoder_reconstructs_object(sample_source_code_links): assert decoded.links.CodeLinks[0].need == "REQ_001" -def test_store_and_load_json(tmp_path: Path, 
sample_source_code_links): +def test_store_and_load_json(tmp_path: Path, sample_source_code_links: SourceCodeLinks): test_file = tmp_path / "combined_links.json" store_source_code_links_combined_json(test_file, [sample_source_code_links]) assert test_file.exists() diff --git a/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py b/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py index d5c6af0c..52af7cc0 100644 --- a/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py +++ b/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py @@ -18,7 +18,7 @@ from collections import Counter from collections.abc import Callable from pathlib import Path -from typing import cast +from typing import Any, cast import pytest from pytest import TempPathFactory @@ -42,8 +42,7 @@ @pytest.fixture() def sphinx_base_dir(tmp_path_factory: TempPathFactory) -> Path: - repo_path = tmp_path_factory.mktemp("test_git_repo") - return repo_path + return tmp_path_factory.mktemp("test_git_repo") @pytest.fixture() @@ -69,7 +68,7 @@ def git_repo_setup(sphinx_base_dir: Path) -> Path: @pytest.fixture() -def create_demo_files(sphinx_base_dir: Path, git_repo_setup): +def create_demo_files(sphinx_base_dir: Path, git_repo_setup: Path): repo_path = sphinx_base_dir # Create some source files with requirement IDs @@ -190,6 +189,8 @@ def make_test_xml_1(): def make_test_xml_2(): + # ruff: This is a long xml string, so ignore the line length check for the block + # flake8: noqa: E501 (start) return """ @@ -208,6 +209,9 @@ def make_test_xml_2(): """ +# flake8: noqa: E501 (end) + + def construct_gh_url() -> str: gh = get_github_base_url() return f"{gh}/blob/" @@ -215,7 +219,7 @@ def construct_gh_url() -> str: @pytest.fixture() def sphinx_app_setup( - sphinx_base_dir: Path, create_demo_files, git_repo_setup + sphinx_base_dir: Path, create_demo_files: None, git_repo_setup: Path ) -> Callable[[], 
SphinxTestApp]: def _create_app(): base_dir = sphinx_base_dir @@ -411,7 +415,9 @@ def make_test_link(testlinks: list[DataForTestLink]): return ", ".join(f"{get_github_link(n)}<>{n.name}" for n in testlinks) -def compare_json_files(file1: Path, expected_file: Path, object_hook): +def compare_json_files( + file1: Path, expected_file: Path, object_hook: Callable[[dict[str, Any]], Any] +): """Golden File tests with a known good file and the one created""" with open(file1) as f1: json1 = json.load(f1, object_hook=object_hook) @@ -473,8 +479,8 @@ def test_source_link_integration_ok( example_source_link_text_all_ok: dict[str, list[NeedLink]], example_test_link_text_all_ok: dict[str, list[DataForTestLink]], sphinx_base_dir: Path, - git_repo_setup, - create_demo_files, + git_repo_setup: Path, + create_demo_files: None, ): """This is a test description""" app = sphinx_app_setup() @@ -537,8 +543,8 @@ def test_source_link_integration_non_existent_id( sphinx_app_setup: Callable[[], SphinxTestApp], example_source_link_text_non_existent: dict[str, list[str]], sphinx_base_dir: Path, - git_repo_setup, - create_demo_files, + git_repo_setup: Path, + create_demo_files: None, ): """Asserting warning if need not found""" app = sphinx_app_setup() diff --git a/src/extensions/score_source_code_linker/tests/test_testlink.py b/src/extensions/score_source_code_linker/tests/test_testlink.py index 452faff3..74becef6 100644 --- a/src/extensions/score_source_code_linker/tests/test_testlink.py +++ b/src/extensions/score_source_code_linker/tests/test_testlink.py @@ -13,7 +13,8 @@ import json from pathlib import Path -from attribute_plugin import add_test_properties +# This depends on the `attribute_plugin` in our tooling repository +from attribute_plugin import add_test_properties # type: ignore[import-untyped] from src.extensions.score_source_code_linker.testlink import ( DataForTestLink, diff --git a/src/extensions/score_source_code_linker/tests/test_xml_parser.py 
b/src/extensions/score_source_code_linker/tests/test_xml_parser.py index 0119a83e..c234e08b 100644 --- a/src/extensions/score_source_code_linker/tests/test_xml_parser.py +++ b/src/extensions/score_source_code_linker/tests/test_xml_parser.py @@ -21,7 +21,9 @@ from typing import Any import pytest -from attribute_plugin import add_test_properties + +# This depends on the `attribute_plugin` in our tooling repository +from attribute_plugin import add_test_properties # type: ignore[import-untyped] import src.extensions.score_source_code_linker.xml_parser as xml_parser from src.extensions.score_source_code_linker.testlink import DataOfTestCase @@ -29,10 +31,10 @@ # Unsure if I should make these last a session or not @pytest.fixture -def tmp_xml_dirs(tmp_path: Path): - root = tmp_path / "bazel-testlogs" - dir1 = root / "with_props" - dir2 = root / "no_props" +def tmp_xml_dirs(tmp_path: Path) -> tuple[Path, Path, Path]: + root: Path = tmp_path / "bazel-testlogs" + dir1: Path = root / "with_props" + dir2: Path = root / "no_props" dir1.mkdir(parents=True) dir2.mkdir(parents=True) @@ -96,11 +98,14 @@ def make_tc( test_type="requirements-based", derivation_technique="requirements-analysis", ) -def test_find_xml_files(tmp_xml_dirs): +def test_find_xml_files(tmp_xml_dirs: tuple[Path, Path, Path]): """Ensure xml files are found as expected""" + root: Path + dir1: Path + dir2: Path root, dir1, dir2 = tmp_xml_dirs found = xml_parser.find_xml_files(root) - expected = {dir1 / "test.xml", dir2 / "test.xml"} + expected: set[Path] = {dir1 / "test.xml", dir2 / "test.xml"} assert set(found) == expected @@ -147,8 +152,11 @@ def test_parse_properties(): test_type="requirements-based", derivation_technique="requirements-analysis", ) -def test_read_test_xml_file(tmp_xml_dirs): +def test_read_test_xml_file(tmp_xml_dirs: tuple[Path, Path, Path]): """Ensure a whole pre-defined xml file is parsed correctly""" + _: Path + dir1: Path + dir2: Path _, dir1, dir2 = tmp_xml_dirs needs1, no_props1 = 
xml_parser.read_test_xml_file(dir1 / "test.xml") diff --git a/src/extensions/score_source_code_linker/xml_parser.py b/src/extensions/score_source_code_linker/xml_parser.py index 075a9ef8..53c18b23 100644 --- a/src/extensions/score_source_code_linker/xml_parser.py +++ b/src/extensions/score_source_code_linker/xml_parser.py @@ -119,10 +119,12 @@ def read_test_xml_file(file: Path) -> tuple[list[DataOfTestCase], list[str]]: # ╰──────────────────────────────────────╯ # assert test_file is not None, ( - # f"Testcase: {testname} does not have a 'file' attribute. This is mandatory" + # f"Testcase: {testname} does not have a 'file' attribute. " + # "This is mandatory" # ) # assert lineNr is not None, ( - # f"Testcase: {testname} located in {test_file} does not have a 'lineNr' attribute. This is mandator" + # f"Testcase: {testname} located in {test_file} does not have a " + # "'lineNr' attribute. This is mandatory" # ) case_properties["name"] = testname case_properties["file"] = test_file @@ -142,7 +144,9 @@ def read_test_xml_file(file: Path) -> tuple[list[DataOfTestCase], list[str]]: # ║ Disabled Temporarily ║ # ╙ ╜ # assert properties_element is not None, ( - # f"Testcase: {testname} located in {test_file}:{lineNr}, does not have any properties. Properties 'TestType', 'DerivationTechnique' and either 'PartiallyVerifies' or 'FullyVerifies' are mandatory." + # f"Testcase: {testname} located in {test_file}:{lineNr}, does not " + # "have any properties. Properties 'TestType', 'DerivationTechnique' " + # "and either 'PartiallyVerifies' or 'FullyVerifies' are mandatory." 
# ) case_properties = parse_properties(case_properties, properties_element) diff --git a/src/helper_lib/test_helper_lib.py b/src/helper_lib/test_helper_lib.py index d1d0a2f8..0025d1ca 100644 --- a/src/helper_lib/test_helper_lib.py +++ b/src/helper_lib/test_helper_lib.py @@ -32,9 +32,9 @@ def temp_dir(): @pytest.fixture -def git_repo(temp_dir): +def git_repo(temp_dir: Path) -> Path: """Create a real git repository for testing.""" - git_dir = temp_dir / "test_repo" + git_dir: Path = temp_dir / "test_repo" git_dir.mkdir() # Initialize git repo @@ -45,7 +45,7 @@ def git_repo(temp_dir): subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) # Create a test file and commit - test_file = git_dir / "test_file.py" + test_file: Path = git_dir / "test_file.py" test_file.write_text("# Test file\nprint('hello')\n") subprocess.run(["git", "add", "."], cwd=git_dir, check=True) subprocess.run(["git", "commit", "-m", "Initial commit"], cwd=git_dir, check=True) @@ -60,9 +60,9 @@ def git_repo(temp_dir): @pytest.fixture -def git_repo_multiple_remotes(temp_dir): +def git_repo_multiple_remotes(temp_dir: Path) -> Path: """Create a git repository with multiple remotes for testing.""" - git_dir = temp_dir / "test_repo_multiple" + git_dir: Path = temp_dir / "test_repo_multiple" git_dir.mkdir() # Initialize git repo @@ -73,7 +73,7 @@ def git_repo_multiple_remotes(temp_dir): subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) # Create a test file and commit - test_file = git_dir / "test_file.py" + test_file: Path = git_dir / "test_file.py" test_file.write_text("# Test file\nprint('hello')\n") subprocess.run(["git", "add", "."], cwd=git_dir, check=True) subprocess.run(["git", "commit", "-m", "Initial commit"], cwd=git_dir, check=True) @@ -94,9 +94,9 @@ def git_repo_multiple_remotes(temp_dir): @pytest.fixture -def git_repo_with_https_remote(temp_dir): +def git_repo_with_https_remote(temp_dir: Path) -> Path: """Create a git 
repository with HTTPS remote for testing.""" - git_dir = temp_dir / "test_repo_https" + git_dir: Path = temp_dir / "test_repo_https" git_dir.mkdir() # Initialize git repo @@ -107,7 +107,7 @@ def git_repo_with_https_remote(temp_dir): subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) # Create a test file and commit - test_file = git_dir / "test_file.py" + test_file: Path = git_dir / "test_file.py" test_file.write_text("# Test file\nprint('hello')\n") subprocess.run(["git", "add", "."], cwd=git_dir, check=True) subprocess.run(["git", "commit", "-m", "Initial commit"], cwd=git_dir, check=True) @@ -129,9 +129,9 @@ def git_repo_with_https_remote(temp_dir): # Test error handling -def test_git_operations_with_no_commits(temp_dir): +def test_git_operations_with_no_commits(temp_dir: Path): """Test git operations on repo with no commits.""" - git_dir = temp_dir / "empty_repo" + git_dir: Path = temp_dir / "empty_repo" git_dir.mkdir() # Initialize git repo but don't commit anything @@ -147,9 +147,9 @@ def test_git_operations_with_no_commits(temp_dir): get_current_git_hash(git_dir) -def test_git_repo_with_no_remotes(temp_dir): +def test_git_repo_with_no_remotes(temp_dir: Path): """Test git repository with no remotes.""" - git_dir = temp_dir / "no_remote_repo" + git_dir: Path = temp_dir / "no_remote_repo" git_dir.mkdir() # Initialize git repo @@ -160,7 +160,7 @@ def test_git_repo_with_no_remotes(temp_dir): subprocess.run(["git", "config", "user.name", "Test User"], cwd=git_dir, check=True) # Create a test file and commit - test_file = git_dir / "test_file.py" + test_file: Path = git_dir / "test_file.py" test_file.write_text("# Test file\nprint('hello')\n") subprocess.run(["git", "add", "."], cwd=git_dir, check=True) subprocess.run(["git", "commit", "-m", "Initial commit"], cwd=git_dir, check=True) @@ -207,25 +207,25 @@ def test_parse_git_output_empty_string(): assert result == "" -def test_get_github_repo_info_ssh_remote(git_repo): +def 
test_get_github_repo_info_ssh_remote(git_repo: Path): """Test getting GitHub repository information with SSH remote.""" result = get_github_repo_info(git_repo) assert result == "test-user/test-repo" -def test_get_github_repo_info_https_remote(git_repo_with_https_remote): +def test_get_github_repo_info_https_remote(git_repo_with_https_remote: Path): """Test getting GitHub repository information with HTTPS remote.""" result = get_github_repo_info(git_repo_with_https_remote) assert result == "test-user/test-repo" -def test_get_github_repo_info_multiple_remotes(git_repo_multiple_remotes): +def test_get_github_repo_info_multiple_remotes(git_repo_multiple_remotes: Path): """Test GitHub repo info retrieval with multiple remotes (origin preferred).""" result = get_github_repo_info(git_repo_multiple_remotes) assert result == "test-user/test-repo" -def test_get_current_git_hash(git_repo): +def test_get_current_git_hash(git_repo: Path): """Test getting current git hash.""" result = get_current_git_hash(git_repo) @@ -234,7 +234,7 @@ def test_get_current_git_hash(git_repo): assert all(c in "0123456789abcdef" for c in result) -def test_get_current_git_hash_invalid_repo(temp_dir): +def test_get_current_git_hash_invalid_repo(temp_dir: Path): """Test getting git hash from invalid repository.""" - with pytest.raises(Exception): + with pytest.raises(subprocess.CalledProcessError): get_current_git_hash(temp_dir) diff --git a/src/incremental.py b/src/incremental.py index 10513181..5699c6ec 100644 --- a/src/incremental.py +++ b/src/incremental.py @@ -20,7 +20,9 @@ import debugpy from sphinx.cmd.build import main as sphinx_main -from sphinx_autobuild.__main__ import main as sphinx_autobuild_main +from sphinx_autobuild.__main__ import ( + main as sphinx_autobuild_main, # type: ignore[reportUnknownVariableType] # sphinx_autobuild doesn't provide complete type annotations +) logger = logging.getLogger(__name__) diff --git a/src/tests/conftest.py b/src/tests/conftest.py index 
5bbd0e4a..953555da 100644 --- a/src/tests/conftest.py +++ b/src/tests/conftest.py @@ -10,7 +10,10 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -def pytest_addoption(parser): +import pytest + + +def pytest_addoption(parser: pytest.Parser): """Add custom command line options to pytest""" parser.addoption( "--repo", diff --git a/src/tests/test_consumer.py b/src/tests/test_consumer.py index e2dddaee..a8f8071f 100644 --- a/src/tests/test_consumer.py +++ b/src/tests/test_consumer.py @@ -16,8 +16,10 @@ from collections import defaultdict from dataclasses import dataclass, field from pathlib import Path +from typing import cast import pytest +from _pytest.config import Config from pytest import TempPathFactory from rich import print from rich.console import Console @@ -115,9 +117,9 @@ class Result: @pytest.fixture(scope="session") -def sphinx_base_dir(tmp_path_factory: TempPathFactory, pytestconfig) -> Path: +def sphinx_base_dir(tmp_path_factory: TempPathFactory, pytestconfig: Config) -> Path: """Create base directory for testing - either temporary or persistent cache""" - disable_cache = pytestconfig.getoption("--disable-cache") + disable_cache: bool = bool(pytestconfig.getoption("--disable-cache")) if disable_cache: # Use persistent cache directory for local development @@ -150,7 +152,7 @@ def filter_repos(repo_filter: str | None) -> list[ConsumerRepo]: return REPOS_TO_TEST requested_repos = [name.strip() for name in repo_filter.split(",")] - filtered_repos = [] + filtered_repos: list[ConsumerRepo] = [] for repo in REPOS_TO_TEST: if repo.name in requested_repos: @@ -212,7 +214,7 @@ def strip_ansi_codes(text: str) -> str: return ansi_escape.sub("", text) -def parse_bazel_output(BR: BuildOutput, pytestconfig) -> BuildOutput: +def parse_bazel_output(BR: BuildOutput, pytestconfig: Config) -> BuildOutput: err_lines = BR.stderr.splitlines() split_warnings = [x for x in err_lines if "WARNING: " in 
x] warning_dict: dict[str, list[str]] = defaultdict(list) @@ -355,7 +357,7 @@ def analyze_build_success(BR: BuildOutput) -> tuple[bool, str]: return True, "Build successful - no critical warnings" -def print_final_result(BR: BuildOutput, repo_name: str, cmd: str, pytestconfig): +def print_final_result(BR: BuildOutput, repo_name: str, cmd: str, pytestconfig: Config): """ Print your existing detailed output plus a clear success/failure summary """ @@ -422,12 +424,13 @@ def stream_subprocess_output(cmd: str, repo_name: str): # Stream output line by line output_lines = [] - for line in iter(process.stdout.readline, ""): - if line: - print(line.rstrip()) # Print immediately - output_lines.append(line) + if process.stdout is not None: + for line in iter(process.stdout.readline, ""): + if line: + print(line.rstrip()) # Print immediately + output_lines.append(line) - process.stdout.close() + process.stdout.close() return_code = process.wait() return BuildOutput( @@ -438,9 +441,13 @@ def stream_subprocess_output(cmd: str, repo_name: str): def run_cmd( - cmd: str, results: list[Result], repo_name: str, local_or_git: str, pytestconfig + cmd: str, + results: list[Result], + repo_name: str, + local_or_git: str, + pytestconfig: Config, ) -> tuple[list[Result], bool]: - verbosity = pytestconfig.get_verbosity() + verbosity: int = pytestconfig.get_verbosity() if verbosity >= 3: # Level 3 (-vvv): Stream output in real-time @@ -481,7 +488,7 @@ def run_test_commands(): pass -def setup_test_environment(sphinx_base_dir, pytestconfig): +def setup_test_environment(sphinx_base_dir: Path, pytestconfig: Config): """Set up the test environment and return necessary paths and metadata.""" git_root = find_git_root() @@ -491,9 +498,9 @@ def setup_test_environment(sphinx_base_dir, pytestconfig): current_hash = get_current_git_commit(git_root) os.chdir(Path(sphinx_base_dir).absolute()) - verbosity = pytestconfig.get_verbosity() + verbosity: int = pytestconfig.get_verbosity() - def 
debug_print(message): + def debug_print(message: str): if verbosity >= 2: print(f"[DEBUG] {message}") @@ -507,7 +514,7 @@ def debug_print(message): f"{has_uncommitted_changes(git_root)}" ) - def recreate_symlink(dest, target): + def recreate_symlink(dest: Path, target: Path): # Create symlink for local docs-as-code if dest.exists() or dest.is_symlink(): # Remove existing symlink/directory to recreate it @@ -538,7 +545,9 @@ def has_uncommitted_changes(path: Path) -> bool: return bool(result.stdout.strip()) -def prepare_repo_overrides(repo_name, git_url, current_hash, gh_url, use_cache=True): +def prepare_repo_overrides( + repo_name: str, git_url: str, current_hash: str, gh_url: str, use_cache: bool = True +): """Clone repo and prepare both local and git overrides.""" repo_path = Path(repo_name) @@ -573,10 +582,10 @@ def prepare_repo_overrides(repo_name, git_url, current_hash, gh_url, use_cache=T # Updated version of your test loop -def test_and_clone_repos_updated(sphinx_base_dir, pytestconfig): +def test_and_clone_repos_updated(sphinx_base_dir: Path, pytestconfig: Config): # Get command line options from pytest config - repo_tests = pytestconfig.getoption("--repo") - disable_cache = pytestconfig.getoption("--disable-cache") + repo_tests: str | None = cast(str | None, pytestconfig.getoption("--repo")) + disable_cache: bool = bool(pytestconfig.getoption("--disable-cache")) repos_to_test = filter_repos(repo_tests) From e0385b053ee8489aabc291c015b5270326060685 Mon Sep 17 00:00:00 2001 From: Nicolae Dicu Date: Mon, 8 Sep 2025 14:35:11 +0200 Subject: [PATCH 127/231] docs: Check optional links as info (#242) --- src/extensions/score_metamodel/__init__.py | 85 +++++++++++++++++++ .../score_metamodel/checks/check_options.py | 32 +++++-- 2 files changed, 111 insertions(+), 6 deletions(-) diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 672bc4cd..feb2f30b 100644 --- a/src/extensions/score_metamodel/__init__.py +++ 
b/src/extensions/score_metamodel/__init__.py @@ -13,6 +13,7 @@ import importlib import os import pkgutil +import re from collections.abc import Callable from dataclasses import dataclass, field from pathlib import Path @@ -134,6 +135,11 @@ def is_check_enabled(check: local_check_function | graph_check_function): logger.debug(f"Running local check {check} for need {need['id']}") check(app, need, log) + # External needs: run a focused, info-only check on optional_links patterns + # so that optional link issues from imported needs are visible but do not + # fail builds with -W. + # _check_external_optional_link_patterns(app, log) + # Graph-Based checks: These warnings require a graph of all other needs to # be checked. @@ -154,6 +160,85 @@ def is_check_enabled(check: local_check_function | graph_check_function): # TODO: exit code +def _remove_prefix(word: str, prefixes: list[str]) -> str: + for prefix in prefixes or []: + if isinstance(word, str) and word.startswith(prefix): + return word.removeprefix(prefix) + return word + + +def _get_need_type_for_need(app: Sphinx, need: NeedsInfoType): + need_type = None + for nt in app.config.needs_types: + try: + if nt["directive"] == need["type"]: + need_type = nt + break + except Exception: + continue + return need_type + + +def _validate_external_need_opt_links( + need: NeedsInfoType, + opt_links: dict[str, str], + allowed_prefixes: list[str], + log: CheckLogger, +) -> None: + for link_field, pattern in opt_links.items(): + raw_value: str | list[str] | None = need.get(link_field, None) + if raw_value in [None, [], ""]: + continue + + values: list[str | Any] = ( + raw_value if isinstance(raw_value, list) else [raw_value] + ) + for value in values: + v: str | Any + if isinstance(value, str): + v = _remove_prefix(value, allowed_prefixes) + else: + v = value + + try: + if not isinstance(v, str) or not re.match(pattern, v): + log.warning_for_option( + need, + link_field, + f"does not follow pattern `{pattern}`.", + 
is_new_check=True, + ) + except TypeError: + log.warning_for_option( + need, + link_field, + f"pattern `{pattern}` is not a valid regex pattern.", + is_new_check=True, + ) + + +def _check_external_optional_link_patterns(app: Sphinx, log: CheckLogger) -> None: + """Validate optional link patterns on external needs and log as info-only. + + Mirrors the original inline logic from ``_run_checks`` without changing behavior. + """ + needs_external_needs = ( + SphinxNeedsData(app.env).get_needs_view().filter_is_external(True) + ) + + for need in needs_external_needs.values(): + need_type = _get_need_type_for_need(app, need) + if not need_type: + continue + + opt_links = dict(need_type.get("opt_link", [])) + if not opt_links: + continue + + allowed_prefixes = app.config.allowed_external_prefixes + _validate_external_need_opt_links(need, opt_links, allowed_prefixes, log) + + def convert_checks_to_dataclass( checks_dict: dict[str, dict[str, Any]], ) -> list[ProhibitedWordCheck]: diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index 97d466b7..e962c58d 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -45,15 +45,31 @@ def _normalize_values(raw_value: str | list[str] | None) -> list[str]: def _validate_value_pattern( - value: str, pattern: str, need: NeedsInfoType, field: str, log: CheckLogger + value: str, + pattern: str, + need: NeedsInfoType, + field: str, + log: CheckLogger, + as_info: bool = False, ) -> None: - """Check if a value matches the given pattern, log warnings if not.""" + """Check if a value matches the given pattern and log the result. + + If ``as_info`` is True, mismatches are reported as info (non-failing) + messages, otherwise as warnings. 
+ """ try: if not re.match(pattern, value): - log.warning_for_option(need, field, f"does not follow pattern `{pattern}`.") + log.warning_for_option( + need, + field, + f"does not follow pattern `{pattern}`.", + is_new_check=as_info, + ) except TypeError: log.warning_for_option( - need, field, f"pattern `{pattern}` is not a valid regex pattern." + need, + field, + f"pattern `{pattern}` is not a valid regex pattern.", ) @@ -80,6 +96,8 @@ def remove_prefix(word: str, prefixes: list[str]) -> str: # Removes any prefix allowed by configuration, if prefix is there. return [word.removeprefix(prefix) for prefix in prefixes][0] + optional_link_as_info = (not required) and (field_type == "link") + for field, pattern in fields.items(): raw_value: str | list[str] | None = need.get(field, None) if raw_value in [None, [], ""]: @@ -101,7 +119,9 @@ def remove_prefix(word: str, prefixes: list[str]) -> str: for value in values: if allowed_prefixes: value = remove_prefix(value, allowed_prefixes) - _validate_value_pattern(value, pattern, need, field, log) + _validate_value_pattern( + value, pattern, need, field, log, as_info=optional_link_as_info + ) # req-Id: tool_req__docs_req_attr_reqtype @@ -140,7 +160,7 @@ def check_options( ], "link": [ (dict(need_options.get("req_link", [])), True), - # (dict(need_options.get("opt_link", [])), False), + (dict(need_options.get("opt_link", [])), False), ], } From c62f3d04a15a1e592fa2a2a6d5745fab1d239b0f Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Tue, 9 Sep 2025 08:14:47 +0200 Subject: [PATCH 128/231] Add Decision Record support (#236) Co-authored-by: Frank Scholter Peres --- .../001-test-results-in-workflow.md | 12 +++++++----- docs/internals/decisions_and_concepts/index.rst | 5 +++++ .../score_metamodel/checks/id_contains_feature.py | 4 ++++ src/extensions/score_metamodel/metamodel.yaml | 14 ++++++++++++++ 4 files changed, 30 insertions(+), 5 deletions(-) diff --git a/docs/internals/decisions_and_concepts/001-test-results-in-workflow.md 
b/docs/internals/decisions_and_concepts/001-test-results-in-workflow.md index a2e240fe..64cf1d2b 100644 --- a/docs/internals/decisions_and_concepts/001-test-results-in-workflow.md +++ b/docs/internals/decisions_and_concepts/001-test-results-in-workflow.md @@ -1,8 +1,10 @@ ---- -id: Docs-As-Code-DR-001 -status: "Draft" -owner: Infrastructure Community ---- +:::{dec_rec} Decision Record 001: Test results in Docs-As-Code Workflows +:id: dec_rec__dac__001_test_results_in_workflows +:status: accepted +:context: Need to embed test results into docs, but tests are slow. +:decision: Run quick docs checks and tests in parallel, then full docs generation sequentially. +:consequences: implementation effort +::: # Decision Record 001: Test results in Docs-As-Code Workflows diff --git a/docs/internals/decisions_and_concepts/index.rst b/docs/internals/decisions_and_concepts/index.rst index 09678b30..a403e372 100644 --- a/docs/internals/decisions_and_concepts/index.rst +++ b/docs/internals/decisions_and_concepts/index.rst @@ -6,3 +6,8 @@ Decisions And Concepts :glob: * + +.. needtable:: + :style: table + :types: dec_rec + :columns: id;context;decision;consequences;status diff --git a/src/extensions/score_metamodel/checks/id_contains_feature.py b/src/extensions/score_metamodel/checks/id_contains_feature.py index 55deeeed..a657b1c3 100644 --- a/src/extensions/score_metamodel/checks/id_contains_feature.py +++ b/src/extensions/score_metamodel/checks/id_contains_feature.py @@ -35,6 +35,10 @@ def id_contains_feature(app: Sphinx, need: NeedsInfoType, log: CheckLogger): # No warning needed here, as this is already checked in the metamodel. 
return + if parts[0] == "dec_rec": + # Decision records are intentionally not located within their components + return + # Get the part of the string after the first two underscores: the path feature = parts[1] if feature == "example_feature": diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 59ac8041..84c3e77d 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -848,6 +848,20 @@ needs_types: fully_verifies: ^.*$ partially_verifies: ^.*$ + # https://eclipse-score.github.io/process_description/main/permalink.html?id=gd_temp__change_decision_record + dec_rec: + title: Decision Record + prefix: dec_rec__ + mandatory_options: + id: ^dec_rec__.*__.*$ + status: ^(proposed|accepted|deprecated|rejected|superseded)$ + context: ^.*$ + decision: ^.*$ + optional_options: + consequences: ^.*$ + optional_links: + affects: ^.*$ + # Extra link types, which shall be available and allow need types to be linked to each other. # We use a dedicated linked type for each type of a connection, for instance from # a specification to a requirement. 
This makes filtering and visualization of such connections From 8e46554af0ff6b0589978adacfe0cb749faf4af6 Mon Sep 17 00:00:00 2001 From: Nicolae Dicu Date: Thu, 11 Sep 2025 09:30:23 +0200 Subject: [PATCH 129/231] tooling: Integrate CLI helper (#247) --- BUILD | 7 ++++++- docs.bzl | 12 ++++++++---- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/BUILD b/BUILD index a624d0a1..21a92e42 100644 --- a/BUILD +++ b/BUILD @@ -12,7 +12,7 @@ # ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_library") -load("@score_tooling//:defs.bzl", "copyright_checker") +load("@score_tooling//:defs.bzl", "cli_helper", "copyright_checker") load("//:docs.bzl", "docs") package(default_visibility = ["//visibility:public"]) @@ -35,3 +35,8 @@ docs( ], source_dir = "docs", ) + +cli_helper( + name = "cli-help", + visibility = ["//visibility:public"], +) diff --git a/docs.bzl b/docs.bzl index 23f10052..b5a93dd1 100644 --- a/docs.bzl +++ b/docs.bzl @@ -53,6 +53,10 @@ def docs(source_dir = "docs", data = [], deps = []): """ data = data + ["@score_docs_as_code//src:docs_assets"] + call_path = native.package_name() + + if call_path != "": + fail("docs() must be called from the root package. 
Current package: " + call_path) deps = deps + all_requirements + [ "@score_docs_as_code//src:plantuml_for_python", @@ -68,7 +72,7 @@ def docs(source_dir = "docs", data = [], deps = []): py_binary( name = "docs", - tags = ["cli_help=Build documentation [run]"], + tags = ["cli_help=Build documentation:\nbazel run //:docs"], srcs = ["@score_docs_as_code//src:incremental.py"], data = data, deps = deps, @@ -81,7 +85,7 @@ def docs(source_dir = "docs", data = [], deps = []): py_binary( name = "docs_check", - tags = ["cli_help=Verify documentation [run]"], + tags = ["cli_help=Verify documentation:\nbazel run //:docs_check"], srcs = ["@score_docs_as_code//src:incremental.py"], data = data, deps = deps, @@ -94,7 +98,7 @@ def docs(source_dir = "docs", data = [], deps = []): py_binary( name = "live_preview", - tags = ["cli_help=Live preview documentation in the browser [run]"], + tags = ["cli_help=Live preview documentation in the browser:\nbazel run //:live_preview"], srcs = ["@score_docs_as_code//src:incremental.py"], data = data, deps = deps, @@ -107,7 +111,7 @@ def docs(source_dir = "docs", data = [], deps = []): score_virtualenv( name = "ide_support", - tags = ["cli_help=Create virtual environment (.venv_docs) for documentation support [run]"], + tags = ["cli_help=Create virtual environment (.venv_docs) for documentation support:\nbazel run //:ide_support"], venv_name = ".venv_docs", reqs = deps, # Add dependencies to ide_support, so esbonio has access to them. 
From 12d61285f610f641c88c9d5bef604211715b5f63 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Fri, 12 Sep 2025 13:08:34 +0200 Subject: [PATCH 130/231] update colors in score.css (#248) --- src/assets/css/score.css | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/src/assets/css/score.css b/src/assets/css/score.css index 2333c9df..705f3b85 100644 --- a/src/assets/css/score.css +++ b/src/assets/css/score.css @@ -12,10 +12,15 @@ html { The base color is TEAL */ - html[data-theme="light"] { - --pst-color-primary: #547980; - --pst-color-secondary: #45ADA8; + /* Menu svg items */ + --pst-color-muted: #FFF; + /* Text color within cards (same background as page header) */ + --sd-color-card-text: #a382c5; + /* Link color + menu items on hover color */ + --pst-color-primary: #7c4daa; + /* Page Header */ + --pst-color-secondary: #2D1942; --pst-color-accent: #9DE0AD; --pst-color-target: #E5FCC2; --pst-color-on-surface: #594F4F; @@ -26,8 +31,12 @@ html[data-theme="light"] { } html[data-theme="dark"] { - --pst-color-primary: #45ADA8; - --pst-color-secondary: #547980; + /* Link color */ + --pst-color-primary: #FFFFFF; + /* Link color on hover */ + --pst-color-link-hover: #FFFFFF; + /* Page Header */ + --pst-color-secondary: #2D1942; --pst-color-accent: #9DE0AD; --pst-color-target: #E5FCC2; --pst-color-on-surface: #594F4F; @@ -252,4 +261,3 @@ html[data-theme="dark"] #score-title { overflow-x: hidden; box-sizing: border-box; } - From d4b230f63120131c73654f1e995b72ce9eda5599 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Mon, 15 Sep 2025 14:16:14 +0200 Subject: [PATCH 131/231] fix color and make score_layout self-contained (#249) --- MODULE.bazel | 2 +- docs.bzl | 1 - src/BUILD | 11 ------- src/extensions/score_layout/BUILD | 17 ++++++---- src/extensions/score_layout/__init__.py | 32 +++++++++---------- .../score_layout}/assets/css/score.css | 7 ++++ .../score_layout}/assets/css/score_design.css | 0 
.../score_layout}/assets/css/score_needs.css | 0 .../assets/puml-theme-score.puml | 0 9 files changed, 35 insertions(+), 35 deletions(-) rename src/{ => extensions/score_layout}/assets/css/score.css (97%) rename src/{ => extensions/score_layout}/assets/css/score_design.css (100%) rename src/{ => extensions/score_layout}/assets/css/score_needs.css (100%) rename src/{ => extensions/score_layout}/assets/puml-theme-score.puml (100%) diff --git a/MODULE.bazel b/MODULE.bazel index bf432a73..e099e186 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "1.2.0", + version = "1.3.0", compatibility_level = 1, ) diff --git a/docs.bzl b/docs.bzl index b5a93dd1..665637b8 100644 --- a/docs.bzl +++ b/docs.bzl @@ -52,7 +52,6 @@ def docs(source_dir = "docs", data = [], deps = []): By using this function, you'll get any and all updates for documentation targets in one place. """ - data = data + ["@score_docs_as_code//src:docs_assets"] call_path = native.package_name() if call_path != "": diff --git a/src/BUILD b/src/BUILD index 15a8b3f5..43394098 100644 --- a/src/BUILD +++ b/src/BUILD @@ -70,17 +70,6 @@ compile_pip_requirements( ], ) -filegroup( - name = "docs_assets", - srcs = glob( - [ - "assets/**/*", - ], - allow_empty = True, - ), - visibility = ["//visibility:public"], -) - # Running this executes the `collect_source_files.bzl` aspect. 
# Collects all source files from specified targets in 'deps', and makes them available for parsing for the source_code_linker diff --git a/src/extensions/score_layout/BUILD b/src/extensions/score_layout/BUILD index cbdc4a47..d6d47274 100644 --- a/src/extensions/score_layout/BUILD +++ b/src/extensions/score_layout/BUILD @@ -11,15 +11,20 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_library") -load("@pip_process//:requirements.bzl", "all_requirements") +load("@pip_process//:requirements.bzl", "requirement") py_library( name = "score_layout", - srcs = glob( - ["*.py"], - ), + srcs = glob([ + "*.py", + # Adding assets as src instead of data ensures they are included in the + # library as they would normally be, and we do not need to go through bazel's + # RUNFILES_DIR mechanism to access them. This makes the code much simpler. + # And it makes the library far easier extractable from bazel into a normal + # python package if we ever want to do that. 
+ "assets/**", + ]), imports = ["."], visibility = ["//visibility:public"], - # TODO: Figure out if all requirements are needed or if we can break it down a bit - deps = all_requirements, + deps = [requirement("sphinx")], ) diff --git a/src/extensions/score_layout/__init__.py b/src/extensions/score_layout/__init__.py index 914b5951..6d54f0b7 100644 --- a/src/extensions/score_layout/__init__.py +++ b/src/extensions/score_layout/__init__.py @@ -10,7 +10,7 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -import os +import logging from pathlib import Path from typing import Any @@ -18,8 +18,12 @@ import sphinx_options from sphinx.application import Sphinx +logger = logging.getLogger(__name__) + def setup(app: Sphinx) -> dict[str, str | bool]: + logger.debug("score_layout setup called") + app.connect("config-inited", update_config) return { "version": "0.1", @@ -29,27 +33,23 @@ def setup(app: Sphinx) -> dict[str, str | bool]: def update_config(app: Sphinx, _config: Any): + logger.debug("score_layout update_config called") + app.config.needs_layouts = sphinx_options.needs_layouts app.config.needs_global_options = sphinx_options.needs_global_options app.config.html_theme = html_options.html_theme app.config.html_context = html_options.return_html_context(app) app.config.html_theme_options = html_options.return_html_theme_options(app) - # Setting HTML static path - if r := os.getenv("RUNFILES_DIR"): - if (Path(r) / "score_docs_as_code+").exists(): - # Docs-as-code used as a module with bazel 8 - module = "score_docs_as_code+" - elif (Path(r) / "score_docs_as_code~").exists(): - # Docs-as-code used as a module with bazel 7 - module = "score_docs_as_code~" - else: - # Docs-as-code is the current module - module = "_main" - app.config.html_static_path.append(str(Path(r) / module / "src/assets")) - - puml = Path(r) / module / "src/assets/puml-theme-score.puml" - app.config.needs_flow_configs = 
{"score_config": f"!include {puml}"} + logger.debug(f"score_layout __file__: {__file__}") + + score_layout_path = Path(__file__).parent.resolve() + logger.debug(f"score_layout_path: {score_layout_path}") + + app.config.html_static_path.append(str(score_layout_path / "assets")) + + puml = score_layout_path / "assets" / "puml-theme-score.puml" + app.config.needs_flow_configs = {"score_config": f"!include {puml}"} app.add_css_file("css/score.css", priority=500) app.add_css_file("css/score_needs.css", priority=500) diff --git a/src/assets/css/score.css b/src/extensions/score_layout/assets/css/score.css similarity index 97% rename from src/assets/css/score.css rename to src/extensions/score_layout/assets/css/score.css index 705f3b85..191376e4 100644 --- a/src/assets/css/score.css +++ b/src/extensions/score_layout/assets/css/score.css @@ -12,7 +12,14 @@ html { The base color is TEAL */ +/* TODO: Does not work as intended */ +.version-switcher__container[data-theme="light"] { + background-color: #a382c5; +} + html[data-theme="light"] { + /* Search Box background color. 
*/ + --bs-btn-hover-bg: #a382c5; /* Menu svg items */ --pst-color-muted: #FFF; /* Text color within cards (same background as page header) */ diff --git a/src/assets/css/score_design.css b/src/extensions/score_layout/assets/css/score_design.css similarity index 100% rename from src/assets/css/score_design.css rename to src/extensions/score_layout/assets/css/score_design.css diff --git a/src/assets/css/score_needs.css b/src/extensions/score_layout/assets/css/score_needs.css similarity index 100% rename from src/assets/css/score_needs.css rename to src/extensions/score_layout/assets/css/score_needs.css diff --git a/src/assets/puml-theme-score.puml b/src/extensions/score_layout/assets/puml-theme-score.puml similarity index 100% rename from src/assets/puml-theme-score.puml rename to src/extensions/score_layout/assets/puml-theme-score.puml From dd04c72f2dca49e12edf822906a49f4f45dacd82 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Mon, 15 Sep 2025 22:33:17 +0200 Subject: [PATCH 132/231] score_metamodel cleanup (#250) --- src/extensions/score_metamodel/__init__.py | 227 ++---------------- .../score_metamodel/metamodel_types.py | 30 +++ .../tests/test_metamodel_load.py | 41 ++-- src/extensions/score_metamodel/yaml_parser.py | 205 ++++++++++++++++ 4 files changed, 276 insertions(+), 227 deletions(-) create mode 100644 src/extensions/score_metamodel/metamodel_types.py create mode 100644 src/extensions/score_metamodel/yaml_parser.py diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index feb2f30b..b287b4a6 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -15,18 +15,29 @@ import pkgutil import re from collections.abc import Callable -from dataclasses import dataclass, field from pathlib import Path -from typing import Any, cast +from typing import Any -from ruamel.yaml import YAML from sphinx.application import Sphinx from sphinx_needs import logging -from 
sphinx_needs.config import NeedType from sphinx_needs.data import NeedsInfoType, NeedsView, SphinxNeedsData -from .external_needs import connect_external_needs -from .log import CheckLogger +from src.extensions.score_metamodel.external_needs import connect_external_needs +from src.extensions.score_metamodel.log import CheckLogger + +# Import and re-export some types and functions for easier access +from src.extensions.score_metamodel.metamodel_types import ( + ProhibitedWordCheck as ProhibitedWordCheck, +) +from src.extensions.score_metamodel.metamodel_types import ( + ScoreNeedType as ScoreNeedType, +) +from src.extensions.score_metamodel.yaml_parser import ( + default_options as default_options, +) +from src.extensions.score_metamodel.yaml_parser import ( + load_metamodel_data as load_metamodel_data, +) logger = logging.get_logger(__name__) @@ -37,21 +48,6 @@ graph_checks: list[graph_check_function] = [] -@dataclass -class ScoreNeedType(NeedType): - tags: list[str] - parts: int - - -@dataclass -class ProhibitedWordCheck: - name: str - option_check: dict[str, list[str]] = field( - default_factory=dict - ) # { Option: [Forbidden words]} - types: list[str] = field(default_factory=list) - - def parse_checks_filter(filter: str) -> list[str]: """ Parses a comma-separated list of check names. @@ -239,185 +235,6 @@ def _check_external_optional_link_patterns(app: Sphinx, log: CheckLogger) -> Non _validate_external_need_opt_links(need, opt_links, allowed_prefixes, log) -def convert_checks_to_dataclass( - checks_dict: dict[str, dict[str, Any]], -) -> list[ProhibitedWordCheck]: - return [ - ProhibitedWordCheck( - name=check_name, - option_check={k: v for k, v in check_config.items() if k != "types"}, - types=check_config.get("types", []), - ) - for check_name, check_config in checks_dict.items() - ] - - -def load_metamodel_data() -> dict[str, Any]: - """ - Load and process metamodel.yaml. 
- - Returns: - dict: A dictionary with keys: - - 'needs_types': A list of processed need types. - - 'needs_extra_links': A list of extra link definitions. - - 'needs_extra_options': A sorted list of all option keys. - """ - yaml_path = Path(__file__).resolve().parent / "metamodel.yaml" - - yaml = YAML() - with open(yaml_path, encoding="utf-8") as f: - data = cast(dict[str, Any], yaml.load(f)) - - # Access the custom validation block - - types_dict = cast(dict[str, Any], data.get("needs_types", {})) - links_dict = cast(dict[str, Any], data.get("needs_extra_links", {})) - graph_check_dict = cast(dict[str, Any], data.get("graph_checks", {})) - global_base_options = cast(dict[str, Any], data.get("needs_types_base_options", {})) - global_base_options_optional_opts = cast( - dict[str, Any], global_base_options.get("optional_options", {}) - ) - - # Get the stop_words and weak_words as separate lists - proh_checks_dict = cast( - dict[str, dict[str, Any]], data.get("prohibited_words_checks", {}) - ) - prohibited_words_checks = convert_checks_to_dataclass(proh_checks_dict) - - # Default options by sphinx, sphinx-needs or anything else we need to account for - default_options_list = default_options() - - # Convert "types" from {directive_name: {...}, ...} to a list of dicts - needs_types_list = [] - - all_options: set[str] = set() - for directive_name, directive_data in types_dict.items(): - directive_name = cast(str, directive_name) - directive_data = cast(dict[str, Any], directive_data) - # Build up a single "needs_types" item - one_type: dict[str, Any] = { - "directive": directive_name, - "title": directive_data.get("title", ""), - "prefix": directive_data.get("prefix", ""), - } - - if "color" in directive_data: - one_type["color"] = directive_data["color"] - if "style" in directive_data: - one_type["style"] = directive_data["style"] - - # Store mandatory_options and optional_options directly as a dict - mandatory_options = cast( - dict[str, Any], 
directive_data.get("mandatory_options", {}) - ) - one_type["mandatory_options"] = mandatory_options - tags = cast(list[str], directive_data.get("tags", [])) - one_type["tags"] = tags - parts = cast(int, directive_data.get("parts", 3)) - one_type["parts"] = parts - - optional_options = cast( - dict[str, Any], directive_data.get("optional_options", {}) - ) - optional_options.update(global_base_options_optional_opts) - one_type["opt_opt"] = optional_options - - all_options.update(list(mandatory_options.keys())) - all_options.update(list(optional_options.keys())) - - # mandatory_links => "req_link" - mand_links_yaml = cast( - dict[str, Any], directive_data.get("mandatory_links", {}) - ) - if mand_links_yaml: - one_type["req_link"] = [ - (cast(str, k), cast(Any, v)) for k, v in mand_links_yaml.items() - ] - - # optional_links => "opt_link" - opt_links_yaml = cast(dict[str, Any], directive_data.get("optional_links", {})) - if opt_links_yaml: - one_type["opt_link"] = [ - (cast(str, k), cast(Any, v)) for k, v in opt_links_yaml.items() - ] - - needs_types_list.append(one_type) - - # Convert "links" dict -> list of {"option", "incoming", "outgoing"} - needs_extra_links_list: list[dict[str, str]] = [] - for link_option, link_data in links_dict.items(): - link_option = cast(str, link_option) - link_data = cast(dict[str, Any], link_data) - needs_extra_links_list.append( - { - "option": link_option, - "incoming": link_data.get("incoming", ""), - "outgoing": link_data.get("outgoing", ""), - } - ) - - # We have to remove all 'default options' from the extra options. - # As otherwise sphinx errors, due to an option being registered twice. 
- # They are still inside the extra options we extract to enable - # constraint checking via regex - needs_extra_options: list[str] = sorted(all_options - set(default_options_list)) - - return { - "prohibited_words_checks": prohibited_words_checks, - # "weak_words": weak_words_list, - "needs_types": needs_types_list, - "needs_extra_links": needs_extra_links_list, - "needs_extra_options": needs_extra_options, - "needs_graph_check": graph_check_dict, - } - - -def default_options() -> list[str]: - """ - Helper function to get a list of all default options defined by - sphinx, sphinx-needs etc. - """ - return [ - "target_id", - "id", - "status", - "docname", - "lineno", - "type", - "lineno_content", - "doctype", - "content", - "type_name", - "type_color", - "type_style", - "title", - "full_title", - "layout", - "template", - "id_parent", - "id_complete", - "external_css", - "sections", - "section_name", - "type_prefix", - "constraints_passed", - "collapse", - "hide", - "delete", - "jinja_content", - "is_part", - "is_need", - "is_external", - "is_modified", - "modifications", - "has_dead_links", - "has_forbidden_dead_links", - "tags", - "arch", - "parts", - ] - - def setup(app: Sphinx) -> dict[str, str | bool]: app.add_config_value("external_needs_source", "", rebuild="env") app.add_config_value("allowed_external_prefixes", [], rebuild="env") @@ -428,11 +245,11 @@ def setup(app: Sphinx) -> dict[str, str | bool]: metamodel = load_metamodel_data() # Assign everything to Sphinx config - app.config.needs_types = metamodel["needs_types"] - app.config.needs_extra_links = metamodel["needs_extra_links"] - app.config.needs_extra_options = metamodel["needs_extra_options"] - app.config.graph_checks = metamodel["needs_graph_check"] - app.config.prohibited_words_checks = metamodel["prohibited_words_checks"] + app.config.needs_types = metamodel.needs_types + app.config.needs_extra_links = metamodel.needs_extra_links + app.config.needs_extra_options = metamodel.needs_extra_options + 
app.config.graph_checks = metamodel.needs_graph_check + app.config.prohibited_words_checks = metamodel.prohibited_words_checks # app.config.stop_words = metamodel["stop_words"] # app.config.weak_words = metamodel["weak_words"] diff --git a/src/extensions/score_metamodel/metamodel_types.py b/src/extensions/score_metamodel/metamodel_types.py new file mode 100644 index 00000000..95449fd6 --- /dev/null +++ b/src/extensions/score_metamodel/metamodel_types.py @@ -0,0 +1,30 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +from dataclasses import dataclass, field + +from sphinx_needs.config import NeedType + + +@dataclass +class ProhibitedWordCheck: + name: str + option_check: dict[str, list[str]] = field( + default_factory=dict + ) # { Option: [Forbidden words]} + types: list[str] = field(default_factory=list) + + +class ScoreNeedType(NeedType): + tags: list[str] + parts: int diff --git a/src/extensions/score_metamodel/tests/test_metamodel_load.py b/src/extensions/score_metamodel/tests/test_metamodel_load.py index 3411735f..2a166675 100644 --- a/src/extensions/score_metamodel/tests/test_metamodel_load.py +++ b/src/extensions/score_metamodel/tests/test_metamodel_load.py @@ -33,49 +33,46 @@ def test_load_metamodel_data(): result = load_metamodel_data() # Assertions - assert "needs_types" in result - assert len(result["needs_types"]) == 1 - assert result["needs_types"][0]["directive"] == "type1" - assert result["needs_types"][0]["title"] == "Type 1" - 
assert result["needs_types"][0]["prefix"] == "T1" - assert result["needs_types"][0]["color"] == "blue" - assert result["needs_types"][0]["style"] == "bold" - assert result["needs_types"][0]["mandatory_options"] == {"opt1": "value1"} - assert result["needs_types"][0]["opt_opt"] == { + assert len(result.needs_types) == 1 + assert result.needs_types[0]["directive"] == "type1" + assert result.needs_types[0]["title"] == "Type 1" + assert result.needs_types[0]["prefix"] == "T1" + assert result.needs_types[0]["color"] == "blue" + assert result.needs_types[0]["style"] == "bold" + assert result.needs_types[0]["mandatory_options"] == {"opt1": "value1"} + assert result.needs_types[0]["opt_opt"] == { "opt2": "value2", "opt3": "value3", "global_opt": "global_value", } - assert result["needs_types"][0]["req_link"] == [("link1", "value1")] - assert result["needs_types"][0]["opt_link"] == [("link2", "value2")] + assert result.needs_types[0]["req_link"] == [("link1", "value1")] + assert result.needs_types[0]["opt_link"] == [("link2", "value2")] - assert "needs_extra_links" in result - assert len(result["needs_extra_links"]) == 1 - assert result["needs_extra_links"][0] == { + assert len(result.needs_extra_links) == 1 + assert result.needs_extra_links[0] == { "option": "link_option1", "incoming": "incoming1", "outgoing": "outgoing1", } - assert "needs_extra_options" in result - assert result["needs_extra_options"] == ["global_opt", "opt1", "opt2", "opt3"] + assert result.needs_extra_options == ["global_opt", "opt1", "opt2", "opt3"] - assert "prohibited_words_checks" in result - assert result["prohibited_words_checks"][0] == ProhibitedWordCheck( + assert result.prohibited_words_checks[0] == ProhibitedWordCheck( name="title_check", option_check={"title": ["stop_word1"]} ) - assert result["prohibited_words_checks"][1] == ProhibitedWordCheck( + assert result.prohibited_words_checks[1] == ProhibitedWordCheck( name="content_check", option_check={"content": ["weak_word1"]}, 
types=["req_type"], ) - assert "needs_graph_check" in result - assert result["needs_graph_check"]["needs_graph_check"]["needs"] == { + defined_graph_check = result.needs_graph_check["needs_graph_check"] + assert isinstance(defined_graph_check, dict) + assert defined_graph_check["needs"] == { "include": "type1", "condition": "opt1 == test", } - assert result["needs_graph_check"]["needs_graph_check"]["check"] == { + assert defined_graph_check["check"] == { "link1": "opt1 == test", } diff --git a/src/extensions/score_metamodel/yaml_parser.py b/src/extensions/score_metamodel/yaml_parser.py new file mode 100644 index 00000000..d5273974 --- /dev/null +++ b/src/extensions/score_metamodel/yaml_parser.py @@ -0,0 +1,205 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +"""Functionality related to reading in the SCORE metamodel.yaml""" + +from dataclasses import dataclass +from pathlib import Path +from typing import Any, cast + +from ruamel.yaml import YAML +from sphinx_needs import logging + +from src.extensions.score_metamodel.metamodel_types import ProhibitedWordCheck + +logger = logging.get_logger(__name__) + + +@dataclass +class MetaModelData: + needs_types: list[dict[str, object]] + needs_extra_links: list[dict[str, str]] + needs_extra_options: list[str] + prohibited_words_checks: list[ProhibitedWordCheck] + needs_graph_check: dict[str, object] + + +def convert_checks_to_dataclass( + checks_dict: dict[str, dict[str, Any]], +) -> 
list[ProhibitedWordCheck]: + return [ + ProhibitedWordCheck( + name=check_name, + option_check={k: v for k, v in check_config.items() if k != "types"}, + types=check_config.get("types", []), + ) + for check_name, check_config in checks_dict.items() + ] + + +def default_options() -> list[str]: + """ + Helper function to get a list of all default options defined by + sphinx, sphinx-needs etc. + """ + return [ + "target_id", + "id", + "status", + "docname", + "lineno", + "type", + "lineno_content", + "doctype", + "content", + "type_name", + "type_color", + "type_style", + "title", + "full_title", + "layout", + "template", + "id_parent", + "id_complete", + "external_css", + "sections", + "section_name", + "type_prefix", + "constraints_passed", + "collapse", + "hide", + "delete", + "jinja_content", + "is_part", + "is_need", + "is_external", + "is_modified", + "modifications", + "has_dead_links", + "has_forbidden_dead_links", + "tags", + "arch", + "parts", + ] + + +def load_metamodel_data() -> MetaModelData: + """ + Load metamodel.yaml and prepare data fields as needed for sphinx-needs. 
+ """ + yaml_path = Path(__file__).resolve().parent / "metamodel.yaml" + + yaml = YAML() + with open(yaml_path, encoding="utf-8") as f: + data = cast(dict[str, Any], yaml.load(f)) + + # Access the custom validation block + + types_dict = cast(dict[str, Any], data.get("needs_types", {})) + links_dict = cast(dict[str, Any], data.get("needs_extra_links", {})) + graph_check_dict = cast(dict[str, Any], data.get("graph_checks", {})) + global_base_options = cast(dict[str, Any], data.get("needs_types_base_options", {})) + global_base_options_optional_opts = cast( + dict[str, Any], global_base_options.get("optional_options", {}) + ) + + # Get the stop_words and weak_words as separate lists + proh_checks_dict = cast( + dict[str, dict[str, Any]], data.get("prohibited_words_checks", {}) + ) + prohibited_words_checks = convert_checks_to_dataclass(proh_checks_dict) + + # Default options by sphinx, sphinx-needs or anything else we need to account for + default_options_list = default_options() + + # Convert "types" from {directive_name: {...}, ...} to a list of dicts + needs_types_list = [] + + all_options: set[str] = set() + for directive_name, directive_data in types_dict.items(): + directive_name = cast(str, directive_name) + directive_data = cast(dict[str, Any], directive_data) + # Build up a single "needs_types" item + one_type: dict[str, Any] = { + "directive": directive_name, + "title": directive_data.get("title", ""), + "prefix": directive_data.get("prefix", ""), + } + + if "color" in directive_data: + one_type["color"] = directive_data["color"] + if "style" in directive_data: + one_type["style"] = directive_data["style"] + + # Store mandatory_options and optional_options directly as a dict + mandatory_options = cast( + dict[str, Any], directive_data.get("mandatory_options", {}) + ) + one_type["mandatory_options"] = mandatory_options + tags = cast(list[str], directive_data.get("tags", [])) + one_type["tags"] = tags + parts = cast(int, directive_data.get("parts", 3)) + 
one_type["parts"] = parts + + optional_options = cast( + dict[str, Any], directive_data.get("optional_options", {}) + ) + optional_options.update(global_base_options_optional_opts) + one_type["opt_opt"] = optional_options + + all_options.update(list(mandatory_options.keys())) + all_options.update(list(optional_options.keys())) + + # mandatory_links => "req_link" + mand_links_yaml = cast( + dict[str, Any], directive_data.get("mandatory_links", {}) + ) + if mand_links_yaml: + one_type["req_link"] = [ + (cast(str, k), cast(Any, v)) for k, v in mand_links_yaml.items() + ] + + # optional_links => "opt_link" + opt_links_yaml = cast(dict[str, Any], directive_data.get("optional_links", {})) + if opt_links_yaml: + one_type["opt_link"] = [ + (cast(str, k), cast(Any, v)) for k, v in opt_links_yaml.items() + ] + + needs_types_list.append(one_type) + + # Convert "links" dict -> list of {"option", "incoming", "outgoing"} + needs_extra_links_list: list[dict[str, str]] = [] + for link_option, link_data in links_dict.items(): + link_option = cast(str, link_option) + link_data = cast(dict[str, Any], link_data) + needs_extra_links_list.append( + { + "option": link_option, + "incoming": link_data.get("incoming", ""), + "outgoing": link_data.get("outgoing", ""), + } + ) + + # We have to remove all 'default options' from the extra options. + # As otherwise sphinx errors, due to an option being registered twice. 
+ # They are still inside the extra options we extract to enable + # constraint checking via regex + needs_extra_options: list[str] = sorted(all_options - set(default_options_list)) + + return MetaModelData( + needs_types=needs_types_list, + needs_extra_links=needs_extra_links_list, + needs_extra_options=needs_extra_options, + prohibited_words_checks=prohibited_words_checks, + needs_graph_check=graph_check_dict, + ) From d3cb0599748a89d0c6714553cde96103d4603e99 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Mon, 15 Sep 2025 22:33:33 +0200 Subject: [PATCH 133/231] fix version switcher (#251) --- src/extensions/score_layout/assets/css/score.css | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/extensions/score_layout/assets/css/score.css b/src/extensions/score_layout/assets/css/score.css index 191376e4..a3b4fb91 100644 --- a/src/extensions/score_layout/assets/css/score.css +++ b/src/extensions/score_layout/assets/css/score.css @@ -12,9 +12,14 @@ html { The base color is TEAL */ -/* TODO: Does not work as intended */ -.version-switcher__container[data-theme="light"] { - background-color: #a382c5; +/* Version Switcher Button */ +button.btn.version-switcher__button { + color: #FFF; +} + +/* Version Switcher Menu */ +.version-switcher__menu a.list-group-item { + color: #FFF; } html[data-theme="light"] { From ceae78a920e0d70caaae93ef7dd964f6faa0a7ac Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Tue, 16 Sep 2025 10:09:12 +0200 Subject: [PATCH 134/231] refactor: improve internal metamodel storage (#252) --- src/extensions/score_metamodel/__init__.py | 37 ++--- .../score_metamodel/checks/check_options.py | 83 ++++------ src/extensions/score_metamodel/log.py | 15 +- .../score_metamodel/metamodel_types.py | 4 + .../score_metamodel/tests/__init__.py | 4 +- .../tests/test_check_options.py | 142 +++--------------- .../tests/test_metamodel_load.py | 10 +- src/extensions/score_metamodel/yaml_parser.py | 73 ++++----- 8 files changed, 107 
insertions(+), 261 deletions(-) diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index b287b4a6..fc0b7590 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -143,17 +143,18 @@ def is_check_enabled(check: local_check_function | graph_check_function): logger.debug(f"Running graph check {check} for all needs") check(app, needs_all_needs, log) - if log.has_warnings: - logger.warning("Some needs have issues. See the log for more information.") + if log.warnings: + logger.warning( + f"{log.warnings} needs have issues. See the log for more information." + ) - if log.has_infos: + if log.infos: log.flush_new_checks() logger.info( - "\n\nThese next warnings are displayed as info statements for now. " - "They will become real warnings in the future. " + f"\nThe {log.infos} warnings above are non fatal for now. " + "They will become fatal in the future. " "Please fix them as soon as possible.\n" ) - # TODO: exit code def _remove_prefix(word: str, prefixes: list[str]) -> str: @@ -163,16 +164,11 @@ def _remove_prefix(word: str, prefixes: list[str]) -> str: return word -def _get_need_type_for_need(app: Sphinx, need: NeedsInfoType): - need_type = None +def _get_need_type_for_need(app: Sphinx, need: NeedsInfoType) -> ScoreNeedType: for nt in app.config.needs_types: - try: - if nt["directive"] == need["type"]: - need_type = nt - break - except Exception: - continue - return need_type + if nt["directive"] == need["type"]: + return nt + raise ValueError(f"Need type {need['type']} not found in needs_types") def _validate_external_need_opt_links( @@ -224,15 +220,10 @@ def _check_external_optional_link_patterns(app: Sphinx, log: CheckLogger) -> Non for need in needs_external_needs.values(): need_type = _get_need_type_for_need(app, need) - if not need_type: - continue - - opt_links = dict(need_type.get("opt_link", [])) - if not opt_links: - continue - allowed_prefixes = 
app.config.allowed_external_prefixes - _validate_external_need_opt_links(need, opt_links, allowed_prefixes, log) + if opt_links := need_type["optional_links"]: + allowed_prefixes = app.config.allowed_external_prefixes + _validate_external_need_opt_links(need, opt_links, allowed_prefixes, log) def setup(app: Sphinx) -> dict[str, str | bool]: diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index e962c58d..8ac26423 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -21,9 +21,6 @@ from sphinx.application import Sphinx from sphinx_needs.data import NeedsInfoType -FieldCheck = tuple[dict[str, str], bool] -CheckingDictType = dict[str, list[FieldCheck]] - def get_need_type(needs_types: list[ScoreNeedType], directive: str) -> ScoreNeedType: for need_type in needs_types: @@ -142,41 +139,28 @@ def check_options( """ production_needs_types = app.config.needs_types - try: - need_options = get_need_type(production_needs_types, need["type"]) - except ValueError: - log.warning_for_option(need, "type", "no type info defined for semantic check.") - return - - if not need_options.get("mandatory_options", {}): - log.warning_for_option(need, "type", "no type info defined for semantic check.") - return - - # Validate Options and Links - checking_dict: CheckingDictType = { - "option": [ - (need_options.get("mandatory_options", {}), True), - (need_options.get("opt_opt", {}), False), - ], - "link": [ - (dict(need_options.get("req_link", [])), True), - (dict(need_options.get("opt_link", [])), False), - ], - } + need_options = get_need_type(production_needs_types, need["type"]) # If undefined this is an empty list allowed_prefixes = app.config.allowed_external_prefixes - for field_type, check_fields in checking_dict.items(): - for field_values, is_required in check_fields: - validate_fields( - need, - log, - field_values, - 
required=is_required, - field_type=field_type, - allowed_prefixes=allowed_prefixes, - ) + # Validate Options and Links + field_validations = [ + ("option", need_options["mandatory_options"], True), + ("option", need_options["optional_options"], False), + ("link", need_options["mandatory_links"], True), + ("link", need_options["optional_links"], False), + ] + + for field_type, field_values, is_required in field_validations: + validate_fields( + need, + log, + field_values, + required=is_required, + field_type=field_type, + allowed_prefixes=allowed_prefixes, + ) @local_check @@ -193,25 +177,18 @@ def check_extra_options( production_needs_types = app.config.needs_types default_options_list = default_options() - try: - need_options = get_need_type(production_needs_types, need["type"]) - except ValueError: - msg = "no type info defined for semantic check." - log.warning_for_option(need, "type", msg) - return - - required_options: dict[str, str] = need_options.get("mandatory_options", {}) - optional_options: dict[str, str] = need_options.get("opt_opt", {}) - required_links: list[str] = [x[0] for x in need_options.get("req_link", ())] - optional_links: list[str] = [x[0] for x in need_options.get("opt_link", ())] - - allowed_options = ( - list(required_options.keys()) - + list(optional_options.keys()) - + required_links - + optional_links - + default_options_list - ) + need_options = get_need_type(production_needs_types, need["type"]) + + # list() creates a copy to avoid modifying the original + allowed_options = list(default_options_list) + + for o in ( + "mandatory_options", + "optional_options", + "mandatory_links", + "optional_links", + ): + allowed_options.extend(need_options[o].keys()) extra_options = [ option diff --git a/src/extensions/score_metamodel/log.py b/src/extensions/score_metamodel/log.py index 8c101cbb..d2953dec 100644 --- a/src/extensions/score_metamodel/log.py +++ b/src/extensions/score_metamodel/log.py @@ -90,12 +90,12 @@ def warning( 
self._log.warning(msg, type="score_metamodel", location=location) @property - def has_warnings(self): - return self._warning_count > 0 + def warnings(self): + return self._warning_count @property - def has_infos(self): - return self._info_count > 0 + def infos(self): + return self._info_count def flush_new_checks(self): """Log all new-check messages together at once.""" @@ -108,14 +108,11 @@ def make_header_line(text: str, width: int = 80) -> str: if not self._new_checks: return - info_header = make_header_line("[INFO MESSAGE]") - separator = "=" * 80 warning_header = make_header_line( - f"[New Checks] has {len(self._new_checks)} warnings" + f"{len(self._new_checks)} non-fatal warnings " + "(will become fatal in the future)" ) - logger.info(info_header) - logger.info(separator) logger.info(warning_header) for msg, location in self._new_checks: diff --git a/src/extensions/score_metamodel/metamodel_types.py b/src/extensions/score_metamodel/metamodel_types.py index 95449fd6..be26ed2e 100644 --- a/src/extensions/score_metamodel/metamodel_types.py +++ b/src/extensions/score_metamodel/metamodel_types.py @@ -28,3 +28,7 @@ class ProhibitedWordCheck: class ScoreNeedType(NeedType): tags: list[str] parts: int + mandatory_options: dict[str, str] + optional_options: dict[str, str] + mandatory_links: dict[str, str] + optional_links: dict[str, str] diff --git a/src/extensions/score_metamodel/tests/__init__.py b/src/extensions/score_metamodel/tests/__init__.py index c675ba4a..839efe2b 100644 --- a/src/extensions/score_metamodel/tests/__init__.py +++ b/src/extensions/score_metamodel/tests/__init__.py @@ -31,14 +31,14 @@ def __init__(self): super().__init__(self._mock_logger, app_path) def assert_no_warnings(self): - if self.has_warnings: + if self.warnings: warnings = "\n".join( f"* {call}" for call in self._mock_logger.warning.call_args_list ) pytest.fail(f"Expected no warnings, but got:\n{warnings}") def assert_no_infos(self): - if self.has_infos: + if self.infos: infos = 
"\n".join( f"* {call}" for call in self._mock_logger.info.call_args_list ) diff --git a/src/extensions/score_metamodel/tests/test_check_options.py b/src/extensions/score_metamodel/tests/test_check_options.py index dba8e015..1a072386 100644 --- a/src/extensions/score_metamodel/tests/test_check_options.py +++ b/src/extensions/score_metamodel/tests/test_check_options.py @@ -11,12 +11,12 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -from typing import TypedDict, cast +from typing import cast from unittest.mock import Mock import pytest from attribute_plugin import add_test_properties # type: ignore[import-untyped] -from score_metamodel import CheckLogger +from score_metamodel import CheckLogger, ScoreNeedType from score_metamodel.checks.check_options import ( check_extra_options, check_options, @@ -25,49 +25,39 @@ from sphinx.application import Sphinx # type: ignore[import-untyped] -class NeedTypeDict(TypedDict, total=False): - directive: str - mandatory_options: dict[str, str | int] | None - opt_opt: dict[str, str] | None - - class TestCheckOptions: - NEED_TYPE_INFO: list[NeedTypeDict] = [ + NEED_TYPE_INFO: list[ScoreNeedType] = [ { + "title": "Test Type", + "prefix": "TR", + "tags": [], + "parts": 1, "directive": "tool_req", "mandatory_options": { "id": "^tool_req__.*$", "some_required_option": "^some_value__.*$", }, + "optional_options": {}, + "mandatory_links": {}, + "optional_links": {}, } ] - NEED_TYPE_INFO_WITH_OPT_OPT: list[NeedTypeDict] = [ + NEED_TYPE_INFO_WITH_OPT_OPT: list[ScoreNeedType] = [ { + "title": "Test Type", + "prefix": "TR", + "tags": [], + "parts": 1, "directive": "tool_req", "mandatory_options": { "id": "^tool_req__.*$", "some_required_option": "^some_value__.*$", }, - "opt_opt": { + "optional_options": { "some_optional_option": "^some_value__.*$", }, - } - ] - - NEED_TYPE_INFO_WITHOUT_MANDATORY_OPTIONS: list[NeedTypeDict] = [ - { - "directive": "workflow", - 
"mandatory_options": None, - }, - ] - - NEED_TYPE_INFO_WITH_INVALID_OPTION_TYPE: list[NeedTypeDict] = [ - { - "directive": "workflow", - "mandatory_options": { - "id": "^wf_req__.*$", - "some_invalid_option": 42, - }, + "mandatory_links": {}, + "optional_links": {}, } ] @@ -91,101 +81,9 @@ def test_unknown_directive(self): app = Mock(spec=Sphinx) app.config = Mock() app.config.needs_types = self.NEED_TYPE_INFO - # Expect that the checks pass - check_options(app, need_1, cast(CheckLogger, logger)) - logger.assert_warning( - "no type info defined for semantic check.", - expect_location=False, - ) - @add_test_properties( - partially_verifies=["tool_req__docs_metamodel"], - test_type="requirements-based", - derivation_technique="requirements-analysis", - ) - def test_unknown_directive_extra_option(self): - """Given a need an unknown/undefined type, should raise an error""" - need_1 = need( - target_id="tool_req__001", - type="unknown_type", - id="tool_req__001", - some_required_option="some_value__001", - docname=None, - lineno=None, - ) - - logger = fake_check_logger() - app = Mock(spec=Sphinx) - app.config = Mock() - app.config.needs_types = self.NEED_TYPE_INFO - # Expect that the checks pass - check_extra_options(app, need_1, cast(CheckLogger, logger)) - logger.assert_warning( - "no type info defined for semantic check.", - expect_location=False, - ) - - @add_test_properties( - partially_verifies=["tool_req__docs_metamodel"], - test_type="requirements-based", - derivation_technique="requirements-analysis", - ) - def test_missing_mandatory_options_info(self): - """ - Given any need of known type with missing mandatory options info - it should raise an error - """ - need_1 = need( - target_id="wf_req__001", - id="wf_req__001", - type="workflow", - some_required_option=None, - docname=None, - lineno=None, - ) - - logger = fake_check_logger() - app = Mock(spec=Sphinx) - app.config = Mock() - app.config.needs_types = self.NEED_TYPE_INFO_WITHOUT_MANDATORY_OPTIONS - 
app.config.allowed_external_prefixes = [] - # Expect that the checks pass - check_options(app, need_1, cast(CheckLogger, logger)) - logger.assert_warning( - "no type info defined for semantic check.", - expect_location=False, - ) - - @add_test_properties( - partially_verifies=["tool_req__docs_metamodel"], - test_type="requirements-based", - derivation_technique="requirements-analysis", - ) - def test_invalid_option_type(self): - """ - Given any need of known type with missing mandatory options info - it should raise an error - """ - need_1 = need( - target_id="wf_req__001", - id="wf_req__001", - type="workflow", - some_invalid_option="42", - docname=None, - lineno=None, - ) - - logger = fake_check_logger() - app = Mock(spec=Sphinx) - app.config = Mock() - app.config.needs_types = self.NEED_TYPE_INFO_WITH_INVALID_OPTION_TYPE - app.config.allowed_external_prefixes = [] - # Expect that the checks pass - check_options(app, need_1, cast(CheckLogger, logger)) - logger.assert_warning( - "pattern `42` is not a valid regex pattern.", - expect_location=False, - ) + with pytest.raises(ValueError): + check_options(app, need_1, cast(CheckLogger, logger)) @add_test_properties( partially_verifies=["tool_req__docs_metamodel"], diff --git a/src/extensions/score_metamodel/tests/test_metamodel_load.py b/src/extensions/score_metamodel/tests/test_metamodel_load.py index 2a166675..42381754 100644 --- a/src/extensions/score_metamodel/tests/test_metamodel_load.py +++ b/src/extensions/score_metamodel/tests/test_metamodel_load.py @@ -37,16 +37,16 @@ def test_load_metamodel_data(): assert result.needs_types[0]["directive"] == "type1" assert result.needs_types[0]["title"] == "Type 1" assert result.needs_types[0]["prefix"] == "T1" - assert result.needs_types[0]["color"] == "blue" - assert result.needs_types[0]["style"] == "bold" + assert result.needs_types[0].get("color") == "blue" + assert result.needs_types[0].get("style") == "bold" assert result.needs_types[0]["mandatory_options"] == 
{"opt1": "value1"} - assert result.needs_types[0]["opt_opt"] == { + assert result.needs_types[0]["optional_options"] == { "opt2": "value2", "opt3": "value3", "global_opt": "global_value", } - assert result.needs_types[0]["req_link"] == [("link1", "value1")] - assert result.needs_types[0]["opt_link"] == [("link2", "value2")] + assert result.needs_types[0]["mandatory_links"] == {"link1": "value1"} + assert result.needs_types[0]["optional_links"] == {"link2": "value2"} assert len(result.needs_extra_links) == 1 assert result.needs_extra_links[0] == { diff --git a/src/extensions/score_metamodel/yaml_parser.py b/src/extensions/score_metamodel/yaml_parser.py index d5273974..de26850a 100644 --- a/src/extensions/score_metamodel/yaml_parser.py +++ b/src/extensions/score_metamodel/yaml_parser.py @@ -19,14 +19,17 @@ from ruamel.yaml import YAML from sphinx_needs import logging -from src.extensions.score_metamodel.metamodel_types import ProhibitedWordCheck +from src.extensions.score_metamodel.metamodel_types import ( + ProhibitedWordCheck, + ScoreNeedType, +) logger = logging.get_logger(__name__) @dataclass class MetaModelData: - needs_types: list[dict[str, object]] + needs_types: list[ScoreNeedType] needs_extra_links: list[dict[str, str]] needs_extra_options: list[str] prohibited_words_checks: list[ProhibitedWordCheck] @@ -102,11 +105,7 @@ def load_metamodel_data() -> MetaModelData: with open(yaml_path, encoding="utf-8") as f: data = cast(dict[str, Any], yaml.load(f)) - # Access the custom validation block - - types_dict = cast(dict[str, Any], data.get("needs_types", {})) - links_dict = cast(dict[str, Any], data.get("needs_extra_links", {})) - graph_check_dict = cast(dict[str, Any], data.get("graph_checks", {})) + # Some options are globally enabled for all types global_base_options = cast(dict[str, Any], data.get("needs_types_base_options", {})) global_base_options_optional_opts = cast( dict[str, Any], global_base_options.get("optional_options", {}) @@ -125,14 +124,23 @@ def 
load_metamodel_data() -> MetaModelData: needs_types_list = [] all_options: set[str] = set() + types_dict = cast(dict[str, Any], data.get("needs_types", {})) for directive_name, directive_data in types_dict.items(): - directive_name = cast(str, directive_name) - directive_data = cast(dict[str, Any], directive_data) + assert isinstance(directive_name, str) + assert isinstance(directive_data, dict) + # Build up a single "needs_types" item - one_type: dict[str, Any] = { + one_type: ScoreNeedType = { "directive": directive_name, - "title": directive_data.get("title", ""), - "prefix": directive_data.get("prefix", ""), + "title": directive_data["title"], + "prefix": directive_data["prefix"], + "tags": directive_data.get("tags", []), + "parts": directive_data.get("parts", 3), + "mandatory_options": directive_data.get("mandatory_options", {}), + "optional_options": directive_data.get("optional_options", {}) + | global_base_options_optional_opts, + "mandatory_links": directive_data.get("mandatory_links", {}), + "optional_links": directive_data.get("optional_links", {}), } if "color" in directive_data: @@ -140,45 +148,14 @@ def load_metamodel_data() -> MetaModelData: if "style" in directive_data: one_type["style"] = directive_data["style"] - # Store mandatory_options and optional_options directly as a dict - mandatory_options = cast( - dict[str, Any], directive_data.get("mandatory_options", {}) - ) - one_type["mandatory_options"] = mandatory_options - tags = cast(list[str], directive_data.get("tags", [])) - one_type["tags"] = tags - parts = cast(int, directive_data.get("parts", 3)) - one_type["parts"] = parts - - optional_options = cast( - dict[str, Any], directive_data.get("optional_options", {}) - ) - optional_options.update(global_base_options_optional_opts) - one_type["opt_opt"] = optional_options - - all_options.update(list(mandatory_options.keys())) - all_options.update(list(optional_options.keys())) - - # mandatory_links => "req_link" - mand_links_yaml = cast( - 
dict[str, Any], directive_data.get("mandatory_links", {}) - ) - if mand_links_yaml: - one_type["req_link"] = [ - (cast(str, k), cast(Any, v)) for k, v in mand_links_yaml.items() - ] - - # optional_links => "opt_link" - opt_links_yaml = cast(dict[str, Any], directive_data.get("optional_links", {})) - if opt_links_yaml: - one_type["opt_link"] = [ - (cast(str, k), cast(Any, v)) for k, v in opt_links_yaml.items() - ] - needs_types_list.append(one_type) + all_options.update(set(one_type["mandatory_options"].keys())) + all_options.update(set(one_type["optional_options"].keys())) + # Convert "links" dict -> list of {"option", "incoming", "outgoing"} needs_extra_links_list: list[dict[str, str]] = [] + links_dict = cast(dict[str, Any], data.get("needs_extra_links", {})) for link_option, link_data in links_dict.items(): link_option = cast(str, link_option) link_data = cast(dict[str, Any], link_data) @@ -196,6 +173,8 @@ def load_metamodel_data() -> MetaModelData: # constraint checking via regex needs_extra_options: list[str] = sorted(all_options - set(default_options_list)) + graph_check_dict = cast(dict[str, Any], data.get("graph_checks", {})) + return MetaModelData( needs_types=needs_types_list, needs_extra_links=needs_extra_links_list, From 5f22e5fa49898b528cffb73337c69b04b37c3097 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Wed, 17 Sep 2025 12:27:33 +0200 Subject: [PATCH 135/231] drop id and prefix from metamodel.yaml when they are redundant (#254) --- docs/internals/extensions/metamodel.md | 37 ++++-- .../score_metamodel/checks/check_options.py | 64 ++++------ src/extensions/score_metamodel/metamodel.yaml | 120 +++--------------- .../tests/test_check_options.py | 25 ---- .../tests/test_metamodel_load.py | 6 +- src/extensions/score_metamodel/yaml_parser.py | 7 +- 6 files changed, 83 insertions(+), 176 deletions(-) diff --git a/docs/internals/extensions/metamodel.md b/docs/internals/extensions/metamodel.md index c14a91dc..e02201b1 100644 --- 
a/docs/internals/extensions/metamodel.md +++ b/docs/internals/extensions/metamodel.md @@ -1,7 +1,7 @@ (metamodel)= # score_metamodel -The `score_metamodel` extension is a core extension/component of the Docs-As-Code. +The `score_metamodel` extension is a core extension/component of the Docs-As-Code. It provides metamodel definitions, validation checks, and project layout management for Sphinx documentation. ## Overview @@ -33,7 +33,7 @@ The extension implements a multi-tier checking system: - Require access to the complete needs graph - Examples: Link validation, dependency checking, cross-reference verification -This extension comes with Docs-As-Code. +This extension comes with Docs-As-Code. Add `score_metamodel` to your extensions in `conf.py`: ```python @@ -44,6 +44,23 @@ extensions = [ ] ``` +## need types + +Each type of needs is defined in the `needs_types` section of the `metamodel.yaml` file. Each need type has attributes, links, tags, and other properties that define its structure and behavior within the documentation system. + +Each need type is introduced via `:` followed by its properties indented under it. + +Properties: +- **title**: The title of the need type. +- **prefix**: A unique prefix used to identify the need type. Default is the type name followed by `__`. +- **mandatory_options**: A list of mandatory options that must be provided for the need type. + `id` is worth mentioning as it is automatically included and must be unique. Default is the prefix followed by `[a-z0-9_]`. +- **optional_options**: A list of optional options that can be provided for the need type. +- **mandatory_links**: A list of mandatory links to other need types that must be included. +- **optional_links**: A list of optional links to other need types that can be included. +- **tags**: A list of tags associated with the need type. +- **parts**: The number of parts (separated by `__`) within the need ID. 
+ ## Creating New Validation Checks The extension automatically discovers checks from the `checks/` directory and the metamodel.yaml config. There are several types of checks you can implement: @@ -73,23 +90,23 @@ needs_types: ``` ### 2. Generic Graph Checks (Configuration-Based) -Generic graph checks are defined in the metamodel.yaml under `graph_checks`. +Generic graph checks are defined in the metamodel.yaml under `graph_checks`. These checks all follow the same structure: ```yaml : needs: - include: , #list of your needs + include: , #list of your needs condition: check: : - explanation:
``` > *Note:* You can also use multiple conditions or negate conditions in either the needs or check part. -A complete example might look like so: +A complete example might look like so: ```yaml graph_checks: @@ -101,14 +118,14 @@ graph_checks: - safety != QM - status == valid check: - implements: + implements: and: - safety != QM - status == valid explanation: An safety architecture element can only link other safety architecture elements. ``` -What does this check do? +What does this check do? This check will go through each of the needs mentioned in 'include' that match the condition, and then for every single one of them check the needs that are linked inside the 'implements' attribute. Go inside those needs and check if they also fulfill the condition described. If one of them does not fulfill the condition the check fails and will let you know with a warning that it did so. @@ -126,7 +143,7 @@ prohibited_words_checks: - < word to forbid > ``` -An example might look like this: +An example might look like this: ```yaml prohibited_words_checks: content_check: @@ -201,7 +218,7 @@ score_metamodel/ ├── __init__.py ├── rst │ ├── attributes - │ │ └── ... + │ │ └── ... │ ├── conf.py │ ├── graph │ │ └── test_metamodel_graph.rst diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index 8ac26423..c2a4719a 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -46,28 +46,18 @@ def _validate_value_pattern( pattern: str, need: NeedsInfoType, field: str, - log: CheckLogger, - as_info: bool = False, -) -> None: +): """Check if a value matches the given pattern and log the result. - If ``as_info`` is True, mismatches are reported as info (non-failing) - messages, otherwise as warnings. + Returns true if the value matches the pattern, False otherwise. 
""" try: - if not re.match(pattern, value): - log.warning_for_option( - need, - field, - f"does not follow pattern `{pattern}`.", - is_new_check=as_info, - ) - except TypeError: - log.warning_for_option( - need, - field, - f"pattern `{pattern}` is not a valid regex pattern.", - ) + return re.match(pattern, value) is not None + except TypeError as e: + raise TypeError( + f"Error in metamodel.yaml at {need['type']}->{field}: " + f"pattern `{pattern}` is not a valid regex pattern." + ) from e def validate_fields( @@ -102,23 +92,21 @@ def remove_prefix(word: str, prefixes: list[str]) -> str: log.warning_for_need( need, f"is missing required {field_type}: `{field}`." ) - continue # Skip empty optional fields - # Try except used to add more context to Error without passing variables - # just for that to function - try: - values = _normalize_values(raw_value) - except ValueError as err: - raise ValueError( - f"An Attribute inside need {need['id']} is " - "not of type str. Only Strings are allowed" - ) from err - # The filter ensures that the function is only called when needed. + continue # Nothing to validate if not present + + values = _normalize_values(raw_value) + for value in values: if allowed_prefixes: value = remove_prefix(value, allowed_prefixes) - _validate_value_pattern( - value, pattern, need, field, log, as_info=optional_link_as_info - ) + if not _validate_value_pattern(value, pattern, need, field): + msg = f"does not follow pattern `{pattern}`." + log.warning_for_option( + need, + field, + msg, + is_new_check=optional_link_as_info, + ) # req-Id: tool_req__docs_req_attr_reqtype @@ -137,19 +125,17 @@ def check_options( Checks that required and optional options and links are present and follow their defined patterns. 
""" - production_needs_types = app.config.needs_types - - need_options = get_need_type(production_needs_types, need["type"]) + need_type = get_need_type(app.config.needs_types, need["type"]) # If undefined this is an empty list allowed_prefixes = app.config.allowed_external_prefixes # Validate Options and Links field_validations = [ - ("option", need_options["mandatory_options"], True), - ("option", need_options["optional_options"], False), - ("link", need_options["mandatory_links"], True), - ("link", need_options["optional_links"], False), + ("option", need_type["mandatory_options"], True), + ("option", need_type["optional_options"], False), + ("link", need_type["mandatory_links"], True), + ("link", need_type["optional_links"], False), ] for field_type, field_values, is_required in field_validations: diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 84c3e77d..ee19fc16 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -49,47 +49,45 @@ prohibited_words_checks: - absolutely needs_types: + # See metamodel.md for how to define a new need type + ############################################################################## # Process Metamodel ############################################################################## # TSF tsf: - title: "TSF" - prefix: "tsf__" + title: TSF mandatory_options: - id: "^tsf__[0-9a-zA-Z_-]*$" - status: "^(draft|valid)$" + id: ^tsf__[0-9a-zA-Z_-]*$ + status: ^(draft|valid)$ optional_links: - links: "^.*$" + links: ^.*$ parts: 3 tenet: - title: "Tenet" - prefix: "tenet__" + title: Tenet mandatory_options: - id: "^tenet__[0-9a-zA-Z_-]*$" - status: "^(draft|valid)$" + id: ^tenet__[0-9a-zA-Z_-]*$ + status: ^(draft|valid)$ optional_links: links: "^.*$" parts: 3 assertion: - title: "Assertion" - prefix: "^assertion__" + title: Assertion mandatory_options: - id: "assertion__[0-9a-zA-Z_-]*$" - status: "^(draft|valid)$" + 
id: ^assertion__[0-9a-zA-Z_-]*$ + status: ^(draft|valid)$ optional_links: - links: "^.*$" + links: ^.*$ parts: 3 # Standard Requirement and Work Product # req-Id: tool_req__docs_stdreq_types std_req: title: Standard Requirement - prefix: std_req__ mandatory_options: - id: std_req__(iso26262|isosae21434|isopas8926|aspice_40)__[0-9a-zA-Z_-]*$ + id: ^std_req__(iso26262|isosae21434|isopas8926|aspice_40)__[0-9a-zA-Z_-]*$ status: ^(valid)$ optional_links: links: ^.*$ @@ -97,9 +95,8 @@ needs_types: std_wp: title: Standard Work Product - prefix: std_wp__ mandatory_options: - id: std_wp__(iso26262|isosae21434|isopas8926|aspice_40)__[0-9a-z_]*$ + id: ^std_wp__(iso26262|isosae21434|isopas8926|aspice_40)__[0-9a-z_]*$ status: ^(valid)$ parts: 3 @@ -109,7 +106,6 @@ needs_types: title: Workflow prefix: wf__ mandatory_options: - id: ^wf__[0-9a-z_]*$ status: ^(valid|draft)$ mandatory_links: input: ^wp__.*$ @@ -125,9 +121,7 @@ needs_types: # req-Id: tool_req__docs_req_types gd_req: title: Process Requirements - prefix: gd_req__ mandatory_options: - id: ^gd_req__[0-9a-z_]*$ # req-Id: tool_req__docs_common_attr_status status: ^(valid|draft)$ content: ^[\s\S]+$ @@ -142,9 +136,7 @@ needs_types: gd_temp: title: Process Template - prefix: gd_temp__ mandatory_options: - id: ^gd_temp__[0-9a-z_]*$ status: ^(valid|draft)$ optional_links: complies: std_req__(iso26262|isodae21434|isopas8926|aspice_40)__.*$ @@ -152,32 +144,27 @@ needs_types: gd_chklst: title: Process Checklist - prefix: gd_chklst__ mandatory_options: - id: ^gd_chklst__[0-9a-z_]*$ status: ^(valid|draft)$ optional_links: - complies: std_req__(iso26262|isodae21434|isopas8926|aspice_40)__.*$ + complies: ^std_req__(iso26262|isodae21434|isopas8926|aspice_40)__.*$ parts: 2 gd_guidl: title: Process Guideline - prefix: gd_guidl__ mandatory_options: - id: ^gd_guidl__[0-9a-z_]*$ status: ^(valid|draft)$ optional_links: - complies: std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ + complies: 
^std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ parts: 2 gd_method: title: Process Method prefix: gd_meth__ mandatory_options: - id: ^gd_meth__[0-9a-z_]*$ status: ^(valid|draft)$ optional_links: - complies: std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ + complies: ^std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ parts: 2 # S-CORE Workproduct @@ -185,18 +172,15 @@ needs_types: title: Workproduct prefix: wp__ mandatory_options: - id: ^wp__[0-9a-z_]*$ status: ^(valid|draft)$ optional_links: - complies: std_(wp__iso26262|wp__isosae21434|wp__isopas8926|iic_aspice_40)__.*$ + complies: ^std_(wp__iso26262|wp__isosae21434|wp__isopas8926|iic_aspice_40)__.*$ parts: 2 # Role role: title: Role prefix: rl__ - mandatory_options: - id: ^rl__[0-9a-z_]*$ optional_links: contains: ^rl__.*$ parts: 2 @@ -204,17 +188,13 @@ needs_types: # Documents, process_description only doc_concept: title: Concept Definition - prefix: doc_concept__ mandatory_options: - id: ^doc_concept__[0-9a-z_]*$ status: ^(valid|draft)$ parts: 2 doc_getstrt: - title: Getting Startet - prefix: doc_getstrt__ + title: Getting Started mandatory_options: - id: ^doc_getstrt__[0-9a-z_]*$ status: ^(valid|draft)$ parts: 2 @@ -223,7 +203,6 @@ needs_types: title: Generic Document prefix: doc__ mandatory_options: - id: ^doc__[0-9a-z_]*$ status: ^(valid|draft|invalid)$ optional_options: safety: "^(QM|ASIL_B)$" @@ -239,9 +218,7 @@ needs_types: # req-Id: tool_req__docs_doc_types doc_tool: title: Tool Verification Report - prefix: doc_tool__ mandatory_options: - id: ^doc_tool__[0-9a-z_]*$ # req-Id: tool_req__docs_tvr_status status: ^(draft|evaluated|qualified|released|rejected)$ version: ^.*$ @@ -263,9 +240,7 @@ needs_types: # req-Id: tool_req__docs_req_types stkh_req: title: Stakeholder Requirement - prefix: stkh_req__ mandatory_options: - id: ^stkh_req__[0-9a-z_]*$ # req-Id: tool_req__docs_req_attr_reqtype reqtype: ^(Functional|Interface|Process|Non-Functional)$ # req-Id: 
tool_req__docs_common_attr_safety @@ -294,10 +269,8 @@ needs_types: # req-Id: tool_req__docs_req_types feat_req: title: Feature Requirement - prefix: feat_req__ style: node mandatory_options: - id: ^feat_req__[0-9a-z_]*$ # req-Id: tool_req__docs_req_attr_reqtype reqtype: ^(Functional|Interface|Process|Non-Functional)$ # req-Id: tool_req__docs_common_attr_security @@ -326,9 +299,7 @@ needs_types: # req-Id: tool_req__docs_req_types comp_req: title: Component Requirement - prefix: comp_req__ mandatory_options: - id: ^comp_req__[0-9a-z_]*$ # req-Id: tool_req__docs_req_attr_reqtype reqtype: ^(Functional|Interface|Process|Non-Functional)$ # req-Id: tool_req__docs_common_attr_security @@ -357,9 +328,7 @@ needs_types: # req-Id: tool_req__docs_req_types tool_req: title: Tool Requirement - prefix: tool_req__ mandatory_options: - id: ^tool_req__[0-9a-z_]*$ # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ # req-Id: tool_req__docs_common_attr_safety @@ -390,9 +359,7 @@ needs_types: # req-Id: tool_req__docs_req_types aou_req: title: Assumption of Use Requirement - prefix: aou_req__ mandatory_options: - id: ^aou_req__[0-9a-z_]*$ # req-Id: tool_req__docs_req_attr_reqtype reqtype: ^(Functional|Interface|Process|Non-Functional)$ # req-Id: tool_req__docs_common_attr_security @@ -421,11 +388,9 @@ needs_types: # req-Id: tool_req__docs_arch_views feat_arc_sta: title: Feature & Feature Package Diagram - prefix: feat_arc_sta__ color: #FEDCD2 style: card mandatory_options: - id: ^feat_arc_sta__[0-9a-z_]+$ # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ # req-Id: tool_req__docs_common_attr_safety @@ -445,11 +410,9 @@ needs_types: # req-Id: tool_req__docs_arch_views feat_arc_dyn: title: Feature Sequence Diagram - prefix: feat_arc_dyn__ color: #FEDCD2 style: card mandatory_options: - id: ^feat_arc_dyn__[0-9a-z_]+$ # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ # req-Id: tool_req__docs_common_attr_safety @@ -468,11 +431,9 @@ needs_types: 
# req-Id: tool_req__docs_arch_views logic_arc_int: title: Logical Interface & Feature Interface View - prefix: logic_arc_int__ color: #FEDCD2 style: card mandatory_options: - id: ^logic_arc_int__[0-9a-z_]+$ # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ # req-Id: tool_req__docs_common_attr_safety @@ -491,11 +452,9 @@ needs_types: # req-Id: tool_req__docs_arch_types logic_arc_int_op: title: Logical Interface Operation - prefix: logic_arc_int_op__ color: #FEDCD2 style: card mandatory_options: - id: ^logic_arc_int_op__[0-9a-z_]+$ # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ # req-Id: tool_req__docs_common_attr_safety @@ -512,11 +471,8 @@ needs_types: # req-Id: tool_req__docs_arch_views mod_view_sta: title: Module Architecture Static View - prefix: mod_view_sta__ color: #FEDCD2 style: card - mandatory_options: - id: ^mod_view_sta__[0-9a-z_]+$ mandatory_links: includes: ^comp_arc_sta__.+$ tags: @@ -526,11 +482,8 @@ needs_types: # No process requirement mod_view_dyn: title: Module Architecture Dynamic View - prefix: mod_view_dyn__ color: #FEDCD2 style: card - mandatory_options: - id: ^mod_view_dyn__[0-9a-z_]+$ parts: 3 # Architecture Element & View @@ -538,11 +491,9 @@ needs_types: # req-Id: tool_req__docs_arch_views comp_arc_sta: title: Component & Component Package Diagram - prefix: comp_arc_sta__ color: #FEDCD2 style: card mandatory_options: - id: ^comp_arc_sta__[0-9a-z_]+$ # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ # req-Id: tool_req__docs_common_attr_safety @@ -563,11 +514,9 @@ needs_types: # req-Id: tool_req__docs_arch_views comp_arc_dyn: title: Component Sequence Diagram - prefix: comp_arc_dyn__ color: #FEDCD2 style: card mandatory_options: - id: ^comp_arc_dyn__[0-9a-z_]+$ # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ # req-Id: tool_req__docs_common_attr_safety @@ -586,11 +535,9 @@ needs_types: # req-Id: tool_req__docs_arch_views real_arc_int: title: Interface & Component 
Interface - prefix: real_arc_int__ color: #FEDCD2 style: card mandatory_options: - id: ^real_arc_int__[0-9a-z_]+$ # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ # req-Id: tool_req__docs_common_attr_safety @@ -609,11 +556,9 @@ needs_types: # req-Id: tool_req__docs_arch_types real_arc_int_op: title: Interface Operation - prefix: real_arc_int_op__ color: #FEDCD2 style: card mandatory_options: - id: ^real_arc_int_op__[0-9a-z_]+$ # req-Id: tool_req__docs_common_attr_security security: ^(YES|NO)$ # req-Id: tool_req__docs_common_attr_safety @@ -631,10 +576,8 @@ needs_types: # - architecture end - review_header: - prefix: review__header title: Review Header mandatory_options: - id: ^review__header__[0-9a-z_]*$ reviewers: ^.*$ approvers: ^.*$ hash: ^.*$ @@ -644,11 +587,9 @@ needs_types: # Implementation dd_sta: title: Static detailed design - prefix: dd_sta__ color: #FEDCD2 style: card mandatory_options: - id: ^dd_sta__[0-9a-z_]*$ security: ^(YES|NO)$ safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ @@ -661,11 +602,9 @@ needs_types: dd_dyn: title: Dynamic detailed design - prefix: dd_dyn__ color: #FEDCD2 style: card mandatory_options: - id: ^dd_dyn__[0-9a-z_]*$ security: ^(YES|NO)$ safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ @@ -676,9 +615,7 @@ needs_types: sw_unit: title: Software unit - prefix: sw_unit__ mandatory_options: - id: ^sw_unit__[0-9a-z_]*$ security: ^(YES|NO)$ safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ @@ -686,11 +623,9 @@ needs_types: sw_unit_int: title: Software unit interfaces - prefix: sw_unit_int__ color: #FEDCD2 style: card mandatory_options: - id: ^sw_unit_int__[0-9a-z_]*$ security: ^(YES|NO)$ safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ @@ -700,9 +635,7 @@ needs_types: # No requirement!! 
plat_saf_dfa: title: Feature Dependent Failure Analysis - prefix: plat_saf_dfa__ mandatory_options: - id: ^plat_saf_dfa__[0-9a-z_]+$ failure_id: ^.*$ failure_effect: ^.*$ sufficient: ^(yes|no)$ @@ -719,9 +652,7 @@ needs_types: # req-Id: tool_req__docs_saf_types feat_saf_dfa: title: Feature DFA (Dependent Failure Analysis) - prefix: feat_saf_dfa__ mandatory_options: - id: ^feat_saf_dfa__[0-9a-z_]+$ # req-Id: tool_req__docs_saf_attr_dfa_failure_id failure_id: ^.*$ failure_effect: ^.*$ @@ -748,9 +679,7 @@ needs_types: # req-Id: tool_req__docs_saf_types comp_saf_dfa: title: Component DFA (Dependent Failure Analysis) - prefix: comp_saf_dfa__ mandatory_options: - id: ^comp_saf_dfa__[0-9a-z_]+$ # req-Id: tool_req__docs_saf_attr_dfa_failure_id failure_id: ^.*$ failure_effect: ^.*$ @@ -778,9 +707,7 @@ needs_types: # req-Id: tool_req__docs_saf_types feat_saf_fmea: title: Feature FMEA (Failure Mode and Effects Analysis) - prefix: feat_saf_fmea__ mandatory_options: - id: ^feat_saf_fmea__[0-9a-z_]+$ # req-Id: tool_req__docs_saf_attr_fmea_fault_id fault_id: ^.*$ failure_effect: ^.*$ @@ -807,9 +734,7 @@ needs_types: # req-Id: tool_req__docs_saf_types comp_saf_fmea: title: Component FMEA (Failure Mode and Effects Analysis) - prefix: comp_saf_fmea__ mandatory_options: - id: ^comp_saf_fmea__[0-9a-z_]+$ # req-Id: tool_req__docs_saf_attr_fmea_fault_id fault_id: ^.*$ failure_effect: ^.*$ @@ -833,9 +758,6 @@ needs_types: testcase: title: Testcase Needs parsed from test.xml files - prefix: testcase__ - mandatory_options: - id: ^testcase__ optional_options: name: ^.*$ file: ^.*$ @@ -851,9 +773,7 @@ needs_types: # https://eclipse-score.github.io/process_description/main/permalink.html?id=gd_temp__change_decision_record dec_rec: title: Decision Record - prefix: dec_rec__ mandatory_options: - id: ^dec_rec__.*__.*$ status: ^(proposed|accepted|deprecated|rejected|superseded)$ context: ^.*$ decision: ^.*$ diff --git a/src/extensions/score_metamodel/tests/test_check_options.py 
b/src/extensions/score_metamodel/tests/test_check_options.py index 1a072386..09603931 100644 --- a/src/extensions/score_metamodel/tests/test_check_options.py +++ b/src/extensions/score_metamodel/tests/test_check_options.py @@ -118,28 +118,3 @@ def test_unknown_option_present_in_neither_req_opt_neither_opt_opt(self): "has these extra options: `other_option`.", expect_location=False, ) - - @add_test_properties( - partially_verifies=["tool_req__docs_metamodel"], - test_type="requirements-based", - derivation_technique="requirements-analysis", - ) - def test_invalid_option_value_type_raises_value_error(self): - """Given a need with an option of wrong type (list with non-str)""" - need_1 = need( - target_id="tool_req__002", - id="tool_req__002", - type="tool_req", - some_required_option=123, - docname=None, - lineno=None, - ) - - logger = fake_check_logger() - app = Mock(spec=Sphinx) - app.config = Mock() - app.config.needs_types = self.NEED_TYPE_INFO - app.config.allowed_external_prefixes = [] - - with pytest.raises(ValueError, match="Only Strings are allowed"): # type: ignore[attr-defined] - check_options(app, need_1, cast(CheckLogger, logger)) diff --git a/src/extensions/score_metamodel/tests/test_metamodel_load.py b/src/extensions/score_metamodel/tests/test_metamodel_load.py index 42381754..3cb67965 100644 --- a/src/extensions/score_metamodel/tests/test_metamodel_load.py +++ b/src/extensions/score_metamodel/tests/test_metamodel_load.py @@ -39,7 +39,11 @@ def test_load_metamodel_data(): assert result.needs_types[0]["prefix"] == "T1" assert result.needs_types[0].get("color") == "blue" assert result.needs_types[0].get("style") == "bold" - assert result.needs_types[0]["mandatory_options"] == {"opt1": "value1"} + assert result.needs_types[0]["mandatory_options"] == { + # default id pattern: prefix + digits, lowercase letters and underscores + "id": "^T1[0-9a-z_]+$", + "opt1": "value1", + } assert result.needs_types[0]["optional_options"] == { "opt2": "value2", "opt3": 
"value3", diff --git a/src/extensions/score_metamodel/yaml_parser.py b/src/extensions/score_metamodel/yaml_parser.py index de26850a..b63dcc82 100644 --- a/src/extensions/score_metamodel/yaml_parser.py +++ b/src/extensions/score_metamodel/yaml_parser.py @@ -133,7 +133,7 @@ def load_metamodel_data() -> MetaModelData: one_type: ScoreNeedType = { "directive": directive_name, "title": directive_data["title"], - "prefix": directive_data["prefix"], + "prefix": directive_data.get("prefix", f"{directive_name}__"), "tags": directive_data.get("tags", []), "parts": directive_data.get("parts", 3), "mandatory_options": directive_data.get("mandatory_options", {}), @@ -143,6 +143,11 @@ def load_metamodel_data() -> MetaModelData: "optional_links": directive_data.get("optional_links", {}), } + # Ensure ID regex is set + if "id" not in one_type["mandatory_options"]: + prefix = one_type["prefix"] + one_type["mandatory_options"]["id"] = f"^{prefix}[0-9a-z_]+$" + if "color" in directive_data: one_type["color"] = directive_data["color"] if "style" in directive_data: From 19de31b0822ab0718c29800c3d720c807e0941af Mon Sep 17 00:00:00 2001 From: Nicolae Dicu Date: Tue, 23 Sep 2025 12:05:34 +0200 Subject: [PATCH 136/231] ci: Integrate verify, test and build docs in same workflow (#255) --- .github/workflows/consumer_test.yml | 41 +++++++++++++++++--- .github/workflows/test_and_docs.yml | 58 +++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+), 5 deletions(-) create mode 100644 .github/workflows/test_and_docs.yml diff --git a/.github/workflows/consumer_test.yml b/.github/workflows/consumer_test.yml index 38ed7562..bb7ab18a 100644 --- a/.github/workflows/consumer_test.yml +++ b/.github/workflows/consumer_test.yml @@ -13,10 +13,7 @@ name: Consumer Tests on: - pull_request: - types: [opened, reopened, synchronize] - merge_group: - types: [checks_requested] + workflow_call: jobs: test: @@ -34,12 +31,46 @@ jobs: run: | bazel run //:ide_support + - name: Prepare report directory + run: | 
+ mkdir -p reports + - name: Run Consumer tests run: | - .venv_docs/bin/python -m pytest -s -v src/tests/ --repo="$CONSUMER" + .venv_docs/bin/python -m pytest -s -v src/tests/ --repo="$CONSUMER" --junitxml="reports/${{ matrix.consumer }}.xml" | tee "reports/${{ matrix.consumer }}.log" env: FORCE_COLOR: "1" TERM: xterm-256color PYTHONUNBUFFERED: "1" CONSUMER: ${{ matrix.consumer }} + + - name: Upload consumer test report + if: ${{ always() }} + uses: actions/upload-artifact@v4 + with: + name: tests-${{ matrix.consumer }} + path: reports/ + + summarize: + needs: test + runs-on: ubuntu-latest + if: ${{ always() }} + steps: + - name: Prepare consumer report directory + run: | + mkdir -p tests-report + + - name: Download individual consumer reports + uses: actions/download-artifact@v4 + with: + pattern: tests-* + path: tests-report + merge-multiple: true + + - name: Upload bundled consumer report + if: ${{ always() }} + uses: actions/upload-artifact@v4 + with: + name: tests-report + path: tests-report diff --git a/.github/workflows/test_and_docs.yml b/.github/workflows/test_and_docs.yml new file mode 100644 index 00000000..542bc6da --- /dev/null +++ b/.github/workflows/test_and_docs.yml @@ -0,0 +1,58 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +name: Tests, Verify and Build Docs + +permissions: + contents: write + pages: write + pull-requests: write + id-token: write + +on: + pull_request_target: + types: [opened, reopened, synchronize] # Allows forks to trigger the docs build + push: + branches: + - main + merge_group: + types: [checks_requested] + +jobs: + docs-verify: + uses: eclipse-score/cicd-workflows/.github/workflows/docs-verify.yml@main + permissions: + pull-requests: write + contents: read + with: + bazel-docs-verify-target: "//:docs_check" + + # This is the user configurable part of the workflow + consumer-tests: + uses: ./.github/workflows/consumer_test.yml + secrets: inherit + + docs-build: + # Waits for consumer-tests but run only when docs verification succeeded + needs: [docs-verify, consumer-tests] + if: ${{ always() && needs.docs-verify.result == 'success' }} + uses: eclipse-score/cicd-workflows/.github/workflows/docs.yml@main + permissions: + contents: write + pages: write + pull-requests: write + id-token: write + with: + bazel-target: "//:docs -- --github_user=${{ github.repository_owner }} --github_repo=${{ github.event.repository.name }}" + retention-days: 3 + tests-report-artifact: tests-report From ee6099e6bc14eb05cffeb6755a1ffbfcce5eeff6 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Tue, 23 Sep 2025 17:42:19 +0200 Subject: [PATCH 137/231] Refactor yaml parsing (#256) --- .../score_metamodel/checks/check_options.py | 14 +- src/extensions/score_metamodel/metamodel.yaml | 2 +- .../score_metamodel/metamodel_types.py | 3 + src/extensions/score_metamodel/yaml_parser.py | 176 ++++++++++-------- 4 files changed, 112 insertions(+), 83 deletions(-) diff --git 
a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index c2a4719a..2c23c0a6 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -85,7 +85,7 @@ def remove_prefix(word: str, prefixes: list[str]) -> str: optional_link_as_info = (not required) and (field_type == "link") - for field, pattern in fields.items(): + for field, allowed_value in fields.items(): raw_value: str | list[str] | None = need.get(field, None) if raw_value in [None, [], ""]: if required: @@ -96,11 +96,12 @@ def remove_prefix(word: str, prefixes: list[str]) -> str: values = _normalize_values(raw_value) + # regex based validation for value in values: if allowed_prefixes: value = remove_prefix(value, allowed_prefixes) - if not _validate_value_pattern(value, pattern, need, field): - msg = f"does not follow pattern `{pattern}`." + if not _validate_value_pattern(value, allowed_value, need, field): + msg = f"does not follow pattern `{allowed_value}`." 
log.warning_for_option( need, field, @@ -162,11 +163,10 @@ def check_extra_options( """ production_needs_types = app.config.needs_types - default_options_list = default_options() need_options = get_need_type(production_needs_types, need["type"]) - # list() creates a copy to avoid modifying the original - allowed_options = list(default_options_list) + # set() creates a copy to avoid modifying the original + allowed_options = set(default_options()) for o in ( "mandatory_options", @@ -174,7 +174,7 @@ def check_extra_options( "mandatory_links", "optional_links", ): - allowed_options.extend(need_options[o].keys()) + allowed_options.update(need_options[o].keys()) extra_options = [ option diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index ee19fc16..b2a28065 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -139,7 +139,7 @@ needs_types: mandatory_options: status: ^(valid|draft)$ optional_links: - complies: std_req__(iso26262|isodae21434|isopas8926|aspice_40)__.*$ + complies: ^std_req__(iso26262|isodae21434|isopas8926|aspice_40)__.*$ parts: 2 gd_chklst: diff --git a/src/extensions/score_metamodel/metamodel_types.py b/src/extensions/score_metamodel/metamodel_types.py index be26ed2e..8eee3f06 100644 --- a/src/extensions/score_metamodel/metamodel_types.py +++ b/src/extensions/score_metamodel/metamodel_types.py @@ -28,7 +28,10 @@ class ProhibitedWordCheck: class ScoreNeedType(NeedType): tags: list[str] parts: int + mandatory_options: dict[str, str] optional_options: dict[str, str] + + # Holds a regex (str) mandatory_links: dict[str, str] optional_links: dict[str, str] diff --git a/src/extensions/score_metamodel/yaml_parser.py b/src/extensions/score_metamodel/yaml_parser.py index b63dcc82..a0a91287 100644 --- a/src/extensions/score_metamodel/yaml_parser.py +++ b/src/extensions/score_metamodel/yaml_parser.py @@ -36,7 +36,7 @@ class MetaModelData: 
needs_graph_check: dict[str, object] -def convert_checks_to_dataclass( +def _parse_prohibited_words( checks_dict: dict[str, dict[str, Any]], ) -> list[ProhibitedWordCheck]: return [ @@ -49,12 +49,12 @@ def convert_checks_to_dataclass( ] -def default_options() -> list[str]: +def default_options(): """ Helper function to get a list of all default options defined by sphinx, sphinx-needs etc. """ - return [ + return { "target_id", "id", "status", @@ -92,98 +92,124 @@ def default_options() -> list[str]: "tags", "arch", "parts", + } + + +def _parse_need_type( + directive_name: str, + yaml_data: dict[str, Any], + global_base_opts: dict[str, Any], +): + """Build a single ScoreNeedType dict from the metamodel entry, incl defaults.""" + t: ScoreNeedType = { + "directive": directive_name, + "title": yaml_data["title"], + "prefix": yaml_data.get("prefix", f"{directive_name}__"), + "tags": yaml_data.get("tags", []), + "parts": yaml_data.get("parts", 3), + "mandatory_options": yaml_data.get("mandatory_options", {}), + "optional_options": yaml_data.get("optional_options", {}) | global_base_opts, + "mandatory_links": yaml_data.get("mandatory_links", {}), + "optional_links": yaml_data.get("optional_links", {}), + } + + # Ensure ID regex is set + if "id" not in t["mandatory_options"]: + prefix = t["prefix"] + t["mandatory_options"]["id"] = f"^{prefix}[0-9a-z_]+$" + + if "color" in yaml_data: + t["color"] = yaml_data["color"] + if "style" in yaml_data: + t["style"] = yaml_data["style"] + + return t + + +def _parse_needs_types( + types_dict: dict[str, Any], + global_base_options_optional_opts: dict[str, Any], +) -> dict[str, ScoreNeedType]: + """Parse the 'needs_types' section of the metamodel.yaml.""" + + needs_types: dict[str, ScoreNeedType] = {} + for directive_name, directive_data in types_dict.items(): + assert isinstance(directive_name, str) + assert isinstance(directive_data, dict) + + needs_types[directive_name] = _parse_need_type( + directive_name, directive_data, 
global_base_options_optional_opts + ) + + return needs_types + + +def _parse_links( + links_dict: dict[str, Any], +) -> list[dict[str, str]]: + """ + Generate 'needs_extra_links' for sphinx-needs. + + It has a slightly different structure than in our metamodel.yaml. + """ + return [ + { + "option": k, + "incoming": v["incoming"], + "outgoing": v["outgoing"], + } + for k, v in links_dict.items() ] +def _collect_all_options(needs_types: dict[str, ScoreNeedType]) -> set[str]: + all_options: set[str] = set() + for t in needs_types.values(): + all_options.update(set(t["mandatory_options"].keys())) + all_options.update(set(t["optional_options"].keys())) + return all_options + + +def _collect_all_custom_options( + needs_types: dict[str, ScoreNeedType], +): + """Generate 'needs_extra_options' for sphinx-needs.""" + + defaults = default_options() + all_options = _collect_all_options(needs_types) + + return sorted(all_options - defaults) + + def load_metamodel_data() -> MetaModelData: """ Load metamodel.yaml and prepare data fields as needed for sphinx-needs. 
""" yaml_path = Path(__file__).resolve().parent / "metamodel.yaml" - yaml = YAML() with open(yaml_path, encoding="utf-8") as f: - data = cast(dict[str, Any], yaml.load(f)) + data = cast(dict[str, Any], YAML().load(f)) # Some options are globally enabled for all types - global_base_options = cast(dict[str, Any], data.get("needs_types_base_options", {})) - global_base_options_optional_opts = cast( - dict[str, Any], global_base_options.get("optional_options", {}) + global_base_options_optional_opts = data.get("needs_types_base_options", {}).get( + "optional_options", {} ) # Get the stop_words and weak_words as separate lists - proh_checks_dict = cast( - dict[str, dict[str, Any]], data.get("prohibited_words_checks", {}) + prohibited_words_checks = _parse_prohibited_words( + data.get("prohibited_words_checks", {}) ) - prohibited_words_checks = convert_checks_to_dataclass(proh_checks_dict) - - # Default options by sphinx, sphinx-needs or anything else we need to account for - default_options_list = default_options() # Convert "types" from {directive_name: {...}, ...} to a list of dicts - needs_types_list = [] - - all_options: set[str] = set() - types_dict = cast(dict[str, Any], data.get("needs_types", {})) - for directive_name, directive_data in types_dict.items(): - assert isinstance(directive_name, str) - assert isinstance(directive_data, dict) - - # Build up a single "needs_types" item - one_type: ScoreNeedType = { - "directive": directive_name, - "title": directive_data["title"], - "prefix": directive_data.get("prefix", f"{directive_name}__"), - "tags": directive_data.get("tags", []), - "parts": directive_data.get("parts", 3), - "mandatory_options": directive_data.get("mandatory_options", {}), - "optional_options": directive_data.get("optional_options", {}) - | global_base_options_optional_opts, - "mandatory_links": directive_data.get("mandatory_links", {}), - "optional_links": directive_data.get("optional_links", {}), - } - # Ensure ID regex is set - if "id" not in 
one_type["mandatory_options"]: - prefix = one_type["prefix"] - one_type["mandatory_options"]["id"] = f"^{prefix}[0-9a-z_]+$" - - if "color" in directive_data: - one_type["color"] = directive_data["color"] - if "style" in directive_data: - one_type["style"] = directive_data["style"] - - needs_types_list.append(one_type) - - all_options.update(set(one_type["mandatory_options"].keys())) - all_options.update(set(one_type["optional_options"].keys())) - - # Convert "links" dict -> list of {"option", "incoming", "outgoing"} - needs_extra_links_list: list[dict[str, str]] = [] - links_dict = cast(dict[str, Any], data.get("needs_extra_links", {})) - for link_option, link_data in links_dict.items(): - link_option = cast(str, link_option) - link_data = cast(dict[str, Any], link_data) - needs_extra_links_list.append( - { - "option": link_option, - "incoming": link_data.get("incoming", ""), - "outgoing": link_data.get("outgoing", ""), - } - ) - - # We have to remove all 'default options' from the extra options. - # As otherwise sphinx errors, due to an option being registered twice. 
- # They are still inside the extra options we extract to enable - # constraint checking via regex - needs_extra_options: list[str] = sorted(all_options - set(default_options_list)) - - graph_check_dict = cast(dict[str, Any], data.get("graph_checks", {})) + needs_types = _parse_needs_types( + data.get("needs_types", {}), global_base_options_optional_opts + ) return MetaModelData( - needs_types=needs_types_list, - needs_extra_links=needs_extra_links_list, - needs_extra_options=needs_extra_options, + needs_types=list(needs_types.values()), + needs_extra_links=_parse_links(data.get("needs_extra_links", {})), + needs_extra_options=_collect_all_custom_options(needs_types), prohibited_words_checks=prohibited_words_checks, - needs_graph_check=graph_check_dict, + needs_graph_check=data.get("graph_checks", {}), ) From 3f8a6f7ea75b646e30d43528ddd546631678f313 Mon Sep 17 00:00:00 2001 From: Nicolae Dicu Date: Wed, 24 Sep 2025 11:13:13 +0200 Subject: [PATCH 138/231] Delete .github/workflows/docs.yml (#262) --- .github/workflows/docs.yml | 42 -------------------------------------- 1 file changed, 42 deletions(-) delete mode 100644 .github/workflows/docs.yml diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml deleted file mode 100644 index 8ffcb83e..00000000 --- a/.github/workflows/docs.yml +++ /dev/null @@ -1,42 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. 
-# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* - -name: Documentation - -permissions: - contents: write - pages: write - pull-requests: write - id-token: write - -on: - pull_request_target: - types: [opened, reopened, synchronize] # Allows forks to trigger the docs build - push: - branches: - - main - merge_group: - types: [checks_requested] - -jobs: - build-docs: - uses: eclipse-score/cicd-workflows/.github/workflows/docs.yml@main - permissions: - contents: write - pages: write - pull-requests: write - id-token: write - - with: - bazel-target: "//:docs -- --github_user=${{ github.repository_owner }} --github_repo=${{ github.event.repository.name }}" - retention-days: 3 From 5ea4b6cb985ef0bbdb393c0864db3fe9a299eee0 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Fri, 26 Sep 2025 10:43:41 +0200 Subject: [PATCH 139/231] better warnings and simpler config (#253) --- src/extensions/score_metamodel/__init__.py | 103 +++++++++--------- .../score_metamodel/checks/check_options.py | 75 +++++++++++-- src/extensions/score_metamodel/metamodel.yaml | 82 +++++++------- .../score_metamodel/metamodel_types.py | 9 +- .../rst/options/test_options_options.rst | 7 +- src/extensions/score_metamodel/yaml_parser.py | 4 +- 6 files changed, 169 insertions(+), 111 deletions(-) diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index fc0b7590..5897cfec 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -13,10 +13,8 @@ import importlib import os import pkgutil -import re from collections.abc import Callable from pathlib import Path -from typing import Any from sphinx.application import Sphinx from sphinx_needs import 
logging @@ -102,6 +100,13 @@ def _run_checks(app: Sphinx, exception: Exception | None) -> None: if exception: return + # First of all postprocess the need links to convert + # type names into actual need types. + # This must be done before any checks are run. + # And it must be done after config was hashed, otherwise + # the config hash would include recusive linking between types. + postprocess_need_links(app.config.needs_types) + # Filter out external needs, as checks are only intended to be run # on internal needs. needs_all_needs = SphinxNeedsData(app.env).get_needs_view() @@ -171,59 +176,53 @@ def _get_need_type_for_need(app: Sphinx, need: NeedsInfoType) -> ScoreNeedType: raise ValueError(f"Need type {need['type']} not found in needs_types") -def _validate_external_need_opt_links( - need: NeedsInfoType, - opt_links: dict[str, str], - allowed_prefixes: list[str], - log: CheckLogger, -) -> None: - for link_field, pattern in opt_links.items(): - raw_value: str | list[str] | None = need.get(link_field, None) - if raw_value in [None, [], ""]: - continue - - values: list[str | Any] = ( - raw_value if isinstance(raw_value, list) else [raw_value] - ) - for value in values: - v: str | Any - if isinstance(value, str): - v = _remove_prefix(value, allowed_prefixes) - else: - v = value - - try: - if not isinstance(v, str) or not re.match(pattern, v): - log.warning_for_option( - need, - link_field, - f"does not follow pattern `{pattern}`.", - is_new_check=True, - ) - except TypeError: - log.warning_for_option( - need, - link_field, - f"pattern `{pattern}` is not a valid regex pattern.", - is_new_check=True, - ) - - -def _check_external_optional_link_patterns(app: Sphinx, log: CheckLogger) -> None: - """Validate optional link patterns on external needs and log as info-only. - - Mirrors the original inline logic from ``_run_checks`` without changing behavior. 
+def _resolve_linkable_types( + link_name: str, + link_value: str, + current_need_type: ScoreNeedType, + needs_types: list[ScoreNeedType], +) -> list[ScoreNeedType]: + needs_types_dict = {nt["directive"]: nt for nt in needs_types} + link_values = [v.strip() for v in link_value.split(",")] + linkable_types: list[ScoreNeedType] = [] + for v in link_values: + target_need_type = needs_types_dict.get(v) + if target_need_type is None: + logger.error( + f"In metamodel.yaml: {current_need_type['directive']}, " + f"link '{link_name}' references unknown type '{v}'." + ) + else: + linkable_types.append(target_need_type) + return linkable_types + + +def postprocess_need_links(needs_types_list: list[ScoreNeedType]): + """Convert link option strings into lists of target need types. + + If a link value starts with '^' it is treated as a regex and left + unchanged. Otherwise it is a comma-separated list of type names which + are resolved to the corresponding ScoreNeedTypes. """ - needs_external_needs = ( - SphinxNeedsData(app.env).get_needs_view().filter_is_external(True) - ) + for need_type in needs_types_list: + try: + link_dicts = ( + need_type["mandatory_links"], + need_type["optional_links"], + ) + except KeyError: + # TODO: remove the Sphinx-Needs defaults from our metamodel + # Example: {'directive': 'issue', 'title': 'Issue', 'prefix': 'IS_'} + continue - for need in needs_external_needs.values(): - need_type = _get_need_type_for_need(app, need) + for link_dict in link_dicts: + for link_name, link_value in link_dict.items(): + assert isinstance(link_value, str) # so far all of them are strings - if opt_links := need_type["optional_links"]: - allowed_prefixes = app.config.allowed_external_prefixes - _validate_external_need_opt_links(need, opt_links, allowed_prefixes, log) + if not link_value.startswith("^"): + link_dict[link_name] = _resolve_linkable_types( # pyright: ignore[reportArgumentType] + link_name, link_value, need_type, needs_types_list + ) def setup(app: Sphinx) 
-> dict[str, str | bool]: diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index 2c23c0a6..42953c67 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -60,10 +60,49 @@ def _validate_value_pattern( ) from e +def _log_option_warning( + need: NeedsInfoType, + log: CheckLogger, + field_type: str, + allowed_directives: list[ScoreNeedType] | None, + field: str, + value: str | list[str], + allowed_value: str | list[str], + required: bool, +): + if field_type == "link": + if allowed_directives: + dirs = " or ".join( + f"{d['title']} ({d['directive']})" for d in allowed_directives + ) + msg = f"but it must reference {dirs}." + else: + msg = f"which does not follow pattern `{allowed_value}`." + + # warning_for_option will print all the values. This way the specific + # problematic value is highlighted in the message. + # This is especially useful if multiple values are given. + msg = f"references '{value}' as '{field}', {msg}" + log.warning_for_need( + need, + msg, + # TODO: Errors in optional links are non fatal for now + is_new_check=not required, + ) + else: + msg = f"does not follow pattern `{allowed_value}`." + log.warning_for_option( + need, + field, + msg, + is_new_check=False, + ) + + def validate_fields( need: NeedsInfoType, log: CheckLogger, - fields: dict[str, str], + fields: dict[str, str] | dict[str, list[ScoreNeedType]], required: bool, field_type: str, allowed_prefixes: list[str], @@ -83,8 +122,6 @@ def remove_prefix(word: str, prefixes: list[str]) -> str: # Removes any prefix allowed by configuration, if prefix is there. 
return [word.removeprefix(prefix) for prefix in prefixes][0] - optional_link_as_info = (not required) and (field_type == "link") - for field, allowed_value in fields.items(): raw_value: str | list[str] | None = need.get(field, None) if raw_value in [None, [], ""]: @@ -96,17 +133,37 @@ def remove_prefix(word: str, prefixes: list[str]) -> str: values = _normalize_values(raw_value) + # Links can be configured to reference other need types instead of regex. + # However, in order to not "load" the other need, we'll check the regex as + # it does encode the need type (at least in S-CORE metamodel). + # Therefore this can remain a @local_check! + # TypedDicts cannot be used with isinstance, so check for dict and required keys + if isinstance(allowed_value, list): + assert field_type == "link" # sanity check + # patterns holds a list of allowed need types + allowed_directives = allowed_value + allowed_value = ( + "(" + + "|".join(d["mandatory_options"]["id"] for d in allowed_directives) + + ")" + ) + else: + allowed_directives = None + # regex based validation for value in values: if allowed_prefixes: value = remove_prefix(value, allowed_prefixes) if not _validate_value_pattern(value, allowed_value, need, field): - msg = f"does not follow pattern `{allowed_value}`." 
- log.warning_for_option( + _log_option_warning( need, + log, + field_type, + allowed_directives, field, - msg, - is_new_check=optional_link_as_info, + value, + allowed_value, + required, ) @@ -132,7 +189,9 @@ def check_options( allowed_prefixes = app.config.allowed_external_prefixes # Validate Options and Links - field_validations = [ + field_validations: list[ + tuple[str, dict[str, str] | dict[str, list[ScoreNeedType]], bool] + ] = [ ("option", need_type["mandatory_options"], True), ("option", need_type["optional_options"], False), ("link", need_type["mandatory_links"], True), diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index b2a28065..219afede 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -108,12 +108,12 @@ needs_types: mandatory_options: status: ^(valid|draft)$ mandatory_links: - input: ^wp__.*$ - output: ^wp__.*$ - approved_by: ^rl__.*$ - responsible: ^rl__.*$ + input: workproduct + output: workproduct + approved_by: role + responsible: role optional_links: - supported_by: ^rl__.*$ + supported_by: role contains: ^gd_(req|temp|chklst|guidl|meth)__.*$ has: ^doc_(getstrt|concept)__.*$ parts: 2 @@ -128,7 +128,7 @@ needs_types: optional_links: # req-Id: tool_req__docs_req_link_satisfies_allowed # TODO: fix once process_description is fixed - satisfies: ^wf__.*$ + satisfies: workflow complies: ^std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ tags: - requirement @@ -182,7 +182,7 @@ needs_types: title: Role prefix: rl__ optional_links: - contains: ^rl__.*$ + contains: role parts: 2 # Documents, process_description only @@ -212,7 +212,7 @@ needs_types: approver: ^.*$ reviewer: ^.*$ optional_links: - realizes: "^wp__.+$" + realizes: workproduct parts: 2 # req-Id: tool_req__docs_doc_types @@ -233,7 +233,7 @@ needs_types: approver: ^.*$ reviewer: ^.*$ optional_links: - realizes: "^wp__.+$" + realizes: workproduct parts: 2 # 
Requirements @@ -282,7 +282,7 @@ needs_types: content: ^[\s\S]+$ mandatory_links: # req-Id: tool_req__docs_req_link_satisfies_allowed - satisfies: ^stkh_req__.*$ + satisfies: stkh_req optional_options: codelink: ^.*$ testlink: ^.*$ @@ -311,7 +311,7 @@ needs_types: content: ^[\s\S]+$ mandatory_links: # req-Id: tool_req__docs_req_link_satisfies_allowed - satisfies: ^feat_req__.*$ + satisfies: feat_req optional_options: codelink: ^.*$ testlink: ^.*$ @@ -339,7 +339,7 @@ needs_types: optional_links: # req-Id: tool_req__docs_req_link_satisfies_allowed # TODO: make it mandatory - satisfies: ^gd_req__.*$ + satisfies: gd_req optional_options: codelink: ^.*$ tags: ^.*$ @@ -400,7 +400,7 @@ needs_types: mandatory_links: includes: ^logic_arc_int(_op)*__.+$ optional_links: - fulfils: ^feat_req__.+$ + fulfils: feat_req tags: - architecture_element - architecture_view @@ -420,7 +420,7 @@ needs_types: # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ mandatory_links: - fulfils: ^feat_req__.+$ + fulfils: feat_req tags: - architecture_view - architecture_element @@ -441,8 +441,8 @@ needs_types: # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ optional_links: - includes: ^logic_arc_int_op__.+$ - fulfils: ^comp_req__.+$ + includes: logic_arc_int_op + fulfils: comp_req tags: - architecture_element - architecture_view @@ -462,7 +462,7 @@ needs_types: # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ mandatory_links: - included_by: ^logic_arc_int__.+$ + included_by: logic_arc_int tags: - architecture_element parts: 3 @@ -474,7 +474,7 @@ needs_types: color: #FEDCD2 style: card mandatory_links: - includes: ^comp_arc_sta__.+$ + includes: comp_arc_sta tags: - architecture_view parts: 3 @@ -501,10 +501,10 @@ needs_types: # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ optional_links: - implements: ^real_arc_int(_op)*__.+$ - includes: ^comp_arc_sta__.+$ - uses: ^real_arc_int(_op)*__.+$ - fulfils: ^comp_req__.+$ + 
implements: real_arc_int, real_arc_int_op + includes: comp_arc_sta + uses: real_arc_int, real_arc_int_op + fulfils: comp_req tags: - architecture_element - architecture_view @@ -524,7 +524,7 @@ needs_types: # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ optional_links: - fulfils: ^comp_req__.+$ + fulfils: comp_req tags: - architecture_view - architecture_element @@ -546,7 +546,7 @@ needs_types: status: ^(valid|invalid)$ language: ^(cpp|rust)$ optional_links: - fulfils: ^comp_req__.+$ + fulfils: comp_req tags: - architecture_element - architecture_view @@ -566,9 +566,9 @@ needs_types: # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ mandatory_links: - included_by: ^real_arc_int__.+$ + included_by: real_arc_int optional_links: - implements: ^logic_arc_int_op__.+$ + implements: logic_arc_int_op tags: - architecture_element parts: 3 @@ -594,10 +594,10 @@ needs_types: safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ mandatory_links: - implements: ^comp_req__.*$ - satisfies: ^comp_arc_sta__.*$ + implements: comp_req + satisfies: comp_arc_sta optional_links: - includes: ^sw_unit__.*$ + includes: sw_unit parts: 3 dd_dyn: @@ -609,8 +609,8 @@ needs_types: safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ mandatory_links: - implements: ^comp_req__.*$ - satisfies: ^comp_arc_sta__.*$ + implements: comp_req + satisfies: comp_arc_sta parts: 3 sw_unit: @@ -642,11 +642,11 @@ needs_types: status: ^(valid|invalid)$ content: ^[\s\S]+$ mandatory_links: - violates: ^feat_arc_sta__[0-9a-z_]+$ + violates: feat_arc_sta optional_options: mitigation_issue: ^https://github.com/.*$ optional_links: - mitigated_by: ^(feat_req__.*|aou_req__.*)$ + mitigated_by: feat_req, aou_req parts: 3 # req-Id: tool_req__docs_saf_types @@ -663,14 +663,14 @@ needs_types: content: ^[\s\S]+$ mandatory_links: # req-Id: tool_req__docs_saf_attrs_violates - violates: ^feat_arc_sta__[0-9a-z_]+$ + violates: feat_arc_sta optional_options: # req-Id: 
tool_req__docs_saf_attrs_mitigation_issue mitigation_issue: ^https://github.com/.*$ optional_links: # req-Id: tool_req__docs_saf_attrs_mitigated_by # (only mandatory once valid status == valid) - mitigated_by: ^(feat_req__.*|aou_req__.*)$ + mitigated_by: feat_req, aou_req tags: - dependent_failure_analysis - safety_analysis @@ -693,11 +693,11 @@ needs_types: mitigation_issue: ^https://github.com/.*$ mandatory_links: # req-Id: tool_req__docs_saf_attrs_violates - violates: ^comp_arc_sta__[0-9a-z_]+$ + violates: comp_arc_sta optional_links: # req-Id: tool_req__docs_saf_attrs_mitigated_by # (only mandatory once valid status == valid) - mitigated_by: ^(comp_req__.*|aou_req__.*)$ + mitigated_by: comp_req, aou_req tags: - dependent_failure_analysis - safety_analysis @@ -721,11 +721,11 @@ needs_types: mitigation_issue: ^https://github.com/.*$ mandatory_links: # req-Id: tool_req__docs_saf_attrs_violates - violates: ^feat_arc_dyn__[0-9a-z_]+$ + violates: feat_arc_dyn optional_links: # req-Id: tool_req__docs_saf_attrs_mitigated_by # (only mandatory once valid status == valid) - mitigated_by: ^(feat_req__.*|aou_req__.*)$ + mitigated_by: feat_req, aou_req tags: - failure_mode_effects_analysis - safety_analysis @@ -748,9 +748,9 @@ needs_types: mitigation_issue: ^https://github.com/.*$ mandatory_links: # req-Id: tool_req__docs_saf_attrs_violates - violates: ^comp_arc_dyn__[0-9a-z_]+$ + violates: comp_arc_dyn optional_links: - mitigated_by: ^(comp_req__.*|aou_req__.*)$ + mitigated_by: comp_req, aou_req tags: - failure_mode_effects_analysis - safety_analysis diff --git a/src/extensions/score_metamodel/metamodel_types.py b/src/extensions/score_metamodel/metamodel_types.py index 8eee3f06..8a1cf7c9 100644 --- a/src/extensions/score_metamodel/metamodel_types.py +++ b/src/extensions/score_metamodel/metamodel_types.py @@ -11,6 +11,8 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* +from __future__ import 
annotations + from dataclasses import dataclass, field from sphinx_needs.config import NeedType @@ -32,6 +34,7 @@ class ScoreNeedType(NeedType): mandatory_options: dict[str, str] optional_options: dict[str, str] - # Holds a regex (str) - mandatory_links: dict[str, str] - optional_links: dict[str, str] + # Holds either regexes (str) or a list of other need types (list of ScoreNeedType). + # One or the other for simplicity, no mixing. + mandatory_links: dict[str, str] | dict[str, list[ScoreNeedType]] + optional_links: dict[str, str] | dict[str, list[ScoreNeedType]] diff --git a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst index a3cd9c07..32203f36 100644 --- a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst +++ b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst @@ -33,7 +33,7 @@ .. Required link `satisfies` refers to wrong requirement type -#EXPECT: feat_req__abce.satisfies (['std_wp__test__abce']): does not follow pattern `^stkh_req__.*$`. +#EXPECT: feat_req__abce: references 'std_wp__test__abce' as 'satisfies', but it must reference Stakeholder Requirement (stkh_req). .. feat_req:: Child requirement :id: feat_req__abce @@ -176,7 +176,7 @@ -.. +.. This Test can not be tested at the moment without enabeling that optional checks are also linked. TODO: Re-enable this check .. Negative Test: Linked to a non-allowed requirement type. @@ -191,7 +191,7 @@ .. Negative Test: Linked to a non-allowed requirement type. -#EXPECT: feat_saf_fmea__child__26.violates (['comp_req__child__ASIL_B']): does not follow pattern `^feat_arc_dyn__[0-9a-z_]+$`. +#EXPECT: feat_saf_fmea__child__26: references 'comp_req__child__ASIL_B' as 'violates', but it must reference Feature Sequence Diagram (feat_arc_dyn). .. feat_saf_fmea:: Child requirement 26 :id: feat_saf_fmea__child__26 @@ -504,4 +504,3 @@ .. 
std_wp:: This is a test :id: std_wp__test_content - diff --git a/src/extensions/score_metamodel/yaml_parser.py b/src/extensions/score_metamodel/yaml_parser.py index a0a91287..de89fd9d 100644 --- a/src/extensions/score_metamodel/yaml_parser.py +++ b/src/extensions/score_metamodel/yaml_parser.py @@ -144,9 +144,7 @@ def _parse_needs_types( return needs_types -def _parse_links( - links_dict: dict[str, Any], -) -> list[dict[str, str]]: +def _parse_links(links_dict: dict[str, dict[str, str]]) -> list[dict[str, str]]: """ Generate 'needs_extra_links' for sphinx-needs. From 305c2380733740f787cf90b0adef1d6f46ab5fc1 Mon Sep 17 00:00:00 2001 From: Oliver Mueller Date: Tue, 30 Sep 2025 13:49:52 +0200 Subject: [PATCH 140/231] Resolve issues with local RST tests (#266) Co-authored-by: Sameer Srivastava --- .../score_metamodel/external_needs.py | 30 ++++++++++++------- .../score_metamodel/tests/__init__.py | 16 ++++++---- .../score_metamodel/tests/test_standards.py | 10 +++---- 3 files changed, 34 insertions(+), 22 deletions(-) diff --git a/src/extensions/score_metamodel/external_needs.py b/src/extensions/score_metamodel/external_needs.py index 1e94388e..b30f965b 100644 --- a/src/extensions/score_metamodel/external_needs.py +++ b/src/extensions/score_metamodel/external_needs.py @@ -76,17 +76,27 @@ def parse_external_needs_sources_from_bazel_query() -> list[ExternalNeedsSource] When running with Bazel, we pass the `external_needs_source` config value from the bazel config. """ + try: + logger.debug( + "Detected execution without Bazel. Fetching external needs config..." + ) + # Currently dependencies are stored in the top level BUILD file. + # We could parse it or query bazel. + # Parsing would be MUCH faster, but querying bazel would be more robust. 
+ p = subprocess.run( + ["bazel", "query", "labels(data, //:docs)"], + check=True, + capture_output=True, + text=True, + ) + except (subprocess.CalledProcessError, FileNotFoundError) as e: + logger.warning( + "Bazel query failed or Bazel not found. " + "Falling back to empty external needs. (%s)", + e, + ) + return [] - logger.debug("Detected execution without Bazel. Fetching external needs config...") - # Currently dependencies are stored in the top level BUILD file. - # We could parse it or query bazel. - # Parsing would be MUCH faster, but querying bazel would be more robust. - p = subprocess.run( - ["bazel", "query", "labels(data, //:docs)"], - check=True, - capture_output=True, - text=True, - ) res = [ res for line in p.stdout.splitlines() diff --git a/src/extensions/score_metamodel/tests/__init__.py b/src/extensions/score_metamodel/tests/__init__.py index 839efe2b..27055fa2 100644 --- a/src/extensions/score_metamodel/tests/__init__.py +++ b/src/extensions/score_metamodel/tests/__init__.py @@ -59,9 +59,11 @@ def assert_warning(self, expected_substring: str, expect_location: bool = True): args, kwargs = self._mock_logger.warning.call_args log_message = args[0] - assert expected_substring in log_message, f"Expected substring '{ - expected_substring - }' not found in log message: '{log_message}'" + assert expected_substring in log_message, ( + "Expected substring " + f"'{expected_substring}' " + f"not found in log message: '{log_message}'" + ) # All our checks shall report themselves as score_metamodel checks assert kwargs["type"] == "score_metamodel" @@ -84,9 +86,11 @@ def assert_info(self, expected_substring: str, expect_location: bool = True): args, kwargs = self._mock_logger.info.call_args log_message = args[0] - assert expected_substring in log_message, f"Expected substring '{ - expected_substring - }' not found in log message: '{log_message}'" + assert expected_substring in log_message, ( + "Expected substring " + f"'{expected_substring}' " + f"not found in 
log message: '{log_message}'" + ) # All our checks shall report themselves as score_metamodel checks assert kwargs["type"] == "score_metamodel" diff --git a/src/extensions/score_metamodel/tests/test_standards.py b/src/extensions/score_metamodel/tests/test_standards.py index 91b19790..aa6109b9 100644 --- a/src/extensions/score_metamodel/tests/test_standards.py +++ b/src/extensions/score_metamodel/tests/test_standards.py @@ -454,9 +454,8 @@ def test_my_pie_linked_standard_requirements(self): assert results == [ 1, 1, - ], f"For function my_pie_linked_standard_requirements expected [1, 1] but got { - results - }" + ], "For function my_pie_linked_standard_requirements expected [1, 1] but got " + f"{results}" def test_my_pie_linked_standard_workproducts(self): """ @@ -508,9 +507,8 @@ def test_my_pie_linked_standard_workproducts(self): assert results == [ 1, 1, - ], f"For function my_pie_linked_standard_workproducts expected [1, 1] but got { - results - }" + ], "For function my_pie_linked_standard_workproducts expected [1, 1] but got " + f"{results}" def test_my_pie_workproducts_contained_in_exactly_one_workflow(self): """ From 8c99c5adde070385339600326ffe54684f77a13d Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Tue, 7 Oct 2025 10:17:11 +0200 Subject: [PATCH 141/231] adjust complies link validation (#268) --- .../extensions/rst_filebased_testing.md | 4 +- src/extensions/score_metamodel/__init__.py | 28 +-- .../score_metamodel/checks/check_options.py | 202 +++++++----------- src/extensions/score_metamodel/log.py | 20 ++ src/extensions/score_metamodel/metamodel.yaml | 12 +- .../score_metamodel/metamodel_types.py | 10 +- .../tests/rst/options/gd_req_comp.rst | 39 ++++ .../rst/options/test_options_options.rst | 10 +- .../tests/rst/options/wp_comp.rst | 63 ++++++ .../tests/test_rules_file_based.py | 28 ++- 10 files changed, 255 insertions(+), 161 deletions(-) create mode 100644 src/extensions/score_metamodel/tests/rst/options/gd_req_comp.rst create mode 100644 
src/extensions/score_metamodel/tests/rst/options/wp_comp.rst diff --git a/docs/internals/extensions/rst_filebased_testing.md b/docs/internals/extensions/rst_filebased_testing.md index 8016989b..3fa57fee 100644 --- a/docs/internals/extensions/rst_filebased_testing.md +++ b/docs/internals/extensions/rst_filebased_testing.md @@ -43,11 +43,11 @@ One or more Sphinx-Needs directives needed for the **Example:** #CHECK: check_options - #EXPECT: std_wp__test__abcd: is missing required option: `status`. + #EXPECT: std_wp__test__abcd: is missing required attribute: `status`. .. std_wp:: Test requirement :id: std_wp__test__abcd This example verifies that the warning message -*std_wp__test__abcd: is missing required option: \`status\`* +*std_wp__test__abcd: is missing required attribute: \`status\`* is shown during the Sphinx build. Only the *check_options* check is enabled. diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 5897cfec..89a27a8f 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -181,19 +181,22 @@ def _resolve_linkable_types( link_value: str, current_need_type: ScoreNeedType, needs_types: list[ScoreNeedType], -) -> list[ScoreNeedType]: +) -> list[ScoreNeedType | str]: needs_types_dict = {nt["directive"]: nt for nt in needs_types} link_values = [v.strip() for v in link_value.split(",")] - linkable_types: list[ScoreNeedType] = [] + linkable_types: list[ScoreNeedType | str] = [] for v in link_values: - target_need_type = needs_types_dict.get(v) - if target_need_type is None: - logger.error( - f"In metamodel.yaml: {current_need_type['directive']}, " - f"link '{link_name}' references unknown type '{v}'." 
- ) + if v.startswith("^"): + linkable_types.append(v) # keep regex as-is else: - linkable_types.append(target_need_type) + target_need_type = needs_types_dict.get(v) + if target_need_type is None: + logger.error( + f"In metamodel.yaml: {current_need_type['directive']}, " + f"link '{link_name}' references unknown type '{v}'." + ) + else: + linkable_types.append(target_need_type) return linkable_types @@ -219,10 +222,9 @@ def postprocess_need_links(needs_types_list: list[ScoreNeedType]): for link_name, link_value in link_dict.items(): assert isinstance(link_value, str) # so far all of them are strings - if not link_value.startswith("^"): - link_dict[link_name] = _resolve_linkable_types( # pyright: ignore[reportArgumentType] - link_name, link_value, need_type, needs_types_list - ) + link_dict[link_name] = _resolve_linkable_types( # pyright: ignore[reportArgumentType] + link_name, link_value, need_type, needs_types_list + ) def setup(app: Sphinx) -> dict[str, str | bool]: diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index 42953c67..a182320d 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -18,6 +18,7 @@ default_options, local_check, ) +from score_metamodel.metamodel_types import AllowedLinksType from sphinx.application import Sphinx from sphinx_needs.data import NeedsInfoType @@ -30,13 +31,20 @@ def get_need_type(needs_types: list[ScoreNeedType], directive: str) -> ScoreNeed raise ValueError(f"Need type {directive} not found in needs_types") -def _normalize_values(raw_value: str | list[str] | None) -> list[str]: +def _get_normalized( + need: NeedsInfoType, key: str, remove_prefix: bool = False +) -> list[str]: """Normalize a raw value into a list of strings.""" - if raw_value is None: + raw_value = need.get(key, None) + if not raw_value: return [] if isinstance(raw_value, str): + if remove_prefix: + return 
[_remove_namespace_prefix_(raw_value)] return [raw_value] if isinstance(raw_value, list) and all(isinstance(v, str) for v in raw_value): + if remove_prefix: + return [_remove_namespace_prefix_(v) for v in raw_value] return raw_value raise ValueError @@ -53,118 +61,90 @@ def _validate_value_pattern( """ try: return re.match(pattern, value) is not None - except TypeError as e: + except Exception as e: raise TypeError( f"Error in metamodel.yaml at {need['type']}->{field}: " f"pattern `{pattern}` is not a valid regex pattern." ) from e -def _log_option_warning( - need: NeedsInfoType, +def _remove_namespace_prefix_(word: str) -> str: + # If the word starts with uppercase letters followed by an underscore, remove them. + return re.sub(r"^[A-Z]+_", "", word) + + +def validate_options( log: CheckLogger, - field_type: str, - allowed_directives: list[ScoreNeedType] | None, - field: str, - value: str | list[str], - allowed_value: str | list[str], - required: bool, -): - if field_type == "link": - if allowed_directives: - dirs = " or ".join( - f"{d['title']} ({d['directive']})" for d in allowed_directives - ) - msg = f"but it must reference {dirs}." - else: - msg = f"which does not follow pattern `{allowed_value}`." - - # warning_for_option will print all the values. This way the specific - # problematic value is highlighted in the message. - # This is especially useful if multiple values are given. - msg = f"references '{value}' as '{field}', {msg}" - log.warning_for_need( - need, - msg, - # TODO: Errors in optional links are non fatal for now - is_new_check=not required, - ) - else: - msg = f"does not follow pattern `{allowed_value}`." 
- log.warning_for_option( - need, - field, - msg, - is_new_check=False, - ) - - -def validate_fields( + need_type: ScoreNeedType, need: NeedsInfoType, - log: CheckLogger, - fields: dict[str, str] | dict[str, list[ScoreNeedType]], - required: bool, - field_type: str, - allowed_prefixes: list[str], ): """ - Validates that fields (options or links) in a need match their expected patterns. - - :param need: The need object containing the data. - :param log: Logger for warnings. - :param fields: A dictionary of field names and their regex patterns. - :param required: Whether the fields are required (True) or optional (False). - :param field_type: A string indicating the field type ('option' or 'link'). + Validates that options in a need match their expected patterns. """ - def remove_prefix(word: str, prefixes: list[str]) -> str: - # Memory and allocation wise better to use a generator here. - # Removes any prefix allowed by configuration, if prefix is there. - return [word.removeprefix(prefix) for prefix in prefixes][0] - - for field, allowed_value in fields.items(): - raw_value: str | list[str] | None = need.get(field, None) - if raw_value in [None, [], ""]: - if required: + def _validate(attributes_to_allowed_values: dict[str, str], mandatory: bool): + for attribute, allowed_regex in attributes_to_allowed_values.items(): + values = _get_normalized(need, attribute) + if mandatory and not values: log.warning_for_need( - need, f"is missing required {field_type}: `{field}`." + need, f"is missing required attribute: `{attribute}`." ) - continue # Nothing to validate if not present - - values = _normalize_values(raw_value) - - # Links can be configured to reference other need types instead of regex. - # However, in order to not "load" the other need, we'll check the regex as - # it does encode the need type (at least in S-CORE metamodel). - # Therefore this can remain a @local_check! 
- # TypedDicts cannot be used with isinstance, so check for dict and required keys - if isinstance(allowed_value, list): - assert field_type == "link" # sanity check - # patterns holds a list of allowed need types - allowed_directives = allowed_value - allowed_value = ( - "(" - + "|".join(d["mandatory_options"]["id"] for d in allowed_directives) - + ")" + + for value in values: + if not _validate_value_pattern(value, allowed_regex, need, attribute): + log.warning_for_option( + need, attribute, f"does not follow pattern `{allowed_regex}`." + ) + + _validate(need_type["mandatory_options"], True) + _validate(need_type["optional_options"], False) + + +def validate_links( + log: CheckLogger, + need_type: ScoreNeedType, + need: NeedsInfoType, +): + """ + Validates that links in a need match the expected types or regexes. + """ + + def _validate( + attributes_to_allowed_values: AllowedLinksType, + mandatory: bool, + treat_as_info: bool = False, + ): + for attribute, allowed_values in attributes_to_allowed_values.items(): + values = _get_normalized(need, attribute, remove_prefix=True) + if mandatory and not values: + log.warning_for_need(need, f"is missing required link: `{attribute}`.") + + allowed_regex = "|".join( + [ + v if isinstance(v, str) else v["mandatory_options"]["id"] + for v in allowed_values + ] ) - else: - allowed_directives = None - - # regex based validation - for value in values: - if allowed_prefixes: - value = remove_prefix(value, allowed_prefixes) - if not _validate_value_pattern(value, allowed_value, need, field): - _log_option_warning( - need, - log, - field_type, - allowed_directives, - field, - value, - allowed_value, - required, - ) + + # regex based validation + for value in values: + if not _validate_value_pattern(value, allowed_regex, need, attribute): + log.warning_for_link( + need, + attribute, + value, + [ + av + if isinstance(av, str) + else f"{av['title']} ({av['directive']})" + for av in allowed_values + ], + allowed_regex, + 
is_new_check=treat_as_info, + ) + + _validate(need_type["mandatory_links"], True) + _validate(need_type["optional_links"], False, treat_as_info=True) # req-Id: tool_req__docs_req_attr_reqtype @@ -185,28 +165,8 @@ def check_options( """ need_type = get_need_type(app.config.needs_types, need["type"]) - # If undefined this is an empty list - allowed_prefixes = app.config.allowed_external_prefixes - - # Validate Options and Links - field_validations: list[ - tuple[str, dict[str, str] | dict[str, list[ScoreNeedType]], bool] - ] = [ - ("option", need_type["mandatory_options"], True), - ("option", need_type["optional_options"], False), - ("link", need_type["mandatory_links"], True), - ("link", need_type["optional_links"], False), - ] - - for field_type, field_values, is_required in field_validations: - validate_fields( - need, - log, - field_values, - required=is_required, - field_type=field_type, - allowed_prefixes=allowed_prefixes, - ) + validate_options(log, need_type, need) + validate_links(log, need_type, need) @local_check diff --git a/src/extensions/score_metamodel/log.py b/src/extensions/score_metamodel/log.py index d2953dec..7f433053 100644 --- a/src/extensions/score_metamodel/log.py +++ b/src/extensions/score_metamodel/log.py @@ -55,6 +55,26 @@ def warning_for_option( location = CheckLogger._location(need, self._prefix) self._log_message(full_msg, location, is_new_check) + def warning_for_link( + self, + need: NeedsInfoType, + option: str, + problematic_value: str, + allowed_values: list[str], + allowed_regex: str, + is_new_check: bool = False, + ): + msg = ( + f"references '{problematic_value}' as '{option}', " + f"but it must reference {' or '.join(allowed_values)}." + ) + # Sometimes printing this helps, but most often it just clutters the log. + # Not sure yet. 
+ # if allowed_regex: + # msg += f" (allowed pattern: `{allowed_regex}`)" + + self.warning_for_need(need, msg, is_new_check=is_new_check) + def warning_for_need( self, need: NeedsInfoType, msg: str, is_new_check: bool = False ): diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 219afede..83e4a126 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -129,7 +129,7 @@ needs_types: # req-Id: tool_req__docs_req_link_satisfies_allowed # TODO: fix once process_description is fixed satisfies: workflow - complies: ^std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ + complies: std_req tags: - requirement parts: 2 @@ -139,7 +139,7 @@ needs_types: mandatory_options: status: ^(valid|draft)$ optional_links: - complies: ^std_req__(iso26262|isodae21434|isopas8926|aspice_40)__.*$ + complies: std_req parts: 2 gd_chklst: @@ -147,7 +147,7 @@ needs_types: mandatory_options: status: ^(valid|draft)$ optional_links: - complies: ^std_req__(iso26262|isodae21434|isopas8926|aspice_40)__.*$ + complies: std_req parts: 2 gd_guidl: @@ -155,7 +155,7 @@ needs_types: mandatory_options: status: ^(valid|draft)$ optional_links: - complies: ^std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ + complies: std_req parts: 2 gd_method: @@ -164,7 +164,7 @@ needs_types: mandatory_options: status: ^(valid|draft)$ optional_links: - complies: ^std_req__(iso26262|isosae21434|isopas8926|aspice_40)__.*$ + complies: std_req parts: 2 # S-CORE Workproduct @@ -174,7 +174,7 @@ needs_types: mandatory_options: status: ^(valid|draft)$ optional_links: - complies: ^std_(wp__iso26262|wp__isosae21434|wp__isopas8926|iic_aspice_40)__.*$ + complies: std_wp, ^std_req__aspice_40__iic.*$ parts: 2 # Role diff --git a/src/extensions/score_metamodel/metamodel_types.py b/src/extensions/score_metamodel/metamodel_types.py index 8a1cf7c9..15a405a8 100644 --- 
a/src/extensions/score_metamodel/metamodel_types.py +++ b/src/extensions/score_metamodel/metamodel_types.py @@ -27,6 +27,10 @@ class ProhibitedWordCheck: types: list[str] = field(default_factory=list) +# links to either regexes (str) or a other need types (list of ScoreNeedType). +AllowedLinksType = dict[str, list["str | ScoreNeedType"]] + + class ScoreNeedType(NeedType): tags: list[str] parts: int @@ -34,7 +38,5 @@ class ScoreNeedType(NeedType): mandatory_options: dict[str, str] optional_options: dict[str, str] - # Holds either regexes (str) or a list of other need types (list of ScoreNeedType). - # One or the other for simplicity, no mixing. - mandatory_links: dict[str, str] | dict[str, list[ScoreNeedType]] - optional_links: dict[str, str] | dict[str, list[ScoreNeedType]] + mandatory_links: AllowedLinksType + optional_links: AllowedLinksType diff --git a/src/extensions/score_metamodel/tests/rst/options/gd_req_comp.rst b/src/extensions/score_metamodel/tests/rst/options/gd_req_comp.rst new file mode 100644 index 00000000..ddc4ad80 --- /dev/null +++ b/src/extensions/score_metamodel/tests/rst/options/gd_req_comp.rst @@ -0,0 +1,39 @@ +.. + # ******************************************************************************* + # Copyright (c) 2025 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + +#CHECK: check_options + +.. std_req:: Standard requirement + :id: std_req__iso26262__001 + +# Expect to warning with "complies" +#EXPECT-NOT: complies + +.. 
gd_req:: No Link is ok, since complies is optional + :id: gd_req__001 + +# Expect to warning with "complies" +#EXPECT-NOT: complies + +.. gd_req:: Correct link to std_req + :id: gd_req__002 + :complies: std_req__iso26262__001 + +#FIXME: this will currently be printed as an INFO, and not as a warning. +# Re-enable EXCPECT once we can enable that as a warning. +#EXP-ECT: gd_req__003: references 'gd_req__001' as 'complies', but it must reference Standard Requirement (std_req). + +.. gd_req:: Cannot refer to non std_req element + :id: gd_req__003 + :complies: gd_req__001 diff --git a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst index 32203f36..78923817 100644 --- a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst +++ b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst @@ -16,7 +16,7 @@ .. Required option: `status` is missing -#EXPECT: std_wp__test__abcd: is missing required option: `status`. +#EXPECT: std_wp__test__abcd: is missing required attribute: `status`. .. std_wp:: This is a test :id: std_wp__test__abcd @@ -24,7 +24,7 @@ .. All required options are present -#EXPECT-NOT: std_wp__test__abcd: is missing required option +#EXPECT-NOT: std_wp__test__abcd: is missing required attribute .. std_wp:: This is a test :id: std_wp__test__abce @@ -478,7 +478,7 @@ .. Ensuring that empty content is detected correctly -.. #EXPECT: stkh_req__test_no_content: is missing required option: `content` +.. #EXPECT: stkh_req__test_no_content: is missing required attribute: `content` .. .. .. stkh_req:: This is a test .. :id: stkh_req__test_no_content @@ -488,7 +488,7 @@ .. Ensuring that non empty content is detected correctly -#EXPECT-NOT: stkh_req__test_content: is missing required option: `content` +#EXPECT-NOT: stkh_req__test_content: is missing required attribute: `content` .. 
stkh_req:: This is a test :id: stkh_req__test_content @@ -500,7 +500,7 @@ .. This should not trigger, as 'std_wp' is not checked for content -#EXPECT-NOT: std_wp__test_content: is missing required option: `content` +#EXPECT-NOT: std_wp__test_content: is missing required attribute: `content` .. std_wp:: This is a test :id: std_wp__test_content diff --git a/src/extensions/score_metamodel/tests/rst/options/wp_comp.rst b/src/extensions/score_metamodel/tests/rst/options/wp_comp.rst new file mode 100644 index 00000000..f3c7dea5 --- /dev/null +++ b/src/extensions/score_metamodel/tests/rst/options/wp_comp.rst @@ -0,0 +1,63 @@ +.. + # ******************************************************************************* + # Copyright (c) 2025 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + +#CHECK: check_options + +.. std_wp:: Standard work product + :id: std_wp__iso26262__001 + +.. std_req:: Standard requirement + :id: std_req__iso26262__001 + +.. std_req:: Standard IIC requirement + :id: std_req__aspice_40__iic_001 + +---- + +# Expect no warning with "complies" +#EXPECT-NOT: complies + +.. workproduct:: No Link is ok, since complies is optional + :id: wp__001 + +--- + +# Expect no warning with "complies" +#EXPECT-NOT: complies + +.. workproduct:: Linking to std_wp is allowed + :id: wp__002 + :complies: std_wp__iso26262__001 + +--- + +#FIXME: this will currently be printed as an INFO, and not as a warning. +# Re-enable EXCPECT once we can enable that as a warning. 
+#EXP-ECT: wp__003: references 'std_req__iso26262__001' as 'complies', but it must reference Standard Work Product (std_wp) or ^std_req__aspice_40__iic.*$. + +.. workproduct:: Cannot refer to std_req element + :id: wp__003 + :complies: std_req__iso26262__001 + +--- + + +# Expect no warning with "complies" +#EXPECT-NOT: complies + +.. workproduct:: But it can refer to std_req if it is an IIC requirement + :id: wp__003 + :complies: std_req__aspice_40__iic_001 + +--- diff --git a/src/extensions/score_metamodel/tests/test_rules_file_based.py b/src/extensions/score_metamodel/tests/test_rules_file_based.py index 7dbd4c38..050a60b5 100644 --- a/src/extensions/score_metamodel/tests/test_rules_file_based.py +++ b/src/extensions/score_metamodel/tests/test_rules_file_based.py @@ -162,13 +162,13 @@ def warning_matches( warning_info: WarningInfo, expected_message: str, warnings: list[str], -) -> bool: +) -> str | None: ### Checks if any element of the warning list is includes the given warning info. - # It returns True if found otherwise False. + # It returns the matched warning or None if no match is found. for warning in filter_warnings_by_position(rst_data, warning_info, warnings): if expected_message in warning: - return True - return False + return warning + return None @pytest.mark.parametrize("rst_file", RST_FILES) @@ -193,16 +193,24 @@ def test_rst_files( # Collect the warnings warnings = app.warning.getvalue().splitlines() - # print(f"Warnings: {warnings}") # Check if the expected warnings are present for warning_info in rst_data.warning_infos: for w in warning_info.expected: if not warning_matches(rst_data, warning_info, w, warnings): actual = filter_warnings_by_position(rst_data, warning_info, warnings) - raise AssertionError( - f"Expected warning: '{w}' not found. 
Received: {actual}" - ) + loc = f"{rst_data.filename}:{warning_info.lineno}" + msg = f"{loc} Expected warning not found:\n" + msg += f" Expected: '{w}'\n" + msg += " Actual:\n" + for a in actual: + msg += f" - {a}\n" + pytest.fail(msg, pytrace=False) + for w in warning_info.not_expected: - if warning_matches(rst_data, warning_info, w, warnings): - raise AssertionError(f"Unexpected warning: '{w}' found") + if unexpected := warning_matches(rst_data, warning_info, w, warnings): + loc = f"{rst_data.filename}:{warning_info.lineno}" + msg = f"{loc} Unexpected warning found:\n" + msg += f" Not Expected: '{w}'\n" + msg += f" Actual: '{unexpected}'\n" + pytest.fail(msg, pytrace=False) From afae3b37c73705bc744ba8fdf71ba64bcc66e9a6 Mon Sep 17 00:00:00 2001 From: Andreas Zwinkau <95761648+a-zw@users.noreply.github.com> Date: Tue, 7 Oct 2025 13:53:08 +0200 Subject: [PATCH 142/231] Update tool requirements (#271) * Update processes to 1.2.0 * Add table for unsatisfied requirements Helpful to check for gaps. * tool requirements may satisfy stakeholder requirements as well Changed in score_process 1.1.2. 
--- MODULE.bazel | 2 +- docs/requirements/process_overview.rst | 23 +++++++++++++++++++ docs/requirements/requirements.rst | 8 +++---- src/extensions/score_metamodel/metamodel.yaml | 2 +- 4 files changed, 29 insertions(+), 6 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index e099e186..3410db25 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -97,7 +97,7 @@ http_file( # Checker rule for CopyRight checks/fixes # docs dependency -bazel_dep(name = "score_process", version = "1.1.1") +bazel_dep(name = "score_process", version = "1.2.0") # Add Linter bazel_dep(name = "rules_multitool", version = "1.9.0") diff --git a/docs/requirements/process_overview.rst b/docs/requirements/process_overview.rst index 64e120eb..488411c6 100644 --- a/docs/requirements/process_overview.rst +++ b/docs/requirements/process_overview.rst @@ -4,6 +4,29 @@ Process Requirements Overview =============================== +Unsatisfied Tool Requirements in Process +######################################## + +The following table lists tool requirements from our process which are not satisfied. + +.. needtable:: + :types: gd_req + :columns: id;title;satisfied by + :colwidths: 2;4;1 + :style: table + :filter_warning: No unsatisfied requirements, no table. ☺️ + + results = [] + for need in needs.filter_types(["gd_req"]): + if not need["id"].startswith("PROCESS_gd_req__tool_"): + continue + if len(need["satisfies_back"]) >= 1: + continue + results.append(need) + +All our Tool Requirements +######################### + .. needtable:: :types: tool_req :columns: satisfies as "Process Requirement" ;id as "Tool Requirement";implemented;source_code_link diff --git a/docs/requirements/requirements.rst b/docs/requirements/requirements.rst index 3c142971..7a84634c 100644 --- a/docs/requirements/requirements.rst +++ b/docs/requirements/requirements.rst @@ -447,14 +447,14 @@ Mapping .. 
table:: :widths: auto - ================================ =========================== + ================================ =================================================== Source Type Allowed Link Target - ================================ =========================== + ================================ =================================================== Feature Requirements Stakeholder Requirements Component Requirements Feature Requirements Process Requirements Workflows - Tooling Requirements Process Requirements - ================================ =========================== + Tooling Requirements Process Requirements, Stakeholder Requirements + ================================ =================================================== .. note:: Certain tool requirements do not have a matching process requirement. diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 83e4a126..bdd84b5d 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -339,7 +339,7 @@ needs_types: optional_links: # req-Id: tool_req__docs_req_link_satisfies_allowed # TODO: make it mandatory - satisfies: gd_req + satisfies: gd_req, stkh_req optional_options: codelink: ^.*$ tags: ^.*$ From 5ec612c4f4e8a4dfad80818795153256bd468043 Mon Sep 17 00:00:00 2001 From: Andreas Zwinkau <95761648+a-zw@users.noreply.github.com> Date: Tue, 7 Oct 2025 15:06:15 +0200 Subject: [PATCH 143/231] Fix flaky test (#270) --- .../test_source_code_link_integration.py | 65 ++++++++----------- 1 file changed, 27 insertions(+), 38 deletions(-) diff --git a/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py b/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py index 52af7cc0..44ea35a6 100644 --- a/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py +++ 
b/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py @@ -18,7 +18,7 @@ from collections import Counter from collections.abc import Callable from pathlib import Path -from typing import Any, cast +from typing import Any import pytest from pytest import TempPathFactory @@ -307,14 +307,14 @@ def example_source_link_text_all_ok(sphinx_base_dir: Path): return { "TREQ_ID_1": [ NeedLink( - file=Path("src/implementation1.py"), - line=3, + file=Path("src/implementation2.py"), + line=5, tag="#" + " req-Id:", need="TREQ_ID_1", full_line="#" + " req-Id: TREQ_ID_1", ), NeedLink( - file=Path("src/implementation2.py"), + file=Path("src/implementation1.py"), line=3, tag="#" + " req-Id:", need="TREQ_ID_1", @@ -330,6 +330,7 @@ def example_source_link_text_all_ok(sphinx_base_dir: Path): full_line="#" + " req-Id: TREQ_ID_2", ) ], + "TREQ_ID_3": [], } @@ -359,9 +360,9 @@ def example_test_link_text_all_ok(sphinx_base_dir: Path): ), DataForTestLink( name="test_error_handling", - file=Path("src/tests/testfile_2.py"), + file=Path("src/testfile_1.py"), need="TREQ_ID_2", - line=33, + line=38, verify_type="partially", result="passed", result_text="", @@ -379,7 +380,7 @@ def example_test_link_text_all_ok(sphinx_base_dir: Path): ), DataForTestLink( name="test_error_handling", - file=Path("src/test/testfile_2.py"), + file=Path("src/testfile_1.py"), need="TREQ_ID_3", line=38, verify_type="partially", @@ -471,9 +472,6 @@ def compare_grouped_json_files(file1: Path, golden_file: Path): ) -@pytest.mark.skip( - "Flaky test, see https://github.com/eclipse-score/docs-as-code/issues/226" -) def test_source_link_integration_ok( sphinx_app_setup: Callable[[], SphinxTestApp], example_source_link_text_all_ok: dict[str, list[NeedLink]], @@ -488,9 +486,7 @@ def test_source_link_integration_ok( os.environ["BUILD_WORKSPACE_DIRECTORY"] = str(sphinx_base_dir) app.build() ws_root = find_ws_root() - if ws_root is None: - # This should never happen - pytest.fail(f"WS_root is none. 
WS_root: {ws_root}") + assert ws_root is not None Needs_Data = SphinxNeedsData(app.env) needs_data = {x["id"]: x for x in Needs_Data.get_needs_view().values()} compare_json_files( @@ -507,34 +503,27 @@ def test_source_link_integration_ok( app.outdir / "score_scl_grouped_cache.json", sphinx_base_dir / ".expected_grouped.json", ) - # Testing TREQ_ID_1, TREQ_ID_2, TREQ_ID_3 # TODO: Is this actually a good test, or just a weird mock? - for i in range(1, 4): - # extra_options are only available at runtime - assert f"TREQ_ID_{i}" in needs_data - need_as_dict = cast(dict[str, object], needs_data[f"TREQ_ID_{i}"]) - # TODO: This probably isn't great. Should make this better. - if i != 3: - # Excluding 3 as this is a keyerror here - expected_code_link = make_source_link( - example_source_link_text_all_ok[f"TREQ_ID_{i}"] - ) - print(f"EXPECTED LINK CODE: {expected_code_link}") - actual_source_code_link = cast( - list[str], need_as_dict["source_code_link"] - ) - print(f"ACTUALL CODE LINK: {actual_source_code_link}") - assert set(expected_code_link) == set(actual_source_code_link) - expected_test_link = make_test_link( - example_test_link_text_all_ok[f"TREQ_ID_{i}"] + for i in (1, 2, 3): + treq_id = f"TREQ_ID_{i}" + assert treq_id in needs_data + treq_info = needs_data[treq_id] + print("Needs Data for", treq_id, ":", treq_info) + + # verify codelinks + expected_code_link = make_source_link( + example_source_link_text_all_ok[treq_id] + ) + actual_source_code_link = treq_info.get( + "source_code_link", "no source link" ) - # Compare contents, regardless of order. 
- print(f"NEED AS DICT: {need_as_dict}") - print(f"EXPECTED LINK TEST: {expected_test_link}") - actual_test_code_link = cast(list[str], need_as_dict["testlink"]) - print(f"ACTUALL TEST LINK: {actual_test_code_link}") - assert set(expected_test_link) == set(actual_test_code_link) + assert expected_code_link == actual_source_code_link, treq_id + + # verify testlinks + expected_test_link = make_test_link(example_test_link_text_all_ok[treq_id]) + actual_test_code_link = treq_info.get("testlink", "no test link") + assert expected_test_link == actual_test_code_link, treq_id finally: app.cleanup() From 2b39282e05d099c568bb03224f1b6c4d98b30c96 Mon Sep 17 00:00:00 2001 From: Anton Krivoborodov <63401640+antonkri@users.noreply.github.com> Date: Tue, 7 Oct 2025 22:28:03 +0200 Subject: [PATCH 144/231] update metamodel to follow process requirements (#263) Due to a circular dependency this implies a certain fulfils link is still optional until score_process documents are fixed. Co-authored-by: Andreas Zwinkau <95761648+a-zw@users.noreply.github.com> --- docs/requirements/requirements.rst | 2 +- src/extensions/score_metamodel/metamodel.yaml | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/docs/requirements/requirements.rst b/docs/requirements/requirements.rst index 7a84634c..d047336a 100644 --- a/docs/requirements/requirements.rst +++ b/docs/requirements/requirements.rst @@ -545,7 +545,7 @@ Architecture Attributes ==================================== ========================================== feat_arc_sta feat_req feat_arc_dyn feat_req - logic_arc_int comp_req + logic_arc_int feat_req comp_arc_sta comp_req comp_arc_dyn comp_req real_arc_int comp_req diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index bdd84b5d..f58a29ae 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -399,7 +399,6 @@ needs_types: status: ^(valid|invalid)$ 
mandatory_links: includes: ^logic_arc_int(_op)*__.+$ - optional_links: fulfils: feat_req tags: - architecture_element @@ -442,7 +441,7 @@ needs_types: status: ^(valid|invalid)$ optional_links: includes: logic_arc_int_op - fulfils: comp_req + fulfils: feat_req tags: - architecture_element - architecture_view @@ -501,10 +500,10 @@ needs_types: # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ optional_links: - implements: real_arc_int, real_arc_int_op - includes: comp_arc_sta - uses: real_arc_int, real_arc_int_op fulfils: comp_req + implements: logic_arc_int, real_arc_int_op + includes: comp_arc_sta + uses: logic_arc_int, real_arc_int_op tags: - architecture_element - architecture_view From e16f7814cb62f3688d9949597736a94ba5ece4dc Mon Sep 17 00:00:00 2001 From: Andreas Zwinkau <95761648+a-zw@users.noreply.github.com> Date: Wed, 8 Oct 2025 20:32:10 +0200 Subject: [PATCH 145/231] Implement validity attributes (#274) Derive requirements for validity attributes introduced in score_process 1.2.0. 
--- MODULE.bazel | 2 +- docs/requirements/requirements.rst | 26 ++++++++++++ src/extensions/score_metamodel/__init__.py | 3 +- .../score_metamodel/checks/check_options.py | 40 +++++++++++++++++++ src/extensions/score_metamodel/metamodel.yaml | 6 +++ .../tests/rst/attributes/test_validity.rst | 38 ++++++++++++++++++ .../rst/options/test_options_options.rst | 14 +++++++ .../tests/test_check_options.py | 7 ++++ 8 files changed, 134 insertions(+), 2 deletions(-) create mode 100644 src/extensions/score_metamodel/tests/rst/attributes/test_validity.rst diff --git a/MODULE.bazel b/MODULE.bazel index 3410db25..b9894f1b 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "1.3.0", + version = "1.4.0", compatibility_level = 1, ) diff --git a/docs/requirements/requirements.rst b/docs/requirements/requirements.rst index d047336a..6aa19ddb 100644 --- a/docs/requirements/requirements.rst +++ b/docs/requirements/requirements.rst @@ -427,6 +427,32 @@ Mapping No concept yet +.. tool_req:: Enforce validity attribute correctness + :id: tool_req__docs_req_attr_validity_correctness + :tags: Requirements + :implemented: PARTIAL + :parent_covered: YES + :satisfies: PROCESS_gd_req__req_validity + :status: valid + + Docs-as-Code shall enforce that the ``valid_from`` and ``valid_until`` attributes of stakeholder and feature requirements are correct. + + The format of a milestone is something like "v0.5" or "v1.0.1". + No suffixes like "-SNAPSHOT" or "-beta" are allowed. + +.. tool_req:: Enforce validity start is before end + :id: tool_req__docs_req_attr_validity_consistency + :tags: Requirements + :implemented: PARTIAL + :parent_covered: YES + :satisfies: PROCESS_gd_req__req_validity + :status: valid + + Docs-as-Code shall enforce that ``valid_from`` is before ``valid_until`` attribute in stakeholder and feature requirements. 
+ We consider "from" is inclusive but "until" is exclusive, so from v0.5 until v1.0 means valid for v0.5 but not for v1.0. + If either attribute is missing, no check is performed. + + ------------------------- 🔗 Links ------------------------- diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 89a27a8f..681d72c3 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -62,7 +62,8 @@ def parse_checks_filter(filter: str) -> list[str]: } for check in checks: assert check in all_check_names, ( - f"Check: '{check}' is not one of the defined local or graph checks" + f"Check: '{check}' is not one of the defined local or graph checks: " + + ", ".join(all_check_names) ) return checks diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index a182320d..e0b95dbb 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -207,3 +207,43 @@ def check_extra_options( extra_options_str = ", ".join(f"`{option}`" for option in extra_options) msg = f"has these extra options: {extra_options_str}." log.warning_for_need(need, msg) + + +def parse_milestone(value: str) -> tuple[int, int, int]: + """Parse a string like 'v0.5' or 'v1.0.0'. No suffixes.""" + match = re.match(r"v(\d+)(\.(\d+))?(\.(\d+))?$", value) + if not match: + raise ValueError(f"Invalid milestone format: {value}") + major = int(match.group(1)) + minor = int(match.group(3) or 0) + patch = int(match.group(5) or 0) + return (major, minor, patch) + + +# req-Id: tool_req__docs_req_attr_validity_consistency +@local_check +def check_validity_consistency( + app: Sphinx, + need: NeedsInfoType, + log: CheckLogger, +): + """ + Check if the attributes valid_from < valid_until. 
+ """ + if need["type"] not in ("stkh_req", "feat_req"): + return + + valid_from = need.get("valid_from", None) + valid_until = need.get("valid_until", None) + + if not valid_from or not valid_until: + return + + valid_from_version = parse_milestone(valid_from) + valid_until_version = parse_milestone(valid_until) + if valid_from_version >= valid_until_version: + msg = ( + "inconsistent validity: " + f"valid_from ({valid_from}) >= valid_until ({valid_until})." + ) + log.warning_for_need(need, msg) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index f58a29ae..bb6294b8 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -261,6 +261,9 @@ needs_types: # req-Id: tool_req__docs_req_attr_testcov testcovered: ^(YES|NO)$ hash: ^.*$ + # req-Id: tool_req__docs_req_attr_validity_correctness + valid_from: ^v(0|[1-9]\d*)\.(0|[1-9]\d*)(\.(0|[1-9]\d*))?$ + valid_until: ^v(0|[1-9]\d*)\.(0|[1-9]\d*)(\.(0|[1-9]\d*))?$ tags: - requirement - requirement_excl_process @@ -291,6 +294,9 @@ needs_types: # req-Id: tool_req__docs_req_attr_testcov testcovered: ^(YES|NO)$ hash: ^.*$ + # req-Id: tool_req__docs_req_attr_validity_correctness + valid_from: ^v(0|[1-9]\d*)\.(0|[1-9]\d*)(\.(0|[1-9]\d*))?$ + valid_until: ^v(0|[1-9]\d*)\.(0|[1-9]\d*)(\.(0|[1-9]\d*))?$ tags: - requirement - requirement_excl_process diff --git a/src/extensions/score_metamodel/tests/rst/attributes/test_validity.rst b/src/extensions/score_metamodel/tests/rst/attributes/test_validity.rst new file mode 100644 index 00000000..825b15fc --- /dev/null +++ b/src/extensions/score_metamodel/tests/rst/attributes/test_validity.rst @@ -0,0 +1,38 @@ +.. + # ******************************************************************************* + # Copyright (c) 2025 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. 
+ # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* +#CHECK: check_validity_consistency + + +#EXPECT: feat_req__random_id1: inconsistent validity: valid_from (v1.0) >= valid_until (v0.5). + +.. feat_req:: from after until + :id: feat_req__random_id1 + :valid_from: v1.0 + :valid_until: v0.5 + + +#EXPECT-NOT: feat_req__random_id2: inconsistent validity: valid_from (v0.5) >= valid_until (v1.0). + +.. feat_req:: until after from + :id: feat_req__random_id2 + :valid_from: v0.5 + :valid_until: v1.0 + + +#EXPECT: stkh_req__random_id1: inconsistent validity: valid_from (v1.0.1) >= valid_until (v0.5). + +.. stkh_req:: from after until for stakeholder requirement + :id: stkh_req__random_id1 + :valid_from: v1.0.1 + :valid_until: v0.5 diff --git a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst index 78923817..3a587e60 100644 --- a/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst +++ b/src/extensions/score_metamodel/tests/rst/options/test_options_options.rst @@ -504,3 +504,17 @@ .. std_wp:: This is a test :id: std_wp__test_content + + +#EXPECT: feat_req__random_id3.valid_from (2035-03): does not follow pattern + +.. feat_req:: milestone must be a version + :id: feat_req__random_id3 + :valid_from: 2035-03 + + +#EXPECT: feat_req__random_id4.valid_until (2035-03): does not follow pattern + +.. 
feat_req:: milestone must be a version + :id: feat_req__random_id4 + :valid_until: 2035-03 diff --git a/src/extensions/score_metamodel/tests/test_check_options.py b/src/extensions/score_metamodel/tests/test_check_options.py index 09603931..1814df40 100644 --- a/src/extensions/score_metamodel/tests/test_check_options.py +++ b/src/extensions/score_metamodel/tests/test_check_options.py @@ -20,6 +20,7 @@ from score_metamodel.checks.check_options import ( check_extra_options, check_options, + parse_milestone, ) from score_metamodel.tests import fake_check_logger, need from sphinx.application import Sphinx # type: ignore[import-untyped] @@ -118,3 +119,9 @@ def test_unknown_option_present_in_neither_req_opt_neither_opt_opt(self): "has these extra options: `other_option`.", expect_location=False, ) + + +def test_milestone_parsing(): + assert parse_milestone("v0.5") == (0, 5, 0) + assert parse_milestone("v1.0") == (1, 0, 0) + assert parse_milestone("v1.0.1") == (1, 0, 1) From 1161235d646d1bda8102d00542f393b7dc86f5c0 Mon Sep 17 00:00:00 2001 From: Andreas Zwinkau <95761648+a-zw@users.noreply.github.com> Date: Tue, 14 Oct 2025 08:49:22 +0200 Subject: [PATCH 146/231] Doc sources as public target (#277) This should be fully backwards-compatible and only provide an additional target :docs_sources to give Bazel users access to the documentation sources. Essentially, this enables an out-of-tree doc build like building the documentation from multiple repos together. 
--- docs.bzl | 43 +++++++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/docs.bzl b/docs.bzl index 665637b8..fc99f4be 100644 --- a/docs.bzl +++ b/docs.bzl @@ -39,8 +39,7 @@ load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") load("@pip_process//:requirements.bzl", "all_requirements", "requirement") -load("@rules_java//java:java_binary.bzl", "java_binary") -load("@rules_pkg//pkg:mappings.bzl", "pkg_files") +load("@rules_pkg//pkg:mappings.bzl", "pkg_files", "strip_prefix") load("@rules_pkg//pkg:tar.bzl", "pkg_tar") load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary", "sphinx_docs") load("@rules_python//sphinxdocs:sphinx_docs_library.bzl", "sphinx_docs_library") @@ -69,6 +68,27 @@ def docs(source_dir = "docs", data = [], deps = []): deps = deps, ) + pkg_files( + name = "docs_sources", + srcs = native.glob([ + source_dir + "/**/*.png", + source_dir + "/**/*.svg", + source_dir + "/**/*.md", + source_dir + "/**/*.rst", + source_dir + "/**/*.html", + source_dir + "/**/*.css", + source_dir + "/**/*.puml", + source_dir + "/**/*.need", + source_dir + "/**/*.yaml", + source_dir + "/**/*.json", + source_dir + "/**/*.csv", + source_dir + "/**/*.inc", + "more_docs/**/*.rst", + ], allow_empty = True), + strip_prefix = strip_prefix.from_pkg(), # avoid flattening of folders + visibility = ["//visibility:public"], + ) + py_binary( name = "docs", tags = ["cli_help=Build documentation:\nbazel run //:docs"], @@ -117,26 +137,9 @@ def docs(source_dir = "docs", data = [], deps = []): data = data, ) - # creates 'needs.json' build target sphinx_docs( name = "needs_json", - srcs = native.glob([ - # TODO: we do not need images etc to generate the json file. - "**/*.png", - "**/*.svg", - "**/*.md", - "**/*.rst", - "**/*.html", - "**/*.css", - "**/*.puml", - "**/*.need", - # Include the docs src itself - # Note: we don't use py_library here to make it as close as possible to docs:incremental. 
- "**/*.yaml", - "**/*.json", - "**/*.csv", - "**/*.inc", - ], exclude = ["**/tests/*"], allow_empty = True), + srcs = [":docs_sources"], config = ":" + source_dir + "/conf.py", extra_opts = [ "-W", From be78ba583478bf094c064d986af30296a9512b82 Mon Sep 17 00:00:00 2001 From: Marco Heinemann Date: Thu, 23 Oct 2025 12:10:53 +0200 Subject: [PATCH 147/231] =?UTF-8?q?=F0=9F=93=9A=20Docs=20for=20updating=20?= =?UTF-8?q?requirements=20(#281)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/BUILD | 8 ++++---- src/README.md | 17 +++++++++++++++++ 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/src/BUILD b/src/BUILD index 43394098..ddf506de 100644 --- a/src/BUILD +++ b/src/BUILD @@ -53,11 +53,11 @@ py_library( visibility = ["//visibility:public"], ) -# In order to update the requirements, change the `requirements.txt` file and run: -# `bazel run //src:requirements`. -# This will update the `requirements_lock.txt` file. +# In order to update the requirements, change the `requirements.in` file and run: +# `bazel run //src:requirements.update`. +# This will update the `requirements.txt` file. # To upgrade all dependencies to their latest versions, run: -# `bazel run //src:requirements -- --upgrade`. +# `bazel run //src:requirements.update -- --upgrade`. compile_pip_requirements( name = "requirements", srcs = [ diff --git a/src/README.md b/src/README.md index 2a0ba8a6..e84f9279 100644 --- a/src/README.md +++ b/src/README.md @@ -80,6 +80,23 @@ Find everything related to testing and how to add your on test suite [here](/too > If you want to develop your own sphinx extension, check out the [extensions guide](/src/extensions/README.md) +## Updating dependencies + +The file [requirements.in](./requirements.in) is a [PIP requirements file](https://pip.pypa.io/en/stable/reference/requirements-file-format/) that describe first level dependencies. 
+ +The file [requirements.txt](./requirements.txt) is a [pip-compile lock file](https://pip-tools.readthedocs.io/en/latest/cli/pip-compile/) that holds +the pinned dependency tree calculated from [requirements.in](./requirements.in). + +To update dependencies (e.g. after adding a dependency), run: +``` +bazel run //src:requirements.update +``` + +To update the full dependency tree, run +``` +bazel run //src:requirements.update -- --upgrade +``` + ## Best Practices 1. **Documentation** From 5e8b013741bfe06292713061fecfbfe5e60ee5a8 Mon Sep 17 00:00:00 2001 From: Andreas Kaluza Date: Mon, 27 Oct 2025 08:59:08 +0100 Subject: [PATCH 148/231] update plantuml theme green to purple (#280) --- src/extensions/score_layout/assets/puml-theme-score.puml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/extensions/score_layout/assets/puml-theme-score.puml b/src/extensions/score_layout/assets/puml-theme-score.puml index c0317fed..719e09b5 100644 --- a/src/extensions/score_layout/assets/puml-theme-score.puml +++ b/src/extensions/score_layout/assets/puml-theme-score.puml @@ -34,7 +34,7 @@ skinparam useBetaStyle true -!$SCORE = "#45ADA8" +!$SCORE = "#9551DA" !$BLUE = "#2196F3" !$INDIGO = "#6610f2" !$PURPLE = "#6f42c1" From 52390625110470f6e4bc8b931e55b01e793545b4 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Tue, 4 Nov 2025 14:04:28 +0100 Subject: [PATCH 149/231] Cleanup (#290) * Prepare 1.5.0 release * fix: plantuml dependency * remove useless extra index url from python requirements * refactor: split huge function --- MODULE.bazel | 2 +- docs/conf.py | 6 +- .../score_metamodel/external_needs.py | 113 ++++++++++-------- src/requirements.in | 2 - src/requirements.txt | 2 - 5 files changed, 67 insertions(+), 58 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index b9894f1b..cc37719f 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "1.4.0", + version = "1.5.0", compatibility_level = 1, ) 
diff --git a/docs/conf.py b/docs/conf.py index 8051d9f2..8cb2d195 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -16,4 +16,8 @@ project_prefix = "DOCS_" version = "0.1" -extensions = ["score_sphinx_bundle"] +extensions = [ + # TODO remove plantuml here once docs-as-code is updated to sphinx-needs 6 + "sphinxcontrib.plantuml", + "score_sphinx_bundle", +] diff --git a/src/extensions/score_metamodel/external_needs.py b/src/extensions/score_metamodel/external_needs.py index b30f965b..9e0ecec8 100644 --- a/src/extensions/score_metamodel/external_needs.py +++ b/src/extensions/score_metamodel/external_needs.py @@ -137,64 +137,73 @@ def temp(self: NeedsList): NeedsList._finalise = temp # pyright: ignore[reportPrivateUsage] -def connect_external_needs(app: Sphinx, config: Config): - extend_needs_json_exporter(config, ["project_url", "project_prefix"]) - - bazel = app.config.external_needs_source or os.getenv("RUNFILES_DIR") +def get_external_needs_source(external_needs_source: str) -> list[ExternalNeedsSource]: + bazel = external_needs_source or os.getenv("RUNFILES_DIR") if bazel: - external_needs = parse_external_needs_sources_from_DATA( - app.config.external_needs_source - ) # pyright: ignore[reportAny] + external_needs = parse_external_needs_sources_from_DATA(external_needs_source) else: external_needs = parse_external_needs_sources_from_bazel_query() # pyright: ignore[reportAny] - for e in external_needs: - assert not e.path_to_target # path_to_target is always empty - json_file = f"{e.bazel_module}+/{e.target}/_build/needs/needs.json" - if r := os.getenv("RUNFILES_DIR"): - logger.debug("Using runfiles to determine external needs JSON file.") - fixed_json_file = Path(r) / json_file - else: - logger.debug( - "Running outside bazel. " - "Determining git root for external needs JSON file." 
- ) - git_root = Path.cwd().resolve() - while not (git_root / ".git").exists(): - git_root = git_root.parent - if git_root == Path("/"): - sys.exit("Could not find git root.") - logger.debug(f"Git root found: {git_root}") - fixed_json_file = ( - git_root / "bazel-bin" / "ide_support.runfiles" / json_file - ) + return external_needs - logger.debug(f"Fixed JSON file path: {json_file} -> {fixed_json_file}") - json_file = fixed_json_file - try: - needs_json_data = json.loads(Path(json_file).read_text(encoding="utf-8")) # pyright: ignore[reportAny] - except FileNotFoundError: - logger.error( - f"Could not find external needs JSON file at {json_file}. " - + "Something went terribly wrong. " - + "Try running `bazel clean --async && rm -rf _build`." - ) - continue - - assert isinstance(app.config.needs_external_needs, list) # pyright: ignore[reportUnknownMemberType] - app.config.needs_external_needs.append( # pyright: ignore[reportUnknownMemberType] - { - "id_prefix": needs_json_data["project_prefix"], - "base_url": needs_json_data["project_url"] - + "/main", # for now always "main" - "json_path": json_file, - } +def add_external_needs_json(e: ExternalNeedsSource, config: Config): + json_file = f"{e.bazel_module}+/{e.target}/_build/needs/needs.json" + if r := os.getenv("RUNFILES_DIR"): + logger.debug("Using runfiles to determine external needs JSON file.") + fixed_json_file = Path(r) / json_file + else: + logger.debug( + "Running outside bazel. " + + "Determining git root for external needs JSON file." ) - # Making the prefixes uppercase here to match sphinx_needs, - # as it does this internally too. 
- assert isinstance(app.config.allowed_external_prefixes, list) # pyright: ignore[reportAny] - app.config.allowed_external_prefixes.append( # pyright: ignore[reportUnknownMemberType] - needs_json_data["project_prefix"].upper() # pyright: ignore[reportAny] + git_root = Path.cwd().resolve() + while not (git_root / ".git").exists(): + git_root = git_root.parent + if git_root == Path("/"): + sys.exit("Could not find git root.") + logger.debug(f"Git root found: {git_root}") + fixed_json_file = git_root / "bazel-bin" / "ide_support.runfiles" / json_file + + logger.debug(f"Fixed JSON file path: {json_file} -> {fixed_json_file}") + json_file = fixed_json_file + + try: + needs_json_data = json.loads(Path(json_file).read_text(encoding="utf-8")) # pyright: ignore[reportAny] + except FileNotFoundError: + logger.error( + f"Could not find external needs JSON file at {json_file}. " + + "Something went terribly wrong. " + + "Try running `bazel clean --async && rm -rf _build`." ) + # Attempt to continue, exit code will be non-zero after a logged error anyway. + return + + assert isinstance(config.needs_external_needs, list) # pyright: ignore[reportUnknownMemberType] + config.needs_external_needs.append( # pyright: ignore[reportUnknownMemberType] + { + "id_prefix": needs_json_data["project_prefix"], + "base_url": needs_json_data["project_url"] + + "/main", # for now always "main" + "json_path": json_file, + } + ) + # Making the prefixes uppercase here to match sphinx_needs, + # as it does this internally too. 
+ assert isinstance(config.allowed_external_prefixes, list) # pyright: ignore[reportAny] + config.allowed_external_prefixes.append( # pyright: ignore[reportUnknownMemberType] + needs_json_data["project_prefix"].upper() # pyright: ignore[reportAny] + ) + + +def connect_external_needs(app: Sphinx, config: Config): + extend_needs_json_exporter(config, ["project_url", "project_prefix"]) + + external_needs = get_external_needs_source(app.config.external_needs_source) + + for e in external_needs: + assert not e.path_to_target # path_to_target is always empty + assert e.target == "needs_json" + + add_external_needs_json(e, app.config) diff --git a/src/requirements.in b/src/requirements.in index 31e43bd6..e2e58dc8 100644 --- a/src/requirements.in +++ b/src/requirements.in @@ -1,5 +1,3 @@ ---extra-index-url https://pypi.org/simple/ - Sphinx # At least 4.2.0, as it fixes a bug in combination with esbonio live preview: diff --git a/src/requirements.txt b/src/requirements.txt index d0b8d5a0..2d11788d 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -4,8 +4,6 @@ # # bazel run //src:requirements.update # ---extra-index-url https://pypi.org/simple/ - accessible-pygments==0.0.5 \ --hash=sha256:40918d3e6a2b619ad424cb91e556bd3bd8865443d9f22f1dcdf79e33c8046872 \ --hash=sha256:88ae3211e68a1d0b011504b2ffc1691feafce124b845bd072ab6f9f66f34d4b7 From 33eda7a1cfc4f5f6ecb0f6806f5974bd805b8d93 Mon Sep 17 00:00:00 2001 From: Jan Gueth Date: Fri, 7 Nov 2025 18:05:33 +0100 Subject: [PATCH 150/231] Remove id prefixes for mega-build (DR-004-Infra) (#293) * Remove id prefixes for mega-build (DR-004-Infra) - Remove all PROCESS_ prefixes from requirements and related files. - Eliminate id_prefix from external needs configuration and code. - Update tests and extensions to work without prefix logic. - Make id_prefix handling optional in score_source_code_linker. 
- Remove project_prefix from docs/conf.py - Remove allowed_external_prefixes config value from score_metamodel - Simplify find_need() by removing prefix parameter and logic - Update tests to match new function signatures - Remove prefix-related test cases from test_codelink.py Why: S-CORE requires bi-directional traceability for compliance (see gd_req__req_traceability). Previously, modules used prefixes for external needs, but this breaks integration when building a single, unified documentation site ("mega-build"). This change implements "Option S: Single documentation build" from decision record DR-004-Infra: Bi-directional traceability in docs (https://eclipse-score.github.io/score/main/design_decisions/DR-004-infra.html). Option S was chosen because it enables quick integration and unified navigation, even though it results in longer build times and requires dropping prefixes. Since our id schemas already provide namespacing, removing prefixes does not risk id clashes. This refactoring aligns with the decision to prioritize integration speed and documentation consistency for releases. 
--- docs/conf.py | 1 - docs/requirements/process_overview.rst | 2 +- docs/requirements/requirements.rst | 164 +++++++++--------- src/extensions/score_metamodel/__init__.py | 1 - .../score_metamodel/external_needs.py | 9 +- .../test_attributes_external_prefix.rst | 15 +- .../rst/attributes/test_prohibited_words.rst | 2 +- .../score_metamodel/tests/rst/conf.py | 1 - .../tests/test_check_options.py | 1 - .../score_source_code_linker/__init__.py | 24 +-- .../tests/test_codelink.py | 54 +----- 11 files changed, 100 insertions(+), 174 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 8cb2d195..fffe55be 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -13,7 +13,6 @@ project = "Score Docs-as-Code" project_url = "https://eclipse-score.github.io/docs-as-code/" -project_prefix = "DOCS_" version = "0.1" extensions = [ diff --git a/docs/requirements/process_overview.rst b/docs/requirements/process_overview.rst index 488411c6..93962313 100644 --- a/docs/requirements/process_overview.rst +++ b/docs/requirements/process_overview.rst @@ -18,7 +18,7 @@ The following table lists tool requirements from our process which are not satis results = [] for need in needs.filter_types(["gd_req"]): - if not need["id"].startswith("PROCESS_gd_req__tool_"): + if not need["id"].startswith("gd_req__tool_"): continue if len(need["satisfies_back"]) >= 1: continue diff --git a/docs/requirements/requirements.rst b/docs/requirements/requirements.rst index 6aa19ddb..1ded7531 100644 --- a/docs/requirements/requirements.rst +++ b/docs/requirements/requirements.rst @@ -56,10 +56,10 @@ This section provides an overview of current process requirements and their clar :implemented: YES :tags: Common Attributes :satisfies: - PROCESS_gd_req__req_attr_uid, - PROCESS_gd_req__tool_attr_uid, - PROCESS_gd_req__arch_attribute_uid, - PROCESS_gd_req__saf_attr_uid, + gd_req__req_attr_uid, + gd_req__tool_attr_uid, + gd_req__arch_attribute_uid, + gd_req__saf_attr_uid, :parent_covered: NO Docs-as-Code shall enforce 
that all Need IDs are globally unique across all included @@ -74,10 +74,10 @@ This section provides an overview of current process requirements and their clar :implemented: PARTIAL :tags: Common Attributes :satisfies: - PROCESS_gd_req__req_attr_uid, - PROCESS_gd_req__arch_attribute_uid, - PROCESS_gd_req__saf_attr_uid, - :parent_covered: NO: cannot check non-existent "doc__naming_conventions" in PROCESS_gd_req__req_attr_uid + gd_req__req_attr_uid, + gd_req__arch_attribute_uid, + gd_req__saf_attr_uid, + :parent_covered: NO: cannot check non-existent "doc__naming_conventions" in gd_req__req_attr_uid Docs-as-Code shall enforce that Need IDs follow the following naming scheme: @@ -98,8 +98,8 @@ This section provides an overview of current process requirements and their clar :implemented: YES :tags: Common Attributes :satisfies: - PROCESS_gd_req__req_attr_title, - PROCESS_gd_req__saf_attr_title, + gd_req__req_attr_title, + gd_req__saf_attr_title, :parent_covered: NO: Can not ensure summary Docs-as-Code shall enforce that all needs have titles and titles do not contain the following words: @@ -118,7 +118,7 @@ This section provides an overview of current process requirements and their clar :tags: Common Attributes :parent_covered: NO: Can not cover 'ISO/IEC/IEEE/29148' :implemented: YES - :satisfies: PROCESS_gd_req__req_attr_description + :satisfies: gd_req__req_attr_description Docs-as-Code shall enforce that each need of type :need:`tool_req__docs_req_types` has a description (content) @@ -128,7 +128,7 @@ This section provides an overview of current process requirements and their clar :tags: Common Attributes :implemented: YES :satisfies: - PROCESS_gd_req__req_desc_weak, + gd_req__req_desc_weak, :parent_covered: YES Docs-as-Code shall enforce that requirement descriptions do not contain the following weak words: @@ -150,8 +150,8 @@ This section provides an overview of current process requirements and their clar :implemented: YES :tags: Common Attributes :satisfies: - 
PROCESS_gd_req__req_attr_security, - PROCESS_gd_req__arch_attr_security, + gd_req__req_attr_security, + gd_req__arch_attr_security, Docs-as-Code shall enforce that the ``security`` attribute has one of the following values: @@ -176,8 +176,8 @@ This section provides an overview of current process requirements and their clar :implemented: YES :parent_covered: YES :satisfies: - PROCESS_gd_req__req_attr_safety, - PROCESS_gd_req__arch_attr_safety + gd_req__req_attr_safety, + gd_req__arch_attr_safety Docs-as-Code shall enforce that the ``safety`` attribute has one of the following values: @@ -202,9 +202,9 @@ This section provides an overview of current process requirements and their clar :implemented: YES :parent_covered: NO: gd_req__saf_attr_status has additional constraints :satisfies: - PROCESS_gd_req__req_attr_status, - PROCESS_gd_req__arch_attr_status, - PROCESS_gd_req__saf_attr_status, + gd_req__req_attr_status, + gd_req__arch_attr_status, + gd_req__saf_attr_status, Docs-as-Code shall enforce that the ``status`` attribute has one of the following values: @@ -228,7 +228,7 @@ Versioning :tags: Common Attributes :implemented: NO :parent_covered: NO: to be checked after demo - :satisfies: PROCESS_gd_req__req_attr_version + :satisfies: gd_req__req_attr_version Docs-As-Code shall enable and enforce a versioning attribute for all needs. @@ -242,7 +242,7 @@ Versioning :tags: Common Attributes :implemented: NO :parent_covered: NO: parent talks about setting covered to false, but we want to issue a build error. 
- :satisfies: PROCESS_gd_req__req_suspicious + :satisfies: gd_req__req_suspicious :status: invalid Docs-as-Code shall check if linked parent needs have different versions, compared to @@ -259,7 +259,7 @@ Versioning :tags: Documents :implemented: YES :parent_covered: YES - :satisfies: PROCESS_gd_req__doc_types + :satisfies: gd_req__doc_types Docs-as-Code shall support the following document types: @@ -272,9 +272,9 @@ Versioning :tags: Documents :implemented: NO :satisfies: - PROCESS_gd_req__doc_author, - PROCESS_gd_req__doc_approver, - PROCESS_gd_req__doc_reviewer, + gd_req__doc_author, + gd_req__doc_approver, + gd_req__doc_reviewer, :parent_covered: NO, process requirement has changed and we do not understand the new wording. :status: invalid @@ -290,7 +290,7 @@ Versioning :id: tool_req__docs_doc_attr_author_autofill :tags: Documents :implemented: NO - :satisfies: PROCESS_gd_req__doc_author + :satisfies: gd_req__doc_author :parent_covered: NO, process requirement has changed and we do not understand the new wording. :status: invalid @@ -308,7 +308,7 @@ Versioning :id: tool_req__docs_doc_attr_approver_autofill :tags: Documents :implemented: NO - :satisfies: PROCESS_gd_req__doc_approver + :satisfies: gd_req__doc_approver :parent_covered: NO, process requirement has changed and we do not understand the new wording. :status: invalid @@ -322,7 +322,7 @@ Versioning :id: tool_req__docs_doc_attr_reviewer_autofill :tags: Documents :implemented: NO - :satisfies: PROCESS_gd_req__doc_reviewer + :satisfies: gd_req__doc_reviewer :parent_covered: NO, process requirement has changed and we do not understand the new wording. 
:status: invalid @@ -340,7 +340,7 @@ Mapping :style: table :types: gd_req :columns: id;satisfies_back as "tool_req" - :filter: "PROCESS_gd_req__doc" in id + :filter: "gd_req__doc" in id 📋 Requirements @@ -354,7 +354,7 @@ Mapping :id: tool_req__docs_req_types :tags: Requirements :implemented: YES - :satisfies: PROCESS_gd_req__req_structure + :satisfies: gd_req__req_structure :parent_covered: YES: Together with tool_req__docs_linkage Docs-as-Code shall support the following requirement types: @@ -375,7 +375,7 @@ Mapping :tags: Requirements :implemented: YES :parent_covered: NO: Can not ensure correct reasoning - :satisfies: PROCESS_gd_req__req_attr_rationale + :satisfies: gd_req__req_attr_rationale Docs-as-Code shall enforce that each stakeholder requirement (stkh_req) contains a ``rationale`` attribute. @@ -383,7 +383,7 @@ Mapping :id: tool_req__docs_req_attr_reqtype :tags: Requirements :implemented: YES - :satisfies: PROCESS_gd_req__req_attr_type + :satisfies: gd_req__req_attr_type Docs-as-Code shall enforce that each need of type :need:`tool_req__docs_req_types` except process and tool requirements has a ``reqtype`` attribute with one of the @@ -398,7 +398,7 @@ Mapping :id: tool_req__docs_req_attr_reqcov :tags: Requirements :implemented: PARTIAL - :satisfies: PROCESS_gd_req__req_attr_req_cov + :satisfies: gd_req__req_attr_req_cov Docs as code shall shall enable marking requirements as covered by their linked children. 
@@ -414,7 +414,7 @@ Mapping :tags: Requirements :implemented: PARTIAL :parent_covered: YES - :satisfies: PROCESS_gd_req__req_attr_test_covered + :satisfies: gd_req__req_attr_test_covered :status: invalid Docs-As-Code shall allow for every need of type :need:`tool_req__docs_req_types` to @@ -432,7 +432,7 @@ Mapping :tags: Requirements :implemented: PARTIAL :parent_covered: YES - :satisfies: PROCESS_gd_req__req_validity + :satisfies: gd_req__req_validity :status: valid Docs-as-Code shall enforce that the ``valid_from`` and ``valid_until`` attributes of stakeholder and feature requirements are correct. @@ -445,7 +445,7 @@ Mapping :tags: Requirements :implemented: PARTIAL :parent_covered: YES - :satisfies: PROCESS_gd_req__req_validity + :satisfies: gd_req__req_validity :status: valid Docs-as-Code shall enforce that ``valid_from`` is before ``valid_until`` attribute in stakeholder and feature requirements. @@ -461,7 +461,7 @@ Mapping :id: tool_req__docs_req_link_satisfies_allowed :tags: Requirements :implemented: PARTIAL - :satisfies: PROCESS_gd_req__req_linkage, PROCESS_gd_req__req_traceability + :satisfies: gd_req__req_linkage, gd_req__req_traceability :parent_covered: YES :status: invalid @@ -490,7 +490,7 @@ Mapping :tags: Common Attributes :implemented: YES :parent_covered: YES - :satisfies: PROCESS_gd_req__req_linkage_safety + :satisfies: gd_req__req_linkage_safety QM requirements (safety == QM) shall not be linked to safety requirements (safety != QM) via the ``satisfies`` attribute. 
@@ -505,8 +505,8 @@ Mapping :id: tool_req__docs_arch_types :tags: Architecture :satisfies: - PROCESS_gd_req__arch_hierarchical_structure, - PROCESS_gd_req__arch_build_blocks, + gd_req__arch_hierarchical_structure, + gd_req__arch_build_blocks, :implemented: YES :parent_covered: NO :status: invalid @@ -528,8 +528,8 @@ Architecture Attributes :id: tool_req__docs_arch_attr_mandatory :tags: Architecture :satisfies: - PROCESS_gd_req__arch_attr_mandatory, - PROCESS_gd_req__arch_attr_fulfils, + gd_req__arch_attr_mandatory, + gd_req__arch_attr_fulfils, :implemented: PARTIAL :parent_covered: YES :parent_has_problem: YES: Metamodel & Process aren't the same. Some definitions are not consistent in Process @@ -553,10 +553,10 @@ Architecture Attributes :tags: Architecture :implemented: PARTIAL :satisfies: - PROCESS_gd_req__arch_linkage_requirement_type, - PROCESS_gd_req__arch_attr_fulfils, - PROCESS_gd_req__arch_traceability, - PROCESS_gd_req__req_linkage_fulfill + gd_req__arch_linkage_requirement_type, + gd_req__arch_attr_fulfils, + gd_req__arch_traceability, + gd_req__req_linkage_fulfill :parent_covered: YES Docs-as-Code shall enforce that linking via the ``fulfils`` attribute follows defined rules. 
@@ -582,7 +582,7 @@ Architecture Attributes :id: tool_req__docs_arch_link_safety_to_req :tags: Architecture :implemented: PARTIAL - :satisfies: PROCESS_gd_req__arch_linkage_requirement + :satisfies: gd_req__arch_linkage_requirement :parent_covered: YES Docs-as-Code shall enforce that architecture elements of type @@ -594,7 +594,7 @@ Architecture Attributes :id: tool_req__docs_arch_link_qm_to_safety_req :tags: Architecture :implemented: PARTIAL - :satisfies: PROCESS_gd_req__arch_linkage_requirement + :satisfies: gd_req__arch_linkage_requirement :parent_covered: YES Docs-as-Code shall enforce that architecture elements of type @@ -607,8 +607,8 @@ Architecture Attributes :tags: Architecture :implemented: PARTIAL :satisfies: - PROCESS_gd_req__arch_linkage_safety_trace, - PROCESS_gd_req__req_linkage_safety, + gd_req__arch_linkage_safety_trace, + gd_req__req_linkage_safety, :parent_covered: NO Docs-as-Code shall enforce that valid safety architectural elements (Safety != QM) can @@ -619,7 +619,7 @@ Architecture Attributes :tags: Architecture :implemented: NO :parent_covered: YES - :satisfies: PROCESS_gd_req__arch_linkage_security_trace + :satisfies: gd_req__arch_linkage_security_trace Docs-as-Code shall enforce that security relevant :need:`tool_req__docs_arch_types` (Security == YES) can only be linked against security relevant :need:`tool_req__docs_arch_types`. @@ -633,7 +633,7 @@ Architecture Attributes :tags: Architecture :implemented: YES :satisfies: - PROCESS_gd_req__arch_viewpoints, + gd_req__arch_viewpoints, :parent_covered: YES Docs-as-Code shall enable the rendering of diagrams for the following architecture views: @@ -663,8 +663,8 @@ Architecture Attributes :implemented: YES :parent_covered: NO: we only enable linking, we do not link :satisfies: - PROCESS_gd_req__req_attr_impl, - PROCESS_gd_req__impl_design_code_link, + gd_req__req_attr_impl, + gd_req__impl_design_code_link, Docs-as-Code shall allow source code to link to needs. 
@@ -678,12 +678,12 @@ Architecture Attributes :tags: Detailed Design & Code :implemented: NO :parent_covered: YES - :satisfies: PROCESS_gd_req__req_linkage_architecture_switch + :satisfies: gd_req__req_linkage_architecture_switch Docs-as-Code shall allow for a to-be-defined list of checks to be non-fatal for non release builds. These are typically better suited for metrics than for checks. - e.g. PROCESS_gd_req__req_linkage_architecture + e.g. gd_req__req_linkage_architecture .. tool_req:: Enable Creation of Dependency Graphs @@ -691,7 +691,7 @@ Architecture Attributes :tags: Detailed Design & Code :implemented: NO :parent_covered: YES - :satisfies: PROCESS_gd_req__impl_dependency_analysis + :satisfies: gd_req__impl_dependency_analysis :status: invalid Docs-As-Code shall support generation and rendering of dependency graphs for @@ -712,7 +712,7 @@ Testing :tags: Testing :implemented: PARTIAL :parent_covered: YES - :satisfies: PROCESS_gd_req__req_attr_testlink + :satisfies: gd_req__req_attr_testlink Docs-as-Code shall allow requirements of type :need:`tool_req__docs_req_types` to include a ``testlink`` attribute. @@ -725,7 +725,7 @@ Testing :tags: Testing :implemented: NO :parent_covered: NO - :satisfies: PROCESS_gd_req__verification_checks + :satisfies: gd_req__verification_checks Docs-as-Code shall ensure that each test case has TestType and DerivationTechnique set. @@ -734,7 +734,7 @@ Testing :tags: Testing :implemented: NO :parent_covered: NO - :satisfies: PROCESS_gd_req__verification_checks + :satisfies: gd_req__verification_checks :status: invalid Docs-as-Code shall ensure that each test case has a non empty description. 
@@ -746,7 +746,7 @@ Testing :tags: Testing :implemented: NO :parent_covered: NO - :satisfies: PROCESS_gd_req__verification_checks + :satisfies: gd_req__verification_checks :status: invalid Docs-as-Code shall ensure that test cases link to requirements on the correct level: @@ -766,7 +766,7 @@ Testing :tags: Tool Verification Reports :implemented: YES :parent_covered: YES - :satisfies: PROCESS_gd_req__tool_attr_safety_affected, PROCESS_gd_req__tool_check_mandatory + :satisfies: gd_req__tool_attr_safety_affected, gd_req__tool_check_mandatory Docs-as-Code shall enforce that every Tool Verification Report (`doc_tool`) includes a ``safety_affected`` attribute with one of the following values: @@ -779,7 +779,7 @@ Testing :tags: Tool Verification Reports :implemented: YES :parent_covered: YES - :satisfies: PROCESS_gd_req__tool_attr_security_affected, PROCESS_gd_req__tool_check_mandatory + :satisfies: gd_req__tool_attr_security_affected, gd_req__tool_check_mandatory Docs-as-Code shall enforce that every Tool Verification Report (`doc_tool`) includes a `security_affected` attribute with one of the following values: @@ -792,7 +792,7 @@ Testing :id: tool_req__docs_tvr_status :tags: Tool Verification Reports :implemented: YES - :satisfies: PROCESS_gd_req__tool_attr_status, PROCESS_gd_req__tool_check_mandatory + :satisfies: gd_req__tool_attr_status, gd_req__tool_check_mandatory :parent_covered: YES Docs-as-Code shall enforce that every Tool Verification Report (`doc_tool`) includes a @@ -835,8 +835,8 @@ Testing :implemented: NO :tags: Safety Analysis :satisfies: - PROCESS_gd_req__saf_structure, - PROCESS_gd_req__saf_attr_uid, + gd_req__saf_structure, + gd_req__saf_attr_uid, :parent_covered: YES Docs-As-Code shall support the following need types: @@ -852,9 +852,9 @@ Testing :implemented: NO :tags: Safety Analysis :satisfies: - PROCESS_gd_req__saf_attr_mitigated_by, - PROCESS_gd_req__saf_attr_requirements, - PROCESS_gd_req__saf_attr_requirements_check, + 
gd_req__saf_attr_mitigated_by, + gd_req__saf_attr_requirements, + gd_req__saf_attr_requirements_check, :parent_covered: YES Docs-As-Code shall enforce valid needs (`status` == `valid`) of type @@ -866,7 +866,7 @@ Testing :id: tool_req__docs_saf_attrs_mitigation_issue :implemented: NO :tags: Safety Analysis - :satisfies: PROCESS_gd_req__saf_attr_mitigation_issue + :satisfies: gd_req__saf_attr_mitigation_issue :parent_covered: NO Docs-As-Code shall allow needs of type :need:`tool_req__docs_saf_types` to have a @@ -877,7 +877,7 @@ Testing :id: tool_req__docs_saf_attrs_sufficient :implemented: NO :tags: Safety Analysis - :satisfies: PROCESS_gd_req__saf_attr_sufficient + :satisfies: gd_req__saf_attr_sufficient :parent_covered: YES Docs-As-Code shall enforce needs of type :need:`tool_req__docs_saf_types` to @@ -890,7 +890,7 @@ Testing :id: tool_req__docs_saf_attrs_sufficient_check :implemented: NO :tags: Safety Analysis - :satisfies: PROCESS_gd_req__saf_attr_sufficient + :satisfies: gd_req__saf_attr_sufficient :parent_covered: YES Docs-As-Code shall ensure needs of type :need:`tool_req__docs_saf_types` with @@ -901,7 +901,7 @@ Testing :id: tool_req__docs_saf_attrs_content :implemented: NO :tags: Safety Analysis - :satisfies: PROCESS_gd_req__saf_argument + :satisfies: gd_req__saf_argument :parent_covered: NO Docs-As-Code shall enforce needs of type :need:`tool_req__docs_saf_types` to have a @@ -914,8 +914,8 @@ Testing :implemented: NO :tags: Safety Analysis :satisfies: - PROCESS_gd_req__saf_linkage_check, - PROCESS_gd_req__saf_linkage, + gd_req__saf_linkage_check, + gd_req__saf_linkage, :parent_covered: YES Docs-As-Code shall enforce that needs of type :need:`tool_req__docs_saf_types` have a @@ -934,33 +934,33 @@ Testing :id: tool_req__docs_saf_attr_fmea_fault_id :implemented: NO :tags: Safety Analysis - :satisfies: PROCESS_gd_req__saf_attr_fault_id + :satisfies: gd_req__saf_attr_fault_id :parent_covered: NO Docs-As-Code shall enforce that needs of type DFA (see 
:need:`tool_req__docs_saf_types`) have a `fault_id` attribute. - Allowed values are listed as ID in tables at :need:`PROCESS_gd_guidl__dfa_failure_initiators`. + Allowed values are listed as ID in tables at :need:`gd_guidl__dfa_failure_initiators`. .. tool_req:: DFA: failure id attribute :id: tool_req__docs_saf_attr_dfa_failure_id :implemented: NO :tags: Safety Analysis - :satisfies: PROCESS_gd_req__saf_attr_failure_id + :satisfies: gd_req__saf_attr_failure_id :parent_covered: NO Docs-As-Code shall enforce that needs of type DFA (see :need:`tool_req__docs_saf_types`) have a `fault_id` attribute. - Allowed values are listed as ID in tables at :need:`PROCESS_gd_guidl__dfa_failure_initiators`. + Allowed values are listed as ID in tables at :need:`gd_guidl__dfa_failure_initiators`. .. tool_req:: Failure Effect :id: tool_req__docs_saf_attr_fmea_failure_effect :implemented: NO :tags: Safety Analysis - :satisfies: PROCESS_gd_req__saf_attr_feffect + :satisfies: gd_req__saf_attr_feffect :parent_covered: NO :status: invalid @@ -974,7 +974,7 @@ Mapping :style: table :types: gd_req :columns: id;satisfies_back as "tool_req" - :filter: "PROCESS_gd_req__saf" in id + :filter: "gd_req__saf" in id 🗺️ Full Mapping @@ -992,7 +992,7 @@ Overview of Tool to Process Requirements .. 
needtable:: :types: tool_req - :filter: any(s.startswith("PROCESS_gd_req") for s in satisfies) + :filter: any(s.startswith("gd_req") for s in satisfies) :columns: satisfies as "Process Requirement" ;id as "Tool Requirement";implemented;source_code_link :style: table diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 681d72c3..858a179e 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -230,7 +230,6 @@ def postprocess_need_links(needs_types_list: list[ScoreNeedType]): def setup(app: Sphinx) -> dict[str, str | bool]: app.add_config_value("external_needs_source", "", rebuild="env") - app.add_config_value("allowed_external_prefixes", [], rebuild="env") app.config.needs_id_required = True app.config.needs_id_regex = "^[A-Za-z0-9_-]{6,}" diff --git a/src/extensions/score_metamodel/external_needs.py b/src/extensions/score_metamodel/external_needs.py index 9e0ecec8..43adf1d6 100644 --- a/src/extensions/score_metamodel/external_needs.py +++ b/src/extensions/score_metamodel/external_needs.py @@ -183,22 +183,15 @@ def add_external_needs_json(e: ExternalNeedsSource, config: Config): assert isinstance(config.needs_external_needs, list) # pyright: ignore[reportUnknownMemberType] config.needs_external_needs.append( # pyright: ignore[reportUnknownMemberType] { - "id_prefix": needs_json_data["project_prefix"], "base_url": needs_json_data["project_url"] + "/main", # for now always "main" "json_path": json_file, } ) - # Making the prefixes uppercase here to match sphinx_needs, - # as it does this internally too. 
- assert isinstance(config.allowed_external_prefixes, list) # pyright: ignore[reportAny] - config.allowed_external_prefixes.append( # pyright: ignore[reportUnknownMemberType] - needs_json_data["project_prefix"].upper() # pyright: ignore[reportAny] - ) def connect_external_needs(app: Sphinx, config: Config): - extend_needs_json_exporter(config, ["project_url", "project_prefix"]) + extend_needs_json_exporter(config, ["project_url"]) external_needs = get_external_needs_source(app.config.external_needs_source) diff --git a/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_external_prefix.rst b/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_external_prefix.rst index a3df11b9..5ec9deeb 100644 --- a/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_external_prefix.rst +++ b/src/extensions/score_metamodel/tests/rst/attributes/test_attributes_external_prefix.rst @@ -14,23 +14,24 @@ #CHECK: check_options -.. Cleaning of 'external prefix' before checking regex confirmity -#EXPECT-NOT tool_req__test_abcd.satisfies (PROCESS_doc_getstrt__req__process): does not follow pattern `^doc_.+$`. +.. Test: No external prefixes (single documentation mega-build) +.. Verifies links work when all needs are loaded in one Sphinx instance, without prefix logic. +#EXPECT-NOT tool_req__test_abcd.satisfies (doc_getstrt__req__process): does not follow pattern `^doc_.+$`. .. tool_req:: This is a test :id: tool_req__test_abcd - :satisfies: PROCESS_doc_getstrt__req__process + :satisfies: doc_getstrt__req__process This should not give a warning -.. Also make sure it works wit lists of links +.. Also make sure it works with lists of links -#EXPECT-NOT: tool_req__test_aaaa.satisfies (PROCESS_doc_getstrt__req__process): does not follow pattern `^doc_.+$`. -#EXPECT-NOT: tool_req__test_aaaa.satisfies (PROCESS_gd_guidl__req__engineering): does not follow pattern `^gd_.+$`. 
+#EXPECT-NOT: tool_req__test_aaaa.satisfies (doc_getstrt__req__process): does not follow pattern `^doc_.+$`. +#EXPECT-NOT: tool_req__test_aaaa.satisfies (gd_guidl__req__engineering): does not follow pattern `^gd_.+$`. .. tool_req:: This is a test :id: tool_req__test_aaaa - :satisfies: PROCESS_doc_getstrt__req__process;PROCESS_gd_guidl__req__engineering + :satisfies: doc_getstrt__req__process;gd_guidl__req__engineering This should give a warning diff --git a/src/extensions/score_metamodel/tests/rst/attributes/test_prohibited_words.rst b/src/extensions/score_metamodel/tests/rst/attributes/test_prohibited_words.rst index 240de822..40546c00 100644 --- a/src/extensions/score_metamodel/tests/rst/attributes/test_prohibited_words.rst +++ b/src/extensions/score_metamodel/tests/rst/attributes/test_prohibited_words.rst @@ -82,7 +82,7 @@ :tags: Common Attributes :implemented: YES :satisfies: - PROCESS_gd_req__req_desc_weak, + gd_req__req_desc_weak, :parent_covered: YES Docs-as-Code shall enforce that requirement descriptions do not contain the following weak words: diff --git a/src/extensions/score_metamodel/tests/rst/conf.py b/src/extensions/score_metamodel/tests/rst/conf.py index 69e04bf1..e68ed77c 100644 --- a/src/extensions/score_metamodel/tests/rst/conf.py +++ b/src/extensions/score_metamodel/tests/rst/conf.py @@ -25,6 +25,5 @@ { "base_url": "https://eclipse-score.github.io/process_description/main/", "json_url": "https://eclipse-score.github.io/process_description/main/needs.json", - "id_prefix": "process_", } ] diff --git a/src/extensions/score_metamodel/tests/test_check_options.py b/src/extensions/score_metamodel/tests/test_check_options.py index 1814df40..5e81d64e 100644 --- a/src/extensions/score_metamodel/tests/test_check_options.py +++ b/src/extensions/score_metamodel/tests/test_check_options.py @@ -111,7 +111,6 @@ def test_unknown_option_present_in_neither_req_opt_neither_opt_opt(self): app = Mock(spec=Sphinx) app.config = Mock() app.config.needs_types = 
self.NEED_TYPE_INFO_WITH_OPT_OPT - app.config.allowed_external_prefixes = [] # Expect that the checks pass check_extra_options(app, need_1, cast(CheckLogger, logger)) diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index d8693500..876e4fcc 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -292,24 +292,11 @@ def setup(app: Sphinx) -> dict[str, str | bool]: } -def find_need( - all_needs: NeedsMutable, id: str, prefixes: list[str] -) -> NeedsInfoType | None: +def find_need(all_needs: NeedsMutable, id: str) -> NeedsInfoType | None: """ - Checks all possible external 'prefixes' for an ID - So that the linker can add the link to the correct NeedsInfoType object. + Finds a need by ID in the needs collection. """ - if id in all_needs: - return all_needs[id] - - # Try all possible prefixes - for prefix in prefixes: - prefixed_id = f"{prefix}{id}" - if prefixed_id in all_needs: - LOGGER.warning("linking to external needs is not supported!") - return all_needs[prefixed_id] - - return None + return all_needs.get(id) # re-qid: gd_req__req__attr_impl @@ -349,11 +336,8 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: get_cache_filename(app.outdir, "score_scl_grouped_cache.json") ) - # For some reason the prefix 'sphinx_needs internally' is CAPSLOCKED. 
- # So we have to make sure we uppercase the prefixes - prefixes = [x["id_prefix"].upper() for x in app.config.needs_external_needs] for source_code_links in source_code_links_by_need: - need = find_need(needs_copy, source_code_links.need, prefixes) + need = find_need(needs_copy, source_code_links.need) if need is None: # TODO: print github annotations as in https://github.com/eclipse-score/bazel_registry/blob/7423b9996a45dd0a9ec868e06a970330ee71cf4f/tools/verify_semver_compatibility_level.py#L126-L129 for n in source_code_links.links.CodeLinks: diff --git a/src/extensions/score_source_code_linker/tests/test_codelink.py b/src/extensions/score_source_code_linker/tests/test_codelink.py index 7bb3897c..9e360d1a 100644 --- a/src/extensions/score_source_code_linker/tests/test_codelink.py +++ b/src/extensions/score_source_code_linker/tests/test_codelink.py @@ -222,52 +222,11 @@ def test_find_need_direct_match(): "REQ_002": {"id": "REQ_002", "title": "Another requirement"}, } ) - result = find_need(all_needs, "REQ_001", []) + result = find_need(all_needs, "REQ_001") assert result is not None assert result["id"] == "REQ_001" -@add_test_properties( - partially_verifies=["tool_req__docs_dd_link_source_code_link"], - test_type="requirements-based", - derivation_technique="requirements-analysis", -) -def test_find_need_with_prefix(): - """Test finding a need with prefix matching.""" - - all_needs = make_needs( - { - "PREFIX_REQ_001": {"id": "PREFIX_REQ_001", "title": "Prefixed requirement"}, - "REQ_002": {"id": "REQ_002", "title": "Another requirement"}, - } - ) - result = find_need(all_needs, "REQ_001", ["PREFIX_"]) - assert result is not None - assert result["id"] == "PREFIX_REQ_001" - - -@add_test_properties( - partially_verifies=["tool_req__docs_dd_link_source_code_link"], - test_type="requirements-based", - derivation_technique="requirements-analysis", -) -def test_find_need_multiple_prefixes(): - """Test finding a need with multiple prefixes.""" - all_needs = make_needs( 
- { - "SECOND_REQ_001": { - "id": "SECOND_REQ_001", - "title": "Second prefixed requirement", - }, - "REQ_002": {"id": "REQ_002", "title": "Another requirement"}, - } - ) - - result = find_need(all_needs, "REQ_001", ["FIRST_", "SECOND_"]) - assert result is not None - assert result["id"] == "SECOND_REQ_001" - - @add_test_properties( partially_verifies=["tool_req__docs_dd_link_source_code_link"], test_type="requirements-based", @@ -281,7 +240,7 @@ def test_find_need_not_found(): } ) - result = find_need(all_needs, "REQ_999", ["PREFIX_"]) + result = find_need(all_needs, "REQ_999") assert result is None @@ -437,22 +396,15 @@ def test_group_by_need_and_find_need_integration( { "TREQ_ID_1": {"id": "TREQ_ID_1", "title": "Test requirement 1"}, "TREQ_ID_2": {"id": "TREQ_ID_2", "title": "Test requirement 2"}, - "PREFIX_TREQ_ID_200": { - "id": "PREFIX_TREQ_ID_200", - "title": "Prefixed requirement", - }, } ) # Test finding needs for each group for found_link in grouped: - found_need = find_need(all_needs, found_link.need, ["PREFIX_"]) + found_need = find_need(all_needs, found_link.need) if found_link.need in ["TREQ_ID_1", "TREQ_ID_2"]: assert found_need is not None assert found_need["id"] == found_link.need - elif found_link.need == "TREQ_ID_200": - assert found_need is not None - assert found_need["id"] == "PREFIX_TREQ_ID_200" @add_test_properties( From 723bee8c73ee2a81138a8b91ab3f666b33acd745 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Fri, 7 Nov 2025 18:14:01 +0100 Subject: [PATCH 151/231] Add experimental combo/mega/full build (see DR-004) (#285) --- docs.bzl | 39 +++++++++++++++++++ docs/how-to/commands.md | 22 ++++++----- docs/index.rst | 23 +++++++++++ src/extensions/score_metamodel/__init__.py | 5 ++- .../score_metamodel/external_needs.py | 32 +++++++++++++-- .../score_sphinx_bundle/__init__.py | 1 + src/requirements.in | 3 ++ src/requirements.txt | 19 +++++++++ 8 files changed, 130 insertions(+), 14 deletions(-) diff --git a/docs.bzl b/docs.bzl index 
fc99f4be..8de8b2dc 100644 --- a/docs.bzl +++ b/docs.bzl @@ -45,6 +45,17 @@ load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary", "sphinx_docs load("@rules_python//sphinxdocs:sphinx_docs_library.bzl", "sphinx_docs_library") load("@score_tooling//:defs.bzl", "score_virtualenv") +def _rewrite_needs_json_to_docs_sources(labels): + """Replace '@repo//:needs_json' -> '@repo//:docs_sources' for every item.""" + out = [] + for x in labels: + s = str(x) + if s.endswith("//:needs_json"): + out.append(s.replace("//:needs_json", "//:docs_sources")) + else: + out.append(s) + return out + def docs(source_dir = "docs", data = [], deps = []): """ Creates all targets related to documentation. @@ -89,6 +100,8 @@ def docs(source_dir = "docs", data = [], deps = []): visibility = ["//visibility:public"], ) + data_with_docs_sources = _rewrite_needs_json_to_docs_sources(data) + py_binary( name = "docs", tags = ["cli_help=Build documentation:\nbazel run //:docs"], @@ -102,6 +115,19 @@ def docs(source_dir = "docs", data = [], deps = []): }, ) + py_binary( + name = "docs_combo", + tags = ["cli_help=Build full documentation with all dependencies:\nbazel run //:docs_combo_experimental"], + srcs = ["@score_docs_as_code//src:incremental.py"], + data = data_with_docs_sources, + deps = deps, + env = { + "SOURCE_DIRECTORY": source_dir, + "DATA": str(data_with_docs_sources), + "ACTION": "incremental", + }, + ) + py_binary( name = "docs_check", tags = ["cli_help=Verify documentation:\nbazel run //:docs_check"], @@ -128,6 +154,19 @@ def docs(source_dir = "docs", data = [], deps = []): }, ) + py_binary( + name = "live_preview_combo_experimental", + tags = ["cli_help=Live preview full documentation with all dependencies in the browser:\nbazel run //:live_preview_combo_experimental"], + srcs = ["@score_docs_as_code//src:incremental.py"], + data = data_with_docs_sources, + deps = deps, + env = { + "SOURCE_DIRECTORY": source_dir, + "DATA": str(data_with_docs_sources), + "ACTION": 
"live_preview", + }, + ) + score_virtualenv( name = "ide_support", tags = ["cli_help=Create virtual environment (.venv_docs) for documentation support:\nbazel run //:ide_support"], diff --git a/docs/how-to/commands.md b/docs/how-to/commands.md index b96fced2..2250f237 100644 --- a/docs/how-to/commands.md +++ b/docs/how-to/commands.md @@ -1,15 +1,17 @@ # Commands -⚠️ Only valid for docs-as-code v1.x.x. - -| Target | What it does | -| --------------------------- | ---------------------------------------------------------------------- | -| `bazel run //:docs` | Builds documentation | -| `bazel run //:live_preview` | Creates a live_preview of the documentation viewable in a local server | -| `bazel run //:ide_support` | Sets up a Python venv for esbonio (Remember to restart VS Code!) | +| Target | What it does | +| ---------------------------------------------- | ------------------------------------------------------------------------------------------------- | +| `bazel run //:docs` | Builds documentation | +| `bazel run //:docs_check` | Verifies documentation correctness | +| `bazel run //:docs_combo_experimental` | Builds combined documentation with all external dependencies included | +| `bazel run //:live_preview` | Creates a live_preview of the documentation viewable in a local server | +| `bazel run //:live_preview_combo_experimental` | Creates a live_preview of the full documentation with all dependencies viewable in a local server | +| `bazel run //:ide_support` | Sets up a Python venv for esbonio (Remember to restart VS Code!) 
| ## Internal targets (do not use directly) -| Target | What it does | -| --------------------------- | --------------------------- | -| `bazel build //:needs_json` | Creates a 'needs.json' file | +| Target | What it does | +| ----------------------------- | ------------------------------------------- | +| `bazel build //:needs_json` | Creates a 'needs.json' file | +| `bazel build //:docs_sources` | Provides all the documentation source files | diff --git a/docs/index.rst b/docs/index.rst index c00b1d14..8edeca5a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -39,6 +39,18 @@ Welcome to the docs-as-code documentation, it is organized into several key sect ^^^ The official tool requirements for docs-as-code. + +.. if-collection:: score_process + + .. grid:: 1 1 3 3 + :class-container: score-grid + + .. grid-item-card:: + + :ref:`score_process <_collections/score_process/process/index>` + ^^^ + Documentation for the score_process that docs-as-code is based on, including backlinks to docs-as-code. + .. dropdown:: Sitemap .. toctree:: @@ -49,3 +61,14 @@ Welcome to the docs-as-code documentation, it is organized into several key sect how-to/index internals/index requirements/index + + docs-as-code is based on score_process: + + .. if-collection:: score_process + + .. toctree:: + :maxdepth: 5 + :includehidden: + :titlesonly: + + _collections/score_process/process/index diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 858a179e..e7eed11d 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -250,7 +250,10 @@ def setup(app: Sphinx) -> dict[str, str | bool]: app.config.needs_reproducible_json = True app.config.needs_json_remove_defaults = True - _ = app.connect("config-inited", connect_external_needs) + # sphinx-collections runs on default prio 500. + # We need to populate the sphinx-collections config before that happens. 
+ # --> 499 + _ = app.connect("config-inited", connect_external_needs, priority=499) discover_checks() diff --git a/src/extensions/score_metamodel/external_needs.py b/src/extensions/score_metamodel/external_needs.py index 43adf1d6..a9f85c87 100644 --- a/src/extensions/score_metamodel/external_needs.py +++ b/src/extensions/score_metamodel/external_needs.py @@ -48,7 +48,7 @@ def _parse_bazel_external_need(s: str) -> ExternalNeedsSource | None: repo, path_to_target = repo_and_path.split("//", 1) repo = repo.lstrip("@") - if path_to_target == "" and target == "needs_json": + if path_to_target == "" and target in ("needs_json", "docs_sources"): return ExternalNeedsSource( bazel_module=repo, path_to_target=path_to_target, target=target ) @@ -190,6 +190,26 @@ def add_external_needs_json(e: ExternalNeedsSource, config: Config): ) +def add_external_docs_sources(e: ExternalNeedsSource, config: Config): + # Note that bazel does NOT write the files under e.target! + # {e.bazel_module}+ matches the original git layout! 
+ if r := os.getenv("RUNFILES_DIR"): + docs_source_path = Path(r) / f"{e.bazel_module}+" + else: + logger.error("Combo builds are currently only supported with Bazel.") + return + + if "collections" not in config: + config.collections = {} + config.collections[e.bazel_module] = { + "driver": "symlink", + "source": str(docs_source_path), + "target": e.bazel_module, + } + + logger.info(f"Added external docs source: {docs_source_path} -> {e.bazel_module}") + + def connect_external_needs(app: Sphinx, config: Config): extend_needs_json_exporter(config, ["project_url"]) @@ -197,6 +217,12 @@ def connect_external_needs(app: Sphinx, config: Config): for e in external_needs: assert not e.path_to_target # path_to_target is always empty - assert e.target == "needs_json" - add_external_needs_json(e, app.config) + if e.target == "needs_json": + add_external_needs_json(e, app.config) + elif e.target == "docs_sources": + add_external_docs_sources(e, app.config) + else: + raise ValueError( + f"Internal Error. 
Unknown external needs target: {e.target}" + ) diff --git a/src/extensions/score_sphinx_bundle/__init__.py b/src/extensions/score_sphinx_bundle/__init__.py index 815062d5..0a4ea5a2 100644 --- a/src/extensions/score_sphinx_bundle/__init__.py +++ b/src/extensions/score_sphinx_bundle/__init__.py @@ -25,6 +25,7 @@ "score_source_code_linker", "score_draw_uml_funcs", "score_layout", + "sphinx_collections", "sphinxcontrib.mermaid", ] diff --git a/src/requirements.in b/src/requirements.in index e2e58dc8..e18b3513 100644 --- a/src/requirements.in +++ b/src/requirements.in @@ -4,6 +4,9 @@ Sphinx # https://github.com/useblocks/sphinx-needs/issues/1350 sphinx-needs>=4.2.0 +# Due to needed bugfix in 0.3.1 +sphinx-collections>=0.3.1 + sphinxcontrib-plantuml pydata-sphinx-theme sphinx-design diff --git a/src/requirements.txt b/src/requirements.txt index 2d11788d..0b33fb5d 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -426,6 +426,14 @@ fonttools==4.59.1 \ --hash=sha256:efbec204fa9f877641747f2d9612b2b656071390d7a7ef07a9dbf0ecf9c7195c \ --hash=sha256:fb13823a74b3a9204a8ed76d3d6d5ec12e64cc5bc44914eb9ff1cdac04facd43 # via matplotlib +gitdb==4.0.12 \ + --hash=sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571 \ + --hash=sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf + # via gitpython +gitpython==3.1.45 \ + --hash=sha256:85b0ee964ceddf211c41b9f27a49086010a190fd8132a24e21f362a4b36a791c \ + --hash=sha256:8908cb2e02fb3b93b7eb0f2827125cb699869470432cc885f019b8fd0fccff77 + # via sphinx-collections h11==0.16.0 \ --hash=sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1 \ --hash=sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86 @@ -452,6 +460,7 @@ jinja2==3.1.6 \ # via # myst-parser # sphinx + # sphinx-collections jsonschema==4.25.1 \ --hash=sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63 \ 
--hash=sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85 @@ -805,6 +814,7 @@ packaging==25.0 \ # matplotlib # pytest # sphinx + # sphinx-collections pillow==11.3.0 \ --hash=sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2 \ --hash=sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214 \ @@ -1277,6 +1287,10 @@ six==1.17.0 \ --hash=sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274 \ --hash=sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81 # via python-dateutil +smmap==5.0.2 \ + --hash=sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5 \ + --hash=sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e + # via gitdb sniffio==1.3.1 \ --hash=sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2 \ --hash=sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc @@ -1298,6 +1312,7 @@ sphinx==8.2.3 \ # myst-parser # pydata-sphinx-theme # sphinx-autobuild + # sphinx-collections # sphinx-data-viewer # sphinx-design # sphinx-needs @@ -1308,6 +1323,10 @@ sphinx-autobuild==2024.10.3 \ --hash=sha256:158e16c36f9d633e613c9aaf81c19b0fc458ca78b112533b20dafcda430d60fa \ --hash=sha256:248150f8f333e825107b6d4b86113ab28fa51750e5f9ae63b59dc339be951fb1 # via -r src/requirements.in +sphinx-collections==0.3.1 \ + --hash=sha256:4dda762479d2ad2163ccb074b15f36f72810d9cd08be4daa69854a6e34c99f92 \ + --hash=sha256:fb93b979cc9275bd2ad980a71fd57be5521c0f879f90f8189917a8f7ca0436ab + # via -r src/requirements.in sphinx-data-viewer==0.1.5 \ --hash=sha256:a7d5e58613562bb745380bfe61ca8b69997998167fd6fa9aea55606c9a4b17e4 \ --hash=sha256:b74b1d304c505c464d07c7b225ed0d84ea02dcc88bc1c49cdad7c2275fbbdad4 From bb36f74f177b3decc195f59529a0be49f21218a7 Mon Sep 17 00:00:00 2001 From: Marco Heinemann Date: Fri, 7 Nov 2025 20:11:07 +0100 Subject: [PATCH 152/231] =?UTF-8?q?=E2=9C=A8=20Introduce=20needs-config-wr?= =?UTF-8?q?iter=20(#283)?= 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Users of docs will get a ubproject.toml generated into their confdir. This is a declarative duplicate of all the dynamically generated Sphinx-Needs configuration. It makes it possible to use tools such as ubCode or ubc without any further setup. --- .gitignore | 1 + docs/how-to/faq.md | 12 +++- docs/internals/extensions/index.rst | 16 ++++- docs/internals/extensions/sync_toml.rst | 39 +++++++++++ .../score_metamodel/external_needs.py | 4 +- src/extensions/score_sphinx_bundle/BUILD | 1 + .../score_sphinx_bundle/__init__.py | 2 + src/extensions/score_sync_toml/BUILD | 28 ++++++++ src/extensions/score_sync_toml/__init__.py | 64 +++++++++++++++++++ src/extensions/score_sync_toml/shared.toml | 20 ++++++ src/requirements.in | 5 ++ src/requirements.txt | 57 ++++++++++++++++- 12 files changed, 241 insertions(+), 8 deletions(-) create mode 100644 docs/internals/extensions/sync_toml.rst create mode 100644 src/extensions/score_sync_toml/BUILD create mode 100644 src/extensions/score_sync_toml/__init__.py create mode 100644 src/extensions/score_sync_toml/shared.toml diff --git a/.gitignore b/.gitignore index 8ec9ef6d..b4c5bb79 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,7 @@ user.bazelrc # docs build artifacts /_build* +docs/ubproject.toml # Vale - editorial style guide .vale.ini diff --git a/docs/how-to/faq.md b/docs/how-to/faq.md index 11c2f3b4..dbfc5bdb 100644 --- a/docs/how-to/faq.md +++ b/docs/how-to/faq.md @@ -34,6 +34,12 @@ If this does not work, please use the live preview feature: `bazel run //:live_p This provides immediate metamodel feedback (although only on the console) and IDE-agnostic preview capabilities. +Another option is to use the [ubCode](https://ubcode.useblocks.com) extension for +VS Code. It provides lightning-fast linting, formatting, previewing, navigation and +analytical features for large Sphinx and Sphinx-Needs projects. 
+The `ubproject.toml` configuration file gets generated automatically when a Sphinx +build is running once. The file can safely be committed to the repository to enable +IDE support without requiring a Sphinx build first. ### Esbonio @@ -46,13 +52,13 @@ Known issues: For now please run `bazel run //:ide_support` and restart VS Code. -### uBc +### ubc -Currently, uBc is not aware of our metamodel. As a result, checks and auto-completion +Currently, ubc is not aware of our metamodel. As a result, checks and auto-completion features are not available. We plan to explore improvements in this area in the future together with useblocks. - +These features are on the immediate roadmap. ## Do we need to write custom Python code for every Metamodel check? diff --git a/docs/internals/extensions/index.rst b/docs/internals/extensions/index.rst index 38e9f17a..2ecec2d2 100644 --- a/docs/internals/extensions/index.rst +++ b/docs/internals/extensions/index.rst @@ -21,7 +21,7 @@ Extensions Hello there -.. grid:: 1 1 3 3 +.. grid:: 1 1 3 3 :class-container: score-grid .. grid-item-card:: @@ -52,16 +52,25 @@ Hello there RST Filebased testing ^^^ A new testing approach that we have integrated. It makes it easy to ensure that the metamodel and it's checks - work as intended. Create new checks simply by writing RST files. + work as intended. Create new checks simply by writing RST files. Head over to :ref:`File Based Testing ` to learn more. .. grid-item-card:: Extension Guide ^^^ - Want to learn how to write your own sphinx extension, or see how others have done it? + Want to learn how to write your own sphinx extension, or see how others have done it? Head over to :ref:`Building an Extension` to dive in. + .. grid-item-card:: + + Sync TOML + ^^^ + Learn about the :ref:`config sync ` extension that generates the + ``ubproject.toml`` file needed by the + `ubCode `__ VS Code extension. + Getting IDE support for Sphinx-Needs in a Bazel context made easy. + .. 
toctree:: @@ -73,3 +82,4 @@ Hello there Header Service Source Code Linker Extension Guide + Sync TOML diff --git a/docs/internals/extensions/sync_toml.rst b/docs/internals/extensions/sync_toml.rst new file mode 100644 index 00000000..96a494d7 --- /dev/null +++ b/docs/internals/extensions/sync_toml.rst @@ -0,0 +1,39 @@ +.. _`toml_sync`: + +ubproject.toml sync +=================== + +The extension ``score_sync_toml`` provides configuration for the newly developed +Sphinx extension `needs-config-writer `__. + +The extension allows to write a declarative configuration file ``ubproject.toml`` +that is required by the `ubCode VS Code extension `__ +and its companion CLI app `ubc `__ +to get hold of the Sphinx-Needs configuration. + +The solution outlined here can serve as a template for how to build IDE extensions +in a Bazel environment. IDE extensions need to know the configuration and file sets +that are part of the project. This information is hidden in the build system, so a call +is needed to extract this information and make it available to the IDE extension. + +The basic idea is to stay with the programmed configuration system for Sphinx and +Sphinx-Needs as it exists in S-CORE, but use it to generate the ``ubproject.toml`` file. +The ``ubproject.toml`` file is generated into the directory holding the ``conf.py`` file +(called ``confdir`` in Sphinx) and should be checked into the version control system +alongside ``conf.py``. + +The ``ubproject.toml`` file is generated on each Sphinx build, so any changes to the +Sphinx-Needs configuration are automatically reflected in the generated file. +If changes are detected, a warning is emitted during the Sphinx build to remind the user +to commit the updated ``ubproject.toml`` file. Changes may occur because docs-as-code +updated a configuration or a new Sphinx-Needs version added configuration. 
+ +Committing the generated ``ubproject.toml`` file allows the IDE extension to work +without requiring any Sphinx build to be performed first. For a fully complete network +of need items, required external or imported ``needs.json`` files must first be +generated by Bazel. + +The command line tool ``ubc`` uses the same configuration as ``ubCode`` and can be +used to lint and format all RST files in any of the S-CORE documentations. +``ubc`` also has features for exporting traceability data to specific file formats or +performing diff and impact analysis. diff --git a/src/extensions/score_metamodel/external_needs.py b/src/extensions/score_metamodel/external_needs.py index a9f85c87..8e8208a0 100644 --- a/src/extensions/score_metamodel/external_needs.py +++ b/src/extensions/score_metamodel/external_needs.py @@ -179,7 +179,9 @@ def add_external_needs_json(e: ExternalNeedsSource, config: Config): ) # Attempt to continue, exit code will be non-zero after a logged error anyway. return - + # this sets the default value - required for the needs-config-writer + # setting 'needscfg_exclude_defaults = True' to see the diff + config.needs_external_needs = [] assert isinstance(config.needs_external_needs, list) # pyright: ignore[reportUnknownMemberType] config.needs_external_needs.append( # pyright: ignore[reportUnknownMemberType] { diff --git a/src/extensions/score_sphinx_bundle/BUILD b/src/extensions/score_sphinx_bundle/BUILD index c7f96d0d..10aa50a3 100644 --- a/src/extensions/score_sphinx_bundle/BUILD +++ b/src/extensions/score_sphinx_bundle/BUILD @@ -24,6 +24,7 @@ py_library( "@score_docs_as_code//src/extensions/score_layout", "@score_docs_as_code//src/extensions/score_metamodel", "@score_docs_as_code//src/extensions/score_source_code_linker", + "@score_docs_as_code//src/extensions/score_sync_toml", "@score_docs_as_code//src/find_runfiles", "@score_docs_as_code//src/helper_lib", ], diff --git a/src/extensions/score_sphinx_bundle/__init__.py 
b/src/extensions/score_sphinx_bundle/__init__.py index 0a4ea5a2..5f8318e6 100644 --- a/src/extensions/score_sphinx_bundle/__init__.py +++ b/src/extensions/score_sphinx_bundle/__init__.py @@ -27,6 +27,8 @@ "score_layout", "sphinx_collections", "sphinxcontrib.mermaid", + "needs_config_writer", + "score_sync_toml", ] diff --git a/src/extensions/score_sync_toml/BUILD b/src/extensions/score_sync_toml/BUILD new file mode 100644 index 00000000..e9be6926 --- /dev/null +++ b/src/extensions/score_sync_toml/BUILD @@ -0,0 +1,28 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +load("@aspect_rules_py//py:defs.bzl", "py_library") +load("@pip_process//:requirements.bzl", "requirement") + +py_library( + name = "score_sync_toml", + srcs = [ + "__init__.py", + "shared.toml", + ], + imports = ["."], + visibility = ["//visibility:public"], + deps = [ + requirement("sphinx"), + requirement("needs-config-writer"), + ], +) diff --git a/src/extensions/score_sync_toml/__init__.py b/src/extensions/score_sync_toml/__init__.py new file mode 100644 index 00000000..f767e81d --- /dev/null +++ b/src/extensions/score_sync_toml/__init__.py @@ -0,0 +1,64 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +from pathlib import Path + +from sphinx.application import Sphinx + + +def setup(app: Sphinx) -> dict[str, str | bool]: + """ + Extension to configure needs-config-writer for syncing needs configuration to TOML. + + See https://needs-config-writer.useblocks.com + """ + + app.config.needscfg_outpath = "ubproject.toml" + """Write to the confdir directory.""" + + app.config.needscfg_overwrite = True + """Any changes to the shared/local configuration updates the generated config.""" + + app.config.needscfg_write_all = True + """Write full config, so the final configuration is visible in one file.""" + + app.config.needscfg_exclude_defaults = True + """Exclude default values from the generated configuration.""" + + app.config.needscfg_warn_on_diff = True + """Running Sphinx with -W will fail the CI for uncommitted TOML changes.""" + + app.config.needscfg_merge_toml_files = [ + str(Path(__file__).parent / "shared.toml"), + ] + """Merge the static TOML file into the generated configuration.""" + + app.config.needscfg_relative_path_fields = [ + "needs_external_needs[*].json_path", + { + "field": "needs_flow_configs.score_config", + "prefix": "!include ", + }, + ] + """Relative paths to confdir for Bazel provided absolute paths.""" + + app.config.suppress_warnings += [ + "needs_config_writer.unsupported_type", + "needs_config_writer.path_conversion", + ] + # TODO remove the suppress_warnings once fixed + + return { + "version": "0.1", + "parallel_read_safe": True, + "parallel_write_safe": True, + } diff --git a/src/extensions/score_sync_toml/shared.toml b/src/extensions/score_sync_toml/shared.toml new file mode 100644 index 00000000..9daf6ba0 --- /dev/null +++ 
b/src/extensions/score_sync_toml/shared.toml @@ -0,0 +1,20 @@ +[parse.extend_directives.grid] +argument = true +options = true +content = true +parse_content = true +content_required = true + +[parse.extend_directives.grid-item-card] +argument = false +options = true +content = true +parse_content = true +content_required = true + +[parse.extend_directives.uml] +argument = true +options = true +content = true +parse_content = false +content_required = false diff --git a/src/requirements.in b/src/requirements.in index e18b3513..789af6b1 100644 --- a/src/requirements.in +++ b/src/requirements.in @@ -25,3 +25,8 @@ esbonio<1 # Although not required in all targets, we want pytest within ide_support to run tests from the IDE. debugpy rich + +# write out dynamically assembled Sphinx-Needs configuration in TOML format +needs-config-writer == 0.2.4 +# use this for a specific commit for fast development iterations +# needs-config-writer @ https://github.com/useblocks/needs-config-writer/archive/032a5f8.zip diff --git a/src/requirements.txt b/src/requirements.txt index 0b33fb5d..a405c168 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -715,6 +715,10 @@ myst-parser==4.0.1 \ --hash=sha256:5cfea715e4f3574138aecbf7d54132296bfd72bb614d31168f48c477a830a7c4 \ --hash=sha256:9134e88959ec3b5780aedf8a99680ea242869d012e8821db3126d427edc9c95d # via -r src/requirements.in +needs-config-writer==0.2.4 \ + --hash=sha256:0f0702574081bb8ed7d896aadfb73c0e48af099dc0d4227cc2bac957ed8ea4f6 \ + --hash=sha256:7c89375848c822e891b3cca48783f3cc3f7cbd3c02cba19418de146ca077f212 + # via -r src/requirements.in nodejs-wheel-binaries==22.16.0 \ --hash=sha256:2728972d336d436d39ee45988978d8b5d963509e06f063e80fe41b203ee80b28 \ --hash=sha256:2fffb4bf1066fb5f660da20819d754f1b424bca1b234ba0f4fa901c52e3975fb \ @@ -1310,6 +1314,7 @@ sphinx==8.2.3 \ # -r src/requirements.in # esbonio # myst-parser + # needs-config-writer # pydata-sphinx-theme # sphinx-autobuild # sphinx-collections @@ -1338,7 +1343,9 
@@ sphinx-design==0.6.1 \ sphinx-needs[plotting]==5.1.0 \ --hash=sha256:23a0ca1dfe733a0a58e884b59ce53a8b63a530f0ac87ae5ab0d40f05f853fbe7 \ --hash=sha256:7adf3763478e91171146918d8af4a22aa0fc062a73856f1ebeb6822a62cbe215 - # via -r src/requirements.in + # via + # -r src/requirements.in + # needs-config-writer sphinxcontrib-applehelp==2.0.0 \ --hash=sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1 \ --hash=sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5 @@ -1378,6 +1385,54 @@ starlette==0.47.2 \ --hash=sha256:6ae9aa5db235e4846decc1e7b79c4f346adf41e9777aebeb49dfd09bbd7023d8 \ --hash=sha256:c5847e96134e5c5371ee9fac6fdf1a67336d5815e09eb2a01fdb57a351ef915b # via sphinx-autobuild +tomli==2.3.0 \ + --hash=sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456 \ + --hash=sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845 \ + --hash=sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999 \ + --hash=sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0 \ + --hash=sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878 \ + --hash=sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf \ + --hash=sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3 \ + --hash=sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be \ + --hash=sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52 \ + --hash=sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b \ + --hash=sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67 \ + --hash=sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549 \ + --hash=sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba \ + --hash=sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22 \ + --hash=sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c \ + 
--hash=sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f \ + --hash=sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6 \ + --hash=sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba \ + --hash=sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45 \ + --hash=sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f \ + --hash=sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77 \ + --hash=sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606 \ + --hash=sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441 \ + --hash=sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0 \ + --hash=sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f \ + --hash=sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530 \ + --hash=sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05 \ + --hash=sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8 \ + --hash=sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005 \ + --hash=sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879 \ + --hash=sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae \ + --hash=sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc \ + --hash=sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b \ + --hash=sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b \ + --hash=sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e \ + --hash=sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf \ + --hash=sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac \ + --hash=sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8 \ + --hash=sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b \ + 
--hash=sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf \ + --hash=sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463 \ + --hash=sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876 + # via needs-config-writer +tomli-w==1.2.0 \ + --hash=sha256:188306098d013b691fcadc011abd66727d3c414c571bb01b1a174ba8c983cf90 \ + --hash=sha256:2dd14fac5a47c27be9cd4c976af5a12d87fb1f0b4512f81d69cce3b35ae25021 + # via needs-config-writer typing-extensions==4.14.1 \ --hash=sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36 \ --hash=sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76 From 19e57a308ddcf61e481e98cc523407346d0438b2 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Mon, 10 Nov 2025 11:33:40 +0100 Subject: [PATCH 153/231] set version and fix command name (#294) Co-authored-by: Andreas Zwinkau <95761648+a-zw@users.noreply.github.com> --- MODULE.bazel | 4 ++-- docs.bzl | 2 +- src/extensions/score_sphinx_bundle/__init__.py | 2 ++ 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index cc37719f..07459daa 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,8 +13,8 @@ module( name = "score_docs_as_code", - version = "1.5.0", - compatibility_level = 1, + version = "2.0.0", + compatibility_level = 2, ) ############################################################################### diff --git a/docs.bzl b/docs.bzl index 8de8b2dc..00f1c676 100644 --- a/docs.bzl +++ b/docs.bzl @@ -116,7 +116,7 @@ def docs(source_dir = "docs", data = [], deps = []): ) py_binary( - name = "docs_combo", + name = "docs_combo_experimental", tags = ["cli_help=Build full documentation with all dependencies:\nbazel run //:docs_combo_experimental"], srcs = ["@score_docs_as_code//src:incremental.py"], data = data_with_docs_sources, diff --git a/src/extensions/score_sphinx_bundle/__init__.py b/src/extensions/score_sphinx_bundle/__init__.py index 5f8318e6..222d2447 100644 
--- a/src/extensions/score_sphinx_bundle/__init__.py +++ b/src/extensions/score_sphinx_bundle/__init__.py @@ -69,6 +69,8 @@ def setup(app: Sphinx) -> dict[str, object]: return { "version": "0.1", + # Keep this in sync with the score_docs_as_code version in MODULE.bazel + "env_version": 200, # 2.0.0 "parallel_read_safe": True, "parallel_write_safe": True, } From a5f3fb646b1f7fd9853afb5ece88d60c58839f78 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Tue, 11 Nov 2025 12:07:12 +0100 Subject: [PATCH 154/231] Add Nikola as code owner (#299) --- .github/CODEOWNERS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 6918bf3c..8db10ee0 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -6,4 +6,5 @@ # For more information about CODEOWNERS, see: # https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners -* @AlexanderLanin @MaximilianSoerenPollak @dcalavrezo-qorix +# https://github.com/orgs/eclipse-score/teams/infrastructure-maintainers +* @AlexanderLanin @MaximilianSoerenPollak @dcalavrezo-qorix @nradakovic From 87b43e172765b899a6e37fe30422ed6439437519 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Tue, 11 Nov 2025 12:07:32 +0100 Subject: [PATCH 155/231] Fix: allow multiple dependencies + bump version (#298) --- MODULE.bazel | 2 +- src/extensions/score_metamodel/external_needs.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 07459daa..3f9eeefc 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "2.0.0", + version = "2.0.1", compatibility_level = 2, ) diff --git a/src/extensions/score_metamodel/external_needs.py b/src/extensions/score_metamodel/external_needs.py index 8e8208a0..0c48a56a 100644 --- a/src/extensions/score_metamodel/external_needs.py +++ 
b/src/extensions/score_metamodel/external_needs.py @@ -179,9 +179,6 @@ def add_external_needs_json(e: ExternalNeedsSource, config: Config): ) # Attempt to continue, exit code will be non-zero after a logged error anyway. return - # this sets the default value - required for the needs-config-writer - # setting 'needscfg_exclude_defaults = True' to see the diff - config.needs_external_needs = [] assert isinstance(config.needs_external_needs, list) # pyright: ignore[reportUnknownMemberType] config.needs_external_needs.append( # pyright: ignore[reportUnknownMemberType] { @@ -217,6 +214,10 @@ def connect_external_needs(app: Sphinx, config: Config): external_needs = get_external_needs_source(app.config.external_needs_source) + # this sets the default value - required for the needs-config-writer + # setting 'needscfg_exclude_defaults = True' to see the diff + config.needs_external_needs = [] + for e in external_needs: assert not e.path_to_target # path_to_target is always empty From 60db8fef1670c3dcbbe372fbba854f50ad61c1a4 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Wed, 12 Nov 2025 12:04:06 +0100 Subject: [PATCH 156/231] Bump version to 2.0.2 and update score_process dependency to 1.3.1 (#301) --- MODULE.bazel | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 3f9eeefc..24273cae 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "2.0.1", + version = "2.0.2", compatibility_level = 2, ) @@ -97,7 +97,9 @@ http_file( # Checker rule for CopyRight checks/fixes # docs dependency -bazel_dep(name = "score_process", version = "1.2.0") +# Note: requirements were last aligned with 1.2.0, +# the switch to 1.3.1 is purely to drop the dependency on docs-as-code 1.x. 
+bazel_dep(name = "score_process", version = "1.3.1") # Add Linter bazel_dep(name = "rules_multitool", version = "1.9.0") From 3a00e3dce811845d8562f142d6af1a8583e8df38 Mon Sep 17 00:00:00 2001 From: RolandJentschETAS <135332348+RolandJentschETAS@users.noreply.github.com> Date: Wed, 12 Nov 2025 12:28:15 +0100 Subject: [PATCH 157/231] color change to red border for architecture elements in diagrams which are ASIL_B (#300) Signed-off-by: jhr2hi@bosch.com --- src/extensions/score_draw_uml_funcs/helpers.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/extensions/score_draw_uml_funcs/helpers.py b/src/extensions/score_draw_uml_funcs/helpers.py index 1ef552dc..11a2e5fe 100644 --- a/src/extensions/score_draw_uml_funcs/helpers.py +++ b/src/extensions/score_draw_uml_funcs/helpers.py @@ -98,9 +98,7 @@ def gen_header() -> str: def gen_sytle_header() -> str: """Create PlantUML Header for Style definition""" - return ( - """""" + "\n" - ) + return """""" + "\n" ######################################################################## From 8fe17a3e3b31a804417264a948ba2739095be3a9 Mon Sep 17 00:00:00 2001 From: Andreas Zwinkau <95761648+a-zw@users.noreply.github.com> Date: Mon, 17 Nov 2025 12:46:39 +0100 Subject: [PATCH 158/231] documentation for cross-referencing between modules (#302) Signed-off-by: Andreas Zwinkau <95761648+a-zw@users.noreply.github.com> --- docs/concepts/bidirectional_traceability.rst | 105 ++++++++++++++++++ docs/concepts/index.rst | 11 ++ docs/how-to/get_started.rst | 12 ++ docs/how-to/index.rst | 10 +- docs/how-to/other_modules.rst | 64 +++++++++++ docs/how-to/setup.md | 26 +++-- docs/how-to/source_to_doc_links.rst | 15 +++ docs/how-to/test_to_doc_links.rst | 34 ++++++ docs/how-to/write_docs.rst | 11 ++ docs/index.rst | 18 +-- .../001-test-results-in-workflow.md | 0 .../index.rst | 0 docs/internals/index.rst | 5 +- .../requirements/capabilities.md | 0 docs/{ => internals}/requirements/index.rst | 0 
.../requirements/process_overview.rst | 0 .../requirements/requirements.rst | 0 .../requirements/test_overview.rst | 0 docs/reference/bazel_macros.rst | 54 +++++++++ docs/{how-to => reference}/commands.md | 0 docs/reference/index.rst | 12 ++ .../generate_source_code_links_json.py | 12 +- 22 files changed, 365 insertions(+), 24 deletions(-) create mode 100644 docs/concepts/bidirectional_traceability.rst create mode 100644 docs/concepts/index.rst create mode 100644 docs/how-to/get_started.rst create mode 100644 docs/how-to/other_modules.rst create mode 100644 docs/how-to/source_to_doc_links.rst create mode 100644 docs/how-to/test_to_doc_links.rst create mode 100644 docs/how-to/write_docs.rst rename docs/internals/{decisions_and_concepts => decisions}/001-test-results-in-workflow.md (100%) rename docs/internals/{decisions_and_concepts => decisions}/index.rst (100%) rename docs/{ => internals}/requirements/capabilities.md (100%) rename docs/{ => internals}/requirements/index.rst (100%) rename docs/{ => internals}/requirements/process_overview.rst (100%) rename docs/{ => internals}/requirements/requirements.rst (100%) rename docs/{ => internals}/requirements/test_overview.rst (100%) create mode 100644 docs/reference/bazel_macros.rst rename docs/{how-to => reference}/commands.md (100%) create mode 100644 docs/reference/index.rst diff --git a/docs/concepts/bidirectional_traceability.rst b/docs/concepts/bidirectional_traceability.rst new file mode 100644 index 00000000..7255bc6d --- /dev/null +++ b/docs/concepts/bidirectional_traceability.rst @@ -0,0 +1,105 @@ +.. _bidirectional_traceability: + +Bi-directional Traceability +=========================== + +The S-CORE project uses a multi-repository layout for source and documentation. +To satisfy standards such as ASPICE and to support engineers and reviewers, documentation must provide reliable bi-directional traceability between needs, requirements, design artifacts and code. 
+ +Traceability in docs means two things in practice, e.g. if one requirement satisfies another one: + +- forward links: from satisfier to satisfied need/requirement. +- backward links: satisfied requirement references all requirements it is satisfied by. + +We support two complimentary strategies for providing these links in the published documentation: +build-with-links (fast build but only forward links) and +build-with-copies (self-contained release docs with backward links). +The Bazel-based doc build exposes both approaches via dedicated targets. + +Strategies: links vs copies +--------------------------- + +Build with links +~~~~~~~~~~~~~~~~ + +.. code-block:: sh + + bazel run //:docs + +The documentation build depends on the needs.json files from other modules. +The built site contains hyperlinks that point at the other repositories' documentation at https://eclipse-score.github.io. +For individual modules this means the build is relatively fast and this can be done in every pull request. + +The tradeoff is that the target of the hyperlink is unaware. +That other module's need elements will not have backlinks. +At least not immediately. +In a later revision they can update their dependency on the first module and then the references are updated in their documentation. + +Build with copies +~~~~~~~~~~~~~~~~~ + +.. code-block:: sh + + bazel run //:docs_combo_experimental + +The documentation build does not depend on the needs.json but on whole documentation source code. + +Using `sphinx_collections `_ +not just the current module is built but all referenced modules are included. + +The advantage is that the produced documentation is consistent and stays that way. +There is no outwards hyperlink which could break or be outdated. + +The tradeoff is that build takes longer and the output needs more space. +At the very least for release builds this is acceptable. 
+ + +Module links vs Bazel target deps +--------------------------------- + +Remember: Bazel target dependencies must not be circular. +Module and document references may be circular. +Use the combo build and the copy strategy to produce release documentation that contains all needed pages while keeping Bazel's graph acyclic. + + + +.. plantuml:: + + @startuml + ' Overall component style + skinparam componentStyle rectangle + + ' Module-level (conceptual links can be circular) + component "<> @A" as MA + component "<> @B" as MB + ' Style bazel_dep edges: strong red, solid + MA =[#DarkRed]=> MB : bazel_dep + MB =[#DarkRed]=> MA : bazel_dep + + ' Build-level (Bazel targets must be acyclic) + usecase "<> @A:needs_json" as AT + usecase "<> @B:needs_json" as BT + ' Style depends edge: blue dashed + AT .[#DodgerBlue].> BT : depends + ' Note: no BT --> AT allowed + + ' Modules provide the targets used by the build + MA -[#ForestGreen]-> AT : provides + MB -[#ForestGreen]-> BT : provides + + note left of MA + Module-level + references may be + bi-directional + end note + + note right of BT + Bazel target deps + must be acyclic + end note + + @enduml + +The diagram above shows the difference between module-level references (which may be circular) and Bazel target dependencies (which must remain acyclic). +Module A and Module B may reference each other in documentation or design (bi-directional links). +Their corresponding Bazel targets must be arranged so the build dependency graph has no cycles. diff --git a/docs/concepts/index.rst b/docs/concepts/index.rst new file mode 100644 index 00000000..6357bdf5 --- /dev/null +++ b/docs/concepts/index.rst @@ -0,0 +1,11 @@ +.. _concepts: + +Concepts +======== + +Here you find explanations how and why docs-as-code works the way it does. + +.. 
toctree:: + :maxdepth: 1 + + bidirectional_traceability diff --git a/docs/how-to/get_started.rst b/docs/how-to/get_started.rst new file mode 100644 index 00000000..d76a4671 --- /dev/null +++ b/docs/how-to/get_started.rst @@ -0,0 +1,12 @@ +Getting started +=============== + +In an existing S-CORE repository, you can build the documentation using Bazel: + +.. code-block:: bash + + bazel run //:docs + +Open the generated site at ``_build/index.html`` in your browser. + +In a new S-CORE repository, see :ref:`setup`. diff --git a/docs/how-to/index.rst b/docs/how-to/index.rst index a7c8236f..4bc0ae7b 100644 --- a/docs/how-to/index.rst +++ b/docs/how-to/index.rst @@ -3,9 +3,15 @@ How To ====== +Here you find practical guides on how to use docs-as-code. + .. toctree:: :maxdepth: 1 - faq - commands + get_started setup + write_docs + faq + other_modules + source_to_doc_links + test_to_doc_links diff --git a/docs/how-to/other_modules.rst b/docs/how-to/other_modules.rst new file mode 100644 index 00000000..cb35c51f --- /dev/null +++ b/docs/how-to/other_modules.rst @@ -0,0 +1,64 @@ +Reference Other Modules +======================= + +This document explains how to enable cross-module (bi-directional) linking between documentation modules with Sphinx-Needs in this project. +In short: + +1. Make the other module available to Bazel via the `MODULE` (aka `MODULE.bazel`) file. +2. Add the external module's documentation targets to your `docs(data=[...])` target so Sphinx can see the other module's built inventory. +3. Reference remote needs using the normal Sphinx-Needs referencing syntax. + +Details and Example +------------------- + +1) Include the other module in `MODULE.bazel` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The consumer module must declare the other modules as dependencies in the `MODULE.bazel` file so Bazel can fetch them. +There are multiple ways to do this depending on how you manage third-party/local modules (git, local overrides, etc.). 
+ +A minimal example (add or extend the existing `bazel_deps` stanza): + +.. code-block:: starlark + + bazel_dep(name = "score_process", version = "1.3.0") + +2) Extend your `docs` rule so Sphinx picks up the other module's inventory +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The documentation build in this project is exposed via a Bazel macro/rule that accepts a `data` parameter. +Add the external module's ``:needs_json`` target to that list +to have their needs elements available for cross-referencing. + +Example `BUILD` snippet (consumer module): + +.. code-block:: starlark + + load("@rules_docs//:docs.bzl", "docs") + docs( + data = [ + "@score_process//:needs_json", + ], + source_dir = "docs", + ) + +More details in :ref:`bidirectional_traceability`. + +3) Reference needs across modules +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Once the other module's are defined as dependencies as explained above, you can reference their needs IDs in the usual Sphinx-Needs way. +The important part is that the inventory name that Sphinx-Needs looks up matches the module that produced the needs entries. + +Example in reStructuredText: + +.. code-block:: rst + + See the requirement :need:`gd_req__req_traceability`, for example. + +Which results in: + + See the requirement :need:`gd_req__req_traceability`, for example. + +See the `Sphinx-Needs documentation `_ +for more details on cross-referencing needs. diff --git a/docs/how-to/setup.md b/docs/how-to/setup.md index b82fd5b2..7fe573a2 100644 --- a/docs/how-to/setup.md +++ b/docs/how-to/setup.md @@ -1,7 +1,6 @@ -(getting_started)= +(setup)= # Setup -⚠️ Only valid for docs-as-code v1.x.x. ## Overview @@ -19,7 +18,7 @@ designed to enhance documentation capabilities in S-CORE. ## Installation -### 1. /MODULE.bazel file +### 1. 
MODULE.bazel file Add the module to your `MODULE.bazel` file: @@ -34,9 +33,20 @@ common --registry=https://raw.githubusercontent.com/eclipse-score/bazel_registry common --registry=https://bcr.bazel.build ``` -______________________________________________________________________ +### 2. .bazelrc file -### 2. /BUILD file +Since we use `PlantUML `_ for diagrams, we need some Java. +If there is no Java on your system, Bazel can download a remote JDK for you +but that requires some configuration in your `.bazelrc` file: + +``` +build --java_language_version=17 +build --java_runtime_version=remotejdk_17 +build --tool_java_language_version=17 +build --tool_java_runtime_version=remotejdk_17 +``` + +### 3. BUILD file ```starlark @@ -61,18 +71,18 @@ The `docs()` macro accepts the following arguments: | `data` | List of `needs_json` targets that should be included in the documentation| No | -### 3. Copy conf.py +### 4. Copy conf.py Copy the `conf.py` file from the `docs-as-code` module to your `source_dir`. -#### 4. Run a documentation build: +#### 5. Run a documentation build: ```bash bazel run //:docs ``` -#### 5. Access your documentation at +#### 6. Access your documentation at `/_build/index.html` diff --git a/docs/how-to/source_to_doc_links.rst b/docs/how-to/source_to_doc_links.rst new file mode 100644 index 00000000..f36866a3 --- /dev/null +++ b/docs/how-to/source_to_doc_links.rst @@ -0,0 +1,15 @@ +Reference Docs in Source Code +============================= + +In your C++/Rust/Python source code, you want to reference requirements (needs). +The docs-as-code tool will create backlinks in the documentation. + +Use a comment and start with ``req-Id:`` or ``req-traceability:`` followed by the need ID. + +.. code-block:: python + + # req-Id: TOOL_REQ__EXAMPLE_ID + # req-traceability: TOOL_REQ__EXAMPLE_ID + +For an example, look at the attribute ``source_code_link`` +of :need:`tool_req__docs_common_attr_title`. 
diff --git a/docs/how-to/test_to_doc_links.rst b/docs/how-to/test_to_doc_links.rst new file mode 100644 index 00000000..82dd7904 --- /dev/null +++ b/docs/how-to/test_to_doc_links.rst @@ -0,0 +1,34 @@ +Reference Docs in Tests +======================= + +In tests, you want to reference requirements (needs). +The docs-as-code tool will create backlinks in the documentation. + +Docs-as-code parses `test.xml` files produced by Bazel under `bazel-testlogs/`. +To attach metadata to tests use the project tooling decorator (provided by the +attribute plugin). Example usage: + +.. code-block:: python + + from attribute_plugin import add_test_properties + + @add_test_properties( + partially_verifies=["tool_req__docs_common_attr_title", "tool_req__docs_common_attr_description"], + test_type="interface-test", + derivation_technique="boundary-values", + ) + def test_feature(): + """Short description of what the test does.""" + ... + +TestLink will extract test name, file, line, result and verification lists +(`PartiallyVerifies`, `FullyVerifies`) and create external needs from tests +and `testlink` attributes on requirements that reference the test. + + +Limitations +----------- + +- Not compatible with Esbonio/Live_preview. +- Tags and XML must match the expected format exactly for parsing to work. +- Tests must be executed by Bazel first so `test.xml` files exist. diff --git a/docs/how-to/write_docs.rst b/docs/how-to/write_docs.rst new file mode 100644 index 00000000..034a322b --- /dev/null +++ b/docs/how-to/write_docs.rst @@ -0,0 +1,11 @@ +Write Documentation +=================== + +`Sphinx `_: +the documentation generator we use. + +`reStructuredText (reST) `_: +the plain-text markup language used for most source files in this project. + +`Sphinx-Needs `_: +a Sphinx extension that models requirements, tests, tasks and other "needs" inside the docs. 
diff --git a/docs/index.rst b/docs/index.rst index 8edeca5a..78564f8a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -12,10 +12,10 @@ # ******************************************************************************* Docs-As-Code -===================== - -Welcome to the docs-as-code documentation, it is organized into several key sections. +============ +This is the tool / toolchain for documentation in the S-CORE ecosystem. +It provides documentation, requirements, and traceability. .. grid:: 1 1 3 3 :class-container: score-grid @@ -29,15 +29,16 @@ Welcome to the docs-as-code documentation, it is organized into several key sect .. grid-item-card:: - :ref:`Internals ` + :ref:`Reference ` ^^^ - How does docs-as-code work? This section provides an overview of the architecture and design decisions behind the tooling. + API and usage reference. + .. grid-item-card:: - :ref:`Requirements ` + :ref:`Concepts ` ^^^ - The official tool requirements for docs-as-code. + Key concepts, models and explanatory material to understand the system. .. 
if-collection:: score_process @@ -59,8 +60,9 @@ Welcome to the docs-as-code documentation, it is organized into several key sect :titlesonly: how-to/index + reference/index + concepts/index internals/index - requirements/index docs-as-code is based on score_process: diff --git a/docs/internals/decisions_and_concepts/001-test-results-in-workflow.md b/docs/internals/decisions/001-test-results-in-workflow.md similarity index 100% rename from docs/internals/decisions_and_concepts/001-test-results-in-workflow.md rename to docs/internals/decisions/001-test-results-in-workflow.md diff --git a/docs/internals/decisions_and_concepts/index.rst b/docs/internals/decisions/index.rst similarity index 100% rename from docs/internals/decisions_and_concepts/index.rst rename to docs/internals/decisions/index.rst diff --git a/docs/internals/index.rst b/docs/internals/index.rst index 8df6d8cd..aaa703aa 100644 --- a/docs/internals/index.rst +++ b/docs/internals/index.rst @@ -3,9 +3,12 @@ Internals ========= +This section is not relevant for users of docs-as-code but for developers extending or maintaining it. + .. 
toctree:: :maxdepth: 1 extensions/index benchmark_results - decisions_and_concepts/index + decisions/index + requirements/index diff --git a/docs/requirements/capabilities.md b/docs/internals/requirements/capabilities.md similarity index 100% rename from docs/requirements/capabilities.md rename to docs/internals/requirements/capabilities.md diff --git a/docs/requirements/index.rst b/docs/internals/requirements/index.rst similarity index 100% rename from docs/requirements/index.rst rename to docs/internals/requirements/index.rst diff --git a/docs/requirements/process_overview.rst b/docs/internals/requirements/process_overview.rst similarity index 100% rename from docs/requirements/process_overview.rst rename to docs/internals/requirements/process_overview.rst diff --git a/docs/requirements/requirements.rst b/docs/internals/requirements/requirements.rst similarity index 100% rename from docs/requirements/requirements.rst rename to docs/internals/requirements/requirements.rst diff --git a/docs/requirements/test_overview.rst b/docs/internals/requirements/test_overview.rst similarity index 100% rename from docs/requirements/test_overview.rst rename to docs/internals/requirements/test_overview.rst diff --git a/docs/reference/bazel_macros.rst b/docs/reference/bazel_macros.rst new file mode 100644 index 00000000..d5f65fdf --- /dev/null +++ b/docs/reference/bazel_macros.rst @@ -0,0 +1,54 @@ +.. _bazel-macros: + +Bazel macro: ``docs`` +===================== + +The ``docs`` macro defined in ``docs.bzl`` is a convenience wrapper +that creates a small set of Bazel targets +to build, verify and preview the project's Sphinx documentation, +and to create a Python virtual environment for IDE support (Esbonio). + +See :doc:`commands ` for the targets/commands it creates. + +The macro must be called from the repository root package. + +Minimal example (root ``BUILD``) +-------------------------------- + +.. 
code-block:: python + + load("//:docs.bzl", "docs") + + docs( + source_dir = "docs", + data = [ + # labels to any extra tools or data you want included + # e.g. "//:needs_json" or other tool targets + ], + deps = [ + # additional bazel labels providing Python deps or other runfiles + ], + ) + +- ``source_dir`` (string, default: ``"docs"``) + Path (relative to repository root) to your Sphinx source directory. This is the folder + that contains your ``conf.py`` and the top-level ReST/markdown sources. + +- ``data`` (list of bazel labels) + Extra runfiles / data targets that should be made available to the documentation targets. + Typical entries are targets that generate or provide external data used by the docs, for + example a ``:needs_json`` producer. The items in ``data`` are added to the py_binaries and + to the Sphinx tooling so they are available at build time. + +- ``deps`` (list of bazel labels) + Additional Bazel dependencies to add to the Python binaries and the virtual environment + target. Use this to add project-specific Python packages or extension libraries the docs + build requires. + +Edge cases +---------- + +- If your Sphinx ``conf.py`` expects files generated by other Bazel targets, make sure those + targets are included in the ``data`` list so they are available to the build driver. +- The experimental "combo" targets rewrite some ``data`` labels for combined builds; those + are intended for advanced use and are optional for normal doc workflows. diff --git a/docs/how-to/commands.md b/docs/reference/commands.md similarity index 100% rename from docs/how-to/commands.md rename to docs/reference/commands.md diff --git a/docs/reference/index.rst b/docs/reference/index.rst new file mode 100644 index 00000000..140fb183 --- /dev/null +++ b/docs/reference/index.rst @@ -0,0 +1,12 @@ +.. _reference: + +Reference +========= + +Here you find the API and usage reference for docs-as-code. + +.. 
toctree:: + :maxdepth: 1 + + commands + bazel_macros diff --git a/src/extensions/score_source_code_linker/generate_source_code_links_json.py b/src/extensions/score_source_code_linker/generate_source_code_links_json.py index 18f5ee28..abedc2db 100644 --- a/src/extensions/score_source_code_linker/generate_source_code_links_json.py +++ b/src/extensions/score_source_code_linker/generate_source_code_links_json.py @@ -80,11 +80,13 @@ def iterate_files_recursively(search_path: Path): def _should_skip_file(file_path: Path) -> bool: """Check if a file should be skipped during scanning.""" # TODO: consider using .gitignore - return ( - file_path.is_dir() - or file_path.name.startswith((".", "_")) - or file_path.suffix in [".pyc", ".so", ".exe", ".bin"] - ) + if file_path.is_dir(): + return True + if file_path.suffix in [".pyc", ".so", ".exe", ".bin"]: + return True # skip binaries + if file_path.suffix in [".rst", ".md"]: + return True # skip documentation + return file_path.name.startswith((".", "_")) for root, dirs, files in os.walk(search_path): root_path = Path(root) From 14d18a25adbef7e0253eda8fd4fc73b59bcdcfa7 Mon Sep 17 00:00:00 2001 From: Andreas Zwinkau <95761648+a-zw@users.noreply.github.com> Date: Fri, 21 Nov 2025 15:46:23 +0100 Subject: [PATCH 159/231] Do not use sphinx-collections (#306) It breaks reference-integration because collections don't nest. See #305 --- docs/index.rst | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 78564f8a..046f4eef 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -41,17 +41,6 @@ It provides documentation, requirements, and traceability. Key concepts, models and explanatory material to understand the system. -.. if-collection:: score_process - - .. grid:: 1 1 3 3 - :class-container: score-grid - - .. 
grid-item-card:: - - :ref:`score_process <_collections/score_process/process/index>` - ^^^ - Documentation for the score_process that docs-as-code is based on, including backlinks to docs-as-code. - .. dropdown:: Sitemap .. toctree:: @@ -63,14 +52,3 @@ It provides documentation, requirements, and traceability. reference/index concepts/index internals/index - - docs-as-code is based on score_process: - - .. if-collection:: score_process - - .. toctree:: - :maxdepth: 5 - :includehidden: - :titlesonly: - - _collections/score_process/process/index From 703af50d6f565d8d9d6d2b874c42bedabdb5193f Mon Sep 17 00:00:00 2001 From: Andreas Zwinkau <95761648+a-zw@users.noreply.github.com> Date: Tue, 25 Nov 2025 15:49:57 +0100 Subject: [PATCH 160/231] Process safety requirements (#307) * Better req overview * gd_req__saf_attr_aou already covered * gd_req__saf_attr_mandatory already covered * Cover gd_req__saf_linkage_safety requirement --- docs/internals/requirements/index.rst | 2 - .../requirements/process_overview.rst | 51 ++++++++++++++++--- docs/internals/requirements/requirements.rst | 36 +++++++++++-- 3 files changed, 75 insertions(+), 14 deletions(-) diff --git a/docs/internals/requirements/index.rst b/docs/internals/requirements/index.rst index fde7eb58..ea58f858 100644 --- a/docs/internals/requirements/index.rst +++ b/docs/internals/requirements/index.rst @@ -1,5 +1,3 @@ -.. 
_requirements: - Requirements ============ diff --git a/docs/internals/requirements/process_overview.rst b/docs/internals/requirements/process_overview.rst index 93962313..3225131c 100644 --- a/docs/internals/requirements/process_overview.rst +++ b/docs/internals/requirements/process_overview.rst @@ -4,26 +4,63 @@ Process Requirements Overview =============================== -Unsatisfied Tool Requirements in Process -######################################## +Unsatisfied Process Requirements +################################ -The following table lists tool requirements from our process which are not satisfied. +The following table lists tool requirements from our process +which are not (yet) satisfied, +i.e. covered by tool requirements. .. needtable:: :types: gd_req - :columns: id;title;satisfied by - :colwidths: 2;4;1 + :columns: id;title;tags + :colwidths: 2;4;2 :style: table - :filter_warning: No unsatisfied requirements, no table. ☺️ results = [] + prio = "prio_1" for need in needs.filter_types(["gd_req"]): - if not need["id"].startswith("gd_req__tool_"): + if not any(prio in tag for tag in need["tags"]): continue if len(need["satisfies_back"]) >= 1: continue results.append(need) +.. needtable:: + :types: gd_req + :columns: id;title;tags + :colwidths: 2;4;2 + :style: table + + results = [] + prio = "prio_2" + for need in needs.filter_types(["gd_req"]): + if not any(prio in tag for tag in need["tags"]): + continue + if len(need["satisfies_back"]) >= 1: + continue + results.append(need) + +.. TODO: add prio_3 once prio_1 is done + +Requirements not fully implemented +################################## + +Just because a process requirement is covered by tool requirements +does not mean it is implemented. + +.. 
needtable:: + :types: gd_req + :columns: id as "Process Requirement";implemented;satisfies + :colwidths: 1;1;2 + :style: table + + results = [] + for need in needs.filter_types(["tool_req"]): + if need["implemented"] == "YES": + continue + results.append(need) + All our Tool Requirements ######################### diff --git a/docs/internals/requirements/requirements.rst b/docs/internals/requirements/requirements.rst index 1ded7531..410fbba3 100644 --- a/docs/internals/requirements/requirements.rst +++ b/docs/internals/requirements/requirements.rst @@ -832,7 +832,7 @@ Testing .. tool_req:: Safety Analysis Need Types :id: tool_req__docs_saf_types - :implemented: NO + :implemented: YES :tags: Safety Analysis :satisfies: gd_req__saf_structure, @@ -841,10 +841,29 @@ Testing Docs-As-Code shall support the following need types: - * Feature FMEA (Failure Modes and Effect Analysis) -> feat_saf_fmea - * Component FMEA (Failure Modes and Effect Analysis) -> comp_saf_fmea - * Feature DFA (Dependend Failure Analysis) -> feat_saf_dfa - * Component DFA (Dependent Failure Analysis) -> comp_saf_dfa + * Feature FMEA (Failure Modes and Effect Analysis) -> ``feat_saf_fmea`` + * Component FMEA (Failure Modes and Effect Analysis) -> ``comp_saf_fmea`` + * Feature DFA (Dependend Failure Analysis) -> ``feat_saf_dfa`` + * Component DFA (Dependent Failure Analysis) -> ``comp_saf_dfa`` + +.. tool_req:: Safety Analysis Mandatory Attributes + :id: tool_req__docs_saf_attrs_mandatory + :implemented: YES + :tags: Safety Analysis + :satisfies: + gd_req__saf_attr_mandatory, + :parent_covered: YES + + All safety analysis elements in :need:`tool_req__docs_saf_types` + shall have the following mandatory attributes: + + * DFA-only attribute: ``failure_id`` + * FMEA-only attribute: ``fault_id`` + * attribute: ``failure_effect`` + * attribute: ``status`` + * attribute: ``sufficient`` + * attribute: ``title`` (all Needs elements have a title) + * attribute: ``id`` (all Needs elements have an id) .. 
tool_req:: Safety Analysis Mitigation Attribute @@ -855,12 +874,19 @@ Testing gd_req__saf_attr_mitigated_by, gd_req__saf_attr_requirements, gd_req__saf_attr_requirements_check, + gd_req__saf_attr_aou, + gd_req__saf_linkage_safety, :parent_covered: YES Docs-As-Code shall enforce valid needs (`status` == `valid`) of type :need:`tool_req__docs_saf_types` to have at least one `mitigated_by` link to a requirement on the corresponding level. + At least one of the linked requirements must have + the same ASIL level or a higher one. + + It can be ``comp_req`` or ``aou_req``. + .. tool_req:: Safety Analysis Mitigation Issue Attribute :id: tool_req__docs_saf_attrs_mitigation_issue From d77b6622820ba885f0bb3fdd4b42b912d7aa154f Mon Sep 17 00:00:00 2001 From: Oliver Pajonk Date: Thu, 27 Nov 2025 11:56:29 +0100 Subject: [PATCH 161/231] Use devcontainer 1.0.0 release (#309) Signed-off-by: Oliver Pajonk --- .devcontainer/devcontainer.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index bf388945..e6bd08c5 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,6 +1,5 @@ { "name": "eclipse-s-core", - "image": "ghcr.io/eclipse-score/devcontainer:latest", - "initializeCommand": "mkdir -p ${localEnv:HOME}/.cache/bazel", + "image": "ghcr.io/eclipse-score/devcontainer:1.0.0", "updateContentCommand": "bazel run //:ide_support" } From 7a8022768da41107fc7f6adeb5ff2e9b57821696 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Thu, 27 Nov 2025 12:09:08 +0100 Subject: [PATCH 162/231] bump version (#311) --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index 24273cae..ab7673de 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "2.0.2", + version = "2.0.3", compatibility_level = 2, ) From accad51c54be42a11091df3945d9e826e0b2cfad Mon Sep 17 00:00:00 
2001 From: Andrey Babanin Date: Mon, 1 Dec 2025 10:08:23 +0100 Subject: [PATCH 163/231] fix: draw_component with external elements (#316) --- src/extensions/score_draw_uml_funcs/helpers.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/extensions/score_draw_uml_funcs/helpers.py b/src/extensions/score_draw_uml_funcs/helpers.py index 11a2e5fe..37b1eddf 100644 --- a/src/extensions/score_draw_uml_funcs/helpers.py +++ b/src/extensions/score_draw_uml_funcs/helpers.py @@ -111,7 +111,10 @@ def get_alias(need: dict[str, str]) -> str: def get_need_link(need: dict[str, str]) -> str: """Generate the link to the need element from the PlantUML Diagram""" - link = ".." + "/" + need["docname"] + ".html#" + need["id_parent"] + if need["is_external"]: + link = need["external_url"] + else: + link = f"../{need['docname']}.html#{need['id_parent']}" # Reminder: Link is displayed via triple braces inside a interface if "int_op" in need["type"]: From 3ca45ea39bb7530bab829160d28a6d5fa81c8b25 Mon Sep 17 00:00:00 2001 From: Jan Gueth Date: Tue, 2 Dec 2025 11:42:09 +0100 Subject: [PATCH 164/231] Update how-to setup.md (#317) Update version to latest 2.0.3 version of docs-as-code. Fix missing ":" in load code snippet example. --- docs/how-to/setup.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/how-to/setup.md b/docs/how-to/setup.md index 7fe573a2..e632ba91 100644 --- a/docs/how-to/setup.md +++ b/docs/how-to/setup.md @@ -23,7 +23,7 @@ designed to enhance documentation capabilities in S-CORE. 
Add the module to your `MODULE.bazel` file: ```starlark -bazel_dep(name = "score_docs_as_code", version = "0.2.7") +bazel_dep(name = "score_docs_as_code", version = "2.0.3") ``` And make sure to also add the S-core Bazel registry to your `.bazelrc` file @@ -50,7 +50,7 @@ build --tool_java_runtime_version=remotejdk_17 ```starlark -load("@score_docs_as_code//docs.bzl", "docs") +load("@score_docs_as_code//:docs.bzl", "docs") docs( source_dir = "", From 3d4dadd4e299190b42612c75ed9fe3108b87c4f5 Mon Sep 17 00:00:00 2001 From: Andreas Zwinkau <95761648+a-zw@users.noreply.github.com> Date: Tue, 2 Dec 2025 15:14:52 +0100 Subject: [PATCH 165/231] Document requirements coverage (#308) --- MODULE.bazel | 4 +- .../requirements/process_overview.rst | 22 ++++++ docs/internals/requirements/requirements.rst | 74 +++++++++++++++---- src/extensions/score_metamodel/metamodel.yaml | 10 ++- 4 files changed, 93 insertions(+), 17 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index ab7673de..df019fe9 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "2.0.3", + version = "2.1.0", compatibility_level = 2, ) @@ -99,7 +99,7 @@ http_file( # docs dependency # Note: requirements were last aligned with 1.2.0, # the switch to 1.3.1 is purely to drop the dependency on docs-as-code 1.x. -bazel_dep(name = "score_process", version = "1.3.1") +bazel_dep(name = "score_process", version = "1.3.2") # Add Linter bazel_dep(name = "rules_multitool", version = "1.9.0") diff --git a/docs/internals/requirements/process_overview.rst b/docs/internals/requirements/process_overview.rst index 3225131c..2177fe84 100644 --- a/docs/internals/requirements/process_overview.rst +++ b/docs/internals/requirements/process_overview.rst @@ -18,8 +18,30 @@ i.e. covered by tool requirements. 
:style: table results = [] + ignored_ids = [ + # Impact Analysis is free form text, thus not in scope of docs-as-code + "gd_req__change_attr_impact_description", + # Problem Reports are Github issues not docs-as-code + "gd_req__problem_attr_anaylsis_results", + "gd_req__problem_attr_classification", + "gd_req__problem_attr_impact_description", + "gd_req__problem_attr_milestone", + "gd_req__problem_attr_safety_affected", + "gd_req__problem_attr_security_affected", + "gd_req__problem_attr_stakeholder", + "gd_req__problem_attr_status", + "gd_req__problem_attr_title", + "gd_req__problem_check_closing", + # Requirements for test frameworks are not in scope of docs-as-code + "gd_req__verification_link_tests", + "gd_req__verification_link_tests_cpp", + "gd_req__verification_link_tests_python", + "gd_req__verification_link_tests_rust", + ] prio = "prio_1" for need in needs.filter_types(["gd_req"]): + if need["id"] in ignored_ids: + continue if not any(prio in tag for tag in need["tags"]): continue if len(need["satisfies_back"]) >= 1: diff --git a/docs/internals/requirements/requirements.rst b/docs/internals/requirements/requirements.rst index 410fbba3..6679ee2f 100644 --- a/docs/internals/requirements/requirements.rst +++ b/docs/internals/requirements/requirements.rst @@ -77,6 +77,7 @@ This section provides an overview of current process requirements and their clar gd_req__req_attr_uid, gd_req__arch_attribute_uid, gd_req__saf_attr_uid, + gd_req__req_check_mandatory, :parent_covered: NO: cannot check non-existent "doc__naming_conventions" in gd_req__req_attr_uid Docs-as-Code shall enforce that Need IDs follow the following naming scheme: @@ -118,7 +119,7 @@ This section provides an overview of current process requirements and their clar :tags: Common Attributes :parent_covered: NO: Can not cover 'ISO/IEC/IEEE/29148' :implemented: YES - :satisfies: gd_req__req_attr_description + :satisfies: gd_req__req_attr_description, gd_req__req_check_mandatory Docs-as-Code shall enforce 
that each need of type :need:`tool_req__docs_req_types` has a description (content) @@ -152,6 +153,7 @@ This section provides an overview of current process requirements and their clar :satisfies: gd_req__req_attr_security, gd_req__arch_attr_security, + gd_req__req_check_mandatory, Docs-as-Code shall enforce that the ``security`` attribute has one of the following values: @@ -176,6 +178,7 @@ This section provides an overview of current process requirements and their clar :implemented: YES :parent_covered: YES :satisfies: + gd_req__req_check_mandatory, gd_req__req_attr_safety, gd_req__arch_attr_safety @@ -205,6 +208,7 @@ This section provides an overview of current process requirements and their clar gd_req__req_attr_status, gd_req__arch_attr_status, gd_req__saf_attr_status, + gd_req__req_check_mandatory, Docs-as-Code shall enforce that the ``status`` attribute has one of the following values: @@ -265,7 +269,23 @@ Versioning * Generic Document (document) * Tool Verification Report (doc_tool) + * Change Request is also a generic document +.. tool_req:: Mandatory attributes of Generic Documents + :id: tool_req__docs_doc_generic_mandatory + :tags: Documents + :implemented: PARTIAL + :satisfies: + gd_req__doc_attributes_manual, + gd_req__change_attr_impact_safety + :parent_covered: YES + + Docs-as-Code shall enforce that each Generic Document ``doc__*`` has the following attributes: + + * status + * security + * safety + * realizes .. tool_req:: Mandatory Document attributes :id: tool_req__docs_doc_attr @@ -275,12 +295,14 @@ Versioning gd_req__doc_author, gd_req__doc_approver, gd_req__doc_reviewer, + gd_req__change_attr_title, :parent_covered: NO, process requirement has changed and we do not understand the new wording. 
:status: invalid Docs-as-Code shall enforce that each :need:`tool_req__docs_doc_types` has the following attributes: + * title (implicitly enforced by sphinx-needs) * author * approver * reviewer @@ -375,7 +397,7 @@ Mapping :tags: Requirements :implemented: YES :parent_covered: NO: Can not ensure correct reasoning - :satisfies: gd_req__req_attr_rationale + :satisfies: gd_req__req_attr_rationale, gd_req__req_check_mandatory Docs-as-Code shall enforce that each stakeholder requirement (stkh_req) contains a ``rationale`` attribute. @@ -686,21 +708,23 @@ Architecture Attributes e.g. gd_req__req_linkage_architecture -.. tool_req:: Enable Creation of Dependency Graphs - :id: tool_req__docs_dd_dependency_graph +.. tool_req:: Static Diagram for Unit Interactions + :id: tool_req__docs_dd_sta :tags: Detailed Design & Code - :implemented: NO + :implemented: YES :parent_covered: YES - :satisfies: gd_req__impl_dependency_analysis - :status: invalid + :satisfies: gd_req__impl_static_diagram - Docs-As-Code shall support generation and rendering of dependency graphs for - components. It shall show all dependencies of a component incl transitive - dependencies. + Provide needs type ``dd_sta`` for static diagrams showing unit interactions as UML. - .. note:: - Components are defined in `comp_arc_sta`. - A component is also a bazel target. We can use bazel dependency graphs. +.. tool_req:: Dynamic Diagram for Unit Interactions + :id: tool_req__docs_dd_dyn + :tags: Detailed Design & Code + :implemented: YES + :parent_covered: YES + :satisfies: gd_req__impl_dynamic_diagram + + Provide needs type ``dd_dyn`` for dynamic diagrams showing unit interactions as UML. Testing @@ -804,6 +828,29 @@ Testing * released * rejected +.. 
tool_req:: Enforce version attribute + :id: tool_req__docs_tvr_version + :tags: Tool Verification Reports + :implemented: YES + :satisfies: gd_req__tool_attr_version + :parent_covered: YES + + Docs-as-Code shall enforce that every Tool Verification Report (`doc_tool`) includes a + `version` attribute. + +.. tool_req:: Enforce confidence level classification + :id: tool_req__docs_tvr_confidence_level + :tags: Tool Verification Reports + :implemented: YES + :satisfies: gd_req__tool_attr_tcl + :parent_covered: YES + + Docs-as-Code shall enforce that every Tool Verification Report (`doc_tool`) includes a + `tcl` attribute with one of the following values: + + * LOW + * HIGH + ⚙️ Process / Other ################### @@ -811,6 +858,7 @@ Testing :id: tool_req__docs_wf_types :tags: Process / Other :implemented: YES + :satisfies: gd_req__process_management_build_blocks_attr, gd_req__process_management_build_blocks_link Docs-as-Code shall support the following workflow types: diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index bb6294b8..1f94eeec 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -202,16 +202,18 @@ needs_types: document: title: Generic Document prefix: doc__ + # req-Id: tool_req__docs_doc_generic_mandatory mandatory_options: status: ^(valid|draft|invalid)$ - optional_options: safety: "^(QM|ASIL_B)$" security: "^(YES|NO)$" + optional_options: # req-Id: tool_req__docs_doc_attr author: ^.*$ approver: ^.*$ reviewer: ^.*$ - optional_links: + # req-Id: tool_req__docs_doc_generic_mandatory + mandatory_links: realizes: workproduct parts: 2 @@ -221,11 +223,13 @@ needs_types: mandatory_options: # req-Id: tool_req__docs_tvr_status status: ^(draft|evaluated|qualified|released|rejected)$ + # req-Id: tool_req__docs_tvr_version version: ^.*$ # req-Id: tool_req__docs_tvr_safety safety_affected: "^(YES|NO)$" # req-Id: tool_req__docs_tvr_security 
security_affected: "^(YES|NO)$" + # req-Id: tool_req__docs_tvr_confidence_level tcl: "^(LOW|HIGH)$" optional_options: # req-Id: tool_req__docs_doc_attr @@ -590,6 +594,7 @@ needs_types: parts: 3 # Implementation + # req-Id: tool_req__docs_dd_sta dd_sta: title: Static detailed design color: #FEDCD2 @@ -605,6 +610,7 @@ needs_types: includes: sw_unit parts: 3 + # req-Id: tool_req__docs_dd_dyn dd_dyn: title: Dynamic detailed design color: #FEDCD2 From b7b082a711ca4a4b779813c93e4fed2c892bf736 Mon Sep 17 00:00:00 2001 From: Andreas Zwinkau <95761648+a-zw@users.noreply.github.com> Date: Tue, 2 Dec 2025 17:44:53 +0100 Subject: [PATCH 166/231] Adapt requirements tables in docs (#319) * Mark tool_req__docs_arch_link_security implemented * Add source link * Mark implemented stuff as implemented * Split off page tool_req_overview --- docs/internals/requirements/index.rst | 1 + docs/internals/requirements/process_overview.rst | 11 +++-------- docs/internals/requirements/requirements.rst | 6 +++--- docs/internals/requirements/tool_req_overview.rst | 12 ++++++++++++ src/extensions/score_metamodel/metamodel.yaml | 2 ++ 5 files changed, 21 insertions(+), 11 deletions(-) create mode 100644 docs/internals/requirements/tool_req_overview.rst diff --git a/docs/internals/requirements/index.rst b/docs/internals/requirements/index.rst index ea58f858..0fecb8a2 100644 --- a/docs/internals/requirements/index.rst +++ b/docs/internals/requirements/index.rst @@ -6,5 +6,6 @@ Requirements capabilities process_overview + tool_req_overview requirements test_overview diff --git a/docs/internals/requirements/process_overview.rst b/docs/internals/requirements/process_overview.rst index 2177fe84..73cb4f6f 100644 --- a/docs/internals/requirements/process_overview.rst +++ b/docs/internals/requirements/process_overview.rst @@ -4,6 +4,9 @@ Process Requirements Overview =============================== +This page shall provide an overview +how well this tool implements process requirements. 
+ Unsatisfied Process Requirements ################################ @@ -82,11 +85,3 @@ does not mean it is implemented. if need["implemented"] == "YES": continue results.append(need) - -All our Tool Requirements -######################### - -.. needtable:: - :types: tool_req - :columns: satisfies as "Process Requirement" ;id as "Tool Requirement";implemented;source_code_link - :style: table diff --git a/docs/internals/requirements/requirements.rst b/docs/internals/requirements/requirements.rst index 6679ee2f..a476ab15 100644 --- a/docs/internals/requirements/requirements.rst +++ b/docs/internals/requirements/requirements.rst @@ -639,7 +639,7 @@ Architecture Attributes .. tool_req:: Security: Restrict linkage :id: tool_req__docs_arch_link_security :tags: Architecture - :implemented: NO + :implemented: YES :parent_covered: YES :satisfies: gd_req__arch_linkage_security_trace @@ -938,7 +938,7 @@ Testing .. tool_req:: Safety Analysis Mitigation Issue Attribute :id: tool_req__docs_saf_attrs_mitigation_issue - :implemented: NO + :implemented: YES :tags: Safety Analysis :satisfies: gd_req__saf_attr_mitigation_issue :parent_covered: NO @@ -949,7 +949,7 @@ Testing .. tool_req:: Safety Analysis Sufficient Attribute :id: tool_req__docs_saf_attrs_sufficient - :implemented: NO + :implemented: YES :tags: Safety Analysis :satisfies: gd_req__saf_attr_sufficient :parent_covered: YES diff --git a/docs/internals/requirements/tool_req_overview.rst b/docs/internals/requirements/tool_req_overview.rst new file mode 100644 index 00000000..198d9b5a --- /dev/null +++ b/docs/internals/requirements/tool_req_overview.rst @@ -0,0 +1,12 @@ +=============================== +Tool Requirements Overview +=============================== + +Here are all our tool requirements +tersely packed in a table +with some hopefully useful meta information. + +.. 
needtable:: + :types: tool_req + :columns: satisfies as "Process Requirement" ;id as "Tool Requirement";implemented;source_code_link + :style: table diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 1f94eeec..b67adfeb 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -937,6 +937,7 @@ graph_checks: - status == valid explanation: An safety architecture element can only link other safety architecture elements. + # req-Id: tool_req__docs_arch_link_security tool_req__docs_arch_link_security: needs: include: feat_arc_sta, logic_arc_int, logic_arc_int_op, comp_arc_sta, real_arc_int, real_arc_int_op @@ -950,6 +951,7 @@ graph_checks: # - Requirements with the same ASIL or # - Requirements with a higher ASIL # as the corresponding ASIL of the Feature or Component that is analyzed. + # req-Id: tool_req__docs_saf_attrs_mitigated_by saf_linkage_safety: needs: include: feat_saf_fmea, comp_saf_fmea, plat_saf_dfa, feat_saf_dfa, comp_saf_dfa From 1931ac256ee9da4658e5fbd62d8f4fb09f120251 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Wed, 3 Dec 2025 15:03:09 +0100 Subject: [PATCH 167/231] Upgrade python dependencies (#310) --- src/requirements.in | 3 +- src/requirements.txt | 2136 ++++++++++++++++++++++-------------------- 2 files changed, 1120 insertions(+), 1019 deletions(-) diff --git a/src/requirements.in b/src/requirements.in index 789af6b1..1610e6a4 100644 --- a/src/requirements.in +++ b/src/requirements.in @@ -2,7 +2,8 @@ Sphinx # At least 4.2.0, as it fixes a bug in combination with esbonio live preview: # https://github.com/useblocks/sphinx-needs/issues/1350 -sphinx-needs>=4.2.0 +# 6 needs some work, as it's a breaking change. 
+sphinx-needs>=4.2.0,<6 # Due to needed bugfix in 0.3.1 sphinx-collections>=0.3.1 diff --git a/src/requirements.txt b/src/requirements.txt index a405c168..632beacd 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -12,15 +12,15 @@ alabaster==1.0.0 \ --hash=sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e \ --hash=sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b # via sphinx -anyio==4.10.0 \ - --hash=sha256:3f3fae35c96039744587aa5b8371e7e8e603c0702999535961dd336026973ba6 \ - --hash=sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1 +anyio==4.11.0 \ + --hash=sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc \ + --hash=sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4 # via # starlette # watchfiles -attrs==25.3.0 \ - --hash=sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3 \ - --hash=sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b +attrs==25.4.0 \ + --hash=sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11 \ + --hash=sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373 # via # cattrs # jsonschema @@ -35,176 +35,227 @@ babel==2.17.0 \ basedpyright==1.29.2 \ --hash=sha256:12c49186003b9f69a028615da883ef97035ea2119a9e3f93a00091b3a27088a6 \ --hash=sha256:f389e2997de33d038c5065fd85bff351fbdc62fa6d6371c7b947fc3bce8d437d - # via -r external/score_tooling+/python_basics/requirements.txt -beautifulsoup4==4.13.4 \ - --hash=sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b \ - --hash=sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195 + # via -r /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/external/score_tooling+/python_basics/requirements.txt +beautifulsoup4==4.14.2 \ + --hash=sha256:2a98ab9f944a11acee9cc848508ec28d9228abfd522ef0fad6a02a72e0ded69e \ + 
--hash=sha256:5ef6fa3a8cbece8488d66985560f97ed091e22bbc4e9c2338508a9d5de6d4515 # via pydata-sphinx-theme -cattrs==25.1.1 \ - --hash=sha256:1b40b2d3402af7be79a7e7e097a9b4cd16d4c06e6d526644b0b26a063a1cc064 \ - --hash=sha256:c914b734e0f2d59e5b720d145ee010f1fd9a13ee93900922a2f3f9d593b8382c +cattrs==25.3.0 \ + --hash=sha256:1ac88d9e5eda10436c4517e390a4142d88638fe682c436c93db7ce4a277b884a \ + --hash=sha256:9896e84e0a5bf723bc7b4b68f4481785367ce07a8a02e7e9ee6eb2819bc306ff # via # lsprotocol # pygls -certifi==2025.8.3 \ - --hash=sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407 \ - --hash=sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5 +certifi==2025.11.12 \ + --hash=sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b \ + --hash=sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316 # via requests -cffi==1.17.1 \ - --hash=sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8 \ - --hash=sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2 \ - --hash=sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1 \ - --hash=sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15 \ - --hash=sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36 \ - --hash=sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824 \ - --hash=sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8 \ - --hash=sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36 \ - --hash=sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17 \ - --hash=sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf \ - --hash=sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc \ - --hash=sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3 \ - --hash=sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed \ - 
--hash=sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702 \ - --hash=sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1 \ - --hash=sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8 \ - --hash=sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903 \ - --hash=sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6 \ - --hash=sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d \ - --hash=sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b \ - --hash=sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e \ - --hash=sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be \ - --hash=sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c \ - --hash=sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683 \ - --hash=sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9 \ - --hash=sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c \ - --hash=sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8 \ - --hash=sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1 \ - --hash=sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4 \ - --hash=sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655 \ - --hash=sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67 \ - --hash=sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595 \ - --hash=sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0 \ - --hash=sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65 \ - --hash=sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41 \ - --hash=sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6 \ - --hash=sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401 \ - 
--hash=sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6 \ - --hash=sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3 \ - --hash=sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16 \ - --hash=sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93 \ - --hash=sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e \ - --hash=sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4 \ - --hash=sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964 \ - --hash=sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c \ - --hash=sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576 \ - --hash=sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0 \ - --hash=sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3 \ - --hash=sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662 \ - --hash=sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3 \ - --hash=sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff \ - --hash=sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5 \ - --hash=sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd \ - --hash=sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f \ - --hash=sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5 \ - --hash=sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14 \ - --hash=sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d \ - --hash=sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9 \ - --hash=sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7 \ - --hash=sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382 \ - --hash=sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a \ - 
--hash=sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e \ - --hash=sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a \ - --hash=sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4 \ - --hash=sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99 \ - --hash=sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87 \ - --hash=sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b +cffi==2.0.0 \ + --hash=sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb \ + --hash=sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b \ + --hash=sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f \ + --hash=sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9 \ + --hash=sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44 \ + --hash=sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2 \ + --hash=sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c \ + --hash=sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75 \ + --hash=sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65 \ + --hash=sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e \ + --hash=sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a \ + --hash=sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e \ + --hash=sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25 \ + --hash=sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a \ + --hash=sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe \ + --hash=sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b \ + --hash=sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91 \ + 
--hash=sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592 \ + --hash=sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187 \ + --hash=sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c \ + --hash=sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1 \ + --hash=sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94 \ + --hash=sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba \ + --hash=sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb \ + --hash=sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165 \ + --hash=sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529 \ + --hash=sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca \ + --hash=sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c \ + --hash=sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6 \ + --hash=sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c \ + --hash=sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0 \ + --hash=sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743 \ + --hash=sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63 \ + --hash=sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5 \ + --hash=sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5 \ + --hash=sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4 \ + --hash=sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d \ + --hash=sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b \ + --hash=sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93 \ + --hash=sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205 \ + --hash=sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27 \ + 
--hash=sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512 \ + --hash=sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d \ + --hash=sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c \ + --hash=sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037 \ + --hash=sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26 \ + --hash=sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322 \ + --hash=sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb \ + --hash=sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c \ + --hash=sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8 \ + --hash=sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4 \ + --hash=sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414 \ + --hash=sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9 \ + --hash=sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664 \ + --hash=sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9 \ + --hash=sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775 \ + --hash=sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739 \ + --hash=sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc \ + --hash=sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062 \ + --hash=sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe \ + --hash=sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9 \ + --hash=sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92 \ + --hash=sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5 \ + --hash=sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13 \ + --hash=sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d \ + 
--hash=sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26 \ + --hash=sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f \ + --hash=sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495 \ + --hash=sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b \ + --hash=sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6 \ + --hash=sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c \ + --hash=sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef \ + --hash=sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5 \ + --hash=sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18 \ + --hash=sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad \ + --hash=sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3 \ + --hash=sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7 \ + --hash=sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5 \ + --hash=sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534 \ + --hash=sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49 \ + --hash=sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2 \ + --hash=sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5 \ + --hash=sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453 \ + --hash=sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf # via # cryptography # pynacl -charset-normalizer==3.4.3 \ - --hash=sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91 \ - --hash=sha256:02425242e96bcf29a49711b0ca9f37e451da7c70562bc10e8ed992a5a7a25cc0 \ - --hash=sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154 \ - --hash=sha256:07a0eae9e2787b586e129fdcbe1af6997f8d0e5abaa0bc98c0e20e124d67e601 \ - 
--hash=sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884 \ - --hash=sha256:0e78314bdc32fa80696f72fa16dc61168fda4d6a0c014e0380f9d02f0e5d8a07 \ - --hash=sha256:0f2be7e0cf7754b9a30eb01f4295cc3d4358a479843b31f328afd210e2c7598c \ - --hash=sha256:13faeacfe61784e2559e690fc53fa4c5ae97c6fcedb8eb6fb8d0a15b475d2c64 \ - --hash=sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe \ - --hash=sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f \ - --hash=sha256:16a8770207946ac75703458e2c743631c79c59c5890c80011d536248f8eaa432 \ - --hash=sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc \ - --hash=sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa \ - --hash=sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9 \ - --hash=sha256:1e8ac75d72fa3775e0b7cb7e4629cec13b7514d928d15ef8ea06bca03ef01cae \ - --hash=sha256:1ef99f0456d3d46a50945c98de1774da86f8e992ab5c77865ea8b8195341fc19 \ - --hash=sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d \ - --hash=sha256:23b6b24d74478dc833444cbd927c338349d6ae852ba53a0d02a2de1fce45b96e \ - --hash=sha256:252098c8c7a873e17dd696ed98bbe91dbacd571da4b87df3736768efa7a792e4 \ - --hash=sha256:257f26fed7d7ff59921b78244f3cd93ed2af1800ff048c33f624c87475819dd7 \ - --hash=sha256:2c322db9c8c89009a990ef07c3bcc9f011a3269bc06782f916cd3d9eed7c9312 \ - --hash=sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92 \ - --hash=sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31 \ - --hash=sha256:31a9a6f775f9bcd865d88ee350f0ffb0e25936a7f930ca98995c05abf1faf21c \ - --hash=sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f \ - --hash=sha256:34a7f768e3f985abdb42841e20e17b330ad3aaf4bb7e7aeeb73db2e70f077b99 \ - --hash=sha256:3653fad4fe3ed447a596ae8638b437f827234f01a8cd801842e43f3d0a6b281b \ - --hash=sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15 \ - 
--hash=sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392 \ - --hash=sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f \ - --hash=sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8 \ - --hash=sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491 \ - --hash=sha256:4ca4c094de7771a98d7fbd67d9e5dbf1eb73efa4f744a730437d8a3a5cf994f0 \ - --hash=sha256:511729f456829ef86ac41ca78c63a5cb55240ed23b4b737faca0eb1abb1c41bc \ - --hash=sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0 \ - --hash=sha256:585f3b2a80fbd26b048a0be90c5aae8f06605d3c92615911c3a2b03a8a3b796f \ - --hash=sha256:5b413b0b1bfd94dbf4023ad6945889f374cd24e3f62de58d6bb102c4d9ae534a \ - --hash=sha256:5d8d01eac18c423815ed4f4a2ec3b439d654e55ee4ad610e153cf02faf67ea40 \ - --hash=sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927 \ - --hash=sha256:6cf8fd4c04756b6b60146d98cd8a77d0cdae0e1ca20329da2ac85eed779b6849 \ - --hash=sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce \ - --hash=sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14 \ - --hash=sha256:70bfc5f2c318afece2f5838ea5e4c3febada0be750fcf4775641052bbba14d05 \ - --hash=sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c \ - --hash=sha256:74d77e25adda8581ffc1c720f1c81ca082921329452eba58b16233ab1842141c \ - --hash=sha256:78deba4d8f9590fe4dae384aeff04082510a709957e968753ff3c48399f6f92a \ - --hash=sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc \ - --hash=sha256:88ab34806dea0671532d3f82d82b85e8fc23d7b2dd12fa837978dad9bb392a34 \ - --hash=sha256:8999f965f922ae054125286faf9f11bc6932184b93011d138925a1773830bbe9 \ - --hash=sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096 \ - --hash=sha256:939578d9d8fd4299220161fdd76e86c6a251987476f5243e8864a7844476ba14 \ - --hash=sha256:96b2b3d1a83ad55310de8c7b4a2d04d9277d5591f40761274856635acc5fcb30 \ - 
--hash=sha256:a2d08ac246bb48479170408d6c19f6385fa743e7157d716e144cad849b2dd94b \ - --hash=sha256:b256ee2e749283ef3ddcff51a675ff43798d92d746d1a6e4631bf8c707d22d0b \ - --hash=sha256:b5e3b2d152e74e100a9e9573837aba24aab611d39428ded46f4e4022ea7d1942 \ - --hash=sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db \ - --hash=sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5 \ - --hash=sha256:c60e092517a73c632ec38e290eba714e9627abe9d301c8c8a12ec32c314a2a4b \ - --hash=sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce \ - --hash=sha256:c6e490913a46fa054e03699c70019ab869e990270597018cef1d8562132c2669 \ - --hash=sha256:c6f162aabe9a91a309510d74eeb6507fab5fff92337a15acbe77753d88d9dcf0 \ - --hash=sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018 \ - --hash=sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93 \ - --hash=sha256:cc9370a2da1ac13f0153780040f465839e6cccb4a1e44810124b4e22483c93fe \ - --hash=sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049 \ - --hash=sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a \ - --hash=sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef \ - --hash=sha256:d0e909868420b7049dafd3a31d45125b31143eec59235311fc4c57ea26a4acd2 \ - --hash=sha256:d22dbedd33326a4a5190dd4fe9e9e693ef12160c77382d9e87919bce54f3d4ca \ - --hash=sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16 \ - --hash=sha256:d79c198e27580c8e958906f803e63cddb77653731be08851c7df0b1a14a8fc0f \ - --hash=sha256:d95bfb53c211b57198bb91c46dd5a2d8018b3af446583aab40074bf7988401cb \ - --hash=sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1 \ - --hash=sha256:ec557499516fc90fd374bf2e32349a2887a876fbf162c160e3c01b6849eaf557 \ - --hash=sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37 \ - --hash=sha256:fb731e5deb0c7ef82d698b0f4c5bb724633ee2a489401594c5c88b02e6cb15f7 \ - 
--hash=sha256:fb7f67a1bfa6e40b438170ebdc8158b78dc465a5a67b6dde178a46987b244a72 \ - --hash=sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c \ - --hash=sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9 +charset-normalizer==3.4.4 \ + --hash=sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad \ + --hash=sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93 \ + --hash=sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394 \ + --hash=sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89 \ + --hash=sha256:0f04b14ffe5fdc8c4933862d8306109a2c51e0704acfa35d51598eb45a1e89fc \ + --hash=sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86 \ + --hash=sha256:194f08cbb32dc406d6e1aea671a68be0823673db2832b38405deba2fb0d88f63 \ + --hash=sha256:1bee1e43c28aa63cb16e5c14e582580546b08e535299b8b6158a7c9c768a1f3d \ + --hash=sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f \ + --hash=sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8 \ + --hash=sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0 \ + --hash=sha256:2677acec1a2f8ef614c6888b5b4ae4060cc184174a938ed4e8ef690e15d3e505 \ + --hash=sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161 \ + --hash=sha256:2aaba3b0819274cc41757a1da876f810a3e4d7b6eb25699253a4effef9e8e4af \ + --hash=sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152 \ + --hash=sha256:2c9d3c380143a1fedbff95a312aa798578371eb29da42106a29019368a475318 \ + --hash=sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72 \ + --hash=sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4 \ + --hash=sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e \ + --hash=sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3 \ + 
--hash=sha256:44c2a8734b333e0578090c4cd6b16f275e07aa6614ca8715e6c038e865e70576 \ + --hash=sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c \ + --hash=sha256:4902828217069c3c5c71094537a8e623f5d097858ac6ca8252f7b4d10b7560f1 \ + --hash=sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8 \ + --hash=sha256:4fe7859a4e3e8457458e2ff592f15ccb02f3da787fcd31e0183879c3ad4692a1 \ + --hash=sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2 \ + --hash=sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44 \ + --hash=sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26 \ + --hash=sha256:5947809c8a2417be3267efc979c47d76a079758166f7d43ef5ae8e9f92751f88 \ + --hash=sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016 \ + --hash=sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede \ + --hash=sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf \ + --hash=sha256:5cb4d72eea50c8868f5288b7f7f33ed276118325c1dfd3957089f6b519e1382a \ + --hash=sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc \ + --hash=sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0 \ + --hash=sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84 \ + --hash=sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db \ + --hash=sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1 \ + --hash=sha256:6aee717dcfead04c6eb1ce3bd29ac1e22663cdea57f943c87d1eab9a025438d7 \ + --hash=sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed \ + --hash=sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8 \ + --hash=sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133 \ + --hash=sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e \ + --hash=sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef \ + 
--hash=sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14 \ + --hash=sha256:778d2e08eda00f4256d7f672ca9fef386071c9202f5e4607920b86d7803387f2 \ + --hash=sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0 \ + --hash=sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d \ + --hash=sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828 \ + --hash=sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f \ + --hash=sha256:7c308f7e26e4363d79df40ca5b2be1c6ba9f02bdbccfed5abddb7859a6ce72cf \ + --hash=sha256:7fa17817dc5625de8a027cb8b26d9fefa3ea28c8253929b8d6649e705d2835b6 \ + --hash=sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328 \ + --hash=sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090 \ + --hash=sha256:837c2ce8c5a65a2035be9b3569c684358dfbf109fd3b6969630a87535495ceaa \ + --hash=sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381 \ + --hash=sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c \ + --hash=sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb \ + --hash=sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc \ + --hash=sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a \ + --hash=sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec \ + --hash=sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc \ + --hash=sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac \ + --hash=sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e \ + --hash=sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313 \ + --hash=sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569 \ + --hash=sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3 \ + --hash=sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d \ + 
--hash=sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525 \ + --hash=sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894 \ + --hash=sha256:a8bf8d0f749c5757af2142fe7903a9df1d2e8aa3841559b2bad34b08d0e2bcf3 \ + --hash=sha256:a9768c477b9d7bd54bc0c86dbaebdec6f03306675526c9927c0e8a04e8f94af9 \ + --hash=sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a \ + --hash=sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9 \ + --hash=sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14 \ + --hash=sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25 \ + --hash=sha256:b5d84d37db046c5ca74ee7bb47dd6cbc13f80665fdde3e8040bdd3fb015ecb50 \ + --hash=sha256:b7cf1017d601aa35e6bb650b6ad28652c9cd78ee6caff19f3c28d03e1c80acbf \ + --hash=sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1 \ + --hash=sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3 \ + --hash=sha256:c4ef880e27901b6cc782f1b95f82da9313c0eb95c3af699103088fa0ac3ce9ac \ + --hash=sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e \ + --hash=sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815 \ + --hash=sha256:cb01158d8b88ee68f15949894ccc6712278243d95f344770fa7593fa2d94410c \ + --hash=sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6 \ + --hash=sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6 \ + --hash=sha256:cd09d08005f958f370f539f186d10aec3377d55b9eeb0d796025d4886119d76e \ + --hash=sha256:cd4b7ca9984e5e7985c12bc60a6f173f3c958eae74f3ef6624bb6b26e2abbae4 \ + --hash=sha256:ce8a0633f41a967713a59c4139d29110c07e826d131a316b50ce11b1d79b4f84 \ + --hash=sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69 \ + --hash=sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15 \ + --hash=sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191 \ + 
--hash=sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0 \ + --hash=sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897 \ + --hash=sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd \ + --hash=sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2 \ + --hash=sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794 \ + --hash=sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d \ + --hash=sha256:e912091979546adf63357d7e2ccff9b44f026c075aeaf25a52d0e95ad2281074 \ + --hash=sha256:eaabd426fe94daf8fd157c32e571c85cb12e66692f15516a83a03264b08d06c3 \ + --hash=sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224 \ + --hash=sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838 \ + --hash=sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a \ + --hash=sha256:f155a433c2ec037d4e8df17d18922c3a0d9b3232a396690f17175d2946f0218d \ + --hash=sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d \ + --hash=sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f \ + --hash=sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8 \ + --hash=sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490 \ + --hash=sha256:f8e160feb2aed042cd657a72acc0b481212ed28b1b9a95c0cee1621b524e1966 \ + --hash=sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9 \ + --hash=sha256:fa09f53c465e532f4d3db095e0c55b615f010ad81803d383195b6b5ca6cbf5f3 \ + --hash=sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e \ + --hash=sha256:fd44c878ea55ba351104cb93cc85e74916eb8fa440ca7903e57575e97394f608 # via requests -click==8.2.1 \ - --hash=sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202 \ - --hash=sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b +click==8.3.1 \ + 
--hash=sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a \ + --hash=sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6 # via uvicorn colorama==0.4.6 \ --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ @@ -284,76 +335,97 @@ contourpy==1.3.3 \ --hash=sha256:fd914713266421b7536de2bfa8181aa8c699432b6763a0ea64195ebe28bff6a9 \ --hash=sha256:fde6c716d51c04b1c25d0b90364d0be954624a0ee9d60e23e850e8d48353d07a # via matplotlib -cryptography==45.0.6 \ - --hash=sha256:00e8724bdad672d75e6f069b27970883179bd472cd24a63f6e620ca7e41cc0c5 \ - --hash=sha256:048e7ad9e08cf4c0ab07ff7f36cc3115924e22e2266e034450a890d9e312dd74 \ - --hash=sha256:0d9ef57b6768d9fa58e92f4947cea96ade1233c0e236db22ba44748ffedca394 \ - --hash=sha256:18f878a34b90d688982e43f4b700408b478102dd58b3e39de21b5ebf6509c301 \ - --hash=sha256:1b7fa6a1c1188c7ee32e47590d16a5a0646270921f8020efc9a511648e1b2e08 \ - --hash=sha256:20ae4906a13716139d6d762ceb3e0e7e110f7955f3bc3876e3a07f5daadec5f3 \ - --hash=sha256:20d15aed3ee522faac1a39fbfdfee25d17b1284bafd808e1640a74846d7c4d1b \ - --hash=sha256:2384f2ab18d9be88a6e4f8972923405e2dbb8d3e16c6b43f15ca491d7831bd18 \ - --hash=sha256:275ba5cc0d9e320cd70f8e7b96d9e59903c815ca579ab96c1e37278d231fc402 \ - --hash=sha256:2dac5ec199038b8e131365e2324c03d20e97fe214af051d20c49db129844e8b3 \ - --hash=sha256:31a2b9a10530a1cb04ffd6aa1cd4d3be9ed49f7d77a4dafe198f3b382f41545c \ - --hash=sha256:3436128a60a5e5490603ab2adbabc8763613f638513ffa7d311c900a8349a2a0 \ - --hash=sha256:3b5bf5267e98661b9b888a9250d05b063220dfa917a8203744454573c7eb79db \ - --hash=sha256:3de77e4df42ac8d4e4d6cdb342d989803ad37707cf8f3fbf7b088c9cbdd46427 \ - --hash=sha256:44647c5d796f5fc042bbc6d61307d04bf29bccb74d188f18051b635f20a9c75f \ - --hash=sha256:550ae02148206beb722cfe4ef0933f9352bab26b087af00e48fdfb9ade35c5b3 \ - --hash=sha256:599c8d7df950aa68baa7e98f7b73f4f414c9f02d0e8104a30c0182a07732638b \ - 
--hash=sha256:5b64e668fc3528e77efa51ca70fadcd6610e8ab231e3e06ae2bab3b31c2b8ed9 \ - --hash=sha256:5bd6020c80c5b2b2242d6c48487d7b85700f5e0038e67b29d706f98440d66eb5 \ - --hash=sha256:5c966c732cf6e4a276ce83b6e4c729edda2df6929083a952cc7da973c539c719 \ - --hash=sha256:629127cfdcdc6806dfe234734d7cb8ac54edaf572148274fa377a7d3405b0043 \ - --hash=sha256:705bb7c7ecc3d79a50f236adda12ca331c8e7ecfbea51edd931ce5a7a7c4f012 \ - --hash=sha256:780c40fb751c7d2b0c6786ceee6b6f871e86e8718a8ff4bc35073ac353c7cd02 \ - --hash=sha256:7a3085d1b319d35296176af31c90338eeb2ddac8104661df79f80e1d9787b8b2 \ - --hash=sha256:826b46dae41a1155a0c0e66fafba43d0ede1dc16570b95e40c4d83bfcf0a451d \ - --hash=sha256:833dc32dfc1e39b7376a87b9a6a4288a10aae234631268486558920029b086ec \ - --hash=sha256:cc4d66f5dc4dc37b89cfef1bd5044387f7a1f6f0abb490815628501909332d5d \ - --hash=sha256:d063341378d7ee9c91f9d23b431a3502fc8bfacd54ef0a27baa72a0843b29159 \ - --hash=sha256:e2a21a8eda2d86bb604934b6b37691585bd095c1f788530c1fcefc53a82b3453 \ - --hash=sha256:e40b80ecf35ec265c452eea0ba94c9587ca763e739b8e559c128d23bff7ebbbf \ - --hash=sha256:e5b3dda1b00fb41da3af4c5ef3f922a200e33ee5ba0f0bc9ecf0b0c173958385 \ - --hash=sha256:ea3c42f2016a5bbf71825537c2ad753f2870191134933196bee408aac397b3d9 \ - --hash=sha256:eccddbd986e43014263eda489abbddfbc287af5cddfd690477993dbb31e31016 \ - --hash=sha256:ee411a1b977f40bd075392c80c10b58025ee5c6b47a822a33c1198598a7a5f05 \ - --hash=sha256:f4028f29a9f38a2025abedb2e409973709c660d44319c61762202206ed577c42 \ - --hash=sha256:f68f833a9d445cc49f01097d95c83a850795921b3f7cc6488731e69bde3288da \ - --hash=sha256:fc022c1fa5acff6def2fc6d7819bbbd31ccddfe67d075331a65d9cfb28a20983 +cryptography==46.0.3 \ + --hash=sha256:00a5e7e87938e5ff9ff5447ab086a5706a957137e6e433841e9d24f38a065217 \ + --hash=sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d \ + --hash=sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc \ + 
--hash=sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71 \ + --hash=sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971 \ + --hash=sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a \ + --hash=sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926 \ + --hash=sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc \ + --hash=sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d \ + --hash=sha256:191bb60a7be5e6f54e30ba16fdfae78ad3a342a0599eb4193ba88e3f3d6e185b \ + --hash=sha256:22d7e97932f511d6b0b04f2bfd818d73dcd5928db509460aaf48384778eb6d20 \ + --hash=sha256:23b1a8f26e43f47ceb6d6a43115f33a5a37d57df4ea0ca295b780ae8546e8044 \ + --hash=sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3 \ + --hash=sha256:39b6755623145ad5eff1dab323f4eae2a32a77a7abef2c5089a04a3d04366715 \ + --hash=sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4 \ + --hash=sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506 \ + --hash=sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f \ + --hash=sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0 \ + --hash=sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683 \ + --hash=sha256:50fc3343ac490c6b08c0cf0d704e881d0d660be923fd3076db3e932007e726e3 \ + --hash=sha256:516ea134e703e9fe26bcd1277a4b59ad30586ea90c365a87781d7887a646fe21 \ + --hash=sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91 \ + --hash=sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c \ + --hash=sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8 \ + --hash=sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df \ + --hash=sha256:6b5063083824e5509fdba180721d55909ffacccc8adbec85268b48439423d78c \ + --hash=sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb \ + 
--hash=sha256:6f61efb26e76c45c4a227835ddeae96d83624fb0d29eb5df5b96e14ed1a0afb7 \ + --hash=sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04 \ + --hash=sha256:760f83faa07f8b64e9c33fc963d790a2edb24efb479e3520c14a45741cd9b2db \ + --hash=sha256:78a97cf6a8839a48c49271cdcbd5cf37ca2c1d6b7fdd86cc864f302b5e9bf459 \ + --hash=sha256:7ce938a99998ed3c8aa7e7272dca1a610401ede816d36d0693907d863b10d9ea \ + --hash=sha256:8a6e050cb6164d3f830453754094c086ff2d0b2f3a897a1d9820f6139a1f0914 \ + --hash=sha256:9394673a9f4de09e28b5356e7fff97d778f8abad85c9d5ac4a4b7e25a0de7717 \ + --hash=sha256:94cd0549accc38d1494e1f8de71eca837d0509d0d44bf11d158524b0e12cebf9 \ + --hash=sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac \ + --hash=sha256:a23582810fedb8c0bc47524558fb6c56aac3fc252cb306072fd2815da2a47c32 \ + --hash=sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec \ + --hash=sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1 \ + --hash=sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb \ + --hash=sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac \ + --hash=sha256:b419ae593c86b87014b9be7396b385491ad7f320bde96826d0dd174459e54665 \ + --hash=sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e \ + --hash=sha256:c70cc23f12726be8f8bc72e41d5065d77e4515efae3690326764ea1b07845cfb \ + --hash=sha256:c8daeb2d2174beb4575b77482320303f3d39b8e81153da4f0fb08eb5fe86a6c5 \ + --hash=sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936 \ + --hash=sha256:d55f3dffadd674514ad19451161118fd010988540cee43d8bc20675e775925de \ + --hash=sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372 \ + --hash=sha256:db391fa7c66df6762ee3f00c95a89e6d428f4d60e7abc8328f4fe155b5ac6e54 \ + --hash=sha256:dfb781ff7eaa91a6f7fd41776ec37c5853c795d3b358d4896fdbb5df168af422 \ + --hash=sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849 \ + 
--hash=sha256:e7aec276d68421f9574040c26e2a7c3771060bc0cff408bae1dcb19d3ab1e63c \ + --hash=sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963 \ + --hash=sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018 # via pyjwt cycler==0.12.1 \ --hash=sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30 \ --hash=sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c # via matplotlib -debugpy==1.8.16 \ - --hash=sha256:135ccd2b1161bade72a7a099c9208811c137a150839e970aeaf121c2467debe8 \ - --hash=sha256:19c9521962475b87da6f673514f7fd610328757ec993bf7ec0d8c96f9a325f9e \ - --hash=sha256:211238306331a9089e253fd997213bc4a4c65f949271057d6695953254095376 \ - --hash=sha256:2801329c38f77c47976d341d18040a9ac09d0c71bf2c8b484ad27c74f83dc36f \ - --hash=sha256:2a3958fb9c2f40ed8ea48a0d34895b461de57a1f9862e7478716c35d76f56c65 \ - --hash=sha256:31e69a1feb1cf6b51efbed3f6c9b0ef03bc46ff050679c4be7ea6d2e23540870 \ - --hash=sha256:64473c4a306ba11a99fe0bb14622ba4fbd943eb004847d9b69b107bde45aa9ea \ - --hash=sha256:67371b28b79a6a12bcc027d94a06158f2fde223e35b5c4e0783b6f9d3b39274a \ - --hash=sha256:687c7ab47948697c03b8f81424aa6dc3f923e6ebab1294732df1ca9773cc67bc \ - --hash=sha256:70f5fcd6d4d0c150a878d2aa37391c52de788c3dc680b97bdb5e529cb80df87a \ - --hash=sha256:75f204684581e9ef3dc2f67687c3c8c183fde2d6675ab131d94084baf8084121 \ - --hash=sha256:833a61ed446426e38b0dd8be3e9d45ae285d424f5bf6cd5b2b559c8f12305508 \ - --hash=sha256:85df3adb1de5258dca910ae0bb185e48c98801ec15018a263a92bb06be1c8787 \ - --hash=sha256:8624a6111dc312ed8c363347a0b59c5acc6210d897e41a7c069de3c53235c9a6 \ - --hash=sha256:88eb9ffdfb59bf63835d146c183d6dba1f722b3ae2a5f4b9fc03e925b3358922 \ - --hash=sha256:a2ba6fc5d7c4bc84bcae6c5f8edf5988146e55ae654b1bb36fecee9e5e77e9e2 \ - --hash=sha256:b202e2843e32e80b3b584bcebfe0e65e0392920dc70df11b2bfe1afcb7a085e4 \ - --hash=sha256:b2abae6dd02523bec2dee16bd6b0781cccb53fd4995e5c71cc659b5f45581898 \ - 
--hash=sha256:b5aea1083f6f50023e8509399d7dc6535a351cc9f2e8827d1e093175e4d9fa4c \ - --hash=sha256:bee89e948bc236a5c43c4214ac62d28b29388453f5fd328d739035e205365f0b \ - --hash=sha256:c2c47c2e52b40449552843b913786499efcc3dbc21d6c49287d939cd0dbc49fd \ - --hash=sha256:cf358066650439847ec5ff3dae1da98b5461ea5da0173d93d5e10f477c94609a \ - --hash=sha256:d58c48d8dbbbf48a3a3a638714a2d16de537b0dace1e3432b8e92c57d43707f8 \ - --hash=sha256:e5ca7314042e8a614cc2574cd71f6ccd7e13a9708ce3c6d8436959eae56f2378 \ - --hash=sha256:f8340a3ac2ed4f5da59e064aa92e39edd52729a88fbde7bbaa54e08249a04493 \ - --hash=sha256:fee6db83ea5c978baf042440cfe29695e1a5d48a30147abf4c3be87513609817 +debugpy==1.8.17 \ + --hash=sha256:045290c010bcd2d82bc97aa2daf6837443cd52f6328592698809b4549babcee1 \ + --hash=sha256:1440fd514e1b815edd5861ca394786f90eb24960eb26d6f7200994333b1d79e3 \ + --hash=sha256:17e456da14848d618662354e1dccfd5e5fb75deec3d1d48dc0aa0baacda55860 \ + --hash=sha256:24693179ef9dfa20dca8605905a42b392be56d410c333af82f1c5dff807a64cc \ + --hash=sha256:3a32c0af575749083d7492dc79f6ab69f21b2d2ad4cd977a958a07d5865316e4 \ + --hash=sha256:3bea3b0b12f3946e098cce9b43c3c46e317b567f79570c3f43f0b96d00788088 \ + --hash=sha256:5c59b74aa5630f3a5194467100c3b3d1c77898f9ab27e3f7dc5d40fc2f122670 \ + --hash=sha256:60c7dca6571efe660ccb7a9508d73ca14b8796c4ed484c2002abba714226cfef \ + --hash=sha256:6a4e9dacf2cbb60d2514ff7b04b4534b0139facbf2abdffe0639ddb6088e59cf \ + --hash=sha256:6c5cd6f009ad4fca8e33e5238210dc1e5f42db07d4b6ab21ac7ffa904a196420 \ + --hash=sha256:857c1dd5d70042502aef1c6d1c2801211f3ea7e56f75e9c335f434afb403e464 \ + --hash=sha256:893cba7bb0f55161de4365584b025f7064e1f88913551bcd23be3260b231429c \ + --hash=sha256:8deb4e31cd575c9f9370042876e078ca118117c1b5e1f22c32befcfbb6955f0c \ + --hash=sha256:a3aad0537cf4d9c1996434be68c6c9a6d233ac6f76c2a482c7803295b4e4f99a \ + --hash=sha256:b13eea5587e44f27f6c48588b5ad56dcb74a4f3a5f89250443c94587f3eb2ea1 \ + 
--hash=sha256:b532282ad4eca958b1b2d7dbcb2b7218e02cb934165859b918e3b6ba7772d3f4 \ + --hash=sha256:b69b6bd9dba6a03632534cdf67c760625760a215ae289f7489a452af1031fe1f \ + --hash=sha256:b75868b675949a96ab51abc114c7163f40ff0d8f7d6d5fd63f8932fd38e9c6d7 \ + --hash=sha256:bb1bbf92317e1f35afcf3ef0450219efb3afe00be79d8664b250ac0933b9015f \ + --hash=sha256:c41d2ce8bbaddcc0009cc73f65318eedfa3dbc88a8298081deb05389f1ab5542 \ + --hash=sha256:c6bdf134457ae0cac6fb68205776be635d31174eeac9541e1d0c062165c6461f \ + --hash=sha256:d3fce3f0e3de262a3b67e69916d001f3e767661c6e1ee42553009d445d1cd840 \ + --hash=sha256:e34ee844c2f17b18556b5bbe59e1e2ff4e86a00282d2a46edab73fd7f18f4a83 \ + --hash=sha256:e79a195f9e059edfe5d8bf6f3749b2599452d3e9380484cd261f6b7cd2c7c4da \ + --hash=sha256:e851beb536a427b5df8aa7d0c7835b29a13812f41e46292ff80b2ef77327355a \ + --hash=sha256:e8f8f61c518952fb15f74a302e068b48d9c4691768ade433e4adeea961993464 \ + --hash=sha256:eaa85bce251feca8e4c87ce3b954aba84b8c645b90f0e6a515c00394a9f5c0e7 \ + --hash=sha256:f14467edef672195c6f6b8e27ce5005313cb5d03c9239059bc7182b60c176e2d \ + --hash=sha256:f2ac8055a0c4a09b30b931100996ba49ef334c6947e7ae365cdd870416d7513e \ + --hash=sha256:fd723b47a8c08892b1a16b2c6239a8b96637c62a59b94bb5dab4bac592a58a8e # via -r src/requirements.in docutils==0.21.2 \ --hash=sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f \ @@ -366,65 +438,65 @@ esbonio==0.16.5 \ --hash=sha256:04ba926e3603f7b1fde1abc690b47afd60749b64b1029b6bce8e1de0bb284921 \ --hash=sha256:acab2e16c6cf8f7232fb04e0d48514ce50566516b1f6fcf669ccf2f247e8b10f # via -r src/requirements.in -fonttools==4.59.1 \ - --hash=sha256:01158376b8a418a0bae9625c476cebfcfcb5e6761e9d243b219cd58341e7afbb \ - --hash=sha256:02e4fdf27c550dded10fe038a5981c29f81cb9bc649ff2eaa48e80dab8998f97 \ - --hash=sha256:075f745d539a998cd92cb84c339a82e53e49114ec62aaea8307c80d3ad3aef3a \ - --hash=sha256:0b9e4fa7eaf046ed6ac470f6033d52c052481ff7a6e0a92373d14f556f298dc0 \ - 
--hash=sha256:1017413cdc8555dce7ee23720da490282ab7ec1cf022af90a241f33f9a49afc4 \ - --hash=sha256:1ab4c1fb45f2984b8b4a3face7cff0f67f9766e9414cbb6fd061e9d77819de98 \ - --hash=sha256:2a2d0d33307f6ad3a2086a95dd607c202ea8852fa9fb52af9b48811154d1428a \ - --hash=sha256:2aeb829ad9d41a2ef17cab8bb5d186049ba38a840f10352e654aa9062ec32dc1 \ - --hash=sha256:2beb5bfc4887a3130f8625349605a3a45fe345655ce6031d1bac11017454b943 \ - --hash=sha256:39dfd42cc2dc647b2c5469bc7a5b234d9a49e72565b96dd14ae6f11c2c59ef15 \ - --hash=sha256:412a5fd6345872a7c249dac5bcce380393f40c1c316ac07f447bc17d51900922 \ - --hash=sha256:419f16d750d78e6d704bfe97b48bba2f73b15c9418f817d0cb8a9ca87a5b94bf \ - --hash=sha256:42052b56d176f8b315fbc09259439c013c0cb2109df72447148aeda677599612 \ - --hash=sha256:43ab814bbba5f02a93a152ee61a04182bb5809bd2bc3609f7822e12c53ae2c91 \ - --hash=sha256:43d177cd0e847ea026fedd9f099dc917da136ed8792d142298a252836390c478 \ - --hash=sha256:4909cce2e35706f3d18c54d3dcce0414ba5e0fb436a454dffec459c61653b513 \ - --hash=sha256:4f04c3ffbfa0baafcbc550657cf83657034eb63304d27b05cff1653b448ccff6 \ - --hash=sha256:5265bc52ed447187d39891b5f21d7217722735d0de9fe81326566570d12851a9 \ - --hash=sha256:57a3708ca6bfccb790f585fa6d8f29432ec329618a09ff94c16bcb3c55994643 \ - --hash=sha256:58a8844f96cff35860647a65345bfca87f47a2494bfb4bef754e58c082511443 \ - --hash=sha256:5b9b4c35b3be45e5bc774d3fc9608bbf4f9a8d371103b858c80edbeed31dd5aa \ - --hash=sha256:5c6d8d773470a5107052874341ed3c487c16ecd179976d81afed89dea5cd7406 \ - --hash=sha256:5d29ab70658d2ec19422b25e6ace00a0b0ae4181ee31e03335eaef53907d2d83 \ - --hash=sha256:5f3f021cea6e36410874763f4a517a5e2d6ac36ca8f95521f3a9fdaad0fe73dc \ - --hash=sha256:6065fdec8ff44c32a483fd44abe5bcdb40dd5e2571a5034b555348f2b3a52cea \ - --hash=sha256:647db657073672a8330608970a984d51573557f328030566521bc03415535042 \ - --hash=sha256:652159e8214eb4856e8387ebcd6b6bd336ee258cbeb639c8be52005b122b9609 \ - --hash=sha256:729367c91eb1ee84e61a733acc485065a00590618ca31c438e7dd4d600c01486 \ - 
--hash=sha256:74995b402ad09822a4c8002438e54940d9f1ecda898d2bb057729d7da983e4cb \ - --hash=sha256:8156b11c0d5405810d216f53907bd0f8b982aa5f1e7e3127ab3be1a4062154ff \ - --hash=sha256:8387876a8011caec52d327d5e5bca705d9399ec4b17afb8b431ec50d47c17d23 \ - --hash=sha256:89d9957b54246c6251345297dddf77a84d2c19df96af30d2de24093bbdf0528b \ - --hash=sha256:8c8758a7d97848fc8b514b3d9b4cb95243714b2f838dde5e1e3c007375de6214 \ - --hash=sha256:8ee39da0227950f88626c91e219659e6cd725ede826b1c13edd85fc4cec9bbe6 \ - --hash=sha256:8f8ef66ac6db450193ed150e10b3b45dde7aded10c5d279968bc63368027f62b \ - --hash=sha256:94f9721a564978a10d5c12927f99170d18e9a32e5a727c61eae56f956a4d118b \ - --hash=sha256:a960b09ff50c2e87864e83f352e5a90bcf1ad5233df579b1124660e1643de272 \ - --hash=sha256:ac216a2980a2d2b3b88c68a24f8a9bfb203e2490e991b3238502ad8f1e7bfed0 \ - --hash=sha256:b11bc177a0d428b37890825d7d025040d591aa833f85f8d8878ed183354f47df \ - --hash=sha256:bcd52eaa5c4c593ae9f447c1d13e7e4a00ca21d755645efa660b6999425b3c88 \ - --hash=sha256:bf5fb864f80061a40c1747e0dbc4f6e738de58dd6675b07eb80bd06a93b063c4 \ - --hash=sha256:c29ea087843e27a7cffc78406d32a5abf166d92afde7890394e9e079c9b4dbe9 \ - --hash=sha256:c2b0597522d4c5bb18aa5cf258746a2d4a90f25878cbe865e4d35526abd1b9fc \ - --hash=sha256:c536f8a852e8d3fa71dde1ec03892aee50be59f7154b533f0bf3c1174cfd5126 \ - --hash=sha256:c735e385e30278c54f43a0d056736942023c9043f84ee1021eff9fd616d17693 \ - --hash=sha256:c866eef7a0ba320486ade6c32bfc12813d1a5db8567e6904fb56d3d40acc5116 \ - --hash=sha256:cf7c5089d37787387123f1cb8f1793a47c5e1e3d1e4e7bfbc1cc96e0f925eabe \ - --hash=sha256:d31dc137ed8ec71dbc446949eba9035926e6e967b90378805dcf667ff57cabb1 \ - --hash=sha256:d5c3bfdc9663f3d4b565f9cb3b8c1efb3e178186435b45105bde7328cfddd7fe \ - --hash=sha256:d601b153e51a5a6221f0d4ec077b6bfc6ac35bfe6c19aeaa233d8990b2b71726 \ - --hash=sha256:e1ca10da138c300f768bb68e40e5b20b6ecfbd95f91aac4cc15010b6b9d65455 \ - --hash=sha256:e3680884189e2b7c3549f6d304376e64711fd15118e4b1ae81940cb6b1eaa267 \ - 
--hash=sha256:e54437651e1440ee53a95e6ceb6ee440b67a3d348c76f45f4f48de1a5ecab019 \ - --hash=sha256:e90a89e52deb56b928e761bb5b5f65f13f669bfd96ed5962975debea09776a23 \ - --hash=sha256:e9ad4ce044e3236f0814c906ccce8647046cc557539661e35211faadf76f283b \ - --hash=sha256:ea03f1da0d722fe3c2278a05957e6550175571a4894fbf9d178ceef4a3783d2b \ - --hash=sha256:efbec204fa9f877641747f2d9612b2b656071390d7a7ef07a9dbf0ecf9c7195c \ - --hash=sha256:fb13823a74b3a9204a8ed76d3d6d5ec12e64cc5bc44914eb9ff1cdac04facd43 +fonttools==4.60.1 \ + --hash=sha256:022beaea4b73a70295b688f817ddc24ed3e3418b5036ffcd5658141184ef0d0c \ + --hash=sha256:026290e4ec76583881763fac284aca67365e0be9f13a7fb137257096114cb3bc \ + --hash=sha256:0b0835ed15dd5b40d726bb61c846a688f5b4ce2208ec68779bc81860adb5851a \ + --hash=sha256:0eae96373e4b7c9e45d099d7a523444e3554360927225c1cdae221a58a45b856 \ + --hash=sha256:122e1a8ada290423c493491d002f622b1992b1ab0b488c68e31c413390dc7eb2 \ + --hash=sha256:1410155d0e764a4615774e5c2c6fc516259fe3eca5882f034eb9bfdbee056259 \ + --hash=sha256:145daa14bf24824b677b9357c5e44fd8895c2a8f53596e1b9ea3496081dc692c \ + --hash=sha256:1525796c3ffe27bb6268ed2a1bb0dcf214d561dfaf04728abf01489eb5339dce \ + --hash=sha256:154cb6ee417e417bf5f7c42fe25858c9140c26f647c7347c06f0cc2d47eff003 \ + --hash=sha256:2299df884c11162617a66b7c316957d74a18e3758c0274762d2cc87df7bc0272 \ + --hash=sha256:2409d5fb7b55fd70f715e6d34e7a6e4f7511b8ad29a49d6df225ee76da76dd77 \ + --hash=sha256:268ecda8ca6cb5c4f044b1fb9b3b376e8cd1b361cef275082429dc4174907038 \ + --hash=sha256:282dafa55f9659e8999110bd8ed422ebe1c8aecd0dc396550b038e6c9a08b8ea \ + --hash=sha256:2ee06fc57512144d8b0445194c2da9f190f61ad51e230f14836286470c99f854 \ + --hash=sha256:3630e86c484263eaac71d117085d509cbcf7b18f677906824e4bace598fb70d2 \ + --hash=sha256:398447f3d8c0c786cbf1209711e79080a40761eb44b27cdafffb48f52bcec258 \ + --hash=sha256:4ba4bd646e86de16160f0fb72e31c3b9b7d0721c3e5b26b9fa2fc931dfdb2652 \ + 
--hash=sha256:5664fd1a9ea7f244487ac8f10340c4e37664675e8667d6fee420766e0fb3cf08 \ + --hash=sha256:583b7f8e3c49486e4d489ad1deacfb8d5be54a8ef34d6df824f6a171f8511d99 \ + --hash=sha256:596ecaca36367027d525b3b426d8a8208169d09edcf8c7506aceb3a38bfb55c7 \ + --hash=sha256:5c1015318e4fec75dd4943ad5f6a206d9727adf97410d58b7e32ab644a807914 \ + --hash=sha256:66929e2ea2810c6533a5184f938502cfdaea4bc3efb7130d8cc02e1c1b4108d6 \ + --hash=sha256:6ec722ee589e89a89f5b7574f5c45604030aa6ae24cb2c751e2707193b466fed \ + --hash=sha256:6f68576bb4bbf6060c7ab047b1574a1ebe5c50a17de62830079967b211059ebb \ + --hash=sha256:7473a8ed9ed09aeaa191301244a5a9dbe46fe0bf54f9d6cd21d83044c3321217 \ + --hash=sha256:7b0c6d57ab00dae9529f3faf187f2254ea0aa1e04215cf2f1a8ec277c96661bc \ + --hash=sha256:7b4c32e232a71f63a5d00259ca3d88345ce2a43295bb049d21061f338124246f \ + --hash=sha256:8177ec9676ea6e1793c8a084a90b65a9f778771998eb919d05db6d4b1c0b114c \ + --hash=sha256:839565cbf14645952d933853e8ade66a463684ed6ed6c9345d0faf1f0e868877 \ + --hash=sha256:875cb7764708b3132637f6c5fb385b16eeba0f7ac9fa45a69d35e09b47045801 \ + --hash=sha256:8a44788d9d91df72d1a5eac49b31aeb887a5f4aab761b4cffc4196c74907ea85 \ + --hash=sha256:8b4eb332f9501cb1cd3d4d099374a1e1306783ff95489a1026bde9eb02ccc34a \ + --hash=sha256:906306ac7afe2156fcf0042173d6ebbb05416af70f6b370967b47f8f00103bbb \ + --hash=sha256:992775c9fbe2cf794786fa0ffca7f09f564ba3499b8fe9f2f80bd7197db60383 \ + --hash=sha256:996a4d1834524adbb423385d5a629b868ef9d774670856c63c9a0408a3063401 \ + --hash=sha256:9a52f254ce051e196b8fe2af4634c2d2f02c981756c6464dc192f1b6050b4e28 \ + --hash=sha256:9d0ced62b59e0430b3690dbc5373df1c2aa7585e9a8ce38eff87f0fd993c5b01 \ + --hash=sha256:a140761c4ff63d0cb9256ac752f230460ee225ccef4ad8f68affc723c88e2036 \ + --hash=sha256:a184b2ea57b13680ab6d5fbde99ccef152c95c06746cb7718c583abd8f945ccc \ + --hash=sha256:a3db56f153bd4c5c2b619ab02c5db5192e222150ce5a1bc10f16164714bc39ac \ + --hash=sha256:a46b2f450bc79e06ef3b6394f0c68660529ed51692606ad7f953fc2e448bc903 \ + 
--hash=sha256:a884aef09d45ba1206712c7dbda5829562d3fea7726935d3289d343232ecb0d3 \ + --hash=sha256:b2cf105cee600d2de04ca3cfa1f74f1127f8455b71dbad02b9da6ec266e116d6 \ + --hash=sha256:b33a7884fabd72bdf5f910d0cf46be50dce86a0362a65cfc746a4168c67eb96c \ + --hash=sha256:b42d86938e8dda1cd9a1a87a6d82f1818eaf933348429653559a458d027446da \ + --hash=sha256:b6379e7546ba4ae4b18f8ae2b9bc5960936007a1c0e30b342f662577e8bc3299 \ + --hash=sha256:c7420a2696a44650120cdd269a5d2e56a477e2bfa9d95e86229059beb1c19e15 \ + --hash=sha256:c8651e0d4b3bdeda6602b85fdc2abbefc1b41e573ecb37b6779c4ca50753a199 \ + --hash=sha256:d066ea419f719ed87bc2c99a4a4bfd77c2e5949cb724588b9dd58f3fd90b92bf \ + --hash=sha256:e6c58beb17380f7c2ea181ea11e7db8c0ceb474c9dd45f48e71e2cb577d146a1 \ + --hash=sha256:e852d9dda9f93ad3651ae1e3bb770eac544ec93c3807888798eccddf84596537 \ + --hash=sha256:ec3681a0cb34c255d76dd9d865a55f260164adb9fa02628415cdc2d43ee2c05d \ + --hash=sha256:ee0c0b3b35b34f782afc673d503167157094a16f442ace7c6c5e0ca80b08f50c \ + --hash=sha256:eedacb5c5d22b7097482fa834bda0dafa3d914a4e829ec83cdea2a01f8c813c4 \ + --hash=sha256:ef00af0439ebfee806b25f24c8f92109157ff3fac5731dc7867957812e87b8d9 \ + --hash=sha256:f0e8817c7d1a0c2eedebf57ef9a9896f3ea23324769a9a2061a80fe8852705ed \ + --hash=sha256:f3d5be054c461d6a2268831f04091dc82753176f6ea06dc6047a5e168265a987 \ + --hash=sha256:f4b5c37a5f40e4d733d3bbaaef082149bee5a5ea3156a785ff64d949bd1353fa # via matplotlib gitdb==4.0.12 \ --hash=sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571 \ @@ -438,9 +510,9 @@ h11==0.16.0 \ --hash=sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1 \ --hash=sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86 # via uvicorn -idna==3.10 \ - --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ - --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 +idna==3.11 \ + 
--hash=sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea \ + --hash=sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902 # via # anyio # requests @@ -452,7 +524,7 @@ iniconfig==2.1.0 \ --hash=sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7 \ --hash=sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760 # via - # -r external/score_tooling+/python_basics/requirements.txt + # -r /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/external/score_tooling+/python_basics/requirements.txt # pytest jinja2==3.1.6 \ --hash=sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d \ @@ -465,9 +537,9 @@ jsonschema==4.25.1 \ --hash=sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63 \ --hash=sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85 # via sphinx-needs -jsonschema-specifications==2025.4.1 \ - --hash=sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af \ - --hash=sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608 +jsonschema-specifications==2025.9.1 \ + --hash=sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe \ + --hash=sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d # via jsonschema kiwisolver==1.4.9 \ --hash=sha256:0749fd8f4218ad2e851e11cc4dc05c7cbc0cbc4267bdfdb31782e65aace4ee9c \ @@ -583,125 +655,153 @@ markdown-it-py==3.0.0 \ # mdit-py-plugins # myst-parser # rich -markupsafe==3.0.2 \ - --hash=sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4 \ - --hash=sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30 \ - --hash=sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0 \ - --hash=sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9 \ - --hash=sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396 \ - 
--hash=sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13 \ - --hash=sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028 \ - --hash=sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca \ - --hash=sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557 \ - --hash=sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832 \ - --hash=sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0 \ - --hash=sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b \ - --hash=sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579 \ - --hash=sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a \ - --hash=sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c \ - --hash=sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff \ - --hash=sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c \ - --hash=sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22 \ - --hash=sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094 \ - --hash=sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb \ - --hash=sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e \ - --hash=sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5 \ - --hash=sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a \ - --hash=sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d \ - --hash=sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a \ - --hash=sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b \ - --hash=sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8 \ - --hash=sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225 \ - --hash=sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c \ - 
--hash=sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144 \ - --hash=sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f \ - --hash=sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87 \ - --hash=sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d \ - --hash=sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93 \ - --hash=sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf \ - --hash=sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158 \ - --hash=sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84 \ - --hash=sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb \ - --hash=sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48 \ - --hash=sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171 \ - --hash=sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c \ - --hash=sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6 \ - --hash=sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd \ - --hash=sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d \ - --hash=sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1 \ - --hash=sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d \ - --hash=sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca \ - --hash=sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a \ - --hash=sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29 \ - --hash=sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe \ - --hash=sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798 \ - --hash=sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c \ - --hash=sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8 \ - 
--hash=sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f \ - --hash=sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f \ - --hash=sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a \ - --hash=sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178 \ - --hash=sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0 \ - --hash=sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79 \ - --hash=sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430 \ - --hash=sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50 +markupsafe==3.0.3 \ + --hash=sha256:0303439a41979d9e74d18ff5e2dd8c43ed6c6001fd40e5bf2e43f7bd9bbc523f \ + --hash=sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a \ + --hash=sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf \ + --hash=sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19 \ + --hash=sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf \ + --hash=sha256:0f4b68347f8c5eab4a13419215bdfd7f8c9b19f2b25520968adfad23eb0ce60c \ + --hash=sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175 \ + --hash=sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219 \ + --hash=sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb \ + --hash=sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6 \ + --hash=sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab \ + --hash=sha256:15d939a21d546304880945ca1ecb8a039db6b4dc49b2c5a400387cdae6a62e26 \ + --hash=sha256:177b5253b2834fe3678cb4a5f0059808258584c559193998be2601324fdeafb1 \ + --hash=sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce \ + --hash=sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218 \ + 
--hash=sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634 \ + --hash=sha256:1ba88449deb3de88bd40044603fafffb7bc2b055d626a330323a9ed736661695 \ + --hash=sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad \ + --hash=sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73 \ + --hash=sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c \ + --hash=sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe \ + --hash=sha256:2a15a08b17dd94c53a1da0438822d70ebcd13f8c3a95abe3a9ef9f11a94830aa \ + --hash=sha256:2f981d352f04553a7171b8e44369f2af4055f888dfb147d55e42d29e29e74559 \ + --hash=sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa \ + --hash=sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37 \ + --hash=sha256:3537e01efc9d4dccdf77221fb1cb3b8e1a38d5428920e0657ce299b20324d758 \ + --hash=sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f \ + --hash=sha256:38664109c14ffc9e7437e86b4dceb442b0096dfe3541d7864d9cbe1da4cf36c8 \ + --hash=sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d \ + --hash=sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c \ + --hash=sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97 \ + --hash=sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a \ + --hash=sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19 \ + --hash=sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9 \ + --hash=sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9 \ + --hash=sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc \ + --hash=sha256:591ae9f2a647529ca990bc681daebdd52c8791ff06c2bfa05b65163e28102ef2 \ + --hash=sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4 \ + --hash=sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354 \ + 
--hash=sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50 \ + --hash=sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698 \ + --hash=sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9 \ + --hash=sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b \ + --hash=sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc \ + --hash=sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115 \ + --hash=sha256:7c3fb7d25180895632e5d3148dbdc29ea38ccb7fd210aa27acbd1201a1902c6e \ + --hash=sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485 \ + --hash=sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f \ + --hash=sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12 \ + --hash=sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025 \ + --hash=sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009 \ + --hash=sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d \ + --hash=sha256:949b8d66bc381ee8b007cd945914c721d9aba8e27f71959d750a46f7c282b20b \ + --hash=sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a \ + --hash=sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5 \ + --hash=sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f \ + --hash=sha256:a320721ab5a1aba0a233739394eb907f8c8da5c98c9181d1161e77a0c8e36f2d \ + --hash=sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1 \ + --hash=sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287 \ + --hash=sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6 \ + --hash=sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f \ + --hash=sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581 \ + --hash=sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed \ + 
--hash=sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b \ + --hash=sha256:c0c0b3ade1c0b13b936d7970b1d37a57acde9199dc2aecc4c336773e1d86049c \ + --hash=sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026 \ + --hash=sha256:c4ffb7ebf07cfe8931028e3e4c85f0357459a3f9f9490886198848f4fa002ec8 \ + --hash=sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676 \ + --hash=sha256:d2ee202e79d8ed691ceebae8e0486bd9a2cd4794cec4824e1c99b6f5009502f6 \ + --hash=sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e \ + --hash=sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d \ + --hash=sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d \ + --hash=sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01 \ + --hash=sha256:df2449253ef108a379b8b5d6b43f4b1a8e81a061d6537becd5582fba5f9196d7 \ + --hash=sha256:e1c1493fb6e50ab01d20a22826e57520f1284df32f2d8601fdd90b6304601419 \ + --hash=sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795 \ + --hash=sha256:e2103a929dfa2fcaf9bb4e7c091983a49c9ac3b19c9061b6d5427dd7d14d81a1 \ + --hash=sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5 \ + --hash=sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d \ + --hash=sha256:e8fc20152abba6b83724d7ff268c249fa196d8259ff481f3b1476383f8f24e42 \ + --hash=sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe \ + --hash=sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda \ + --hash=sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e \ + --hash=sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737 \ + --hash=sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523 \ + --hash=sha256:f42d0984e947b8adf7dd6dde396e720934d12c506ce84eea8476409563607591 \ + --hash=sha256:f71a396b3bf33ecaa1626c255855702aca4d3d9fea5e051b41ac59a9c1c41edc \ + 
--hash=sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a \ + --hash=sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50 # via jinja2 -matplotlib==3.10.5 \ - --hash=sha256:00b6feadc28a08bd3c65b2894f56cf3c94fc8f7adcbc6ab4516ae1e8ed8f62e2 \ - --hash=sha256:07442d2692c9bd1cceaa4afb4bbe5b57b98a7599de4dabfcca92d3eea70f9ebe \ - --hash=sha256:080c3676a56b8ee1c762bcf8fca3fe709daa1ee23e6ef06ad9f3fc17332f2d2a \ - --hash=sha256:160e125da27a749481eaddc0627962990f6029811dbeae23881833a011a0907f \ - --hash=sha256:1f5f3ec4c191253c5f2b7c07096a142c6a1c024d9f738247bfc8e3f9643fc975 \ - --hash=sha256:1fc0d2a3241cdcb9daaca279204a3351ce9df3c0e7e621c7e04ec28aaacaca30 \ - --hash=sha256:1ff10ea43288f0c8bab608a305dc6c918cc729d429c31dcbbecde3b9f4d5b569 \ - --hash=sha256:21a95b9bf408178d372814de7baacd61c712a62cae560b5e6f35d791776f6516 \ - --hash=sha256:27f52634315e96b1debbfdc5c416592edcd9c4221bc2f520fd39c33db5d9f202 \ - --hash=sha256:2efaf97d72629e74252e0b5e3c46813e9eeaa94e011ecf8084a971a31a97f40b \ - --hash=sha256:33775bbeb75528555a15ac29396940128ef5613cf9a2d31fb1bfd18b3c0c0903 \ - --hash=sha256:352ed6ccfb7998a00881692f38b4ca083c691d3e275b4145423704c34c909076 \ - --hash=sha256:354204db3f7d5caaa10e5de74549ef6a05a4550fdd1c8f831ab9bca81efd39ed \ - --hash=sha256:3967424121d3a46705c9fa9bdb0931de3228f13f73d7bb03c999c88343a89d89 \ - --hash=sha256:3b80eb8621331449fc519541a7461987f10afa4f9cfd91afcd2276ebe19bd56c \ - --hash=sha256:47a388908e469d6ca2a6015858fa924e0e8a2345a37125948d8e93a91c47933e \ - --hash=sha256:48fe6d47380b68a37ccfcc94f009530e84d41f71f5dae7eda7c4a5a84aa0a674 \ - --hash=sha256:4b4984d5064a35b6f66d2c11d668565f4389b1119cc64db7a4c1725bc11adffc \ - --hash=sha256:4fa40a8f98428f789a9dcacd625f59b7bc4e3ef6c8c7c80187a7a709475cf592 \ - --hash=sha256:525f6e28c485c769d1f07935b660c864de41c37fd716bfa64158ea646f7084bb \ - --hash=sha256:52c6573dfcb7726a9907b482cd5b92e6b5499b284ffacb04ffbfe06b3e568124 \ - 
--hash=sha256:56da3b102cf6da2776fef3e71cd96fcf22103a13594a18ac9a9b31314e0be154 \ - --hash=sha256:5d4773a6d1c106ca05cb5a5515d277a6bb96ed09e5c8fab6b7741b8fcaa62c8f \ - --hash=sha256:64c4535419d5617f7363dad171a5a59963308e0f3f813c4bed6c9e6e2c131512 \ - --hash=sha256:6c49465bf689c4d59d174d0c7795fb42a21d4244d11d70e52b8011987367ac61 \ - --hash=sha256:707f9c292c4cd4716f19ab8a1f93f26598222cd931e0cd98fbbb1c5994bf7667 \ - --hash=sha256:77fab633e94b9da60512d4fa0213daeb76d5a7b05156840c4fd0399b4b818837 \ - --hash=sha256:7e44cada61bec8833c106547786814dd4a266c1b2964fd25daa3804f1b8d4467 \ - --hash=sha256:8a8da0453a7fd8e3da114234ba70c5ba9ef0e98f190309ddfde0f089accd46ea \ - --hash=sha256:8b6b49167d208358983ce26e43aa4196073b4702858670f2eb111f9a10652b4b \ - --hash=sha256:8dee65cb1424b7dc982fe87895b5613d4e691cc57117e8af840da0148ca6c1d7 \ - --hash=sha256:903352681b59f3efbf4546985142a9686ea1d616bb054b09a537a06e4b892ccf \ - --hash=sha256:94986a242747a0605cb3ff1cb98691c736f28a59f8ffe5175acaeb7397c49a5a \ - --hash=sha256:95672a5d628b44207aab91ec20bf59c26da99de12b88f7e0b1fb0a84a86ff959 \ - --hash=sha256:96ef8f5a3696f20f55597ffa91c28e2e73088df25c555f8d4754931515512715 \ - --hash=sha256:97b9d6443419085950ee4a5b1ee08c363e5c43d7176e55513479e53669e88468 \ - --hash=sha256:a17e57e33de901d221a07af32c08870ed4528db0b6059dce7d7e65c1122d4bea \ - --hash=sha256:a23193db2e9d64ece69cac0c8231849db7dd77ce59c7b89948cf9d0ce655a3ce \ - --hash=sha256:a277033048ab22d34f88a3c5243938cef776493f6201a8742ed5f8b553201343 \ - --hash=sha256:a41bcb6e2c8e79dc99c5511ae6f7787d2fb52efd3d805fff06d5d4f667db16b2 \ - --hash=sha256:a6b310f95e1102a8c7c817ef17b60ee5d1851b8c71b63d9286b66b177963039e \ - --hash=sha256:ac3d50760394d78a3c9be6b28318fe22b494c4fcf6407e8fd4794b538251899b \ - --hash=sha256:b072aac0c3ad563a2b3318124756cb6112157017f7431626600ecbe890df57a1 \ - --hash=sha256:b5fa2e941f77eb579005fb804026f9d0a1082276118d01cc6051d0d9626eaa7f \ - --hash=sha256:ba6c3c9c067b83481d647af88b4e441d532acdb5ef22178a14935b0b881188f4 \ - 
--hash=sha256:c04cba0f93d40e45b3c187c6c52c17f24535b27d545f757a2fffebc06c12b98b \ - --hash=sha256:c61333a8e5e6240e73769d5826b9a31d8b22df76c0778f8480baf1b4b01c9420 \ - --hash=sha256:ceefe5d40807d29a66ae916c6a3915d60ef9f028ce1927b84e727be91d884369 \ - --hash=sha256:d52fd5b684d541b5a51fb276b2b97b010c75bee9aa392f96b4a07aeb491e33c7 \ - --hash=sha256:dc88af74e7ba27de6cbe6faee916024ea35d895ed3d61ef6f58c4ce97da7185a \ - --hash=sha256:dcfc39c452c6a9f9028d3e44d2d721484f665304857188124b505b2c95e1eecf \ - --hash=sha256:e4a6470a118a2e93022ecc7d3bd16b3114b2004ea2bf014fff875b3bc99b70c6 \ - --hash=sha256:ee7a09ae2f4676276f5a65bd9f2bd91b4f9fbaedf49f40267ce3f9b448de501f \ - --hash=sha256:ee98a5c5344dc7f48dc261b6ba5d9900c008fc12beb3fa6ebda81273602cc389 \ - --hash=sha256:f6adb644c9d040ffb0d3434e440490a66cf73dbfa118a6f79cd7568431f7a012 +matplotlib==3.10.7 \ + --hash=sha256:07124afcf7a6504eafcb8ce94091c5898bbdd351519a1beb5c45f7a38c67e77f \ + --hash=sha256:09d7945a70ea43bf9248f4b6582734c2fe726723204a76eca233f24cffc7ef67 \ + --hash=sha256:0d8c32b7ea6fb80b1aeff5a2ceb3fb9778e2759e899d9beff75584714afcc5ee \ + --hash=sha256:11ae579ac83cdf3fb72573bb89f70e0534de05266728740d478f0f818983c695 \ + --hash=sha256:15112bcbaef211bd663fa935ec33313b948e214454d949b723998a43357b17b0 \ + --hash=sha256:1d9d3713a237970569156cfb4de7533b7c4eacdd61789726f444f96a0d28f57f \ + --hash=sha256:1e4bbad66c177a8fdfa53972e5ef8be72a5f27e6a607cec0d8579abd0f3102b1 \ + --hash=sha256:2222c7ba2cbde7fe63032769f6eb7e83ab3227f47d997a8453377709b7fe3a5a \ + --hash=sha256:22df30ffaa89f6643206cf13877191c63a50e8f800b038bc39bee9d2d4957632 \ + --hash=sha256:31963603041634ce1a96053047b40961f7a29eb8f9a62e80cc2c0427aa1d22a2 \ + --hash=sha256:37a1fea41153dd6ee061d21ab69c9cf2cf543160b1b85d89cd3d2e2a7902ca4c \ + --hash=sha256:3886e47f64611046bc1db523a09dd0a0a6bed6081e6f90e13806dd1d1d1b5e91 \ + --hash=sha256:4645fc5d9d20ffa3a39361fcdbcec731382763b623b72627806bf251b6388866 \ + 
--hash=sha256:4a11c2e9e72e7de09b7b72e62f3df23317c888299c875e2b778abf1eda8c0a42 \ + --hash=sha256:4a74f79fafb2e177f240579bc83f0b60f82cc47d2f1d260f422a0627207008ca \ + --hash=sha256:4c14b6acd16cddc3569a2d515cfdd81c7a68ac5639b76548cfc1a9e48b20eb65 \ + --hash=sha256:53b492410a6cd66c7a471de6c924f6ede976e963c0f3097a3b7abfadddc67d0a \ + --hash=sha256:53cc80662dd197ece414dd5b66e07370201515a3eaf52e7c518c68c16814773b \ + --hash=sha256:5c09cf8f2793f81368f49f118b6f9f937456362bee282eac575cca7f84cda537 \ + --hash=sha256:5e38c2d581d62ee729a6e144c47a71b3f42fb4187508dbbf4fe71d5612c3433b \ + --hash=sha256:5f3f6d315dcc176ba7ca6e74c7768fb7e4cf566c49cb143f6bc257b62e634ed8 \ + --hash=sha256:6516ce375109c60ceec579e699524e9d504cd7578506f01150f7a6bc174a775e \ + --hash=sha256:667ecd5d8d37813a845053d8f5bf110b534c3c9f30e69ebd25d4701385935a6d \ + --hash=sha256:6f1851eab59ca082c95df5a500106bad73672645625e04538b3ad0f69471ffcc \ + --hash=sha256:702590829c30aada1e8cef0568ddbffa77ca747b4d6e36c6d173f66e301f89cc \ + --hash=sha256:7146d64f561498764561e9cd0ed64fcf582e570fc519e6f521e2d0cfd43365e1 \ + --hash=sha256:744991e0cc863dd669c8dc9136ca4e6e0082be2070b9d793cbd64bec872a6815 \ + --hash=sha256:786656bb13c237bbcebcd402f65f44dd61ead60ee3deb045af429d889c8dbc67 \ + --hash=sha256:7a0edb7209e21840e8361e91ea84ea676658aa93edd5f8762793dec77a4a6748 \ + --hash=sha256:7ac81eee3b7c266dd92cee1cd658407b16c57eed08c7421fa354ed68234de380 \ + --hash=sha256:90ad854c0a435da3104c01e2c6f0028d7e719b690998a2333d7218db80950722 \ + --hash=sha256:9257be2f2a03415f9105c486d304a321168e61ad450f6153d77c69504ad764bb \ + --hash=sha256:932c55d1fa7af4423422cb6a492a31cbcbdbe68fd1a9a3f545aa5e7a143b5355 \ + --hash=sha256:a06ba7e2a2ef9131c79c49e63dad355d2d878413a0376c1727c8b9335ff731c7 \ + --hash=sha256:aebed7b50aa6ac698c90f60f854b47e48cd2252b30510e7a1feddaf5a3f72cbf \ + --hash=sha256:b172db79759f5f9bc13ef1c3ef8b9ee7b37b0247f987fbbbdaa15e4f87fd46a9 \ + --hash=sha256:b3c4ea4948d93c9c29dc01c0c23eef66f2101bf75158c291b88de6525c55c3d1 \ + 
--hash=sha256:b498e9e4022f93de2d5a37615200ca01297ceebbb56fe4c833f46862a490f9e3 \ + --hash=sha256:b4d41379b05528091f00e1728004f9a8d7191260f3862178b88e8fd770206318 \ + --hash=sha256:b69676845a0a66f9da30e87f48be36734d6748024b525ec4710be40194282c84 \ + --hash=sha256:c17398b709a6cce3d9fdb1595c33e356d91c098cd9486cb2cc21ea2ea418e715 \ + --hash=sha256:c380371d3c23e0eadf8ebff114445b9f970aff2010198d498d4ab4c3b41eea4f \ + --hash=sha256:cb783436e47fcf82064baca52ce748af71725d0352e1d31564cbe9c95df92b9c \ + --hash=sha256:cc1c51b846aca49a5a8b44fbba6a92d583a35c64590ad9e1e950dc88940a4297 \ + --hash=sha256:d0b181e9fa8daf1d9f2d4c547527b167cb8838fc587deabca7b5c01f97199e84 \ + --hash=sha256:d2a959c640cdeecdd2ec3136e8ea0441da59bcaf58d67e9c590740addba2cb68 \ + --hash=sha256:d5f256d49fea31f40f166a5e3131235a5d2f4b7f44520b1cf0baf1ce568ccff0 \ + --hash=sha256:d883460c43e8c6b173fef244a2341f7f7c0e9725c7fe68306e8e44ed9c8fb100 \ + --hash=sha256:d8eb7194b084b12feb19142262165832fc6ee879b945491d1c3d4660748020c4 \ + --hash=sha256:d9749313deb729f08207718d29c86246beb2ea3fdba753595b55901dee5d2fd6 \ + --hash=sha256:de66744b2bb88d5cd27e80dfc2ec9f0517d0a46d204ff98fe9e5f2864eb67657 \ + --hash=sha256:e91f61a064c92c307c5a9dc8c05dc9f8a68f0a3be199d9a002a0622e13f874a1 \ + --hash=sha256:f19410b486fdd139885ace124e57f938c1e6a3210ea13dd29cab58f5d4bc12c7 \ + --hash=sha256:f79d5de970fc90cd5591f60053aecfce1fcd736e0303d9f0bf86be649fa68fb8 \ + --hash=sha256:fba2974df0bf8ce3c995fa84b79cde38326e0f7b5409e7a3a481c1141340bcf7 # via sphinx-needs mdit-py-plugins==0.5.0 \ --hash=sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f \ @@ -730,83 +830,83 @@ nodejs-wheel-binaries==22.16.0 \ --hash=sha256:d695832f026df3a0cf9a089d222225939de9d1b67f8f0a353b79f015aabbe7e2 \ --hash=sha256:dbfccbcd558d2f142ccf66d8c3a098022bf4436db9525b5b8d32169ce185d99e # via - # -r external/score_tooling+/python_basics/requirements.txt + # -r 
/home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/external/score_tooling+/python_basics/requirements.txt # basedpyright -numpy==2.3.2 \ - --hash=sha256:07b62978075b67eee4065b166d000d457c82a1efe726cce608b9db9dd66a73a5 \ - --hash=sha256:087ffc25890d89a43536f75c5fe8770922008758e8eeeef61733957041ed2f9b \ - --hash=sha256:092aeb3449833ea9c0bf0089d70c29ae480685dd2377ec9cdbbb620257f84631 \ - --hash=sha256:095737ed986e00393ec18ec0b21b47c22889ae4b0cd2d5e88342e08b01141f58 \ - --hash=sha256:0a4f2021a6da53a0d580d6ef5db29947025ae8b35b3250141805ea9a32bbe86b \ - --hash=sha256:103ea7063fa624af04a791c39f97070bf93b96d7af7eb23530cd087dc8dbe9dc \ - --hash=sha256:11e58218c0c46c80509186e460d79fbdc9ca1eb8d8aee39d8f2dc768eb781089 \ - --hash=sha256:122bf5ed9a0221b3419672493878ba4967121514b1d7d4656a7580cd11dddcbf \ - --hash=sha256:14a91ebac98813a49bc6aa1a0dfc09513dcec1d97eaf31ca21a87221a1cdcb15 \ - --hash=sha256:1f91e5c028504660d606340a084db4b216567ded1056ea2b4be4f9d10b67197f \ - --hash=sha256:20b8200721840f5621b7bd03f8dcd78de33ec522fc40dc2641aa09537df010c3 \ - --hash=sha256:240259d6564f1c65424bcd10f435145a7644a65a6811cfc3201c4a429ba79170 \ - --hash=sha256:2738534837c6a1d0c39340a190177d7d66fdf432894f469728da901f8f6dc910 \ - --hash=sha256:27c9f90e7481275c7800dc9c24b7cc40ace3fdb970ae4d21eaff983a32f70c91 \ - --hash=sha256:293b2192c6bcce487dbc6326de5853787f870aeb6c43f8f9c6496db5b1781e45 \ - --hash=sha256:2c3271cc4097beb5a60f010bcc1cc204b300bb3eafb4399376418a83a1c6373c \ - --hash=sha256:2f4f0215edb189048a3c03bd5b19345bdfa7b45a7a6f72ae5945d2a28272727f \ - --hash=sha256:3dcf02866b977a38ba3ec10215220609ab9667378a9e2150615673f3ffd6c73b \ - --hash=sha256:4209f874d45f921bde2cff1ffcd8a3695f545ad2ffbef6d3d3c6768162efab89 \ - --hash=sha256:448a66d052d0cf14ce9865d159bfc403282c9bc7bb2a31b03cc18b651eca8b1a \ - --hash=sha256:4ae6863868aaee2f57503c7a5052b3a2807cf7a3914475e637a0ecd366ced220 \ - --hash=sha256:4d002ecf7c9b53240be3bb69d80f86ddbd34078bae04d87be81c1f58466f264e \ - 
--hash=sha256:4e6ecfeddfa83b02318f4d84acf15fbdbf9ded18e46989a15a8b6995dfbf85ab \ - --hash=sha256:508b0eada3eded10a3b55725b40806a4b855961040180028f52580c4729916a2 \ - --hash=sha256:546aaf78e81b4081b2eba1d105c3b34064783027a06b3ab20b6eba21fb64132b \ - --hash=sha256:572d5512df5470f50ada8d1972c5f1082d9a0b7aa5944db8084077570cf98370 \ - --hash=sha256:5ad4ebcb683a1f99f4f392cc522ee20a18b2bb12a2c1c42c3d48d5a1adc9d3d2 \ - --hash=sha256:66459dccc65d8ec98cc7df61307b64bf9e08101f9598755d42d8ae65d9a7a6ee \ - --hash=sha256:6936aff90dda378c09bea075af0d9c675fe3a977a9d2402f95a87f440f59f619 \ - --hash=sha256:69779198d9caee6e547adb933941ed7520f896fd9656834c300bdf4dd8642712 \ - --hash=sha256:6f1ae3dcb840edccc45af496f312528c15b1f79ac318169d094e85e4bb35fdf1 \ - --hash=sha256:71669b5daae692189540cffc4c439468d35a3f84f0c88b078ecd94337f6cb0ec \ - --hash=sha256:72c6df2267e926a6d5286b0a6d556ebe49eae261062059317837fda12ddf0c1a \ - --hash=sha256:72dbebb2dcc8305c431b2836bcc66af967df91be793d63a24e3d9b741374c450 \ - --hash=sha256:754d6755d9a7588bdc6ac47dc4ee97867271b17cee39cb87aef079574366db0a \ - --hash=sha256:76c3e9501ceb50b2ff3824c3589d5d1ab4ac857b0ee3f8f49629d0de55ecf7c2 \ - --hash=sha256:7a0e27186e781a69959d0230dd9909b5e26024f8da10683bd6344baea1885168 \ - --hash=sha256:7d6e390423cc1f76e1b8108c9b6889d20a7a1f59d9a60cac4a050fa734d6c1e2 \ - --hash=sha256:8145dd6d10df13c559d1e4314df29695613575183fa2e2d11fac4c208c8a1f73 \ - --hash=sha256:8446acd11fe3dc1830568c941d44449fd5cb83068e5c70bd5a470d323d448296 \ - --hash=sha256:852ae5bed3478b92f093e30f785c98e0cb62fa0a939ed057c31716e18a7a22b9 \ - --hash=sha256:87c930d52f45df092f7578889711a0768094debf73cfcde105e2d66954358125 \ - --hash=sha256:8b1224a734cd509f70816455c3cffe13a4f599b1bf7130f913ba0e2c0b2006c0 \ - --hash=sha256:8dc082ea901a62edb8f59713c6a7e28a85daddcb67454c839de57656478f5b19 \ - --hash=sha256:906a30249315f9c8e17b085cc5f87d3f369b35fedd0051d4a84686967bdbbd0b \ - --hash=sha256:938065908d1d869c7d75d8ec45f735a034771c6ea07088867f713d1cd3bbbe4f \ - 
--hash=sha256:9c144440db4bf3bb6372d2c3e49834cc0ff7bb4c24975ab33e01199e645416f2 \ - --hash=sha256:9e196ade2400c0c737d93465327d1ae7c06c7cb8a1756121ebf54b06ca183c7f \ - --hash=sha256:a3ef07ec8cbc8fc9e369c8dcd52019510c12da4de81367d8b20bc692aa07573a \ - --hash=sha256:a7af9ed2aa9ec5950daf05bb11abc4076a108bd3c7db9aa7251d5f107079b6a6 \ - --hash=sha256:a9f66e7d2b2d7712410d3bc5684149040ef5f19856f20277cd17ea83e5006286 \ - --hash=sha256:aa098a5ab53fa407fded5870865c6275a5cd4101cfdef8d6fafc48286a96e981 \ - --hash=sha256:af58de8745f7fa9ca1c0c7c943616c6fe28e75d0c81f5c295810e3c83b5be92f \ - --hash=sha256:b05a89f2fb84d21235f93de47129dd4f11c16f64c87c33f5e284e6a3a54e43f2 \ - --hash=sha256:b5e40e80299607f597e1a8a247ff8d71d79c5b52baa11cc1cce30aa92d2da6e0 \ - --hash=sha256:b9d0878b21e3918d76d2209c924ebb272340da1fb51abc00f986c258cd5e957b \ - --hash=sha256:bc3186bea41fae9d8e90c2b4fb5f0a1f5a690682da79b92574d63f56b529080b \ - --hash=sha256:c63d95dc9d67b676e9108fe0d2182987ccb0f11933c1e8959f42fa0da8d4fa56 \ - --hash=sha256:c771cfac34a4f2c0de8e8c97312d07d64fd8f8ed45bc9f5726a7e947270152b5 \ - --hash=sha256:c8d9727f5316a256425892b043736d63e89ed15bbfe6556c5ff4d9d4448ff3b3 \ - --hash=sha256:cbc95b3813920145032412f7e33d12080f11dc776262df1712e1638207dde9e8 \ - --hash=sha256:cefc2219baa48e468e3db7e706305fcd0c095534a192a08f31e98d83a7d45fb0 \ - --hash=sha256:d95f59afe7f808c103be692175008bab926b59309ade3e6d25009e9a171f7036 \ - --hash=sha256:dd937f088a2df683cbb79dda9a772b62a3e5a8a7e76690612c2737f38c6ef1b6 \ - --hash=sha256:de6ea4e5a65d5a90c7d286ddff2b87f3f4ad61faa3db8dabe936b34c2275b6f8 \ - --hash=sha256:e0486a11ec30cdecb53f184d496d1c6a20786c81e55e41640270130056f8ee48 \ - --hash=sha256:ee807923782faaf60d0d7331f5e86da7d5e3079e28b291973c545476c2b00d07 \ - --hash=sha256:efc81393f25f14d11c9d161e46e6ee348637c0a1e8a54bf9dedc472a3fae993b \ - --hash=sha256:f0a1a8476ad77a228e41619af2fa9505cf69df928e9aaa165746584ea17fed2b \ - --hash=sha256:f75018be4980a7324edc5930fe39aa391d5734531b1926968605416ff58c332d \ - 
--hash=sha256:f92d6c2a8535dc4fe4419562294ff957f83a16ebdec66df0805e473ffaad8bd0 \ - --hash=sha256:fb1752a3bb9a3ad2d6b090b88a9a0ae1cd6f004ef95f75825e2f382c183b2097 \ - --hash=sha256:fc927d7f289d14f5e037be917539620603294454130b6de200091e23d27dc9be \ - --hash=sha256:fed5527c4cf10f16c6d0b6bee1f89958bccb0ad2522c8cadc2efd318bcd545f5 +numpy==2.3.5 \ + --hash=sha256:00dc4e846108a382c5869e77c6ed514394bdeb3403461d25a829711041217d5b \ + --hash=sha256:0472f11f6ec23a74a906a00b48a4dcf3849209696dff7c189714511268d103ae \ + --hash=sha256:04822c00b5fd0323c8166d66c701dc31b7fbd252c100acd708c48f763968d6a3 \ + --hash=sha256:052e8c42e0c49d2575621c158934920524f6c5da05a1d3b9bab5d8e259e045f0 \ + --hash=sha256:09a1bea522b25109bf8e6f3027bd810f7c1085c64a0c7ce050c1676ad0ba010b \ + --hash=sha256:0cd00b7b36e35398fa2d16af7b907b65304ef8bb4817a550e06e5012929830fa \ + --hash=sha256:0d8163f43acde9a73c2a33605353a4f1bc4798745a8b1d73183b28e5b435ae28 \ + --hash=sha256:1062fde1dcf469571705945b0f221b73928f34a20c904ffb45db101907c3454e \ + --hash=sha256:11e06aa0af8c0f05104d56450d6093ee639e15f24ecf62d417329d06e522e017 \ + --hash=sha256:17531366a2e3a9e30762c000f2c43a9aaa05728712e25c11ce1dbe700c53ad41 \ + --hash=sha256:1978155dd49972084bd6ef388d66ab70f0c323ddee6f693d539376498720fb7e \ + --hash=sha256:1ed1ec893cff7040a02c8aa1c8611b94d395590d553f6b53629a4461dc7f7b63 \ + --hash=sha256:2dcd0808a421a482a080f89859a18beb0b3d1e905b81e617a188bd80422d62e9 \ + --hash=sha256:2e2eb32ddb9ccb817d620ac1d8dae7c3f641c1e5f55f531a33e8ab97960a75b8 \ + --hash=sha256:2feae0d2c91d46e59fcd62784a3a83b3fb677fead592ce51b5a6fbb4f95965ff \ + --hash=sha256:3095bdb8dd297e5920b010e96134ed91d852d81d490e787beca7e35ae1d89cf7 \ + --hash=sha256:30bc11310e8153ca664b14c5f1b73e94bd0503681fcf136a163de856f3a50139 \ + --hash=sha256:3101e5177d114a593d79dd79658650fe28b5a0d8abeb8ce6f437c0e6df5be1a4 \ + --hash=sha256:396084a36abdb603546b119d96528c2f6263921c50df3c8fd7cb28873a237748 \ + 
--hash=sha256:3997b5b3c9a771e157f9aae01dd579ee35ad7109be18db0e85dbdbe1de06e952 \ + --hash=sha256:414802f3b97f3c1eef41e530aaba3b3c1620649871d8cb38c6eaff034c2e16bd \ + --hash=sha256:51c1e14eb1e154ebd80e860722f9e6ed6ec89714ad2db2d3aa33c31d7c12179b \ + --hash=sha256:51c55fe3451421f3a6ef9a9c1439e82101c57a2c9eab9feb196a62b1a10b58ce \ + --hash=sha256:5ee6609ac3604fa7780e30a03e5e241a7956f8e2fcfe547d51e3afa5247ac47f \ + --hash=sha256:612a95a17655e213502f60cfb9bf9408efdc9eb1d5f50535cc6eb365d11b42b5 \ + --hash=sha256:6203fdf9f3dc5bdaed7319ad8698e685c7a3be10819f41d32a0723e611733b42 \ + --hash=sha256:63c0e9e7eea69588479ebf4a8a270d5ac22763cc5854e9a7eae952a3908103f7 \ + --hash=sha256:66f85ce62c70b843bab1fb14a05d5737741e74e28c7b8b5a064de10142fad248 \ + --hash=sha256:6cf9b429b21df6b99f4dee7a1218b8b7ffbbe7df8764dc0bd60ce8a0708fed1e \ + --hash=sha256:70b37199913c1bd300ff6e2693316c6f869c7ee16378faf10e4f5e3275b299c3 \ + --hash=sha256:727fd05b57df37dc0bcf1a27767a3d9a78cbbc92822445f32cc3436ba797337b \ + --hash=sha256:74ae7b798248fe62021dbf3c914245ad45d1a6b0cb4a29ecb4b31d0bfbc4cc3e \ + --hash=sha256:784db1dcdab56bf0517743e746dfb0f885fc68d948aba86eeec2cba234bdf1c0 \ + --hash=sha256:86945f2ee6d10cdfd67bcb4069c1662dd711f7e2a4343db5cecec06b87cf31aa \ + --hash=sha256:86d835afea1eaa143012a2d7a3f45a3adce2d7adc8b4961f0b362214d800846a \ + --hash=sha256:872a5cf366aec6bb1147336480fef14c9164b154aeb6542327de4970282cd2f5 \ + --hash=sha256:8b973c57ff8e184109db042c842423ff4f60446239bd585a5131cc47f06f789d \ + --hash=sha256:8cba086a43d54ca804ce711b2a940b16e452807acebe7852ff327f1ecd49b0d4 \ + --hash=sha256:8f7f0e05112916223d3f438f293abf0727e1181b5983f413dfa2fefc4098245c \ + --hash=sha256:900218e456384ea676e24ea6a0417f030a3b07306d29d7ad843957b40a9d8d52 \ + --hash=sha256:93eebbcf1aafdf7e2ddd44c2923e2672e1010bddc014138b229e49725b4d6be5 \ + --hash=sha256:9c75442b2209b8470d6d5d8b1c25714270686f14c749028d2199c54e29f20b4d \ + --hash=sha256:9ee2197ef8c4f0dfe405d835f3b6a14f5fee7782b5de51ba06fb65fc9b36e9f1 \ + 
--hash=sha256:a414504bef8945eae5f2d7cb7be2d4af77c5d1cb5e20b296c2c25b61dff2900c \ + --hash=sha256:a4b9159734b326535f4dd01d947f919c6eefd2d9827466a696c44ced82dfbc18 \ + --hash=sha256:a80afd79f45f3c4a7d341f13acbe058d1ca8ac017c165d3fa0d3de6bc1a079d7 \ + --hash=sha256:aa5bc7c5d59d831d9773d1170acac7893ce3a5e130540605770ade83280e7188 \ + --hash=sha256:acfd89508504a19ed06ef963ad544ec6664518c863436306153e13e94605c218 \ + --hash=sha256:aeffcab3d4b43712bb7a60b65f6044d444e75e563ff6180af8f98dd4b905dfd2 \ + --hash=sha256:afaffc4393205524af9dfa400fa250143a6c3bc646c08c9f5e25a9f4b4d6a903 \ + --hash=sha256:b0c7088a73aef3d687c4deef8452a3ac7c1be4e29ed8bf3b366c8111128ac60c \ + --hash=sha256:b46b4ec24f7293f23adcd2d146960559aaf8020213de8ad1909dba6c013bf89c \ + --hash=sha256:b501b5fa195cc9e24fe102f21ec0a44dffc231d2af79950b451e0d99cea02234 \ + --hash=sha256:bf06bc2af43fa8d32d30fae16ad965663e966b1a3202ed407b84c989c3221e82 \ + --hash=sha256:c804e3a5aba5460c73955c955bdbd5c08c354954e9270a2c1565f62e866bdc39 \ + --hash=sha256:c8a9958e88b65c3b27e22ca2a076311636850b612d6bbfb76e8d156aacde2aaf \ + --hash=sha256:cc0a57f895b96ec78969c34f682c602bf8da1a0270b09bc65673df2e7638ec20 \ + --hash=sha256:cc8920d2ec5fa99875b670bb86ddeb21e295cb07aa331810d9e486e0b969d946 \ + --hash=sha256:ccc933afd4d20aad3c00bcef049cb40049f7f196e0397f1109dba6fed63267b0 \ + --hash=sha256:ce581db493ea1a96c0556360ede6607496e8bf9b3a8efa66e06477267bc831e9 \ + --hash=sha256:d0f23b44f57077c1ede8c5f26b30f706498b4862d3ff0a7298b8411dd2f043ff \ + --hash=sha256:d21644de1b609825ede2f48be98dfde4656aefc713654eeee280e37cadc4e0ad \ + --hash=sha256:d6889ec4ec662a1a37eb4b4fb26b6100841804dac55bd9df579e326cdc146227 \ + --hash=sha256:de5672f4a7b200c15a4127042170a694d4df43c992948f5e1af57f0174beed10 \ + --hash=sha256:e6a0bc88393d65807d751a614207b7129a310ca4fe76a74e5c7da5fa5671417e \ + --hash=sha256:ed89927b86296067b4f81f108a2271d8926467a8868e554eaf370fc27fa3ccaf \ + --hash=sha256:ee3888d9ff7c14604052b2ca5535a30216aa0a58e948cdd3eeb8d3415f638769 \ + 
--hash=sha256:f0963b55cdd70fad460fa4c1341f12f976bb26cb66021a5580329bd498988310 \ + --hash=sha256:f16417ec91f12f814b10bafe79ef77e70113a2f5f7018640e7425ff979253425 \ + --hash=sha256:f28620fe26bee16243be2b7b874da327312240a7cdc38b769a697578d2100013 \ + --hash=sha256:f4255143f5160d0de972d28c8f9665d882b5f61309d8362fdd3e103cf7bf010c \ + --hash=sha256:ffac52f28a7849ad7576293c0cb7b9f08304e8f7d738a8cb8a90ec4c55a998eb \ + --hash=sha256:ffe22d2b05504f786c867c8395de703937f934272eb67586817b46188b4ded6d \ + --hash=sha256:fffe29a1ef00883599d1dc2c51aa2e5d80afe49523c261a74933df395c15c520 # via # contourpy # matplotlib @@ -814,140 +914,125 @@ packaging==25.0 \ --hash=sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484 \ --hash=sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f # via - # -r external/score_tooling+/python_basics/requirements.txt + # -r /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/external/score_tooling+/python_basics/requirements.txt # matplotlib # pytest # sphinx # sphinx-collections -pillow==11.3.0 \ - --hash=sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2 \ - --hash=sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214 \ - --hash=sha256:040a5b691b0713e1f6cbe222e0f4f74cd233421e105850ae3b3c0ceda520f42e \ - --hash=sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59 \ - --hash=sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50 \ - --hash=sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632 \ - --hash=sha256:092c80c76635f5ecb10f3f83d76716165c96f5229addbd1ec2bdbbda7d496e06 \ - --hash=sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a \ - --hash=sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51 \ - --hash=sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced \ - --hash=sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f \ - 
--hash=sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12 \ - --hash=sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8 \ - --hash=sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6 \ - --hash=sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580 \ - --hash=sha256:19d2ff547c75b8e3ff46f4d9ef969a06c30ab2d4263a9e287733aa8b2429ce8f \ - --hash=sha256:1a992e86b0dd7aeb1f053cd506508c0999d710a8f07b4c791c63843fc6a807ac \ - --hash=sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860 \ - --hash=sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd \ - --hash=sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722 \ - --hash=sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8 \ - --hash=sha256:23cff760a9049c502721bdb743a7cb3e03365fafcdfc2ef9784610714166e5a4 \ - --hash=sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673 \ - --hash=sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788 \ - --hash=sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542 \ - --hash=sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e \ - --hash=sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd \ - --hash=sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8 \ - --hash=sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523 \ - --hash=sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967 \ - --hash=sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809 \ - --hash=sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477 \ - --hash=sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027 \ - --hash=sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae \ - --hash=sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b \ - 
--hash=sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c \ - --hash=sha256:48d254f8a4c776de343051023eb61ffe818299eeac478da55227d96e241de53f \ - --hash=sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e \ - --hash=sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b \ - --hash=sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7 \ - --hash=sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27 \ - --hash=sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361 \ - --hash=sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae \ - --hash=sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d \ - --hash=sha256:6359a3bc43f57d5b375d1ad54a0074318a0844d11b76abccf478c37c986d3cfc \ - --hash=sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58 \ - --hash=sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad \ - --hash=sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6 \ - --hash=sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024 \ - --hash=sha256:6a418691000f2a418c9135a7cf0d797c1bb7d9a485e61fe8e7722845b95ef978 \ - --hash=sha256:6abdbfd3aea42be05702a8dd98832329c167ee84400a1d1f61ab11437f1717eb \ - --hash=sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d \ - --hash=sha256:7107195ddc914f656c7fc8e4a5e1c25f32e9236ea3ea860f257b0436011fddd0 \ - --hash=sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9 \ - --hash=sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f \ - --hash=sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874 \ - --hash=sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa \ - --hash=sha256:7aee118e30a4cf54fdd873bd3a29de51e29105ab11f9aad8c32123f58c8f8081 \ - --hash=sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149 \ - 
--hash=sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6 \ - --hash=sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d \ - --hash=sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd \ - --hash=sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f \ - --hash=sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c \ - --hash=sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31 \ - --hash=sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e \ - --hash=sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db \ - --hash=sha256:89bd777bc6624fe4115e9fac3352c79ed60f3bb18651420635f26e643e3dd1f6 \ - --hash=sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f \ - --hash=sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494 \ - --hash=sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69 \ - --hash=sha256:932c754c2d51ad2b2271fd01c3d121daaa35e27efae2a616f77bf164bc0b3e94 \ - --hash=sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77 \ - --hash=sha256:97afb3a00b65cc0804d1c7abddbf090a81eaac02768af58cbdcaaa0a931e0b6d \ - --hash=sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7 \ - --hash=sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a \ - --hash=sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438 \ - --hash=sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288 \ - --hash=sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b \ - --hash=sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635 \ - --hash=sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3 \ - --hash=sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d \ - --hash=sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe \ - 
--hash=sha256:b4b8f3efc8d530a1544e5962bd6b403d5f7fe8b9e08227c6b255f98ad82b4ba0 \ - --hash=sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe \ - --hash=sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a \ - --hash=sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805 \ - --hash=sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8 \ - --hash=sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36 \ - --hash=sha256:cadc9e0ea0a2431124cde7e1697106471fc4c1da01530e679b2391c37d3fbb3a \ - --hash=sha256:cc3e831b563b3114baac7ec2ee86819eb03caa1a2cef0b481a5675b59c4fe23b \ - --hash=sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e \ - --hash=sha256:d000f46e2917c705e9fb93a3606ee4a819d1e3aa7a9b442f6444f07e77cf5e25 \ - --hash=sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12 \ - --hash=sha256:e5c5858ad8ec655450a7c7df532e9842cf8df7cc349df7225c60d5d348c8aada \ - --hash=sha256:e67d793d180c9df62f1f40aee3accca4829d3794c95098887edc18af4b8b780c \ - --hash=sha256:ea944117a7974ae78059fcc1800e5d3295172bb97035c0c1d9345fca1419da71 \ - --hash=sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d \ - --hash=sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c \ - --hash=sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6 \ - --hash=sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1 \ - --hash=sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50 \ - --hash=sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653 \ - --hash=sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c \ - --hash=sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4 \ - --hash=sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3 +pillow==12.0.0 \ + 
--hash=sha256:0869154a2d0546545cde61d1789a6524319fc1897d9ee31218eae7a60ccc5643 \ + --hash=sha256:09f2d0abef9e4e2f349305a4f8cc784a8a6c2f58a8c4892eea13b10a943bd26e \ + --hash=sha256:0b817e7035ea7f6b942c13aa03bb554fc44fea70838ea21f8eb31c638326584e \ + --hash=sha256:0fd00cac9c03256c8b2ff58f162ebcd2587ad3e1f2e397eab718c47e24d231cc \ + --hash=sha256:110486b79f2d112cf6add83b28b627e369219388f64ef2f960fef9ebaf54c642 \ + --hash=sha256:1979f4566bb96c1e50a62d9831e2ea2d1211761e5662afc545fa766f996632f6 \ + --hash=sha256:1ac11e8ea4f611c3c0147424eae514028b5e9077dd99ab91e1bd7bc33ff145e1 \ + --hash=sha256:1b1b133e6e16105f524a8dec491e0586d072948ce15c9b914e41cdadd209052b \ + --hash=sha256:1ee80a59f6ce048ae13cda1abf7fbd2a34ab9ee7d401c46be3ca685d1999a399 \ + --hash=sha256:21f241bdd5080a15bc86d3466a9f6074a9c2c2b314100dd896ac81ee6db2f1ba \ + --hash=sha256:266cd5f2b63ff316d5a1bba46268e603c9caf5606d44f38c2873c380950576ad \ + --hash=sha256:26d9f7d2b604cd23aba3e9faf795787456ac25634d82cd060556998e39c6fa47 \ + --hash=sha256:27f95b12453d165099c84f8a8bfdfd46b9e4bda9e0e4b65f0635430027f55739 \ + --hash=sha256:2c54c1a783d6d60595d3514f0efe9b37c8808746a66920315bfd34a938d7994b \ + --hash=sha256:2fa5f0b6716fc88f11380b88b31fe591a06c6315e955c096c35715788b339e3f \ + --hash=sha256:32ed80ea8a90ee3e6fa08c21e2e091bba6eda8eccc83dbc34c95169507a91f10 \ + --hash=sha256:3830c769decf88f1289680a59d4f4c46c72573446352e2befec9a8512104fa52 \ + --hash=sha256:38df9b4bfd3db902c9c2bd369bcacaf9d935b2fff73709429d95cc41554f7b3d \ + --hash=sha256:3adfb466bbc544b926d50fe8f4a4e6abd8c6bffd28a26177594e6e9b2b76572b \ + --hash=sha256:3e42edad50b6909089750e65c91aa09aaf1e0a71310d383f11321b27c224ed8a \ + --hash=sha256:4078242472387600b2ce8d93ade8899c12bf33fa89e55ec89fe126e9d6d5d9e9 \ + --hash=sha256:455247ac8a4cfb7b9bc45b7e432d10421aea9fc2e74d285ba4072688a74c2e9d \ + --hash=sha256:4cc6b3b2efff105c6a1656cfe59da4fdde2cda9af1c5e0b58529b24525d0a098 \ + --hash=sha256:4cf7fed4b4580601c4345ceb5d4cbf5a980d030fd5ad07c4d2ec589f95f09905 \ + 
--hash=sha256:5193fde9a5f23c331ea26d0cf171fbf67e3f247585f50c08b3e205c7aeb4589b \ + --hash=sha256:5269cc1caeedb67e6f7269a42014f381f45e2e7cd42d834ede3c703a1d915fe3 \ + --hash=sha256:53561a4ddc36facb432fae7a9d8afbfaf94795414f5cdc5fc52f28c1dca90371 \ + --hash=sha256:55f818bd74fe2f11d4d7cbc65880a843c4075e0ac7226bc1a23261dbea531953 \ + --hash=sha256:58eea5ebe51504057dd95c5b77d21700b77615ab0243d8152793dc00eb4faf01 \ + --hash=sha256:5d5c411a8eaa2299322b647cd932586b1427367fd3184ffbb8f7a219ea2041ca \ + --hash=sha256:6846bd2d116ff42cba6b646edf5bf61d37e5cbd256425fa089fee4ff5c07a99e \ + --hash=sha256:6ace95230bfb7cd79ef66caa064bbe2f2a1e63d93471c3a2e1f1348d9f22d6b7 \ + --hash=sha256:6e51b71417049ad6ab14c49608b4a24d8fb3fe605e5dfabfe523b58064dc3d27 \ + --hash=sha256:71db6b4c1653045dacc1585c1b0d184004f0d7e694c7b34ac165ca70c0838082 \ + --hash=sha256:7438839e9e053ef79f7112c881cef684013855016f928b168b81ed5835f3e75e \ + --hash=sha256:759de84a33be3b178a64c8ba28ad5c135900359e85fb662bc6e403ad4407791d \ + --hash=sha256:792a2c0be4dcc18af9d4a2dfd8a11a17d5e25274a1062b0ec1c2d79c76f3e7f8 \ + --hash=sha256:7d87ef5795da03d742bf49439f9ca4d027cde49c82c5371ba52464aee266699a \ + --hash=sha256:7dfb439562f234f7d57b1ac6bc8fe7f838a4bd49c79230e0f6a1da93e82f1fad \ + --hash=sha256:7fa22993bac7b77b78cae22bad1e2a987ddf0d9015c63358032f84a53f23cdc3 \ + --hash=sha256:805ebf596939e48dbb2e4922a1d3852cfc25c38160751ce02da93058b48d252a \ + --hash=sha256:82240051c6ca513c616f7f9da06e871f61bfd7805f566275841af15015b8f98d \ + --hash=sha256:87d4f8125c9988bfbed67af47dd7a953e2fc7b0cc1e7800ec6d2080d490bb353 \ + --hash=sha256:8d8ca2b210ada074d57fcee40c30446c9562e542fc46aedc19baf758a93532ee \ + --hash=sha256:8dc232e39d409036af549c86f24aed8273a40ffa459981146829a324e0848b4b \ + --hash=sha256:90387104ee8400a7b4598253b4c406f8958f59fcf983a6cea2b50d59f7d63d0b \ + --hash=sha256:905b0365b210c73afb0ebe9101a32572152dfd1c144c7e28968a331b9217b94a \ + --hash=sha256:99353a06902c2e43b43e8ff74ee65a7d90307d82370604746738a1e0661ccca7 \ + 
--hash=sha256:99a7f72fb6249302aa62245680754862a44179b545ded638cf1fef59befb57ef \ + --hash=sha256:9f0b04c6b8584c2c193babcccc908b38ed29524b29dd464bc8801bf10d746a3a \ + --hash=sha256:9fe611163f6303d1619bbcb653540a4d60f9e55e622d60a3108be0d5b441017a \ + --hash=sha256:a3475b96f5908b3b16c47533daaa87380c491357d197564e0ba34ae75c0f3257 \ + --hash=sha256:a6597ff2b61d121172f5844b53f21467f7082f5fb385a9a29c01414463f93b07 \ + --hash=sha256:a7921c5a6d31b3d756ec980f2f47c0cfdbce0fc48c22a39347a895f41f4a6ea4 \ + --hash=sha256:aa5129de4e174daccbc59d0a3b6d20eaf24417d59851c07ebb37aeb02947987c \ + --hash=sha256:aeaefa96c768fc66818730b952a862235d68825c178f1b3ffd4efd7ad2edcb7c \ + --hash=sha256:afbefa430092f71a9593a99ab6a4e7538bc9eabbf7bf94f91510d3503943edc4 \ + --hash=sha256:aff9e4d82d082ff9513bdd6acd4f5bd359f5b2c870907d2b0a9c5e10d40c88fe \ + --hash=sha256:b22bd8c974942477156be55a768f7aa37c46904c175be4e158b6a86e3a6b7ca8 \ + --hash=sha256:b290fd8aa38422444d4b50d579de197557f182ef1068b75f5aa8558638b8d0a5 \ + --hash=sha256:b2e4b27a6e15b04832fe9bf292b94b5ca156016bbc1ea9c2c20098a0320d6cf6 \ + --hash=sha256:b583dc9070312190192631373c6c8ed277254aa6e6084b74bdd0a6d3b221608e \ + --hash=sha256:b87843e225e74576437fd5b6a4c2205d422754f84a06942cfaf1dc32243e45a8 \ + --hash=sha256:bc91a56697869546d1b8f0a3ff35224557ae7f881050e99f615e0119bf934b4e \ + --hash=sha256:bd87e140e45399c818fac4247880b9ce719e4783d767e030a883a970be632275 \ + --hash=sha256:bde737cff1a975b70652b62d626f7785e0480918dece11e8fef3c0cf057351c3 \ + --hash=sha256:bdee52571a343d721fb2eb3b090a82d959ff37fc631e3f70422e0c2e029f3e76 \ + --hash=sha256:bee2a6db3a7242ea309aa7ee8e2780726fed67ff4e5b40169f2c940e7eb09227 \ + --hash=sha256:beeae3f27f62308f1ddbcfb0690bf44b10732f2ef43758f169d5e9303165d3f9 \ + --hash=sha256:c50f36a62a22d350c96e49ad02d0da41dbd17ddc2e29750dbdba4323f85eb4a5 \ + --hash=sha256:c607c90ba67533e1b2355b821fef6764d1dd2cbe26b8c1005ae84f7aea25ff79 \ + --hash=sha256:c7b2a63fd6d5246349f3d3f37b14430d73ee7e8173154461785e43036ffa96ca \ + 
--hash=sha256:c828a1ae702fc712978bda0320ba1b9893d99be0badf2647f693cc01cf0f04fa \ + --hash=sha256:c85de1136429c524e55cfa4e033b4a7940ac5c8ee4d9401cc2d1bf48154bbc7b \ + --hash=sha256:c98fa880d695de164b4135a52fd2e9cd7b7c90a9d8ac5e9e443a24a95ef9248e \ + --hash=sha256:cae81479f77420d217def5f54b5b9d279804d17e982e0f2fa19b1d1e14ab5197 \ + --hash=sha256:d034140032870024e6b9892c692fe2968493790dd57208b2c37e3fb35f6df3ab \ + --hash=sha256:d120c38a42c234dc9a8c5de7ceaaf899cf33561956acb4941653f8bdc657aa79 \ + --hash=sha256:d4827615da15cd59784ce39d3388275ec093ae3ee8d7f0c089b76fa87af756c2 \ + --hash=sha256:d49e2314c373f4c2b39446fb1a45ed333c850e09d0c59ac79b72eb3b95397363 \ + --hash=sha256:d52610d51e265a51518692045e372a4c363056130d922a7351429ac9f27e70b0 \ + --hash=sha256:d64317d2587c70324b79861babb9c09f71fbb780bad212018874b2c013d8600e \ + --hash=sha256:d77153e14b709fd8b8af6f66a3afbb9ed6e9fc5ccf0b6b7e1ced7b036a228782 \ + --hash=sha256:d7e091d464ac59d2c7ad8e7e08105eaf9dafbc3883fd7265ffccc2baad6ac925 \ + --hash=sha256:dd333073e0cacdc3089525c7df7d39b211bcdf31fc2824e49d01c6b6187b07d0 \ + --hash=sha256:e5d8efac84c9afcb40914ab49ba063d94f5dbdf5066db4482c66a992f47a3a3b \ + --hash=sha256:f135c702ac42262573fe9714dfe99c944b4ba307af5eb507abef1667e2cbbced \ + --hash=sha256:f13711b1a5ba512d647a0e4ba79280d3a9a045aaf7e0cc6fbe96b91d4cdf6b0c \ + --hash=sha256:f4f1231b7dec408e8670264ce63e9c71409d9583dd21d32c163e25213ee2a344 \ + --hash=sha256:fa3ed2a29a9e9d2d488b4da81dcb54720ac3104a20bf0bd273f1e4648aff5af9 \ + --hash=sha256:fb3096c30df99fd01c7bf8e544f392103d0795b9f98ba71a8054bcbf56b255f1 # via matplotlib -platformdirs==4.3.8 \ - --hash=sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc \ - --hash=sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4 +platformdirs==4.5.0 \ + --hash=sha256:70ddccdd7c99fc5942e9fc25636a8b34d04c24b335100223152c2803e4063312 \ + --hash=sha256:e578a81bb873cbb89a41fcc904c7ef523cc18284b7e3b3ccf06aca1403b7ebd3 # via esbonio pluggy==1.6.0 \ 
--hash=sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3 \ --hash=sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746 # via - # -r external/score_tooling+/python_basics/requirements.txt + # -r /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/external/score_tooling+/python_basics/requirements.txt # pytest -pycparser==2.22 \ - --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ - --hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc +pycparser==2.23 \ + --hash=sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2 \ + --hash=sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934 # via cffi pydata-sphinx-theme==0.16.1 \ --hash=sha256:225331e8ac4b32682c18fcac5a57a6f717c4e632cea5dd0e247b55155faeccde \ --hash=sha256:a08b7f0b7f70387219dc659bff0893a7554d5eb39b59d3b8ef37b8401b7642d7 # via -r src/requirements.in -pygithub==2.7.0 \ - --hash=sha256:40ecbfe26dc55cc34ab4b0ffa1d455e6f816ef9a2bc8d6f5ad18ce572f163700 \ - --hash=sha256:7cd6eafabb09b5369afba3586d86b1f1ad6f1326d2ff01bc47bb26615dce4cbb +pygithub==2.8.1 \ + --hash=sha256:23a0a5bca93baef082e03411bf0ce27204c32be8bfa7abc92fe4a3e132936df0 \ + --hash=sha256:341b7c78521cb07324ff670afd1baa2bf5c286f8d9fd302c1798ba594a5400c9 # via -r src/requirements.in pygls==1.3.1 \ --hash=sha256:140edceefa0da0e9b3c533547c892a42a7d2fd9217ae848c330c53d266a55018 \ @@ -965,21 +1050,38 @@ pyjwt[crypto]==2.10.1 \ --hash=sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953 \ --hash=sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb # via pygithub -pynacl==1.5.0 \ - --hash=sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858 \ - --hash=sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d \ - --hash=sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93 \ - 
--hash=sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1 \ - --hash=sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92 \ - --hash=sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff \ - --hash=sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba \ - --hash=sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394 \ - --hash=sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b \ - --hash=sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543 +pynacl==1.6.1 \ + --hash=sha256:262a8de6bba4aee8a66f5edf62c214b06647461c9b6b641f8cd0cb1e3b3196fe \ + --hash=sha256:2b12f1b97346f177affcdfdc78875ff42637cb40dcf79484a97dae3448083a78 \ + --hash=sha256:319de653ef84c4f04e045eb250e6101d23132372b0a61a7acf91bac0fda8e58c \ + --hash=sha256:3206fa98737fdc66d59b8782cecc3d37d30aeec4593d1c8c145825a345bba0f0 \ + --hash=sha256:3384a454adf5d716a9fadcb5eb2e3e72cd49302d1374a60edc531c9957a9b014 \ + --hash=sha256:3cd787ec1f5c155dc8ecf39b1333cfef41415dc96d392f1ce288b4fe970df489 \ + --hash=sha256:4ce50d19f1566c391fedc8dc2f2f5be265ae214112ebe55315e41d1f36a7f0a9 \ + --hash=sha256:53543b4f3d8acb344f75fd4d49f75e6572fce139f4bfb4815a9282296ff9f4c0 \ + --hash=sha256:543f869140f67d42b9b8d47f922552d7a967e6c116aad028c9bfc5f3f3b3a7b7 \ + --hash=sha256:5953e8b8cfadb10889a6e7bd0f53041a745d1b3d30111386a1bb37af171e6daf \ + --hash=sha256:5a3becafc1ee2e5ea7f9abc642f56b82dcf5be69b961e782a96ea52b55d8a9fc \ + --hash=sha256:5f5b35c1a266f8a9ad22525049280a600b19edd1f785bccd01ae838437dcf935 \ + --hash=sha256:6b35d93ab2df03ecb3aa506be0d3c73609a51449ae0855c2e89c7ed44abde40b \ + --hash=sha256:7713f8977b5d25f54a811ec9efa2738ac592e846dd6e8a4d3f7578346a841078 \ + --hash=sha256:7d7c09749450c385301a3c20dca967a525152ae4608c0a096fe8464bfc3df93d \ + --hash=sha256:8d361dac0309f2b6ad33b349a56cd163c98430d409fa503b10b70b3ad66eaa1d \ + 
--hash=sha256:9fd1a4eb03caf8a2fe27b515a998d26923adb9ddb68db78e35ca2875a3830dde \ + --hash=sha256:a2bb472458c7ca959aeeff8401b8efef329b0fc44a89d3775cffe8fad3398ad8 \ + --hash=sha256:a569a4069a7855f963940040f35e87d8bc084cb2d6347428d5ad20550a0a1a21 \ + --hash=sha256:a6f9fd6d6639b1e81115c7f8ff16b8dedba1e8098d2756275d63d208b0e32021 \ + --hash=sha256:c2228054f04bf32d558fb89bb99f163a8197d5a9bf4efa13069a7fa8d4b93fc3 \ + --hash=sha256:d8615ee34d01c8e0ab3f302dcdd7b32e2bcf698ba5f4809e7cc407c8cdea7717 \ + --hash=sha256:d984c91fe3494793b2a1fb1e91429539c6c28e9ec8209d26d25041ec599ccf63 \ + --hash=sha256:dece79aecbb8f4640a1adbb81e4aa3bfb0e98e99834884a80eb3f33c7c30e708 \ + --hash=sha256:e49a3f3d0da9f79c1bec2aa013261ab9fa651c7da045d376bd306cf7c1792993 \ + --hash=sha256:e735c3a1bdfde3834503baf1a6d74d4a143920281cb724ba29fb84c9f49b9c48 \ + --hash=sha256:fc734c1696ffd49b40f7c1779c89ba908157c57345cf626be2e0719488a076d3 # via pygithub -pyparsing==3.2.3 \ - --hash=sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf \ - --hash=sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be +pyparsing==3.2.5 \ + --hash=sha256:2df8d5b7b2802ef88e8d016a2eb9c7aeaa923529cd251ed0fe4608275d4105b6 \ + --hash=sha256:e38a4f02064cf41fe6593d328d0512495ad1f3d8a91c4f73fc401b3079a59a5e # via matplotlib pyspellchecker==0.8.3 \ --hash=sha256:cb06eeafe124837f321e0d02f8e21deab713e966e28e0360319a28a089c43978 \ @@ -988,71 +1090,91 @@ pyspellchecker==0.8.3 \ pytest==8.3.5 \ --hash=sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820 \ --hash=sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845 - # via -r external/score_tooling+/python_basics/requirements.txt + # via -r /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/external/score_tooling+/python_basics/requirements.txt python-dateutil==2.9.0.post0 \ --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ 
--hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 # via matplotlib -pyyaml==6.0.2 \ - --hash=sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff \ - --hash=sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48 \ - --hash=sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086 \ - --hash=sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e \ - --hash=sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133 \ - --hash=sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5 \ - --hash=sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484 \ - --hash=sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee \ - --hash=sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5 \ - --hash=sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68 \ - --hash=sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a \ - --hash=sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf \ - --hash=sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99 \ - --hash=sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8 \ - --hash=sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85 \ - --hash=sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19 \ - --hash=sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc \ - --hash=sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a \ - --hash=sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1 \ - --hash=sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317 \ - --hash=sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c \ - --hash=sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631 \ - 
--hash=sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d \ - --hash=sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652 \ - --hash=sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5 \ - --hash=sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e \ - --hash=sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b \ - --hash=sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8 \ - --hash=sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476 \ - --hash=sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706 \ - --hash=sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563 \ - --hash=sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237 \ - --hash=sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b \ - --hash=sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083 \ - --hash=sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180 \ - --hash=sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425 \ - --hash=sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e \ - --hash=sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f \ - --hash=sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725 \ - --hash=sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183 \ - --hash=sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab \ - --hash=sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774 \ - --hash=sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725 \ - --hash=sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e \ - --hash=sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5 \ - --hash=sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d \ - 
--hash=sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290 \ - --hash=sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44 \ - --hash=sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed \ - --hash=sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4 \ - --hash=sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba \ - --hash=sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12 \ - --hash=sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4 +pyyaml==6.0.3 \ + --hash=sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c \ + --hash=sha256:0150219816b6a1fa26fb4699fb7daa9caf09eb1999f3b70fb6e786805e80375a \ + --hash=sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3 \ + --hash=sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956 \ + --hash=sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6 \ + --hash=sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c \ + --hash=sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65 \ + --hash=sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a \ + --hash=sha256:1ebe39cb5fc479422b83de611d14e2c0d3bb2a18bbcb01f229ab3cfbd8fee7a0 \ + --hash=sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b \ + --hash=sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1 \ + --hash=sha256:22ba7cfcad58ef3ecddc7ed1db3409af68d023b7f940da23c6c2a1890976eda6 \ + --hash=sha256:27c0abcb4a5dac13684a37f76e701e054692a9b2d3064b70f5e4eb54810553d7 \ + --hash=sha256:28c8d926f98f432f88adc23edf2e6d4921ac26fb084b028c733d01868d19007e \ + --hash=sha256:2e71d11abed7344e42a8849600193d15b6def118602c4c176f748e4583246007 \ + --hash=sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310 \ + 
--hash=sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4 \ + --hash=sha256:3c5677e12444c15717b902a5798264fa7909e41153cdf9ef7ad571b704a63dd9 \ + --hash=sha256:3ff07ec89bae51176c0549bc4c63aa6202991da2d9a6129d7aef7f1407d3f295 \ + --hash=sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea \ + --hash=sha256:418cf3f2111bc80e0933b2cd8cd04f286338bb88bdc7bc8e6dd775ebde60b5e0 \ + --hash=sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e \ + --hash=sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac \ + --hash=sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9 \ + --hash=sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7 \ + --hash=sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35 \ + --hash=sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb \ + --hash=sha256:5cf4e27da7e3fbed4d6c3d8e797387aaad68102272f8f9752883bc32d61cb87b \ + --hash=sha256:5e0b74767e5f8c593e8c9b5912019159ed0533c70051e9cce3e8b6aa699fcd69 \ + --hash=sha256:5ed875a24292240029e4483f9d4a4b8a1ae08843b9c54f43fcc11e404532a8a5 \ + --hash=sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b \ + --hash=sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c \ + --hash=sha256:6344df0d5755a2c9a276d4473ae6b90647e216ab4757f8426893b5dd2ac3f369 \ + --hash=sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd \ + --hash=sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824 \ + --hash=sha256:66291b10affd76d76f54fad28e22e51719ef9ba22b29e1d7d03d6777a9174198 \ + --hash=sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065 \ + --hash=sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c \ + --hash=sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c \ + --hash=sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764 \ + 
--hash=sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196 \ + --hash=sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b \ + --hash=sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00 \ + --hash=sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac \ + --hash=sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8 \ + --hash=sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e \ + --hash=sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28 \ + --hash=sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3 \ + --hash=sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5 \ + --hash=sha256:9c57bb8c96f6d1808c030b1687b9b5fb476abaa47f0db9c0101f5e9f394e97f4 \ + --hash=sha256:9c7708761fccb9397fe64bbc0395abcae8c4bf7b0eac081e12b809bf47700d0b \ + --hash=sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf \ + --hash=sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5 \ + --hash=sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702 \ + --hash=sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8 \ + --hash=sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788 \ + --hash=sha256:b865addae83924361678b652338317d1bd7e79b1f4596f96b96c77a5a34b34da \ + --hash=sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d \ + --hash=sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc \ + --hash=sha256:bdb2c67c6c1390b63c6ff89f210c8fd09d9a1217a465701eac7316313c915e4c \ + --hash=sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba \ + --hash=sha256:c2514fceb77bc5e7a2f7adfaa1feb2fb311607c9cb518dbc378688ec73d8292f \ + --hash=sha256:c3355370a2c156cffb25e876646f149d5d68f5e0a3ce86a5084dd0b64a994917 \ + --hash=sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5 \ + 
--hash=sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26 \ + --hash=sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f \ + --hash=sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b \ + --hash=sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be \ + --hash=sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c \ + --hash=sha256:efd7b85f94a6f21e4932043973a7ba2613b059c4a000551892ac9f1d11f5baf3 \ + --hash=sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6 \ + --hash=sha256:fa160448684b4e94d80416c0fa4aac48967a969efe22931448d853ada8baf926 \ + --hash=sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0 # via # myst-parser # sphinxcontrib-mermaid -referencing==0.36.2 \ - --hash=sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa \ - --hash=sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0 +referencing==0.37.0 \ + --hash=sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231 \ + --hash=sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8 # via # jsonschema # jsonschema-specifications @@ -1068,224 +1190,199 @@ requests-file==2.1.0 \ --hash=sha256:0f549a3f3b0699415ac04d167e9cb39bccfb730cb832b4d20be3d9867356e658 \ --hash=sha256:cf270de5a4c5874e84599fc5778303d496c10ae5e870bfa378818f35d21bda5c # via sphinx-needs -rich==14.1.0 \ - --hash=sha256:536f5f1785986d6dbdea3c75205c473f970777b4a0d6c6dd1b696aa05a3fa04f \ - --hash=sha256:e497a48b844b0320d45007cdebfeaeed8db2a4f4bcf49f15e455cfc4af11eaa8 +rich==14.2.0 \ + --hash=sha256:73ff50c7c0c1c77c8243079283f4edb376f0f6442433aecb8ce7e6d0b92d1fe4 \ + --hash=sha256:76bc51fe2e57d2b1be1f96c524b890b816e334ab4c1e45888799bfaab0021edd # via -r src/requirements.in roman-numerals-py==3.1.0 \ --hash=sha256:9da2ad2fb670bcf24e81070ceb3be72f6c11c440d73bd579fbeca1e9f330954c \ 
--hash=sha256:be4bf804f083a4ce001b5eb7e3c0862479d10f94c936f6c4e5f250aa5ff5bd2d # via sphinx -rpds-py==0.27.0 \ - --hash=sha256:010c4843a3b92b54373e3d2291a7447d6c3fc29f591772cc2ea0e9f5c1da434b \ - --hash=sha256:05284439ebe7d9f5f5a668d4d8a0a1d851d16f7d47c78e1fab968c8ad30cab04 \ - --hash=sha256:0665be515767dc727ffa5f74bd2ef60b0ff85dad6bb8f50d91eaa6b5fb226f51 \ - --hash=sha256:069e0384a54f427bd65d7fda83b68a90606a3835901aaff42185fcd94f5a9295 \ - --hash=sha256:08680820d23df1df0a0260f714d12966bc6c42d02e8055a91d61e03f0c47dda0 \ - --hash=sha256:0954e3a92e1d62e83a54ea7b3fdc9efa5d61acef8488a8a3d31fdafbfb00460d \ - --hash=sha256:09965b314091829b378b60607022048953e25f0b396c2b70e7c4c81bcecf932e \ - --hash=sha256:0c431bfb91478d7cbe368d0a699978050d3b112d7f1d440a41e90faa325557fd \ - --hash=sha256:0f401c369186a5743694dd9fc08cba66cf70908757552e1f714bfc5219c655b5 \ - --hash=sha256:0f4f69d7a4300fbf91efb1fb4916421bd57804c01ab938ab50ac9c4aa2212f03 \ - --hash=sha256:11e8e28c0ba0373d052818b600474cfee2fafa6c9f36c8587d217b13ee28ca7d \ - --hash=sha256:130c1ffa5039a333f5926b09e346ab335f0d4ec393b030a18549a7c7e7c2cea4 \ - --hash=sha256:1321bce595ad70e80f97f998db37356b2e22cf98094eba6fe91782e626da2f71 \ - --hash=sha256:13bbc4846ae4c993f07c93feb21a24d8ec637573d567a924b1001e81c8ae80f9 \ - --hash=sha256:14f028eb47f59e9169bfdf9f7ceafd29dd64902141840633683d0bad5b04ff34 \ - --hash=sha256:15ea4d2e182345dd1b4286593601d766411b43f868924afe297570658c31a62b \ - --hash=sha256:181bc29e59e5e5e6e9d63b143ff4d5191224d355e246b5a48c88ce6b35c4e466 \ - --hash=sha256:183f5e221ba3e283cd36fdfbe311d95cd87699a083330b4f792543987167eff1 \ - --hash=sha256:184f0d7b342967f6cda94a07d0e1fae177d11d0b8f17d73e06e36ac02889f303 \ - --hash=sha256:190d7285cd3bb6d31d37a0534d7359c1ee191eb194c511c301f32a4afa5a1dd4 \ - --hash=sha256:19c990fdf5acecbf0623e906ae2e09ce1c58947197f9bced6bbd7482662231c4 \ - --hash=sha256:1d66f45b9399036e890fb9c04e9f70c33857fd8f58ac8db9f3278cfa835440c3 \ - 
--hash=sha256:203f581accef67300a942e49a37d74c12ceeef4514874c7cede21b012613ca2c \ - --hash=sha256:20e222a44ae9f507d0f2678ee3dd0c45ec1e930f6875d99b8459631c24058aec \ - --hash=sha256:2406d034635d1497c596c40c85f86ecf2bf9611c1df73d14078af8444fe48031 \ - --hash=sha256:249ab91ceaa6b41abc5f19513cb95b45c6f956f6b89f1fe3d99c81255a849f9e \ - --hash=sha256:25a4aebf8ca02bbb90a9b3e7a463bbf3bee02ab1c446840ca07b1695a68ce424 \ - --hash=sha256:27bac29bbbf39601b2aab474daf99dbc8e7176ca3389237a23944b17f8913d97 \ - --hash=sha256:299a245537e697f28a7511d01038c310ac74e8ea213c0019e1fc65f52c0dcb23 \ - --hash=sha256:2cff9bdd6c7b906cc562a505c04a57d92e82d37200027e8d362518df427f96cd \ - --hash=sha256:2e307cb5f66c59ede95c00e93cd84190a5b7f3533d7953690b2036780622ba81 \ - --hash=sha256:2e39169ac6aae06dd79c07c8a69d9da867cef6a6d7883a0186b46bb46ccfb0c3 \ - --hash=sha256:2fe6e18e5c8581f0361b35ae575043c7029d0a92cb3429e6e596c2cdde251432 \ - --hash=sha256:3001013dae10f806380ba739d40dee11db1ecb91684febb8406a87c2ded23dae \ - --hash=sha256:32196b5a99821476537b3f7732432d64d93a58d680a52c5e12a190ee0135d8b5 \ - --hash=sha256:33ba649a6e55ae3808e4c39e01580dc9a9b0d5b02e77b66bb86ef117922b1264 \ - --hash=sha256:341d8acb6724c0c17bdf714319c393bb27f6d23d39bc74f94221b3e59fc31828 \ - --hash=sha256:343cf24de9ed6c728abefc5d5c851d5de06497caa7ac37e5e65dd572921ed1b5 \ - --hash=sha256:36184b44bf60a480863e51021c26aca3dfe8dd2f5eeabb33622b132b9d8b8b54 \ - --hash=sha256:3841f66c1ffdc6cebce8aed64e36db71466f1dc23c0d9a5592e2a782a3042c79 \ - --hash=sha256:4045e2fc4b37ec4b48e8907a5819bdd3380708c139d7cc358f03a3653abedb89 \ - --hash=sha256:419dd9c98bcc9fb0242be89e0c6e922df333b975d4268faa90d58499fd9c9ebe \ - --hash=sha256:42894616da0fc0dcb2ec08a77896c3f56e9cb2f4b66acd76fc8992c3557ceb1c \ - --hash=sha256:42ccc57ff99166a55a59d8c7d14f1a357b7749f9ed3584df74053fd098243451 \ - --hash=sha256:4300e15e7d03660f04be84a125d1bdd0e6b2f674bc0723bc0fd0122f1a4585dc \ - --hash=sha256:443d239d02d9ae55b74015234f2cd8eb09e59fbba30bf60baeb3123ad4c6d5ff \ - 
--hash=sha256:44524b96481a4c9b8e6c46d6afe43fa1fb485c261e359fbe32b63ff60e3884d8 \ - --hash=sha256:45d04a73c54b6a5fd2bab91a4b5bc8b426949586e61340e212a8484919183859 \ - --hash=sha256:46f48482c1a4748ab2773f75fffbdd1951eb59794e32788834b945da857c47a8 \ - --hash=sha256:4790c9d5dd565ddb3e9f656092f57268951398cef52e364c405ed3112dc7c7c1 \ - --hash=sha256:4bc262ace5a1a7dc3e2eac2fa97b8257ae795389f688b5adf22c5db1e2431c43 \ - --hash=sha256:4c3f8a0d4802df34fcdbeb3dfe3a4d8c9a530baea8fafdf80816fcaac5379d83 \ - --hash=sha256:5355527adaa713ab693cbce7c1e0ec71682f599f61b128cf19d07e5c13c9b1f1 \ - --hash=sha256:555ed147cbe8c8f76e72a4c6cd3b7b761cbf9987891b9448808148204aed74a5 \ - --hash=sha256:55d42a0ef2bdf6bc81e1cc2d49d12460f63c6ae1423c4f4851b828e454ccf6f1 \ - --hash=sha256:59195dc244fc183209cf8a93406889cadde47dfd2f0a6b137783aa9c56d67c85 \ - --hash=sha256:59714ab0a5af25d723d8e9816638faf7f4254234decb7d212715c1aa71eee7be \ - --hash=sha256:5b3a5c8089eed498a3af23ce87a80805ff98f6ef8f7bdb70bd1b7dae5105f6ac \ - --hash=sha256:5d6790ff400254137b81b8053b34417e2c46921e302d655181d55ea46df58cf7 \ - --hash=sha256:5df559e9e7644d9042f626f2c3997b555f347d7a855a15f170b253f6c5bfe358 \ - --hash=sha256:5fa01b3d5e3b7d97efab65bd3d88f164e289ec323a8c033c5c38e53ee25c007e \ - --hash=sha256:61490d57e82e23b45c66f96184237994bfafa914433b8cd1a9bb57fecfced59d \ - --hash=sha256:6168af0be75bba990a39f9431cdfae5f0ad501f4af32ae62e8856307200517b8 \ - --hash=sha256:64a0fe3f334a40b989812de70160de6b0ec7e3c9e4a04c0bbc48d97c5d3600ae \ - --hash=sha256:64f689ab822f9b5eb6dfc69893b4b9366db1d2420f7db1f6a2adf2a9ca15ad64 \ - --hash=sha256:699c346abc73993962cac7bb4f02f58e438840fa5458a048d3a178a7a670ba86 \ - --hash=sha256:6b96b0b784fe5fd03beffff2b1533dc0d85e92bab8d1b2c24ef3a5dc8fac5669 \ - --hash=sha256:6bde37765564cd22a676dd8101b657839a1854cfaa9c382c5abf6ff7accfd4ae \ - --hash=sha256:6c135708e987f46053e0a1246a206f53717f9fadfba27174a9769ad4befba5c3 \ - --hash=sha256:6c27a7054b5224710fcfb1a626ec3ff4f28bcb89b899148c72873b18210e446b \ - 
--hash=sha256:6de6a7f622860af0146cb9ee148682ff4d0cea0b8fd3ad51ce4d40efb2f061d0 \ - --hash=sha256:737005088449ddd3b3df5a95476ee1c2c5c669f5c30eed909548a92939c0e12d \ - --hash=sha256:7451ede3560086abe1aa27dcdcf55cd15c96b56f543fb12e5826eee6f721f858 \ - --hash=sha256:7873b65686a6471c0037139aa000d23fe94628e0daaa27b6e40607c90e3f5ec4 \ - --hash=sha256:79af163a4b40bbd8cfd7ca86ec8b54b81121d3b213b4435ea27d6568bcba3e9d \ - --hash=sha256:7aed8118ae20515974650d08eb724150dc2e20c2814bcc307089569995e88a14 \ - --hash=sha256:7cf9bc4508efb18d8dff6934b602324eb9f8c6644749627ce001d6f38a490889 \ - --hash=sha256:7e57906e38583a2cba67046a09c2637e23297618dc1f3caddbc493f2be97c93f \ - --hash=sha256:7ec85994f96a58cf7ed288caa344b7fe31fd1d503bdf13d7331ead5f70ab60d5 \ - --hash=sha256:81f81bbd7cdb4bdc418c09a73809abeda8f263a6bf8f9c7f93ed98b5597af39d \ - --hash=sha256:86aca1616922b40d8ac1b3073a1ead4255a2f13405e5700c01f7c8d29a03972d \ - --hash=sha256:88051c3b7d5325409f433c5a40328fcb0685fc04e5db49ff936e910901d10114 \ - --hash=sha256:887ab1f12b0d227e9260558a4a2320024b20102207ada65c43e1ffc4546df72e \ - --hash=sha256:8a06aa1197ec0281eb1d7daf6073e199eb832fe591ffa329b88bae28f25f5fe5 \ - --hash=sha256:8a1dca5507fa1337f75dcd5070218b20bc68cf8844271c923c1b79dfcbc20391 \ - --hash=sha256:8b23cf252f180cda89220b378d917180f29d313cd6a07b2431c0d3b776aae86f \ - --hash=sha256:8d0e09cf4863c74106b5265c2c310f36146e2b445ff7b3018a56799f28f39f6f \ - --hash=sha256:8de567dec6d451649a781633d36f5c7501711adee329d76c095be2178855b042 \ - --hash=sha256:90fb790138c1a89a2e58c9282fe1089638401f2f3b8dddd758499041bc6e0774 \ - --hash=sha256:92f3b3ec3e6008a1fe00b7c0946a170f161ac00645cde35e3c9a68c2475e8156 \ - --hash=sha256:935afcdea4751b0ac918047a2df3f720212892347767aea28f5b3bf7be4f27c0 \ - --hash=sha256:9a0ff7ee28583ab30a52f371b40f54e7138c52ca67f8ca17ccb7ccf0b383cb5f \ - --hash=sha256:9ad08547995a57e74fea6abaf5940d399447935faebbd2612b3b0ca6f987946b \ - --hash=sha256:9b2a4e17bfd68536c3b801800941c95a1d4a06e3cada11c146093ba939d9638d \ - 
--hash=sha256:9b78430703cfcf5f5e86eb74027a1ed03a93509273d7c705babb547f03e60016 \ - --hash=sha256:9d0f92b78cfc3b74a42239fdd8c1266f4715b573204c234d2f9fc3fc7a24f185 \ - --hash=sha256:9da162b718b12c4219eeeeb68a5b7552fbc7aadedf2efee440f88b9c0e54b45d \ - --hash=sha256:a00c91104c173c9043bc46f7b30ee5e6d2f6b1149f11f545580f5d6fdff42c0b \ - --hash=sha256:a029be818059870664157194e46ce0e995082ac49926f1423c1f058534d2aaa9 \ - --hash=sha256:a1b3db5fae5cbce2131b7420a3f83553d4d89514c03d67804ced36161fe8b6b2 \ - --hash=sha256:a4cf32a26fa744101b67bfd28c55d992cd19438aff611a46cac7f066afca8fd4 \ - --hash=sha256:aa0bf113d15e8abdfee92aa4db86761b709a09954083afcb5bf0f952d6065fdb \ - --hash=sha256:ab47fe727c13c09d0e6f508e3a49e545008e23bf762a245b020391b621f5b726 \ - --hash=sha256:af22763a0a1eff106426a6e1f13c4582e0d0ad89c1493ab6c058236174cd6c6a \ - --hash=sha256:af9d4fd79ee1cc8e7caf693ee02737daabfc0fcf2773ca0a4735b356c8ad6f7c \ - --hash=sha256:b1fef1f13c842a39a03409e30ca0bf87b39a1e2a305a9924deadb75a43105d23 \ - --hash=sha256:b2eff8ee57c5996b0d2a07c3601fb4ce5fbc37547344a26945dd9e5cbd1ed27a \ - --hash=sha256:b4c4fbbcff474e1e5f38be1bf04511c03d492d42eec0babda5d03af3b5589374 \ - --hash=sha256:b8a4131698b6992b2a56015f51646711ec5d893a0b314a4b985477868e240c87 \ - --hash=sha256:b8a7acf04fda1f30f1007f3cc96d29d8cf0a53e626e4e1655fdf4eabc082d367 \ - --hash=sha256:ba783541be46f27c8faea5a6645e193943c17ea2f0ffe593639d906a327a9bcc \ - --hash=sha256:be0744661afbc4099fef7f4e604e7f1ea1be1dd7284f357924af12a705cc7d5c \ - --hash=sha256:be3964f7312ea05ed283b20f87cb533fdc555b2e428cc7be64612c0b2124f08c \ - --hash=sha256:be806e2961cd390a89d6c3ce8c2ae34271cfcd05660f716257838bb560f1c3b6 \ - --hash=sha256:bec77545d188f8bdd29d42bccb9191682a46fb2e655e3d1fb446d47c55ac3b8d \ - --hash=sha256:c10d92fb6d7fd827e44055fcd932ad93dac6a11e832d51534d77b97d1d85400f \ - --hash=sha256:c3782fb753aa825b4ccabc04292e07897e2fd941448eabf666856c5530277626 \ - --hash=sha256:c9ce7a9e967afc0a2af7caa0d15a3e9c1054815f73d6a8cb9225b61921b419bd \ - 
--hash=sha256:cb0702c12983be3b2fab98ead349ac63a98216d28dda6f518f52da5498a27a1b \ - --hash=sha256:cbc619e84a5e3ab2d452de831c88bdcad824414e9c2d28cd101f94dbdf26329c \ - --hash=sha256:ce4ed8e0c7dbc5b19352b9c2c6131dd23b95fa8698b5cdd076307a33626b72dc \ - --hash=sha256:ce96ab0bdfcef1b8c371ada2100767ace6804ea35aacce0aef3aeb4f3f499ca8 \ - --hash=sha256:cf824aceaeffff029ccfba0da637d432ca71ab21f13e7f6f5179cd88ebc77a8a \ - --hash=sha256:d2a81bdcfde4245468f7030a75a37d50400ac2455c3a4819d9d550c937f90ab5 \ - --hash=sha256:d2cc2b34f9e1d31ce255174da82902ad75bd7c0d88a33df54a77a22f2ef421ee \ - --hash=sha256:d2f184336bc1d6abfaaa1262ed42739c3789b1e3a65a29916a615307d22ffd2e \ - --hash=sha256:d3c622c39f04d5751408f5b801ecb527e6e0a471b367f420a877f7a660d583f6 \ - --hash=sha256:d7cf5e726b6fa977e428a61880fb108a62f28b6d0c7ef675b117eaff7076df49 \ - --hash=sha256:d85d784c619370d9329bbd670f41ff5f2ae62ea4519761b679d0f57f0f0ee267 \ - --hash=sha256:d93ebdb82363d2e7bec64eecdc3632b59e84bd270d74fe5be1659f7787052f9b \ - --hash=sha256:db8a6313dbac934193fc17fe7610f70cd8181c542a91382531bef5ed785e5615 \ - --hash=sha256:dbc2ab5d10544eb485baa76c63c501303b716a5c405ff2469a1d8ceffaabf622 \ - --hash=sha256:dbd749cff1defbde270ca346b69b3baf5f1297213ef322254bf2a28537f0b046 \ - --hash=sha256:dc662bc9375a6a394b62dfd331874c434819f10ee3902123200dbcf116963f89 \ - --hash=sha256:dc6b0d5a1ea0318ef2def2b6a55dccf1dcaf77d605672347271ed7b829860765 \ - --hash=sha256:dc79d192fb76fc0c84f2c58672c17bbbc383fd26c3cdc29daae16ce3d927e8b2 \ - --hash=sha256:dd2c1d27ebfe6a015cfa2005b7fe8c52d5019f7bbdd801bc6f7499aab9ae739e \ - --hash=sha256:dea0808153f1fbbad772669d906cddd92100277533a03845de6893cadeffc8be \ - --hash=sha256:e0d7151a1bd5d0a203a5008fc4ae51a159a610cb82ab0a9b2c4d80241745582e \ - --hash=sha256:e14aab02258cb776a108107bd15f5b5e4a1bbaa61ef33b36693dfab6f89d54f9 \ - --hash=sha256:e24d8031a2c62f34853756d9208eeafa6b940a1efcbfe36e8f57d99d52bb7261 \ - --hash=sha256:e36c80c49853b3ffda7aa1831bf175c13356b210c73128c861f3aa93c3cc4015 \ - 
--hash=sha256:e377e4cf8795cdbdff75b8f0223d7b6c68ff4fef36799d88ccf3a995a91c0112 \ - --hash=sha256:e3acb9c16530362aeaef4e84d57db357002dc5cbfac9a23414c3e73c08301ab2 \ - --hash=sha256:e3dc8d4ede2dbae6c0fc2b6c958bf51ce9fd7e9b40c0f5b8835c3fde44f5807d \ - --hash=sha256:e6491658dd2569f05860bad645569145c8626ac231877b0fb2d5f9bcb7054089 \ - --hash=sha256:eb91d252b35004a84670dfeafadb042528b19842a0080d8b53e5ec1128e8f433 \ - --hash=sha256:f0396e894bd1e66c74ecbc08b4f6a03dc331140942c4b1d345dd131b68574a60 \ - --hash=sha256:f09c9d4c26fa79c1bad927efb05aca2391350b8e61c38cbc0d7d3c814e463124 \ - --hash=sha256:f3cd110e02c5bf17d8fb562f6c9df5c20e73029d587cf8602a2da6c5ef1e32cb \ - --hash=sha256:f7a37dd208f0d658e0487522078b1ed68cd6bce20ef4b5a915d2809b9094b410 \ - --hash=sha256:fae4a01ef8c4cb2bbe92ef2063149596907dc4a881a8d26743b3f6b304713171 \ - --hash=sha256:fc327f4497b7087d06204235199daf208fd01c82d80465dc5efa4ec9df1c5b4e \ - --hash=sha256:fcc01c57ce6e70b728af02b2401c5bc853a9e14eb07deda30624374f0aebfe42 \ - --hash=sha256:fde355b02934cc6b07200cc3b27ab0c15870a757d1a72fd401aa92e2ea3c6bfe +rpds-py==0.29.0 \ + --hash=sha256:00e56b12d2199ca96068057e1ae7f9998ab6e99cda82431afafd32f3ec98cca9 \ + --hash=sha256:0248b19405422573621172ab8e3a1f29141362d13d9f72bafa2e28ea0cdca5a2 \ + --hash=sha256:05a2bd42768ea988294ca328206efbcc66e220d2d9b7836ee5712c07ad6340ea \ + --hash=sha256:070befbb868f257d24c3bb350dbd6e2f645e83731f31264b19d7231dd5c396c7 \ + --hash=sha256:0a8896986efaa243ab713c69e6491a4138410f0fe36f2f4c71e18bd5501e8014 \ + --hash=sha256:0ea962671af5cb9a260489e311fa22b2e97103e3f9f0caaea6f81390af96a9ed \ + --hash=sha256:115f48170fd4296a33938d8c11f697f5f26e0472e43d28f35624764173a60e4d \ + --hash=sha256:12597d11d97b8f7e376c88929a6e17acb980e234547c92992f9f7c058f1a7310 \ + --hash=sha256:1585648d0760b88292eecab5181f5651111a69d90eff35d6b78aa32998886a61 \ + --hash=sha256:16e9da2bda9eb17ea318b4c335ec9ac1818e88922cbe03a5743ea0da9ecf74fb \ + 
--hash=sha256:1a409b0310a566bfd1be82119891fefbdce615ccc8aa558aff7835c27988cbef \ + --hash=sha256:1c3c3e8101bb06e337c88eb0c0ede3187131f19d97d43ea0e1c5407ea74c0cbf \ + --hash=sha256:1d24564a700ef41480a984c5ebed62b74e6ce5860429b98b1fede76049e953e6 \ + --hash=sha256:1de2345af363d25696969befc0c1688a6cb5e8b1d32b515ef84fc245c6cddba3 \ + --hash=sha256:1ea59b23ea931d494459c8338056fe7d93458c0bf3ecc061cd03916505369d55 \ + --hash=sha256:2023473f444752f0f82a58dfcbee040d0a1b3d1b3c2ec40e884bd25db6d117d2 \ + --hash=sha256:20c51ae86a0bb9accc9ad4e6cdeec58d5ebb7f1b09dd4466331fc65e1766aae7 \ + --hash=sha256:24a16cb7163933906c62c272de20ea3c228e4542c8c45c1d7dc2b9913e17369a \ + --hash=sha256:24a7231493e3c4a4b30138b50cca089a598e52c34cf60b2f35cebf62f274fdea \ + --hash=sha256:2549d833abdf8275c901313b9e8ff8fba57e50f6a495035a2a4e30621a2f7cc4 \ + --hash=sha256:28de03cf48b8a9e6ec10318f2197b83946ed91e2891f651a109611be4106ac4b \ + --hash=sha256:28fd300326dd21198f311534bdb6d7e989dd09b3418b3a91d54a0f384c700967 \ + --hash=sha256:295ce5ac7f0cf69a651ea75c8f76d02a31f98e5698e82a50a5f4d4982fbbae3b \ + --hash=sha256:2a21deb8e0d1571508c6491ce5ea5e25669b1dd4adf1c9d64b6314842f708b5d \ + --hash=sha256:2aba991e041d031c7939e1358f583ae405a7bf04804ca806b97a5c0e0af1ea5e \ + --hash=sha256:2b8e54d6e61f3ecd3abe032065ce83ea63417a24f437e4a3d73d2f85ce7b7cfe \ + --hash=sha256:2d6fb2ad1c36f91c4646989811e84b1ea5e0c3cf9690b826b6e32b7965853a63 \ + --hash=sha256:33ca7bdfedd83339ca55da3a5e1527ee5870d4b8369456b5777b197756f3ca22 \ + --hash=sha256:37d94eadf764d16b9a04307f2ab1d7af6dc28774bbe0535c9323101e14877b4c \ + --hash=sha256:3897924d3f9a0361472d884051f9a2460358f9a45b1d85a39a158d2f8f1ad71c \ + --hash=sha256:3919a3bbecee589300ed25000b6944174e07cd20db70552159207b3f4bbb45b8 \ + --hash=sha256:394d27e4453d3b4d82bb85665dc1fcf4b0badc30fc84282defed71643b50e1a1 \ + --hash=sha256:3fbd4e9aebf110473a420dea85a238b254cf8a15acb04b22a5a6b5ce8925b760 \ + --hash=sha256:3fd2164d73812026ce970d44c3ebd51e019d2a26a4425a5dcbdfa93a34abc383 \ + 
--hash=sha256:40f65470919dc189c833e86b2c4bd21bd355f98436a2cef9e0a9a92aebc8e57e \ + --hash=sha256:4448dad428f28a6a767c3e3b80cde3446a22a0efbddaa2360f4bb4dc836d0688 \ + --hash=sha256:44a91e0ab77bdc0004b43261a4b8cd6d6b451e8d443754cfda830002b5745b32 \ + --hash=sha256:453783477aa4f2d9104c4b59b08c871431647cb7af51b549bbf2d9eb9c827756 \ + --hash=sha256:4a097b7f7f7274164566ae90a221fd725363c0e9d243e2e9ed43d195ccc5495c \ + --hash=sha256:4aa195e5804d32c682e453b34474f411ca108e4291c6a0f824ebdc30a91c973c \ + --hash=sha256:4ae4b88c6617e1b9e5038ab3fccd7bac0842fdda2b703117b2aa99bc85379113 \ + --hash=sha256:521807963971a23996ddaf764c682b3e46459b3c58ccd79fefbe16718db43154 \ + --hash=sha256:534dc9df211387547267ccdb42253aa30527482acb38dd9b21c5c115d66a96d2 \ + --hash=sha256:539eb77eb043afcc45314d1be09ea6d6cafb3addc73e0547c171c6d636957f60 \ + --hash=sha256:55d827b2ae95425d3be9bc9a5838b6c29d664924f98146557f7715e331d06df8 \ + --hash=sha256:56838e1cd9174dc23c5691ee29f1d1be9eab357f27efef6bded1328b23e1ced2 \ + --hash=sha256:5a572911cd053137bbff8e3a52d31c5d2dba51d3a67ad902629c70185f3f2181 \ + --hash=sha256:5c9546cfdd5d45e562cc0444b6dddc191e625c62e866bf567a2c69487c7ad28a \ + --hash=sha256:5cc58aac218826d054c7da7f95821eba94125d88be673ff44267bb89d12a5866 \ + --hash=sha256:6410e66f02803600edb0b1889541f4b5cc298a5ccda0ad789cc50ef23b54813e \ + --hash=sha256:66786c3fb1d8de416a7fa8e1cb1ec6ba0a745b2b0eee42f9b7daa26f1a495545 \ + --hash=sha256:6e97846e9800a5d0fe7be4d008f0c93d0feeb2700da7b1f7528dabafb31dfadb \ + --hash=sha256:7033c1010b1f57bb44d8067e8c25aa6fa2e944dbf46ccc8c92b25043839c3fd2 \ + --hash=sha256:715b67eac317bf1c7657508170a3e011a1ea6ccb1c9d5f296e20ba14196be6b3 \ + --hash=sha256:72fdfd5ff8992e4636621826371e3ac5f3e3b8323e9d0e48378e9c13c3dac9d0 \ + --hash=sha256:76054d540061eda273274f3d13a21a4abdde90e13eaefdc205db37c05230efce \ + --hash=sha256:76fe96632d53f3bf0ea31ede2f53bbe3540cc2736d4aec3b3801b0458499ef3a \ + --hash=sha256:7971bdb7bf4ee0f7e6f67fa4c7fbc6019d9850cc977d126904392d363f6f8318 \ + 
--hash=sha256:799156ef1f3529ed82c36eb012b5d7a4cf4b6ef556dd7cc192148991d07206ae \ + --hash=sha256:7cdc0490374e31cedefefaa1520d5fe38e82fde8748cbc926e7284574c714d6b \ + --hash=sha256:7d9128ec9d8cecda6f044001fde4fb71ea7c24325336612ef8179091eb9596b9 \ + --hash=sha256:7f437026dbbc3f08c99cc41a5b2570c6e1a1ddbe48ab19a9b814254128d4ea7a \ + --hash=sha256:80fdf53d36e6c72819993e35d1ebeeb8e8fc688d0c6c2b391b55e335b3afba5a \ + --hash=sha256:8238d1d310283e87376c12f658b61e1ee23a14c0e54c7c0ce953efdbdc72deed \ + --hash=sha256:89ca2e673ddd5bde9b386da9a0aac0cab0e76f40c8f0aaf0d6311b6bbf2aa311 \ + --hash=sha256:8ae33ad9ce580c7a47452c3b3f7d8a9095ef6208e0a0c7e4e2384f9fc5bf8212 \ + --hash=sha256:8c5a8ecaa44ce2d8d9d20a68a2483a74c07f05d72e94a4dff88906c8807e77b0 \ + --hash=sha256:8e5bb73ffc029820f4348e9b66b3027493ae00bca6629129cd433fd7a76308ee \ + --hash=sha256:90f30d15f45048448b8da21c41703b31c61119c06c216a1bf8c245812a0f0c17 \ + --hash=sha256:923248a56dd8d158389a28934f6f69ebf89f218ef96a6b216a9be6861804d3f4 \ + --hash=sha256:9459a33f077130dbb2c7c3cea72ee9932271fb3126404ba2a2661e4fe9eb7b79 \ + --hash=sha256:97c817863ffc397f1e6a6e9d2d89fe5408c0a9922dac0329672fb0f35c867ea5 \ + --hash=sha256:9b9c764a11fd637e0322a488560533112837f5334ffeb48b1be20f6d98a7b437 \ + --hash=sha256:9ba8028597e824854f0f1733d8b964e914ae3003b22a10c2c664cb6927e0feb9 \ + --hash=sha256:9efe71687d6427737a0a2de9ca1c0a216510e6cd08925c44162be23ed7bed2d5 \ + --hash=sha256:9f84c549746a5be3bc7415830747a3a0312573afc9f95785eb35228bb17742ec \ + --hash=sha256:a0891cfd8db43e085c0ab93ab7e9b0c8fee84780d436d3b266b113e51e79f954 \ + --hash=sha256:a110e14508fd26fd2e472bb541f37c209409876ba601cf57e739e87d8a53cf95 \ + --hash=sha256:a5d9da3ff5af1ca1249b1adb8ef0573b94c76e6ae880ba1852f033bf429d4588 \ + --hash=sha256:a738f2da2f565989401bd6fd0b15990a4d1523c6d7fe83f300b7e7d17212feca \ + --hash=sha256:acd82a9e39082dc5f4492d15a6b6c8599aa21db5c35aaf7d6889aea16502c07d \ + --hash=sha256:ad7bd570be92695d89285a4b373006930715b78d96449f686af422debb4d3949 \ + 
--hash=sha256:b016eddf00dca7944721bf0cd85b6af7f6c4efaf83ee0b37c4133bd39757a8c7 \ + --hash=sha256:b1581fcde18fcdf42ea2403a16a6b646f8eb1e58d7f90a0ce693da441f76942e \ + --hash=sha256:b58f5c77f1af888b5fd1876c9a0d9858f6f88a39c9dd7c073a88e57e577da66d \ + --hash=sha256:b5f6134faf54b3cb83375db0f113506f8b7770785be1f95a631e7e2892101977 \ + --hash=sha256:b9cf2359a4fca87cfb6801fae83a76aedf66ee1254a7a151f1341632acf67f1b \ + --hash=sha256:ba5e1aeaf8dd6d8f6caba1f5539cddda87d511331714b7b5fc908b6cfc3636b7 \ + --hash=sha256:bb78b3a0d31ac1bde132c67015a809948db751cb4e92cdb3f0b242e430b6ed0d \ + --hash=sha256:bdb67151ea81fcf02d8f494703fb728d4d34d24556cbff5f417d74f6f5792e7c \ + --hash=sha256:c07d107b7316088f1ac0177a7661ca0c6670d443f6fe72e836069025e6266761 \ + --hash=sha256:c4695dd224212f6105db7ea62197144230b808d6b2bba52238906a2762f1d1e7 \ + --hash=sha256:c5523b0009e7c3c1263471b69d8da1c7d41b3ecb4cb62ef72be206b92040a950 \ + --hash=sha256:c661132ab2fb4eeede2ef69670fd60da5235209874d001a98f1542f31f2a8a94 \ + --hash=sha256:d37812c3da8e06f2bb35b3cf10e4a7b68e776a706c13058997238762b4e07f4f \ + --hash=sha256:d456e64724a075441e4ed648d7f154dc62e9aabff29bcdf723d0c00e9e1d352f \ + --hash=sha256:d472cf73efe5726a067dce63eebe8215b14beabea7c12606fd9994267b3cfe2b \ + --hash=sha256:d583d4403bcbf10cffc3ab5cee23d7643fcc960dff85973fd3c2d6c86e8dbb0c \ + --hash=sha256:de73e40ebc04dd5d9556f50180395322193a78ec247e637e741c1b954810f295 \ + --hash=sha256:def48ff59f181130f1a2cb7c517d16328efac3ec03951cca40c1dc2049747e83 \ + --hash=sha256:e6596b93c010d386ae46c9fba9bfc9fc5965fa8228edeac51576299182c2e31c \ + --hash=sha256:e71136fd0612556b35c575dc2726ae04a1669e6a6c378f2240312cf5d1a2ab10 \ + --hash=sha256:e7fa2ccc312bbd91e43aa5e0869e46bc03278a3dddb8d58833150a18b0f0283a \ + --hash=sha256:ea7173df5d86f625f8dde6d5929629ad811ed8decda3b60ae603903839ac9ac0 \ + --hash=sha256:f3b1b87a237cb2dba4db18bcfaaa44ba4cd5936b91121b62292ff21df577fc43 \ + --hash=sha256:f475f103488312e9bd4000bc890a95955a07b2d0b6e8884aef4be56132adbbf1 \ + 
--hash=sha256:f49196aec7c4b406495f60e6f947ad71f317a765f956d74bbd83996b9edc0352 \ + --hash=sha256:f49d41559cebd608042fdcf54ba597a4a7555b49ad5c1c0c03e0af82692661cd \ + --hash=sha256:f7728653900035fb7b8d06e1e5900545d8088efc9d5d4545782da7df03ec803f \ + --hash=sha256:f9f436aee28d13b9ad2c764fc273e0457e37c2e61529a07b928346b219fcde3b \ + --hash=sha256:fc31a07ed352e5462d3ee1b22e89285f4ce97d5266f6d1169da1142e78045626 \ + --hash=sha256:fc935f6b20b0c9f919a8ff024739174522abd331978f750a74bb68abd117bd19 \ + --hash=sha256:fcae1770b401167f8b9e1e3f566562e6966ffa9ce63639916248a9e25fa8a244 \ + --hash=sha256:fd7951c964069039acc9d67a8ff1f0a7f34845ae180ca542b17dc1456b1f1808 \ + --hash=sha256:fe55fe686908f50154d1dc599232016e50c243b438c3b7432f24e2895b0e5359 # via # jsonschema # referencing -ruamel-yaml==0.18.15 \ - --hash=sha256:148f6488d698b7a5eded5ea793a025308b25eca97208181b6a026037f391f701 \ - --hash=sha256:dbfca74b018c4c3fba0b9cc9ee33e53c371194a9000e694995e620490fd40700 +ruamel-yaml==0.18.16 \ + --hash=sha256:048f26d64245bae57a4f9ef6feb5b552a386830ef7a826f235ffb804c59efbba \ + --hash=sha256:a6e587512f3c998b2225d68aa1f35111c29fad14aed561a26e73fab729ec5e5a # via -r src/requirements.in -ruamel-yaml-clib==0.2.12 \ - --hash=sha256:040ae85536960525ea62868b642bdb0c2cc6021c9f9d507810c0c604e66f5a7b \ - --hash=sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4 \ - --hash=sha256:0b7e75b4965e1d4690e93021adfcecccbca7d61c7bddd8e22406ef2ff20d74ef \ - --hash=sha256:11f891336688faf5156a36293a9c362bdc7c88f03a8a027c2c1d8e0bcde998e5 \ - --hash=sha256:1492a6051dab8d912fc2adeef0e8c72216b24d57bd896ea607cb90bb0c4981d3 \ - --hash=sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632 \ - --hash=sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6 \ - --hash=sha256:2c59aa6170b990d8d2719323e628aaf36f3bfbc1c26279c0eeeb24d05d2d11c7 \ - --hash=sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680 \ - 
--hash=sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf \ - --hash=sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da \ - --hash=sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6 \ - --hash=sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a \ - --hash=sha256:4f6f3eac23941b32afccc23081e1f50612bdbe4e982012ef4f5797986828cd01 \ - --hash=sha256:5a0e060aace4c24dcaf71023bbd7d42674e3b230f7e7b97317baf1e953e5b519 \ - --hash=sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6 \ - --hash=sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f \ - --hash=sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd \ - --hash=sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2 \ - --hash=sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52 \ - --hash=sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd \ - --hash=sha256:943f32bc9dedb3abff9879edc134901df92cfce2c3d5c9348f172f62eb2d771d \ - --hash=sha256:95c3829bb364fdb8e0332c9931ecf57d9be3519241323c5274bd82f709cebc0c \ - --hash=sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6 \ - --hash=sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb \ - --hash=sha256:a52d48f4e7bf9005e8f0a89209bf9a73f7190ddf0489eee5eb51377385f59f2a \ - --hash=sha256:a606ef75a60ecf3d924613892cc603b154178ee25abb3055db5062da811fd969 \ - --hash=sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28 \ - --hash=sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d \ - --hash=sha256:bb43a269eb827806502c7c8efb7ae7e9e9d0573257a46e8e952f4d4caba4f31e \ - --hash=sha256:bc5f1e1c28e966d61d2519f2a3d451ba989f9ea0f2307de7bc45baa526de9e45 \ - --hash=sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4 \ - --hash=sha256:beffaed67936fbbeffd10966a4eb53c402fafd3d6833770516bf7314bc6ffa12 \ - 
--hash=sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31 \ - --hash=sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642 \ - --hash=sha256:d84318609196d6bd6da0edfa25cedfbabd8dbde5140a0a23af29ad4b8f91fb1e \ - --hash=sha256:d85252669dc32f98ebcd5d36768f5d4faeaeaa2d655ac0473be490ecdae3c285 \ - --hash=sha256:e143ada795c341b56de9418c58d028989093ee611aa27ffb9b7f609c00d813ed \ - --hash=sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1 \ - --hash=sha256:e2f1c3765db32be59d18ab3953f43ab62a761327aafc1594a2a1fbe038b8b8a7 \ - --hash=sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3 \ - --hash=sha256:e7e3736715fbf53e9be2a79eb4db68e4ed857017344d697e8b9749444ae57475 \ - --hash=sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5 \ - --hash=sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76 \ - --hash=sha256:fc4b630cd3fa2cf7fce38afa91d7cfe844a9f75d7f0f36393fa98815e911d987 \ - --hash=sha256:fd5415dded15c3822597455bc02bcd66e81ef8b7a48cb71a33628fc9fdde39df +ruamel-yaml-clib==0.2.15 \ + --hash=sha256:014181cdec565c8745b7cbc4de3bf2cc8ced05183d986e6d1200168e5bb59490 \ + --hash=sha256:04d21dc9c57d9608225da28285900762befbb0165ae48482c15d8d4989d4af14 \ + --hash=sha256:05c70f7f86be6f7bee53794d80050a28ae7e13e4a0087c1839dcdefd68eb36b6 \ + --hash=sha256:0ba6604bbc3dfcef844631932d06a1a4dcac3fee904efccf582261948431628a \ + --hash=sha256:11e5499db1ccbc7f4b41f0565e4f799d863ea720e01d3e99fa0b7b5fcd7802c9 \ + --hash=sha256:1b45498cc81a4724a2d42273d6cfc243c0547ad7c6b87b4f774cb7bcc131c98d \ + --hash=sha256:1bb7b728fd9f405aa00b4a0b17ba3f3b810d0ccc5f77f7373162e9b5f0ff75d5 \ + --hash=sha256:1f66f600833af58bea694d5892453f2270695b92200280ee8c625ec5a477eed3 \ + --hash=sha256:27dc656e84396e6d687f97c6e65fb284d100483628f02d95464fd731743a4afe \ + --hash=sha256:2812ff359ec1f30129b62372e5f22a52936fac13d5d21e70373dbca5d64bb97c \ + 
--hash=sha256:2b216904750889133d9222b7b873c199d48ecbb12912aca78970f84a5aa1a4bc \ + --hash=sha256:331fb180858dd8534f0e61aa243b944f25e73a4dae9962bd44c46d1761126bbf \ + --hash=sha256:3cb75a3c14f1d6c3c2a94631e362802f70e83e20d1f2b2ef3026c05b415c4900 \ + --hash=sha256:3eb199178b08956e5be6288ee0b05b2fb0b5c1f309725ad25d9c6ea7e27f962a \ + --hash=sha256:424ead8cef3939d690c4b5c85ef5b52155a231ff8b252961b6516ed7cf05f6aa \ + --hash=sha256:45702dfbea1420ba3450bb3dd9a80b33f0badd57539c6aac09f42584303e0db6 \ + --hash=sha256:468858e5cbde0198337e6a2a78eda8c3fb148bdf4c6498eaf4bc9ba3f8e780bd \ + --hash=sha256:46895c17ead5e22bea5e576f1db7e41cb273e8d062c04a6a49013d9f60996c25 \ + --hash=sha256:46e4cc8c43ef6a94885f72512094e482114a8a706d3c555a34ed4b0d20200600 \ + --hash=sha256:480894aee0b29752560a9de46c0e5f84a82602f2bc5c6cde8db9a345319acfdf \ + --hash=sha256:4b293a37dc97e2b1e8a1aec62792d1e52027087c8eea4fc7b5abd2bdafdd6642 \ + --hash=sha256:4be366220090d7c3424ac2b71c90d1044ea34fca8c0b88f250064fd06087e614 \ + --hash=sha256:4d1032919280ebc04a80e4fb1e93f7a738129857eaec9448310e638c8bccefcf \ + --hash=sha256:4d3b58ab2454b4747442ac76fab66739c72b1e2bb9bd173d7694b9f9dbc9c000 \ + --hash=sha256:4dcec721fddbb62e60c2801ba08c87010bd6b700054a09998c4d09c08147b8fb \ + --hash=sha256:512571ad41bba04eac7268fe33f7f4742210ca26a81fe0c75357fa682636c690 \ + --hash=sha256:542d77b72786a35563f97069b9379ce762944e67055bea293480f7734b2c7e5e \ + --hash=sha256:56ea19c157ed8c74b6be51b5fa1c3aff6e289a041575f0556f66e5fb848bb137 \ + --hash=sha256:5d3c9210219cbc0f22706f19b154c9a798ff65a6beeafbf77fc9c057ec806f7d \ + --hash=sha256:5fea0932358e18293407feb921d4f4457db837b67ec1837f87074667449f9401 \ + --hash=sha256:617d35dc765715fa86f8c3ccdae1e4229055832c452d4ec20856136acc75053f \ + --hash=sha256:64da03cbe93c1e91af133f5bec37fd24d0d4ba2418eaf970d7166b0a26a148a2 \ + --hash=sha256:65f48245279f9bb301d1276f9679b82e4c080a1ae25e679f682ac62446fac471 \ + --hash=sha256:6f1d38cbe622039d111b69e9ca945e7e3efebb30ba998867908773183357f3ed \ + 
--hash=sha256:713cd68af9dfbe0bb588e144a61aad8dcc00ef92a82d2e87183ca662d242f524 \ + --hash=sha256:71845d377c7a47afc6592aacfea738cc8a7e876d586dfba814501d8c53c1ba60 \ + --hash=sha256:753faf20b3a5906faf1fc50e4ddb8c074cb9b251e00b14c18b28492f933ac8ef \ + --hash=sha256:7e74ea87307303ba91073b63e67f2c667e93f05a8c63079ee5b7a5c8d0d7b043 \ + --hash=sha256:88eea8baf72f0ccf232c22124d122a7f26e8a24110a0273d9bcddcb0f7e1fa03 \ + --hash=sha256:923816815974425fbb1f1bf57e85eca6e14d8adc313c66db21c094927ad01815 \ + --hash=sha256:9b6f7d74d094d1f3a4e157278da97752f16ee230080ae331fcc219056ca54f77 \ + --hash=sha256:a8220fd4c6f98485e97aea65e1df76d4fed1678ede1fe1d0eed2957230d287c4 \ + --hash=sha256:ab0df0648d86a7ecbd9c632e8f8d6b21bb21b5fc9d9e095c796cacf32a728d2d \ + --hash=sha256:ac9b8d5fa4bb7fd2917ab5027f60d4234345fd366fe39aa711d5dca090aa1467 \ + --hash=sha256:badd1d7283f3e5894779a6ea8944cc765138b96804496c91812b2829f70e18a7 \ + --hash=sha256:bdc06ad71173b915167702f55d0f3f027fc61abd975bd308a0968c02db4a4c3e \ + --hash=sha256:bf0846d629e160223805db9fe8cc7aec16aaa11a07310c50c8c7164efa440aec \ + --hash=sha256:bfd309b316228acecfa30670c3887dcedf9b7a44ea39e2101e75d2654522acd4 \ + --hash=sha256:c583229f336682b7212a43d2fa32c30e643d3076178fb9f7a6a14dde85a2d8bd \ + --hash=sha256:cb15a2e2a90c8475df45c0949793af1ff413acfb0a716b8b94e488ea95ce7cff \ + --hash=sha256:d290eda8f6ada19e1771b54e5706b8f9807e6bb08e873900d5ba114ced13e02c \ + --hash=sha256:da3d6adadcf55a93c214d23941aef4abfd45652110aed6580e814152f385b862 \ + --hash=sha256:dcc7f3162d3711fd5d52e2267e44636e3e566d1e5675a5f0b30e98f2c4af7974 \ + --hash=sha256:def5663361f6771b18646620fca12968aae730132e104688766cf8a3b1d65922 \ + --hash=sha256:e5e9f630c73a490b758bf14d859a39f375e6999aea5ddd2e2e9da89b9953486a \ + --hash=sha256:e9fde97ecb7bb9c41261c2ce0da10323e9227555c674989f8d9eb7572fc2098d \ + --hash=sha256:ef71831bd61fbdb7aa0399d5c4da06bea37107ab5c79ff884cc07f2450910262 \ + --hash=sha256:f4421ab780c37210a07d138e56dd4b51f8642187cdfb433eb687fe8c11de0144 \ + 
--hash=sha256:f6d3655e95a80325b84c4e14c080b2470fe4f33b6846f288379ce36154993fb1 \ + --hash=sha256:fd4c928ddf6bce586285daa6d90680b9c291cfd045fc40aad34e445d57b1bf51 \ + --hash=sha256:fe239bdfdae2302e93bd6e8264bd9b71290218fff7084a9db250b55caaccf43f # via ruamel-yaml six==1.17.0 \ --hash=sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274 \ @@ -1303,9 +1400,9 @@ snowballstemmer==3.0.1 \ --hash=sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064 \ --hash=sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895 # via sphinx -soupsieve==2.7 \ - --hash=sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4 \ - --hash=sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a +soupsieve==2.8 \ + --hash=sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c \ + --hash=sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f # via beautifulsoup4 sphinx==8.2.3 \ --hash=sha256:398ad29dee7f63a75888314e9424d40f52ce5a6a87ae88e7071e80af296ec348 \ @@ -1324,9 +1421,9 @@ sphinx==8.2.3 \ # sphinxcontrib-jquery # sphinxcontrib-mermaid # sphinxcontrib-plantuml -sphinx-autobuild==2024.10.3 \ - --hash=sha256:158e16c36f9d633e613c9aaf81c19b0fc458ca78b112533b20dafcda430d60fa \ - --hash=sha256:248150f8f333e825107b6d4b86113ab28fa51750e5f9ae63b59dc339be951fb1 +sphinx-autobuild==2025.8.25 \ + --hash=sha256:9cf5aab32853c8c31af572e4fecdc09c997e2b8be5a07daf2a389e270e85b213 \ + --hash=sha256:b750ac7d5a18603e4665294323fd20f6dcc0a984117026d1986704fa68f0379a # via -r src/requirements.in sphinx-collections==0.3.1 \ --hash=sha256:4dda762479d2ad2163ccb074b15f36f72810d9cd08be4daa69854a6e34c99f92 \ @@ -1366,12 +1463,12 @@ sphinxcontrib-jsmath==1.0.1 \ --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 # via sphinx -sphinxcontrib-mermaid==1.0.0 \ - 
--hash=sha256:2e8ab67d3e1e2816663f9347d026a8dee4a858acdd4ad32dd1c808893db88146 \ - --hash=sha256:60b72710ea02087f212028feb09711225fbc2e343a10d34822fe787510e1caa3 +sphinxcontrib-mermaid==1.2.3 \ + --hash=sha256:358699d0ec924ef679b41873d9edd97d0773446daf9760c75e18dc0adfd91371 \ + --hash=sha256:5be782b27026bef97bfb15ccb2f7868b674a1afc0982b54cb149702cfc25aa02 # via -r src/requirements.in -sphinxcontrib-plantuml==0.30 \ - --hash=sha256:2a1266ca43bddf44640ae44107003df4490de2b3c3154a0d627cfb63e9a169bf +sphinxcontrib-plantuml==0.31 \ + --hash=sha256:fd74752f8ea070e641c3f8a402fccfa1d4a4056e0967b56033d2a76282d9f956 # via -r src/requirements.in sphinxcontrib-qthelp==2.0.0 \ --hash=sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab \ @@ -1381,9 +1478,9 @@ sphinxcontrib-serializinghtml==2.0.0 \ --hash=sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331 \ --hash=sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d # via sphinx -starlette==0.47.2 \ - --hash=sha256:6ae9aa5db235e4846decc1e7b79c4f346adf41e9777aebeb49dfd09bbd7023d8 \ - --hash=sha256:c5847e96134e5c5371ee9fac6fdf1a67336d5815e09eb2a01fdb57a351ef915b +starlette==0.50.0 \ + --hash=sha256:9e5391843ec9b6e472eed1365a78c8098cfceb7a74bfd4d6b1c0c0095efb3bca \ + --hash=sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca # via sphinx-autobuild tomli==2.3.0 \ --hash=sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456 \ @@ -1433,9 +1530,9 @@ tomli-w==1.2.0 \ --hash=sha256:188306098d013b691fcadc011abd66727d3c414c571bb01b1a174ba8c983cf90 \ --hash=sha256:2dd14fac5a47c27be9cd4c976af5a12d87fb1f0b4512f81d69cce3b35ae25021 # via needs-config-writer -typing-extensions==4.14.1 \ - --hash=sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36 \ - --hash=sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76 +typing-extensions==4.15.0 \ + 
--hash=sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466 \ + --hash=sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548 # via # anyio # beautifulsoup4 @@ -1450,117 +1547,120 @@ urllib3==2.5.0 \ # via # pygithub # requests -uvicorn==0.35.0 \ - --hash=sha256:197535216b25ff9b785e29a0b79199f55222193d47f820816e7da751e9bc8d4a \ - --hash=sha256:bc662f087f7cf2ce11a1d7fd70b90c9f98ef2e2831556dd078d131b96cc94a01 +uvicorn==0.38.0 \ + --hash=sha256:48c0afd214ceb59340075b4a052ea1ee91c16fbc2a9b1469cca0e54566977b02 \ + --hash=sha256:fd97093bdd120a2609fc0d3afe931d4d4ad688b6e75f0f929fde1bc36fe0e91d # via sphinx-autobuild -watchfiles==1.1.0 \ - --hash=sha256:00645eb79a3faa70d9cb15c8d4187bb72970b2470e938670240c7998dad9f13a \ - --hash=sha256:04e4ed5d1cd3eae68c89bcc1a485a109f39f2fd8de05f705e98af6b5f1861f1f \ - --hash=sha256:0a7d40b77f07be87c6faa93d0951a0fcd8cbca1ddff60a1b65d741bac6f3a9f6 \ - --hash=sha256:0ece16b563b17ab26eaa2d52230c9a7ae46cf01759621f4fbbca280e438267b3 \ - --hash=sha256:11ee4444250fcbeb47459a877e5e80ed994ce8e8d20283857fc128be1715dac7 \ - --hash=sha256:12b0a02a91762c08f7264e2e79542f76870c3040bbc847fb67410ab81474932a \ - --hash=sha256:12fe8eaffaf0faa7906895b4f8bb88264035b3f0243275e0bf24af0436b27259 \ - --hash=sha256:130fc497b8ee68dce163e4254d9b0356411d1490e868bd8790028bc46c5cc297 \ - --hash=sha256:17ab167cca6339c2b830b744eaf10803d2a5b6683be4d79d8475d88b4a8a4be1 \ - --hash=sha256:199207b2d3eeaeb80ef4411875a6243d9ad8bc35b07fc42daa6b801cc39cc41c \ - --hash=sha256:20ecc8abbd957046f1fe9562757903f5eaf57c3bce70929fda6c7711bb58074a \ - --hash=sha256:239736577e848678e13b201bba14e89718f5c2133dfd6b1f7846fa1b58a8532b \ - --hash=sha256:249590eb75ccc117f488e2fabd1bfa33c580e24b96f00658ad88e38844a040bb \ - --hash=sha256:27f30e14aa1c1e91cb653f03a63445739919aef84c8d2517997a83155e7a2fcc \ - --hash=sha256:29e7bc2eee15cbb339c68445959108803dc14ee0c7b4eea556400131a8de462b \ - --hash=sha256:328dbc9bff7205c215a7807da7c18dce37da7da718e798356212d22696404339 
\ - --hash=sha256:32d6d4e583593cb8576e129879ea0991660b935177c0f93c6681359b3654bfa9 \ - --hash=sha256:3366f56c272232860ab45c77c3ca7b74ee819c8e1f6f35a7125556b198bbc6df \ - --hash=sha256:3434e401f3ce0ed6b42569128b3d1e3af773d7ec18751b918b89cd49c14eaafb \ - --hash=sha256:37d3d3f7defb13f62ece99e9be912afe9dd8a0077b7c45ee5a57c74811d581a4 \ - --hash=sha256:3a6fd40bbb50d24976eb275ccb55cd1951dfb63dbc27cae3066a6ca5f4beabd5 \ - --hash=sha256:3aba215958d88182e8d2acba0fdaf687745180974946609119953c0e112397dc \ - --hash=sha256:406520216186b99374cdb58bc48e34bb74535adec160c8459894884c983a149c \ - --hash=sha256:4281cd9fce9fc0a9dbf0fc1217f39bf9cf2b4d315d9626ef1d4e87b84699e7e8 \ - --hash=sha256:42f92befc848bb7a19658f21f3e7bae80d7d005d13891c62c2cd4d4d0abb3433 \ - --hash=sha256:48aa25e5992b61debc908a61ab4d3f216b64f44fdaa71eb082d8b2de846b7d12 \ - --hash=sha256:5007f860c7f1f8df471e4e04aaa8c43673429047d63205d1630880f7637bca30 \ - --hash=sha256:50a51a90610d0845a5931a780d8e51d7bd7f309ebc25132ba975aca016b576a0 \ - --hash=sha256:51556d5004887045dba3acdd1fdf61dddea2be0a7e18048b5e853dcd37149b86 \ - --hash=sha256:51b81e55d40c4b4aa8658427a3ee7ea847c591ae9e8b81ef94a90b668999353c \ - --hash=sha256:5366164391873ed76bfdf618818c82084c9db7fac82b64a20c44d335eec9ced5 \ - --hash=sha256:54062ef956807ba806559b3c3d52105ae1827a0d4ab47b621b31132b6b7e2866 \ - --hash=sha256:60022527e71d1d1fda67a33150ee42869042bce3d0fcc9cc49be009a9cded3fb \ - --hash=sha256:622d6b2c06be19f6e89b1d951485a232e3b59618def88dbeda575ed8f0d8dbf2 \ - --hash=sha256:62cc7a30eeb0e20ecc5f4bd113cd69dcdb745a07c68c0370cea919f373f65d9e \ - --hash=sha256:693ed7ec72cbfcee399e92c895362b6e66d63dac6b91e2c11ae03d10d503e575 \ - --hash=sha256:6d2404af8db1329f9a3c9b79ff63e0ae7131986446901582067d9304ae8aaf7f \ - --hash=sha256:7049e52167fc75fc3cc418fc13d39a8e520cbb60ca08b47f6cedb85e181d2f2a \ - --hash=sha256:7080c4bb3efd70a07b1cc2df99a7aa51d98685be56be6038c3169199d0a1c69f \ - --hash=sha256:7738027989881e70e3723c75921f1efa45225084228788fc59ea8c6d732eb30d \ - 
--hash=sha256:7a7bd57a1bb02f9d5c398c0c1675384e7ab1dd39da0ca50b7f09af45fa435277 \ - --hash=sha256:7b3443f4ec3ba5aa00b0e9fa90cf31d98321cbff8b925a7c7b84161619870bc9 \ - --hash=sha256:7c55b0f9f68590115c25272b06e63f0824f03d4fc7d6deed43d8ad5660cabdbf \ - --hash=sha256:7fd1b3879a578a8ec2076c7961076df540b9af317123f84569f5a9ddee64ce92 \ - --hash=sha256:8076a5769d6bdf5f673a19d51da05fc79e2bbf25e9fe755c47595785c06a8c72 \ - --hash=sha256:80f811146831c8c86ab17b640801c25dc0a88c630e855e2bef3568f30434d52b \ - --hash=sha256:8412eacef34cae2836d891836a7fff7b754d6bcac61f6c12ba5ca9bc7e427b68 \ - --hash=sha256:865c8e95713744cf5ae261f3067861e9da5f1370ba91fc536431e29b418676fa \ - --hash=sha256:86b1e28d4c37e89220e924305cd9f82866bb0ace666943a6e4196c5df4d58dcc \ - --hash=sha256:891c69e027748b4a73847335d208e374ce54ca3c335907d381fde4e41661b13b \ - --hash=sha256:8ac164e20d17cc285f2b94dc31c384bc3aa3dd5e7490473b3db043dd70fbccfd \ - --hash=sha256:8c5701dc474b041e2934a26d31d39f90fac8a3dee2322b39f7729867f932b1d4 \ - --hash=sha256:90ebb429e933645f3da534c89b29b665e285048973b4d2b6946526888c3eb2c7 \ - --hash=sha256:923fec6e5461c42bd7e3fd5ec37492c6f3468be0499bc0707b4bbbc16ac21792 \ - --hash=sha256:935f9edd022ec13e447e5723a7d14456c8af254544cefbc533f6dd276c9aa0d9 \ - --hash=sha256:95ab1594377effac17110e1352989bdd7bdfca9ff0e5eeccd8c69c5389b826d0 \ - --hash=sha256:9974d2f7dc561cce3bb88dfa8eb309dab64c729de85fba32e98d75cf24b66297 \ - --hash=sha256:9c733cda03b6d636b4219625a4acb5c6ffb10803338e437fb614fef9516825ef \ - --hash=sha256:9dc001c3e10de4725c749d4c2f2bdc6ae24de5a88a339c4bce32300a31ede179 \ - --hash=sha256:9f811079d2f9795b5d48b55a37aa7773680a5659afe34b54cc1d86590a51507d \ - --hash=sha256:a2726d7bfd9f76158c84c10a409b77a320426540df8c35be172444394b17f7ea \ - --hash=sha256:a479466da6db5c1e8754caee6c262cd373e6e6c363172d74394f4bff3d84d7b5 \ - --hash=sha256:a543492513a93b001975ae283a51f4b67973662a375a403ae82f420d2c7205ee \ - --hash=sha256:a89c75a5b9bc329131115a409d0acc16e8da8dfd5867ba59f1dd66ae7ea8fa82 \ - 
--hash=sha256:a8f6f72974a19efead54195bc9bed4d850fc047bb7aa971268fd9a8387c89011 \ - --hash=sha256:a9ccbf1f129480ed3044f540c0fdbc4ee556f7175e5ab40fe077ff6baf286d4e \ - --hash=sha256:aa0cc8365ab29487eb4f9979fd41b22549853389e22d5de3f134a6796e1b05a4 \ - --hash=sha256:adb4167043d3a78280d5d05ce0ba22055c266cf8655ce942f2fb881262ff3cdf \ - --hash=sha256:af06c863f152005c7592df1d6a7009c836a247c9d8adb78fef8575a5a98699db \ - --hash=sha256:b067915e3c3936966a8607f6fe5487df0c9c4afb85226613b520890049deea20 \ - --hash=sha256:b7c5f6fe273291f4d414d55b2c80d33c457b8a42677ad14b4b47ff025d0893e4 \ - --hash=sha256:b915daeb2d8c1f5cee4b970f2e2c988ce6514aace3c9296e58dd64dc9aa5d575 \ - --hash=sha256:ba0e3255b0396cac3cc7bbace76404dd72b5438bf0d8e7cefa2f79a7f3649caa \ - --hash=sha256:bda8136e6a80bdea23e5e74e09df0362744d24ffb8cd59c4a95a6ce3d142f79c \ - --hash=sha256:bfe3c517c283e484843cb2e357dd57ba009cff351edf45fb455b5fbd1f45b15f \ - --hash=sha256:c588c45da9b08ab3da81d08d7987dae6d2a3badd63acdb3e206a42dbfa7cb76f \ - --hash=sha256:c600e85f2ffd9f1035222b1a312aff85fd11ea39baff1d705b9b047aad2ce267 \ - --hash=sha256:c68e9f1fcb4d43798ad8814c4c1b61547b014b667216cb754e606bfade587018 \ - --hash=sha256:c9649dfc57cc1f9835551deb17689e8d44666315f2e82d337b9f07bd76ae3aa2 \ - --hash=sha256:cb45350fd1dc75cd68d3d72c47f5b513cb0578da716df5fba02fff31c69d5f2d \ - --hash=sha256:cbcf8630ef4afb05dc30107bfa17f16c0896bb30ee48fc24bf64c1f970f3b1fd \ - --hash=sha256:cbd949bdd87567b0ad183d7676feb98136cde5bb9025403794a4c0db28ed3a47 \ - --hash=sha256:cc08ef8b90d78bfac66f0def80240b0197008e4852c9f285907377b2947ffdcb \ - --hash=sha256:cd17a1e489f02ce9117b0de3c0b1fab1c3e2eedc82311b299ee6b6faf6c23a29 \ - --hash=sha256:d05686b5487cfa2e2c28ff1aa370ea3e6c5accfe6435944ddea1e10d93872147 \ - --hash=sha256:d0e10e6f8f6dc5762adee7dece33b722282e1f59aa6a55da5d493a97282fedd8 \ - --hash=sha256:d181ef50923c29cf0450c3cd47e2f0557b62218c50b2ab8ce2ecaa02bd97e670 \ - --hash=sha256:d1caf40c1c657b27858f9774d5c0e232089bca9cb8ee17ce7478c6e9264d2587 \ - 
--hash=sha256:d7642b9bc4827b5518ebdb3b82698ada8c14c7661ddec5fe719f3e56ccd13c97 \ - --hash=sha256:d9481174d3ed982e269c090f780122fb59cee6c3796f74efe74e70f7780ed94c \ - --hash=sha256:d9ba68ec283153dead62cbe81872d28e053745f12335d037de9cbd14bd1877f5 \ - --hash=sha256:da71945c9ace018d8634822f16cbc2a78323ef6c876b1d34bbf5d5222fd6a72e \ - --hash=sha256:dc44678a72ac0910bac46fa6a0de6af9ba1355669b3dfaf1ce5f05ca7a74364e \ - --hash=sha256:df32d59cb9780f66d165a9a7a26f19df2c7d24e3bd58713108b41d0ff4f929c6 \ - --hash=sha256:df670918eb7dd719642e05979fc84704af913d563fd17ed636f7c4783003fdcc \ - --hash=sha256:e78b6ed8165996013165eeabd875c5dfc19d41b54f94b40e9fff0eb3193e5e8e \ - --hash=sha256:ed8fc66786de8d0376f9f913c09e963c66e90ced9aa11997f93bdb30f7c872a8 \ - --hash=sha256:eff4b8d89f444f7e49136dc695599a591ff769300734446c0a86cba2eb2f9895 \ - --hash=sha256:f21af781a4a6fbad54f03c598ab620e3a77032c5878f3d780448421a6e1818c7 \ - --hash=sha256:f2bcdc54ea267fe72bfc7d83c041e4eb58d7d8dc6f578dfddb52f037ce62f432 \ - --hash=sha256:f2f0498b7d2a3c072766dba3274fe22a183dbea1f99d188f1c6c72209a1063dc \ - --hash=sha256:f7208ab6e009c627b7557ce55c465c98967e8caa8b11833531fdf95799372633 \ - --hash=sha256:f7590d5a455321e53857892ab8879dce62d1f4b04748769f5adf2e707afb9d4f \ - --hash=sha256:fa257a4d0d21fcbca5b5fcba9dca5a78011cb93c0323fb8855c6d2dfbc76eb77 \ - --hash=sha256:fba9b62da882c1be1280a7584ec4515d0a6006a94d6e5819730ec2eab60ffe12 \ - --hash=sha256:fe4371595edf78c41ef8ac8df20df3943e13defd0efcb732b2e393b5a8a7a71f +watchfiles==1.1.1 \ + --hash=sha256:00485f441d183717038ed2e887a7c868154f216877653121068107b227a2f64c \ + --hash=sha256:03fa0f5237118a0c5e496185cafa92878568b652a2e9a9382a5151b1a0380a43 \ + --hash=sha256:04e78dd0b6352db95507fd8cb46f39d185cf8c74e4cf1e4fbad1d3df96faf510 \ + --hash=sha256:059098c3a429f62fc98e8ec62b982230ef2c8df68c79e826e37b895bc359a9c0 \ + --hash=sha256:08af70fd77eee58549cd69c25055dc344f918d992ff626068242259f98d598a2 \ + 
--hash=sha256:0b495de0bb386df6a12b18335a0285dda90260f51bdb505503c02bcd1ce27a8b \ + --hash=sha256:130e4876309e8686a5e37dba7d5e9bc77e6ed908266996ca26572437a5271e18 \ + --hash=sha256:14e0b1fe858430fc0251737ef3824c54027bedb8c37c38114488b8e131cf8219 \ + --hash=sha256:17ef139237dfced9da49fb7f2232c86ca9421f666d78c264c7ffca6601d154c3 \ + --hash=sha256:1a0bb430adb19ef49389e1ad368450193a90038b5b752f4ac089ec6942c4dff4 \ + --hash=sha256:1db5d7ae38ff20153d542460752ff397fcf5c96090c1230803713cf3147a6803 \ + --hash=sha256:28475ddbde92df1874b6c5c8aaeb24ad5be47a11f87cde5a28ef3835932e3e94 \ + --hash=sha256:2edc3553362b1c38d9f06242416a5d8e9fe235c204a4072e988ce2e5bb1f69f6 \ + --hash=sha256:30f7da3fb3f2844259cba4720c3fc7138eb0f7b659c38f3bfa65084c7fc7abce \ + --hash=sha256:311ff15a0bae3714ffb603e6ba6dbfba4065ab60865d15a6ec544133bdb21099 \ + --hash=sha256:319b27255aacd9923b8a276bb14d21a5f7ff82564c744235fc5eae58d95422ae \ + --hash=sha256:35c53bd62a0b885bf653ebf6b700d1bf05debb78ad9292cf2a942b23513dc4c4 \ + --hash=sha256:36193ed342f5b9842edd3532729a2ad55c4160ffcfa3700e0d54be496b70dd43 \ + --hash=sha256:39574d6370c4579d7f5d0ad940ce5b20db0e4117444e39b6d8f99db5676c52fd \ + --hash=sha256:399600947b170270e80134ac854e21b3ccdefa11a9529a3decc1327088180f10 \ + --hash=sha256:3a476189be23c3686bc2f4321dd501cb329c0a0469e77b7b534ee10129ae6374 \ + --hash=sha256:3ad9fe1dae4ab4212d8c91e80b832425e24f421703b5a42ef2e4a1e215aff051 \ + --hash=sha256:3bc570d6c01c206c46deb6e935a260be44f186a2f05179f52f7fcd2be086a94d \ + --hash=sha256:3dbd8cbadd46984f802f6d479b7e3afa86c42d13e8f0f322d669d79722c8ec34 \ + --hash=sha256:3e6f39af2eab0118338902798b5aa6664f46ff66bc0280de76fca67a7f262a49 \ + --hash=sha256:3f53fa183d53a1d7a8852277c92b967ae99c2d4dcee2bfacff8868e6e30b15f7 \ + --hash=sha256:3f6d37644155fb5beca5378feb8c1708d5783145f2a0f1c4d5a061a210254844 \ + --hash=sha256:3f7eb7da0eb23aa2ba036d4f616d46906013a68caf61b7fdbe42fc8b25132e77 \ + --hash=sha256:3fa0b59c92278b5a7800d3ee7733da9d096d4aabcfabb9a928918bd276ef9b9b \ + 
--hash=sha256:421e29339983e1bebc281fab40d812742268ad057db4aee8c4d2bce0af43b741 \ + --hash=sha256:4b943d3668d61cfa528eb949577479d3b077fd25fb83c641235437bc0b5bc60e \ + --hash=sha256:526e86aced14a65a5b0ec50827c745597c782ff46b571dbfe46192ab9e0b3c33 \ + --hash=sha256:52e06553899e11e8074503c8e716d574adeeb7e68913115c4b3653c53f9bae42 \ + --hash=sha256:544364b2b51a9b0c7000a4b4b02f90e9423d97fbbf7e06689236443ebcad81ab \ + --hash=sha256:5524298e3827105b61951a29c3512deb9578586abf3a7c5da4a8069df247cccc \ + --hash=sha256:55c7475190662e202c08c6c0f4d9e345a29367438cf8e8037f3155e10a88d5a5 \ + --hash=sha256:563b116874a9a7ce6f96f87cd0b94f7faf92d08d0021e837796f0a14318ef8da \ + --hash=sha256:57ca5281a8b5e27593cb7d82c2ac927ad88a96ed406aa446f6344e4328208e9e \ + --hash=sha256:5c85794a4cfa094714fb9c08d4a218375b2b95b8ed1666e8677c349906246c05 \ + --hash=sha256:5f3bde70f157f84ece3765b42b4a52c6ac1a50334903c6eaf765362f6ccca88a \ + --hash=sha256:5f3f58818dc0b07f7d9aa7fe9eb1037aecb9700e63e1f6acfed13e9fef648f5d \ + --hash=sha256:5fac835b4ab3c6487b5dbad78c4b3724e26bcc468e886f8ba8cc4306f68f6701 \ + --hash=sha256:620bae625f4cb18427b1bb1a2d9426dc0dd5a5ba74c7c2cdb9de405f7b129863 \ + --hash=sha256:672b8adf25b1a0d35c96b5888b7b18699d27d4194bac8beeae75be4b7a3fc9b2 \ + --hash=sha256:6aae418a8b323732fa89721d86f39ec8f092fc2af67f4217a2b07fd3e93c6101 \ + --hash=sha256:6c3631058c37e4a0ec440bf583bc53cdbd13e5661bb6f465bc1d88ee9a0a4d02 \ + --hash=sha256:6c9c9262f454d1c4d8aaa7050121eb4f3aea197360553699520767daebf2180b \ + --hash=sha256:6e43d39a741e972bab5d8100b5cdacf69db64e34eb19b6e9af162bccf63c5cc6 \ + --hash=sha256:7365b92c2e69ee952902e8f70f3ba6360d0d596d9299d55d7d386df84b6941fb \ + --hash=sha256:743185e7372b7bc7c389e1badcc606931a827112fbbd37f14c537320fca08620 \ + --hash=sha256:74472234c8370669850e1c312490f6026d132ca2d396abfad8830b4f1c096957 \ + --hash=sha256:74d5012b7630714b66be7b7b7a78855ef7ad58e8650c73afc4c076a1f480a8d6 \ + --hash=sha256:77a13aea58bc2b90173bc69f2a90de8e282648939a00a602e1dc4ee23e26b66d \ + 
--hash=sha256:79ff6c6eadf2e3fc0d7786331362e6ef1e51125892c75f1004bd6b52155fb956 \ + --hash=sha256:831a62658609f0e5c64178211c942ace999517f5770fe9436be4c2faeba0c0ef \ + --hash=sha256:836398932192dae4146c8f6f737d74baeac8b70ce14831a239bdb1ca882fc261 \ + --hash=sha256:842178b126593addc05acf6fce960d28bc5fae7afbaa2c6c1b3a7b9460e5be02 \ + --hash=sha256:8526e8f916bb5b9a0a777c8317c23ce65de259422bba5b31325a6fa6029d33af \ + --hash=sha256:859e43a1951717cc8de7f4c77674a6d389b106361585951d9e69572823f311d9 \ + --hash=sha256:88863fbbc1a7312972f1c511f202eb30866370ebb8493aef2812b9ff28156a21 \ + --hash=sha256:89eef07eee5e9d1fda06e38822ad167a044153457e6fd997f8a858ab7564a336 \ + --hash=sha256:8c89f9f2f740a6b7dcc753140dd5e1ab9215966f7a3530d0c0705c83b401bd7d \ + --hash=sha256:8c91ed27800188c2ae96d16e3149f199d62f86c7af5f5f4d2c61a3ed8cd3666c \ + --hash=sha256:8ca65483439f9c791897f7db49202301deb6e15fe9f8fe2fed555bf986d10c31 \ + --hash=sha256:8fbe85cb3201c7d380d3d0b90e63d520f15d6afe217165d7f98c9c649654db81 \ + --hash=sha256:91d4c9a823a8c987cce8fa2690923b069966dabb196dd8d137ea2cede885fde9 \ + --hash=sha256:9bb9f66367023ae783551042d31b1d7fd422e8289eedd91f26754a66f44d5cff \ + --hash=sha256:a173cb5c16c4f40ab19cecf48a534c409f7ea983ab8fed0741304a1c0a31b3f2 \ + --hash=sha256:a36d8efe0f290835fd0f33da35042a1bb5dc0e83cbc092dcf69bce442579e88e \ + --hash=sha256:a55f3e9e493158d7bfdb60a1165035f1cf7d320914e7b7ea83fe22c6023b58fc \ + --hash=sha256:a625815d4a2bdca61953dbba5a39d60164451ef34c88d751f6c368c3ea73d404 \ + --hash=sha256:a916a2932da8f8ab582f242c065f5c81bed3462849ca79ee357dd9551b0e9b01 \ + --hash=sha256:ac3cc5759570cd02662b15fbcd9d917f7ecd47efe0d6b40474eafd246f91ea18 \ + --hash=sha256:acb08650863767cbc58bca4813b92df4d6c648459dcaa3d4155681962b2aa2d3 \ + --hash=sha256:aebfd0861a83e6c3d1110b78ad54704486555246e542be3e2bb94195eabb2606 \ + --hash=sha256:afaeff7696e0ad9f02cbb8f56365ff4686ab205fcf9c4c5b6fdfaaa16549dd04 \ + --hash=sha256:b27cf2eb1dda37b2089e3907d8ea92922b673c0c427886d4edc6b94d8dfe5db3 \ + 
--hash=sha256:b2cd9e04277e756a2e2d2543d65d1e2166d6fd4c9b183f8808634fda23f17b14 \ + --hash=sha256:b9c4702f29ca48e023ffd9b7ff6b822acdf47cb1ff44cb490a3f1d5ec8987e9c \ + --hash=sha256:bbe1ef33d45bc71cf21364df962af171f96ecaeca06bd9e3d0b583efb12aec82 \ + --hash=sha256:bd404be08018c37350f0d6e34676bd1e2889990117a2b90070b3007f172d0610 \ + --hash=sha256:bf0a91bfb5574a2f7fc223cf95eeea79abfefa404bf1ea5e339c0c1560ae99a0 \ + --hash=sha256:bfb5862016acc9b869bb57284e6cb35fdf8e22fe59f7548858e2f971d045f150 \ + --hash=sha256:bfff9740c69c0e4ed32416f013f3c45e2ae42ccedd1167ef2d805c000b6c71a5 \ + --hash=sha256:c1f5210f1b8fc91ead1283c6fd89f70e76fb07283ec738056cf34d51e9c1d62c \ + --hash=sha256:c2047d0b6cea13b3316bdbafbfa0c4228ae593d995030fda39089d36e64fc03a \ + --hash=sha256:c22c776292a23bfc7237a98f791b9ad3144b02116ff10d820829ce62dff46d0b \ + --hash=sha256:c755367e51db90e75b19454b680903631d41f9e3607fbd941d296a020c2d752d \ + --hash=sha256:c882d69f6903ef6092bedfb7be973d9319940d56b8427ab9187d1ecd73438a70 \ + --hash=sha256:cb467c999c2eff23a6417e58d75e5828716f42ed8289fe6b77a7e5a91036ca70 \ + --hash=sha256:cdab464fee731e0884c35ae3588514a9bcf718d0e2c82169c1c4a85cc19c3c7f \ + --hash=sha256:ce19e06cbda693e9e7686358af9cd6f5d61312ab8b00488bc36f5aabbaf77e24 \ + --hash=sha256:ce70f96a46b894b36eba678f153f052967a0d06d5b5a19b336ab0dbbd029f73e \ + --hash=sha256:cf57a27fb986c6243d2ee78392c503826056ffe0287e8794503b10fb51b881be \ + --hash=sha256:d1715143123baeeaeadec0528bb7441103979a1d5f6fd0e1f915383fea7ea6d5 \ + --hash=sha256:d6ff426a7cb54f310d51bfe83fe9f2bbe40d540c741dc974ebc30e6aa238f52e \ + --hash=sha256:d7e7067c98040d646982daa1f37a33d3544138ea155536c2e0e63e07ff8a7e0f \ + --hash=sha256:db476ab59b6765134de1d4fe96a1a9c96ddf091683599be0f26147ea1b2e4b88 \ + --hash=sha256:dcc5c24523771db3a294c77d94771abcfcb82a0e0ee8efd910c37c59ec1b31bb \ + --hash=sha256:de6da501c883f58ad50db3a32ad397b09ad29865b5f26f64c24d3e3281685849 \ + --hash=sha256:e84087b432b6ac94778de547e08611266f1f8ffad28c0ee4c82e028b0fc5966d \ + 
--hash=sha256:eef58232d32daf2ac67f42dea51a2c80f0d03379075d44a587051e63cc2e368c \ + --hash=sha256:f096076119da54a6080e8920cbdaac3dbee667eb91dcc5e5b78840b87415bd44 \ + --hash=sha256:f0ab1c1af0cb38e3f598244c17919fb1a84d1629cc08355b0074b6d7f53138ac \ + --hash=sha256:f27db948078f3823a6bb3b465180db8ebecf26dd5dae6f6180bd87383b6b4428 \ + --hash=sha256:f537afb3276d12814082a2e9b242bdcf416c2e8fd9f799a737990a1dbe906e5b \ + --hash=sha256:f57b396167a2565a4e8b5e56a5a1c537571733992b226f4f1197d79e94cf0ae5 \ + --hash=sha256:f8979280bdafff686ba5e4d8f97840f929a87ed9cdf133cbbd42f7766774d2aa \ + --hash=sha256:f9a2ae5c91cecc9edd47e041a930490c31c3afb1f5e6d71de3dc671bfaca02bf # via sphinx-autobuild websockets==15.0.1 \ --hash=sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2 \ From b5df7e01ec6a01e20adb16e686971e4c96f2ee91 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Wed, 3 Dec 2025 15:03:23 +0100 Subject: [PATCH 168/231] fix: update doc_path to use SOURCE_DIRECTORY environment variable (#320) --- src/incremental.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/incremental.py b/src/incremental.py index 5699c6ec..91f14954 100644 --- a/src/incremental.py +++ b/src/incremental.py @@ -89,7 +89,7 @@ def get_env(name: str) -> str: base_arguments.append(f"-A=github_user={args.github_user}") base_arguments.append(f"-A=github_repo={args.github_repo}") base_arguments.append("-A=github_version=main") - base_arguments.append("-A=doc_path=docs") + base_arguments.append(f"-A=doc_path='{get_env('SOURCE_DIRECTORY')}'") action = get_env("ACTION") if action == "live_preview": From c5e0c3f23ce7e619e13a4ac7be9143b778f66208 Mon Sep 17 00:00:00 2001 From: PhilipPartsch <95444300+PhilipPartsch@users.noreply.github.com> Date: Thu, 4 Dec 2025 13:58:04 +0100 Subject: [PATCH 169/231] Introduce feature, sw-module and component as sphinx-needs elements (#323) Co-authored-by: Alexander Lanin Co-authored-by: Roland Jentsch --- 
src/extensions/score_metamodel/metamodel.yaml | 95 ++++++++++++++++++- 1 file changed, 90 insertions(+), 5 deletions(-) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index b67adfeb..a18c32f8 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -301,6 +301,8 @@ needs_types: # req-Id: tool_req__docs_req_attr_validity_correctness valid_from: ^v(0|[1-9]\d*)\.(0|[1-9]\d*)(\.(0|[1-9]\d*))?$ valid_until: ^v(0|[1-9]\d*)\.(0|[1-9]\d*)(\.(0|[1-9]\d*))?$ + optional_links: + belongs_to: feat # for evaluation tags: - requirement - requirement_excl_process @@ -330,6 +332,8 @@ needs_types: # req-Id: tool_req__docs_req_attr_testcov testcovered: ^(YES|NO)$ hash: ^.*$ + optional_links: + belongs_to: comp # TODO: make it mandatory tags: - requirement - requirement_excl_process @@ -393,6 +397,28 @@ needs_types: parts: 3 # - Architecture - + + # Architecture Element + # No process requirement - For evaluation: + # https://github.com/orgs/eclipse-score/discussions/407#discussioncomment-15125454 + feat: + title: Feature + color: #FEDCD2 + style: card + mandatory_options: + # req-Id: tool_req__docs_common_attr_security + security: ^(YES|NO)$ + # req-Id: tool_req__docs_common_attr_safety + safety: ^(QM|ASIL_B)$ + # req-Id: tool_req__docs_common_attr_status + status: ^(valid|invalid)$ + mandatory_links: + includes: ^logic_arc_int(_op)*__.+$ + consists_of: comp + tags: + - architecture_element + parts: 2 + # Architecture Element & View # req-Id: tool_req__docs_arch_types # req-Id: tool_req__docs_arch_views @@ -410,6 +436,8 @@ needs_types: mandatory_links: includes: ^logic_arc_int(_op)*__.+$ fulfils: feat_req + optional_links: + belongs_to: feat # for evaluation tags: - architecture_element - architecture_view @@ -430,6 +458,8 @@ needs_types: status: ^(valid|invalid)$ mandatory_links: fulfils: feat_req + optional_links: + belongs_to: feat # for evaluation tags: - 
architecture_view - architecture_element @@ -476,6 +506,26 @@ needs_types: - architecture_element parts: 3 + # Architecture Element + # No process requirement - For evaluation: + # https://github.com/orgs/eclipse-score/discussions/407#discussioncomment-15125454 + mod: + title: Module + color: #FEDCD2 + style: card + optional_options: + # req-Id: tool_req__docs_common_attr_security + security: ^(YES|NO)$ + # req-Id: tool_req__docs_common_attr_safety + safety: ^(QM|ASIL_B)$ + # req-Id: tool_req__docs_common_attr_status + status: ^(valid|invalid)$ + optional_links: + includes: comp + tags: + - architecture_element + parts: 2 + # Architecture View # req-Id: tool_req__docs_arch_views mod_view_sta: @@ -484,22 +534,46 @@ needs_types: style: card mandatory_links: includes: comp_arc_sta + optional_links: + belongs_to: mod # for evaluation tags: - architecture_view parts: 3 + # Architecture View # No process requirement mod_view_dyn: title: Module Architecture Dynamic View color: #FEDCD2 style: card parts: 3 + optional_links: + belongs_to: mod # for evaluation - # Architecture Element & View - # req-Id: tool_req__docs_arch_types + # Architecture Element + comp: + title: Component + color: #FEDCD2 + style: card + mandatory_options: + # req-Id: tool_req__docs_common_attr_security + security: ^(YES|NO)$ + # req-Id: tool_req__docs_common_attr_safety + safety: ^(QM|ASIL_B)$ + # req-Id: tool_req__docs_common_attr_status + status: ^(valid|invalid)$ + optional_links: + implements: logic_arc_int, real_arc_int_op + uses: logic_arc_int, real_arc_int_op + consists_of: comp, sw_unit + tags: + - architecture_element + parts: 2 + + # Architecture View # req-Id: tool_req__docs_arch_views comp_arc_sta: - title: Component & Component Package Diagram + title: Component Package Diagram color: #FEDCD2 style: card mandatory_options: @@ -514,8 +588,8 @@ needs_types: implements: logic_arc_int, real_arc_int_op includes: comp_arc_sta uses: logic_arc_int, real_arc_int_op + belongs_to: comp # TODO: 
make it mandatory tags: - - architecture_element - architecture_view parts: 3 @@ -534,9 +608,9 @@ needs_types: status: ^(valid|invalid)$ optional_links: fulfils: comp_req + belongs_to: comp # TODO: make it mandatory tags: - architecture_view - - architecture_element parts: 3 # Architecture Element & View @@ -607,6 +681,7 @@ needs_types: implements: comp_req satisfies: comp_arc_sta optional_links: + belongs_to: comp # TODO: make it mandatory includes: sw_unit parts: 3 @@ -622,6 +697,8 @@ needs_types: mandatory_links: implements: comp_req satisfies: comp_arc_sta + optional_links: + belongs_to: comp # TODO: make it mandatory parts: 3 sw_unit: @@ -851,6 +928,14 @@ needs_extra_links: outgoing: satisfies # Architecture + consists_of: + incoming: forms part of + outgoing: consists of + + belongs_to: + incoming: has + outgoing: belongs to + fulfils: incoming: fulfilled by outgoing: fulfils From 38ecb3d01f2c7b038d5e188caaceaac78d435ca7 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Fri, 5 Dec 2025 08:28:34 +0100 Subject: [PATCH 170/231] Bump version from 2.1.0 to 2.2.0 (#325) Signed-off-by: Alexander Lanin --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index df019fe9..f766e102 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "2.1.0", + version = "2.2.0", compatibility_level = 2, ) From 80ed1b5703076601fe929721bbbb7f846baba703 Mon Sep 17 00:00:00 2001 From: WolfgangFischerEtas Date: Wed, 10 Dec 2025 12:05:21 +0100 Subject: [PATCH 171/231] Do not include rst source into website to save some space (#279) Co-authored-by: Andreas Zwinkau --- src/extensions/score_sphinx_bundle/__init__.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/extensions/score_sphinx_bundle/__init__.py b/src/extensions/score_sphinx_bundle/__init__.py index 222d2447..bba802cc 100644 --- a/src/extensions/score_sphinx_bundle/__init__.py +++ 
b/src/extensions/score_sphinx_bundle/__init__.py @@ -33,6 +33,9 @@ def setup(app: Sphinx) -> dict[str, object]: + app.config.html_copy_source = False + app.config.html_show_sourcelink = False + # Global settings # Note: the "sub-extensions" also set their own config values From 02363642a60963a9561d4599c5a988826a7d30c3 Mon Sep 17 00:00:00 2001 From: RolandJentschETAS <135332348+RolandJentschETAS@users.noreply.github.com> Date: Tue, 6 Jan 2026 08:33:43 +0100 Subject: [PATCH 172/231] modify description of architecture types and fix drawing (#329) --- docs/internals/requirements/requirements.rst | 11 +- .../score_draw_uml_funcs/__init__.py | 2 +- src/extensions/score_metamodel/metamodel.yaml | 13 +- .../rst/architecture/architecture_tests.rst | 162 ++++++++++++++++++ .../rst/attributes/test_prohibited_words.rst | 2 +- .../tests/test_rules_file_based.py | 1 + 6 files changed, 178 insertions(+), 13 deletions(-) create mode 100644 src/extensions/score_metamodel/tests/rst/architecture/architecture_tests.rst diff --git a/docs/internals/requirements/requirements.rst b/docs/internals/requirements/requirements.rst index a476ab15..52203074 100644 --- a/docs/internals/requirements/requirements.rst +++ b/docs/internals/requirements/requirements.rst @@ -535,10 +535,12 @@ Mapping Docs-as-Code shall support the following architecture element types: - * Feature (feat_arc_sta) + * Feature Static View (feat_arc_sta) + * Feature (feat) * Logical Interface (logic_arc_int) * Logical Interface Operation (logic_arc_int_op) - * Component (comp_arc_sta) + * Component Static View (comp_arc_sta) + * Component (comp) * Interface (real_arc_int) * Interface Operation (real_arc_int_op) @@ -669,8 +671,9 @@ Architecture Attributes * Module View (mod_view_sta) .. note:: - feat_arc_sta, comp_arc_sta, logic_arc_int, real_arc_int are architecture elements - AND architecture views. 
+ feat_arc_sta, comp_arc_sta, logic_arc_int, real_arc_int are architecture views, + but are still defined as architectural elements, which means they have the properties of + architectural elements. 💻 Detailed Design & Code ########################## diff --git a/src/extensions/score_draw_uml_funcs/__init__.py b/src/extensions/score_draw_uml_funcs/__init__.py index 500ded74..067df1ea 100644 --- a/src/extensions/score_draw_uml_funcs/__init__.py +++ b/src/extensions/score_draw_uml_funcs/__init__.py @@ -156,7 +156,7 @@ def draw_comp_incl_impl_int( logger.info(f"{need}: include {need_inc} could not be found") continue - if curr_need["type"] != "comp_arc_sta": + if curr_need["type"] != "comp": continue sub_structure, sub_linkage, proc_impl_interfaces, proc_used_interfaces = ( diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index a18c32f8..e75ed9c4 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -419,7 +419,7 @@ needs_types: - architecture_element parts: 2 - # Architecture Element & View + # Architecture View (but owns the requirement properties of architectural elements) # req-Id: tool_req__docs_arch_types # req-Id: tool_req__docs_arch_views feat_arc_sta: @@ -443,7 +443,7 @@ needs_types: - architecture_view parts: 3 - # Architecture View + # Architecture View (but owns the requirement properties of architectural elements) # req-Id: tool_req__docs_arch_views feat_arc_dyn: title: Feature Sequence Diagram @@ -532,10 +532,9 @@ needs_types: title: Module Architecture Static View color: #FEDCD2 style: card - mandatory_links: - includes: comp_arc_sta optional_links: belongs_to: mod # for evaluation + includes: comp, comp_arc_sta # deprecated tags: - architecture_view parts: 3 @@ -570,7 +569,7 @@ needs_types: - architecture_element parts: 2 - # Architecture View + # Architecture View (but owns requirement properties of architectural elements) # req-Id: 
tool_req__docs_arch_views comp_arc_sta: title: Component Package Diagram @@ -586,14 +585,14 @@ needs_types: optional_links: fulfils: comp_req implements: logic_arc_int, real_arc_int_op - includes: comp_arc_sta + includes: comp_arc_sta, comp uses: logic_arc_int, real_arc_int_op belongs_to: comp # TODO: make it mandatory tags: - architecture_view parts: 3 - # Architecture View + # Architecture View (but owns requirement properties of architectural elements) # req-Id: tool_req__docs_arch_views comp_arc_dyn: title: Component Sequence Diagram diff --git a/src/extensions/score_metamodel/tests/rst/architecture/architecture_tests.rst b/src/extensions/score_metamodel/tests/rst/architecture/architecture_tests.rst new file mode 100644 index 00000000..dedd7532 --- /dev/null +++ b/src/extensions/score_metamodel/tests/rst/architecture/architecture_tests.rst @@ -0,0 +1,162 @@ +.. + # ******************************************************************************* + # Copyright (c) 2025 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # SPDX-License-Identifier: Apache-2.0 + # ******************************************************************************* + +#CHECK: check_options + +.. stkh_req:: Test Stakeholder Requirement 1 + :id: stkh_req__test_stakeholder_requirement_1__basic_stkh_req + :reqtype: Non-Functional + :safety: ASIL_B + :security: YES + :rationale: Exists just for the test the component / feature drawings + :status: invalid + +.. 
feat_req:: Test Feature Requirement 1 + :id: feat_req__test_feature_1__test_req_1 + :reqtype: Process + :security: YES + :safety: ASIL_B + :satisfies: stkh_req__test_stakeholder_requirement_1__basic_stkh_req + :status: invalid + + Test Feature Requirement 1 + +.. feat:: Test Feature 1 + :id: feat__test_feature_1 + :security: YES + :safety: ASIL_B + :status: invalid + :includes: logic_arc_int__test_feature_1__test_interface_1, logic_arc_int_op__test_feature_1__test_operation_1 + :consists_of: comp__test_component_1, comp__test_component_2 + +.. feat_arc_sta:: Test Feature Static View Feature 1 + :id: feat_arc_sta__test_feature_1__static_view + :security: YES + :safety: ASIL_B + :status: invalid + :fulfils: feat_req__test_feature_1__test_req_1 + :includes: logic_arc_int__test_feature_1__test_interface_1 + + .. needarch + :scale: 50 + :align: center + + {{ draw_feature(need(), needs) }} + +.. feat_arc_dyn:: Test Feature Static Dynamic Feature 1 + :id: feat_arc_dyn__test_feature_1__dynamic_view + :security: YES + :safety: ASIL_B + :status: invalid + :fulfils: feat_req__test_feature_1__test_req_1 + + Put here a sequence diagram + +.. logic_arc_int:: Logic Interface Test 1 + :id: logic_arc_int__test_feature_1__test_interface_1 + :security: YES + :safety: ASIL_B + :status: invalid + +.. logic_arc_int_op:: Logic Operation Test 1 + :id: logic_arc_int_op__test_feature_1__test_operation_1 + :security: YES + :safety: ASIL_B + :status: invalid + :included_by: logic_arc_int__test_feature_1__test_interface_1 + +.. logic_arc_int:: Logic Interface Test 2 + :id: logic_arc_int__test_feature_1__test_interface_2 + :security: YES + :safety: ASIL_B + :status: invalid + +Component 1 +~~~~~~~~~~~ + +.. 
comp:: Test Component 1 + :id: comp__test_component_1 + :security: YES + :safety: ASIL_B + :status: invalid + :implements: logic_arc_int__test_feature_1__test_interface_1 + :uses: logic_arc_int__test_feature_1__test_interface_2 + :consists_of: sw_unit__component_1__test_unit_1, comp__test_sub_component_1 + +.. comp_req:: Test Component 1 Requirement 1 + :id: comp_req__test_component_1__requirement_1 + :reqtype: Process + :security: YES + :safety: ASIL_B + :satisfies: feat_req__test_feature_1__test_req_1 + :status: invalid + + Test Component 1 Requirement + +.. sw_unit:: SW Test Unit 1 + :id: sw_unit__component_1__test_unit_1 + :security: YES + :safety: ASIL_B + :status: invalid + +.. comp:: Test Sub Component 1 + :id: comp__test_sub_component_1 + :security: YES + :safety: ASIL_B + :status: invalid + :implements: logic_arc_int__test_feature_1__test_interface_1 + :consists_of: sw_unit__sub_component_1__test_unit_2 + +.. comp_arc_sta:: Test Component Architecture Component 1 + :id: comp_arc_sta__feature_name__component_name + :safety: ASIL_B + :security: YES + :status: invalid + :fulfils: comp_req__test_component_1__requirement_1 + :implements: logic_arc_int__test_feature_1__test_interface_1 + :belongs_to: comp__test_sub_component_1 + +.. sw_unit:: SW Test Unit 2 + :id: sw_unit__sub_component_1__test_unit_2 + :security: YES + :safety: ASIL_B + :status: invalid + +Component 1 +~~~~~~~~~~~ + +.. comp:: Test Component 2 + :id: comp__test_component_2 + :security: YES + :safety: QM + :status: invalid + :implements: logic_arc_int__test_feature_1__test_interface_2 + +.. mod:: Feature Test Module 1 + :id: mod__test_feature_1_module_1 + :security: YES + :safety: ASIL_B + :status: valid + :includes: comp__test_component_1, comp__test_component_2 + +.. mod_view_sta:: Feature Test Module 1 Static View + :id: mod_view_sta__test_feature_1_module_1__test_static_view_1 + :belongs_to: mod__test_feature_1_module_1 + :includes: comp_arc_sta__feature_name__component_name + + .. 
needarch + :scale: 50 + :align: center + + {{ draw_module(need(), needs) }} diff --git a/src/extensions/score_metamodel/tests/rst/attributes/test_prohibited_words.rst b/src/extensions/score_metamodel/tests/rst/attributes/test_prohibited_words.rst index 40546c00..4a470eca 100644 --- a/src/extensions/score_metamodel/tests/rst/attributes/test_prohibited_words.rst +++ b/src/extensions/score_metamodel/tests/rst/attributes/test_prohibited_words.rst @@ -66,7 +66,7 @@ -.. Description of requirement of type feat_arc_sta is not checked for weak words +.. Description of architecture view of type feat_arc_sta is not checked for weak words #EXPECT-NOT: feat_arc_sta_desc_good: contains a weak word: `really` in option: `content`. Please revise the wording. .. feat_arc_sta:: This is a test diff --git a/src/extensions/score_metamodel/tests/test_rules_file_based.py b/src/extensions/score_metamodel/tests/test_rules_file_based.py index 050a60b5..13d3ace5 100644 --- a/src/extensions/score_metamodel/tests/test_rules_file_based.py +++ b/src/extensions/score_metamodel/tests/test_rules_file_based.py @@ -193,6 +193,7 @@ def test_rst_files( # Collect the warnings warnings = app.warning.getvalue().splitlines() + print("\n".join(w for w in warnings if "score_metamodel" in w)) # Check if the expected warnings are present for warning_info in rst_data.warning_infos: From a177ece3b0ee1fe6a418948e05633c5c8180ce74 Mon Sep 17 00:00:00 2001 From: PhilipPartsch <95444300+PhilipPartsch@users.noreply.github.com> Date: Tue, 6 Jan 2026 10:06:57 +0100 Subject: [PATCH 173/231] update consists_of and add belongs_to optional links in needs_types (#331) --- src/extensions/score_metamodel/metamodel.yaml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index e75ed9c4..f0086b87 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ 
-564,7 +564,7 @@ needs_types: optional_links: implements: logic_arc_int, real_arc_int_op uses: logic_arc_int, real_arc_int_op - consists_of: comp, sw_unit + consists_of: comp tags: - architecture_element parts: 2 @@ -681,7 +681,7 @@ needs_types: satisfies: comp_arc_sta optional_links: belongs_to: comp # TODO: make it mandatory - includes: sw_unit + includes: sw_unit, sw_unit_int parts: 3 # req-Id: tool_req__docs_dd_dyn @@ -698,6 +698,7 @@ needs_types: satisfies: comp_arc_sta optional_links: belongs_to: comp # TODO: make it mandatory + includes: sw_unit, sw_unit_int parts: 3 sw_unit: @@ -706,16 +707,21 @@ needs_types: security: ^(YES|NO)$ safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ + optional_links: + belongs_to: comp # TODO: make it mandatory parts: 3 sw_unit_int: - title: Software unit interfaces + title: Software unit interface color: #FEDCD2 style: card mandatory_options: security: ^(YES|NO)$ safety: ^(QM|ASIL_B)$ status: ^(valid|invalid)$ + optional_links: + belongs_to: comp # TODO: make it mandatory + implements: real_arc_int, real_arc_int_op parts: 3 # DFA (Dependent Failure Analysis) From 64df59849822baa171c4bca442cb2a7e30236f22 Mon Sep 17 00:00:00 2001 From: RolandJentschETAS <135332348+RolandJentschETAS@users.noreply.github.com> Date: Thu, 8 Jan 2026 10:41:19 +0100 Subject: [PATCH 174/231] increase release version (#334) --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index f766e102..72e8ae14 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,7 +13,7 @@ module( name = "score_docs_as_code", - version = "2.2.0", + version = "2.3.0", compatibility_level = 2, ) From 3b2271ddfdb54b74df7a57f689043f8b6d517516 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Tue, 13 Jan 2026 10:30:44 +0100 Subject: [PATCH 175/231] fix: update type checks for component and module in UML drawing functions (#338) --- src/extensions/score_draw_uml_funcs/__init__.py | 2 +- 
src/extensions/score_draw_uml_funcs/helpers.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/extensions/score_draw_uml_funcs/__init__.py b/src/extensions/score_draw_uml_funcs/__init__.py index 067df1ea..a5eb91ca 100644 --- a/src/extensions/score_draw_uml_funcs/__init__.py +++ b/src/extensions/score_draw_uml_funcs/__init__.py @@ -361,7 +361,7 @@ def draw_module( if not curr_need: logger.info(f"{need}: include with id {need_inc} could not be found") continue - if curr_need["type"] not in ["comp_arc_sta", "mod_view_sta"]: + if curr_need["type"] not in ["comp", "mod"]: continue sub_structure, sub_linkage, proc_impl_interfaces, proc_used_interfaces = ( draw_comp_incl_impl_int( diff --git a/src/extensions/score_draw_uml_funcs/helpers.py b/src/extensions/score_draw_uml_funcs/helpers.py index 37b1eddf..f82e2396 100644 --- a/src/extensions/score_draw_uml_funcs/helpers.py +++ b/src/extensions/score_draw_uml_funcs/helpers.py @@ -28,7 +28,7 @@ def gen_format(need: dict[str, str]) -> str: style = "" - if "comp_arc_sta" in need["type"] and need["safety"] == "ASIL_B": + if "comp" in need["type"] and need["safety"] == "ASIL_B": style = "<>" if "real_arc_int" in need["type"]: From 8bf33359bdee37c00afbb17c93e09eaba6d781b2 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Tue, 13 Jan 2026 16:40:22 +0100 Subject: [PATCH 176/231] drop module version (#341) --- MODULE.bazel | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 72e8ae14..ffebf8d2 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -11,10 +11,11 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* +# Versioning is handled by GitHub Releases. +# Here we always keep "0.0.0" to indicate that. 
module( name = "score_docs_as_code", - version = "2.3.0", - compatibility_level = 2, + version = "0.0.0", ) ############################################################################### From 7c591f44ec838a454c92e39ea0906d867ba0df27 Mon Sep 17 00:00:00 2001 From: RolandJentschETAS <135332348+RolandJentschETAS@users.noreply.github.com> Date: Wed, 14 Jan 2026 17:12:07 +0100 Subject: [PATCH 177/231] bugfix draw function for modules (#344) * fix draw function for modules to read only modules * add warning for multiple modules --- src/extensions/score_draw_uml_funcs/helpers.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/extensions/score_draw_uml_funcs/helpers.py b/src/extensions/score_draw_uml_funcs/helpers.py index f82e2396..456bed7e 100644 --- a/src/extensions/score_draw_uml_funcs/helpers.py +++ b/src/extensions/score_draw_uml_funcs/helpers.py @@ -127,7 +127,14 @@ def get_module(component: str, all_needs: dict[str, dict[str, str]]) -> str: need = all_needs.get(component, {}) if need: - module = need.get("includes_back", "") + # includes_back could deliver multiple needs; only return Modules + parents = need.get("includes_back", []) + module = [pid for pid in parents if all_needs.get(pid, {}).get("type") == "mod"] + + if len(module) > 1: + logger.warning( + f"{component}: included in multiple modules: {module}. Returning first." + ) if module: return module[0] From 86b014562080763d2062553e922fb057c63a0ef1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Wed, 14 Jan 2026 17:13:27 +0100 Subject: [PATCH 178/231] Fixing Workflow unit-test execution & Adding Test-Reports functionality (#343) * Run unit-tests instead of consumer-tests Currently consumer-tests are run when the documentation is build. This is not needed as the consumer tests are executed separately as well, and the normal build should execute the unit-tests instead. 
This also enables the testlinker & xml parser to look for the 'tests-report' folder to enable testing being done outside of bazel. Closes: #328 --- .github/workflows/test.yml | 17 ++ .github/workflows/test_and_docs.yml | 6 +- .../extensions/source_code_linker.md | 2 +- .../tests/test_xml_parser.py | 182 +++++++++++------- .../score_source_code_linker/xml_parser.py | 20 +- 5 files changed, 154 insertions(+), 73 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 31ea1171..a2f9ab93 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -15,6 +15,7 @@ name: Run Bazel Tests on: pull_request: types: [opened, reopened, synchronize] + workflow_call: jobs: code: runs-on: ubuntu-latest @@ -39,3 +40,19 @@ jobs: run: | bazel run //:ide_support bazel test //src/... + + - name: Prepare bundled consumer report + if: always() + # Creating tests-report directory + # Follow Symlinks via '-L' to copy correctly + # Copy everything inside the 'test-reports' folder + run: | + mkdir -p tests-report + rsync -amL --include='*/' --include='test.xml' --include='test.log' --exclude='*' bazel-testlogs/ tests-report/ + + - name: Upload bundled consumer report + if: always() + uses: actions/upload-artifact@v4 + with: + name: tests-report + path: tests-report diff --git a/.github/workflows/test_and_docs.yml b/.github/workflows/test_and_docs.yml index 542bc6da..c5243895 100644 --- a/.github/workflows/test_and_docs.yml +++ b/.github/workflows/test_and_docs.yml @@ -38,13 +38,13 @@ jobs: bazel-docs-verify-target: "//:docs_check" # This is the user configurable part of the workflow - consumer-tests: - uses: ./.github/workflows/consumer_test.yml + unit-tests: + uses: ./.github/workflows/test.yml secrets: inherit docs-build: # Waits for consumer-tests but run only when docs verification succeeded - needs: [docs-verify, consumer-tests] + needs: [docs-verify, unit-tests] if: ${{ always() && needs.docs-verify.result == 'success' }} uses: 
eclipse-score/cicd-workflows/.github/workflows/docs.yml@main permissions: diff --git a/docs/internals/extensions/source_code_linker.md b/docs/internals/extensions/source_code_linker.md index 7ef81e59..51ba7690 100644 --- a/docs/internals/extensions/source_code_linker.md +++ b/docs/internals/extensions/source_code_linker.md @@ -64,7 +64,7 @@ These tags are extracted and matched to Sphinx needs via the `source_code_link` ### ✅ TestLink: Test Result Integration -TestLink scans test result XMLs from Bazel and converts each test case with metadata into Sphinx external needs, allowing links from tests to requirements. +TestLink scans test result XMLs from Bazel (bazel-testlogs) or in the folder 'tests-report' and converts each test case with metadata into Sphinx external needs, allowing links from tests to requirements. This depends on the `attribute_plugin` in our tooling repository, find it [here](https://github.com/eclipse-score/tooling/tree/main/python_basics/score_pytest) #### Test Tagging Options diff --git a/src/extensions/score_source_code_linker/tests/test_xml_parser.py b/src/extensions/score_source_code_linker/tests/test_xml_parser.py index c234e08b..adc0d6b9 100644 --- a/src/extensions/score_source_code_linker/tests/test_xml_parser.py +++ b/src/extensions/score_source_code_linker/tests/test_xml_parser.py @@ -17,6 +17,7 @@ """ import xml.etree.ElementTree as ET +from collections.abc import Callable from pathlib import Path from typing import Any @@ -30,67 +31,73 @@ # Unsure if I should make these last a session or not +def _write_test_xml( + path: Path, + name: str, + result: str = "", + props: dict[str, str] | None = None, + file: str = "", + line: int = 0, +): + """Helper to create the XML structure for a test case.""" + ts = ET.Element("testsuites") + suite = ET.SubElement(ts, "testsuite") + + # Create testcase with attributes + tc_attrs = {"name": name} + if file: + tc_attrs["file"] = file + if line: + tc_attrs["line"] = str(line) + tc = 
ET.SubElement(suite, "testcase", tc_attrs) + + # Add failure/skipped status + if result == "failed": + ET.SubElement(tc, "failure", {"message": "failmsg"}) + elif result == "skipped": + ET.SubElement(tc, "skipped", {"message": "skipmsg"}) + + # Add properties if provided + if props: + props_el = ET.SubElement(tc, "properties") + for k, v in props.items(): + ET.SubElement(props_el, "property", {"name": k, "value": v}) + + # Save to file + ET.ElementTree(ts).write(path, encoding="utf-8", xml_declaration=True) + + @pytest.fixture -def tmp_xml_dirs(tmp_path: Path) -> tuple[Path, Path, Path]: - root: Path = tmp_path / "bazel-testlogs" - dir1: Path = root / "with_props" - dir2: Path = root / "no_props" - dir1.mkdir(parents=True) - dir2.mkdir(parents=True) - - def write(file_path: Path, testcases: list[ET.Element]): - ts = ET.Element("testsuites") - suite = ET.SubElement(ts, "testsuite") - for tc in testcases: - suite.append(tc) - tree = ET.ElementTree(ts) - tree.write(file_path, encoding="utf-8", xml_declaration=True) - - def make_tc( - name: str, - result: str = "", - props: dict[str, str] | None = None, - file: str = "", - line: int = 0, - ): - tc = ET.Element("testcase", {"name": name}) - if file: - tc.set("file", file) - if line: - tc.set("line", str(line)) - if result == "failed": - ET.SubElement(tc, "failure", {"message": "failmsg"}) - elif result == "skipped": - ET.SubElement(tc, "skipped", {"message": "skipmsg"}) - if props: - props_el = ET.SubElement(tc, "properties") - for k, v in props.items(): - ET.SubElement(props_el, "property", {"name": k, "value": v}) - return tc - - # File with properties - tc1 = make_tc( - "tc_with_props", - result="failed", - props={ - "PartiallyVerifies": "REQ1", - "FullyVerifies": "", - "TestType": "type", - "DerivationTechnique": "tech", - "Description": "desc", - }, - file="path1", - line=10, - ) - write(dir1 / "test.xml", [tc1]) - - # File without properties - # HINT: Once the assertions in xml_parser are back and active, this 
should allow us - # to catch that the tests Need to be changed too. - tc2 = make_tc("tc_no_props", file="path2", line=20) - write(dir2 / "test.xml", [tc2]) - - return root, dir1, dir2 +def tmp_xml_dirs(tmp_path: Path) -> Callable[..., tuple[Path, Path, Path]]: + def _tmp_xml_dirs(test_folder: str = "bazel-testlogs") -> tuple[Path, Path, Path]: + root = tmp_path / test_folder + dir1, dir2 = root / "with_props", root / "no_props" + + for d in (dir1, dir2): + d.mkdir(parents=True, exist_ok=True) + + # File with properties + _write_test_xml( + dir1 / "test.xml", + name="tc_with_props", + result="failed", + file="path1", + line=10, + props={ + "PartiallyVerifies": "REQ1", + "FullyVerifies": "", + "TestType": "type", + "DerivationTechnique": "tech", + "Description": "desc", + }, + ) + + # File without properties + _write_test_xml(dir2 / "test.xml", name="tc_no_props", file="path2", line=20) + + return root, dir1, dir2 + + return _tmp_xml_dirs @add_test_properties( @@ -98,17 +105,62 @@ def make_tc( test_type="requirements-based", derivation_technique="requirements-analysis", ) -def test_find_xml_files(tmp_xml_dirs: tuple[Path, Path, Path]): - """Ensure xml files are found as expected""" +def test_find_xml_files(tmp_xml_dirs: Callable[..., tuple[Path, Path, Path]]): + """Ensure xml files are found as expected if bazel-testlogs is used""" root: Path dir1: Path dir2: Path - root, dir1, dir2 = tmp_xml_dirs + root, dir1, dir2 = tmp_xml_dirs() found = xml_parser.find_xml_files(root) expected: set[Path] = {dir1 / "test.xml", dir2 / "test.xml"} assert set(found) == expected +def test_find_xml_folder(tmp_xml_dirs: Callable[..., tuple[Path, Path, Path]]): + """Ensure xml files are found as expected if bazel-testlogs is used""" + root: Path + root, _, _ = tmp_xml_dirs() + found = xml_parser.find_test_folder(base_path=root.parent) + assert found is not None + assert found == root + + +def test_find_xml_folder_test_reports( + tmp_xml_dirs: Callable[..., tuple[Path, Path, Path]], +): + 
# root is the 'tests-report' folder inside tmp_path + root, _, _ = tmp_xml_dirs(test_folder="tests-report") + # We pass the PARENT of 'tests-report' as the workspace root + found = xml_parser.find_test_folder(base_path=root.parent) + assert found is not None + assert found == root + + +def test_find_xml_files_test_reports( + tmp_xml_dirs: Callable[..., tuple[Path, Path, Path]], +): + """Ensure xml files are found as expected if tests-report is used""" + root: Path + dir1: Path + dir2: Path + root, dir1, dir2 = tmp_xml_dirs(test_folder="tests-report") + found = xml_parser.find_xml_files(dir=root) + assert found is not None + expected: set[Path] = {root / dir1 / "test.xml", root / dir2 / "test.xml"} + assert set(found) == expected + + +def test_early_return(tmp_path: Path): + """ + Ensure that if tests-report & bazel-testlogs is not found, + we return None for early return inside extension + """ + # Move the test execution context to a 100% empty folder + + found = xml_parser.find_test_folder(tmp_path) + assert found is None + + @add_test_properties( partially_verifies=["tool_req__docs_test_link_testcase"], test_type="requirements-based", @@ -152,12 +204,12 @@ def test_parse_properties(): test_type="requirements-based", derivation_technique="requirements-analysis", ) -def test_read_test_xml_file(tmp_xml_dirs: tuple[Path, Path, Path]): +def test_read_test_xml_file(tmp_xml_dirs: Callable[..., tuple[Path, Path, Path]]): """Ensure a whole pre-defined xml file is parsed correctly""" _: Path dir1: Path dir2: Path - _, dir1, dir2 = tmp_xml_dirs + _, dir1, dir2 = tmp_xml_dirs() needs1, no_props1 = xml_parser.read_test_xml_file(dir1 / "test.xml") assert isinstance(needs1, list) and len(needs1) == 1 diff --git a/src/extensions/score_source_code_linker/xml_parser.py b/src/extensions/score_source_code_linker/xml_parser.py index 53c18b23..022168d2 100644 --- a/src/extensions/score_source_code_linker/xml_parser.py +++ b/src/extensions/score_source_code_linker/xml_parser.py @@ 
-171,16 +171,28 @@ def find_xml_files(dir: Path) -> list[Path]: return xml_paths +def find_test_folder(base_path: Path | None = None) -> Path | None: + ws_root = base_path if base_path is not None else find_ws_root() + assert ws_root is not None + if os.path.isdir(ws_root / "tests-report"): + return ws_root / "tests-report" + if os.path.isdir(ws_root / "bazel-testlogs"): + return ws_root / "bazel-testlogs" + logger.info("could not find tests-report or bazel-testlogs to parse testcases") + return None + + def run_xml_parser(app: Sphinx, env: BuildEnvironment): """ This is the 'main' function for parsing test.xml's and building testcase needs. It gets called from the source_code_linker __init__ """ - ws_root = find_ws_root() - assert ws_root is not None - bazel_testlogs = ws_root / "bazel-testlogs" - xml_file_paths = find_xml_files(bazel_testlogs) + testlogs_dir = find_test_folder() + # early return + if testlogs_dir is None: + return + xml_file_paths = find_xml_files(testlogs_dir) test_case_needs = build_test_needs_from_files(app, env, xml_file_paths) # Saving the test case needs for cache store_data_of_test_case_json( From 2e7118b33a8986b05cc8a2ae161c679850947f1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Fri, 16 Jan 2026 10:23:18 +0100 Subject: [PATCH 179/231] Add TestClass parsing to XML parser (#347) --- docs/internals/extensions/source_code_linker.md | 4 +++- .../tests/expected_grouped.json | 2 +- .../tests/expected_testlink.json | 2 +- .../tests/test_source_code_link_integration.py | 2 +- .../score_source_code_linker/xml_parser.py | 14 ++++++++++---- 5 files changed, 16 insertions(+), 8 deletions(-) diff --git a/docs/internals/extensions/source_code_linker.md b/docs/internals/extensions/source_code_linker.md index 51ba7690..2f53c840 100644 --- a/docs/internals/extensions/source_code_linker.md +++ b/docs/internals/extensions/source_code_linker.md @@ -92,7 +92,7 @@ def test_feature(): 1. 
**XML Parsing** (`xml_parser.py`) - Scans `bazel-testlogs/` for `test.xml` files. - Parses test cases and extracts: - - Name + - Name & Classname - File path - Line - Result (e.g. passed, failed, skipped) @@ -104,6 +104,8 @@ def test_feature(): - `DataFromTestCase` (used for external needs) - `DataForTestLink` (used for linking tests to requirements) +> If there is a Classname then it gets combined with the function name for the displayed link as follows: `Classname__Functionname` + 2. **Need Linking** - Generates external Sphinx needs from `DataFromTestCase`. - Creates `testlink` attributes on linked requirements. diff --git a/src/extensions/score_source_code_linker/tests/expected_grouped.json b/src/extensions/score_source_code_linker/tests/expected_grouped.json index da05343c..b5abf26c 100644 --- a/src/extensions/score_source_code_linker/tests/expected_grouped.json +++ b/src/extensions/score_source_code_linker/tests/expected_grouped.json @@ -21,7 +21,7 @@ ], "TestLinks": [ { - "name": "test_system_startup_time", + "name": "TestRequirementsCoverage__test_system_startup_time", "file": "src/tests/testfile_2.py", "line": 25, "need": "TREQ_ID_1", diff --git a/src/extensions/score_source_code_linker/tests/expected_testlink.json b/src/extensions/score_source_code_linker/tests/expected_testlink.json index 9dc32210..19068a4d 100644 --- a/src/extensions/score_source_code_linker/tests/expected_testlink.json +++ b/src/extensions/score_source_code_linker/tests/expected_testlink.json @@ -36,7 +36,7 @@ "result_text": "" }, { - "name": "test_system_startup_time", + "name": "TestRequirementsCoverage__test_system_startup_time", "file": "src/tests/testfile_2.py", "line": 25, "need": "TREQ_ID_1", diff --git a/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py b/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py index 44ea35a6..a400ff77 100644 --- 
a/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py +++ b/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py @@ -339,7 +339,7 @@ def example_test_link_text_all_ok(sphinx_base_dir: Path): return { "TREQ_ID_1": [ DataForTestLink( - name="test_system_startup_time", + name="TestRequirementsCoverage__test_system_startup_time", file=Path("src/tests/testfile_2.py"), need="TREQ_ID_1", line=25, diff --git a/src/extensions/score_source_code_linker/xml_parser.py b/src/extensions/score_source_code_linker/xml_parser.py index 022168d2..93456971 100644 --- a/src/extensions/score_source_code_linker/xml_parser.py +++ b/src/extensions/score_source_code_linker/xml_parser.py @@ -104,11 +104,17 @@ def read_test_xml_file(file: Path) -> tuple[list[DataOfTestCase], list[str]]: for testsuite in root.findall("testsuite"): for testcase in testsuite.findall("testcase"): case_properties = {} - testname = testcase.get("name") - assert testname is not None, ( - f"Testcase: {testcase} does not have a 'name' attribute. " - "This is mandatory. This should not happen, something is wrong." + testcasename = testcase.get("name", "") + testclassname = testcase.get("classname", "") + assert testclassname or testcasename, ( + f"Testcase: {testcase} does not have a 'name' or 'classname' attribute." + "One of which is mandatory. This should not happen, something is wrong." 
) + if testclassname: + testcn = testclassname.split(".")[-1] + testname = "__".join([testcn, testcasename]) + else: + testname = testcasename test_file = testcase.get("file") line = testcase.get("line") From c546eedaf3f2f73bcc21d5fc6c3243bb9cb97482 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Tue, 20 Jan 2026 15:31:23 +0100 Subject: [PATCH 180/231] Fix 'edit on github' button (#352) Removing quotations around the source dir fixes the links --- src/incremental.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/incremental.py b/src/incremental.py index 91f14954..fbabf8b1 100644 --- a/src/incremental.py +++ b/src/incremental.py @@ -89,7 +89,7 @@ def get_env(name: str) -> str: base_arguments.append(f"-A=github_user={args.github_user}") base_arguments.append(f"-A=github_repo={args.github_repo}") base_arguments.append("-A=github_version=main") - base_arguments.append(f"-A=doc_path='{get_env('SOURCE_DIRECTORY')}'") + base_arguments.append(f"-A=doc_path={get_env('SOURCE_DIRECTORY')}") action = get_env("ACTION") if action == "live_preview": From 39d78d7b123c08ec4ab98a0840fa85127708e827 Mon Sep 17 00:00:00 2001 From: RolandJentschETAS <135332348+RolandJentschETAS@users.noreply.github.com> Date: Tue, 20 Jan 2026 18:35:37 +0100 Subject: [PATCH 181/231] fix issues which prevent latest main update (#349) --- MODULE.bazel | 2 +- docs/internals/requirements/requirements.rst | 17 ++++++++--------- src/extensions/score_metamodel/metamodel.yaml | 1 - 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index ffebf8d2..c7f9c56a 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -100,7 +100,7 @@ http_file( # docs dependency # Note: requirements were last aligned with 1.2.0, # the switch to 1.3.1 is purely to drop the dependency on docs-as-code 1.x. 
-bazel_dep(name = "score_process", version = "1.3.2") +bazel_dep(name = "score_process", version = "1.4.2") # Add Linter bazel_dep(name = "rules_multitool", version = "1.9.0") diff --git a/docs/internals/requirements/requirements.rst b/docs/internals/requirements/requirements.rst index 52203074..816646cb 100644 --- a/docs/internals/requirements/requirements.rst +++ b/docs/internals/requirements/requirements.rst @@ -689,9 +689,8 @@ Architecture Attributes :parent_covered: NO: we only enable linking, we do not link :satisfies: gd_req__req_attr_impl, - gd_req__impl_design_code_link, - Docs-as-Code shall allow source code to link to needs. + Docs-as-Code shall allow source code to link to requirement sphinx-needs objects. A link to the corresponding source code location in GitHub shall be generated in the generated documentation within the linked requirement. @@ -720,14 +719,14 @@ Architecture Attributes Provide needs type ``dd_sta`` for static diagrams showing unit interactions as UML. -.. tool_req:: Dynamic Diagram for Unit Interactions - :id: tool_req__docs_dd_dyn - :tags: Detailed Design & Code - :implemented: YES - :parent_covered: YES - :satisfies: gd_req__impl_dynamic_diagram +.. .. tool_req:: Dynamic Diagram for Unit Interactions +.. :id: tool_req__docs_dd_dyn +.. :tags: Detailed Design & Code +.. :implemented: YES +.. :parent_covered: YES +.. :satisfies: gd_req__impl_dynamic_diagram - Provide needs type ``dd_dyn`` for dynamic diagrams showing unit interactions as UML. +.. Provide needs type ``dd_dyn`` for dynamic diagrams showing unit interactions as UML. 
Testing diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index f0086b87..42c7ced9 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -684,7 +684,6 @@ needs_types: includes: sw_unit, sw_unit_int parts: 3 - # req-Id: tool_req__docs_dd_dyn dd_dyn: title: Dynamic detailed design color: #FEDCD2 From 84de4a4fc1da5de0a46d6a2023d85c4eef8096ff Mon Sep 17 00:00:00 2001 From: RolandJentschETAS <135332348+RolandJentschETAS@users.noreply.github.com> Date: Wed, 21 Jan 2026 09:46:54 +0100 Subject: [PATCH 182/231] update mod, comp_arc_sta (#350) --- src/extensions/score_metamodel/metamodel.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 42c7ced9..8112f446 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -520,7 +520,7 @@ needs_types: safety: ^(QM|ASIL_B)$ # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ - optional_links: + mandatory_links: includes: comp tags: - architecture_element @@ -534,7 +534,7 @@ needs_types: style: card optional_links: belongs_to: mod # for evaluation - includes: comp, comp_arc_sta # deprecated + includes: comp tags: - architecture_view parts: 3 @@ -584,8 +584,8 @@ needs_types: status: ^(valid|invalid)$ optional_links: fulfils: comp_req - implements: logic_arc_int, real_arc_int_op - includes: comp_arc_sta, comp + implements: logic_arc_int, real_arc_int_op # deprecated, views does not implement anything. 
Now moved to comp + includes: comp uses: logic_arc_int, real_arc_int_op belongs_to: comp # TODO: make it mandatory tags: From 794d642cdf266fd70c74b21aef9418faff5503d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Wed, 21 Jan 2026 20:21:45 +0100 Subject: [PATCH 183/231] Enabeling consumer tests to run on PR (#356) --- .github/workflows/consumer_test.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/consumer_test.yml b/.github/workflows/consumer_test.yml index bb7ab18a..c190b581 100644 --- a/.github/workflows/consumer_test.yml +++ b/.github/workflows/consumer_test.yml @@ -14,6 +14,10 @@ name: Consumer Tests on: workflow_call: + pull_request_target: + types: [opened, reopened, synchronize] # Allows forks to trigger the docs build + merge_group: + types: [checks_requested] jobs: test: From 4d28db0b7c413c06a6453217a4abd0c8388faa5f Mon Sep 17 00:00:00 2001 From: RolandJentschETAS <135332348+RolandJentschETAS@users.noreply.github.com> Date: Thu, 22 Jan 2026 11:45:35 +0100 Subject: [PATCH 184/231] add optional link from component to requirement (#359) --- src/extensions/score_metamodel/metamodel.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 8112f446..af07e809 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -562,6 +562,7 @@ needs_types: # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ optional_links: + fulfils: comp_req implements: logic_arc_int, real_arc_int_op uses: logic_arc_int, real_arc_int_op consists_of: comp From 015e0afc35034dd9982a7a17b8b2ab2dc6e2aa2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Fri, 23 Jan 2026 14:26:47 +0100 Subject: [PATCH 185/231] Fixing consumer test pipe (#363) The pipe into 'tee' has made it so only the exit code of the 'tee' action was recorded / 
interpreted by github. This meant that any fail inside the consumer test was ignored and it only failed if the 'tee' action would fail. --- .github/workflows/consumer_test.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/consumer_test.yml b/.github/workflows/consumer_test.yml index c190b581..f3f41c99 100644 --- a/.github/workflows/consumer_test.yml +++ b/.github/workflows/consumer_test.yml @@ -39,10 +39,13 @@ jobs: run: | mkdir -p reports + # The pipefail ensures that non 0 exit codes inside the pytest execution get carried into the pipe + # & make the tests red in the end. Without this we only would check the exit code of the 'tee' command. - name: Run Consumer tests + run: | + set -o pipefail .venv_docs/bin/python -m pytest -s -v src/tests/ --repo="$CONSUMER" --junitxml="reports/${{ matrix.consumer }}.xml" | tee "reports/${{ matrix.consumer }}.log" - env: FORCE_COLOR: "1" TERM: xterm-256color From 50b21b3c047da02ebbea52159891be07f7f455de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Fri, 23 Jan 2026 17:16:39 +0100 Subject: [PATCH 186/231] fix: cleanup to consumer tests to ensure fresh env for each run (#365) --- .github/workflows/consumer_test.yml | 4 ++-- src/tests/test_consumer.py | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/.github/workflows/consumer_test.yml b/.github/workflows/consumer_test.yml index f3f41c99..15b15478 100644 --- a/.github/workflows/consumer_test.yml +++ b/.github/workflows/consumer_test.yml @@ -14,8 +14,8 @@ name: Consumer Tests on: workflow_call: - pull_request_target: - types: [opened, reopened, synchronize] # Allows forks to trigger the docs build + pull_request: + types: [opened, reopened, synchronize] merge_group: types: [checks_requested] diff --git a/src/tests/test_consumer.py b/src/tests/test_consumer.py index a8f8071f..14adad1b 100644 --- a/src/tests/test_consumer.py +++ b/src/tests/test_consumer.py @@ -12,6 +12,7 
@@ # ******************************************************************************* import os import re +import shutil import subprocess from collections import defaultdict from dataclasses import dataclass, field @@ -132,6 +133,17 @@ def sphinx_base_dir(tmp_path_factory: TempPathFactory, pytestconfig: Config) -> return CACHE_DIR +def cleanup(): + """ + Cleanup before tests are run + """ + for p in Path(".").glob("*/ubproject.toml"): + p.unlink() + shutil.rmtree("_build", ignore_errors=True) + cmd = "bazel clean --async" + subprocess.run(cmd.split(), text=True) + + def get_current_git_commit(curr_path: Path): """ Get the current git commit hash (HEAD). @@ -449,6 +461,8 @@ def run_cmd( ) -> tuple[list[Result], bool]: verbosity: int = pytestconfig.get_verbosity() + cleanup() + if verbosity >= 3: # Level 3 (-vvv): Stream output in real-time BR = stream_subprocess_output(cmd, repo_name) @@ -584,6 +598,7 @@ def prepare_repo_overrides( # Updated version of your test loop def test_and_clone_repos_updated(sphinx_base_dir: Path, pytestconfig: Config): # Get command line options from pytest config + repo_tests: str | None = cast(str | None, pytestconfig.getoption("--repo")) disable_cache: bool = bool(pytestconfig.getoption("--disable-cache")) From 03826659acc30764215364588e96dc6c337b3350 Mon Sep 17 00:00:00 2001 From: Alexander Lanin Date: Fri, 23 Jan 2026 17:33:22 +0100 Subject: [PATCH 187/231] improve statistics page (#364) --- .../requirements/implementation_state.rst | 112 ++++++++++++++++++ docs/internals/requirements/index.rst | 2 +- docs/internals/requirements/test_overview.rst | 64 ---------- 3 files changed, 113 insertions(+), 65 deletions(-) create mode 100644 docs/internals/requirements/implementation_state.rst delete mode 100644 docs/internals/requirements/test_overview.rst diff --git a/docs/internals/requirements/implementation_state.rst b/docs/internals/requirements/implementation_state.rst new file mode 100644 index 00000000..ea8bfe45 --- /dev/null +++ 
b/docs/internals/requirements/implementation_state.rst @@ -0,0 +1,112 @@ +.. _statistics: + +Implementation State Statistics +================================ + +Overview +-------- + +.. needpie:: Requirements Status + :labels: not implemented, implemented but not tested, implemented and tested + :colors: red,yellow, green + + type == 'tool_req' and implemented == 'NO' + type == 'tool_req' and testlink == '' and (implemented == 'YES' or implemented == 'PARTIAL') + type == 'tool_req' and testlink != '' and (implemented == 'YES' or implemented == 'PARTIAL') + +In Detail +--------- + +.. grid:: 2 + :class-container: score-grid + + .. grid-item-card:: + + .. needpie:: Requirements marked as Implemented + :labels: not implemented, partial, implemented + :colors: red, orange, green + + type == 'tool_req' and implemented == 'NO' + type == 'tool_req' and implemented == 'PARTIAL' + type == 'tool_req' and implemented == 'YES' + + .. grid-item-card:: + + .. needpie:: Requirements with Codelinks + :labels: no codelink, with codelink + :colors: red, green + + type == 'tool_req' and source_code_link == '' + type == 'tool_req' and source_code_link != '' + + .. grid-item-card:: + + .. needpie:: Test Results + :labels: passed, failed, skipped + :colors: green, red, orange + + type == 'testcase' and result == 'passed' + type == 'testcase' and result == 'failed' + type == 'testcase' and result == 'skipped' + +.. grid:: 2 + + .. grid-item-card:: + + Failed Tests + + *Hint: this table is empty by definition, as PRs with failing tests are not allowed to be merged in docs-as-code repo.* + + .. needtable:: FAILED TESTS + :filter: result == "failed" + :tags: TEST + :columns: name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique;id as "link" + + .. grid-item-card:: + + Skipped / Disabled Tests + + *Hint: this table is empty by definition, as we do not allow skipped or disabled tests in docs-as-code repo.* + + .. 
needtable:: SKIPPED/DISABLED TESTS + :filter: result != "failed" and result != "passed" + :tags: TEST + :columns: name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique;id as "link" + + + + +All passed Tests +----------------- + +.. needtable:: SUCCESSFUL TESTS + :filter: result == "passed" + :tags: TEST + :columns: name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique;id as "link" + + +Details About Testcases +------------------------ +*Data is not filled out yet within the test cases.* + +.. needpie:: Test Types Used In Testcases + :labels: fault-injection, interface-test, requirements-based, resource-usage + :legend: + + type == 'testcase' and test_type == 'fault-injection' + type == 'testcase' and test_type == 'interface-test' + type == 'testcase' and test_type == 'requirements-based' + type == 'testcase' and test_type == 'resource-usage' + + +.. needpie:: Derivation Techniques Used In Testcases + :labels: requirements-analysis, design-analysis, boundary-values, equivalence-classes, fuzz-testing, error-guessing, explorative-testing + :legend: + + type == 'testcase' and derivation_technique == 'requirements-analysis' + type == 'testcase' and derivation_technique == 'design-analysis' + type == 'testcase' and derivation_technique == 'boundary-values' + type == 'testcase' and derivation_technique == 'equivalence-classes' + type == 'testcase' and derivation_technique == 'fuzz-testing' + type == 'testcase' and derivation_technique == 'error-guessing' + type == 'testcase' and derivation_technique == 'explorative-testing' diff --git a/docs/internals/requirements/index.rst b/docs/internals/requirements/index.rst index 0fecb8a2..66c81dd5 100644 --- a/docs/internals/requirements/index.rst +++ b/docs/internals/requirements/index.rst @@ -8,4 +8,4 @@ Requirements process_overview tool_req_overview requirements - test_overview + implementation_state diff --git 
a/docs/internals/requirements/test_overview.rst b/docs/internals/requirements/test_overview.rst deleted file mode 100644 index c03d24c4..00000000 --- a/docs/internals/requirements/test_overview.rst +++ /dev/null @@ -1,64 +0,0 @@ -.. _testing_stats: - -Testing Statistics -================== - - -.. needtable:: SUCCESSFUL TESTS - :filter: result == "passed" - :tags: TEST - :columns: name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique;id as "link" - -.. needtable:: FAILED TESTS - :filter: result == "failed" - :tags: TEST - :columns: name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique;id as "link" - - -.. needtable:: SKIPPED/DISABLED TESTS - :filter: result != "failed" and result != "passed" - :tags: TEST - :columns: name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique;id as "link" - - -.. needpie:: Requirements That Have A Linked Test - :labels: requirement not implemeted, not tested, tested - :colors: red,yellow, green - :legend: - - type == 'tool_req' and implemented == 'NO' - type == 'tool_req' and testlink == '' and (implemented == 'YES' or implemented == 'PARTIAL') - type == 'tool_req' and testlink != '' and (implemented == 'YES' or implemented == 'PARTIAL') - - -.. needpie:: Test Results - :labels: passed, failed, skipped - :colors: green, red, orange - :legend: - - type == 'testcase' and result == 'passed' - type == 'testcase' and result == 'failed' - type == 'testcase' and result == 'skipped' - - -.. needpie:: Test Types Used In Testcases - :labels: fault-injection, interface-test, requirements-based, resource-usage - :legend: - - type == 'testcase' and test_type == 'fault-injection' - type == 'testcase' and test_type == 'interface-test' - type == 'testcase' and test_type == 'requirements-based' - type == 'testcase' and test_type == 'resource-usage' - - -.. 
needpie:: Derivation Techniques Used In Testcases - :labels: requirements-analysis, design-analysis, boundary-values, equivalence-classes, fuzz-testing, error-guessing, explorative-testing - :legend: - - type == 'testcase' and derivation_technique == 'requirements-analysis' - type == 'testcase' and derivation_technique == 'design-analysis' - type == 'testcase' and derivation_technique == 'boundary-values' - type == 'testcase' and derivation_technique == 'equivalence-classes' - type == 'testcase' and derivation_technique == 'fuzz-testing' - type == 'testcase' and derivation_technique == 'error-guessing' - type == 'testcase' and derivation_technique == 'explorative-testing' From efa82806bd98170398fa2dedafe32c9b1496be34 Mon Sep 17 00:00:00 2001 From: RolandJentschETAS <135332348+RolandJentschETAS@users.noreply.github.com> Date: Mon, 26 Jan 2026 13:21:07 +0100 Subject: [PATCH 188/231] remove comp to comp_req link, fix drawing func (#362) --- src/extensions/score_draw_uml_funcs/__init__.py | 2 +- src/extensions/score_metamodel/metamodel.yaml | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/extensions/score_draw_uml_funcs/__init__.py b/src/extensions/score_draw_uml_funcs/__init__.py index a5eb91ca..8276b80f 100644 --- a/src/extensions/score_draw_uml_funcs/__init__.py +++ b/src/extensions/score_draw_uml_funcs/__init__.py @@ -148,7 +148,7 @@ def draw_comp_incl_impl_int( # Draw inner (sub)components recursively if requested if white_box_view: - for need_inc in need.get("includes", []): + for need_inc in need.get("consists_of", []): curr_need = all_needs.get(need_inc, {}) # check for misspelled include diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index af07e809..451f25d8 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -562,7 +562,6 @@ needs_types: # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ 
optional_links: - fulfils: comp_req implements: logic_arc_int, real_arc_int_op uses: logic_arc_int, real_arc_int_op consists_of: comp @@ -586,7 +585,7 @@ needs_types: optional_links: fulfils: comp_req implements: logic_arc_int, real_arc_int_op # deprecated, views does not implement anything. Now moved to comp - includes: comp + includes: comp # deprecated uses: logic_arc_int, real_arc_int_op belongs_to: comp # TODO: make it mandatory tags: From 89dcf0f9b381bbfe250b741e42920d801fd7b625 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= Date: Tue, 27 Jan 2026 15:24:04 +0100 Subject: [PATCH 189/231] Increase verbosity & change assert in consumer tests (#370) --- .github/workflows/consumer_test.yml | 2 +- src/tests/test_consumer.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/consumer_test.yml b/.github/workflows/consumer_test.yml index 15b15478..685e8dad 100644 --- a/.github/workflows/consumer_test.yml +++ b/.github/workflows/consumer_test.yml @@ -45,7 +45,7 @@ jobs: run: | set -o pipefail - .venv_docs/bin/python -m pytest -s -v src/tests/ --repo="$CONSUMER" --junitxml="reports/${{ matrix.consumer }}.xml" | tee "reports/${{ matrix.consumer }}.log" + .venv_docs/bin/python -m pytest -s -vv src/tests/ --repo="$CONSUMER" --junitxml="reports/${{ matrix.consumer }}.xml" | tee "reports/${{ matrix.consumer }}.log" env: FORCE_COLOR: "1" TERM: xterm-256color diff --git a/src/tests/test_consumer.py b/src/tests/test_consumer.py index 14adad1b..c57e700e 100644 --- a/src/tests/test_consumer.py +++ b/src/tests/test_consumer.py @@ -669,7 +669,7 @@ def test_and_clone_repos_updated(sphinx_base_dir: Path, pytestconfig: Config): # Printing a 'overview' table as a result print_result_table(results) - assert overall_success, ( - "Consumer Tests failed, see table for which commands specifically. 
" - "Enable verbosity for warning/error printouts" - ) + if not overall_success: + pytest.fail( + reason="Consumer Tests failed, see table for which commands specifically. " + ) From b5023ade5c1714596b3b18328b18f272e0b21c09 Mon Sep 17 00:00:00 2001 From: Arnaud Riess Date: Thu, 29 Jan 2026 11:32:56 +0100 Subject: [PATCH 190/231] Upgrade sphinx-needs to 6.3.0 (#361) * feat: upgrade sphinx-needs to 6.3.0 Add support for new options (is_import, constraints) introduced in 6.3.0 and remove the plantuml workaround that was only needed for older versions. * feat: added a minimum version requirement for sphinx * refactor: replace NeedsInfoType with NeedItem across multiple files * refactor: remove unused link keys from need function * refactor: reorganize imports in test and source code linker modules * refactor: enhance type hints for better clarity in check_options and test_source_code_link_integration * refactor: improve type casting and validation in _get_normalized function --- docs/conf.py | 2 - src/extensions/score_metamodel/__init__.py | 7 +- .../checks/attributes_format.py | 10 +- .../score_metamodel/checks/check_options.py | 31 ++-- .../score_metamodel/checks/graph_checks.py | 13 +- .../checks/id_contains_feature.py | 4 +- .../score_metamodel/checks/standards.py | 24 +-- src/extensions/score_metamodel/log.py | 12 +- .../score_metamodel/tests/__init__.py | 93 +++++++++- .../tests/test_metamodel__init__.py | 97 +++++++++- src/extensions/score_metamodel/yaml_parser.py | 3 + .../score_source_code_linker/__init__.py | 5 +- .../test_source_code_link_integration.py | 14 +- src/requirements.in | 5 +- src/requirements.txt | 167 +++--------------- 15 files changed, 274 insertions(+), 213 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index fffe55be..0255915c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -16,7 +16,5 @@ version = "0.1" extensions = [ - # TODO remove plantuml here once docs-as-code is updated to sphinx-needs 6 - "sphinxcontrib.plantuml", 
"score_sphinx_bundle", ] diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index e7eed11d..0a6c4dae 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -18,7 +18,8 @@ from sphinx.application import Sphinx from sphinx_needs import logging -from sphinx_needs.data import NeedsInfoType, NeedsView, SphinxNeedsData +from sphinx_needs.data import NeedsView, SphinxNeedsData +from sphinx_needs.need_item import NeedItem from src.extensions.score_metamodel.external_needs import connect_external_needs from src.extensions.score_metamodel.log import CheckLogger @@ -39,7 +40,7 @@ logger = logging.get_logger(__name__) -local_check_function = Callable[[Sphinx, NeedsInfoType, CheckLogger], None] +local_check_function = Callable[[Sphinx, NeedItem, CheckLogger], None] graph_check_function = Callable[[Sphinx, NeedsView, CheckLogger], None] local_checks: list[local_check_function] = [] @@ -170,7 +171,7 @@ def _remove_prefix(word: str, prefixes: list[str]) -> str: return word -def _get_need_type_for_need(app: Sphinx, need: NeedsInfoType) -> ScoreNeedType: +def _get_need_type_for_need(app: Sphinx, need: NeedItem) -> ScoreNeedType: for nt in app.config.needs_types: if nt["directive"] == need["type"]: return nt diff --git a/src/extensions/score_metamodel/checks/attributes_format.py b/src/extensions/score_metamodel/checks/attributes_format.py index ed05ae3f..248979da 100644 --- a/src/extensions/score_metamodel/checks/attributes_format.py +++ b/src/extensions/score_metamodel/checks/attributes_format.py @@ -16,7 +16,7 @@ from score_metamodel import CheckLogger, ProhibitedWordCheck, ScoreNeedType, local_check from sphinx.application import Sphinx -from sphinx_needs.data import NeedsInfoType +from sphinx_needs.need_item import NeedItem def get_need_type(needs_types: list[ScoreNeedType], directive: str) -> ScoreNeedType: @@ -29,7 +29,7 @@ def get_need_type(needs_types: list[ScoreNeedType], 
directive: str) -> ScoreNeed # req-Id: tool_req__docs_common_attr_id_scheme @local_check -def check_id_format(app: Sphinx, need: NeedsInfoType, log: CheckLogger): +def check_id_format(app: Sphinx, need: NeedItem, log: CheckLogger): """ Checking if the title, directory and feature are included in the requirement id or not. @@ -57,7 +57,7 @@ def check_id_format(app: Sphinx, need: NeedsInfoType, log: CheckLogger): @local_check -def check_id_length(app: Sphinx, need: NeedsInfoType, log: CheckLogger): +def check_id_length(app: Sphinx, need: NeedItem, log: CheckLogger): """ Validates that the requirement ID does not exceed the hard limit of 45 characters. While the recommended limit is 30 characters, this check enforces a strict maximum @@ -85,7 +85,7 @@ def check_id_length(app: Sphinx, need: NeedsInfoType, log: CheckLogger): def _check_options_for_prohibited_words( - prohibited_word_checks: ProhibitedWordCheck, need: NeedsInfoType, log: CheckLogger + prohibited_word_checks: ProhibitedWordCheck, need: NeedItem, log: CheckLogger ): options: list[str] = [ x for x in prohibited_word_checks.option_check if x != "types" @@ -109,7 +109,7 @@ def _check_options_for_prohibited_words( # req-Id: tool_req__docs_common_attr_desc_wording # req-Id: tool_req__docs_common_attr_title @local_check -def check_for_prohibited_words(app: Sphinx, need: NeedsInfoType, log: CheckLogger): +def check_for_prohibited_words(app: Sphinx, need: NeedItem, log: CheckLogger): need_options = get_need_type(app.config.needs_types, need["type"]) prohibited_word_checks: list[ProhibitedWordCheck] = ( app.config.prohibited_words_checks diff --git a/src/extensions/score_metamodel/checks/check_options.py b/src/extensions/score_metamodel/checks/check_options.py index e0b95dbb..5dcc623b 100644 --- a/src/extensions/score_metamodel/checks/check_options.py +++ b/src/extensions/score_metamodel/checks/check_options.py @@ -11,6 +11,7 @@ # SPDX-License-Identifier: Apache-2.0 # 
******************************************************************************* import re +from typing import cast from score_metamodel import ( CheckLogger, @@ -20,7 +21,7 @@ ) from score_metamodel.metamodel_types import AllowedLinksType from sphinx.application import Sphinx -from sphinx_needs.data import NeedsInfoType +from sphinx_needs.need_item import NeedItem def get_need_type(needs_types: list[ScoreNeedType], directive: str) -> ScoreNeedType: @@ -31,9 +32,7 @@ def get_need_type(needs_types: list[ScoreNeedType], directive: str) -> ScoreNeed raise ValueError(f"Need type {directive} not found in needs_types") -def _get_normalized( - need: NeedsInfoType, key: str, remove_prefix: bool = False -) -> list[str]: +def _get_normalized(need: NeedItem, key: str, remove_prefix: bool = False) -> list[str]: """Normalize a raw value into a list of strings.""" raw_value = need.get(key, None) if not raw_value: @@ -42,17 +41,23 @@ def _get_normalized( if remove_prefix: return [_remove_namespace_prefix_(raw_value)] return [raw_value] - if isinstance(raw_value, list) and all(isinstance(v, str) for v in raw_value): + if isinstance(raw_value, list): + # Verify all elements are strings + raw_list = cast(list[object], raw_value) + for item in raw_list: + if not isinstance(item, str): + raise ValueError + str_list = cast(list[str], raw_value) if remove_prefix: - return [_remove_namespace_prefix_(v) for v in raw_value] - return raw_value + return [_remove_namespace_prefix_(v) for v in str_list] + return str_list raise ValueError def _validate_value_pattern( value: str, pattern: str, - need: NeedsInfoType, + need: NeedItem, field: str, ): """Check if a value matches the given pattern and log the result. @@ -76,7 +81,7 @@ def _remove_namespace_prefix_(word: str) -> str: def validate_options( log: CheckLogger, need_type: ScoreNeedType, - need: NeedsInfoType, + need: NeedItem, ): """ Validates that options in a need match their expected patterns. 
@@ -103,7 +108,7 @@ def _validate(attributes_to_allowed_values: dict[str, str], mandatory: bool): def validate_links( log: CheckLogger, need_type: ScoreNeedType, - need: NeedsInfoType, + need: NeedItem, ): """ Validates that links in a need match the expected types or regexes. @@ -156,7 +161,7 @@ def _validate( @local_check def check_options( app: Sphinx, - need: NeedsInfoType, + need: NeedItem, log: CheckLogger, ): """ @@ -172,7 +177,7 @@ def check_options( @local_check def check_extra_options( app: Sphinx, - need: NeedsInfoType, + need: NeedItem, log: CheckLogger, ): """ @@ -224,7 +229,7 @@ def parse_milestone(value: str) -> tuple[int, int, int]: @local_check def check_validity_consistency( app: Sphinx, - need: NeedsInfoType, + need: NeedItem, log: CheckLogger, ): """ diff --git a/src/extensions/score_metamodel/checks/graph_checks.py b/src/extensions/score_metamodel/checks/graph_checks.py index e697419e..2335db22 100644 --- a/src/extensions/score_metamodel/checks/graph_checks.py +++ b/src/extensions/score_metamodel/checks/graph_checks.py @@ -21,10 +21,11 @@ ) from sphinx.application import Sphinx from sphinx_needs.config import NeedType -from sphinx_needs.data import NeedsInfoType, NeedsView +from sphinx_needs.data import NeedsView +from sphinx_needs.need_item import NeedItem -def eval_need_check(need: NeedsInfoType, check: str, log: CheckLogger) -> bool: +def eval_need_check(need: NeedItem, check: str, log: CheckLogger) -> bool: """ Perform a single check on a need: 1. Split the check into its parts @@ -57,7 +58,7 @@ def eval_need_check(need: NeedsInfoType, check: str, log: CheckLogger) -> bool: def eval_need_condition( - need: NeedsInfoType, condition: str | dict[str, list[Any]], log: CheckLogger + need: NeedItem, condition: str | dict[str, list[Any]], log: CheckLogger ) -> bool: """Evaluate a condition on a need: 1. Check if the condition is only a simple check (e.g. 
"status == valid") @@ -101,16 +102,16 @@ def eval_need_condition( def filter_needs_by_criteria( needs_types: list[NeedType], - needs: list[NeedsInfoType], + needs: list[NeedItem], needs_selection_criteria: dict[str, str], log: CheckLogger, -) -> list[NeedsInfoType]: +) -> list[NeedItem]: """Create a list of needs that match the selection criteria.: - If it is an include selection add the include to the pattern - If it is an exclude selection add a "^" to the pattern """ - selected_needs: list[NeedsInfoType] = [] + selected_needs: list[NeedItem] = [] pattern: list[str] = [] need_pattern: str = list(needs_selection_criteria.keys())[0] # Verify Inputs diff --git a/src/extensions/score_metamodel/checks/id_contains_feature.py b/src/extensions/score_metamodel/checks/id_contains_feature.py index a657b1c3..035cf18a 100644 --- a/src/extensions/score_metamodel/checks/id_contains_feature.py +++ b/src/extensions/score_metamodel/checks/id_contains_feature.py @@ -18,11 +18,11 @@ local_check, ) from sphinx.application import Sphinx -from sphinx_needs.data import NeedsInfoType +from sphinx_needs.need_item import NeedItem @local_check -def id_contains_feature(app: Sphinx, need: NeedsInfoType, log: CheckLogger): +def id_contains_feature(app: Sphinx, need: NeedItem, log: CheckLogger): """ The ID is expected to be in the format '____'. Most of this is ensured via regex in the metamodel. 
diff --git a/src/extensions/score_metamodel/checks/standards.py b/src/extensions/score_metamodel/checks/standards.py index 2c029e6f..7d27f5bf 100644 --- a/src/extensions/score_metamodel/checks/standards.py +++ b/src/extensions/score_metamodel/checks/standards.py @@ -12,7 +12,7 @@ # ******************************************************************************* # from sphinx.application import Sphinx -from sphinx_needs.data import NeedsInfoType +from sphinx_needs.need_item import NeedItem # from score_metamodel import ( # CheckLogger, @@ -20,7 +20,7 @@ # ) -def get_standards_needs(needs: list[NeedsInfoType]) -> dict[str, NeedsInfoType]: +def get_standards_needs(needs: list[NeedItem]) -> dict[str, NeedItem]: """ Return a dictionary of all standard requirements from the Sphinx app's needs. """ @@ -29,8 +29,8 @@ def get_standards_needs(needs: list[NeedsInfoType]) -> dict[str, NeedsInfoType]: def get_standards_workproducts( - needs: list[NeedsInfoType], -) -> dict[str, NeedsInfoType]: + needs: list[NeedItem], +) -> dict[str, NeedItem]: """ Return a dictionary of standard workproducts from the Sphinx app's needs. """ @@ -38,7 +38,7 @@ def get_standards_workproducts( return {need["id"]: need for need in needs if need["type"] == "std_wp"} -def get_workflows(needs: list[NeedsInfoType]) -> dict[str, NeedsInfoType]: +def get_workflows(needs: list[NeedItem]) -> dict[str, NeedItem]: """ Return a dictionary of all workflows from the Sphinx app's needs. """ @@ -46,7 +46,7 @@ def get_workflows(needs: list[NeedsInfoType]) -> dict[str, NeedsInfoType]: return {need["id"]: need for need in needs if need.get("type") == "workflow"} -def get_workproducts(needs: list[NeedsInfoType]) -> dict[str, NeedsInfoType]: +def get_workproducts(needs: list[NeedItem]) -> dict[str, NeedItem]: """ Return a dictionary of all workproducts from the Sphinx app's needs. 
""" @@ -54,7 +54,7 @@ def get_workproducts(needs: list[NeedsInfoType]) -> dict[str, NeedsInfoType]: return {need["id"]: need for need in needs if need.get("type") == "workproduct"} -def get_compliance_req_needs(needs: list[NeedsInfoType]) -> set[str]: +def get_compliance_req_needs(needs: list[NeedItem]) -> set[str]: """ Return a set of all compliance_req values from the Sphinx app's needs, but only if the need type is one of the specified process-related types. @@ -68,7 +68,7 @@ def get_compliance_req_needs(needs: list[NeedsInfoType]) -> set[str]: } -def get_compliance_wp_needs(needs: list[NeedsInfoType]) -> set[str]: +def get_compliance_wp_needs(needs: list[NeedItem]) -> set[str]: """ Return a set of all compliance_wp values from the Sphinx app's needs, but only if the need type is "workproduct". @@ -177,7 +177,7 @@ def get_compliance_wp_needs(needs: list[NeedsInfoType]) -> set[str]: def my_pie_linked_standard_requirements( - needs: list[NeedsInfoType], results: list[int], **kwargs: str | int | float + needs: list[NeedItem], results: list[int], **kwargs: str | int | float ) -> None: """ Function to render the chart of check for standard requirements linked @@ -210,7 +210,7 @@ def my_pie_linked_standard_requirements( def my_pie_linked_standard_requirements_by_tag( - needs: list[NeedsInfoType], results: list[int], **kwargs: str | int | float + needs: list[NeedItem], results: list[int], **kwargs: str | int | float ) -> None: """ Filter function used for 'needpie' directives. 
@@ -258,7 +258,7 @@ def my_pie_linked_standard_requirements_by_tag( def my_pie_linked_standard_workproducts( - needs: list[NeedsInfoType], results: list[int], **kwargs: str | int | float + needs: list[NeedItem], results: list[int], **kwargs: str | int | float ) -> None: """ Function to render the chart of check for standar workproducts linked @@ -292,7 +292,7 @@ def my_pie_linked_standard_workproducts( def my_pie_workproducts_contained_in_exactly_one_workflow( - needs: list[NeedsInfoType], results: list[int], **kwargs: str | int | float + needs: list[NeedItem], results: list[int], **kwargs: str | int | float ) -> None: """ Function to render the chart of check for workproducts that are contained diff --git a/src/extensions/score_metamodel/log.py b/src/extensions/score_metamodel/log.py index 7f433053..53456cf6 100644 --- a/src/extensions/score_metamodel/log.py +++ b/src/extensions/score_metamodel/log.py @@ -15,8 +15,8 @@ from docutils.nodes import Node from sphinx_needs import logging -from sphinx_needs.data import NeedsInfoType from sphinx_needs.logging import SphinxLoggerAdapter +from sphinx_needs.need_item import NeedItem Location = str | tuple[str | None, int | None] | Node | None NewCheck = tuple[str, Location] @@ -32,7 +32,7 @@ def __init__(self, log: SphinxLoggerAdapter, prefix: str): self._new_checks: list[NewCheck] = [] @staticmethod - def _location(need: NeedsInfoType, prefix: str): + def _location(need: NeedItem, prefix: str): def get(key: str) -> Any: return need.get(key, None) @@ -49,7 +49,7 @@ def get(key: str) -> Any: return None def warning_for_option( - self, need: NeedsInfoType, option: str, msg: str, is_new_check: bool = False + self, need: NeedItem, option: str, msg: str, is_new_check: bool = False ): full_msg = f"{need['id']}.{option} ({need.get(option, None)}): {msg}" location = CheckLogger._location(need, self._prefix) @@ -57,7 +57,7 @@ def warning_for_option( def warning_for_link( self, - need: NeedsInfoType, + need: NeedItem, option: str, 
problematic_value: str, allowed_values: list[str], @@ -75,9 +75,7 @@ def warning_for_link( self.warning_for_need(need, msg, is_new_check=is_new_check) - def warning_for_need( - self, need: NeedsInfoType, msg: str, is_new_check: bool = False - ): + def warning_for_need(self, need: NeedItem, msg: str, is_new_check: bool = False): full_msg = f"{need['id']}: {msg}" location = CheckLogger._location(need, self._prefix) self._log_message(full_msg, location, is_new_check) diff --git a/src/extensions/score_metamodel/tests/__init__.py b/src/extensions/score_metamodel/tests/__init__.py index 27055fa2..b6024ee7 100644 --- a/src/extensions/score_metamodel/tests/__init__.py +++ b/src/extensions/score_metamodel/tests/__init__.py @@ -10,13 +10,19 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -from typing import Any +from typing import Any, cast from unittest.mock import MagicMock import pytest from sphinx.util.logging import SphinxLoggerAdapter +from sphinx_needs.data import NeedsInfoType +from sphinx_needs.need_item import ( + NeedItem, + NeedItemSourceUnknown, + NeedsContent, +) -from src.extensions.score_metamodel import CheckLogger, NeedsInfoType +from src.extensions.score_metamodel import CheckLogger def fake_check_logger(): @@ -101,12 +107,81 @@ def assert_info(self, expected_substring: str, expect_location: bool = True): return FakeCheckLogger() -def need(**kwargs: Any) -> NeedsInfoType: - """Convinience function to create a NeedsInfoType object with some defaults.""" +def need(**kwargs: Any) -> NeedItem: + """Convenience function to create a NeedItem object with some defaults.""" - kwargs.setdefault("id", "test_need") - kwargs.setdefault("docname", "docname") - kwargs.setdefault("doctype", "rst") - kwargs.setdefault("lineno", "42") + # Extract links (any list field that's not a core field) + link_keys = { + "links", + } + links = {k: kwargs.pop(k, []) for k in list(link_keys) if k in kwargs} - 
return NeedsInfoType(**kwargs) + # Set defaults for core fields + kwargs.setdefault("id", "test_need") + kwargs.setdefault("type", "requirement") + kwargs.setdefault("title", "") + kwargs.setdefault("status", None) + kwargs.setdefault("tags", []) + kwargs.setdefault("collapse", False) + kwargs.setdefault("hide", False) + kwargs.setdefault("layout", None) + kwargs.setdefault("style", None) + kwargs.setdefault("external_css", "") + kwargs.setdefault("type_name", "") + kwargs.setdefault("type_prefix", "") + kwargs.setdefault("type_color", "") + kwargs.setdefault("type_style", "") + kwargs.setdefault("constraints", []) + kwargs.setdefault("arch", {}) + kwargs.setdefault("sections", ()) + kwargs.setdefault("signature", None) + kwargs.setdefault("has_dead_links", False) + kwargs.setdefault("has_forbidden_dead_links", False) + + # Build core dict (only NeedsInfoType fields) + core_keys = set(NeedsInfoType.__annotations__.keys()) + core = cast(NeedsInfoType, {k: kwargs[k] for k in core_keys}) + + # Source/content keys to exclude from extras + source_content_keys = { + "docname", + "lineno", + "lineno_content", + "external_url", + "is_import", + "is_external", + "doctype", + "content", + "pre_content", + "post_content", + } + + # Extract extras (any remaining kwargs not in core or source/content) + extras = { + k: v + for k, v in kwargs.items() + if k not in core_keys and k not in source_content_keys + } + + # Create source + source = NeedItemSourceUnknown( + docname=kwargs.get("docname", "docname"), + lineno=kwargs.get("lineno", 42), + lineno_content=kwargs.get("lineno_content"), + ) + + # Create content + content = NeedsContent( + doctype=kwargs.get("doctype", ".rst"), + content=kwargs.get("content", ""), + pre_content=kwargs.get("pre_content"), + post_content=kwargs.get("post_content"), + ) + + return NeedItem( + source=source, + content=content, + core=core, + extras=extras, + links=links, + ) diff --git a/src/extensions/score_metamodel/tests/test_metamodel__init__.py 
b/src/extensions/score_metamodel/tests/test_metamodel__init__.py index 6e7c773b..9a2241ed 100644 --- a/src/extensions/score_metamodel/tests/test_metamodel__init__.py +++ b/src/extensions/score_metamodel/tests/test_metamodel__init__.py @@ -13,7 +13,8 @@ import pytest from attribute_plugin import add_test_properties # type: ignore[import-untyped] from sphinx.application import Sphinx -from sphinx_needs.data import NeedsInfoType, NeedsView +from sphinx_needs.data import NeedsView +from sphinx_needs.need_item import NeedItem from src.extensions.score_metamodel import CheckLogger from src.extensions.score_metamodel.__init__ import ( @@ -21,9 +22,10 @@ local_checks, parse_checks_filter, ) +from src.extensions.score_metamodel.tests import need -def dummy_local_check(app: Sphinx, need: NeedsInfoType, log: CheckLogger) -> None: +def dummy_local_check(app: Sphinx, need: NeedItem, log: CheckLogger) -> None: pass @@ -83,3 +85,94 @@ def test_raises_assertion_for_invalid_check(): parse_checks_filter("non_existing_check") assert "non_existing_check" in str(exc_info.value) assert "not one of the defined local or graph checks" in str(exc_info.value) + + +# ============================================================================= +# Tests for the need() helper function +# ============================================================================= + + +class TestNeedHelper: + """Tests for the need() convenience function that creates NeedItem objects.""" + + def test_default_values(self): + """Verify default values are set when no arguments provided.""" + n = need() + assert n["id"] == "test_need" + assert n["type"] == "requirement" + assert n["title"] == "" + assert n["status"] is None + assert n["tags"] == [] + assert n["collapse"] is False + assert n["hide"] is False + + def test_custom_values_override_defaults(self): + """Verify custom values override the defaults.""" + n = need( + id="custom_id", + type="custom_type", + title="Custom Title", + status="valid", + 
tags=["tag1", "tag2"], + ) + assert n["id"] == "custom_id" + assert n["type"] == "custom_type" + assert n["title"] == "Custom Title" + assert n["status"] == "valid" + assert n["tags"] == ["tag1", "tag2"] + + def test_link_fields_extracted(self): + """Verify link fields are extracted and accessible via .get().""" + n = need( + complies=["std_req_1", "std_req_2"], + input=["wp_input_1"], + output=["wp_output_1", "wp_output_2"], + contains=["item_1"], + satisfies=["req_1"], + ) + # Links should be accessible via .get() on NeedItem + assert n.get("complies", []) == ["std_req_1", "std_req_2"] + assert n.get("input", []) == ["wp_input_1"] + assert n.get("output", []) == ["wp_output_1", "wp_output_2"] + assert n.get("contains", []) == ["item_1"] + assert n.get("satisfies", []) == ["req_1"] + + def test_extra_fields_in_extras(self): + """Verify extra fields (not core, not links) go into extras.""" + n = need( + reqtype="Functional", + security="YES", + custom_field="custom_value", + ) + # Extra fields should be accessible via .get() + assert n.get("reqtype") == "Functional" + assert n.get("security") == "YES" + assert n.get("custom_field") == "custom_value" + + def test_empty_links_not_in_kwargs(self): + """Verify that link keys not provided default to empty list.""" + n = need() + # When link not provided, should return empty list + assert n.get("complies", []) == [] + assert n.get("input", []) == [] + assert n.get("output", []) == [] + + def test_combined_core_links_and_extras(self): + """Verify a need with core, link, and extra fields works correctly.""" + n = need( + id="combined_need", + type="workflow", + status="draft", + input=["input_wp"], + output=["output_wp"], + custom_attr="custom_value", + ) + # Core fields + assert n["id"] == "combined_need" + assert n["type"] == "workflow" + assert n["status"] == "draft" + # Link fields + assert n.get("input", []) == ["input_wp"] + assert n.get("output", []) == ["output_wp"] + # Extra fields + assert n.get("custom_attr") == 
"custom_value" diff --git a/src/extensions/score_metamodel/yaml_parser.py b/src/extensions/score_metamodel/yaml_parser.py index de89fd9d..64916a90 100644 --- a/src/extensions/score_metamodel/yaml_parser.py +++ b/src/extensions/score_metamodel/yaml_parser.py @@ -92,6 +92,9 @@ def default_options(): "tags", "arch", "parts", + # Introduced with sphinx-needs 6.3.0 + "is_import", + "constraints", } diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index 876e4fcc..90b1663e 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -27,8 +27,9 @@ from sphinx.application import Sphinx from sphinx.environment import BuildEnvironment -from sphinx_needs.data import NeedsInfoType, NeedsMutable, SphinxNeedsData +from sphinx_needs.data import NeedsMutable, SphinxNeedsData from sphinx_needs.logging import get_logger +from sphinx_needs.need_item import NeedItem from src.extensions.score_source_code_linker.generate_source_code_links_json import ( generate_source_code_links_json, @@ -292,7 +293,7 @@ def setup(app: Sphinx) -> dict[str, str | bool]: } -def find_need(all_needs: NeedsMutable, id: str) -> NeedsInfoType | None: +def find_need(all_needs: NeedsMutable, id: str) -> NeedItem | None: """ Finds a need by ID in the needs collection. 
""" diff --git a/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py b/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py index a400ff77..60bb98f8 100644 --- a/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py +++ b/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py @@ -303,7 +303,7 @@ def basic_needs(): @pytest.fixture() -def example_source_link_text_all_ok(sphinx_base_dir: Path): +def example_source_link_text_all_ok(sphinx_base_dir: Path) -> dict[str, list[NeedLink]]: return { "TREQ_ID_1": [ NeedLink( @@ -511,14 +511,22 @@ def test_source_link_integration_ok( treq_info = needs_data[treq_id] print("Needs Data for", treq_id, ":", treq_info) - # verify codelinks + # verify codelinks (compare as sets since order may vary) expected_code_link = make_source_link( example_source_link_text_all_ok[treq_id] ) actual_source_code_link = treq_info.get( "source_code_link", "no source link" ) - assert expected_code_link == actual_source_code_link, treq_id + expected_links: set[str] = ( + set(expected_code_link.split(", ")) if expected_code_link else set() + ) + actual_links: set[str] = ( + set(actual_source_code_link.split(", ")) + if actual_source_code_link + else set() + ) + assert expected_links == actual_links, treq_id # verify testlinks expected_test_link = make_test_link(example_test_link_text_all_ok[treq_id]) diff --git a/src/requirements.in b/src/requirements.in index 1610e6a4..b616508d 100644 --- a/src/requirements.in +++ b/src/requirements.in @@ -1,9 +1,8 @@ -Sphinx +Sphinx>=8.2.3,<9 # At least 4.2.0, as it fixes a bug in combination with esbonio live preview: # https://github.com/useblocks/sphinx-needs/issues/1350 -# 6 needs some work, as it's a breaking change. 
-sphinx-needs>=4.2.0,<6 +sphinx-needs>=6.3.0,<7 # Due to needed bugfix in 0.3.1 sphinx-collections>=0.3.1 diff --git a/src/requirements.txt b/src/requirements.txt index 632beacd..b9ff06ce 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -23,9 +23,7 @@ attrs==25.4.0 \ --hash=sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373 # via # cattrs - # jsonschema # lsprotocol - # referencing babel==2.17.0 \ --hash=sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d \ --hash=sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2 @@ -35,7 +33,7 @@ babel==2.17.0 \ basedpyright==1.29.2 \ --hash=sha256:12c49186003b9f69a028615da883ef97035ea2119a9e3f93a00091b3a27088a6 \ --hash=sha256:f389e2997de33d038c5065fd85bff351fbdc62fa6d6371c7b947fc3bce8d437d - # via -r /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/external/score_tooling+/python_basics/requirements.txt + # via -r /var/cache/bazel/a6d5860d75352e2ea44147474e2a3020/external/score_tooling+/python_basics/requirements.txt beautifulsoup4==4.14.2 \ --hash=sha256:2a98ab9f944a11acee9cc848508ec28d9228abfd522ef0fad6a02a72e0ded69e \ --hash=sha256:5ef6fa3a8cbece8488d66985560f97ed091e22bbc4e9c2338508a9d5de6d4515 @@ -524,7 +522,7 @@ iniconfig==2.1.0 \ --hash=sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7 \ --hash=sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760 # via - # -r /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/external/score_tooling+/python_basics/requirements.txt + # -r /var/cache/bazel/a6d5860d75352e2ea44147474e2a3020/external/score_tooling+/python_basics/requirements.txt # pytest jinja2==3.1.6 \ --hash=sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d \ @@ -533,14 +531,20 @@ jinja2==3.1.6 \ # myst-parser # sphinx # sphinx-collections -jsonschema==4.25.1 \ - --hash=sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63 \ 
- --hash=sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85 +jsonschema-rs==0.37.4 \ + --hash=sha256:03b34f911e99343fc388651688683010daee538a3cf8cf86a7997bca28fdf16b \ + --hash=sha256:0f17a61deb557faa57dffb9596e4f022873404f935114367788b1eebdec2bb00 \ + --hash=sha256:10fd978a145a6f8d11373879e7d0ff232b409f77c7faf608e6b4549a7f90aaed \ + --hash=sha256:1d3f8c8b376966c19fd4183fa979dbadc9fdd6070f2bfa4d127bdf70946963cc \ + --hash=sha256:393ece7037a0d19fd528f7a67a32749453876468871a0bd2267909a57d8d4e32 \ + --hash=sha256:5975e448092e99d6cc60793a71f0fee516dbf0fd1e6d2f6f1e4689627268f344 \ + --hash=sha256:67f36f1c445c70f9975d17a84ce37f79593f6234d7eb292830d7749e5fa58ff4 \ + --hash=sha256:75f3b4e0707dcb3dccf911ff49e387b4db54957fe1a19d3423015a65e3762057 \ + --hash=sha256:a56d154b638deb947dbd0dfc285c349eb23a877221f2b0496a2dfa25948cc239 \ + --hash=sha256:dedf72e5e673e3af5b9925979fd71484debada61fb7a3dfabf9bbc74b8012664 \ + --hash=sha256:e159075b1846718466998d5a9294c661113b347b8b4749767680a97c8ed2bf4d \ + --hash=sha256:e93a8720f6e858d872dc2882e7d3b3243ee76f7aa4d60048272773d44df466e7 # via sphinx-needs -jsonschema-specifications==2025.9.1 \ - --hash=sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe \ - --hash=sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d - # via jsonschema kiwisolver==1.4.9 \ --hash=sha256:0749fd8f4218ad2e851e11cc4dc05c7cbc0cbc4267bdfdb31782e65aace4ee9c \ --hash=sha256:0763515d4df10edf6d06a3c19734e2566368980d21ebec439f33f9eb936c07b7 \ @@ -830,7 +834,7 @@ nodejs-wheel-binaries==22.16.0 \ --hash=sha256:d695832f026df3a0cf9a089d222225939de9d1b67f8f0a353b79f015aabbe7e2 \ --hash=sha256:dbfccbcd558d2f142ccf66d8c3a098022bf4436db9525b5b8d32169ce185d99e # via - # -r /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/external/score_tooling+/python_basics/requirements.txt + # -r /var/cache/bazel/a6d5860d75352e2ea44147474e2a3020/external/score_tooling+/python_basics/requirements.txt # 
basedpyright numpy==2.3.5 \ --hash=sha256:00dc4e846108a382c5869e77c6ed514394bdeb3403461d25a829711041217d5b \ @@ -914,7 +918,7 @@ packaging==25.0 \ --hash=sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484 \ --hash=sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f # via - # -r /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/external/score_tooling+/python_basics/requirements.txt + # -r /var/cache/bazel/a6d5860d75352e2ea44147474e2a3020/external/score_tooling+/python_basics/requirements.txt # matplotlib # pytest # sphinx @@ -1020,7 +1024,7 @@ pluggy==1.6.0 \ --hash=sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3 \ --hash=sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746 # via - # -r /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/external/score_tooling+/python_basics/requirements.txt + # -r /var/cache/bazel/a6d5860d75352e2ea44147474e2a3020/external/score_tooling+/python_basics/requirements.txt # pytest pycparser==2.23 \ --hash=sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2 \ @@ -1090,7 +1094,7 @@ pyspellchecker==0.8.3 \ pytest==8.3.5 \ --hash=sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820 \ --hash=sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845 - # via -r /home/lla2hi/.cache/bazel/_bazel_lla2hi/e35bb7c4cc72b99eb76653ab839f4f8e/external/score_tooling+/python_basics/requirements.txt + # via -r /var/cache/bazel/a6d5860d75352e2ea44147474e2a3020/external/score_tooling+/python_basics/requirements.txt python-dateutil==2.9.0.post0 \ --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 @@ -1172,12 +1176,6 @@ pyyaml==6.0.3 \ # via # myst-parser # sphinxcontrib-mermaid -referencing==0.37.0 \ - 
--hash=sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231 \ - --hash=sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8 - # via - # jsonschema - # jsonschema-specifications requests==2.32.5 \ --hash=sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6 \ --hash=sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf @@ -1198,125 +1196,6 @@ roman-numerals-py==3.1.0 \ --hash=sha256:9da2ad2fb670bcf24e81070ceb3be72f6c11c440d73bd579fbeca1e9f330954c \ --hash=sha256:be4bf804f083a4ce001b5eb7e3c0862479d10f94c936f6c4e5f250aa5ff5bd2d # via sphinx -rpds-py==0.29.0 \ - --hash=sha256:00e56b12d2199ca96068057e1ae7f9998ab6e99cda82431afafd32f3ec98cca9 \ - --hash=sha256:0248b19405422573621172ab8e3a1f29141362d13d9f72bafa2e28ea0cdca5a2 \ - --hash=sha256:05a2bd42768ea988294ca328206efbcc66e220d2d9b7836ee5712c07ad6340ea \ - --hash=sha256:070befbb868f257d24c3bb350dbd6e2f645e83731f31264b19d7231dd5c396c7 \ - --hash=sha256:0a8896986efaa243ab713c69e6491a4138410f0fe36f2f4c71e18bd5501e8014 \ - --hash=sha256:0ea962671af5cb9a260489e311fa22b2e97103e3f9f0caaea6f81390af96a9ed \ - --hash=sha256:115f48170fd4296a33938d8c11f697f5f26e0472e43d28f35624764173a60e4d \ - --hash=sha256:12597d11d97b8f7e376c88929a6e17acb980e234547c92992f9f7c058f1a7310 \ - --hash=sha256:1585648d0760b88292eecab5181f5651111a69d90eff35d6b78aa32998886a61 \ - --hash=sha256:16e9da2bda9eb17ea318b4c335ec9ac1818e88922cbe03a5743ea0da9ecf74fb \ - --hash=sha256:1a409b0310a566bfd1be82119891fefbdce615ccc8aa558aff7835c27988cbef \ - --hash=sha256:1c3c3e8101bb06e337c88eb0c0ede3187131f19d97d43ea0e1c5407ea74c0cbf \ - --hash=sha256:1d24564a700ef41480a984c5ebed62b74e6ce5860429b98b1fede76049e953e6 \ - --hash=sha256:1de2345af363d25696969befc0c1688a6cb5e8b1d32b515ef84fc245c6cddba3 \ - --hash=sha256:1ea59b23ea931d494459c8338056fe7d93458c0bf3ecc061cd03916505369d55 \ - --hash=sha256:2023473f444752f0f82a58dfcbee040d0a1b3d1b3c2ec40e884bd25db6d117d2 \ - 
--hash=sha256:20c51ae86a0bb9accc9ad4e6cdeec58d5ebb7f1b09dd4466331fc65e1766aae7 \ - --hash=sha256:24a16cb7163933906c62c272de20ea3c228e4542c8c45c1d7dc2b9913e17369a \ - --hash=sha256:24a7231493e3c4a4b30138b50cca089a598e52c34cf60b2f35cebf62f274fdea \ - --hash=sha256:2549d833abdf8275c901313b9e8ff8fba57e50f6a495035a2a4e30621a2f7cc4 \ - --hash=sha256:28de03cf48b8a9e6ec10318f2197b83946ed91e2891f651a109611be4106ac4b \ - --hash=sha256:28fd300326dd21198f311534bdb6d7e989dd09b3418b3a91d54a0f384c700967 \ - --hash=sha256:295ce5ac7f0cf69a651ea75c8f76d02a31f98e5698e82a50a5f4d4982fbbae3b \ - --hash=sha256:2a21deb8e0d1571508c6491ce5ea5e25669b1dd4adf1c9d64b6314842f708b5d \ - --hash=sha256:2aba991e041d031c7939e1358f583ae405a7bf04804ca806b97a5c0e0af1ea5e \ - --hash=sha256:2b8e54d6e61f3ecd3abe032065ce83ea63417a24f437e4a3d73d2f85ce7b7cfe \ - --hash=sha256:2d6fb2ad1c36f91c4646989811e84b1ea5e0c3cf9690b826b6e32b7965853a63 \ - --hash=sha256:33ca7bdfedd83339ca55da3a5e1527ee5870d4b8369456b5777b197756f3ca22 \ - --hash=sha256:37d94eadf764d16b9a04307f2ab1d7af6dc28774bbe0535c9323101e14877b4c \ - --hash=sha256:3897924d3f9a0361472d884051f9a2460358f9a45b1d85a39a158d2f8f1ad71c \ - --hash=sha256:3919a3bbecee589300ed25000b6944174e07cd20db70552159207b3f4bbb45b8 \ - --hash=sha256:394d27e4453d3b4d82bb85665dc1fcf4b0badc30fc84282defed71643b50e1a1 \ - --hash=sha256:3fbd4e9aebf110473a420dea85a238b254cf8a15acb04b22a5a6b5ce8925b760 \ - --hash=sha256:3fd2164d73812026ce970d44c3ebd51e019d2a26a4425a5dcbdfa93a34abc383 \ - --hash=sha256:40f65470919dc189c833e86b2c4bd21bd355f98436a2cef9e0a9a92aebc8e57e \ - --hash=sha256:4448dad428f28a6a767c3e3b80cde3446a22a0efbddaa2360f4bb4dc836d0688 \ - --hash=sha256:44a91e0ab77bdc0004b43261a4b8cd6d6b451e8d443754cfda830002b5745b32 \ - --hash=sha256:453783477aa4f2d9104c4b59b08c871431647cb7af51b549bbf2d9eb9c827756 \ - --hash=sha256:4a097b7f7f7274164566ae90a221fd725363c0e9d243e2e9ed43d195ccc5495c \ - --hash=sha256:4aa195e5804d32c682e453b34474f411ca108e4291c6a0f824ebdc30a91c973c \ - 
--hash=sha256:4ae4b88c6617e1b9e5038ab3fccd7bac0842fdda2b703117b2aa99bc85379113 \ - --hash=sha256:521807963971a23996ddaf764c682b3e46459b3c58ccd79fefbe16718db43154 \ - --hash=sha256:534dc9df211387547267ccdb42253aa30527482acb38dd9b21c5c115d66a96d2 \ - --hash=sha256:539eb77eb043afcc45314d1be09ea6d6cafb3addc73e0547c171c6d636957f60 \ - --hash=sha256:55d827b2ae95425d3be9bc9a5838b6c29d664924f98146557f7715e331d06df8 \ - --hash=sha256:56838e1cd9174dc23c5691ee29f1d1be9eab357f27efef6bded1328b23e1ced2 \ - --hash=sha256:5a572911cd053137bbff8e3a52d31c5d2dba51d3a67ad902629c70185f3f2181 \ - --hash=sha256:5c9546cfdd5d45e562cc0444b6dddc191e625c62e866bf567a2c69487c7ad28a \ - --hash=sha256:5cc58aac218826d054c7da7f95821eba94125d88be673ff44267bb89d12a5866 \ - --hash=sha256:6410e66f02803600edb0b1889541f4b5cc298a5ccda0ad789cc50ef23b54813e \ - --hash=sha256:66786c3fb1d8de416a7fa8e1cb1ec6ba0a745b2b0eee42f9b7daa26f1a495545 \ - --hash=sha256:6e97846e9800a5d0fe7be4d008f0c93d0feeb2700da7b1f7528dabafb31dfadb \ - --hash=sha256:7033c1010b1f57bb44d8067e8c25aa6fa2e944dbf46ccc8c92b25043839c3fd2 \ - --hash=sha256:715b67eac317bf1c7657508170a3e011a1ea6ccb1c9d5f296e20ba14196be6b3 \ - --hash=sha256:72fdfd5ff8992e4636621826371e3ac5f3e3b8323e9d0e48378e9c13c3dac9d0 \ - --hash=sha256:76054d540061eda273274f3d13a21a4abdde90e13eaefdc205db37c05230efce \ - --hash=sha256:76fe96632d53f3bf0ea31ede2f53bbe3540cc2736d4aec3b3801b0458499ef3a \ - --hash=sha256:7971bdb7bf4ee0f7e6f67fa4c7fbc6019d9850cc977d126904392d363f6f8318 \ - --hash=sha256:799156ef1f3529ed82c36eb012b5d7a4cf4b6ef556dd7cc192148991d07206ae \ - --hash=sha256:7cdc0490374e31cedefefaa1520d5fe38e82fde8748cbc926e7284574c714d6b \ - --hash=sha256:7d9128ec9d8cecda6f044001fde4fb71ea7c24325336612ef8179091eb9596b9 \ - --hash=sha256:7f437026dbbc3f08c99cc41a5b2570c6e1a1ddbe48ab19a9b814254128d4ea7a \ - --hash=sha256:80fdf53d36e6c72819993e35d1ebeeb8e8fc688d0c6c2b391b55e335b3afba5a \ - --hash=sha256:8238d1d310283e87376c12f658b61e1ee23a14c0e54c7c0ce953efdbdc72deed \ - 
--hash=sha256:89ca2e673ddd5bde9b386da9a0aac0cab0e76f40c8f0aaf0d6311b6bbf2aa311 \ - --hash=sha256:8ae33ad9ce580c7a47452c3b3f7d8a9095ef6208e0a0c7e4e2384f9fc5bf8212 \ - --hash=sha256:8c5a8ecaa44ce2d8d9d20a68a2483a74c07f05d72e94a4dff88906c8807e77b0 \ - --hash=sha256:8e5bb73ffc029820f4348e9b66b3027493ae00bca6629129cd433fd7a76308ee \ - --hash=sha256:90f30d15f45048448b8da21c41703b31c61119c06c216a1bf8c245812a0f0c17 \ - --hash=sha256:923248a56dd8d158389a28934f6f69ebf89f218ef96a6b216a9be6861804d3f4 \ - --hash=sha256:9459a33f077130dbb2c7c3cea72ee9932271fb3126404ba2a2661e4fe9eb7b79 \ - --hash=sha256:97c817863ffc397f1e6a6e9d2d89fe5408c0a9922dac0329672fb0f35c867ea5 \ - --hash=sha256:9b9c764a11fd637e0322a488560533112837f5334ffeb48b1be20f6d98a7b437 \ - --hash=sha256:9ba8028597e824854f0f1733d8b964e914ae3003b22a10c2c664cb6927e0feb9 \ - --hash=sha256:9efe71687d6427737a0a2de9ca1c0a216510e6cd08925c44162be23ed7bed2d5 \ - --hash=sha256:9f84c549746a5be3bc7415830747a3a0312573afc9f95785eb35228bb17742ec \ - --hash=sha256:a0891cfd8db43e085c0ab93ab7e9b0c8fee84780d436d3b266b113e51e79f954 \ - --hash=sha256:a110e14508fd26fd2e472bb541f37c209409876ba601cf57e739e87d8a53cf95 \ - --hash=sha256:a5d9da3ff5af1ca1249b1adb8ef0573b94c76e6ae880ba1852f033bf429d4588 \ - --hash=sha256:a738f2da2f565989401bd6fd0b15990a4d1523c6d7fe83f300b7e7d17212feca \ - --hash=sha256:acd82a9e39082dc5f4492d15a6b6c8599aa21db5c35aaf7d6889aea16502c07d \ - --hash=sha256:ad7bd570be92695d89285a4b373006930715b78d96449f686af422debb4d3949 \ - --hash=sha256:b016eddf00dca7944721bf0cd85b6af7f6c4efaf83ee0b37c4133bd39757a8c7 \ - --hash=sha256:b1581fcde18fcdf42ea2403a16a6b646f8eb1e58d7f90a0ce693da441f76942e \ - --hash=sha256:b58f5c77f1af888b5fd1876c9a0d9858f6f88a39c9dd7c073a88e57e577da66d \ - --hash=sha256:b5f6134faf54b3cb83375db0f113506f8b7770785be1f95a631e7e2892101977 \ - --hash=sha256:b9cf2359a4fca87cfb6801fae83a76aedf66ee1254a7a151f1341632acf67f1b \ - --hash=sha256:ba5e1aeaf8dd6d8f6caba1f5539cddda87d511331714b7b5fc908b6cfc3636b7 \ - 
--hash=sha256:bb78b3a0d31ac1bde132c67015a809948db751cb4e92cdb3f0b242e430b6ed0d \ - --hash=sha256:bdb67151ea81fcf02d8f494703fb728d4d34d24556cbff5f417d74f6f5792e7c \ - --hash=sha256:c07d107b7316088f1ac0177a7661ca0c6670d443f6fe72e836069025e6266761 \ - --hash=sha256:c4695dd224212f6105db7ea62197144230b808d6b2bba52238906a2762f1d1e7 \ - --hash=sha256:c5523b0009e7c3c1263471b69d8da1c7d41b3ecb4cb62ef72be206b92040a950 \ - --hash=sha256:c661132ab2fb4eeede2ef69670fd60da5235209874d001a98f1542f31f2a8a94 \ - --hash=sha256:d37812c3da8e06f2bb35b3cf10e4a7b68e776a706c13058997238762b4e07f4f \ - --hash=sha256:d456e64724a075441e4ed648d7f154dc62e9aabff29bcdf723d0c00e9e1d352f \ - --hash=sha256:d472cf73efe5726a067dce63eebe8215b14beabea7c12606fd9994267b3cfe2b \ - --hash=sha256:d583d4403bcbf10cffc3ab5cee23d7643fcc960dff85973fd3c2d6c86e8dbb0c \ - --hash=sha256:de73e40ebc04dd5d9556f50180395322193a78ec247e637e741c1b954810f295 \ - --hash=sha256:def48ff59f181130f1a2cb7c517d16328efac3ec03951cca40c1dc2049747e83 \ - --hash=sha256:e6596b93c010d386ae46c9fba9bfc9fc5965fa8228edeac51576299182c2e31c \ - --hash=sha256:e71136fd0612556b35c575dc2726ae04a1669e6a6c378f2240312cf5d1a2ab10 \ - --hash=sha256:e7fa2ccc312bbd91e43aa5e0869e46bc03278a3dddb8d58833150a18b0f0283a \ - --hash=sha256:ea7173df5d86f625f8dde6d5929629ad811ed8decda3b60ae603903839ac9ac0 \ - --hash=sha256:f3b1b87a237cb2dba4db18bcfaaa44ba4cd5936b91121b62292ff21df577fc43 \ - --hash=sha256:f475f103488312e9bd4000bc890a95955a07b2d0b6e8884aef4be56132adbbf1 \ - --hash=sha256:f49196aec7c4b406495f60e6f947ad71f317a765f956d74bbd83996b9edc0352 \ - --hash=sha256:f49d41559cebd608042fdcf54ba597a4a7555b49ad5c1c0c03e0af82692661cd \ - --hash=sha256:f7728653900035fb7b8d06e1e5900545d8088efc9d5d4545782da7df03ec803f \ - --hash=sha256:f9f436aee28d13b9ad2c764fc273e0457e37c2e61529a07b928346b219fcde3b \ - --hash=sha256:fc31a07ed352e5462d3ee1b22e89285f4ce97d5266f6d1169da1142e78045626 \ - --hash=sha256:fc935f6b20b0c9f919a8ff024739174522abd331978f750a74bb68abd117bd19 \ - 
--hash=sha256:fcae1770b401167f8b9e1e3f566562e6966ffa9ce63639916248a9e25fa8a244 \ - --hash=sha256:fd7951c964069039acc9d67a8ff1f0a7f34845ae180ca542b17dc1456b1f1808 \ - --hash=sha256:fe55fe686908f50154d1dc599232016e50c243b438c3b7432f24e2895b0e5359 - # via - # jsonschema - # referencing ruamel-yaml==0.18.16 \ --hash=sha256:048f26d64245bae57a4f9ef6feb5b552a386830ef7a826f235ffb804c59efbba \ --hash=sha256:a6e587512f3c998b2225d68aa1f35111c29fad14aed561a26e73fab729ec5e5a @@ -1437,9 +1316,9 @@ sphinx-design==0.6.1 \ --hash=sha256:b11f37db1a802a183d61b159d9a202314d4d2fe29c163437001324fe2f19549c \ --hash=sha256:b44eea3719386d04d765c1a8257caca2b3e6f8421d7b3a5e742c0fd45f84e632 # via -r src/requirements.in -sphinx-needs[plotting]==5.1.0 \ - --hash=sha256:23a0ca1dfe733a0a58e884b59ce53a8b63a530f0ac87ae5ab0d40f05f853fbe7 \ - --hash=sha256:7adf3763478e91171146918d8af4a22aa0fc062a73856f1ebeb6822a62cbe215 +sphinx-needs[plotting]==6.3.0 \ + --hash=sha256:761901765844c69f6181580065b099b31016895a86962a25e7860a9f5bea54a2 \ + --hash=sha256:a8a1cccc1525b94551e7a2f9525bf36eaae88654abceb5047b5470d57472b346 # via # -r src/requirements.in # needs-config-writer @@ -1539,7 +1418,7 @@ typing-extensions==4.15.0 \ # cattrs # pydata-sphinx-theme # pygithub - # referencing + # sphinx-needs # starlette urllib3==2.5.0 \ --hash=sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760 \ From fe212a0e24a864edea530e068d21e8e8c1998f89 Mon Sep 17 00:00:00 2001 From: Andreas Zwinkau <95761648+a-zw@users.noreply.github.com> Date: Thu, 29 Jan 2026 14:44:56 +0100 Subject: [PATCH 191/231] Source links as Bazel target (#358) * Have scan_code attribute instead of sourcelinks_json rule * Adapt to Sphinx-Needs 6 --------- Signed-off-by: Andreas Zwinkau <95761648+a-zw@users.noreply.github.com> --- BUILD | 5 +- MODULE.bazel | 2 +- docs.bzl | 95 +++++++++++++++-- docs/concepts/docs_deps.rst | 41 +++++++ docs/concepts/index.rst | 1 + docs/how-to/source_to_doc_links.rst | 46 +++++++- scripts/README.md | 3 + 
scripts_bazel/BUILD | 38 +++++++ scripts_bazel/README.md | 3 + scripts_bazel/generate_sourcelinks_cli.py | 72 +++++++++++++ scripts_bazel/merge_sourcelinks.py | 62 +++++++++++ scripts_bazel/tests/BUILD | 32 ++++++ .../tests/generate_sourcelinks_cli_test.py | 74 +++++++++++++ scripts_bazel/tests/merge_sourcelinks_test.py | 100 ++++++++++++++++++ src/BUILD | 28 +++-- src/extensions/score_draw_uml_funcs/BUILD | 10 +- src/extensions/score_header_service/BUILD | 12 ++- src/extensions/score_layout/BUILD | 22 ++-- src/extensions/score_metamodel/BUILD | 40 +++++-- src/extensions/score_source_code_linker/BUILD | 51 +++++---- .../score_source_code_linker/__init__.py | 24 ++++- .../score_source_code_linker/needlinks.py | 7 ++ .../tests/test_codelink.py | 56 +++++++++- src/extensions/score_sphinx_bundle/BUILD | 8 +- src/extensions/score_sync_toml/BUILD | 15 ++- src/find_runfiles/BUILD | 8 +- src/helper_lib/BUILD | 11 +- 27 files changed, 783 insertions(+), 83 deletions(-) create mode 100644 docs/concepts/docs_deps.rst create mode 100644 scripts/README.md create mode 100644 scripts_bazel/BUILD create mode 100644 scripts_bazel/README.md create mode 100644 scripts_bazel/generate_sourcelinks_cli.py create mode 100644 scripts_bazel/merge_sourcelinks.py create mode 100644 scripts_bazel/tests/BUILD create mode 100644 scripts_bazel/tests/generate_sourcelinks_cli_test.py create mode 100644 scripts_bazel/tests/merge_sourcelinks_test.py diff --git a/BUILD b/BUILD index 21a92e42..ff6103ec 100644 --- a/BUILD +++ b/BUILD @@ -11,7 +11,6 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -load("@aspect_rules_py//py:defs.bzl", "py_library") load("@score_tooling//:defs.bzl", "cli_helper", "copyright_checker") load("//:docs.bzl", "docs") @@ -33,6 +32,10 @@ docs( data = [ "@score_process//:needs_json", ], + scan_code = [ + "//scripts_bazel:sources", + "//src:all_sources", + ], source_dir = "docs", ) diff --git a/MODULE.bazel 
b/MODULE.bazel index c7f9c56a..33c76fc9 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -104,7 +104,7 @@ bazel_dep(name = "score_process", version = "1.4.2") # Add Linter bazel_dep(name = "rules_multitool", version = "1.9.0") -bazel_dep(name = "score_tooling", version = "1.0.2") +bazel_dep(name = "score_tooling", version = "1.0.5") multitool_root = use_extension("@rules_multitool//multitool:extension.bzl", "multitool") use_repo(multitool_root, "actionlint_hub", "multitool", "ruff_hub", "shellcheck_hub", "yamlfmt_hub") diff --git a/docs.bzl b/docs.bzl index 00f1c676..53073af4 100644 --- a/docs.bzl +++ b/docs.bzl @@ -11,6 +11,10 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* +""" +Easy streamlined way for S-CORE docs-as-code. +""" + # Multiple approaches are available to build the same documentation output: # # 1. **Esbonio via IDE support (`ide_support` target)**: @@ -37,12 +41,10 @@ # # For user-facing documentation, refer to `/README.md`. 
-load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") -load("@pip_process//:requirements.bzl", "all_requirements", "requirement") +load("@aspect_rules_py//py:defs.bzl", "py_binary") +load("@pip_process//:requirements.bzl", "all_requirements") load("@rules_pkg//pkg:mappings.bzl", "pkg_files", "strip_prefix") -load("@rules_pkg//pkg:tar.bzl", "pkg_tar") load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary", "sphinx_docs") -load("@rules_python//sphinxdocs:sphinx_docs_library.bzl", "sphinx_docs_library") load("@score_tooling//:defs.bzl", "score_virtualenv") def _rewrite_needs_json_to_docs_sources(labels): @@ -56,10 +58,47 @@ def _rewrite_needs_json_to_docs_sources(labels): out.append(s) return out -def docs(source_dir = "docs", data = [], deps = []): +def _rewrite_needs_json_to_sourcelinks(labels): + """Replace '@repo//:needs_json' -> '@repo//:sourcelinks_json' for every item.""" + out = [] + for x in labels: + s = str(x) + if s.endswith("//:needs_json"): + out.append(s.replace("//:needs_json", "//:sourcelinks_json")) + else: + out.append(s) + return out + +def _merge_sourcelinks(name, sourcelinks): + """Merge multiple sourcelinks JSON files into a single file. + + Args: + name: Name for the merged sourcelinks target + sourcelinks: List of sourcelinks JSON file targets """ - Creates all targets related to documentation. + + native.genrule( + name = name, + srcs = sourcelinks, + outs = [name + ".json"], + cmd = """ + $(location @score_docs_as_code//scripts_bazel:merge_sourcelinks) \ + --output $@ \ + $(SRCS) + """, + tools = ["@score_docs_as_code//scripts_bazel:merge_sourcelinks"], + ) + +def docs(source_dir = "docs", data = [], deps = [], scan_code = []): + """Creates all targets related to documentation. + By using this function, you'll get any and all updates for documentation targets in one place. + + Args: + source_dir: The source directory containing documentation files. Defaults to "docs". 
+ data: Additional data files to include in the documentation build. + deps: Additional dependencies for the documentation build. + scan_code: List of code targets to scan for source code links. """ call_path = native.package_name() @@ -100,18 +139,23 @@ def docs(source_dir = "docs", data = [], deps = []): visibility = ["//visibility:public"], ) + _sourcelinks_json(name = "sourcelinks_json", srcs = scan_code) + data_with_docs_sources = _rewrite_needs_json_to_docs_sources(data) + additional_combo_sourcelinks = _rewrite_needs_json_to_sourcelinks(data) + _merge_sourcelinks(name = "merged_sourcelinks", sourcelinks = [":sourcelinks_json"] + additional_combo_sourcelinks) py_binary( name = "docs", tags = ["cli_help=Build documentation:\nbazel run //:docs"], srcs = ["@score_docs_as_code//src:incremental.py"], - data = data, + data = data + [":sourcelinks_json"], deps = deps, env = { "SOURCE_DIRECTORY": source_dir, "DATA": str(data), "ACTION": "incremental", + "SCORE_SOURCELINKS": "$(location :sourcelinks_json)", }, ) @@ -119,12 +163,13 @@ def docs(source_dir = "docs", data = [], deps = []): name = "docs_combo_experimental", tags = ["cli_help=Build full documentation with all dependencies:\nbazel run //:docs_combo_experimental"], srcs = ["@score_docs_as_code//src:incremental.py"], - data = data_with_docs_sources, + data = data_with_docs_sources + [":merged_sourcelinks"], deps = deps, env = { "SOURCE_DIRECTORY": source_dir, "DATA": str(data_with_docs_sources), "ACTION": "incremental", + "SCORE_SOURCELINKS": "$(location :merged_sourcelinks)", }, ) @@ -132,12 +177,13 @@ def docs(source_dir = "docs", data = [], deps = []): name = "docs_check", tags = ["cli_help=Verify documentation:\nbazel run //:docs_check"], srcs = ["@score_docs_as_code//src:incremental.py"], - data = data, + data = data + [":sourcelinks_json"], deps = deps, env = { "SOURCE_DIRECTORY": source_dir, "DATA": str(data), "ACTION": "check", + "SCORE_SOURCELINKS": "$(location :sourcelinks_json)", }, ) @@ -145,12 
+191,13 @@ def docs(source_dir = "docs", data = [], deps = []): name = "live_preview", tags = ["cli_help=Live preview documentation in the browser:\nbazel run //:live_preview"], srcs = ["@score_docs_as_code//src:incremental.py"], - data = data, + data = data + [":sourcelinks_json"], deps = deps, env = { "SOURCE_DIRECTORY": source_dir, "DATA": str(data), "ACTION": "live_preview", + "SCORE_SOURCELINKS": "$(location :sourcelinks_json)", }, ) @@ -158,12 +205,13 @@ def docs(source_dir = "docs", data = [], deps = []): name = "live_preview_combo_experimental", tags = ["cli_help=Live preview full documentation with all dependencies in the browser:\nbazel run //:live_preview_combo_experimental"], srcs = ["@score_docs_as_code//src:incremental.py"], - data = data_with_docs_sources, + data = data_with_docs_sources + [":merged_sourcelinks"], deps = deps, env = { "SOURCE_DIRECTORY": source_dir, "DATA": str(data_with_docs_sources), "ACTION": "live_preview", + "SCORE_SOURCELINKS": "$(location :merged_sourcelinks)", }, ) @@ -193,3 +241,28 @@ def docs(source_dir = "docs", data = [], deps = []): tools = data, visibility = ["//visibility:public"], ) + +def _sourcelinks_json(name, srcs): + """ + Creates a target that generates a JSON file with source code links. + + See https://eclipse-score.github.io/docs-as-code/main/how-to/source_to_doc_links.html + + Args: + name: Name of the target + srcs: Source files to scan for traceability tags + """ + output_file = name + ".json" + + native.genrule( + name = name, + srcs = srcs, + outs = [output_file], + cmd = """ + $(location @score_docs_as_code//scripts_bazel:generate_sourcelinks) \ + --output $@ \ + $(SRCS) + """, + tools = ["@score_docs_as_code//scripts_bazel:generate_sourcelinks"], + visibility = ["//visibility:public"], + ) diff --git a/docs/concepts/docs_deps.rst b/docs/concepts/docs_deps.rst new file mode 100644 index 00000000..12aca5b4 --- /dev/null +++ b/docs/concepts/docs_deps.rst @@ -0,0 +1,41 @@ + +.. 
_docs_dependencies: + +========================== +Docs Dependencies +========================== + +When running ``bazel run :docs``, the documentation build system orchestrates multiple interconnected dependencies to produce HTML documentation. + +1. Gather inputs (Bazel may do this parallelized): + + * Extract source code links from files via ``sourcelinks_json`` rule. + + * Optionally, merge source links using the ``merge_sourcelinks`` rule. + + * Needs (requirements) are gathered from various ``needs_json`` targets specified in the ``data`` attribute. + +2. Documentation sources are read from the specified source directory (default: ``docs/``). + Sphinx processes the documentation sources along with the merged data to generate the final HTML output. + +.. plantuml:: + + @startuml + left to right direction + + collections "Documentation Sources" as DocsSource + collections "Needs JSON Targets" as NeedsTargets + collections "Source Code Links" as SourceLinks + artifact "Merge Data" as Merge + process "Sphinx Processing" as Sphinx + artifact "HTML Output" as HTMLOutput + collections "S-CORE extensions" as SCoreExt + + DocsSource --> Sphinx + NeedsTargets --> Sphinx + SCoreExt --> Sphinx + SourceLinks --> Merge + Merge --> Sphinx + Sphinx --> HTMLOutput + + @enduml diff --git a/docs/concepts/index.rst b/docs/concepts/index.rst index 6357bdf5..9351e15f 100644 --- a/docs/concepts/index.rst +++ b/docs/concepts/index.rst @@ -9,3 +9,4 @@ Here you find explanations how and why docs-as-code works the way it does. :maxdepth: 1 bidirectional_traceability + docs_deps diff --git a/docs/how-to/source_to_doc_links.rst b/docs/how-to/source_to_doc_links.rst index f36866a3..13b722be 100644 --- a/docs/how-to/source_to_doc_links.rst +++ b/docs/how-to/source_to_doc_links.rst @@ -2,14 +2,50 @@ Reference Docs in Source Code ============================= In your C++/Rust/Python source code, you want to reference requirements (needs). 
-The docs-as-code tool will create backlinks in the documentation. +The docs-as-code tool will create backlinks in the documentation in two steps: + +1. You add a special comment in your source code that references the need ID. +2. Scan for those comments and provide needs links to your documentation. + +For an example result, look at the attribute ``source_code_link`` +of :need:`tool_req__docs_common_attr_title`. + +Comments in Source Code +----------------------- Use a comment and start with ``req-Id:`` or ``req-traceability:`` followed by the need ID. .. code-block:: python - # req-Id: TOOL_REQ__EXAMPLE_ID - # req-traceability: TOOL_REQ__EXAMPLE_ID + # req-Id: TOOL_REQ__EXAMPLE_ID + # req-traceability: TOOL_REQ__EXAMPLE_ID -For an example, look at the attribute ``source_code_link`` -of :need:`tool_req__docs_common_attr_title`. +For other languages (C++, Rust, etc.), use the appropriate comment syntax. + +Scanning Source Code for Links +------------------------------ + +In you ``BUILD`` files, you specify which source files to scan +with ``filegroup`` or ``glob`` or whatever Bazel mechanism you prefer. +Finally, pass the scan results to the ``docs`` rule as ``scan_code`` attribute. + +.. code-block:: starlark + :emphasize-lines: 15 + :linenos: + + filegroup( + name = "some_sources", + srcs = [ + "foo.py", + "bar.cpp", + "data.yaml", + ] + glob(["subdir/**/.py"]), + ) + + docs( + data = [ + "@score_process//:needs_json", + ], + source_dir = "docs", + scan_code = [":some_sources"], + ) diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 00000000..e28c9eef --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,3 @@ +# Scripts + +The scripts directory is only for local development (linters) so far. 
diff --git a/scripts_bazel/BUILD b/scripts_bazel/BUILD new file mode 100644 index 00000000..f332a4af --- /dev/null +++ b/scripts_bazel/BUILD @@ -0,0 +1,38 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +load("@aspect_rules_py//py:defs.bzl", "py_binary") +load("@pip_process//:requirements.bzl", "all_requirements") + +filegroup( + name = "sources", + srcs = glob(["**/*.py"]), + visibility = ["//visibility:public"], +) + +py_binary( + name = "generate_sourcelinks", + srcs = ["generate_sourcelinks_cli.py"], + main = "generate_sourcelinks_cli.py", + visibility = ["//visibility:public"], + deps = [ + "//src/extensions/score_source_code_linker", + ] + all_requirements, +) + +py_binary( + name = "merge_sourcelinks", + srcs = ["merge_sourcelinks.py"], + main = "merge_sourcelinks.py", + visibility = ["//visibility:public"], +) diff --git a/scripts_bazel/README.md b/scripts_bazel/README.md new file mode 100644 index 00000000..7728a173 --- /dev/null +++ b/scripts_bazel/README.md @@ -0,0 +1,3 @@ +# Scripts Bazel + +This folder contains executables to be used within Bazel rules. 
diff --git a/scripts_bazel/generate_sourcelinks_cli.py b/scripts_bazel/generate_sourcelinks_cli.py new file mode 100644 index 00000000..4291b97c --- /dev/null +++ b/scripts_bazel/generate_sourcelinks_cli.py @@ -0,0 +1,72 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +""" +CLI tool to generate source code links JSON from source files. +This is used by the Bazel sourcelinks_json rule to create a JSON file +with all source code links for documentation needs. +""" + +import argparse +import logging +import sys +from pathlib import Path + +from src.extensions.score_source_code_linker.generate_source_code_links_json import ( + _extract_references_from_file, # pyright: ignore[reportPrivateUsage] TODO: move it out of the extension and into this script +) +from src.extensions.score_source_code_linker.needlinks import ( + store_source_code_links_json, +) + +logging.basicConfig(level=logging.INFO, format="%(message)s") +logger = logging.getLogger(__name__) + + +def main(): + parser = argparse.ArgumentParser( + description="Generate source code links JSON from source files" + ) + parser.add_argument( + "--output", + required=True, + type=Path, + help="Output JSON file path", + ) + parser.add_argument( + "files", + nargs="*", + type=Path, + help="Source files to scan for traceability tags", + ) + + args = parser.parse_args() + + all_need_references = [] + for file_path in args.files: + abs_file_path = file_path.resolve() + assert 
abs_file_path.exists(), abs_file_path + references = _extract_references_from_file( + abs_file_path.parent, Path(abs_file_path.name) + ) + all_need_references.extend(references) + + store_source_code_links_json(args.output, all_need_references) + logger.info( + f"Found {len(all_need_references)} need references in {len(args.files)} files" + ) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts_bazel/merge_sourcelinks.py b/scripts_bazel/merge_sourcelinks.py new file mode 100644 index 00000000..f194e19c --- /dev/null +++ b/scripts_bazel/merge_sourcelinks.py @@ -0,0 +1,62 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +""" +Merge multiple sourcelinks JSON files into a single JSON file. 
+""" + +import argparse +import json +import logging +import sys +from pathlib import Path + +logging.basicConfig(level=logging.INFO, format="%(message)s") +logger = logging.getLogger(__name__) + + +def main(): + parser = argparse.ArgumentParser( + description="Merge multiple sourcelinks JSON files into one" + ) + parser.add_argument( + "--output", + required=True, + type=Path, + help="Output merged JSON file path", + ) + parser.add_argument( + "files", + nargs="*", + type=Path, + help="Input JSON files to merge", + ) + + args = parser.parse_args() + + merged = [] + for json_file in args.files: + with open(json_file) as f: + data = json.load(f) + assert isinstance(data, list), repr(data) + merged.extend(data) + + with open(args.output, "w") as f: + json.dump(merged, f, indent=2, ensure_ascii=False) + + logger.info(f"Merged {len(args.files)} files into {len(merged)} total references") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts_bazel/tests/BUILD b/scripts_bazel/tests/BUILD new file mode 100644 index 00000000..2290a6a2 --- /dev/null +++ b/scripts_bazel/tests/BUILD @@ -0,0 +1,32 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +load("@pip_process//:requirements.bzl", "all_requirements") +load("@score_tooling//:defs.bzl", "score_py_pytest") + +score_py_pytest( + name = "generate_sourcelinks_cli_test", + srcs = ["generate_sourcelinks_cli_test.py"], + deps = [ + "//scripts_bazel:generate_sourcelinks", + "//src/extensions/score_source_code_linker", + ] + all_requirements, +) + +score_py_pytest( + name = "merge_sourcelinks_test", + srcs = ["merge_sourcelinks_test.py"], + deps = [ + "//scripts_bazel:merge_sourcelinks", + ] + all_requirements, +) diff --git a/scripts_bazel/tests/generate_sourcelinks_cli_test.py b/scripts_bazel/tests/generate_sourcelinks_cli_test.py new file mode 100644 index 00000000..f25acc5a --- /dev/null +++ b/scripts_bazel/tests/generate_sourcelinks_cli_test.py @@ -0,0 +1,74 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +"""Tests for generate_sourcelinks_cli.py""" + +import json +import subprocess +import sys +from pathlib import Path + +_MY_PATH = Path(__file__).parent + + +def test_generate_sourcelinks_cli_basic(tmp_path: Path) -> None: + """Test basic functionality of generate_sourcelinks_cli.""" + # Create a test source file with a traceability tag + test_file = tmp_path / "test_source.py" + test_file.write_text( + """ +# Some code here +# req-Id: tool_req__docs_arch_types +def some_function(): + pass +""" + ) + + output_file = tmp_path / "output.json" + + # Execute the script + result = subprocess.run( + [ + sys.executable, + _MY_PATH.parent / "generate_sourcelinks_cli.py", + "--output", + str(output_file), + str(test_file), + ], + ) + + assert result.returncode == 0 + assert output_file.exists() + + # Check the output content + with open(output_file) as f: + data: list[dict[str, str | int]] = json.load(f) + assert isinstance(data, list) + assert len(data) > 0 + + # Verify schema of each entry + for entry in data: + assert "file" in entry + assert "line" in entry + assert "tag" in entry + assert "need" in entry + assert "full_line" in entry + + # Verify types + assert isinstance(entry["file"], str) + assert isinstance(entry["line"], int) + assert isinstance(entry["tag"], str) + assert isinstance(entry["need"], str) + assert isinstance(entry["full_line"], str) + + assert any(entry["need"] == "tool_req__docs_arch_types" for entry in data) diff --git a/scripts_bazel/tests/merge_sourcelinks_test.py b/scripts_bazel/tests/merge_sourcelinks_test.py new file mode 100644 index 00000000..9f92cfd6 --- /dev/null +++ b/scripts_bazel/tests/merge_sourcelinks_test.py @@ -0,0 
+1,100 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +"""Tests for merge_sourcelinks.py""" + +import json +import subprocess +import sys +from pathlib import Path + +_MY_PATH = Path(__file__).parent + + +def test_merge_sourcelinks_basic(tmp_path: Path) -> None: + """Test basic merge functionality.""" + # Create test JSON files with correct schema + file1 = tmp_path / "links1.json" + file1.write_text( + json.dumps( + [ + { + "file": "test1.py", + "line": 10, + "tag": "# req-Id:", + "need": "tool_req__docs_arch_types", + "full_line": "# req-Id: tool_req__docs_arch_types", + } + ] + ) + ) + + file2 = tmp_path / "links2.json" + file2.write_text( + json.dumps( + [ + { + "file": "test2.py", + "line": 20, + "tag": "# req-Id:", + "need": "gd_req__req_validity", + "full_line": "# req-Id: gd_req__req_validity", + } + ] + ) + ) + + output_file = tmp_path / "merged.json" + + result = subprocess.run( + [ + sys.executable, + _MY_PATH.parent / "merge_sourcelinks.py", + "--output", + str(output_file), + str(file1), + str(file2), + ], + ) + + assert result.returncode == 0 + assert output_file.exists() + + with open(output_file) as f: + data: list[dict[str, str | int]] = json.load(f) + assert isinstance(data, list) + assert len(data) == 2 + + # Verify schema of merged entries + for entry in data: + assert "file" in entry + assert "line" in entry + assert "tag" in entry + assert "need" in entry + assert "full_line" in entry + + assert 
isinstance(entry["file"], str) + assert isinstance(entry["line"], int) + assert isinstance(entry["tag"], str) + assert isinstance(entry["need"], str) + assert isinstance(entry["full_line"], str) + + # Verify specific entries + assert any( + entry["need"] == "tool_req__docs_arch_types" and entry["file"] == "test1.py" + for entry in data + ) + assert any( + entry["need"] == "gd_req__req_validity" and entry["file"] == "test2.py" + for entry in data + ) diff --git a/src/BUILD b/src/BUILD index ddf506de..f45f14fd 100644 --- a/src/BUILD +++ b/src/BUILD @@ -10,14 +10,11 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* + load("@aspect_rules_lint//format:defs.bzl", "format_multirun", "format_test") -load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") -load("@pip_process//:requirements.bzl", "all_requirements", "requirement") -load("@rules_pkg//pkg:mappings.bzl", "pkg_files") -load("@rules_pkg//pkg:tar.bzl", "pkg_tar") +load("@aspect_rules_py//py:defs.bzl", "py_library") load("@rules_python//python:pip.bzl", "compile_pip_requirements") -load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary") -load("@score_tooling//:defs.bzl", "dash_license_checker", "score_virtualenv") +load("@score_tooling//:defs.bzl", "dash_license_checker") # These are only exported because they're passed as files to the //docs.bzl # macros, and thus must be visible to other packages. 
They should only be @@ -28,6 +25,25 @@ exports_files( "requirements.txt", "incremental.py", "dummy.py", + "generate_sourcelinks_cli.py", + ], + visibility = ["//visibility:public"], +) + +filegroup( + name = "all_sources", + srcs = glob( + ["*.py"], + ) + [ + "//src/extensions/score_draw_uml_funcs:all_sources", + "//src/extensions/score_header_service:all_sources", + "//src/extensions/score_layout:all_sources", + "//src/extensions/score_metamodel:all_sources", + "//src/extensions/score_source_code_linker:all_sources", + "//src/extensions/score_sphinx_bundle:all_sources", + "//src/extensions/score_sync_toml:all_sources", + "//src/find_runfiles:all_sources", + "//src/helper_lib:all_sources", ], visibility = ["//visibility:public"], ) diff --git a/src/extensions/score_draw_uml_funcs/BUILD b/src/extensions/score_draw_uml_funcs/BUILD index 21d8622e..b16000a6 100644 --- a/src/extensions/score_draw_uml_funcs/BUILD +++ b/src/extensions/score_draw_uml_funcs/BUILD @@ -13,11 +13,15 @@ load("@aspect_rules_py//py:defs.bzl", "py_library") load("@pip_process//:requirements.bzl", "all_requirements") +filegroup( + name = "all_sources", + srcs = glob(["*.py"]), + visibility = ["//visibility:public"], +) + py_library( name = "score_draw_uml_funcs", - srcs = glob( - ["*.py"], - ), + srcs = [":all_sources"], imports = ["."], visibility = ["//visibility:public"], # TODO: Figure out if all requirements are needed or if we can break it down a bit diff --git a/src/extensions/score_header_service/BUILD b/src/extensions/score_header_service/BUILD index d7a7b65c..48185811 100644 --- a/src/extensions/score_header_service/BUILD +++ b/src/extensions/score_header_service/BUILD @@ -10,16 +10,20 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* + load("@aspect_rules_py//py:defs.bzl", "py_library") load("@pip_process//:requirements.bzl", "all_requirements") load("@score_tooling//:defs.bzl", "score_py_pytest") +filegroup( + 
name = "all_sources", + srcs = glob(["*.py"]), + visibility = ["//visibility:public"], +) + py_library( name = "score_header_service", - srcs = glob( - ["*.py"], - exclude = ["test/**"], - ), + srcs = [":all_sources"], imports = ["."], visibility = ["//visibility:public"], # TODO: Figure out if all requirements are needed or if we can break it down a bit diff --git a/src/extensions/score_layout/BUILD b/src/extensions/score_layout/BUILD index d6d47274..8e21188d 100644 --- a/src/extensions/score_layout/BUILD +++ b/src/extensions/score_layout/BUILD @@ -13,17 +13,23 @@ load("@aspect_rules_py//py:defs.bzl", "py_library") load("@pip_process//:requirements.bzl", "requirement") -py_library( - name = "score_layout", +filegroup( + name = "all_sources", srcs = glob([ "*.py", - # Adding assets as src instead of data ensures they are included in the - # library as they would normally be, and we do not need to go through bazel's - # RUNFILES_DIR mechanism to access them. This makes the code much simpler. - # And it makes the library far easier extractable from bazel into a normal - # python package if we ever want to do that. - "assets/**", + "assets/**/*", ]), + visibility = ["//visibility:public"], +) + +py_library( + name = "score_layout", + srcs = [":all_sources"], + # Adding assets as src instead of data ensures they are included in the + # library as they would normally be, and we do not need to go through bazel's + # RUNFILES_DIR mechanism to access them. This makes the code much simpler. + # And it makes the library far easier extractable from bazel into a normal + # python package if we ever want to do that. 
imports = ["."], visibility = ["//visibility:public"], deps = [requirement("sphinx")], diff --git a/src/extensions/score_metamodel/BUILD b/src/extensions/score_metamodel/BUILD index 40cb645f..30392ca0 100644 --- a/src/extensions/score_metamodel/BUILD +++ b/src/extensions/score_metamodel/BUILD @@ -10,20 +10,44 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* + load("@aspect_rules_py//py:defs.bzl", "py_library") load("@pip_process//:requirements.bzl", "all_requirements") load("@score_tooling//:defs.bzl", "score_py_pytest") -py_library( - name = "score_metamodel", +filegroup( + name = "sources", + srcs = glob( + [ + "*.py", + "*.yaml", + "*.json", + "checks/*.py", + ], + ), +) + +filegroup( + name = "tests", srcs = glob( - ["**/*.py"], - ) + ["metamodel.yaml"], - data = glob(["*.yaml"]), # Needed to remove 'resolving of symlink' in score_metamodel.__init__ - imports = [ - ".", + ["tests/*.py"], + ), +) + +filegroup( + name = "all_sources", + srcs = [ + ":sources", + ":tests", ], visibility = ["//visibility:public"], +) + +py_library( + name = "score_metamodel", + srcs = [":sources"], + imports = ["."], + visibility = ["//visibility:public"], # TODO: Figure out if all requirements are needed or if we can break it down a bit deps = all_requirements + ["@score_docs_as_code//src/helper_lib"], ) @@ -38,6 +62,6 @@ score_py_pytest( "tests/**/*.rst", "tests/**/*.yaml", ], - ), + ) + ["tests/rst/conf.py"], deps = [":score_metamodel"], ) diff --git a/src/extensions/score_source_code_linker/BUILD b/src/extensions/score_source_code_linker/BUILD index 7a662df3..758fc786 100644 --- a/src/extensions/score_source_code_linker/BUILD +++ b/src/extensions/score_source_code_linker/BUILD @@ -10,28 +10,39 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -#******************************************************************************* -# 
Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* -load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") -load("@pip_process//:requirements.bzl", "all_requirements") + +load("@aspect_rules_py//py:defs.bzl", "py_library") load("@score_tooling//:defs.bzl", "score_py_pytest") -py_library( - name = "score_source_code_linker", +filegroup( + name = "sources", srcs = glob( - ["**/*.py"], - exclude = ["tests/*.py"], + ["*.py"], ), +) + +filegroup( + name = "tests", + srcs = glob( + [ + "tests/*.py", + "tests/*.json", + ], + ), +) + +filegroup( + name = "all_sources", + srcs = [ + ":sources", + ":tests", + ], + visibility = ["//visibility:public"], +) + +py_library( + name = "score_source_code_linker", + srcs = [":sources"], imports = ["."], visibility = ["//visibility:public"], deps = ["@score_docs_as_code//src/helper_lib"], @@ -58,10 +69,10 @@ score_py_pytest( "-s", "-vv", ], - data = glob(["**/*.json"]), + data = glob(["tests/*.json"]), imports = ["."], deps = [ ":score_source_code_linker", - "@score_docs_as_code//src/extensions/score_metamodel", + "//src/extensions/score_metamodel", ], ) diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index 90b1663e..094ebf4a 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -20,6 +20,7 @@ # req-Id: tool_req__docs_dd_link_source_code_link # This whole directory implements the above mentioned tool requirements +import os from collections import 
defaultdict from copy import deepcopy from pathlib import Path @@ -130,9 +131,18 @@ def build_and_save_combined_file(outdir: Path): Reads the saved partial caches of codelink & testlink Builds the combined JSON cache & saves it """ - source_code_links = load_source_code_links_json( - get_cache_filename(outdir, "score_source_code_linker_cache.json") - ) + source_code_links_json = os.environ.get("SCORE_SOURCELINKS") + if not source_code_links_json: + # Fallback to the obsolete way of doing source code links, + # just in case someone is not using the docs(sourcelinks=...) attribute. + # TODO: Remove this once backwards compatibility is not needed anymore. + source_code_links_json = get_cache_filename( + outdir, "score_source_code_linker_cache.json" + ) + else: + source_code_links_json = Path(source_code_links_json) + + source_code_links = load_source_code_links_json(source_code_links_json) test_code_links = load_test_xml_parsed_json( get_cache_filename(outdir, "score_xml_parser_cache.json") ) @@ -172,6 +182,12 @@ def setup_source_code_linker(app: Sphinx, ws_root: Path): }, } + score_sourcelinks_json = os.environ.get("SCORE_SOURCELINKS") + if score_sourcelinks_json: + # No need to generate the JSON file if this env var is set + # because it points to an existing file with the needed data. 
+ return + scl_cache_json = get_cache_filename( app.outdir, "score_source_code_linker_cache.json" ) @@ -196,6 +212,7 @@ def register_test_code_linker(app: Sphinx): def setup_test_code_linker(app: Sphinx, env: BuildEnvironment): + # TODO instead of implementing our own caching here, we should rely on Bazel tl_cache_json = get_cache_filename(app.outdir, "score_xml_parser_cache.json") if ( not tl_cache_json.exists() @@ -245,6 +262,7 @@ def register_combined_linker(app: Sphinx): def setup_combined_linker(app: Sphinx, _: BuildEnvironment): grouped_cache = get_cache_filename(app.outdir, "score_scl_grouped_cache.json") gruped_cache_exists = grouped_cache.exists() + # TODO this cache should be done via Bazel if not gruped_cache_exists or not app.config.skip_rescanning_via_source_code_linker: LOGGER.debug( "Did not find combined json 'score_scl_grouped_cache.json' in _build." diff --git a/src/extensions/score_source_code_linker/needlinks.py b/src/extensions/score_source_code_linker/needlinks.py index c890b13e..34814729 100644 --- a/src/extensions/score_source_code_linker/needlinks.py +++ b/src/extensions/score_source_code_linker/needlinks.py @@ -13,6 +13,7 @@ # req-Id: tool_req__docs_dd_link_source_code_link import json +import os from dataclasses import asdict, dataclass from pathlib import Path from typing import Any @@ -80,6 +81,12 @@ def store_source_code_links_json(file: Path, needlist: list[NeedLink]): def load_source_code_links_json(file: Path) -> list[NeedLink]: + if not file.is_absolute(): + # use env variable set by Bazel + ws_root = os.environ.get("BUILD_WORKSPACE_DIRECTORY") + if ws_root: + file = Path(ws_root) / file + links: list[NeedLink] = json.loads( file.read_text(encoding="utf-8"), object_hook=needlink_decoder, diff --git a/src/extensions/score_source_code_linker/tests/test_codelink.py b/src/extensions/score_source_code_linker/tests/test_codelink.py index 9e360d1a..29ddc723 100644 --- a/src/extensions/score_source_code_linker/tests/test_codelink.py +++ 
b/src/extensions/score_source_code_linker/tests/test_codelink.py @@ -21,9 +21,12 @@ import pytest from attribute_plugin import add_test_properties # type: ignore[import-untyped] -from sphinx_needs.data import NeedsMutable - -from src.extensions.score_metamodel.tests import need as test_need +from sphinx_needs.data import NeedsInfoType, NeedsMutable +from sphinx_needs.need_item import ( + NeedItem, + NeedItemSourceUnknown, + NeedsContent, +) # Import the module under test # Note: You'll need to adjust these imports based on your actual module structure @@ -56,6 +59,53 @@ """ +def test_need(**kwargs: Any) -> NeedItem: + """Convenience function to create a NeedItem object with some defaults.""" + + kwargs.setdefault("id", "test_need") + kwargs.setdefault("type", "requirement") + kwargs.setdefault("title", "") + kwargs.setdefault("status", None) + kwargs.setdefault("tags", []) + kwargs.setdefault("collapse", False) + kwargs.setdefault("hide", False) + kwargs.setdefault("layout", None) + kwargs.setdefault("style", None) + kwargs.setdefault("external_css", "") + kwargs.setdefault("type_name", "") + kwargs.setdefault("type_prefix", "") + kwargs.setdefault("type_color", "") + kwargs.setdefault("type_style", "") + kwargs.setdefault("constraints", []) + kwargs.setdefault("arch", {}) + kwargs.setdefault("sections", ()) + kwargs.setdefault("signature", None) + kwargs.setdefault("has_dead_links", False) + kwargs.setdefault("has_forbidden_dead_links", False) + + # Create source + source = NeedItemSourceUnknown( + docname=kwargs.get("docname", "docname"), + lineno=kwargs.get("lineno", 42), + lineno_content=kwargs.get("lineno_content"), + ) + + # Create content + content = NeedsContent( + doctype=kwargs.get("doctype", ".rst"), + content=kwargs.get("content", ""), + pre_content=kwargs.get("pre_content"), + post_content=kwargs.get("post_content"), + ) + return NeedItem( + source=source, + content=content, + core=NeedsInfoType(**kwargs), + extras={}, + links={}, + ) + + def 
encode_comment(s: str) -> str: return s.replace(" ", "-----", 1) diff --git a/src/extensions/score_sphinx_bundle/BUILD b/src/extensions/score_sphinx_bundle/BUILD index 10aa50a3..26bb0289 100644 --- a/src/extensions/score_sphinx_bundle/BUILD +++ b/src/extensions/score_sphinx_bundle/BUILD @@ -13,9 +13,15 @@ load("@aspect_rules_py//py:defs.bzl", "py_library") load("@pip_process//:requirements.bzl", "all_requirements") +filegroup( + name = "all_sources", + srcs = ["__init__.py"], + visibility = ["//visibility:public"], +) + py_library( name = "score_sphinx_bundle", - srcs = ["__init__.py"], + srcs = [":all_sources"], visibility = ["//visibility:public"], deps = all_requirements + [ "@score_docs_as_code//src/extensions:score_plantuml", diff --git a/src/extensions/score_sync_toml/BUILD b/src/extensions/score_sync_toml/BUILD index e9be6926..fdb8acb3 100644 --- a/src/extensions/score_sync_toml/BUILD +++ b/src/extensions/score_sync_toml/BUILD @@ -10,15 +10,22 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* + load("@aspect_rules_py//py:defs.bzl", "py_library") load("@pip_process//:requirements.bzl", "requirement") +filegroup( + name = "all_sources", + srcs = glob([ + "*.py", + "*.toml", + ]), + visibility = ["//visibility:public"], +) + py_library( name = "score_sync_toml", - srcs = [ - "__init__.py", - "shared.toml", - ], + srcs = [":all_sources"], imports = ["."], visibility = ["//visibility:public"], deps = [ diff --git a/src/find_runfiles/BUILD b/src/find_runfiles/BUILD index a286c57f..2112815c 100644 --- a/src/find_runfiles/BUILD +++ b/src/find_runfiles/BUILD @@ -14,9 +14,15 @@ load("@aspect_rules_py//py:defs.bzl", "py_library") load("@pip_process//:requirements.bzl", "all_requirements") load("@score_tooling//:defs.bzl", "score_py_pytest") +filegroup( + name = "all_sources", + srcs = glob(["*.py"]), + visibility = ["//visibility:public"], +) + py_library( name = "find_runfiles", - srcs = 
["__init__.py"], + srcs = [":all_sources"], imports = ["."], visibility = ["//visibility:public"], ) diff --git a/src/helper_lib/BUILD b/src/helper_lib/BUILD index 61d94175..c2c3161f 100644 --- a/src/helper_lib/BUILD +++ b/src/helper_lib/BUILD @@ -14,12 +14,15 @@ load("@aspect_rules_py//py:defs.bzl", "py_library") load("@pip_process//:requirements.bzl", "all_requirements") load("@score_tooling//:defs.bzl", "score_py_pytest") +filegroup( + name = "all_sources", + srcs = glob(["*.py"]), + visibility = ["//visibility:public"], +) + py_library( name = "helper_lib", - srcs = [ - "__init__.py", - "additional_functions.py", - ], + srcs = [":all_sources"], imports = ["."], visibility = ["//visibility:public"], deps = ["@score_docs_as_code//src/extensions/score_source_code_linker:source_code_linker_helpers"], From 072693fb121a8fba5e92b6a66d8f3561f526ba25 Mon Sep 17 00:00:00 2001 From: RolandJentschETAS <135332348+RolandJentschETAS@users.noreply.github.com> Date: Mon, 2 Feb 2026 10:01:23 +0100 Subject: [PATCH 192/231] bugfix: linking of plat_saf_dfa (#374) --- src/extensions/score_metamodel/metamodel.yaml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 451f25d8..422c4258 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -435,9 +435,9 @@ needs_types: status: ^(valid|invalid)$ mandatory_links: includes: ^logic_arc_int(_op)*__.+$ - fulfils: feat_req + fulfils: feat_req # TODO: make it mandatory optional_links: - belongs_to: feat # for evaluation + belongs_to: feat # make it mandatory for evaluation tags: - architecture_element - architecture_view @@ -457,9 +457,9 @@ needs_types: # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ mandatory_links: - fulfils: feat_req + fulfils: feat_req # TODO: make it mandatory optional_links: - belongs_to: feat # for evaluation 
+ belongs_to: feat # make in mandatory for evaluation tags: - architecture_view - architecture_element @@ -481,7 +481,7 @@ needs_types: status: ^(valid|invalid)$ optional_links: includes: logic_arc_int_op - fulfils: feat_req + fulfils: feat_req # TODO: make it mandatory tags: - architecture_element - architecture_view @@ -583,7 +583,7 @@ needs_types: # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ optional_links: - fulfils: comp_req + fulfils: comp_req # TODO: make it mandatory implements: logic_arc_int, real_arc_int_op # deprecated, views does not implement anything. Now moved to comp includes: comp # deprecated uses: logic_arc_int, real_arc_int_op @@ -606,7 +606,7 @@ needs_types: # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ optional_links: - fulfils: comp_req + fulfils: comp_req # TODO: make it mandatory belongs_to: comp # TODO: make it mandatory tags: - architecture_view @@ -726,7 +726,7 @@ needs_types: # DFA (Dependent Failure Analysis) # No requirement!! 
plat_saf_dfa: - title: Feature Dependent Failure Analysis + title: Platform Dependent Failure Analysis mandatory_options: failure_id: ^.*$ failure_effect: ^.*$ @@ -738,7 +738,7 @@ needs_types: optional_options: mitigation_issue: ^https://github.com/.*$ optional_links: - mitigated_by: feat_req, aou_req + mitigated_by: stkh_req, aou_req parts: 3 # req-Id: tool_req__docs_saf_types From a145da7b53c7196ebb45433bbbde36c85dd9ba4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= <maximilian.pollak@qorix.com> Date: Tue, 3 Feb 2026 11:38:39 +0100 Subject: [PATCH 193/231] fix(test): consumer handle present git override (#380) --- src/tests/test_consumer.py | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/src/tests/test_consumer.py b/src/tests/test_consumer.py index c57e700e..81416f5f 100644 --- a/src/tests/test_consumer.py +++ b/src/tests/test_consumer.py @@ -189,6 +189,28 @@ def filter_repos(repo_filter: str | None) -> list[ConsumerRepo]: return filtered_repos +def comment_out_git_override(module_content: str) -> str: + """ + Comment out existing git_override blocks for score_docs_as_code if found + """ + + pattern = ( + r"^(git_override\s*\(\s*" + r"[^)]*?module_name\s*=\s*['\"]score_docs_as_code['\"]" + r"[^)]*\)\s*)" + ) + + def comment_out_block(match: re.Match[str]) -> str: + # Comment out each line of the found block + return "\n".join("# " + line for line in match.group(0).splitlines()) + + # First, comment out old override(s) + out = re.sub( + pattern, comment_out_block, module_content, flags=re.MULTILINE | re.DOTALL + ) + return out.strip() + + def replace_bazel_dep_with_local_override(module_content: str) -> str: """ """ @@ -498,10 +520,6 @@ def run_cmd( return results, is_success -def run_test_commands(): - pass - - def setup_test_environment(sphinx_base_dir: Path, pytestconfig: Config): """Set up the test environment and return necessary paths and metadata.""" git_root = find_git_root() 
@@ -587,9 +605,10 @@ def prepare_repo_overrides( module_orig = f.read() # Prepare override versions - module_local_override = replace_bazel_dep_with_local_override(module_orig) + module_orig_clean = comment_out_git_override(module_orig) + module_local_override = replace_bazel_dep_with_local_override(module_orig_clean) module_git_override = replace_bazel_dep_with_git_override( - module_orig, current_hash, gh_url + module_orig_clean, current_hash, gh_url ) return module_local_override, module_git_override From 3be12881aad2a7fe821ba352cb170445a8ce0c47 Mon Sep 17 00:00:00 2001 From: RolandJentschETAS <135332348+RolandJentschETAS@users.noreply.github.com> Date: Tue, 3 Feb 2026 14:23:37 +0100 Subject: [PATCH 194/231] cleanup meta model (#379) --- MODULE.bazel | 5 ++++ src/extensions/score_metamodel/metamodel.yaml | 26 ++++++++----------- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 33c76fc9..46a9ad28 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -101,6 +101,11 @@ http_file( # Note: requirements were last aligned with 1.2.0, # the switch to 1.3.1 is purely to drop the dependency on docs-as-code 1.x. 
bazel_dep(name = "score_process", version = "1.4.2") +git_override( + module_name = "score_process", + commit = "43b3a13eae17f2e539fb8cca2beedb69717b2e12", + remote = "https://github.com/eclipse-score/process_description.git", +) # Add Linter bazel_dep(name = "rules_multitool", version = "1.9.0") diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 422c4258..398195c7 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -324,6 +324,7 @@ needs_types: mandatory_links: # req-Id: tool_req__docs_req_link_satisfies_allowed satisfies: feat_req + belongs_to: comp optional_options: codelink: ^.*$ testlink: ^.*$ @@ -332,8 +333,6 @@ needs_types: # req-Id: tool_req__docs_req_attr_testcov testcovered: ^(YES|NO)$ hash: ^.*$ - optional_links: - belongs_to: comp # TODO: make it mandatory tags: - requirement - requirement_excl_process @@ -435,8 +434,7 @@ needs_types: status: ^(valid|invalid)$ mandatory_links: includes: ^logic_arc_int(_op)*__.+$ - fulfils: feat_req # TODO: make it mandatory - optional_links: + fulfils: feat_req belongs_to: feat # make it mandatory for evaluation tags: - architecture_element @@ -457,9 +455,8 @@ needs_types: # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ mandatory_links: - fulfils: feat_req # TODO: make it mandatory - optional_links: - belongs_to: feat # make in mandatory for evaluation + fulfils: feat_req + belongs_to: feat tags: - architecture_view - architecture_element @@ -481,7 +478,7 @@ needs_types: status: ^(valid|invalid)$ optional_links: includes: logic_arc_int_op - fulfils: feat_req # TODO: make it mandatory + fulfils: feat_req tags: - architecture_element - architecture_view @@ -582,12 +579,11 @@ needs_types: safety: ^(QM|ASIL_B)$ # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ + mandatory_links: + belongs_to: comp + fulfils: comp_req optional_links: - fulfils: comp_req # TODO: 
make it mandatory - implements: logic_arc_int, real_arc_int_op # deprecated, views does not implement anything. Now moved to comp - includes: comp # deprecated uses: logic_arc_int, real_arc_int_op - belongs_to: comp # TODO: make it mandatory tags: - architecture_view parts: 3 @@ -605,9 +601,9 @@ needs_types: safety: ^(QM|ASIL_B)$ # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ - optional_links: - fulfils: comp_req # TODO: make it mandatory - belongs_to: comp # TODO: make it mandatory + mandatory_links: + belongs_to: comp + fulfils: comp_req tags: - architecture_view parts: 3 From 4b8ddcdae4b69d6d4108b5a549d94f07b2b1d325 Mon Sep 17 00:00:00 2001 From: Piotr Korkus <piotr.korkus.ext@qorix.ai> Date: Wed, 4 Feb 2026 10:17:34 +0100 Subject: [PATCH 195/231] cicd: build docs on release (#377) --- .github/workflows/test_and_docs.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/test_and_docs.yml b/.github/workflows/test_and_docs.yml index c5243895..371ab3e7 100644 --- a/.github/workflows/test_and_docs.yml +++ b/.github/workflows/test_and_docs.yml @@ -27,6 +27,8 @@ on: - main merge_group: types: [checks_requested] + release: + types: [created] jobs: docs-verify: From a6cf1d9233ad03ef0cd4add5a35a1194bf44b07f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= <maximilian.pollak@qorix.com> Date: Wed, 4 Feb 2026 10:20:11 +0100 Subject: [PATCH 196/231] Disable ubproject.toml warning on diff (#381) --- src/extensions/score_sync_toml/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/extensions/score_sync_toml/__init__.py b/src/extensions/score_sync_toml/__init__.py index f767e81d..79ebfb7a 100644 --- a/src/extensions/score_sync_toml/__init__.py +++ b/src/extensions/score_sync_toml/__init__.py @@ -34,7 +34,9 @@ def setup(app: Sphinx) -> dict[str, str | bool]: app.config.needscfg_exclude_defaults = True """Exclude default values from the generated configuration.""" - 
app.config.needscfg_warn_on_diff = True + # This is disabled for right now as it causes a lot of issues + # While we are not using the generated file anywhere + app.config.needscfg_warn_on_diff = False """Running Sphinx with -W will fail the CI for uncommitted TOML changes.""" app.config.needscfg_merge_toml_files = [ From 4e124002c108c442e8cbb0bad06a2ccd65169268 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= <maximilian.pollak@qorix.com> Date: Wed, 4 Feb 2026 11:48:08 +0100 Subject: [PATCH 197/231] Update Rules_Py & Consolidate internal structure (#378) Change all 'os.getenv[RUNFILES]' to now use the find_runfiles file. This is in preperation for the rules_python upgrade which changes this implementation again. The solution to the upgrade of the Runfiles was based on the work from @NEOatNHNG and his commit: dbb6261e7707406e69c953c5c52319c9ef602030 And also done with the help of @PiotrKorkus --- MODULE.bazel | 4 +- src/BUILD | 1 - src/extensions/BUILD | 1 + src/extensions/score_metamodel/BUILD | 4 +- .../score_metamodel/external_needs.py | 43 +++---- src/extensions/score_plantuml.py | 38 +----- src/extensions/score_sphinx_bundle/BUILD | 1 - src/find_runfiles/BUILD | 37 ------ src/find_runfiles/__init__.py | 117 ------------------ src/find_runfiles/test_find_runfiles.py | 93 -------------- src/helper_lib/BUILD | 5 +- src/helper_lib/__init__.py | 32 +++++ src/helper_lib/test_helper_lib.py | 54 ++++++++ src/requirements.in | 3 +- src/requirements.txt | 52 ++++---- 15 files changed, 141 insertions(+), 344 deletions(-) delete mode 100644 src/find_runfiles/BUILD delete mode 100644 src/find_runfiles/__init__.py delete mode 100644 src/find_runfiles/test_find_runfiles.py diff --git a/MODULE.bazel b/MODULE.bazel index 46a9ad28..bd71747c 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -30,7 +30,7 @@ bazel_dep(name = "rules_pkg", version = "1.1.0") # Python version # ############################################################################### 
-bazel_dep(name = "rules_python", version = "1.4.1") +bazel_dep(name = "rules_python", version = "1.8.3") PYTHON_VERSION = "3.12" @@ -109,7 +109,7 @@ git_override( # Add Linter bazel_dep(name = "rules_multitool", version = "1.9.0") -bazel_dep(name = "score_tooling", version = "1.0.5") +bazel_dep(name = "score_tooling", version = "1.1.0") multitool_root = use_extension("@rules_multitool//multitool:extension.bzl", "multitool") use_repo(multitool_root, "actionlint_hub", "multitool", "ruff_hub", "shellcheck_hub", "yamlfmt_hub") diff --git a/src/BUILD b/src/BUILD index f45f14fd..476b03b4 100644 --- a/src/BUILD +++ b/src/BUILD @@ -42,7 +42,6 @@ filegroup( "//src/extensions/score_source_code_linker:all_sources", "//src/extensions/score_sphinx_bundle:all_sources", "//src/extensions/score_sync_toml:all_sources", - "//src/find_runfiles:all_sources", "//src/helper_lib:all_sources", ], visibility = ["//visibility:public"], diff --git a/src/extensions/BUILD b/src/extensions/BUILD index d4db4293..4f5d6c91 100644 --- a/src/extensions/BUILD +++ b/src/extensions/BUILD @@ -20,4 +20,5 @@ py_library( srcs = ["@score_docs_as_code//src/extensions:score_plantuml.py"], imports = ["."], visibility = ["//visibility:public"], + deps = ["@score_docs_as_code//src/helper_lib"], ) diff --git a/src/extensions/score_metamodel/BUILD b/src/extensions/score_metamodel/BUILD index 30392ca0..4a5267e6 100644 --- a/src/extensions/score_metamodel/BUILD +++ b/src/extensions/score_metamodel/BUILD @@ -49,7 +49,9 @@ py_library( imports = ["."], visibility = ["//visibility:public"], # TODO: Figure out if all requirements are needed or if we can break it down a bit - deps = all_requirements + ["@score_docs_as_code//src/helper_lib"], + deps = all_requirements + [ + "@score_docs_as_code//src/helper_lib", + ], ) score_py_pytest( diff --git a/src/extensions/score_metamodel/external_needs.py b/src/extensions/score_metamodel/external_needs.py index 0c48a56a..a3973666 100644 --- 
a/src/extensions/score_metamodel/external_needs.py +++ b/src/extensions/score_metamodel/external_needs.py @@ -12,9 +12,7 @@ # ******************************************************************************* import json -import os import subprocess -import sys from dataclasses import dataclass from pathlib import Path @@ -23,6 +21,8 @@ from sphinx.util import logging from sphinx_needs.needsfile import NeedsList +from src.helper_lib import get_runfiles_dir + logger = logging.getLogger(__name__) @@ -61,6 +61,7 @@ def parse_external_needs_sources_from_DATA(v: str) -> list[ExternalNeedsSource]: return [] logger.debug(f"Parsing external needs sources: {v}") + data = json.loads(v) res = [res for el in data if (res := _parse_bazel_external_need(el))] @@ -138,37 +139,21 @@ def temp(self: NeedsList): def get_external_needs_source(external_needs_source: str) -> list[ExternalNeedsSource]: - bazel = external_needs_source or os.getenv("RUNFILES_DIR") - - if bazel: + if external_needs_source: + # Path taken for all invocations via `bazel` external_needs = parse_external_needs_sources_from_DATA(external_needs_source) else: + # This is the path taken for anything that doesn't + # run via `bazel` e.g. esbonio or other direct executions external_needs = parse_external_needs_sources_from_bazel_query() # pyright: ignore[reportAny] - return external_needs def add_external_needs_json(e: ExternalNeedsSource, config: Config): - json_file = f"{e.bazel_module}+/{e.target}/_build/needs/needs.json" - if r := os.getenv("RUNFILES_DIR"): - logger.debug("Using runfiles to determine external needs JSON file.") - fixed_json_file = Path(r) / json_file - else: - logger.debug( - "Running outside bazel. " - + "Determining git root for external needs JSON file." 
- ) - git_root = Path.cwd().resolve() - while not (git_root / ".git").exists(): - git_root = git_root.parent - if git_root == Path("/"): - sys.exit("Could not find git root.") - logger.debug(f"Git root found: {git_root}") - fixed_json_file = git_root / "bazel-bin" / "ide_support.runfiles" / json_file - - logger.debug(f"Fixed JSON file path: {json_file} -> {fixed_json_file}") - json_file = fixed_json_file - + json_file_raw = f"{e.bazel_module}+/{e.target}/_build/needs/needs.json" + r = get_runfiles_dir() + json_file = r / json_file_raw + logger.debug(f"External needs.json: {json_file}") try: needs_json_data = json.loads(Path(json_file).read_text(encoding="utf-8")) # pyright: ignore[reportAny] except FileNotFoundError: @@ -192,11 +177,11 @@ def add_external_needs_json(e: ExternalNeedsSource, config: Config): def add_external_docs_sources(e: ExternalNeedsSource, config: Config): # Note that bazel does NOT write the files under e.target! # {e.bazel_module}+ matches the original git layout! - if r := os.getenv("RUNFILES_DIR"): - docs_source_path = Path(r) / f"{e.bazel_module}+" - else: + r = get_runfiles_dir() + if "ide_support.runfiles" in str(r): logger.error("Combo builds are currently only supported with Bazel.") return + docs_source_path = Path(r) / f"{e.bazel_module}+" if "collections" not in config: config.collections = {} diff --git a/src/extensions/score_plantuml.py b/src/extensions/score_plantuml.py index c452315e..3dbbd138 100644 --- a/src/extensions/score_plantuml.py +++ b/src/extensions/score_plantuml.py @@ -24,48 +24,14 @@ In addition it sets common PlantUML options, like output to svg_obj. """ -import os -import sys from pathlib import Path from sphinx.application import Sphinx from sphinx.util import logging -logger = logging.getLogger(__name__) - - -def get_runfiles_dir() -> Path: - if r := os.getenv("RUNFILES_DIR"): - # Runfiles are only available when running in Bazel. - # bazel build and bazel run are both supported. - # i.e. 
`bazel build //:docs` and `bazel run //:docs`. - logger.debug("Using runfiles to determine plantuml path.") +from src.helper_lib import get_runfiles_dir - runfiles_dir = Path(r) - - else: - # The only way to land here is when running from within the virtual - # environment created by the `:ide_support` rule in the BUILD file. - # i.e. esbonio or manual sphinx-build execution within the virtual - # environment. - # We'll still use the plantuml binary from the bazel build. - # But we need to find it first. - logger.debug("Running outside bazel.") - - git_root = Path.cwd().resolve() - while not (git_root / ".git").exists(): - git_root = git_root.parent - if git_root == Path("/"): - sys.exit("Could not find git root.") - - runfiles_dir = git_root / "bazel-bin" / "ide_support.runfiles" - - if not runfiles_dir.exists(): - sys.exit( - f"Could not find runfiles_dir at {runfiles_dir}. " - "Have a look at README.md for instructions on how to build docs." - ) - return runfiles_dir +logger = logging.getLogger(__name__) def find_correct_path(runfiles: Path) -> Path: diff --git a/src/extensions/score_sphinx_bundle/BUILD b/src/extensions/score_sphinx_bundle/BUILD index 26bb0289..53c3b721 100644 --- a/src/extensions/score_sphinx_bundle/BUILD +++ b/src/extensions/score_sphinx_bundle/BUILD @@ -31,7 +31,6 @@ py_library( "@score_docs_as_code//src/extensions/score_metamodel", "@score_docs_as_code//src/extensions/score_source_code_linker", "@score_docs_as_code//src/extensions/score_sync_toml", - "@score_docs_as_code//src/find_runfiles", "@score_docs_as_code//src/helper_lib", ], ) diff --git a/src/find_runfiles/BUILD b/src/find_runfiles/BUILD deleted file mode 100644 index 2112815c..00000000 --- a/src/find_runfiles/BUILD +++ /dev/null @@ -1,37 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding 
copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* -load("@aspect_rules_py//py:defs.bzl", "py_library") -load("@pip_process//:requirements.bzl", "all_requirements") -load("@score_tooling//:defs.bzl", "score_py_pytest") - -filegroup( - name = "all_sources", - srcs = glob(["*.py"]), - visibility = ["//visibility:public"], -) - -py_library( - name = "find_runfiles", - srcs = [":all_sources"], - imports = ["."], - visibility = ["//visibility:public"], -) - -score_py_pytest( - name = "find_runfiles_test", - size = "small", - srcs = ["test_find_runfiles.py"], - deps = [ - ":find_runfiles", - ] + all_requirements, -) diff --git a/src/find_runfiles/__init__.py b/src/find_runfiles/__init__.py deleted file mode 100644 index e4662004..00000000 --- a/src/find_runfiles/__init__.py +++ /dev/null @@ -1,117 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* -import logging -import os -import sys -from pathlib import Path - -logger = logging.getLogger(__name__) -# logger.setLevel(logging.DEBUG) - - -def _log_debug(message: str): - # TODO: why does logger not print anything? 
- if logger.hasHandlers(): - logger.debug(message) - else: - print(message) - - -def find_git_root() -> Path: - # TODO: is __file__ ever resolved into the bazel cache directories? - # Then this function will not work! - workspace = os.getenv("BUILD_WORKSPACE_DIRECTORY") - if workspace: - return Path(workspace) - - for parent in Path(__file__).resolve().parents: - if (parent / ".git").exists(): - return parent - - sys.exit( - "Could not find git root. " - "Please run this script from the root of the repository." - ) - - -def get_runfiles_dir_impl( - cwd: Path, - conf_dir: Path, - env_runfiles: Path | None, - git_root: Path, -) -> Path: - """Functional (and therefore testable) logic to determine the runfiles directory.""" - - _log_debug( - f"get_runfiles_dir_impl(\n cwd={cwd},\n conf_dir={conf_dir},\n" - f" env_runfiles={env_runfiles},\n git_root={git_root}\n)" - ) - - if env_runfiles: - # Runfiles are only available when running in Bazel. - # Both `bazel build` and `bazel run` are supported. - # i.e. `bazel build //:docs` and `bazel run //:docs`. - _log_debug("Using env[RUNFILES_DIR] to find the runfiles...") - - if env_runfiles.is_absolute() and "bazel-out" in env_runfiles.parts: - # In case of `bazel run` it will point to the global cache directory, - # which has a new hash every time. And it's not pretty. - # However, `bazel-out` is a symlink to that same cache directory! - try: - idx = env_runfiles.parts.index("bazel-out") - runfiles_dir = git_root.joinpath(*env_runfiles.parts[idx:]) - _log_debug(f"Made runfiles dir pretty: {runfiles_dir}") - except ValueError: - sys.exit("Could not find bazel-out in runfiles path.") - else: - runfiles_dir = git_root / env_runfiles - - else: - # The only way to land here is when running from within the virtual - # environment created by the `:ide_support` rule. - # i.e. esbonio or manual sphinx-build execution within the virtual - # environment. - _log_debug("Running outside bazel.") - - # TODO: "process-docs" is in SOURCE_DIR!! 
- runfiles_dir = git_root / "bazel-bin" / "process-docs" / "ide_support.runfiles" - - return runfiles_dir - - -def get_runfiles_dir() -> Path: - """Runfiles directory relative to conf.py""" - - # FIXME CONF_DIRECTORY is our invention. When running from esbonio, this is not - # set. It seems to provide app.confdir instead... - conf_dir = os.getenv("CONF_DIRECTORY") - assert conf_dir - - env_runfiles = os.getenv("RUNFILES_DIR") - - runfiles = Path( - get_runfiles_dir_impl( - cwd=Path(os.getcwd()), - conf_dir=Path(conf_dir), - env_runfiles=Path(env_runfiles) if env_runfiles else None, - git_root=find_git_root(), - ) - ) - - if not runfiles.exists(): - sys.exit( - f"Could not find runfiles at {runfiles}. Have a look at " - "README.md for instructions on how to build docs." - ) - - return runfiles diff --git a/src/find_runfiles/test_find_runfiles.py b/src/find_runfiles/test_find_runfiles.py deleted file mode 100644 index 97d73d84..00000000 --- a/src/find_runfiles/test_find_runfiles.py +++ /dev/null @@ -1,93 +0,0 @@ -# ******************************************************************************* -# Copyright (c) 2025 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# SPDX-License-Identifier: Apache-2.0 -# ******************************************************************************* -from pathlib import Path - -# TODO: why is there an __init__.py file in tooling? 
-from src import find_runfiles - - -def get_runfiles_dir_impl( - cwd: str, conf_dir: str, env_runfiles: str | None, git_root: str -): - return str( - find_runfiles.get_runfiles_dir_impl( - cwd=Path(cwd), - conf_dir=Path(conf_dir), - env_runfiles=Path(env_runfiles) if env_runfiles else None, - git_root=Path(git_root), - ) - ) - - -def test_run_incremental(): - """bazel run //process-docs:incremental""" - # in incremental.py: - assert get_runfiles_dir_impl( - cwd="/home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles/_main", - conf_dir="process-docs", - env_runfiles="/home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles", - git_root="/workspaces/process", - ) == ( - "/workspaces/process/bazel-out/k8-fastbuild/bin/process-docs/" - "incremental.runfiles" - ) - - # in conf.py: - assert get_runfiles_dir_impl( - cwd="/workspaces/process/process-docs", - conf_dir="process-docs", - env_runfiles="/home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/process-docs/incremental.runfiles", - git_root="/workspaces/process", - ) == ( - "/workspaces/process/bazel-out/k8-fastbuild/bin/process-docs/" - "incremental.runfiles" - ) - - -def test_build_incremental_and_exec_it(): - """bazel build //process-docs:incremental && bazel-bin/process-docs/incremental""" - assert ( - get_runfiles_dir_impl( - cwd="/workspaces/process/process-docs", - conf_dir="process-docs", - env_runfiles="bazel-bin/process-docs/incremental.runfiles", - git_root="/workspaces/process", - ) - == "/workspaces/process/bazel-bin/process-docs/incremental.runfiles" - ) - - -def test_esbonio_old(): - """Observed with esbonio 0.x""" - assert ( - get_runfiles_dir_impl( - cwd="/workspaces/process/process-docs", - conf_dir="process-docs", - env_runfiles=None, - 
git_root="/workspaces/process", - ) - == "/workspaces/process/bazel-bin/process-docs/ide_support.runfiles" - ) - - -def test3(): - # docs named differently, just to make sure nothing is hardcoded - # bazel run //other-docs:incremental - assert get_runfiles_dir_impl( - cwd="/workspaces/process/other-docs", - conf_dir="other-docs", - env_runfiles="/home/vscode/.cache/bazel/_bazel_vscode/6084288f00f33db17acb4220ce8f1999/execroot/_main/bazel-out/k8-fastbuild/bin/other-docs/incremental.runfiles", - git_root="/workspaces/process", - ) == ( - "/workspaces/process/bazel-out/k8-fastbuild/bin/other-docs/incremental.runfiles" - ) diff --git a/src/helper_lib/BUILD b/src/helper_lib/BUILD index c2c3161f..545a7ea2 100644 --- a/src/helper_lib/BUILD +++ b/src/helper_lib/BUILD @@ -25,7 +25,10 @@ py_library( srcs = [":all_sources"], imports = ["."], visibility = ["//visibility:public"], - deps = ["@score_docs_as_code//src/extensions/score_source_code_linker:source_code_linker_helpers"], + deps = [ + "@rules_python//python/runfiles", + "@score_docs_as_code//src/extensions/score_source_code_linker:source_code_linker_helpers", + ], ) score_py_pytest( diff --git a/src/helper_lib/__init__.py b/src/helper_lib/__init__.py index 001977d2..fb43cff1 100644 --- a/src/helper_lib/__init__.py +++ b/src/helper_lib/__init__.py @@ -13,8 +13,10 @@ import os import subprocess +import sys from pathlib import Path +from python.runfiles import Runfiles from sphinx_needs.logging import get_logger LOGGER = get_logger(__name__) @@ -174,3 +176,33 @@ def get_current_git_hash(git_root: Path) -> str: exc_info=e, ) raise + + +def get_runfiles_dir() -> Path: + """ + Find the Bazel runfiles directory using bazel_runfiles convention, + fallback to RUNFILES_DIR or relative traversal if needed. 
+ """ + if (r := Runfiles.Create()) and (rd := r.EnvVars().get("RUNFILES_DIR")): + runfiles_dir = Path(rd) + else: + # The only way to land here is when running from within the virtual + # environment created by the `:ide_support` rule in the BUILD file. + # i.e. esbonio or manual sphinx-build execution within the virtual + # environment. + # We'll still use the plantuml binary from the bazel build. + # But we need to find it first. + LOGGER.debug("Running outside bazel.") + + git_root = find_git_root() + if git_root is None: + sys.exit("Could not find git root.") + + runfiles_dir = git_root / "bazel-bin" / "ide_support.runfiles" + + if not runfiles_dir.exists(): + sys.exit( + f"Could not find runfiles_dir at {runfiles_dir}. " + "Have a look at README.md for instructions on how to build docs." + ) + return runfiles_dir diff --git a/src/helper_lib/test_helper_lib.py b/src/helper_lib/test_helper_lib.py index 0025d1ca..83bbc30e 100644 --- a/src/helper_lib/test_helper_lib.py +++ b/src/helper_lib/test_helper_lib.py @@ -20,6 +20,7 @@ from src.helper_lib import ( get_current_git_hash, get_github_repo_info, + get_runfiles_dir, parse_remote_git_output, ) @@ -238,3 +239,56 @@ def test_get_current_git_hash_invalid_repo(temp_dir: Path): """Test getting git hash from invalid repository.""" with pytest.raises(subprocess.CalledProcessError): get_current_git_hash(temp_dir) + + +def test_runfiles_dir_found(temp_dir: Path): + """Test Runfiles dir found when provided and it's actually there""" + runfiles_dir = temp_dir / "runfiles_here" + runfiles_dir.mkdir(parents=True) + os.environ["RUNFILES_DIR"] = str(runfiles_dir) + os.chdir(runfiles_dir) + result = get_runfiles_dir() + assert Path(result) == runfiles_dir + os.environ.pop("RUNFILES_DIR", None) + + +def test_runfiles_dir_missing_triggers_exit(temp_dir: Path): + """Testing if the runfiles exit via sys.exit if runfiles are set but don't exist""" + runfiles_dir = temp_dir / "does_not_exist" + os.environ["RUNFILES_DIR"] = 
str(runfiles_dir) + with pytest.raises(SystemExit) as e: + get_runfiles_dir() + assert "Could not find runfiles_dir" in str(e.value) + os.environ.pop("RUNFILES_DIR", None) + + +def test_git_root_search_success(git_repo: Path, monkeypatch: pytest.MonkeyPatch): + """Testing if Git Root can be found successfully with unset RUNFILES""" + docs_dir = git_repo / "docs" + runfiles_dir = git_repo / "bazel-bin" / "ide_support.runfiles" + docs_dir.mkdir() + runfiles_dir.mkdir(parents=True) + os.environ.pop("RUNFILES_DIR", None) + + # Have to monkeypatch in order to allow us to test + # the 'else' path inside 'get_runfiles_dir' + monkeypatch.setattr(Path, "cwd", lambda: docs_dir) + result = get_runfiles_dir() + assert Path(result) == runfiles_dir + os.environ.pop("RUNFILES_DIR", None) + + +def test_git_root_search_not_found(tmp_path: Path, monkeypatch: pytest.MonkeyPatch): + """ + Test fallback when no .git is found (should sys.exit). + """ + nowhere = tmp_path / "nowhere" + nowhere.mkdir(parents=True) + os.environ.pop("RUNFILES_DIR", None) + # Have to monkeypatch in order to allow us to + # test the 'else' path inside 'get_runfiles_dir' + monkeypatch.setattr(Path, "cwd", lambda: nowhere) + with pytest.raises(SystemExit) as excinfo: + get_runfiles_dir() + assert "Could not find git root" in str(excinfo.value) + os.environ.pop("RUNFILES_DIR", None) diff --git a/src/requirements.in b/src/requirements.in index b616508d..442e74c7 100644 --- a/src/requirements.in +++ b/src/requirements.in @@ -1,4 +1,4 @@ -Sphinx>=8.2.3,<9 +Sphinx>=8.1.3,<9 # At least 4.2.0, as it fixes a bug in combination with esbonio live preview: # https://github.com/useblocks/sphinx-needs/issues/1350 @@ -30,3 +30,4 @@ rich needs-config-writer == 0.2.4 # use this for a specific commit for fast development iterations # needs-config-writer @ https://github.com/useblocks/needs-config-writer/archive/032a5f8.zip + diff --git a/src/requirements.txt b/src/requirements.txt index b9ff06ce..37c646f3 100644 --- 
a/src/requirements.txt +++ b/src/requirements.txt @@ -30,10 +30,10 @@ babel==2.17.0 \ # via # pydata-sphinx-theme # sphinx -basedpyright==1.29.2 \ - --hash=sha256:12c49186003b9f69a028615da883ef97035ea2119a9e3f93a00091b3a27088a6 \ - --hash=sha256:f389e2997de33d038c5065fd85bff351fbdc62fa6d6371c7b947fc3bce8d437d - # via -r /var/cache/bazel/a6d5860d75352e2ea44147474e2a3020/external/score_tooling+/python_basics/requirements.txt +basedpyright==1.35.0 \ + --hash=sha256:2a7e0bd476623d48499e2b18ff6ed19dc28c51909cf9e1152ad355b5809049ad \ + --hash=sha256:4f4f84023df5a0cd4ee154916ba698596682ac98bacfa22c941ed6aaf07bba4e + # via -r /external/score_tooling+/python_basics/requirements.txt beautifulsoup4==4.14.2 \ --hash=sha256:2a98ab9f944a11acee9cc848508ec28d9228abfd522ef0fad6a02a72e0ded69e \ --hash=sha256:5ef6fa3a8cbece8488d66985560f97ed091e22bbc4e9c2338508a9d5de6d4515 @@ -518,11 +518,11 @@ imagesize==1.4.1 \ --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \ --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a # via sphinx -iniconfig==2.1.0 \ - --hash=sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7 \ - --hash=sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760 +iniconfig==2.3.0 \ + --hash=sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730 \ + --hash=sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12 # via - # -r /var/cache/bazel/a6d5860d75352e2ea44147474e2a3020/external/score_tooling+/python_basics/requirements.txt + # -r /external/score_tooling+/python_basics/requirements.txt # pytest jinja2==3.1.6 \ --hash=sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d \ @@ -823,18 +823,18 @@ needs-config-writer==0.2.4 \ --hash=sha256:0f0702574081bb8ed7d896aadfb73c0e48af099dc0d4227cc2bac957ed8ea4f6 \ --hash=sha256:7c89375848c822e891b3cca48783f3cc3f7cbd3c02cba19418de146ca077f212 # via -r src/requirements.in 
-nodejs-wheel-binaries==22.16.0 \ - --hash=sha256:2728972d336d436d39ee45988978d8b5d963509e06f063e80fe41b203ee80b28 \ - --hash=sha256:2fffb4bf1066fb5f660da20819d754f1b424bca1b234ba0f4fa901c52e3975fb \ - --hash=sha256:447ad796850eb52ca20356ad39b2d296ed8fef3f214921f84a1ccdad49f2eba1 \ - --hash=sha256:4ae3cf22138891cb44c3ee952862a257ce082b098b29024d7175684a9a77b0c0 \ - --hash=sha256:71f2de4dc0b64ae43e146897ce811f80ac4f9acfbae6ccf814226282bf4ef174 \ - --hash=sha256:7f526ca6a132b0caf633566a2a78c6985fe92857e7bfdb37380f76205a10b808 \ - --hash=sha256:986b715a96ed703f8ce0c15712f76fc42895cf09067d72b6ef29e8b334eccf64 \ - --hash=sha256:d695832f026df3a0cf9a089d222225939de9d1b67f8f0a353b79f015aabbe7e2 \ - --hash=sha256:dbfccbcd558d2f142ccf66d8c3a098022bf4436db9525b5b8d32169ce185d99e +nodejs-wheel-binaries==24.11.1 \ + --hash=sha256:0e14874c3579def458245cdbc3239e37610702b0aa0975c1dc55e2cb80e42102 \ + --hash=sha256:10197b1c9c04d79403501766f76508b0dac101ab34371ef8a46fcf51773497d0 \ + --hash=sha256:376b9ea1c4bc1207878975dfeb604f7aa5668c260c6154dcd2af9d42f7734116 \ + --hash=sha256:413dfffeadfb91edb4d8256545dea797c237bba9b3faefea973cde92d96bb922 \ + --hash=sha256:5ef598101b0fb1c2bf643abb76dfbf6f76f1686198ed17ae46009049ee83c546 \ + --hash=sha256:78bc5bb889313b565df8969bb7423849a9c7fc218bf735ff0ce176b56b3e96f0 \ + --hash=sha256:c2741525c9874b69b3e5a6d6c9179a6fe484ea0c3d5e7b7c01121c8e5d78b7e2 \ + --hash=sha256:c79a7e43869ccecab1cae8183778249cceb14ca2de67b5650b223385682c6239 \ + --hash=sha256:cde41d5e4705266688a8d8071debf4f8a6fcea264c61292782672ee75a6905f9 # via - # -r /var/cache/bazel/a6d5860d75352e2ea44147474e2a3020/external/score_tooling+/python_basics/requirements.txt + # -r /external/score_tooling+/python_basics/requirements.txt # basedpyright numpy==2.3.5 \ --hash=sha256:00dc4e846108a382c5869e77c6ed514394bdeb3403461d25a829711041217d5b \ @@ -918,7 +918,7 @@ packaging==25.0 \ --hash=sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484 \ 
--hash=sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f # via - # -r /var/cache/bazel/a6d5860d75352e2ea44147474e2a3020/external/score_tooling+/python_basics/requirements.txt + # -r /external/score_tooling+/python_basics/requirements.txt # matplotlib # pytest # sphinx @@ -1024,7 +1024,7 @@ pluggy==1.6.0 \ --hash=sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3 \ --hash=sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746 # via - # -r /var/cache/bazel/a6d5860d75352e2ea44147474e2a3020/external/score_tooling+/python_basics/requirements.txt + # -r /external/score_tooling+/python_basics/requirements.txt # pytest pycparser==2.23 \ --hash=sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2 \ @@ -1046,8 +1046,10 @@ pygments==2.19.2 \ --hash=sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887 \ --hash=sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b # via + # -r /external/score_tooling+/python_basics/requirements.txt # accessible-pygments # pydata-sphinx-theme + # pytest # rich # sphinx pyjwt[crypto]==2.10.1 \ @@ -1091,10 +1093,10 @@ pyspellchecker==0.8.3 \ --hash=sha256:cb06eeafe124837f321e0d02f8e21deab713e966e28e0360319a28a089c43978 \ --hash=sha256:e993076e98b0da5a99b7cc31085c3022c77a9dc37c5e95f5cf6304b5dbb8b9d2 # via esbonio -pytest==8.3.5 \ - --hash=sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820 \ - --hash=sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845 - # via -r /var/cache/bazel/a6d5860d75352e2ea44147474e2a3020/external/score_tooling+/python_basics/requirements.txt +pytest==9.0.1 \ + --hash=sha256:3e9c069ea73583e255c3b21cf46b8d3c56f6e3a1a8f6da94ccb0fcf57b9d73c8 \ + --hash=sha256:67be0030d194df2dfa7b556f2e56fb3c3315bd5c8822c6951162b92b32ce7dad + # via -r /external/score_tooling+/python_basics/requirements.txt python-dateutil==2.9.0.post0 \ 
--hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 From a1a3e159c85734f4d2743ec947fec1c0f426d887 Mon Sep 17 00:00:00 2001 From: Chidananda-Swamy <chidananda.swamy@ltts.com> Date: Wed, 4 Feb 2026 16:21:15 +0530 Subject: [PATCH 198/231] Fix typo in source_to_doc_links.rst (#383) Update the typo error in "Scanning Source Code" section Signed-off-by: Chidananda-Swamy <chidananda.swamy@ltts.com> --- docs/how-to/source_to_doc_links.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/how-to/source_to_doc_links.rst b/docs/how-to/source_to_doc_links.rst index 13b722be..c52c765c 100644 --- a/docs/how-to/source_to_doc_links.rst +++ b/docs/how-to/source_to_doc_links.rst @@ -25,7 +25,7 @@ For other languages (C++, Rust, etc.), use the appropriate comment syntax. Scanning Source Code for Links ------------------------------ -In you ``BUILD`` files, you specify which source files to scan +In your ``BUILD`` files, you specify which source files to scan with ``filegroup`` or ``glob`` or whatever Bazel mechanism you prefer. Finally, pass the scan results to the ``docs`` rule as ``scan_code`` attribute. 
From 5102059086dd68e965133305f3a2e0e6e4d2a1d9 Mon Sep 17 00:00:00 2001 From: Alexander Lanin <Alexander.Lanin@etas.com> Date: Wed, 4 Feb 2026 14:01:36 +0100 Subject: [PATCH 199/231] chore: update dependencies (#384) * chore: update dependencies * chore: try invalidating old caches by changing env_version to 3.0.0 --- .../score_sphinx_bundle/__init__.py | 4 +- src/requirements.txt | 1196 ++++++++--------- 2 files changed, 559 insertions(+), 641 deletions(-) diff --git a/src/extensions/score_sphinx_bundle/__init__.py b/src/extensions/score_sphinx_bundle/__init__.py index bba802cc..d30df6c3 100644 --- a/src/extensions/score_sphinx_bundle/__init__.py +++ b/src/extensions/score_sphinx_bundle/__init__.py @@ -71,9 +71,9 @@ def setup(app: Sphinx) -> dict[str, object]: app.setup_extension(e) return { - "version": "0.1", + "version": "3.0.0", # Keep this in sync with the score_docs_as_code version in MODULE.bazel - "env_version": 200, # 2.0.0 + "env_version": 300, # 3.0.0 "parallel_read_safe": True, "parallel_write_safe": True, } diff --git a/src/requirements.txt b/src/requirements.txt index 37c646f3..251c28fb 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -12,9 +12,9 @@ alabaster==1.0.0 \ --hash=sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e \ --hash=sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b # via sphinx -anyio==4.11.0 \ - --hash=sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc \ - --hash=sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4 +anyio==4.12.1 \ + --hash=sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703 \ + --hash=sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c # via # starlette # watchfiles @@ -24,9 +24,9 @@ attrs==25.4.0 \ # via # cattrs # lsprotocol -babel==2.17.0 \ - --hash=sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d \ - 
--hash=sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2 +babel==2.18.0 \ + --hash=sha256:b80b99a14bd085fcacfa15c9165f651fbb3406e66cc603abf11c5750937c992d \ + --hash=sha256:e2b422b277c2b9a9630c1d7903c2a00d0830c409c59ac8cae9081c92f1aeba35 # via # pydata-sphinx-theme # sphinx @@ -34,9 +34,9 @@ basedpyright==1.35.0 \ --hash=sha256:2a7e0bd476623d48499e2b18ff6ed19dc28c51909cf9e1152ad355b5809049ad \ --hash=sha256:4f4f84023df5a0cd4ee154916ba698596682ac98bacfa22c941ed6aaf07bba4e # via -r /external/score_tooling+/python_basics/requirements.txt -beautifulsoup4==4.14.2 \ - --hash=sha256:2a98ab9f944a11acee9cc848508ec28d9228abfd522ef0fad6a02a72e0ded69e \ - --hash=sha256:5ef6fa3a8cbece8488d66985560f97ed091e22bbc4e9c2338508a9d5de6d4515 +beautifulsoup4==4.14.3 \ + --hash=sha256:0918bfe44902e6ad8d57732ba310582e98da931428d231a5ecb9e7c703a735bb \ + --hash=sha256:6292b1c5186d356bba669ef9f7f051757099565ad9ada5dd630bd9de5fa7fb86 # via pydata-sphinx-theme cattrs==25.3.0 \ --hash=sha256:1ac88d9e5eda10436c4517e390a4142d88638fe682c436c93db7ce4a277b884a \ @@ -44,9 +44,9 @@ cattrs==25.3.0 \ # via # lsprotocol # pygls -certifi==2025.11.12 \ - --hash=sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b \ - --hash=sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316 +certifi==2026.1.4 \ + --hash=sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c \ + --hash=sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120 # via requests cffi==2.0.0 \ --hash=sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb \ @@ -333,97 +333,92 @@ contourpy==1.3.3 \ --hash=sha256:fd914713266421b7536de2bfa8181aa8c699432b6763a0ea64195ebe28bff6a9 \ --hash=sha256:fde6c716d51c04b1c25d0b90364d0be954624a0ee9d60e23e850e8d48353d07a # via matplotlib -cryptography==46.0.3 \ - --hash=sha256:00a5e7e87938e5ff9ff5447ab086a5706a957137e6e433841e9d24f38a065217 \ - 
--hash=sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d \ - --hash=sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc \ - --hash=sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71 \ - --hash=sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971 \ - --hash=sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a \ - --hash=sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926 \ - --hash=sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc \ - --hash=sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d \ - --hash=sha256:191bb60a7be5e6f54e30ba16fdfae78ad3a342a0599eb4193ba88e3f3d6e185b \ - --hash=sha256:22d7e97932f511d6b0b04f2bfd818d73dcd5928db509460aaf48384778eb6d20 \ - --hash=sha256:23b1a8f26e43f47ceb6d6a43115f33a5a37d57df4ea0ca295b780ae8546e8044 \ - --hash=sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3 \ - --hash=sha256:39b6755623145ad5eff1dab323f4eae2a32a77a7abef2c5089a04a3d04366715 \ - --hash=sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4 \ - --hash=sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506 \ - --hash=sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f \ - --hash=sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0 \ - --hash=sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683 \ - --hash=sha256:50fc3343ac490c6b08c0cf0d704e881d0d660be923fd3076db3e932007e726e3 \ - --hash=sha256:516ea134e703e9fe26bcd1277a4b59ad30586ea90c365a87781d7887a646fe21 \ - --hash=sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91 \ - --hash=sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c \ - --hash=sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8 \ - --hash=sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df \ - 
--hash=sha256:6b5063083824e5509fdba180721d55909ffacccc8adbec85268b48439423d78c \ - --hash=sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb \ - --hash=sha256:6f61efb26e76c45c4a227835ddeae96d83624fb0d29eb5df5b96e14ed1a0afb7 \ - --hash=sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04 \ - --hash=sha256:760f83faa07f8b64e9c33fc963d790a2edb24efb479e3520c14a45741cd9b2db \ - --hash=sha256:78a97cf6a8839a48c49271cdcbd5cf37ca2c1d6b7fdd86cc864f302b5e9bf459 \ - --hash=sha256:7ce938a99998ed3c8aa7e7272dca1a610401ede816d36d0693907d863b10d9ea \ - --hash=sha256:8a6e050cb6164d3f830453754094c086ff2d0b2f3a897a1d9820f6139a1f0914 \ - --hash=sha256:9394673a9f4de09e28b5356e7fff97d778f8abad85c9d5ac4a4b7e25a0de7717 \ - --hash=sha256:94cd0549accc38d1494e1f8de71eca837d0509d0d44bf11d158524b0e12cebf9 \ - --hash=sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac \ - --hash=sha256:a23582810fedb8c0bc47524558fb6c56aac3fc252cb306072fd2815da2a47c32 \ - --hash=sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec \ - --hash=sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1 \ - --hash=sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb \ - --hash=sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac \ - --hash=sha256:b419ae593c86b87014b9be7396b385491ad7f320bde96826d0dd174459e54665 \ - --hash=sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e \ - --hash=sha256:c70cc23f12726be8f8bc72e41d5065d77e4515efae3690326764ea1b07845cfb \ - --hash=sha256:c8daeb2d2174beb4575b77482320303f3d39b8e81153da4f0fb08eb5fe86a6c5 \ - --hash=sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936 \ - --hash=sha256:d55f3dffadd674514ad19451161118fd010988540cee43d8bc20675e775925de \ - --hash=sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372 \ - --hash=sha256:db391fa7c66df6762ee3f00c95a89e6d428f4d60e7abc8328f4fe155b5ac6e54 \ - 
--hash=sha256:dfb781ff7eaa91a6f7fd41776ec37c5853c795d3b358d4896fdbb5df168af422 \ - --hash=sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849 \ - --hash=sha256:e7aec276d68421f9574040c26e2a7c3771060bc0cff408bae1dcb19d3ab1e63c \ - --hash=sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963 \ - --hash=sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018 +cryptography==46.0.4 \ + --hash=sha256:01df4f50f314fbe7009f54046e908d1754f19d0c6d3070df1e6268c5a4af09fa \ + --hash=sha256:0563655cb3c6d05fb2afe693340bc050c30f9f34e15763361cf08e94749401fc \ + --hash=sha256:078e5f06bd2fa5aea5a324f2a09f914b1484f1d0c2a4d6a8a28c74e72f65f2da \ + --hash=sha256:0a9ad24359fee86f131836a9ac3bffc9329e956624a2d379b613f8f8abaf5255 \ + --hash=sha256:2067461c80271f422ee7bdbe79b9b4be54a5162e90345f86a23445a0cf3fd8a2 \ + --hash=sha256:281526e865ed4166009e235afadf3a4c4cba6056f99336a99efba65336fd5485 \ + --hash=sha256:2d08bc22efd73e8854b0b7caff402d735b354862f1145d7be3b9c0f740fef6a0 \ + --hash=sha256:3c268a3490df22270955966ba236d6bc4a8f9b6e4ffddb78aac535f1a5ea471d \ + --hash=sha256:3d425eacbc9aceafd2cb429e42f4e5d5633c6f873f5e567077043ef1b9bbf616 \ + --hash=sha256:44cc0675b27cadb71bdbb96099cca1fa051cd11d2ade09e5cd3a2edb929ed947 \ + --hash=sha256:47bcd19517e6389132f76e2d5303ded6cf3f78903da2158a671be8de024f4cd0 \ + --hash=sha256:485e2b65d25ec0d901bca7bcae0f53b00133bf3173916d8e421f6fddde103908 \ + --hash=sha256:5aa3e463596b0087b3da0dbe2b2487e9fc261d25da85754e30e3b40637d61f81 \ + --hash=sha256:5f14fba5bf6f4390d7ff8f086c566454bff0411f6d8aa7af79c88b6f9267aecc \ + --hash=sha256:62217ba44bf81b30abaeda1488686a04a702a261e26f87db51ff61d9d3510abd \ + --hash=sha256:6225d3ebe26a55dbc8ead5ad1265c0403552a63336499564675b29eb3184c09b \ + --hash=sha256:6bb5157bf6a350e5b28aee23beb2d84ae6f5be390b2f8ee7ea179cda077e1019 \ + --hash=sha256:728fedc529efc1439eb6107b677f7f7558adab4553ef8669f0d02d42d7b959a7 \ + 
--hash=sha256:766330cce7416c92b5e90c3bb71b1b79521760cdcfc3a6a1a182d4c9fab23d2b \ + --hash=sha256:812815182f6a0c1d49a37893a303b44eaac827d7f0d582cecfc81b6427f22973 \ + --hash=sha256:829c2b12bbc5428ab02d6b7f7e9bbfd53e33efd6672d21341f2177470171ad8b \ + --hash=sha256:82a62483daf20b8134f6e92898da70d04d0ef9a75829d732ea1018678185f4f5 \ + --hash=sha256:8a15fb869670efa8f83cbffbc8753c1abf236883225aed74cd179b720ac9ec80 \ + --hash=sha256:8bf75b0259e87fa70bddc0b8b4078b76e7fd512fd9afae6c1193bcf440a4dbef \ + --hash=sha256:91627ebf691d1ea3976a031b61fb7bac1ccd745afa03602275dda443e11c8de0 \ + --hash=sha256:93d8291da8d71024379ab2cb0b5c57915300155ad42e07f76bea6ad838d7e59b \ + --hash=sha256:9b34d8ba84454641a6bf4d6762d15847ecbd85c1316c0a7984e6e4e9f748ec2e \ + --hash=sha256:9b4d17bc7bd7cdd98e3af40b441feaea4c68225e2eb2341026c84511ad246c0c \ + --hash=sha256:9c2da296c8d3415b93e6053f5a728649a87a48ce084a9aaf51d6e46c87c7f2d2 \ + --hash=sha256:a05177ff6296644ef2876fce50518dffb5bcdf903c85250974fc8bc85d54c0af \ + --hash=sha256:a90e43e3ef65e6dcf969dfe3bb40cbf5aef0d523dff95bfa24256be172a845f4 \ + --hash=sha256:a9556ba711f7c23f77b151d5798f3ac44a13455cc68db7697a1096e6d0563cab \ + --hash=sha256:b1de0ebf7587f28f9190b9cb526e901bf448c9e6a99655d2b07fff60e8212a82 \ + --hash=sha256:be8c01a7d5a55f9a47d1888162b76c8f49d62b234d88f0ff91a9fbebe32ffbc3 \ + --hash=sha256:bfd019f60f8abc2ed1b9be4ddc21cfef059c841d86d710bb69909a688cbb8f59 \ + --hash=sha256:c236a44acfb610e70f6b3e1c3ca20ff24459659231ef2f8c48e879e2d32b73da \ + --hash=sha256:c411f16275b0dea722d76544a61d6421e2cc829ad76eec79280dbdc9ddf50061 \ + --hash=sha256:c92010b58a51196a5f41c3795190203ac52edfd5dc3ff99149b4659eba9d2085 \ + --hash=sha256:d5a45ddc256f492ce42a4e35879c5e5528c09cd9ad12420828c972951d8e016b \ + --hash=sha256:daa392191f626d50f1b136c9b4cf08af69ca8279d110ea24f5c2700054d2e263 \ + --hash=sha256:dc1272e25ef673efe72f2096e92ae39dea1a1a450dd44918b15351f72c5a168e \ + --hash=sha256:dce1e4f068f03008da7fa51cc7abc6ddc5e5de3e3d1550334eaf8393982a5829 \ + 
--hash=sha256:dd5aba870a2c40f87a3af043e0dee7d9eb02d4aff88a797b48f2b43eff8c3ab4 \ + --hash=sha256:de0f5f4ec8711ebc555f54735d4c673fc34b65c44283895f1a08c2b49d2fd99c \ + --hash=sha256:df4a817fa7138dd0c96c8c8c20f04b8aaa1fac3bbf610913dcad8ea82e1bfd3f \ + --hash=sha256:e07ea39c5b048e085f15923511d8121e4a9dc45cee4e3b970ca4f0d338f23095 \ + --hash=sha256:eeeb2e33d8dbcccc34d64651f00a98cb41b2dc69cef866771a5717e6734dfa32 \ + --hash=sha256:fa0900b9ef9c49728887d1576fd8d9e7e3ea872fa9b25ef9b64888adc434e976 \ + --hash=sha256:fdc3daab53b212472f1524d070735b2f0c214239df131903bae1d598016fa822 # via pyjwt cycler==0.12.1 \ --hash=sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30 \ --hash=sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c # via matplotlib -debugpy==1.8.17 \ - --hash=sha256:045290c010bcd2d82bc97aa2daf6837443cd52f6328592698809b4549babcee1 \ - --hash=sha256:1440fd514e1b815edd5861ca394786f90eb24960eb26d6f7200994333b1d79e3 \ - --hash=sha256:17e456da14848d618662354e1dccfd5e5fb75deec3d1d48dc0aa0baacda55860 \ - --hash=sha256:24693179ef9dfa20dca8605905a42b392be56d410c333af82f1c5dff807a64cc \ - --hash=sha256:3a32c0af575749083d7492dc79f6ab69f21b2d2ad4cd977a958a07d5865316e4 \ - --hash=sha256:3bea3b0b12f3946e098cce9b43c3c46e317b567f79570c3f43f0b96d00788088 \ - --hash=sha256:5c59b74aa5630f3a5194467100c3b3d1c77898f9ab27e3f7dc5d40fc2f122670 \ - --hash=sha256:60c7dca6571efe660ccb7a9508d73ca14b8796c4ed484c2002abba714226cfef \ - --hash=sha256:6a4e9dacf2cbb60d2514ff7b04b4534b0139facbf2abdffe0639ddb6088e59cf \ - --hash=sha256:6c5cd6f009ad4fca8e33e5238210dc1e5f42db07d4b6ab21ac7ffa904a196420 \ - --hash=sha256:857c1dd5d70042502aef1c6d1c2801211f3ea7e56f75e9c335f434afb403e464 \ - --hash=sha256:893cba7bb0f55161de4365584b025f7064e1f88913551bcd23be3260b231429c \ - --hash=sha256:8deb4e31cd575c9f9370042876e078ca118117c1b5e1f22c32befcfbb6955f0c \ - --hash=sha256:a3aad0537cf4d9c1996434be68c6c9a6d233ac6f76c2a482c7803295b4e4f99a \ - 
--hash=sha256:b13eea5587e44f27f6c48588b5ad56dcb74a4f3a5f89250443c94587f3eb2ea1 \ - --hash=sha256:b532282ad4eca958b1b2d7dbcb2b7218e02cb934165859b918e3b6ba7772d3f4 \ - --hash=sha256:b69b6bd9dba6a03632534cdf67c760625760a215ae289f7489a452af1031fe1f \ - --hash=sha256:b75868b675949a96ab51abc114c7163f40ff0d8f7d6d5fd63f8932fd38e9c6d7 \ - --hash=sha256:bb1bbf92317e1f35afcf3ef0450219efb3afe00be79d8664b250ac0933b9015f \ - --hash=sha256:c41d2ce8bbaddcc0009cc73f65318eedfa3dbc88a8298081deb05389f1ab5542 \ - --hash=sha256:c6bdf134457ae0cac6fb68205776be635d31174eeac9541e1d0c062165c6461f \ - --hash=sha256:d3fce3f0e3de262a3b67e69916d001f3e767661c6e1ee42553009d445d1cd840 \ - --hash=sha256:e34ee844c2f17b18556b5bbe59e1e2ff4e86a00282d2a46edab73fd7f18f4a83 \ - --hash=sha256:e79a195f9e059edfe5d8bf6f3749b2599452d3e9380484cd261f6b7cd2c7c4da \ - --hash=sha256:e851beb536a427b5df8aa7d0c7835b29a13812f41e46292ff80b2ef77327355a \ - --hash=sha256:e8f8f61c518952fb15f74a302e068b48d9c4691768ade433e4adeea961993464 \ - --hash=sha256:eaa85bce251feca8e4c87ce3b954aba84b8c645b90f0e6a515c00394a9f5c0e7 \ - --hash=sha256:f14467edef672195c6f6b8e27ce5005313cb5d03c9239059bc7182b60c176e2d \ - --hash=sha256:f2ac8055a0c4a09b30b931100996ba49ef334c6947e7ae365cdd870416d7513e \ - --hash=sha256:fd723b47a8c08892b1a16b2c6239a8b96637c62a59b94bb5dab4bac592a58a8e +debugpy==1.8.20 \ + --hash=sha256:077a7447589ee9bc1ff0cdf443566d0ecf540ac8aa7333b775ebcb8ce9f4ecad \ + --hash=sha256:0dfd9adb4b3c7005e9c33df430bcdd4e4ebba70be533e0066e3a34d210041b66 \ + --hash=sha256:157e96ffb7f80b3ad36d808646198c90acb46fdcfd8bb1999838f0b6f2b59c64 \ + --hash=sha256:1f7650546e0eded1902d0f6af28f787fa1f1dbdbc97ddabaf1cd963a405930cb \ + --hash=sha256:20d6e64ea177ab6732bffd3ce8fc6fb8879c60484ce14c3b3fe183b1761459ca \ + --hash=sha256:352036a99dd35053b37b7803f748efc456076f929c6a895556932eaf2d23b07f \ + --hash=sha256:3ca85463f63b5dd0aa7aaa933d97cbc47c174896dcae8431695872969f981893 \ + 
--hash=sha256:4057ac68f892064e5f98209ab582abfee3b543fb55d2e87610ddc133a954d390 \ + --hash=sha256:4ae3135e2089905a916909ef31922b2d733d756f66d87345b3e5e52b7a55f13d \ + --hash=sha256:55bc8701714969f1ab89a6d5f2f3d40c36f91b2cbe2f65d98bf8196f6a6a2c33 \ + --hash=sha256:5be9bed9ae3be00665a06acaa48f8329d2b9632f15fd09f6a9a8c8d9907e54d7 \ + --hash=sha256:5dff4bb27027821fdfcc9e8f87309a28988231165147c31730128b1c983e282a \ + --hash=sha256:60f89411a6c6afb89f18e72e9091c3dfbcfe3edc1066b2043a1f80a3bbb3e11f \ + --hash=sha256:70ad9ae09b98ac307b82c16c151d27ee9d68ae007a2e7843ba621b5ce65333b5 \ + --hash=sha256:760813b4fff517c75bfe7923033c107104e76acfef7bda011ffea8736e9a66f8 \ + --hash=sha256:773e839380cf459caf73cc533ea45ec2737a5cc184cf1b3b796cd4fd98504fec \ + --hash=sha256:7de0b7dfeedc504421032afba845ae2a7bcc32ddfb07dae2c3ca5442f821c344 \ + --hash=sha256:84562982dd7cf5ebebfdea667ca20a064e096099997b175fe204e86817f64eaf \ + --hash=sha256:88f47850a4284b88bd2bfee1f26132147d5d504e4e86c22485dfa44b97e19b4b \ + --hash=sha256:9c74df62fc064cd5e5eaca1353a3ef5a5d50da5eb8058fcef63106f7bebe6173 \ + --hash=sha256:9eeed9f953f9a23850c85d440bf51e3c56ed5d25f8560eeb29add815bd32f7ee \ + --hash=sha256:a1a8f851e7cf171330679ef6997e9c579ef6dd33c9098458bd9986a0f4ca52e3 \ + --hash=sha256:a98eec61135465b062846112e5ecf2eebb855305acc1dfbae43b72903b8ab5be \ + --hash=sha256:b773eb026a043e4d9c76265742bc846f2f347da7e27edf7fe97716ea19d6bfc5 \ + --hash=sha256:bff8990f040dacb4c314864da95f7168c5a58a30a66e0eea0fb85e2586a92cd6 \ + --hash=sha256:c1178ae571aff42e61801a38b007af504ec8e05fde1c5c12e5a7efef21009642 \ + --hash=sha256:c29dd9d656c0fbd77906a6e6a82ae4881514aa3294b94c903ff99303e789b4a2 \ + --hash=sha256:da11dea6447b2cadbf8ce2bec59ecea87cc18d2c574980f643f2d2dfe4862393 \ + --hash=sha256:eada6042ad88fa1571b74bd5402ee8b86eded7a8f7b827849761700aff171f1b \ + --hash=sha256:eb506e45943cab2efb7c6eafdd65b842f3ae779f020c82221f55aca9de135ed7 # via -r src/requirements.in docutils==0.21.2 \ 
--hash=sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f \ @@ -436,73 +431,65 @@ esbonio==0.16.5 \ --hash=sha256:04ba926e3603f7b1fde1abc690b47afd60749b64b1029b6bce8e1de0bb284921 \ --hash=sha256:acab2e16c6cf8f7232fb04e0d48514ce50566516b1f6fcf669ccf2f247e8b10f # via -r src/requirements.in -fonttools==4.60.1 \ - --hash=sha256:022beaea4b73a70295b688f817ddc24ed3e3418b5036ffcd5658141184ef0d0c \ - --hash=sha256:026290e4ec76583881763fac284aca67365e0be9f13a7fb137257096114cb3bc \ - --hash=sha256:0b0835ed15dd5b40d726bb61c846a688f5b4ce2208ec68779bc81860adb5851a \ - --hash=sha256:0eae96373e4b7c9e45d099d7a523444e3554360927225c1cdae221a58a45b856 \ - --hash=sha256:122e1a8ada290423c493491d002f622b1992b1ab0b488c68e31c413390dc7eb2 \ - --hash=sha256:1410155d0e764a4615774e5c2c6fc516259fe3eca5882f034eb9bfdbee056259 \ - --hash=sha256:145daa14bf24824b677b9357c5e44fd8895c2a8f53596e1b9ea3496081dc692c \ - --hash=sha256:1525796c3ffe27bb6268ed2a1bb0dcf214d561dfaf04728abf01489eb5339dce \ - --hash=sha256:154cb6ee417e417bf5f7c42fe25858c9140c26f647c7347c06f0cc2d47eff003 \ - --hash=sha256:2299df884c11162617a66b7c316957d74a18e3758c0274762d2cc87df7bc0272 \ - --hash=sha256:2409d5fb7b55fd70f715e6d34e7a6e4f7511b8ad29a49d6df225ee76da76dd77 \ - --hash=sha256:268ecda8ca6cb5c4f044b1fb9b3b376e8cd1b361cef275082429dc4174907038 \ - --hash=sha256:282dafa55f9659e8999110bd8ed422ebe1c8aecd0dc396550b038e6c9a08b8ea \ - --hash=sha256:2ee06fc57512144d8b0445194c2da9f190f61ad51e230f14836286470c99f854 \ - --hash=sha256:3630e86c484263eaac71d117085d509cbcf7b18f677906824e4bace598fb70d2 \ - --hash=sha256:398447f3d8c0c786cbf1209711e79080a40761eb44b27cdafffb48f52bcec258 \ - --hash=sha256:4ba4bd646e86de16160f0fb72e31c3b9b7d0721c3e5b26b9fa2fc931dfdb2652 \ - --hash=sha256:5664fd1a9ea7f244487ac8f10340c4e37664675e8667d6fee420766e0fb3cf08 \ - --hash=sha256:583b7f8e3c49486e4d489ad1deacfb8d5be54a8ef34d6df824f6a171f8511d99 \ - --hash=sha256:596ecaca36367027d525b3b426d8a8208169d09edcf8c7506aceb3a38bfb55c7 \ - 
--hash=sha256:5c1015318e4fec75dd4943ad5f6a206d9727adf97410d58b7e32ab644a807914 \ - --hash=sha256:66929e2ea2810c6533a5184f938502cfdaea4bc3efb7130d8cc02e1c1b4108d6 \ - --hash=sha256:6ec722ee589e89a89f5b7574f5c45604030aa6ae24cb2c751e2707193b466fed \ - --hash=sha256:6f68576bb4bbf6060c7ab047b1574a1ebe5c50a17de62830079967b211059ebb \ - --hash=sha256:7473a8ed9ed09aeaa191301244a5a9dbe46fe0bf54f9d6cd21d83044c3321217 \ - --hash=sha256:7b0c6d57ab00dae9529f3faf187f2254ea0aa1e04215cf2f1a8ec277c96661bc \ - --hash=sha256:7b4c32e232a71f63a5d00259ca3d88345ce2a43295bb049d21061f338124246f \ - --hash=sha256:8177ec9676ea6e1793c8a084a90b65a9f778771998eb919d05db6d4b1c0b114c \ - --hash=sha256:839565cbf14645952d933853e8ade66a463684ed6ed6c9345d0faf1f0e868877 \ - --hash=sha256:875cb7764708b3132637f6c5fb385b16eeba0f7ac9fa45a69d35e09b47045801 \ - --hash=sha256:8a44788d9d91df72d1a5eac49b31aeb887a5f4aab761b4cffc4196c74907ea85 \ - --hash=sha256:8b4eb332f9501cb1cd3d4d099374a1e1306783ff95489a1026bde9eb02ccc34a \ - --hash=sha256:906306ac7afe2156fcf0042173d6ebbb05416af70f6b370967b47f8f00103bbb \ - --hash=sha256:992775c9fbe2cf794786fa0ffca7f09f564ba3499b8fe9f2f80bd7197db60383 \ - --hash=sha256:996a4d1834524adbb423385d5a629b868ef9d774670856c63c9a0408a3063401 \ - --hash=sha256:9a52f254ce051e196b8fe2af4634c2d2f02c981756c6464dc192f1b6050b4e28 \ - --hash=sha256:9d0ced62b59e0430b3690dbc5373df1c2aa7585e9a8ce38eff87f0fd993c5b01 \ - --hash=sha256:a140761c4ff63d0cb9256ac752f230460ee225ccef4ad8f68affc723c88e2036 \ - --hash=sha256:a184b2ea57b13680ab6d5fbde99ccef152c95c06746cb7718c583abd8f945ccc \ - --hash=sha256:a3db56f153bd4c5c2b619ab02c5db5192e222150ce5a1bc10f16164714bc39ac \ - --hash=sha256:a46b2f450bc79e06ef3b6394f0c68660529ed51692606ad7f953fc2e448bc903 \ - --hash=sha256:a884aef09d45ba1206712c7dbda5829562d3fea7726935d3289d343232ecb0d3 \ - --hash=sha256:b2cf105cee600d2de04ca3cfa1f74f1127f8455b71dbad02b9da6ec266e116d6 \ - --hash=sha256:b33a7884fabd72bdf5f910d0cf46be50dce86a0362a65cfc746a4168c67eb96c \ - 
--hash=sha256:b42d86938e8dda1cd9a1a87a6d82f1818eaf933348429653559a458d027446da \ - --hash=sha256:b6379e7546ba4ae4b18f8ae2b9bc5960936007a1c0e30b342f662577e8bc3299 \ - --hash=sha256:c7420a2696a44650120cdd269a5d2e56a477e2bfa9d95e86229059beb1c19e15 \ - --hash=sha256:c8651e0d4b3bdeda6602b85fdc2abbefc1b41e573ecb37b6779c4ca50753a199 \ - --hash=sha256:d066ea419f719ed87bc2c99a4a4bfd77c2e5949cb724588b9dd58f3fd90b92bf \ - --hash=sha256:e6c58beb17380f7c2ea181ea11e7db8c0ceb474c9dd45f48e71e2cb577d146a1 \ - --hash=sha256:e852d9dda9f93ad3651ae1e3bb770eac544ec93c3807888798eccddf84596537 \ - --hash=sha256:ec3681a0cb34c255d76dd9d865a55f260164adb9fa02628415cdc2d43ee2c05d \ - --hash=sha256:ee0c0b3b35b34f782afc673d503167157094a16f442ace7c6c5e0ca80b08f50c \ - --hash=sha256:eedacb5c5d22b7097482fa834bda0dafa3d914a4e829ec83cdea2a01f8c813c4 \ - --hash=sha256:ef00af0439ebfee806b25f24c8f92109157ff3fac5731dc7867957812e87b8d9 \ - --hash=sha256:f0e8817c7d1a0c2eedebf57ef9a9896f3ea23324769a9a2061a80fe8852705ed \ - --hash=sha256:f3d5be054c461d6a2268831f04091dc82753176f6ea06dc6047a5e168265a987 \ - --hash=sha256:f4b5c37a5f40e4d733d3bbaaef082149bee5a5ea3156a785ff64d949bd1353fa +fonttools==4.61.1 \ + --hash=sha256:0de30bfe7745c0d1ffa2b0b7048fb7123ad0d71107e10ee090fa0b16b9452e87 \ + --hash=sha256:10d88e55330e092940584774ee5e8a6971b01fc2f4d3466a1d6c158230880796 \ + --hash=sha256:11f35ad7805edba3aac1a3710d104592df59f4b957e30108ae0ba6c10b11dd75 \ + --hash=sha256:15acc09befd16a0fb8a8f62bc147e1a82817542d72184acca9ce6e0aeda9fa6d \ + --hash=sha256:17d2bf5d541add43822bcf0c43d7d847b160c9bb01d15d5007d84e2217aaa371 \ + --hash=sha256:2180f14c141d2f0f3da43f3a81bc8aa4684860f6b0e6f9e165a4831f24e6a23b \ + --hash=sha256:21e7c8d76f62ab13c9472ccf74515ca5b9a761d1bde3265152a6dc58700d895b \ + --hash=sha256:41a7170d042e8c0024703ed13b71893519a1a6d6e18e933e3ec7507a2c26a4b2 \ + --hash=sha256:41ed4b5ec103bd306bb68f81dc166e77409e5209443e5773cb4ed837bcc9b0d3 \ + 
--hash=sha256:497c31ce314219888c0e2fce5ad9178ca83fe5230b01a5006726cdf3ac9f24d9 \ + --hash=sha256:4c1b526c8d3f615a7b1867f38a9410849c8f4aef078535742198e942fba0e9bd \ + --hash=sha256:4d7092bb38c53bbc78e9255a59158b150bcdc115a1e3b3ce0b5f267dc35dd63c \ + --hash=sha256:4f5686e1fe5fce75d82d93c47a438a25bf0d1319d2843a926f741140b2b16e0c \ + --hash=sha256:58b0ee0ab5b1fc9921eccfe11d1435added19d6494dde14e323f25ad2bc30c56 \ + --hash=sha256:5ce02f38a754f207f2f06557523cd39a06438ba3aafc0639c477ac409fc64e37 \ + --hash=sha256:5fade934607a523614726119164ff621e8c30e8fa1ffffbbd358662056ba69f0 \ + --hash=sha256:5fe9fd43882620017add5eabb781ebfbc6998ee49b35bd7f8f79af1f9f99a958 \ + --hash=sha256:64102ca87e84261419c3747a0d20f396eb024bdbeb04c2bfb37e2891f5fadcb5 \ + --hash=sha256:664c5a68ec406f6b1547946683008576ef8b38275608e1cee6c061828171c118 \ + --hash=sha256:6675329885c44657f826ef01d9e4fb33b9158e9d93c537d84ad8399539bc6f69 \ + --hash=sha256:75c1a6dfac6abd407634420c93864a1e274ebc1c7531346d9254c0d8f6ca00f9 \ + --hash=sha256:75da8f28eff26defba42c52986de97b22106cb8f26515b7c22443ebc9c2d3261 \ + --hash=sha256:77efb033d8d7ff233385f30c62c7c79271c8885d5c9657d967ede124671bbdfb \ + --hash=sha256:78a7d3ab09dc47ac1a363a493e6112d8cabed7ba7caad5f54dbe2f08676d1b47 \ + --hash=sha256:7c7db70d57e5e1089a274cbb2b1fd635c9a24de809a231b154965d415d6c6d24 \ + --hash=sha256:8c56c488ab471628ff3bfa80964372fc13504ece601e0d97a78ee74126b2045c \ + --hash=sha256:91669ccac46bbc1d09e9273546181919064e8df73488ea087dcac3e2968df9ba \ + --hash=sha256:9b666a475a65f4e839d3d10473fad6d47e0a9db14a2f4a224029c5bfde58ad2c \ + --hash=sha256:9cfef3ab326780c04d6646f68d4b4742aae222e8b8ea1d627c74e38afcbc9d91 \ + --hash=sha256:a13fc8aeb24bad755eea8f7f9d409438eb94e82cf86b08fe77a03fbc8f6a96b1 \ + --hash=sha256:a75c301f96db737e1c5ed5fd7d77d9c34466de16095a266509e13da09751bd19 \ + --hash=sha256:a76d4cb80f41ba94a6691264be76435e5f72f2cb3cab0b092a6212855f71c2f6 \ + --hash=sha256:aed04cabe26f30c1647ef0e8fbb207516fd40fe9472e9439695f5c6998e60ac5 \ + 
--hash=sha256:b148b56f5de675ee16d45e769e69f87623a4944f7443850bf9a9376e628a89d2 \ + --hash=sha256:b501c862d4901792adaec7c25b1ecc749e2662543f68bb194c42ba18d6eec98d \ + --hash=sha256:b846a1fcf8beadeb9ea4f44ec5bdde393e2f1569e17d700bfc49cd69bde75881 \ + --hash=sha256:b931ae8f62db78861b0ff1ac017851764602288575d65b8e8ff1963fed419063 \ + --hash=sha256:c33ab3ca9d3ccd581d58e989d67554e42d8d4ded94ab3ade3508455fe70e65f7 \ + --hash=sha256:c6604b735bb12fef8e0efd5578c9fb5d3d8532d5001ea13a19cddf295673ee09 \ + --hash=sha256:d8db08051fc9e7d8bc622f2112511b8107d8f27cd89e2f64ec45e9825e8288da \ + --hash=sha256:d9203500f7c63545b4ce3799319fe4d9feb1a1b89b28d3cb5abd11b9dd64147e \ + --hash=sha256:dc492779501fa723b04d0ab1f5be046797fee17d27700476edc7ee9ae535a61e \ + --hash=sha256:e6bcdf33aec38d16508ce61fd81838f24c83c90a1d1b8c68982857038673d6b8 \ + --hash=sha256:e76ce097e3c57c4bcb67c5aa24a0ecdbd9f74ea9219997a707a4061fbe2707aa \ + --hash=sha256:eff1ac3cc66c2ac7cda1e64b4e2f3ffef474b7335f92fc3833fc632d595fcee6 \ + --hash=sha256:f3cb4a569029b9f291f88aafc927dd53683757e640081ca8c412781ea144565e \ + --hash=sha256:f79b168428351d11e10c5aeb61a74e1851ec221081299f4cf56036a95431c43a \ + --hash=sha256:fa646ecec9528bef693415c79a86e733c70a4965dd938e9a226b0fc64c9d2e6c \ + --hash=sha256:fe2efccb324948a11dd09d22136fe2ac8a97d6c1347cf0b58a911dcd529f66b7 \ + --hash=sha256:fff4f534200a04b4a36e7ae3cb74493afe807b517a09e99cb4faa89a34ed6ecd # via matplotlib gitdb==4.0.12 \ --hash=sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571 \ --hash=sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf # via gitpython -gitpython==3.1.45 \ - --hash=sha256:85b0ee964ceddf211c41b9f27a49086010a190fd8132a24e21f362a4b36a791c \ - --hash=sha256:8908cb2e02fb3b93b7eb0f2827125cb699869470432cc885f019b8fd0fccff77 +gitpython==3.1.46 \ + --hash=sha256:400124c7d0ef4ea03f7310ac2fbf7151e09ff97f2a3288d64a440c584a29c37f \ + --hash=sha256:79812ed143d9d25b6d176a10bb511de0f9c67b1fa641d82097b0ab90398a2058 # via 
sphinx-collections h11==0.16.0 \ --hash=sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1 \ @@ -531,6 +518,7 @@ jinja2==3.1.6 \ # myst-parser # sphinx # sphinx-collections + # sphinxcontrib-mermaid jsonschema-rs==0.37.4 \ --hash=sha256:03b34f911e99343fc388651688683010daee538a3cf8cf86a7997bca28fdf16b \ --hash=sha256:0f17a61deb557faa57dffb9596e4f022873404f935114367788b1eebdec2bb00 \ @@ -652,9 +640,9 @@ lsprotocol==2023.0.1 \ --hash=sha256:c75223c9e4af2f24272b14c6375787438279369236cd568f596d4951052a60f2 \ --hash=sha256:cc5c15130d2403c18b734304339e51242d3018a05c4f7d0f198ad6e0cd21861d # via pygls -markdown-it-py==3.0.0 \ - --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \ - --hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb +markdown-it-py==4.0.0 \ + --hash=sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147 \ + --hash=sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3 # via # mdit-py-plugins # myst-parser @@ -750,62 +738,62 @@ markupsafe==3.0.3 \ --hash=sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a \ --hash=sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50 # via jinja2 -matplotlib==3.10.7 \ - --hash=sha256:07124afcf7a6504eafcb8ce94091c5898bbdd351519a1beb5c45f7a38c67e77f \ - --hash=sha256:09d7945a70ea43bf9248f4b6582734c2fe726723204a76eca233f24cffc7ef67 \ - --hash=sha256:0d8c32b7ea6fb80b1aeff5a2ceb3fb9778e2759e899d9beff75584714afcc5ee \ - --hash=sha256:11ae579ac83cdf3fb72573bb89f70e0534de05266728740d478f0f818983c695 \ - --hash=sha256:15112bcbaef211bd663fa935ec33313b948e214454d949b723998a43357b17b0 \ - --hash=sha256:1d9d3713a237970569156cfb4de7533b7c4eacdd61789726f444f96a0d28f57f \ - --hash=sha256:1e4bbad66c177a8fdfa53972e5ef8be72a5f27e6a607cec0d8579abd0f3102b1 \ - --hash=sha256:2222c7ba2cbde7fe63032769f6eb7e83ab3227f47d997a8453377709b7fe3a5a \ - 
--hash=sha256:22df30ffaa89f6643206cf13877191c63a50e8f800b038bc39bee9d2d4957632 \ - --hash=sha256:31963603041634ce1a96053047b40961f7a29eb8f9a62e80cc2c0427aa1d22a2 \ - --hash=sha256:37a1fea41153dd6ee061d21ab69c9cf2cf543160b1b85d89cd3d2e2a7902ca4c \ - --hash=sha256:3886e47f64611046bc1db523a09dd0a0a6bed6081e6f90e13806dd1d1d1b5e91 \ - --hash=sha256:4645fc5d9d20ffa3a39361fcdbcec731382763b623b72627806bf251b6388866 \ - --hash=sha256:4a11c2e9e72e7de09b7b72e62f3df23317c888299c875e2b778abf1eda8c0a42 \ - --hash=sha256:4a74f79fafb2e177f240579bc83f0b60f82cc47d2f1d260f422a0627207008ca \ - --hash=sha256:4c14b6acd16cddc3569a2d515cfdd81c7a68ac5639b76548cfc1a9e48b20eb65 \ - --hash=sha256:53b492410a6cd66c7a471de6c924f6ede976e963c0f3097a3b7abfadddc67d0a \ - --hash=sha256:53cc80662dd197ece414dd5b66e07370201515a3eaf52e7c518c68c16814773b \ - --hash=sha256:5c09cf8f2793f81368f49f118b6f9f937456362bee282eac575cca7f84cda537 \ - --hash=sha256:5e38c2d581d62ee729a6e144c47a71b3f42fb4187508dbbf4fe71d5612c3433b \ - --hash=sha256:5f3f6d315dcc176ba7ca6e74c7768fb7e4cf566c49cb143f6bc257b62e634ed8 \ - --hash=sha256:6516ce375109c60ceec579e699524e9d504cd7578506f01150f7a6bc174a775e \ - --hash=sha256:667ecd5d8d37813a845053d8f5bf110b534c3c9f30e69ebd25d4701385935a6d \ - --hash=sha256:6f1851eab59ca082c95df5a500106bad73672645625e04538b3ad0f69471ffcc \ - --hash=sha256:702590829c30aada1e8cef0568ddbffa77ca747b4d6e36c6d173f66e301f89cc \ - --hash=sha256:7146d64f561498764561e9cd0ed64fcf582e570fc519e6f521e2d0cfd43365e1 \ - --hash=sha256:744991e0cc863dd669c8dc9136ca4e6e0082be2070b9d793cbd64bec872a6815 \ - --hash=sha256:786656bb13c237bbcebcd402f65f44dd61ead60ee3deb045af429d889c8dbc67 \ - --hash=sha256:7a0edb7209e21840e8361e91ea84ea676658aa93edd5f8762793dec77a4a6748 \ - --hash=sha256:7ac81eee3b7c266dd92cee1cd658407b16c57eed08c7421fa354ed68234de380 \ - --hash=sha256:90ad854c0a435da3104c01e2c6f0028d7e719b690998a2333d7218db80950722 \ - --hash=sha256:9257be2f2a03415f9105c486d304a321168e61ad450f6153d77c69504ad764bb \ - 
--hash=sha256:932c55d1fa7af4423422cb6a492a31cbcbdbe68fd1a9a3f545aa5e7a143b5355 \ - --hash=sha256:a06ba7e2a2ef9131c79c49e63dad355d2d878413a0376c1727c8b9335ff731c7 \ - --hash=sha256:aebed7b50aa6ac698c90f60f854b47e48cd2252b30510e7a1feddaf5a3f72cbf \ - --hash=sha256:b172db79759f5f9bc13ef1c3ef8b9ee7b37b0247f987fbbbdaa15e4f87fd46a9 \ - --hash=sha256:b3c4ea4948d93c9c29dc01c0c23eef66f2101bf75158c291b88de6525c55c3d1 \ - --hash=sha256:b498e9e4022f93de2d5a37615200ca01297ceebbb56fe4c833f46862a490f9e3 \ - --hash=sha256:b4d41379b05528091f00e1728004f9a8d7191260f3862178b88e8fd770206318 \ - --hash=sha256:b69676845a0a66f9da30e87f48be36734d6748024b525ec4710be40194282c84 \ - --hash=sha256:c17398b709a6cce3d9fdb1595c33e356d91c098cd9486cb2cc21ea2ea418e715 \ - --hash=sha256:c380371d3c23e0eadf8ebff114445b9f970aff2010198d498d4ab4c3b41eea4f \ - --hash=sha256:cb783436e47fcf82064baca52ce748af71725d0352e1d31564cbe9c95df92b9c \ - --hash=sha256:cc1c51b846aca49a5a8b44fbba6a92d583a35c64590ad9e1e950dc88940a4297 \ - --hash=sha256:d0b181e9fa8daf1d9f2d4c547527b167cb8838fc587deabca7b5c01f97199e84 \ - --hash=sha256:d2a959c640cdeecdd2ec3136e8ea0441da59bcaf58d67e9c590740addba2cb68 \ - --hash=sha256:d5f256d49fea31f40f166a5e3131235a5d2f4b7f44520b1cf0baf1ce568ccff0 \ - --hash=sha256:d883460c43e8c6b173fef244a2341f7f7c0e9725c7fe68306e8e44ed9c8fb100 \ - --hash=sha256:d8eb7194b084b12feb19142262165832fc6ee879b945491d1c3d4660748020c4 \ - --hash=sha256:d9749313deb729f08207718d29c86246beb2ea3fdba753595b55901dee5d2fd6 \ - --hash=sha256:de66744b2bb88d5cd27e80dfc2ec9f0517d0a46d204ff98fe9e5f2864eb67657 \ - --hash=sha256:e91f61a064c92c307c5a9dc8c05dc9f8a68f0a3be199d9a002a0622e13f874a1 \ - --hash=sha256:f19410b486fdd139885ace124e57f938c1e6a3210ea13dd29cab58f5d4bc12c7 \ - --hash=sha256:f79d5de970fc90cd5591f60053aecfce1fcd736e0303d9f0bf86be649fa68fb8 \ - --hash=sha256:fba2974df0bf8ce3c995fa84b79cde38326e0f7b5409e7a3a481c1141340bcf7 +matplotlib==3.10.8 \ + 
--hash=sha256:00270d217d6b20d14b584c521f810d60c5c78406dc289859776550df837dcda7 \ + --hash=sha256:0a33deb84c15ede243aead39f77e990469fff93ad1521163305095b77b72ce4a \ + --hash=sha256:113bb52413ea508ce954a02c10ffd0d565f9c3bc7f2eddc27dfe1731e71c7b5f \ + --hash=sha256:12d90df9183093fcd479f4172ac26b322b1248b15729cb57f42f71f24c7e37a3 \ + --hash=sha256:15d30132718972c2c074cd14638c7f4592bd98719e2308bccea40e0538bc0cb5 \ + --hash=sha256:18821ace09c763ec93aef5eeff087ee493a24051936d7b9ebcad9662f66501f9 \ + --hash=sha256:1ae029229a57cd1e8fe542485f27e7ca7b23aa9e8944ddb4985d0bc444f1eca2 \ + --hash=sha256:2299372c19d56bcd35cf05a2738308758d32b9eaed2371898d8f5bd33f084aa3 \ + --hash=sha256:238b7ce5717600615c895050239ec955d91f321c209dd110db988500558e70d6 \ + --hash=sha256:24d50994d8c5816ddc35411e50a86ab05f575e2530c02752e02538122613371f \ + --hash=sha256:25d380fe8b1dc32cf8f0b1b448470a77afb195438bafdf1d858bfb876f3edf7b \ + --hash=sha256:2c1998e92cd5999e295a731bcb2911c75f597d937341f3030cc24ef2733d78a8 \ + --hash=sha256:2cf5bd12cecf46908f286d7838b2abc6c91cda506c0445b8223a7c19a00df008 \ + --hash=sha256:32f8dce744be5569bebe789e46727946041199030db8aeb2954d26013a0eb26b \ + --hash=sha256:37b3c1cc42aa184b3f738cfa18c1c1d72fd496d85467a6cf7b807936d39aa656 \ + --hash=sha256:3a48a78d2786784cc2413e57397981fb45c79e968d99656706018d6e62e57958 \ + --hash=sha256:3ab4aabc72de4ff77b3ec33a6d78a68227bf1123465887f9905ba79184a1cc04 \ + --hash=sha256:3c624e43ed56313651bc18a47f838b60d7b8032ed348911c54906b130b20071b \ + --hash=sha256:3f2e409836d7f5ac2f1c013110a4d50b9f7edc26328c108915f9075d7d7a91b6 \ + --hash=sha256:3f5c3e4da343bba819f0234186b9004faba952cc420fbc522dc4e103c1985908 \ + --hash=sha256:41703cc95688f2516b480f7f339d8851a6035f18e100ee6a32bc0b8536a12a9c \ + --hash=sha256:495672de149445ec1b772ff2c9ede9b769e3cb4f0d0aa7fa730d7f59e2d4e1c1 \ + --hash=sha256:4cf267add95b1c88300d96ca837833d4112756045364f5c734a2276038dae27d \ + --hash=sha256:56271f3dac49a88d7fca5060f004d9d22b865f743a12a23b1e937a0be4818ee1 \ + 
--hash=sha256:595ba4d8fe983b88f0eec8c26a241e16d6376fe1979086232f481f8f3f67494c \ + --hash=sha256:5f62550b9a30afde8c1c3ae450e5eb547d579dd69b25c2fc7a1c67f934c1717a \ + --hash=sha256:646d95230efb9ca614a7a594d4fcacde0ac61d25e37dd51710b36477594963ce \ + --hash=sha256:64fcc24778ca0404ce0cb7b6b77ae1f4c7231cdd60e6778f999ee05cbd581b9a \ + --hash=sha256:6be43b667360fef5c754dda5d25a32e6307a03c204f3c0fc5468b78fa87b4160 \ + --hash=sha256:6da7c2ce169267d0d066adcf63758f0604aa6c3eebf67458930f9d9b79ad1db1 \ + --hash=sha256:83d282364ea9f3e52363da262ce32a09dfe241e4080dcedda3c0db059d3c1f11 \ + --hash=sha256:9153c3292705be9f9c64498a8872118540c3f4123d1a1c840172edf262c8be4a \ + --hash=sha256:99eefd13c0dc3b3c1b4d561c1169e65fe47aab7b8158754d7c084088e2329466 \ + --hash=sha256:a0a7f52498f72f13d4a25ea70f35f4cb60642b466cbb0a9be951b5bc3f45a486 \ + --hash=sha256:a2b336e2d91a3d7006864e0990c83b216fcdca64b5a6484912902cef87313d78 \ + --hash=sha256:a48f2b74020919552ea25d222d5cc6af9ca3f4eb43a93e14d068457f545c2a17 \ + --hash=sha256:ad3d9833a64cf48cc4300f2b406c3d0f4f4724a91c0bd5640678a6ba7c102077 \ + --hash=sha256:b44d07310e404ba95f8c25aa5536f154c0a8ec473303535949e52eb71d0a1565 \ + --hash=sha256:b53285e65d4fa4c86399979e956235deb900be5baa7fc1218ea67fbfaeaadd6f \ + --hash=sha256:b5a2b97dbdc7d4f353ebf343744f1d1f1cca8aa8bfddb4262fcf4306c3761d50 \ + --hash=sha256:b9a5ca4ac220a0cdd1ba6bcba3608547117d30468fefce49bb26f55c1a3d5c58 \ + --hash=sha256:bab485bcf8b1c7d2060b4fcb6fc368a9e6f4cd754c9c2fea281f4be21df394a2 \ + --hash=sha256:c108a1d6fa78a50646029cb6d49808ff0fc1330fda87fa6f6250c6b5369b6645 \ + --hash=sha256:d56a1efd5bfd61486c8bc968fa18734464556f0fb8e51690f4ac25d85cbbbbc2 \ + --hash=sha256:d9050fee89a89ed57b4fb2c1bfac9a3d0c57a0d55aed95949eedbc42070fea39 \ + --hash=sha256:dd80ecb295460a5d9d260df63c43f4afbdd832d725a531f008dad1664f458adf \ + --hash=sha256:e8ea3e2d4066083e264e75c829078f9e149fa119d27e19acd503de65e0b13149 \ + --hash=sha256:eb3823f11823deade26ce3b9f40dcb4a213da7a670013929f31d5f5ed1055b22 \ + 
--hash=sha256:ee40c27c795bda6a5292e9cff9890189d32f7e3a0bf04e0e3c9430c4a00c37df \ + --hash=sha256:efb30e3baaea72ce5928e32bab719ab4770099079d66726a62b11b1ef7273be4 \ + --hash=sha256:f254d118d14a7f99d616271d6c3c27922c092dac11112670b157798b89bf4933 \ + --hash=sha256:f89c151aab2e2e23cb3fe0acad1e8b82841fd265379c4cecd0f3fcb34c15e0f6 \ + --hash=sha256:f97aeb209c3d2511443f8797e3e5a569aebb040d4f8bc79aa3ee78a8fb9e3dd8 \ + --hash=sha256:f9b587c9c7274c1613a30afabf65a272114cd6cdbe67b3406f818c79d7ab2e2a \ + --hash=sha256:fb061f596dad3a0f52b60dc6a5dec4a0c300dec41e058a7efe09256188d170b7 # via sphinx-needs mdit-py-plugins==0.5.0 \ --hash=sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f \ @@ -815,9 +803,9 @@ mdurl==0.1.2 \ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba # via markdown-it-py -myst-parser==4.0.1 \ - --hash=sha256:5cfea715e4f3574138aecbf7d54132296bfd72bb614d31168f48c477a830a7c4 \ - --hash=sha256:9134e88959ec3b5780aedf8a99680ea242869d012e8821db3126d427edc9c95d +myst-parser==5.0.0 \ + --hash=sha256:ab31e516024918296e169139072b81592336f2fef55b8986aa31c9f04b5f7211 \ + --hash=sha256:f6f231452c56e8baa662cc352c548158f6a16fcbd6e3800fc594978002b94f3a # via -r src/requirements.in needs-config-writer==0.2.4 \ --hash=sha256:0f0702574081bb8ed7d896aadfb73c0e48af099dc0d4227cc2bac957ed8ea4f6 \ @@ -836,81 +824,79 @@ nodejs-wheel-binaries==24.11.1 \ # via # -r /external/score_tooling+/python_basics/requirements.txt # basedpyright -numpy==2.3.5 \ - --hash=sha256:00dc4e846108a382c5869e77c6ed514394bdeb3403461d25a829711041217d5b \ - --hash=sha256:0472f11f6ec23a74a906a00b48a4dcf3849209696dff7c189714511268d103ae \ - --hash=sha256:04822c00b5fd0323c8166d66c701dc31b7fbd252c100acd708c48f763968d6a3 \ - --hash=sha256:052e8c42e0c49d2575621c158934920524f6c5da05a1d3b9bab5d8e259e045f0 \ - --hash=sha256:09a1bea522b25109bf8e6f3027bd810f7c1085c64a0c7ce050c1676ad0ba010b \ - 
--hash=sha256:0cd00b7b36e35398fa2d16af7b907b65304ef8bb4817a550e06e5012929830fa \ - --hash=sha256:0d8163f43acde9a73c2a33605353a4f1bc4798745a8b1d73183b28e5b435ae28 \ - --hash=sha256:1062fde1dcf469571705945b0f221b73928f34a20c904ffb45db101907c3454e \ - --hash=sha256:11e06aa0af8c0f05104d56450d6093ee639e15f24ecf62d417329d06e522e017 \ - --hash=sha256:17531366a2e3a9e30762c000f2c43a9aaa05728712e25c11ce1dbe700c53ad41 \ - --hash=sha256:1978155dd49972084bd6ef388d66ab70f0c323ddee6f693d539376498720fb7e \ - --hash=sha256:1ed1ec893cff7040a02c8aa1c8611b94d395590d553f6b53629a4461dc7f7b63 \ - --hash=sha256:2dcd0808a421a482a080f89859a18beb0b3d1e905b81e617a188bd80422d62e9 \ - --hash=sha256:2e2eb32ddb9ccb817d620ac1d8dae7c3f641c1e5f55f531a33e8ab97960a75b8 \ - --hash=sha256:2feae0d2c91d46e59fcd62784a3a83b3fb677fead592ce51b5a6fbb4f95965ff \ - --hash=sha256:3095bdb8dd297e5920b010e96134ed91d852d81d490e787beca7e35ae1d89cf7 \ - --hash=sha256:30bc11310e8153ca664b14c5f1b73e94bd0503681fcf136a163de856f3a50139 \ - --hash=sha256:3101e5177d114a593d79dd79658650fe28b5a0d8abeb8ce6f437c0e6df5be1a4 \ - --hash=sha256:396084a36abdb603546b119d96528c2f6263921c50df3c8fd7cb28873a237748 \ - --hash=sha256:3997b5b3c9a771e157f9aae01dd579ee35ad7109be18db0e85dbdbe1de06e952 \ - --hash=sha256:414802f3b97f3c1eef41e530aaba3b3c1620649871d8cb38c6eaff034c2e16bd \ - --hash=sha256:51c1e14eb1e154ebd80e860722f9e6ed6ec89714ad2db2d3aa33c31d7c12179b \ - --hash=sha256:51c55fe3451421f3a6ef9a9c1439e82101c57a2c9eab9feb196a62b1a10b58ce \ - --hash=sha256:5ee6609ac3604fa7780e30a03e5e241a7956f8e2fcfe547d51e3afa5247ac47f \ - --hash=sha256:612a95a17655e213502f60cfb9bf9408efdc9eb1d5f50535cc6eb365d11b42b5 \ - --hash=sha256:6203fdf9f3dc5bdaed7319ad8698e685c7a3be10819f41d32a0723e611733b42 \ - --hash=sha256:63c0e9e7eea69588479ebf4a8a270d5ac22763cc5854e9a7eae952a3908103f7 \ - --hash=sha256:66f85ce62c70b843bab1fb14a05d5737741e74e28c7b8b5a064de10142fad248 \ - --hash=sha256:6cf9b429b21df6b99f4dee7a1218b8b7ffbbe7df8764dc0bd60ce8a0708fed1e \ - 
--hash=sha256:70b37199913c1bd300ff6e2693316c6f869c7ee16378faf10e4f5e3275b299c3 \ - --hash=sha256:727fd05b57df37dc0bcf1a27767a3d9a78cbbc92822445f32cc3436ba797337b \ - --hash=sha256:74ae7b798248fe62021dbf3c914245ad45d1a6b0cb4a29ecb4b31d0bfbc4cc3e \ - --hash=sha256:784db1dcdab56bf0517743e746dfb0f885fc68d948aba86eeec2cba234bdf1c0 \ - --hash=sha256:86945f2ee6d10cdfd67bcb4069c1662dd711f7e2a4343db5cecec06b87cf31aa \ - --hash=sha256:86d835afea1eaa143012a2d7a3f45a3adce2d7adc8b4961f0b362214d800846a \ - --hash=sha256:872a5cf366aec6bb1147336480fef14c9164b154aeb6542327de4970282cd2f5 \ - --hash=sha256:8b973c57ff8e184109db042c842423ff4f60446239bd585a5131cc47f06f789d \ - --hash=sha256:8cba086a43d54ca804ce711b2a940b16e452807acebe7852ff327f1ecd49b0d4 \ - --hash=sha256:8f7f0e05112916223d3f438f293abf0727e1181b5983f413dfa2fefc4098245c \ - --hash=sha256:900218e456384ea676e24ea6a0417f030a3b07306d29d7ad843957b40a9d8d52 \ - --hash=sha256:93eebbcf1aafdf7e2ddd44c2923e2672e1010bddc014138b229e49725b4d6be5 \ - --hash=sha256:9c75442b2209b8470d6d5d8b1c25714270686f14c749028d2199c54e29f20b4d \ - --hash=sha256:9ee2197ef8c4f0dfe405d835f3b6a14f5fee7782b5de51ba06fb65fc9b36e9f1 \ - --hash=sha256:a414504bef8945eae5f2d7cb7be2d4af77c5d1cb5e20b296c2c25b61dff2900c \ - --hash=sha256:a4b9159734b326535f4dd01d947f919c6eefd2d9827466a696c44ced82dfbc18 \ - --hash=sha256:a80afd79f45f3c4a7d341f13acbe058d1ca8ac017c165d3fa0d3de6bc1a079d7 \ - --hash=sha256:aa5bc7c5d59d831d9773d1170acac7893ce3a5e130540605770ade83280e7188 \ - --hash=sha256:acfd89508504a19ed06ef963ad544ec6664518c863436306153e13e94605c218 \ - --hash=sha256:aeffcab3d4b43712bb7a60b65f6044d444e75e563ff6180af8f98dd4b905dfd2 \ - --hash=sha256:afaffc4393205524af9dfa400fa250143a6c3bc646c08c9f5e25a9f4b4d6a903 \ - --hash=sha256:b0c7088a73aef3d687c4deef8452a3ac7c1be4e29ed8bf3b366c8111128ac60c \ - --hash=sha256:b46b4ec24f7293f23adcd2d146960559aaf8020213de8ad1909dba6c013bf89c \ - --hash=sha256:b501b5fa195cc9e24fe102f21ec0a44dffc231d2af79950b451e0d99cea02234 \ - 
--hash=sha256:bf06bc2af43fa8d32d30fae16ad965663e966b1a3202ed407b84c989c3221e82 \ - --hash=sha256:c804e3a5aba5460c73955c955bdbd5c08c354954e9270a2c1565f62e866bdc39 \ - --hash=sha256:c8a9958e88b65c3b27e22ca2a076311636850b612d6bbfb76e8d156aacde2aaf \ - --hash=sha256:cc0a57f895b96ec78969c34f682c602bf8da1a0270b09bc65673df2e7638ec20 \ - --hash=sha256:cc8920d2ec5fa99875b670bb86ddeb21e295cb07aa331810d9e486e0b969d946 \ - --hash=sha256:ccc933afd4d20aad3c00bcef049cb40049f7f196e0397f1109dba6fed63267b0 \ - --hash=sha256:ce581db493ea1a96c0556360ede6607496e8bf9b3a8efa66e06477267bc831e9 \ - --hash=sha256:d0f23b44f57077c1ede8c5f26b30f706498b4862d3ff0a7298b8411dd2f043ff \ - --hash=sha256:d21644de1b609825ede2f48be98dfde4656aefc713654eeee280e37cadc4e0ad \ - --hash=sha256:d6889ec4ec662a1a37eb4b4fb26b6100841804dac55bd9df579e326cdc146227 \ - --hash=sha256:de5672f4a7b200c15a4127042170a694d4df43c992948f5e1af57f0174beed10 \ - --hash=sha256:e6a0bc88393d65807d751a614207b7129a310ca4fe76a74e5c7da5fa5671417e \ - --hash=sha256:ed89927b86296067b4f81f108a2271d8926467a8868e554eaf370fc27fa3ccaf \ - --hash=sha256:ee3888d9ff7c14604052b2ca5535a30216aa0a58e948cdd3eeb8d3415f638769 \ - --hash=sha256:f0963b55cdd70fad460fa4c1341f12f976bb26cb66021a5580329bd498988310 \ - --hash=sha256:f16417ec91f12f814b10bafe79ef77e70113a2f5f7018640e7425ff979253425 \ - --hash=sha256:f28620fe26bee16243be2b7b874da327312240a7cdc38b769a697578d2100013 \ - --hash=sha256:f4255143f5160d0de972d28c8f9665d882b5f61309d8362fdd3e103cf7bf010c \ - --hash=sha256:ffac52f28a7849ad7576293c0cb7b9f08304e8f7d738a8cb8a90ec4c55a998eb \ - --hash=sha256:ffe22d2b05504f786c867c8395de703937f934272eb67586817b46188b4ded6d \ - --hash=sha256:fffe29a1ef00883599d1dc2c51aa2e5d80afe49523c261a74933df395c15c520 +numpy==2.4.2 \ + --hash=sha256:00ab83c56211a1d7c07c25e3217ea6695e50a3e2f255053686b081dc0b091a82 \ + --hash=sha256:068cdb2d0d644cdb45670810894f6a0600797a69c05f1ac478e8d31670b8ee75 \ + 
--hash=sha256:0f01dcf33e73d80bd8dc0f20a71303abbafa26a19e23f6b68d1aa9990af90257 \ + --hash=sha256:0fece1d1f0a89c16b03442eae5c56dc0be0c7883b5d388e0c03f53019a4bfd71 \ + --hash=sha256:12e26134a0331d8dbd9351620f037ec470b7c75929cb8a1537f6bfe411152a1a \ + --hash=sha256:1ae241bbfc6ae276f94a170b14785e561cb5e7f626b6688cf076af4110887413 \ + --hash=sha256:1f92f53998a17265194018d1cc321b2e96e900ca52d54c7c77837b71b9465181 \ + --hash=sha256:209fae046e62d0ce6435fcfe3b1a10537e858249b3d9b05829e2a05218296a85 \ + --hash=sha256:20abd069b9cda45874498b245c8015b18ace6de8546bf50dfa8cea1696ed06ef \ + --hash=sha256:21982668592194c609de53ba4933a7471880ccbaadcc52352694a59ecc860b3a \ + --hash=sha256:25f2059807faea4b077a2b6837391b5d830864b3543627f381821c646f31a63c \ + --hash=sha256:2653de5c24910e49c2b106499803124dde62a5a1fe0eedeaecf4309a5f639390 \ + --hash=sha256:2b8f157c8a6f20eb657e240f8985cc135598b2b46985c5bccbde7616dc9c6b1e \ + --hash=sha256:2fb882da679409066b4603579619341c6d6898fc83a8995199d5249f986e8e8f \ + --hash=sha256:40397bda92382fcec844066efb11f13e1c9a3e2a8e8f318fb72ed8b6db9f60f1 \ + --hash=sha256:444be170853f1f9d528428eceb55f12918e4fda5d8805480f36a002f1415e09b \ + --hash=sha256:47c5a6ed21d9452b10227e5e8a0e1c22979811cad7dcc19d8e3e2fb8fa03f1a3 \ + --hash=sha256:4f069069931240b3fc703f1e23df63443dbd6390614c8c44a87d96cd0ec81eb1 \ + --hash=sha256:52b913ec40ff7ae845687b0b34d8d93b60cb66dcee06996dd5c99f2fc9328657 \ + --hash=sha256:5633c0da313330fd20c484c78cdd3f9b175b55e1a766c4a174230c6b70ad8262 \ + --hash=sha256:5daf6f3914a733336dab21a05cdec343144600e964d2fcdabaac0c0269874b2a \ + --hash=sha256:5eea80d908b2c1f91486eb95b3fb6fab187e569ec9752ab7d9333d2e66bf2d6b \ + --hash=sha256:602f65afdef699cda27ec0b9224ae5dc43e328f4c24c689deaf77133dbee74d0 \ + --hash=sha256:659a6107e31a83c4e33f763942275fd278b21d095094044eb35569e86a21ddae \ + --hash=sha256:66cb9422236317f9d44b67b4d18f44efe6e9c7f8794ac0462978513359461554 \ + --hash=sha256:6d82351358ffbcdcd7b686b90742a9b86632d6c1c051016484fa0b326a0a1548 \ + 
--hash=sha256:6e9f61981ace1360e42737e2bae58b27bf28a1b27e781721047d84bd754d32e7 \ + --hash=sha256:6ed0be1ee58eef41231a5c943d7d1375f093142702d5723ca2eb07db9b934b05 \ + --hash=sha256:7cdde6de52fb6664b00b056341265441192d1291c130e99183ec0d4b110ff8b1 \ + --hash=sha256:7df2de1e4fba69a51c06c28f5a3de36731eb9639feb8e1cf7e4a7b0daf4cf622 \ + --hash=sha256:7edc794af8b36ca37ef5fcb5e0d128c7e0595c7b96a2318d1badb6fcd8ee86b1 \ + --hash=sha256:7f54844851cdb630ceb623dcec4db3240d1ac13d4990532446761baede94996a \ + --hash=sha256:805cc8de9fd6e7a22da5aed858e0ab16be5a4db6c873dde1d7451c541553aa27 \ + --hash=sha256:8906e71fd8afcb76580404e2a950caef2685df3d2a57fe82a86ac8d33cc007ba \ + --hash=sha256:89f7268c009bc492f506abd6f5265defa7cb3f7487dc21d357c3d290add45082 \ + --hash=sha256:8c50dd1fc8826f5b26a5ee4d77ca55d88a895f4e4819c7ecc2a9f5905047a443 \ + --hash=sha256:8e4549f8a3c6d13d55041925e912bfd834285ef1dd64d6bc7d542583355e2e98 \ + --hash=sha256:8e9afaeb0beff068b4d9cd20d322ba0ee1cecfb0b08db145e4ab4dd44a6b5110 \ + --hash=sha256:98f16a80e917003a12c0580f97b5f875853ebc33e2eaa4bccfc8201ac6869308 \ + --hash=sha256:9e35d3e0144137d9fdae62912e869136164534d64a169f86438bc9561b6ad49f \ + --hash=sha256:9e4424677ce4b47fe73c8b5556d876571f7c6945d264201180db2dc34f676ab5 \ + --hash=sha256:adb6ed2ad29b9e15321d167d152ee909ec73395901b70936f029c3bc6d7f4460 \ + --hash=sha256:aea4f66ff44dfddf8c2cffd66ba6538c5ec67d389285292fe428cb2c738c8aef \ + --hash=sha256:b21041e8cb6a1eb5312dd1d2f80a94d91efffb7a06b70597d44f1bd2dfc315ab \ + --hash=sha256:b2f0073ed0868db1dcd86e052d37279eef185b9c8db5bf61f30f46adac63c909 \ + --hash=sha256:b3a24467af63c67829bfaa61eecf18d5432d4f11992688537be59ecd6ad32f5e \ + --hash=sha256:b9c618d56a29c9cb1c4da979e9899be7578d2e0b3c24d52079c166324c9e8695 \ + --hash=sha256:bba37bc29d4d85761deed3954a1bc62be7cf462b9510b51d367b769a8c8df325 \ + --hash=sha256:bd3a7a9f5847d2fb8c2c6d1c862fa109c31a9abeca1a3c2bd5a64572955b2979 \ + --hash=sha256:be71bf1edb48ebbbf7f6337b5bfd2f895d1902f6335a5830b20141fc126ffba0 \ + 
--hash=sha256:c02ef4401a506fb60b411467ad501e1429a3487abca4664871d9ae0b46c8ba32 \ + --hash=sha256:c3cd545784805de05aafe1dde61752ea49a359ccba9760c1e5d1c88a93bbf2b7 \ + --hash=sha256:c7ac672d699bf36275c035e16b65539931347d68b70667d28984c9fb34e07fa7 \ + --hash=sha256:cb7bbb88aa74908950d979eeaa24dbdf1a865e3c7e45ff0121d8f70387b55f73 \ + --hash=sha256:cd2bd2bbed13e213d6b55dc1d035a4f91748a7d3edc9480c13898b0353708920 \ + --hash=sha256:cda077c2e5b780200b6b3e09d0b42205a3d1c68f30c6dceb90401c13bff8fe74 \ + --hash=sha256:cf28c0c1d4c4bf00f509fa7eb02c58d7caf221b50b467bcb0d9bbf1584d5c821 \ + --hash=sha256:d0d9b7c93578baafcbc5f0b83eaf17b79d345c6f36917ba0c67f45226911d499 \ + --hash=sha256:d1240d50adff70c2a88217698ca844723068533f3f5c5fa6ee2e3220e3bdb000 \ + --hash=sha256:d30291931c915b2ab5717c2974bb95ee891a1cf22ebc16a8006bd59cd210d40a \ + --hash=sha256:d9f64d786b3b1dd742c946c42d15b07497ed14af1a1f3ce840cce27daa0ce913 \ + --hash=sha256:da6cad4e82cb893db4b69105c604d805e0c3ce11501a55b5e9f9083b47d2ffe8 \ + --hash=sha256:df1b10187212b198dd45fa943d8985a3c8cf854aed4923796e0e019e113a1bda \ + --hash=sha256:e04ae107ac591763a47398bb45b568fc38f02dbc4aa44c063f67a131f99346cb \ + --hash=sha256:e6dee3bb76aa4009d5a912180bf5b2de012532998d094acee25d9cb8dee3e44a \ + --hash=sha256:e7e88598032542bd49af7c4747541422884219056c268823ef6e5e89851c8825 \ + --hash=sha256:e98c97502435b53741540a5717a6749ac2ada901056c7db951d33e11c885cc7d \ + --hash=sha256:ec055f6dae239a6299cace477b479cca2fc125c5675482daf1dd886933a1076f \ + --hash=sha256:f74f0f7779cc7ae07d1810aab8ac6b1464c3eafb9e283a40da7309d5e6e48fbb \ + --hash=sha256:fbde1b0c6e81d56f5dccd95dd4a711d9b95df1ae4009a60887e56b27e8d903fa \ + --hash=sha256:fcf92bee92742edd401ba41135185866f7026c502617f422eb432cfeca4fe236 \ + --hash=sha256:fd49860271d52127d61197bb50b64f58454e9f578cb4b2c001a6de8b1f50b0b1 # via # contourpy # matplotlib @@ -923,102 +909,102 @@ packaging==25.0 \ # pytest # sphinx # sphinx-collections -pillow==12.0.0 \ - 
--hash=sha256:0869154a2d0546545cde61d1789a6524319fc1897d9ee31218eae7a60ccc5643 \ - --hash=sha256:09f2d0abef9e4e2f349305a4f8cc784a8a6c2f58a8c4892eea13b10a943bd26e \ - --hash=sha256:0b817e7035ea7f6b942c13aa03bb554fc44fea70838ea21f8eb31c638326584e \ - --hash=sha256:0fd00cac9c03256c8b2ff58f162ebcd2587ad3e1f2e397eab718c47e24d231cc \ - --hash=sha256:110486b79f2d112cf6add83b28b627e369219388f64ef2f960fef9ebaf54c642 \ - --hash=sha256:1979f4566bb96c1e50a62d9831e2ea2d1211761e5662afc545fa766f996632f6 \ - --hash=sha256:1ac11e8ea4f611c3c0147424eae514028b5e9077dd99ab91e1bd7bc33ff145e1 \ - --hash=sha256:1b1b133e6e16105f524a8dec491e0586d072948ce15c9b914e41cdadd209052b \ - --hash=sha256:1ee80a59f6ce048ae13cda1abf7fbd2a34ab9ee7d401c46be3ca685d1999a399 \ - --hash=sha256:21f241bdd5080a15bc86d3466a9f6074a9c2c2b314100dd896ac81ee6db2f1ba \ - --hash=sha256:266cd5f2b63ff316d5a1bba46268e603c9caf5606d44f38c2873c380950576ad \ - --hash=sha256:26d9f7d2b604cd23aba3e9faf795787456ac25634d82cd060556998e39c6fa47 \ - --hash=sha256:27f95b12453d165099c84f8a8bfdfd46b9e4bda9e0e4b65f0635430027f55739 \ - --hash=sha256:2c54c1a783d6d60595d3514f0efe9b37c8808746a66920315bfd34a938d7994b \ - --hash=sha256:2fa5f0b6716fc88f11380b88b31fe591a06c6315e955c096c35715788b339e3f \ - --hash=sha256:32ed80ea8a90ee3e6fa08c21e2e091bba6eda8eccc83dbc34c95169507a91f10 \ - --hash=sha256:3830c769decf88f1289680a59d4f4c46c72573446352e2befec9a8512104fa52 \ - --hash=sha256:38df9b4bfd3db902c9c2bd369bcacaf9d935b2fff73709429d95cc41554f7b3d \ - --hash=sha256:3adfb466bbc544b926d50fe8f4a4e6abd8c6bffd28a26177594e6e9b2b76572b \ - --hash=sha256:3e42edad50b6909089750e65c91aa09aaf1e0a71310d383f11321b27c224ed8a \ - --hash=sha256:4078242472387600b2ce8d93ade8899c12bf33fa89e55ec89fe126e9d6d5d9e9 \ - --hash=sha256:455247ac8a4cfb7b9bc45b7e432d10421aea9fc2e74d285ba4072688a74c2e9d \ - --hash=sha256:4cc6b3b2efff105c6a1656cfe59da4fdde2cda9af1c5e0b58529b24525d0a098 \ - --hash=sha256:4cf7fed4b4580601c4345ceb5d4cbf5a980d030fd5ad07c4d2ec589f95f09905 \ - 
--hash=sha256:5193fde9a5f23c331ea26d0cf171fbf67e3f247585f50c08b3e205c7aeb4589b \ - --hash=sha256:5269cc1caeedb67e6f7269a42014f381f45e2e7cd42d834ede3c703a1d915fe3 \ - --hash=sha256:53561a4ddc36facb432fae7a9d8afbfaf94795414f5cdc5fc52f28c1dca90371 \ - --hash=sha256:55f818bd74fe2f11d4d7cbc65880a843c4075e0ac7226bc1a23261dbea531953 \ - --hash=sha256:58eea5ebe51504057dd95c5b77d21700b77615ab0243d8152793dc00eb4faf01 \ - --hash=sha256:5d5c411a8eaa2299322b647cd932586b1427367fd3184ffbb8f7a219ea2041ca \ - --hash=sha256:6846bd2d116ff42cba6b646edf5bf61d37e5cbd256425fa089fee4ff5c07a99e \ - --hash=sha256:6ace95230bfb7cd79ef66caa064bbe2f2a1e63d93471c3a2e1f1348d9f22d6b7 \ - --hash=sha256:6e51b71417049ad6ab14c49608b4a24d8fb3fe605e5dfabfe523b58064dc3d27 \ - --hash=sha256:71db6b4c1653045dacc1585c1b0d184004f0d7e694c7b34ac165ca70c0838082 \ - --hash=sha256:7438839e9e053ef79f7112c881cef684013855016f928b168b81ed5835f3e75e \ - --hash=sha256:759de84a33be3b178a64c8ba28ad5c135900359e85fb662bc6e403ad4407791d \ - --hash=sha256:792a2c0be4dcc18af9d4a2dfd8a11a17d5e25274a1062b0ec1c2d79c76f3e7f8 \ - --hash=sha256:7d87ef5795da03d742bf49439f9ca4d027cde49c82c5371ba52464aee266699a \ - --hash=sha256:7dfb439562f234f7d57b1ac6bc8fe7f838a4bd49c79230e0f6a1da93e82f1fad \ - --hash=sha256:7fa22993bac7b77b78cae22bad1e2a987ddf0d9015c63358032f84a53f23cdc3 \ - --hash=sha256:805ebf596939e48dbb2e4922a1d3852cfc25c38160751ce02da93058b48d252a \ - --hash=sha256:82240051c6ca513c616f7f9da06e871f61bfd7805f566275841af15015b8f98d \ - --hash=sha256:87d4f8125c9988bfbed67af47dd7a953e2fc7b0cc1e7800ec6d2080d490bb353 \ - --hash=sha256:8d8ca2b210ada074d57fcee40c30446c9562e542fc46aedc19baf758a93532ee \ - --hash=sha256:8dc232e39d409036af549c86f24aed8273a40ffa459981146829a324e0848b4b \ - --hash=sha256:90387104ee8400a7b4598253b4c406f8958f59fcf983a6cea2b50d59f7d63d0b \ - --hash=sha256:905b0365b210c73afb0ebe9101a32572152dfd1c144c7e28968a331b9217b94a \ - --hash=sha256:99353a06902c2e43b43e8ff74ee65a7d90307d82370604746738a1e0661ccca7 \ - 
--hash=sha256:99a7f72fb6249302aa62245680754862a44179b545ded638cf1fef59befb57ef \ - --hash=sha256:9f0b04c6b8584c2c193babcccc908b38ed29524b29dd464bc8801bf10d746a3a \ - --hash=sha256:9fe611163f6303d1619bbcb653540a4d60f9e55e622d60a3108be0d5b441017a \ - --hash=sha256:a3475b96f5908b3b16c47533daaa87380c491357d197564e0ba34ae75c0f3257 \ - --hash=sha256:a6597ff2b61d121172f5844b53f21467f7082f5fb385a9a29c01414463f93b07 \ - --hash=sha256:a7921c5a6d31b3d756ec980f2f47c0cfdbce0fc48c22a39347a895f41f4a6ea4 \ - --hash=sha256:aa5129de4e174daccbc59d0a3b6d20eaf24417d59851c07ebb37aeb02947987c \ - --hash=sha256:aeaefa96c768fc66818730b952a862235d68825c178f1b3ffd4efd7ad2edcb7c \ - --hash=sha256:afbefa430092f71a9593a99ab6a4e7538bc9eabbf7bf94f91510d3503943edc4 \ - --hash=sha256:aff9e4d82d082ff9513bdd6acd4f5bd359f5b2c870907d2b0a9c5e10d40c88fe \ - --hash=sha256:b22bd8c974942477156be55a768f7aa37c46904c175be4e158b6a86e3a6b7ca8 \ - --hash=sha256:b290fd8aa38422444d4b50d579de197557f182ef1068b75f5aa8558638b8d0a5 \ - --hash=sha256:b2e4b27a6e15b04832fe9bf292b94b5ca156016bbc1ea9c2c20098a0320d6cf6 \ - --hash=sha256:b583dc9070312190192631373c6c8ed277254aa6e6084b74bdd0a6d3b221608e \ - --hash=sha256:b87843e225e74576437fd5b6a4c2205d422754f84a06942cfaf1dc32243e45a8 \ - --hash=sha256:bc91a56697869546d1b8f0a3ff35224557ae7f881050e99f615e0119bf934b4e \ - --hash=sha256:bd87e140e45399c818fac4247880b9ce719e4783d767e030a883a970be632275 \ - --hash=sha256:bde737cff1a975b70652b62d626f7785e0480918dece11e8fef3c0cf057351c3 \ - --hash=sha256:bdee52571a343d721fb2eb3b090a82d959ff37fc631e3f70422e0c2e029f3e76 \ - --hash=sha256:bee2a6db3a7242ea309aa7ee8e2780726fed67ff4e5b40169f2c940e7eb09227 \ - --hash=sha256:beeae3f27f62308f1ddbcfb0690bf44b10732f2ef43758f169d5e9303165d3f9 \ - --hash=sha256:c50f36a62a22d350c96e49ad02d0da41dbd17ddc2e29750dbdba4323f85eb4a5 \ - --hash=sha256:c607c90ba67533e1b2355b821fef6764d1dd2cbe26b8c1005ae84f7aea25ff79 \ - --hash=sha256:c7b2a63fd6d5246349f3d3f37b14430d73ee7e8173154461785e43036ffa96ca \ - 
--hash=sha256:c828a1ae702fc712978bda0320ba1b9893d99be0badf2647f693cc01cf0f04fa \ - --hash=sha256:c85de1136429c524e55cfa4e033b4a7940ac5c8ee4d9401cc2d1bf48154bbc7b \ - --hash=sha256:c98fa880d695de164b4135a52fd2e9cd7b7c90a9d8ac5e9e443a24a95ef9248e \ - --hash=sha256:cae81479f77420d217def5f54b5b9d279804d17e982e0f2fa19b1d1e14ab5197 \ - --hash=sha256:d034140032870024e6b9892c692fe2968493790dd57208b2c37e3fb35f6df3ab \ - --hash=sha256:d120c38a42c234dc9a8c5de7ceaaf899cf33561956acb4941653f8bdc657aa79 \ - --hash=sha256:d4827615da15cd59784ce39d3388275ec093ae3ee8d7f0c089b76fa87af756c2 \ - --hash=sha256:d49e2314c373f4c2b39446fb1a45ed333c850e09d0c59ac79b72eb3b95397363 \ - --hash=sha256:d52610d51e265a51518692045e372a4c363056130d922a7351429ac9f27e70b0 \ - --hash=sha256:d64317d2587c70324b79861babb9c09f71fbb780bad212018874b2c013d8600e \ - --hash=sha256:d77153e14b709fd8b8af6f66a3afbb9ed6e9fc5ccf0b6b7e1ced7b036a228782 \ - --hash=sha256:d7e091d464ac59d2c7ad8e7e08105eaf9dafbc3883fd7265ffccc2baad6ac925 \ - --hash=sha256:dd333073e0cacdc3089525c7df7d39b211bcdf31fc2824e49d01c6b6187b07d0 \ - --hash=sha256:e5d8efac84c9afcb40914ab49ba063d94f5dbdf5066db4482c66a992f47a3a3b \ - --hash=sha256:f135c702ac42262573fe9714dfe99c944b4ba307af5eb507abef1667e2cbbced \ - --hash=sha256:f13711b1a5ba512d647a0e4ba79280d3a9a045aaf7e0cc6fbe96b91d4cdf6b0c \ - --hash=sha256:f4f1231b7dec408e8670264ce63e9c71409d9583dd21d32c163e25213ee2a344 \ - --hash=sha256:fa3ed2a29a9e9d2d488b4da81dcb54720ac3104a20bf0bd273f1e4648aff5af9 \ - --hash=sha256:fb3096c30df99fd01c7bf8e544f392103d0795b9f98ba71a8054bcbf56b255f1 +pillow==12.1.0 \ + --hash=sha256:00162e9ca6d22b7c3ee8e61faa3c3253cd19b6a37f126cad04f2f88b306f557d \ + --hash=sha256:079af2fb0c599c2ec144ba2c02766d1b55498e373b3ac64687e43849fbbef5bc \ + --hash=sha256:0b022eaaf709541b391ee069f0022ee5b36c709df71986e3f7be312e46f42c84 \ + --hash=sha256:0c27407a2d1b96774cbc4a7594129cc027339fd800cd081e44497722ea1179de \ + 
--hash=sha256:0ddedfaa8b5f0b4ffbc2fa87b556dc59f6bb4ecb14a53b33f9189713ae8053c0 \ + --hash=sha256:0deedf2ea233722476b3a81e8cdfbad786f7adbed5d848469fa59fe52396e4ef \ + --hash=sha256:0ed07dca4a8464bada6139ab38f5382f83e5f111698caf3191cb8dbf27d908b4 \ + --hash=sha256:0fde7ec5538ab5095cc02df38ee99b0443ff0e1c847a045554cf5f9af1f4aa82 \ + --hash=sha256:15c794d74303828eaa957ff8070846d0efe8c630901a1c753fdc63850e19ecd9 \ + --hash=sha256:1a949604f73eb07a8adab38c4fe50791f9919344398bdc8ac6b307f755fc7030 \ + --hash=sha256:1f345e7bc9d7f368887c712aa5054558bad44d2a301ddf9248599f4161abc7c0 \ + --hash=sha256:1fcc52d86ce7a34fd17cb04e87cfdb164648a3662a6f20565910a99653d66c18 \ + --hash=sha256:21e686a21078b0f9cb8c8a961d99e6a4ddb88e0fc5ea6e130172ddddc2e5221a \ + --hash=sha256:2415373395a831f53933c23ce051021e79c8cd7979822d8cc478547a3f4da8ef \ + --hash=sha256:277518bf4fe74aa91489e1b20577473b19ee70fb97c374aa50830b279f25841b \ + --hash=sha256:27b9baecb428899db6c0de572d6d305cfaf38ca1596b5c0542a5182e3e74e8c6 \ + --hash=sha256:29a4cef9cb672363926f0470afc516dbf7305a14d8c54f7abbb5c199cd8f8179 \ + --hash=sha256:3413c2ae377550f5487991d444428f1a8ae92784aac79caa8b1e3b89b175f77e \ + --hash=sha256:351889afef0f485b84078ea40fe33727a0492b9af3904661b0abbafee0355b72 \ + --hash=sha256:3ffaa2f0659e2f740473bcf03c702c39a8d4b2b7ffc629052028764324842c64 \ + --hash=sha256:40a8e3b9e8773876d6e30daed22f016509e3987bab61b3b7fe309d7019a87451 \ + --hash=sha256:414b9a78e14ffeb98128863314e62c3f24b8a86081066625700b7985b3f529bd \ + --hash=sha256:43aca0a55ce1eefc0aefa6253661cb54571857b1a7b2964bd8a1e3ef4b729924 \ + --hash=sha256:43b4899cfd091a9693a1278c4982f3e50f7fb7cff5153b05174b4afc9593b616 \ + --hash=sha256:461f9dfdafa394c59cd6d818bdfdbab4028b83b02caadaff0ffd433faf4c9a7a \ + --hash=sha256:4f9f6a650743f0ddee5593ac9e954ba1bdbc5e150bc066586d4f26127853ab94 \ + --hash=sha256:53d8b764726d3af1a138dd353116f774e3862ec7e3794e0c8781e30db0f35dfc \ + --hash=sha256:565c986f4b45c020f5421a4cea13ef294dde9509a8577f29b2fc5edc7587fff8 \ + 
--hash=sha256:5c5ae0a06e9ea030ab786b0251b32c7e4ce10e58d983c0d5c56029455180b5b9 \ + --hash=sha256:5cb7bc1966d031aec37ddb9dcf15c2da5b2e9f7cc3ca7c54473a20a927e1eb91 \ + --hash=sha256:5da841d81b1a05ef940a8567da92decaa15bc4d7dedb540a8c219ad83d91808a \ + --hash=sha256:5fee4c04aad8932da9f8f710af2c1a15a83582cfb884152a9caa79d4efcdbf9c \ + --hash=sha256:609e89d9f90b581c8d16358c9087df76024cf058fa693dd3e1e1620823f39670 \ + --hash=sha256:6258f3260986990ba2fa8a874f8b6e808cf5abb51a94015ca3dc3c68aa4f30ea \ + --hash=sha256:64efdf00c09e31efd754448a383ea241f55a994fd079866b92d2bbff598aad91 \ + --hash=sha256:65b80c1ee7e14a87d6a068dd3b0aea268ffcabfe0498d38661b00c5b4b22e74c \ + --hash=sha256:6741e6f3074a35e47c77b23a4e4f2d90db3ed905cb1c5e6e0d49bff2045632bc \ + --hash=sha256:681088909d7e8fa9e31b9799aaa59ba5234c58e5e4f1951b4c4d1082a2e980e0 \ + --hash=sha256:6b7a9d1db5dad90e2991645874f708e87d9a3c370c243c2d7684d28f7e133e6b \ + --hash=sha256:7315f9137087c4e0ee73a761b163fc9aa3b19f5f606a7fc08d83fd3e4379af65 \ + --hash=sha256:742aea052cf5ab5034a53c3846165bc3ce88d7c38e954120db0ab867ca242661 \ + --hash=sha256:75af0b4c229ac519b155028fa1be632d812a519abba9b46b20e50c6caa184f19 \ + --hash=sha256:7b5dd7cbae20285cdb597b10eb5a2c13aa9de6cde9bb64a3c1317427b1db1ae1 \ + --hash=sha256:7d6daa89a00b58c37cb1747ec9fb7ac3bc5ffd5949f5888657dfddde6d1312e0 \ + --hash=sha256:800429ac32c9b72909c671aaf17ecd13110f823ddb7db4dfef412a5587c2c24e \ + --hash=sha256:806f3987ffe10e867bab0ddad45df1148a2b98221798457fa097ad85d6e8bc75 \ + --hash=sha256:808b99604f7873c800c4840f55ff389936ef1948e4e87645eaf3fccbc8477ac4 \ + --hash=sha256:80941e6d573197a0c28f394753de529bb436b1ca990ed6e765cf42426abc39f8 \ + --hash=sha256:84cabc7095dd535ca934d57e9ce2a72ffd216e435a84acb06b2277b1de2689bd \ + --hash=sha256:8637e29d13f478bc4f153d8daa9ffb16455f0a6cb287da1b432fdad2bfbd66c7 \ + --hash=sha256:896866d2d436563fa2a43a9d72f417874f16b5545955c54a64941e87c1376c61 \ + --hash=sha256:8e178e3e99d3c0ea8fc64b88447f7cac8ccf058af422a6cedc690d0eadd98c51 \ + 
--hash=sha256:907bfa8a9cb790748a9aa4513e37c88c59660da3bcfffbd24a7d9e6abf224551 \ + --hash=sha256:9212d6b86917a2300669511ed094a9406888362e085f2431a7da985a6b124f45 \ + --hash=sha256:92a7fe4225365c5e3a8e598982269c6d6698d3e783b3b1ae979e7819f9cd55c1 \ + --hash=sha256:935b9d1aed48fcfb3f838caac506f38e29621b44ccc4f8a64d575cb1b2a88644 \ + --hash=sha256:97e9993d5ed946aba26baf9c1e8cf18adbab584b99f452ee72f7ee8acb882796 \ + --hash=sha256:983976c2ab753166dc66d36af6e8ec15bb511e4a25856e2227e5f7e00a160587 \ + --hash=sha256:9f5fefaca968e700ad1a4a9de98bf0869a94e397fe3524c4c9450c1445252304 \ + --hash=sha256:a332ac4ccb84b6dde65dbace8431f3af08874bf9770719d32a635c4ef411b18b \ + --hash=sha256:a40905599d8079e09f25027423aed94f2823adaf2868940de991e53a449e14a8 \ + --hash=sha256:a6dfc2af5b082b635af6e08e0d1f9f1c4e04d17d4e2ca0ef96131e85eda6eb17 \ + --hash=sha256:a786bf667724d84aa29b5db1c61b7bfdde380202aaca12c3461afd6b71743171 \ + --hash=sha256:a83e0850cb8f5ac975291ebfc4170ba481f41a28065277f7f735c202cd8e0af3 \ + --hash=sha256:aa0c9cc0b82b14766a99fbe6084409972266e82f459821cd26997a488a7261a7 \ + --hash=sha256:b17fbdbe01c196e7e159aacb889e091f28e61020a8abeac07b68079b6e626988 \ + --hash=sha256:b63e13dd27da389ed9475b3d28510f0f954bca0041e8e551b2a4eb1eab56a39a \ + --hash=sha256:b6e53e82ec2db0717eabb276aa56cf4e500c9a7cec2c2e189b55c24f65a3e8c0 \ + --hash=sha256:bb0984b30e973f7e2884362b7d23d0a348c7143ee559f38ef3eaab640144204c \ + --hash=sha256:bc11908616c8a283cf7d664f77411a5ed2a02009b0097ff8abbba5e79128ccf2 \ + --hash=sha256:bdec5e43377761c5dbca620efb69a77f6855c5a379e32ac5b158f54c84212b14 \ + --hash=sha256:bef9768cab184e7ae6e559c032e95ba8d07b3023c289f79a2bd36e8bf85605a5 \ + --hash=sha256:c990547452ee2800d8506c4150280757f88532f3de2a58e3022e9b179107862a \ + --hash=sha256:ca94b6aac0d7af2a10ba08c0f888b3d5114439b6b3ef39968378723622fed377 \ + --hash=sha256:cad302dc10fac357d3467a74a9561c90609768a6f73a1923b0fd851b6486f8b0 \ + --hash=sha256:d0a7735df32ccbcc98b98a1ac785cc4b19b580be1bdf0aeb5c03223220ea09d5 \ + 
--hash=sha256:d70347c8a5b7ccd803ec0c85c8709f036e6348f1e6a5bf048ecd9c64d3550b8b \ + --hash=sha256:d70534cea9e7966169ad29a903b99fc507e932069a881d0965a1a84bb57f6c6d \ + --hash=sha256:db44d5c160a90df2d24a24760bbd37607d53da0b34fb546c4c232af7192298ac \ + --hash=sha256:e115c15e3bc727b1ca3e641a909f77f8ca72a64fff150f666fcc85e57701c26c \ + --hash=sha256:e2479c7f02f9d505682dc47df8c0ea1fc5e264c4d1629a5d63fe3e2334b89554 \ + --hash=sha256:e5dcbe95016e88437ecf33544ba5db21ef1b8dd6e1b434a2cb2a3d605299e643 \ + --hash=sha256:e6bdb408f7c9dd2a5ff2b14a3b0bb6d4deb29fb9961e6eb3ae2031ae9a5cec13 \ + --hash=sha256:e75d3dba8fc1ddfec0cd752108f93b83b4f8d6ab40e524a95d35f016b9683b09 \ + --hash=sha256:efdc140e7b63b8f739d09a99033aa430accce485ff78e6d311973a67b6bf3208 \ + --hash=sha256:f10c98f49227ed8383d28174ee95155a675c4ed7f85e2e573b04414f7e371bda \ + --hash=sha256:f188028b5af6b8fb2e9a76ac0f841a575bd1bd396e46ef0840d9b88a48fdbcea \ + --hash=sha256:f188d580bd870cda1e15183790d1cc2fa78f666e76077d103edf048eed9c356e \ + --hash=sha256:f45bd71d1fa5e5749587613037b172e0b3b23159d1c00ef2fc920da6f470e6f0 \ + --hash=sha256:f61333d817698bdcdd0f9d7793e365ac3d2a21c1f1eb02b32ad6aefb8d8ea831 \ + --hash=sha256:fb125d860738a09d363a88daa0f59c4533529a90e564785e20fe875b200b6dbd # via matplotlib -platformdirs==4.5.0 \ - --hash=sha256:70ddccdd7c99fc5942e9fc25636a8b34d04c24b335100223152c2803e4063312 \ - --hash=sha256:e578a81bb873cbb89a41fcc904c7ef523cc18284b7e3b3ccf06aca1403b7ebd3 +platformdirs==4.5.1 \ + --hash=sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda \ + --hash=sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31 # via esbonio pluggy==1.6.0 \ --hash=sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3 \ @@ -1026,9 +1012,9 @@ pluggy==1.6.0 \ # via # -r /external/score_tooling+/python_basics/requirements.txt # pytest -pycparser==2.23 \ - --hash=sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2 \ - 
--hash=sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934 +pycparser==3.0 \ + --hash=sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29 \ + --hash=sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992 # via cffi pydata-sphinx-theme==0.16.1 \ --hash=sha256:225331e8ac4b32682c18fcac5a57a6f717c4e632cea5dd0e247b55155faeccde \ @@ -1052,46 +1038,44 @@ pygments==2.19.2 \ # pytest # rich # sphinx -pyjwt[crypto]==2.10.1 \ - --hash=sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953 \ - --hash=sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb +pyjwt[crypto]==2.11.0 \ + --hash=sha256:35f95c1f0fbe5d5ba6e43f00271c275f7a1a4db1dab27bf708073b75318ea623 \ + --hash=sha256:94a6bde30eb5c8e04fee991062b534071fd1439ef58d2adc9ccb823e7bcd0469 # via pygithub -pynacl==1.6.1 \ - --hash=sha256:262a8de6bba4aee8a66f5edf62c214b06647461c9b6b641f8cd0cb1e3b3196fe \ - --hash=sha256:2b12f1b97346f177affcdfdc78875ff42637cb40dcf79484a97dae3448083a78 \ - --hash=sha256:319de653ef84c4f04e045eb250e6101d23132372b0a61a7acf91bac0fda8e58c \ - --hash=sha256:3206fa98737fdc66d59b8782cecc3d37d30aeec4593d1c8c145825a345bba0f0 \ - --hash=sha256:3384a454adf5d716a9fadcb5eb2e3e72cd49302d1374a60edc531c9957a9b014 \ - --hash=sha256:3cd787ec1f5c155dc8ecf39b1333cfef41415dc96d392f1ce288b4fe970df489 \ - --hash=sha256:4ce50d19f1566c391fedc8dc2f2f5be265ae214112ebe55315e41d1f36a7f0a9 \ - --hash=sha256:53543b4f3d8acb344f75fd4d49f75e6572fce139f4bfb4815a9282296ff9f4c0 \ - --hash=sha256:543f869140f67d42b9b8d47f922552d7a967e6c116aad028c9bfc5f3f3b3a7b7 \ - --hash=sha256:5953e8b8cfadb10889a6e7bd0f53041a745d1b3d30111386a1bb37af171e6daf \ - --hash=sha256:5a3becafc1ee2e5ea7f9abc642f56b82dcf5be69b961e782a96ea52b55d8a9fc \ - --hash=sha256:5f5b35c1a266f8a9ad22525049280a600b19edd1f785bccd01ae838437dcf935 \ - --hash=sha256:6b35d93ab2df03ecb3aa506be0d3c73609a51449ae0855c2e89c7ed44abde40b \ - 
--hash=sha256:7713f8977b5d25f54a811ec9efa2738ac592e846dd6e8a4d3f7578346a841078 \ - --hash=sha256:7d7c09749450c385301a3c20dca967a525152ae4608c0a096fe8464bfc3df93d \ - --hash=sha256:8d361dac0309f2b6ad33b349a56cd163c98430d409fa503b10b70b3ad66eaa1d \ - --hash=sha256:9fd1a4eb03caf8a2fe27b515a998d26923adb9ddb68db78e35ca2875a3830dde \ - --hash=sha256:a2bb472458c7ca959aeeff8401b8efef329b0fc44a89d3775cffe8fad3398ad8 \ - --hash=sha256:a569a4069a7855f963940040f35e87d8bc084cb2d6347428d5ad20550a0a1a21 \ - --hash=sha256:a6f9fd6d6639b1e81115c7f8ff16b8dedba1e8098d2756275d63d208b0e32021 \ - --hash=sha256:c2228054f04bf32d558fb89bb99f163a8197d5a9bf4efa13069a7fa8d4b93fc3 \ - --hash=sha256:d8615ee34d01c8e0ab3f302dcdd7b32e2bcf698ba5f4809e7cc407c8cdea7717 \ - --hash=sha256:d984c91fe3494793b2a1fb1e91429539c6c28e9ec8209d26d25041ec599ccf63 \ - --hash=sha256:dece79aecbb8f4640a1adbb81e4aa3bfb0e98e99834884a80eb3f33c7c30e708 \ - --hash=sha256:e49a3f3d0da9f79c1bec2aa013261ab9fa651c7da045d376bd306cf7c1792993 \ - --hash=sha256:e735c3a1bdfde3834503baf1a6d74d4a143920281cb724ba29fb84c9f49b9c48 \ - --hash=sha256:fc734c1696ffd49b40f7c1779c89ba908157c57345cf626be2e0719488a076d3 +pynacl==1.6.2 \ + --hash=sha256:018494d6d696ae03c7e656e5e74cdfd8ea1326962cc401bcf018f1ed8436811c \ + --hash=sha256:04316d1fc625d860b6c162fff704eb8426b1a8bcd3abacea11142cbd99a6b574 \ + --hash=sha256:22de65bb9010a725b0dac248f353bb072969c94fa8d6b1f34b87d7953cf7bbe4 \ + --hash=sha256:26bfcd00dcf2cf160f122186af731ae30ab120c18e8375684ec2670dccd28130 \ + --hash=sha256:2fef529ef3ee487ad8113d287a593fa26f48ee3620d92ecc6f1d09ea38e0709b \ + --hash=sha256:320ef68a41c87547c91a8b58903c9caa641ab01e8512ce291085b5fe2fcb7590 \ + --hash=sha256:3bffb6d0f6becacb6526f8f42adfb5efb26337056ee0831fb9a7044d1a964444 \ + --hash=sha256:44081faff368d6c5553ccf55322ef2819abb40e25afaec7e740f159f74813634 \ + --hash=sha256:46065496ab748469cdd999246d17e301b2c24ae2fdf739132e580a0e94c94a87 \ + 
--hash=sha256:5811c72b473b2f38f7e2a3dc4f8642e3a3e9b5e7317266e4ced1fba85cae41aa \ + --hash=sha256:622d7b07cc5c02c666795792931b50c91f3ce3c2649762efb1ef0d5684c81594 \ + --hash=sha256:62985f233210dee6548c223301b6c25440852e13d59a8b81490203c3227c5ba0 \ + --hash=sha256:68be3a09455743ff9505491220b64440ced8973fe930f270c8e07ccfa25b1f9e \ + --hash=sha256:834a43af110f743a754448463e8fd61259cd4ab5bbedcf70f9dabad1d28a394c \ + --hash=sha256:8845c0631c0be43abdd865511c41eab235e0be69c81dc66a50911594198679b0 \ + --hash=sha256:8a66d6fb6ae7661c58995f9c6435bda2b1e68b54b598a6a10247bfcdadac996c \ + --hash=sha256:8b097553b380236d51ed11356c953bf8ce36a29a3e596e934ecabe76c985a577 \ + --hash=sha256:a84bf1c20339d06dc0c85d9aea9637a24f718f375d861b2668b2f9f96fa51145 \ + --hash=sha256:a9f9932d8d2811ce1a8ffa79dcbdf3970e7355b5c8eb0c1a881a57e7f7d96e88 \ + --hash=sha256:bc4a36b28dd72fb4845e5d8f9760610588a96d5a51f01d84d8c6ff9849968c14 \ + --hash=sha256:c8a231e36ec2cab018c4ad4358c386e36eede0319a0c41fed24f840b1dac59f6 \ + --hash=sha256:c949ea47e4206af7c8f604b8278093b674f7c79ed0d4719cc836902bf4517465 \ + --hash=sha256:d071c6a9a4c94d79eb665db4ce5cedc537faf74f2355e4d502591d850d3913c0 \ + --hash=sha256:d29bfe37e20e015a7d8b23cfc8bd6aa7909c92a1b8f41ee416bbb3e79ef182b2 \ + --hash=sha256:fe9847ca47d287af41e82be1dd5e23023d3c31a951da134121ab02e42ac218c9 # via pygithub -pyparsing==3.2.5 \ - --hash=sha256:2df8d5b7b2802ef88e8d016a2eb9c7aeaa923529cd251ed0fe4608275d4105b6 \ - --hash=sha256:e38a4f02064cf41fe6593d328d0512495ad1f3d8a91c4f73fc401b3079a59a5e +pyparsing==3.3.2 \ + --hash=sha256:850ba148bd908d7e2411587e247a1e4f0327839c40e2e5e6d05a007ecc69911d \ + --hash=sha256:c777f4d763f140633dcb6d8a3eda953bf7a214dc4eff598413c070bcdc117cbc # via matplotlib -pyspellchecker==0.8.3 \ - --hash=sha256:cb06eeafe124837f321e0d02f8e21deab713e966e28e0360319a28a089c43978 \ - --hash=sha256:e993076e98b0da5a99b7cc31085c3022c77a9dc37c5e95f5cf6304b5dbb8b9d2 +pyspellchecker==0.8.4 \ + 
--hash=sha256:20c53d119568011bbaef68608aceb956b888bcd9d563798f7770721a112b8255 \ + --hash=sha256:48cafa76d65c30ce4d81d0831938f25e8f34ede243c1cc6840014b0eb51ad629 # via esbonio pytest==9.0.1 \ --hash=sha256:3e9c069ea73583e255c3b21cf46b8d3c56f6e3a1a8f6da94ccb0fcf57b9d73c8 \ @@ -1190,81 +1174,22 @@ requests-file==2.1.0 \ --hash=sha256:0f549a3f3b0699415ac04d167e9cb39bccfb730cb832b4d20be3d9867356e658 \ --hash=sha256:cf270de5a4c5874e84599fc5778303d496c10ae5e870bfa378818f35d21bda5c # via sphinx-needs -rich==14.2.0 \ - --hash=sha256:73ff50c7c0c1c77c8243079283f4edb376f0f6442433aecb8ce7e6d0b92d1fe4 \ - --hash=sha256:76bc51fe2e57d2b1be1f96c524b890b816e334ab4c1e45888799bfaab0021edd +rich==14.3.2 \ + --hash=sha256:08e67c3e90884651da3239ea668222d19bea7b589149d8014a21c633420dbb69 \ + --hash=sha256:e712f11c1a562a11843306f5ed999475f09ac31ffb64281f73ab29ffdda8b3b8 # via -r src/requirements.in -roman-numerals-py==3.1.0 \ - --hash=sha256:9da2ad2fb670bcf24e81070ceb3be72f6c11c440d73bd579fbeca1e9f330954c \ - --hash=sha256:be4bf804f083a4ce001b5eb7e3c0862479d10f94c936f6c4e5f250aa5ff5bd2d +roman-numerals==4.1.0 \ + --hash=sha256:1af8b147eb1405d5839e78aeb93131690495fe9da5c91856cb33ad55a7f1e5b2 \ + --hash=sha256:647ba99caddc2cc1e55a51e4360689115551bf4476d90e8162cf8c345fe233c7 + # via roman-numerals-py +roman-numerals-py==4.1.0 \ + --hash=sha256:553114c1167141c1283a51743759723ecd05604a1b6b507225e91dc1a6df0780 \ + --hash=sha256:f5d7b2b4ca52dd855ef7ab8eb3590f428c0b1ea480736ce32b01fef2a5f8daf9 # via sphinx -ruamel-yaml==0.18.16 \ - --hash=sha256:048f26d64245bae57a4f9ef6feb5b552a386830ef7a826f235ffb804c59efbba \ - --hash=sha256:a6e587512f3c998b2225d68aa1f35111c29fad14aed561a26e73fab729ec5e5a +ruamel-yaml==0.19.1 \ + --hash=sha256:27592957fedf6e0b62f281e96effd28043345e0e66001f97683aa9a40c667c93 \ + --hash=sha256:53eb66cd27849eff968ebf8f0bf61f46cdac2da1d1f3576dd4ccee9b25c31993 # via -r src/requirements.in -ruamel-yaml-clib==0.2.15 \ - 
--hash=sha256:014181cdec565c8745b7cbc4de3bf2cc8ced05183d986e6d1200168e5bb59490 \ - --hash=sha256:04d21dc9c57d9608225da28285900762befbb0165ae48482c15d8d4989d4af14 \ - --hash=sha256:05c70f7f86be6f7bee53794d80050a28ae7e13e4a0087c1839dcdefd68eb36b6 \ - --hash=sha256:0ba6604bbc3dfcef844631932d06a1a4dcac3fee904efccf582261948431628a \ - --hash=sha256:11e5499db1ccbc7f4b41f0565e4f799d863ea720e01d3e99fa0b7b5fcd7802c9 \ - --hash=sha256:1b45498cc81a4724a2d42273d6cfc243c0547ad7c6b87b4f774cb7bcc131c98d \ - --hash=sha256:1bb7b728fd9f405aa00b4a0b17ba3f3b810d0ccc5f77f7373162e9b5f0ff75d5 \ - --hash=sha256:1f66f600833af58bea694d5892453f2270695b92200280ee8c625ec5a477eed3 \ - --hash=sha256:27dc656e84396e6d687f97c6e65fb284d100483628f02d95464fd731743a4afe \ - --hash=sha256:2812ff359ec1f30129b62372e5f22a52936fac13d5d21e70373dbca5d64bb97c \ - --hash=sha256:2b216904750889133d9222b7b873c199d48ecbb12912aca78970f84a5aa1a4bc \ - --hash=sha256:331fb180858dd8534f0e61aa243b944f25e73a4dae9962bd44c46d1761126bbf \ - --hash=sha256:3cb75a3c14f1d6c3c2a94631e362802f70e83e20d1f2b2ef3026c05b415c4900 \ - --hash=sha256:3eb199178b08956e5be6288ee0b05b2fb0b5c1f309725ad25d9c6ea7e27f962a \ - --hash=sha256:424ead8cef3939d690c4b5c85ef5b52155a231ff8b252961b6516ed7cf05f6aa \ - --hash=sha256:45702dfbea1420ba3450bb3dd9a80b33f0badd57539c6aac09f42584303e0db6 \ - --hash=sha256:468858e5cbde0198337e6a2a78eda8c3fb148bdf4c6498eaf4bc9ba3f8e780bd \ - --hash=sha256:46895c17ead5e22bea5e576f1db7e41cb273e8d062c04a6a49013d9f60996c25 \ - --hash=sha256:46e4cc8c43ef6a94885f72512094e482114a8a706d3c555a34ed4b0d20200600 \ - --hash=sha256:480894aee0b29752560a9de46c0e5f84a82602f2bc5c6cde8db9a345319acfdf \ - --hash=sha256:4b293a37dc97e2b1e8a1aec62792d1e52027087c8eea4fc7b5abd2bdafdd6642 \ - --hash=sha256:4be366220090d7c3424ac2b71c90d1044ea34fca8c0b88f250064fd06087e614 \ - --hash=sha256:4d1032919280ebc04a80e4fb1e93f7a738129857eaec9448310e638c8bccefcf \ - --hash=sha256:4d3b58ab2454b4747442ac76fab66739c72b1e2bb9bd173d7694b9f9dbc9c000 \ - 
--hash=sha256:4dcec721fddbb62e60c2801ba08c87010bd6b700054a09998c4d09c08147b8fb \ - --hash=sha256:512571ad41bba04eac7268fe33f7f4742210ca26a81fe0c75357fa682636c690 \ - --hash=sha256:542d77b72786a35563f97069b9379ce762944e67055bea293480f7734b2c7e5e \ - --hash=sha256:56ea19c157ed8c74b6be51b5fa1c3aff6e289a041575f0556f66e5fb848bb137 \ - --hash=sha256:5d3c9210219cbc0f22706f19b154c9a798ff65a6beeafbf77fc9c057ec806f7d \ - --hash=sha256:5fea0932358e18293407feb921d4f4457db837b67ec1837f87074667449f9401 \ - --hash=sha256:617d35dc765715fa86f8c3ccdae1e4229055832c452d4ec20856136acc75053f \ - --hash=sha256:64da03cbe93c1e91af133f5bec37fd24d0d4ba2418eaf970d7166b0a26a148a2 \ - --hash=sha256:65f48245279f9bb301d1276f9679b82e4c080a1ae25e679f682ac62446fac471 \ - --hash=sha256:6f1d38cbe622039d111b69e9ca945e7e3efebb30ba998867908773183357f3ed \ - --hash=sha256:713cd68af9dfbe0bb588e144a61aad8dcc00ef92a82d2e87183ca662d242f524 \ - --hash=sha256:71845d377c7a47afc6592aacfea738cc8a7e876d586dfba814501d8c53c1ba60 \ - --hash=sha256:753faf20b3a5906faf1fc50e4ddb8c074cb9b251e00b14c18b28492f933ac8ef \ - --hash=sha256:7e74ea87307303ba91073b63e67f2c667e93f05a8c63079ee5b7a5c8d0d7b043 \ - --hash=sha256:88eea8baf72f0ccf232c22124d122a7f26e8a24110a0273d9bcddcb0f7e1fa03 \ - --hash=sha256:923816815974425fbb1f1bf57e85eca6e14d8adc313c66db21c094927ad01815 \ - --hash=sha256:9b6f7d74d094d1f3a4e157278da97752f16ee230080ae331fcc219056ca54f77 \ - --hash=sha256:a8220fd4c6f98485e97aea65e1df76d4fed1678ede1fe1d0eed2957230d287c4 \ - --hash=sha256:ab0df0648d86a7ecbd9c632e8f8d6b21bb21b5fc9d9e095c796cacf32a728d2d \ - --hash=sha256:ac9b8d5fa4bb7fd2917ab5027f60d4234345fd366fe39aa711d5dca090aa1467 \ - --hash=sha256:badd1d7283f3e5894779a6ea8944cc765138b96804496c91812b2829f70e18a7 \ - --hash=sha256:bdc06ad71173b915167702f55d0f3f027fc61abd975bd308a0968c02db4a4c3e \ - --hash=sha256:bf0846d629e160223805db9fe8cc7aec16aaa11a07310c50c8c7164efa440aec \ - --hash=sha256:bfd309b316228acecfa30670c3887dcedf9b7a44ea39e2101e75d2654522acd4 \ - 
--hash=sha256:c583229f336682b7212a43d2fa32c30e643d3076178fb9f7a6a14dde85a2d8bd \ - --hash=sha256:cb15a2e2a90c8475df45c0949793af1ff413acfb0a716b8b94e488ea95ce7cff \ - --hash=sha256:d290eda8f6ada19e1771b54e5706b8f9807e6bb08e873900d5ba114ced13e02c \ - --hash=sha256:da3d6adadcf55a93c214d23941aef4abfd45652110aed6580e814152f385b862 \ - --hash=sha256:dcc7f3162d3711fd5d52e2267e44636e3e566d1e5675a5f0b30e98f2c4af7974 \ - --hash=sha256:def5663361f6771b18646620fca12968aae730132e104688766cf8a3b1d65922 \ - --hash=sha256:e5e9f630c73a490b758bf14d859a39f375e6999aea5ddd2e2e9da89b9953486a \ - --hash=sha256:e9fde97ecb7bb9c41261c2ce0da10323e9227555c674989f8d9eb7572fc2098d \ - --hash=sha256:ef71831bd61fbdb7aa0399d5c4da06bea37107ab5c79ff884cc07f2450910262 \ - --hash=sha256:f4421ab780c37210a07d138e56dd4b51f8642187cdfb433eb687fe8c11de0144 \ - --hash=sha256:f6d3655e95a80325b84c4e14c080b2470fe4f33b6846f288379ce36154993fb1 \ - --hash=sha256:fd4c928ddf6bce586285daa6d90680b9c291cfd045fc40aad34e445d57b1bf51 \ - --hash=sha256:fe239bdfdae2302e93bd6e8264bd9b71290218fff7084a9db250b55caaccf43f - # via ruamel-yaml six==1.17.0 \ --hash=sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274 \ --hash=sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81 @@ -1273,17 +1198,13 @@ smmap==5.0.2 \ --hash=sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5 \ --hash=sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e # via gitdb -sniffio==1.3.1 \ - --hash=sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2 \ - --hash=sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc - # via anyio snowballstemmer==3.0.1 \ --hash=sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064 \ --hash=sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895 # via sphinx -soupsieve==2.8 \ - --hash=sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c \ - 
--hash=sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f +soupsieve==2.8.3 \ + --hash=sha256:3267f1eeea4251fb42728b6dfb746edc9acaffc4a45b27e19450b676586e8349 \ + --hash=sha256:ed64f2ba4eebeab06cc4962affce381647455978ffc1e36bb79a545b91f45a95 # via beautifulsoup4 sphinx==8.2.3 \ --hash=sha256:398ad29dee7f63a75888314e9424d40f52ce5a6a87ae88e7071e80af296ec348 \ @@ -1314,9 +1235,9 @@ sphinx-data-viewer==0.1.5 \ --hash=sha256:a7d5e58613562bb745380bfe61ca8b69997998167fd6fa9aea55606c9a4b17e4 \ --hash=sha256:b74b1d304c505c464d07c7b225ed0d84ea02dcc88bc1c49cdad7c2275fbbdad4 # via sphinx-needs -sphinx-design==0.6.1 \ - --hash=sha256:b11f37db1a802a183d61b159d9a202314d4d2fe29c163437001324fe2f19549c \ - --hash=sha256:b44eea3719386d04d765c1a8257caca2b3e6f8421d7b3a5e742c0fd45f84e632 +sphinx-design==0.7.0 \ + --hash=sha256:d2a3f5b19c24b916adb52f97c5f00efab4009ca337812001109084a740ec9b7a \ + --hash=sha256:f82bf179951d58f55dca78ab3706aeafa496b741a91b1911d371441127d64282 # via -r src/requirements.in sphinx-needs[plotting]==6.3.0 \ --hash=sha256:761901765844c69f6181580065b099b31016895a86962a25e7860a9f5bea54a2 \ @@ -1344,9 +1265,9 @@ sphinxcontrib-jsmath==1.0.1 \ --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 # via sphinx -sphinxcontrib-mermaid==1.2.3 \ - --hash=sha256:358699d0ec924ef679b41873d9edd97d0773446daf9760c75e18dc0adfd91371 \ - --hash=sha256:5be782b27026bef97bfb15ccb2f7868b674a1afc0982b54cb149702cfc25aa02 +sphinxcontrib-mermaid==2.0.0 \ + --hash=sha256:59a73249bbee2c74b1a4db036f8e8899ade65982bdda6712cf22b4f4e9874bb5 \ + --hash=sha256:cf4f7d453d001132eaba5d1fdf53d42049f02e913213cf8337427483bfca26f4 # via -r src/requirements.in sphinxcontrib-plantuml==0.31 \ --hash=sha256:fd74752f8ea070e641c3f8a402fccfa1d4a4056e0967b56033d2a76282d9f956 @@ -1359,53 +1280,58 @@ sphinxcontrib-serializinghtml==2.0.0 \ 
--hash=sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331 \ --hash=sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d # via sphinx -starlette==0.50.0 \ - --hash=sha256:9e5391843ec9b6e472eed1365a78c8098cfceb7a74bfd4d6b1c0c0095efb3bca \ - --hash=sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca +starlette==0.52.1 \ + --hash=sha256:0029d43eb3d273bc4f83a08720b4912ea4b071087a3b48db01b7c839f7954d74 \ + --hash=sha256:834edd1b0a23167694292e94f597773bc3f89f362be6effee198165a35d62933 # via sphinx-autobuild -tomli==2.3.0 \ - --hash=sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456 \ - --hash=sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845 \ - --hash=sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999 \ - --hash=sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0 \ - --hash=sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878 \ - --hash=sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf \ - --hash=sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3 \ - --hash=sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be \ - --hash=sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52 \ - --hash=sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b \ - --hash=sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67 \ - --hash=sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549 \ - --hash=sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba \ - --hash=sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22 \ - --hash=sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c \ - --hash=sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f \ - --hash=sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6 \ - 
--hash=sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba \ - --hash=sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45 \ - --hash=sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f \ - --hash=sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77 \ - --hash=sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606 \ - --hash=sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441 \ - --hash=sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0 \ - --hash=sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f \ - --hash=sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530 \ - --hash=sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05 \ - --hash=sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8 \ - --hash=sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005 \ - --hash=sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879 \ - --hash=sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae \ - --hash=sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc \ - --hash=sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b \ - --hash=sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b \ - --hash=sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e \ - --hash=sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf \ - --hash=sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac \ - --hash=sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8 \ - --hash=sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b \ - --hash=sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf \ - --hash=sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463 \ - 
--hash=sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876 +tomli==2.4.0 \ + --hash=sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729 \ + --hash=sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b \ + --hash=sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d \ + --hash=sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df \ + --hash=sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576 \ + --hash=sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d \ + --hash=sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1 \ + --hash=sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a \ + --hash=sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e \ + --hash=sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc \ + --hash=sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702 \ + --hash=sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6 \ + --hash=sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd \ + --hash=sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4 \ + --hash=sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776 \ + --hash=sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a \ + --hash=sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66 \ + --hash=sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87 \ + --hash=sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2 \ + --hash=sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f \ + --hash=sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475 \ + --hash=sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f \ + 
--hash=sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95 \ + --hash=sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9 \ + --hash=sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3 \ + --hash=sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9 \ + --hash=sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76 \ + --hash=sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da \ + --hash=sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8 \ + --hash=sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51 \ + --hash=sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86 \ + --hash=sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8 \ + --hash=sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0 \ + --hash=sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b \ + --hash=sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1 \ + --hash=sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e \ + --hash=sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d \ + --hash=sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c \ + --hash=sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867 \ + --hash=sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a \ + --hash=sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c \ + --hash=sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0 \ + --hash=sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4 \ + --hash=sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614 \ + --hash=sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132 \ + --hash=sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa \ + 
--hash=sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087 # via needs-config-writer tomli-w==1.2.0 \ --hash=sha256:188306098d013b691fcadc011abd66727d3c414c571bb01b1a174ba8c983cf90 \ @@ -1422,15 +1348,15 @@ typing-extensions==4.15.0 \ # pygithub # sphinx-needs # starlette -urllib3==2.5.0 \ - --hash=sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760 \ - --hash=sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc +urllib3==2.6.3 \ + --hash=sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed \ + --hash=sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4 # via # pygithub # requests -uvicorn==0.38.0 \ - --hash=sha256:48c0afd214ceb59340075b4a052ea1ee91c16fbc2a9b1469cca0e54566977b02 \ - --hash=sha256:fd97093bdd120a2609fc0d3afe931d4d4ad688b6e75f0f929fde1bc36fe0e91d +uvicorn==0.40.0 \ + --hash=sha256:839676675e87e73694518b5574fd0f24c9d97b46bea16df7b8c05ea1a51071ea \ + --hash=sha256:c6c8f55bc8bf13eb6fa9ff87ad62308bbbc33d0b67f84293151efe87e0d5f2ee # via sphinx-autobuild watchfiles==1.1.1 \ --hash=sha256:00485f441d183717038ed2e887a7c868154f216877653121068107b227a2f64c \ @@ -1543,74 +1469,66 @@ watchfiles==1.1.1 \ --hash=sha256:f8979280bdafff686ba5e4d8f97840f929a87ed9cdf133cbbd42f7766774d2aa \ --hash=sha256:f9a2ae5c91cecc9edd47e041a930490c31c3afb1f5e6d71de3dc671bfaca02bf # via sphinx-autobuild -websockets==15.0.1 \ - --hash=sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2 \ - --hash=sha256:0a34631031a8f05657e8e90903e656959234f3a04552259458aac0b0f9ae6fd9 \ - --hash=sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5 \ - --hash=sha256:0c9e74d766f2818bb95f84c25be4dea09841ac0f734d1966f415e4edfc4ef1c3 \ - --hash=sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8 \ - --hash=sha256:0fdfe3e2a29e4db3659dbd5bbf04560cea53dd9610273917799f1cde46aa725e \ - 
--hash=sha256:1009ee0c7739c08a0cd59de430d6de452a55e42d6b522de7aa15e6f67db0b8e1 \ - --hash=sha256:1234d4ef35db82f5446dca8e35a7da7964d02c127b095e172e54397fb6a6c256 \ - --hash=sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85 \ - --hash=sha256:2034693ad3097d5355bfdacfffcbd3ef5694f9718ab7f29c29689a9eae841880 \ - --hash=sha256:21c1fa28a6a7e3cbdc171c694398b6df4744613ce9b36b1a498e816787e28123 \ - --hash=sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375 \ - --hash=sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065 \ - --hash=sha256:363c6f671b761efcb30608d24925a382497c12c506b51661883c3e22337265ed \ - --hash=sha256:39c1fec2c11dc8d89bba6b2bf1556af381611a173ac2b511cf7231622058af41 \ - --hash=sha256:3b1ac0d3e594bf121308112697cf4b32be538fb1444468fb0a6ae4feebc83411 \ - --hash=sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597 \ - --hash=sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f \ - --hash=sha256:3d00075aa65772e7ce9e990cab3ff1de702aa09be3940d1dc88d5abf1ab8a09c \ - --hash=sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3 \ - --hash=sha256:47819cea040f31d670cc8d324bb6435c6f133b8c7a19ec3d61634e62f8d8f9eb \ - --hash=sha256:47b099e1f4fbc95b701b6e85768e1fcdaf1630f3cbe4765fa216596f12310e2e \ - --hash=sha256:4a9fac8e469d04ce6c25bb2610dc535235bd4aa14996b4e6dbebf5e007eba5ee \ - --hash=sha256:4b826973a4a2ae47ba357e4e82fa44a463b8f168e1ca775ac64521442b19e87f \ - --hash=sha256:4c2529b320eb9e35af0fa3016c187dffb84a3ecc572bcee7c3ce302bfeba52bf \ - --hash=sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf \ - --hash=sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4 \ - --hash=sha256:5756779642579d902eed757b21b0164cd6fe338506a8083eb58af5c372e39d9a \ - --hash=sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665 \ - --hash=sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22 \ - 
--hash=sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675 \ - --hash=sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4 \ - --hash=sha256:5df592cd503496351d6dc14f7cdad49f268d8e618f80dce0cd5a36b93c3fc08d \ - --hash=sha256:5f4c04ead5aed67c8a1a20491d54cdfba5884507a48dd798ecaf13c74c4489f5 \ - --hash=sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65 \ - --hash=sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792 \ - --hash=sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57 \ - --hash=sha256:67f2b6de947f8c757db2db9c71527933ad0019737ec374a8a6be9a956786aaf9 \ - --hash=sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3 \ - --hash=sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151 \ - --hash=sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d \ - --hash=sha256:76d1f20b1c7a2fa82367e04982e708723ba0e7b8d43aa643d3dcd404d74f1475 \ - --hash=sha256:7f493881579c90fc262d9cdbaa05a6b54b3811c2f300766748db79f098db9940 \ - --hash=sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431 \ - --hash=sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee \ - --hash=sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413 \ - --hash=sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8 \ - --hash=sha256:a625e06551975f4b7ea7102bc43895b90742746797e2e14b70ed61c43a90f09b \ - --hash=sha256:abdc0c6c8c648b4805c5eacd131910d2a7f6455dfd3becab248ef108e89ab16a \ - --hash=sha256:ac017dd64572e5c3bd01939121e4d16cf30e5d7e110a119399cf3133b63ad054 \ - --hash=sha256:ac1e5c9054fe23226fb11e05a6e630837f074174c4c2f0fe442996112a6de4fb \ - --hash=sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205 \ - --hash=sha256:b359ed09954d7c18bbc1680f380c7301f92c60bf924171629c5db97febb12f04 \ - --hash=sha256:b7643a03db5c95c799b89b31c036d5f27eeb4d259c798e878d6937d71832b1e4 \ - 
--hash=sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa \ - --hash=sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9 \ - --hash=sha256:cad21560da69f4ce7658ca2cb83138fb4cf695a2ba3e475e0559e05991aa8122 \ - --hash=sha256:d08eb4c2b7d6c41da6ca0600c077e93f5adcfd979cd777d747e9ee624556da4b \ - --hash=sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905 \ - --hash=sha256:d591f8de75824cbb7acad4e05d2d710484f15f29d4a915092675ad3456f11770 \ - --hash=sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe \ - --hash=sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b \ - --hash=sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562 \ - --hash=sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561 \ - --hash=sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215 \ - --hash=sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931 \ - --hash=sha256:f29d80eb9a9263b8d109135351caf568cc3f80b9928bccde535c235de55c22d9 \ - --hash=sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f \ - --hash=sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7 +websockets==16.0 \ + --hash=sha256:0298d07ee155e2e9fda5be8a9042200dd2e3bb0b8a38482156576f863a9d457c \ + --hash=sha256:04cdd5d2d1dacbad0a7bf36ccbcd3ccd5a30ee188f2560b7a62a30d14107b31a \ + --hash=sha256:08d7af67b64d29823fed316505a89b86705f2b7981c07848fb5e3ea3020c1abe \ + --hash=sha256:152284a83a00c59b759697b7f9e9cddf4e3c7861dd0d964b472b70f78f89e80e \ + --hash=sha256:1637db62fad1dc833276dded54215f2c7fa46912301a24bd94d45d46a011ceec \ + --hash=sha256:19c4dc84098e523fd63711e563077d39e90ec6702aff4b5d9e344a60cb3c0cb1 \ + --hash=sha256:1c1b30e4f497b0b354057f3467f56244c603a79c0d1dafce1d16c283c25f6e64 \ + --hash=sha256:2b9f1e0d69bc60a4a87349d50c09a037a2607918746f07de04df9e43252c77a3 \ + 
--hash=sha256:31a52addea25187bde0797a97d6fc3d2f92b6f72a9370792d65a6e84615ac8a8 \ + --hash=sha256:32da954ffa2814258030e5a57bc73a3635463238e797c7375dc8091327434206 \ + --hash=sha256:335c23addf3d5e6a8633f9f8eda77efad001671e80b95c491dd0924587ece0b3 \ + --hash=sha256:3425ac5cf448801335d6fdc7ae1eb22072055417a96cc6b31b3861f455fbc156 \ + --hash=sha256:349f83cd6c9a415428ee1005cadb5c2c56f4389bc06a9af16103c3bc3dcc8b7d \ + --hash=sha256:37b31c1623c6605e4c00d466c9d633f9b812ea430c11c8a278774a1fde1acfa9 \ + --hash=sha256:417b28978cdccab24f46400586d128366313e8a96312e4b9362a4af504f3bbad \ + --hash=sha256:485c49116d0af10ac698623c513c1cc01c9446c058a4e61e3bf6c19dff7335a2 \ + --hash=sha256:4a1aba3340a8dca8db6eb5a7986157f52eb9e436b74813764241981ca4888f03 \ + --hash=sha256:50f23cdd8343b984957e4077839841146f67a3d31ab0d00e6b824e74c5b2f6e8 \ + --hash=sha256:52a0fec0e6c8d9a784c2c78276a48a2bdf099e4ccc2a4cad53b27718dbfd0230 \ + --hash=sha256:52ac480f44d32970d66763115edea932f1c5b1312de36df06d6b219f6741eed8 \ + --hash=sha256:5569417dc80977fc8c2d43a86f78e0a5a22fee17565d78621b6bb264a115d4ea \ + --hash=sha256:569d01a4e7fba956c5ae4fc988f0d4e187900f5497ce46339c996dbf24f17641 \ + --hash=sha256:583b7c42688636f930688d712885cf1531326ee05effd982028212ccc13e5957 \ + --hash=sha256:5a4b4cc550cb665dd8a47f868c8d04c8230f857363ad3c9caf7a0c3bf8c61ca6 \ + --hash=sha256:5f451484aeb5cafee1ccf789b1b66f535409d038c56966d6101740c1614b86c6 \ + --hash=sha256:5f6261a5e56e8d5c42a4497b364ea24d94d9563e8fbd44e78ac40879c60179b5 \ + --hash=sha256:6e5a82b677f8f6f59e8dfc34ec06ca6b5b48bc4fcda346acd093694cc2c24d8f \ + --hash=sha256:71c989cbf3254fbd5e84d3bff31e4da39c43f884e64f2551d14bb3c186230f00 \ + --hash=sha256:781caf5e8eee67f663126490c2f96f40906594cb86b408a703630f95550a8c3e \ + --hash=sha256:7be95cfb0a4dae143eaed2bcba8ac23f4892d8971311f1b06f3c6b78952ee70b \ + --hash=sha256:7d837379b647c0c4c2355c2499723f82f1635fd2c26510e1f587d89bc2199e72 \ + --hash=sha256:86890e837d61574c92a97496d590968b23c2ef0aeb8a9bc9421d174cd378ae39 \ + 
--hash=sha256:878b336ac47938b474c8f982ac2f7266a540adc3fa4ad74ae96fea9823a02cc9 \ + --hash=sha256:8b6e209ffee39ff1b6d0fa7bfef6de950c60dfb91b8fcead17da4ee539121a79 \ + --hash=sha256:8cc451a50f2aee53042ac52d2d053d08bf89bcb31ae799cb4487587661c038a0 \ + --hash=sha256:8d7f0659570eefb578dacde98e24fb60af35350193e4f56e11190787bee77dac \ + --hash=sha256:8e1dab317b6e77424356e11e99a432b7cb2f3ec8c5ab4dabbcee6add48f72b35 \ + --hash=sha256:8ff32bb86522a9e5e31439a58addbb0166f0204d64066fb955265c4e214160f0 \ + --hash=sha256:95724e638f0f9c350bb1c2b0a7ad0e83d9cc0c9259f3ea94e40d7b02a2179ae5 \ + --hash=sha256:9b5aca38b67492ef518a8ab76851862488a478602229112c4b0d58d63a7a4d5c \ + --hash=sha256:a069d734c4a043182729edd3e9f247c3b2a4035415a9172fd0f1b71658a320a8 \ + --hash=sha256:a0b31e0b424cc6b5a04b8838bbaec1688834b2383256688cf47eb97412531da1 \ + --hash=sha256:a35539cacc3febb22b8f4d4a99cc79b104226a756aa7400adc722e83b0d03244 \ + --hash=sha256:a5e18a238a2b2249c9a9235466b90e96ae4795672598a58772dd806edc7ac6d3 \ + --hash=sha256:a653aea902e0324b52f1613332ddf50b00c06fdaf7e92624fbf8c77c78fa5767 \ + --hash=sha256:abf050a199613f64c886ea10f38b47770a65154dc37181bfaff70c160f45315a \ + --hash=sha256:af80d74d4edfa3cb9ed973a0a5ba2b2a549371f8a741e0800cb07becdd20f23d \ + --hash=sha256:b14dc141ed6d2dde437cddb216004bcac6a1df0935d79656387bd41632ba0bbd \ + --hash=sha256:b784ca5de850f4ce93ec85d3269d24d4c82f22b7212023c974c401d4980ebc5e \ + --hash=sha256:bc59589ab64b0022385f429b94697348a6a234e8ce22544e3681b2e9331b5944 \ + --hash=sha256:c0204dc62a89dc9d50d682412c10b3542d748260d743500a85c13cd1ee4bde82 \ + --hash=sha256:c0ee0e63f23914732c6d7e0cce24915c48f3f1512ec1d079ed01fc629dab269d \ + --hash=sha256:caab51a72c51973ca21fa8a18bd8165e1a0183f1ac7066a182ff27107b71e1a4 \ + --hash=sha256:d6297ce39ce5c2e6feb13c1a996a2ded3b6832155fcfc920265c76f24c7cceb5 \ + --hash=sha256:daa3b6ff70a9241cf6c7fc9e949d41232d9d7d26fd3522b1ad2b4d62487e9904 \ + --hash=sha256:df57afc692e517a85e65b72e165356ed1df12386ecb879ad5693be08fac65dde \ + 
--hash=sha256:e0334872c0a37b606418ac52f6ab9cfd17317ac26365f7f65e203e2d0d0d359f \ + --hash=sha256:e6578ed5b6981005df1860a56e3617f14a6c307e6a71b4fff8c48fdc50f3ed2c \ + --hash=sha256:eaded469f5e5b7294e2bdca0ab06becb6756ea86894a47806456089298813c89 \ + --hash=sha256:f4a32d1bd841d4bcbffdcb3d2ce50c09c3909fbead375ab28d0181af89fd04da \ + --hash=sha256:fd3cb4adb94a2a6e2b7c0d8d05cb94e6f1c81a0cf9dc2694fb65c7e8d94c42e4 # via sphinx-autobuild From 56292638be2a978b2674a895e61d99198b6cbe93 Mon Sep 17 00:00:00 2001 From: RolandJentschETAS <135332348+RolandJentschETAS@users.noreply.github.com> Date: Wed, 4 Feb 2026 14:40:35 +0100 Subject: [PATCH 200/231] fix: wrong module warning in draw functions (#385) --- src/extensions/score_draw_uml_funcs/helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/extensions/score_draw_uml_funcs/helpers.py b/src/extensions/score_draw_uml_funcs/helpers.py index 456bed7e..839110ee 100644 --- a/src/extensions/score_draw_uml_funcs/helpers.py +++ b/src/extensions/score_draw_uml_funcs/helpers.py @@ -166,7 +166,7 @@ def get_hierarchy_text( close_mod_text = "" parent_need = {} - if "mod_" not in need["type"]: + if "mod" not in need["type"]: parent_need_str = need.get("includes_back", []) if parent_need_str: From 534e1efbe8d1af4999b68e19922d958bfed6cc86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= <maximilian.pollak@qorix.com> Date: Wed, 4 Feb 2026 20:58:02 +0100 Subject: [PATCH 201/231] Add link_check action & workflow (#386) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Maximilian Sören Pollak <maximilian.pollak@qorix.com> --- .github/actions/link-check/action.yml | 62 +++++++++++++ .github/actions/link-check/link_check.sh | 3 + .github/actions/link-check/link_parser.py | 101 ++++++++++++++++++++++ .github/workflows/link_check.yml | 19 ++++ .github/workflows/test_links.yml | 66 ++++++++++++++ docs.bzl | 13 +++ src/incremental.py | 2 + 7 
files changed, 266 insertions(+) create mode 100644 .github/actions/link-check/action.yml create mode 100755 .github/actions/link-check/link_check.sh create mode 100644 .github/actions/link-check/link_parser.py create mode 100644 .github/workflows/link_check.yml create mode 100644 .github/workflows/test_links.yml diff --git a/.github/actions/link-check/action.yml b/.github/actions/link-check/action.yml new file mode 100644 index 00000000..2cebec8b --- /dev/null +++ b/.github/actions/link-check/action.yml @@ -0,0 +1,62 @@ +name: 'Link Check and Automated Issue' +description: 'Checks links, parses results, and creates or updates an issue with findings.' +inputs: + github-token: + description: 'GitHub token' + required: true +runs: + using: "composite" + steps: + - name: Checkout repository + uses: actions/checkout@v4.2.2 + + - name: Run LinkChecker (generates linkcheck_output.txt) + shell: bash + run: | + chmod +x ${{ github.action_path }}/link_check.sh + ${{ github.action_path }}/link_check.sh + + - name: Parse broken links (generates issue_body.md) + shell: bash + run: | + python3 ${{ github.action_path }}/link_parser.py linkcheck_output.txt + + - name: Create or update GitHub issue from findings + if: success() && hashFiles('issue_body.md') != '' + uses: actions/github-script@v7 + with: + github-token: ${{ inputs.github-token }} + script: | + const fs = require('fs'); + const path = require('path'); + const body = fs.readFileSync(path.join(process.cwd(), 'issue_body.md'), 'utf-8'); + const title = "Automated Issue: Broken Documentation Links"; + + // Find existing open issue with the same title created by GitHub Actions bot + const { data: issues } = await github.rest.issues.listForRepo({ + owner: context.repo.owner, + repo: context.repo.repo, + state: "open", + creator: "github-actions[bot]", + labels: undefined, + }); + + const issue = issues.find(i => i.title === title); + + if (issue) { + // Update the existing issue + await github.rest.issues.update({ + 
owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issue.number, + body, + }); + } else { + // Create a new issue + await github.rest.issues.create({ + owner: context.repo.owner, + repo: context.repo.repo, + title, + body, + }); + } diff --git a/.github/actions/link-check/link_check.sh b/.github/actions/link-check/link_check.sh new file mode 100755 index 00000000..4933c035 --- /dev/null +++ b/.github/actions/link-check/link_check.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash +set -e +bazel run //:docs_link_check > linkcheck_output.txt || true diff --git a/.github/actions/link-check/link_parser.py b/.github/actions/link-check/link_parser.py new file mode 100644 index 00000000..0976ab60 --- /dev/null +++ b/.github/actions/link-check/link_parser.py @@ -0,0 +1,101 @@ +import argparse +import re +import sys +from dataclasses import dataclass +from datetime import datetime + +PARSING_STATUSES = ["broken"] + + +@dataclass +class BrokenLink: + location: str + line_nr: str + reasoning: str + + +def parse_broken_links(log: str) -> list[BrokenLink]: + broken_links: list[BrokenLink] = [] + lines = log.strip().split("\n") + + for line in lines: + parts = line.split(") ") + if len(parts) < 2: + continue + + location_part = parts[0].replace("(", "").strip() + location = location_part.split(":")[0].strip() + line_nr = location_part.split("line")[-1].strip() + status_and_url_part = parts[1] + + if not any(status in status_and_url_part for status in PARSING_STATUSES): + continue + status_and_url = status_and_url_part.split(" - ") + if len(status_and_url) < 2: + continue + reasoning = status_and_url[1].strip() + + broken_links.append( + BrokenLink( + location=location, + line_nr=line_nr, + reasoning=reasoning, + ) + ) + + return broken_links + + +def generate_markdown_table(broken_links: list[BrokenLink]) -> str: + table = "| Location | Line Number | Reasoning |\n" + table += "|----------|-------------|-----------|\n" + + for link in broken_links: + table += ( + f"| 
{link.location} | {link.line_nr} | {link.reasoning} |\n" + ) + + return table + + +def generate_issue_body(broken_links: list[BrokenLink]) -> str: + markdown_table = generate_markdown_table(broken_links) + return f""" +# Broken Links Report. +**Last updated: {datetime.now().strftime('%d-%m-%Y %H:%M')}** + +The following broken links were detected in the documentation: +{markdown_table} +Please investigate and fix these issues to ensure all links are functional. +Thank you! + +--- +This issue will be auto updated regularly if link issues are found. +You may close it if you wish, though a new one will be created if link issues are still present. + +""" + + +def strip_ansi_codes(text: str) -> str: + """Remove ANSI escape sequences from text""" + ansi_escape = re.compile(r"\x1b\[[0-9;]*m") + return ansi_escape.sub("", text) + + +if __name__ == "__main__": + argparse = argparse.ArgumentParser( + description="Parse broken links from Sphinx log and generate issue body." + ) + argparse.add_argument("logfile", type=str, help="Path to the Sphinx log file.") + args = argparse.parse_args() + with open(args.logfile) as f: + log_content_raw = f.read() + log_content = strip_ansi_codes(log_content_raw) + broken_links = parse_broken_links(log_content) + if not broken_links: + # Nothing broken found, can exit early + sys.exit(0) + issue_body = generate_issue_body(broken_links) + if broken_links: + with open("issue_body.md", "w") as out: + out.write(issue_body) diff --git a/.github/workflows/link_check.yml b/.github/workflows/link_check.yml new file mode 100644 index 00000000..8f026964 --- /dev/null +++ b/.github/workflows/link_check.yml @@ -0,0 +1,19 @@ +name: Link Check + +on: + workflow_dispatch: + schedule: + # Runs every week at 00:00 on Sunday + - cron: '0 0 * * 0' + +jobs: + link-check: + runs-on: ubuntu-latest + steps: + - name: Checkout repo + uses: actions/checkout@v4 + + - name: Run link check action + uses: ./.github/actions/link-check + with: + github-token: ${{ 
secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/test_links.yml b/.github/workflows/test_links.yml new file mode 100644 index 00000000..e8d382c3 --- /dev/null +++ b/.github/workflows/test_links.yml @@ -0,0 +1,66 @@ +name: Link Check and Automated Issue + +on: + workflow_dispatch: + +jobs: + check-links: + runs-on: ubuntu-latest + outputs: + should_create_issue: ${{ steps.detect.outputs.issue_needed }} + steps: + - name: Checkout repository + uses: actions/checkout@v4.2.2 + + # Run your link checker and generate log + - name: Run LinkChecker + run: | + bazel run //:link_check > linkcheck_output.txt + continue-on-error: true + + # Run your Python script to parse the linkcheck log and generate issue body + - name: Parse broken links and generate issue body + run: | + python3 scripts/link_parser.py linkcheck_output.txt + + # Check if issue_body.md exists and is not empty + - name: Check for issues to report + id: detect + run: | + if [ -s issue_body.md ]; then + echo "issue_needed=true" >> "$GITHUB_OUTPUT" + else + echo "issue_needed=false" >> "$GITHUB_OUTPUT" + fi + + # Upload issue body artifact if present + - name: Upload issue body + if: steps.detect.outputs.issue_needed == 'true' + uses: actions/upload-artifact@v4 + with: + name: issue-body + path: issue_body.md + + create-issue: + needs: check-links + if: needs.check-links.outputs.should_create_issue == 'true' + runs-on: ubuntu-latest + steps: + - name: Download issue body artifact + uses: actions/download-artifact@v4 + with: + name: issue-body + + - name: Create GitHub issue from findings + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const fs = require('fs'); + const body = fs.readFileSync('issue_body.md', 'utf-8'); + github.rest.issues.create({ + owner: context.repo.owner, + repo: context.repo.repo, + title: "Automated Issue: Broken Documentation Links", + body, + }); diff --git a/docs.bzl b/docs.bzl index 53073af4..fea3ce5d 100644 --- a/docs.bzl +++ 
b/docs.bzl @@ -173,6 +173,19 @@ def docs(source_dir = "docs", data = [], deps = [], scan_code = []): }, ) + py_binary( + name = "docs_link_check", + tags = ["cli_help=Verify Links inside Documentation:\nbazel run //:link_check\n (Note: this could take a long time)"], + srcs = ["@score_docs_as_code//src:incremental.py"], + data = data, + deps = deps, + env = { + "SOURCE_DIRECTORY": source_dir, + "DATA": str(data), + "ACTION": "linkcheck", + }, + ) + py_binary( name = "docs_check", tags = ["cli_help=Verify documentation:\nbazel run //:docs_check"], diff --git a/src/incremental.py b/src/incremental.py index fbabf8b1..1c381622 100644 --- a/src/incremental.py +++ b/src/incremental.py @@ -109,6 +109,8 @@ def get_env(name: str) -> str: builder = "html" elif action == "check": builder = "needs" + elif action == "linkcheck": + builder = "linkcheck" else: raise ValueError(f"Unknown action: {action}") From d14b9175b7f2c1d7859196996b601ff915b25268 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= <maximilian.pollak@qorix.com> Date: Thu, 5 Feb 2026 09:03:11 +0100 Subject: [PATCH 202/231] Msp fix action (#387) * Fix permissions * Adding extra comment for local fixing --- .github/actions/link-check/link_parser.py | 2 ++ .github/workflows/link_check.yml | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/.github/actions/link-check/link_parser.py b/.github/actions/link-check/link_parser.py index 0976ab60..2d7222ae 100644 --- a/.github/actions/link-check/link_parser.py +++ b/.github/actions/link-check/link_parser.py @@ -69,6 +69,8 @@ def generate_issue_body(broken_links: list[BrokenLink]) -> str: Please investigate and fix these issues to ensure all links are functional. Thank you! +> To test locally if all link issues are resolved use `bazel run //:docs_link_check` + --- This issue will be auto updated regularly if link issues are found. You may close it if you wish, though a new one will be created if link issues are still present. 
diff --git a/.github/workflows/link_check.yml b/.github/workflows/link_check.yml index 8f026964..79be15b7 100644 --- a/.github/workflows/link_check.yml +++ b/.github/workflows/link_check.yml @@ -6,6 +6,10 @@ on: # Runs every week at 00:00 on Sunday - cron: '0 0 * * 0' +permissions: + contents: read + issues: write + jobs: link-check: runs-on: ubuntu-latest From 5008ea2b5ca613c7cb8cd3b940b16cda2bc6a584 Mon Sep 17 00:00:00 2001 From: Chidananda Swamy R <chidananda.swamy@ltts.com> Date: Thu, 5 Feb 2026 14:32:35 +0530 Subject: [PATCH 203/231] Rename 'Process Requirement' to 'Tool Requirement' (#391) Rename 'Process Requirement' to 'Tool Requirement' as process requirement id is already mentioned in 'Satisfies' column Signed-off-by: Chidananda Swamy R <chidananda.swamy@ltts.com> --- docs/internals/requirements/process_overview.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/internals/requirements/process_overview.rst b/docs/internals/requirements/process_overview.rst index 73cb4f6f..fda78e96 100644 --- a/docs/internals/requirements/process_overview.rst +++ b/docs/internals/requirements/process_overview.rst @@ -76,7 +76,7 @@ does not mean it is implemented. .. 
needtable:: :types: gd_req - :columns: id as "Process Requirement";implemented;satisfies + :columns: id as "Tool Requirement";implemented;satisfies :colwidths: 1;1;2 :style: table From 88bd7ce87f2a146ccdb95bd38260a960b8420b4f Mon Sep 17 00:00:00 2001 From: ramceb <89037993+ramceb@users.noreply.github.com> Date: Thu, 5 Feb 2026 17:16:45 +0100 Subject: [PATCH 204/231] Add sphinx_module support (#366) Co-authored-by: Jochen Hoenle <jochen.hoenle@bmw.de> --- MODULE.bazel | 2 +- docs.bzl | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index bd71747c..18dbdc78 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -109,7 +109,7 @@ git_override( # Add Linter bazel_dep(name = "rules_multitool", version = "1.9.0") -bazel_dep(name = "score_tooling", version = "1.1.0") +bazel_dep(name = "score_tooling", version = "1.1.1") multitool_root = use_extension("@rules_multitool//multitool:extension.bzl", "multitool") use_repo(multitool_root, "actionlint_hub", "multitool", "ruff_hub", "shellcheck_hub", "yamlfmt_hub") diff --git a/docs.bzl b/docs.bzl index fea3ce5d..b8c45c18 100644 --- a/docs.bzl +++ b/docs.bzl @@ -46,6 +46,7 @@ load("@pip_process//:requirements.bzl", "all_requirements") load("@rules_pkg//pkg:mappings.bzl", "pkg_files", "strip_prefix") load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary", "sphinx_docs") load("@score_tooling//:defs.bzl", "score_virtualenv") +load("@score_tooling//bazel/rules/rules_score:rules_score.bzl", "sphinx_module") def _rewrite_needs_json_to_docs_sources(labels): """Replace '@repo//:needs_json' -> '@repo//:docs_sources' for every item.""" @@ -106,6 +107,7 @@ def docs(source_dir = "docs", data = [], deps = [], scan_code = []): if call_path != "": fail("docs() must be called from the root package. 
Current package: " + call_path) + module_deps = deps deps = deps + all_requirements + [ "@score_docs_as_code//src:plantuml_for_python", "@score_docs_as_code//src/extensions/score_sphinx_bundle:score_sphinx_bundle", @@ -255,6 +257,30 @@ def docs(source_dir = "docs", data = [], deps = [], scan_code = []): visibility = ["//visibility:public"], ) + sphinx_module( + name = native.module_name() + "_module", + srcs = native.glob([ + source_dir + "/**/*.rst", + source_dir + "/**/*.png", + source_dir + "/**/*.svg", + source_dir + "/**/*.md", + source_dir + "/**/*.html", + source_dir + "/**/*.css", + source_dir + "/**/*.puml", + source_dir + "/**/*.need", + source_dir + "/**/*.yaml", + source_dir + "/**/*.json", + source_dir + "/**/*.csv", + source_dir + "/**/*.inc", + "more_docs/**/*.rst", + ], allow_empty = True), + # config = ":" + source_dir + "/conf.py", + index = source_dir + "/index.rst", + sphinx = "@score_tooling//bazel/rules/rules_score:score_build", + deps = module_deps, + visibility = ["//visibility:public"], + ) + def _sourcelinks_json(name, srcs): """ Creates a target that generates a JSON file with source code links. From e6460ed36adfb2152f94529d77cdaa7b650d2118 Mon Sep 17 00:00:00 2001 From: Chidananda Swamy R <chidananda.swamy@ltts.com> Date: Fri, 6 Feb 2026 16:14:12 +0530 Subject: [PATCH 205/231] Fix wording in process overview documentation (#393) 1. Fix wording in process overview documentation 2. 
Add header to separate the priority 1 & 2 table for better readability Signed-off-by: Chidananda Swamy R <chidananda.swamy@ltts.com> --- docs/internals/requirements/process_overview.rst | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/internals/requirements/process_overview.rst b/docs/internals/requirements/process_overview.rst index fda78e96..884552ec 100644 --- a/docs/internals/requirements/process_overview.rst +++ b/docs/internals/requirements/process_overview.rst @@ -10,10 +10,13 @@ how well this tool implements process requirements. Unsatisfied Process Requirements ################################ -The following table lists tool requirements from our process +The following table lists process requirements from our process which are not (yet) satisfied, i.e. covered by tool requirements. +Unsatisfied – Priority 1 +------------------------ + .. needtable:: :types: gd_req :columns: id;title;tags @@ -51,6 +54,9 @@ i.e. covered by tool requirements. continue results.append(need) +Unsatisfied – Priority 2 +------------------------ + .. 
needtable:: :types: gd_req :columns: id;title;tags From 37230422578d8909664a9bb0128b31bdba984f18 Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Fri, 6 Feb 2026 11:20:37 +0000 Subject: [PATCH 206/231] feat: implement schema validation for Sphinx-Needs using generated schemas.json --- .gitignore | 1 + docs/internals/requirements/requirements.rst | 2 +- src/extensions/score_metamodel/__init__.py | 31 ++- src/extensions/score_metamodel/metamodel.yaml | 36 +-- src/extensions/score_metamodel/sn_schemas.py | 233 ++++++++++++++++++ .../score_source_code_linker/__init__.py | 28 ++- src/extensions/score_sync_toml/__init__.py | 6 + 7 files changed, 309 insertions(+), 28 deletions(-) create mode 100644 src/extensions/score_metamodel/sn_schemas.py diff --git a/.gitignore b/.gitignore index b4c5bb79..88eef561 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ user.bazelrc # docs build artifacts /_build* docs/ubproject.toml +docs/schemas.json # Vale - editorial style guide .vale.ini diff --git a/docs/internals/requirements/requirements.rst b/docs/internals/requirements/requirements.rst index 816646cb..70a5413a 100644 --- a/docs/internals/requirements/requirements.rst +++ b/docs/internals/requirements/requirements.rst @@ -1097,6 +1097,6 @@ Grouped Requirements .. needextend:: c.this_doc() and type == 'tool_req' and not status :status: valid -.. needextend:: "metamodel.yaml" in source_code_link +.. 
needextend:: source_code_link is not None and "metamodel.yaml" in source_code_link :+satisfies: tool_req__docs_metamodel :+tags: config diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 0a6c4dae..523d455d 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -31,6 +31,7 @@ from src.extensions.score_metamodel.metamodel_types import ( ScoreNeedType as ScoreNeedType, ) +from src.extensions.score_metamodel.sn_schemas import write_sn_schemas from src.extensions.score_metamodel.yaml_parser import ( default_options as default_options, ) @@ -237,10 +238,28 @@ def setup(app: Sphinx) -> dict[str, str | bool]: # load metamodel.yaml via ruamel.yaml metamodel = load_metamodel_data() + # Sphinx-Needs 6 requires extra options as dicts: {"name": ..., "schema": ...} + # Options WITH a schema get JSON schema validation (value must be a string). + # Options WITHOUT a schema are registered but not validated. 
+ # non_schema_options = {"source_code_link", "testlink", "codelink"} + non_schema_options = {} # currently empty → all options get schema validation + extra_options_schema = [ + {"name": opt, "schema": {"type": "string"}} + for opt in metamodel.needs_extra_options + if opt not in non_schema_options + ] + extra_options_wo_schema = [ + {"name": opt} + for opt in metamodel.needs_extra_options + if opt in non_schema_options + ] + # extra_options = [{"name": opt} for opt in metamodel.needs_extra_options] + extra_options = extra_options_schema + extra_options_wo_schema + # Assign everything to Sphinx config app.config.needs_types = metamodel.needs_types app.config.needs_extra_links = metamodel.needs_extra_links - app.config.needs_extra_options = metamodel.needs_extra_options + app.config.needs_extra_options = extra_options app.config.graph_checks = metamodel.needs_graph_check app.config.prohibited_words_checks = metamodel.prohibited_words_checks @@ -251,6 +270,16 @@ def setup(app: Sphinx) -> dict[str, str | bool]: app.config.needs_reproducible_json = True app.config.needs_json_remove_defaults = True + # Generate schemas.json from the metamodel and register it with sphinx-needs. + # This enables sphinx-needs 6 schema validation: required fields, regex + # patterns on option values, and (eventually) link target type checks. + + print("Trying to generate schemas from metamodel.yaml and register them with Sphinx-Needs.") + + write_sn_schemas(app, metamodel) + + print("Successfully generated schemas from metamodel.yaml and registered them with Sphinx-Needs.") + # sphinx-collections runs on default prio 500. # We need to populate the sphinx-collections config before that happens. 
# --> 499 diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 398195c7..6c6b29c8 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -124,7 +124,7 @@ needs_types: mandatory_options: # req-Id: tool_req__docs_common_attr_status status: ^(valid|draft)$ - content: ^[\s\S]+$ + content: ^(.|[\n\r])+$ optional_links: # req-Id: tool_req__docs_req_link_satisfies_allowed # TODO: fix once process_description is fixed @@ -252,7 +252,7 @@ needs_types: # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ # WARNING: THis will be activated again with new process release (1.1.0) - # content: ^[\s\S]+$ + # content: ^(.|[\n\r])+$ # req-Id: tool_req__docs_req_attr_rationale rationale: ^.+$ # req-Id: tool_req__docs_common_attr_security @@ -266,8 +266,8 @@ needs_types: testcovered: ^(YES|NO)$ hash: ^.*$ # req-Id: tool_req__docs_req_attr_validity_correctness - valid_from: ^v(0|[1-9]\d*)\.(0|[1-9]\d*)(\.(0|[1-9]\d*))?$ - valid_until: ^v(0|[1-9]\d*)\.(0|[1-9]\d*)(\.(0|[1-9]\d*))?$ + valid_from: ^v(0|[1-9][0-9]*)\.(?:0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))?$ + valid_until: ^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))?$ tags: - requirement - requirement_excl_process @@ -286,7 +286,7 @@ needs_types: safety: ^(QM|ASIL_B)$ # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ - content: ^[\s\S]+$ + content: ^(.|[\n\r])+$ mandatory_links: # req-Id: tool_req__docs_req_link_satisfies_allowed satisfies: stkh_req @@ -299,8 +299,8 @@ needs_types: testcovered: ^(YES|NO)$ hash: ^.*$ # req-Id: tool_req__docs_req_attr_validity_correctness - valid_from: ^v(0|[1-9]\d*)\.(0|[1-9]\d*)(\.(0|[1-9]\d*))?$ - valid_until: ^v(0|[1-9]\d*)\.(0|[1-9]\d*)(\.(0|[1-9]\d*))?$ + valid_from: ^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))?$ + valid_until: ^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))?$ optional_links: belongs_to: feat # for evaluation 
tags: @@ -320,7 +320,7 @@ needs_types: safety: ^(QM|ASIL_B)$ # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ - content: ^[\s\S]+$ + content: ^(.|[\n\r])+$ mandatory_links: # req-Id: tool_req__docs_req_link_satisfies_allowed satisfies: feat_req @@ -348,7 +348,7 @@ needs_types: safety: ^(QM|ASIL_B)$ # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ - content: ^[\s\S]+$ + content: ^(.|[\n\r])+$ optional_links: # req-Id: tool_req__docs_req_link_satisfies_allowed # TODO: make it mandatory @@ -381,7 +381,7 @@ needs_types: safety: ^(QM|ASIL_B)$ # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ - content: ^[\s\S]+$ + content: ^(.|[\n\r])+$ optional_options: codelink: ^.*$ testlink: ^.*$ @@ -728,7 +728,7 @@ needs_types: failure_effect: ^.*$ sufficient: ^(yes|no)$ status: ^(valid|invalid)$ - content: ^[\s\S]+$ + content: ^(.|[\n\r])+$ mandatory_links: violates: feat_arc_sta optional_options: @@ -748,7 +748,7 @@ needs_types: sufficient: ^(yes|no)$ status: ^(valid|invalid)$ # req-Id: tool_req__docs_saf_attrs_content - content: ^[\s\S]+$ + content: ^(.|[\n\r])+$ mandatory_links: # req-Id: tool_req__docs_saf_attrs_violates violates: feat_arc_sta @@ -775,7 +775,7 @@ needs_types: sufficient: ^(yes|no)$ status: ^(valid|invalid)$ # req-Id: tool_req__docs_saf_attrs_content - content: ^[\s\S]+$ + content: ^(.|[\n\r])+$ optional_options: # req-Id: tool_req__docs_saf_attrs_mitigation_issue mitigation_issue: ^https://github.com/.*$ @@ -803,7 +803,7 @@ needs_types: sufficient: ^(yes|no)$ status: ^(valid|invalid)$ # req-Id: tool_req__docs_saf_attrs_content - content: ^[\s\S]+$ + content: ^(.|[\n\r])+$ optional_options: # req-Id: tool_req__docs_saf_attrs_mitigation_issue mitigation_issue: ^https://github.com/.*$ @@ -830,7 +830,7 @@ needs_types: sufficient: ^(yes|no)$ status: ^(valid|invalid)$ # req-Id: tool_req__docs_saf_attrs_content - content: ^[\s\S]+$ + content: ^(.|[\n\r])+$ optional_options: # req-Id: 
tool_req__docs_saf_attrs_mitigation_issue mitigation_issue: ^https://github.com/.*$ @@ -971,6 +971,12 @@ needs_extra_links: partially_verifies: incoming: partially_verified_by outgoing: partially_verifies + + # Decision Records + affects: + incoming: affected by + outgoing: affects + ############################################################## # Graph Checks # The graph checks focus on the relation of the needs and their attributes. diff --git a/src/extensions/score_metamodel/sn_schemas.py b/src/extensions/score_metamodel/sn_schemas.py new file mode 100644 index 00000000..0b81c992 --- /dev/null +++ b/src/extensions/score_metamodel/sn_schemas.py @@ -0,0 +1,233 @@ +"""Transforms the YAML metamodel into sphinx-needs JSON schema definitions. + +Reads need types from the parsed metamodel (MetaModelData) and generates a +``schemas.json`` file that sphinx-needs uses to validate each need against +the S-CORE metamodel rules (required fields, regex patterns, link constraints). + +Schema structure per need type (sphinx-needs schema format): + - ``select`` : matches needs by their ``type`` field + - ``validate.local`` : validates the need's own properties (patterns, required) + - ``validate.network`` : validates properties of linked needs (NOT YET ACTIVE) +""" + +import json +from pathlib import Path + +from sphinx.application import Sphinx +from sphinx.config import Config +from sphinx_needs import logging + +from src.extensions.score_metamodel.yaml_parser import MetaModelData + +# Fields whose values are lists in sphinx-needs (e.g. tags: ["safety", "security"]). +# These need an "array of strings" JSON schema instead of a plain "string" schema. +SN_ARRAY_FIELDS = { + "tags", + "sections", +} + +# Fields to skip during schema generation. 
+IGNORE_FIELDS = { + "content", # not yet available in ubCode +} + +LOGGER = logging.get_logger(__name__) + + +def write_sn_schemas(app: Sphinx, metamodel: MetaModelData) -> None: + """Build sphinx-needs schema definitions from the metamodel and write to JSON. + + For every need type that has at least one constraint (mandatory/optional + fields or links), a schema entry is created with: + + 1. A **selector** that matches needs whose ``type`` equals the directive name. + 2. A **local validator** containing: + - ``required`` list for mandatory fields/links. + - ``properties`` with regex ``pattern`` constraints for field values. + - ``minItems: 1`` for mandatory links (must have at least one target). + 3. A **network validator** (currently disabled) that would check that + linked needs have the expected ``type``. + + The resulting JSON is written to ``<confdir>/schemas.json`` and registered + with sphinx-needs via ``config.needs_schema_definitions_from_json``. + """ + config: Config = app.config + schemas = [] + schema_definitions = {"schemas": schemas} + + for need_type in metamodel.needs_types: + # Extract the four constraint categories from the metamodel YAML + mandatory_fields = need_type.get("mandatory_options", {}) + optional_fields = need_type.get("optional_options", {}) + mandatory_links = need_type.get("mandatory_links", {}) + optional_links = need_type.get("optional_links", {}) + + # Skip need types that have no constraints at all + if not ( + mandatory_fields or optional_fields or mandatory_links or optional_links + ): + continue + + # --- Classify link values as regex patterns vs. target type names --- + # In the metamodel YAML, a link value can be either: + # - A regex (starts with "^"), e.g. "^logic_arc_int(_op)*__.+$" + # → validated locally (the link ID must match the pattern) + # - A plain type name, e.g. "comp" + # → validated via network (the linked need must have that type) + # Multiple values are comma-separated, e.g. 
"comp, sw_unit" + mandatory_links_regexes = {} + mandatory_links_targets = {} + optional_links_regexes = {} + optional_links_targets = {} + value: str + field: str + for field, value in mandatory_links.items(): + link_values = [v.strip() for v in value.split(",")] + for link_value in link_values: + if link_value.startswith("^"): + if field in mandatory_links_regexes: + LOGGER.error( + "Multiple regex patterns for mandatory link field " + f"'{field}' in need type '{type_name}'. " + "Only the first one will be used in the schema." + ) + mandatory_links_regexes[field] = link_value + else: + mandatory_links_targets[field] = link_value + + for field, value in optional_links.items(): + link_values = [v.strip() for v in value.split(",")] + for link_value in link_values: + if link_value.startswith("^"): + if field in optional_links_regexes: + LOGGER.error( + "Multiple regex patterns for optional link field " + f"'{field}' in need type '{type_name}'. " + "Only the first one will be used in the schema." 
+ ) + optional_links_regexes[field] = link_value + else: + optional_links_targets[field] = link_value + + # --- Build the schema entry for this need type --- + type_schema = { + "id": f"need-type-{need_type['directive']}", + "severity": "violation", + "message": "Need does not conform to S-CORE metamodel", + } + type_name = need_type["directive"] + + # Selector: only apply this schema to needs with matching type + selector = { + "properties": {"type": {"const": type_name}}, + "required": ["type"], + } + type_schema["select"] = selector + + # --- Local validation (the need's own properties) --- + type_schema["validate"] = {} + validator_local = { + "properties": {}, + "required": [], + # "unevaluatedProperties": False, + } + + # Mandatory fields: must be present AND match the regex pattern + for field, pattern in mandatory_fields.items(): + if field in IGNORE_FIELDS: + continue + validator_local["required"].append(field) + validator_local["properties"][field] = get_field_pattern_schema( + field, pattern + ) + + # Optional fields: if present, must match the regex pattern + for field, pattern in optional_fields.items(): + if field in IGNORE_FIELDS: + continue + validator_local["properties"][field] = get_field_pattern_schema( + field, pattern + ) + + # Mandatory links (regex): must have at least one entry + # TODO: regex pattern matching on link IDs is not yet enabled + for field, pattern in mandatory_links_regexes.items(): + validator_local["properties"][field] = { + "type": "array", + "minItems": 1, + } + validator_local["required"].append(field) + # validator_local["properties"][field] = get_array_pattern_schema(pattern) + + # Optional links (regex): allowed but not required + # TODO: regex pattern matching on link IDs is not yet enabled + for field, pattern in optional_links_regexes.items(): + validator_local["properties"][field] = { + "type": "array", + } + # validator_local["properties"][field] = get_array_pattern_schema(pattern) + + 
type_schema["validate"]["local"] = validator_local + + # --- Network validation (properties of linked needs) --- + # TODO: network validation is not yet enabled — the assignments to + # validator_network are commented out below. + validator_network = {} + for field, target_type in mandatory_links_targets.items(): + link_validator = { + "items": { + "local": { + "properties": {"type": {"type": "string", "const": target_type}} + } + }, + } + # validator_network[field] = link_validator + for field, target_type in optional_links_targets.items(): + link_validator = { + "items": { + "local": { + "properties": {"type": {"type": "string", "const": target_type}} + } + }, + } + # validator_network[field] = link_validator + if validator_network: + type_schema["validate"]["network"] = validator_network + + schemas.append(type_schema) + + # Write the complete schema definitions to a JSON file in confdir + schemas_output_path = Path(app.confdir) / "schemas.json" + with open(schemas_output_path, "w", encoding="utf-8") as f: + json.dump(schema_definitions, f, indent=2, ensure_ascii=False) + + # Tell sphinx-needs to load the schema from the JSON file + config.needs_schema_definitions_from_json = "schemas.json" + # config.needs_schema_definitions = schema_definitions + + +def get_field_pattern_schema(field: str, pattern: str): + """Return the appropriate JSON schema for a field's regex pattern. + + Array-valued fields (like ``tags``) get an array-of-strings schema; + scalar fields get a plain string schema. 
+ """ + if field in SN_ARRAY_FIELDS: + return get_array_pattern_schema(pattern) + return get_pattern_schema(pattern) + + +def get_pattern_schema(pattern: str): + """Return a JSON schema that validates a string against a regex pattern.""" + return { + "type": "string", + "pattern": pattern, + } + + +def get_array_pattern_schema(pattern: str): + """Return a JSON schema that validates an array where each item matches a regex.""" + return { + "type": "array", + "items": get_pattern_schema(pattern), + } diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index 094ebf4a..5ef283f7 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -375,18 +375,24 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: need_as_dict = cast(dict[str, object], need) - need_as_dict["source_code_link"] = ", ".join( - f"{get_github_link(n)}<>{n.file}:{n.line}" - for n in source_code_links.links.CodeLinks - ) - need_as_dict["testlink"] = ", ".join( - f"{get_github_link(n)}<>{n.name}" for n in source_code_links.links.TestLinks - ) + modified_need = False + if source_code_links.links.CodeLinks: + modified_need = True + need_as_dict["source_code_link"] = ", ".join( + f"{get_github_link(n)}<>{n.file}:{n.line}" + for n in source_code_links.links.CodeLinks + ) + if source_code_links.links.TestLinks: + modified_need = True + need_as_dict["testlink"] = ", ".join( + f"{get_github_link(n)}<>{n.name}" for n in source_code_links.links.TestLinks + ) - # NOTE: Removing & adding the need is important to make sure - # the needs gets 're-evaluated'. - Needs_Data.remove_need(need["id"]) - Needs_Data.add_need(need) + if modified_need: + # NOTE: Removing & adding the need is important to make sure + # the needs gets 're-evaluated'. 
+ Needs_Data.remove_need(need["id"]) + Needs_Data.add_need(need) # ╭──────────────────────────────────────╮ diff --git a/src/extensions/score_sync_toml/__init__.py b/src/extensions/score_sync_toml/__init__.py index 79ebfb7a..72e598e6 100644 --- a/src/extensions/score_sync_toml/__init__.py +++ b/src/extensions/score_sync_toml/__init__.py @@ -59,6 +59,12 @@ def setup(app: Sphinx) -> dict[str, str | bool]: ] # TODO remove the suppress_warnings once fixed + app.config.needscfg_exclude_vars = [ + "needs_from_toml", + "needs_from_toml_table", + # "needs_schema_definitions_from_json", + ] + return { "version": "0.1", "parallel_read_safe": True, From 7fbb09b9340bee52f31bc992e8321ad1c170cf6a Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Fri, 6 Feb 2026 11:26:57 +0000 Subject: [PATCH 207/231] fix: update ID regex pattern to include uppercase letters - bazel run docs succeeds --- src/extensions/score_metamodel/tests/test_metamodel_load.py | 4 ++-- src/extensions/score_metamodel/yaml_parser.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/extensions/score_metamodel/tests/test_metamodel_load.py b/src/extensions/score_metamodel/tests/test_metamodel_load.py index 3cb67965..72568592 100644 --- a/src/extensions/score_metamodel/tests/test_metamodel_load.py +++ b/src/extensions/score_metamodel/tests/test_metamodel_load.py @@ -40,8 +40,8 @@ def test_load_metamodel_data(): assert result.needs_types[0].get("color") == "blue" assert result.needs_types[0].get("style") == "bold" assert result.needs_types[0]["mandatory_options"] == { - # default id pattern: prefix + digits, lowercase letters and underscores - "id": "^T1[0-9a-z_]+$", + # default id pattern: prefix + digits, letters and underscores + "id": "^T1[0-9a-zA-Z_]+$", "opt1": "value1", } assert result.needs_types[0]["optional_options"] == { diff --git a/src/extensions/score_metamodel/yaml_parser.py b/src/extensions/score_metamodel/yaml_parser.py index 64916a90..8c83b4e5 100644 
--- a/src/extensions/score_metamodel/yaml_parser.py +++ b/src/extensions/score_metamodel/yaml_parser.py @@ -119,7 +119,7 @@ def _parse_need_type( # Ensure ID regex is set if "id" not in t["mandatory_options"]: prefix = t["prefix"] - t["mandatory_options"]["id"] = f"^{prefix}[0-9a-z_]+$" + t["mandatory_options"]["id"] = f"^{prefix}[0-9a-zA-Z_]+$" if "color" in yaml_data: t["color"] = yaml_data["color"] From de6667c6ebda8c8a84f8e84e0f5a4cf70c2ee75d Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Fri, 6 Feb 2026 12:39:45 +0000 Subject: [PATCH 208/231] refactor: clean up print statements in schema generation process --- src/extensions/score_metamodel/__init__.py | 5 ----- src/extensions/score_source_code_linker/__init__.py | 3 ++- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 523d455d..2e697c36 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -273,13 +273,8 @@ def setup(app: Sphinx) -> dict[str, str | bool]: # Generate schemas.json from the metamodel and register it with sphinx-needs. # This enables sphinx-needs 6 schema validation: required fields, regex # patterns on option values, and (eventually) link target type checks. - - print("Trying to generate schemas from metamodel.yaml and register them with Sphinx-Needs.") - write_sn_schemas(app, metamodel) - print("Successfully generated schemas from metamodel.yaml and registered them with Sphinx-Needs.") - # sphinx-collections runs on default prio 500. # We need to populate the sphinx-collections config before that happens. 
# --> 499 diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index 5ef283f7..6e5e07b4 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -385,7 +385,8 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: if source_code_links.links.TestLinks: modified_need = True need_as_dict["testlink"] = ", ".join( - f"{get_github_link(n)}<>{n.name}" for n in source_code_links.links.TestLinks + f"{get_github_link(n)}<>{n.name}" + for n in source_code_links.links.TestLinks ) if modified_need: From 60382b9c4a2498eaf4337ff216dbd71daa1dabeb Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Mon, 9 Feb 2026 07:47:52 +0000 Subject: [PATCH 209/231] feat: refactored sn_schema for testabiblity; enhance schema validation by refining regex patterns and adding type hints --- src/extensions/score_metamodel/__init__.py | 5 +- src/extensions/score_metamodel/metamodel.yaml | 22 +- src/extensions/score_metamodel/sn_schemas.py | 321 +++++++++--------- 3 files changed, 179 insertions(+), 169 deletions(-) diff --git a/src/extensions/score_metamodel/__init__.py b/src/extensions/score_metamodel/__init__.py index 2e697c36..b0d207de 100644 --- a/src/extensions/score_metamodel/__init__.py +++ b/src/extensions/score_metamodel/__init__.py @@ -15,6 +15,7 @@ import pkgutil from collections.abc import Callable from pathlib import Path +from typing import Any from sphinx.application import Sphinx from sphinx_needs import logging @@ -243,12 +244,12 @@ def setup(app: Sphinx) -> dict[str, str | bool]: # Options WITHOUT a schema are registered but not validated. 
# non_schema_options = {"source_code_link", "testlink", "codelink"} non_schema_options = {} # currently empty → all options get schema validation - extra_options_schema = [ + extra_options_schema: list[dict[str, Any]] = [ {"name": opt, "schema": {"type": "string"}} for opt in metamodel.needs_extra_options if opt not in non_schema_options ] - extra_options_wo_schema = [ + extra_options_wo_schema: list[dict[str, Any]] = [ {"name": opt} for opt in metamodel.needs_extra_options if opt in non_schema_options diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 6c6b29c8..6fb4c36b 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -124,7 +124,7 @@ needs_types: mandatory_options: # req-Id: tool_req__docs_common_attr_status status: ^(valid|draft)$ - content: ^(.|[\n\r])+$ + content: ^[\s\S]+$ optional_links: # req-Id: tool_req__docs_req_link_satisfies_allowed # TODO: fix once process_description is fixed @@ -252,7 +252,7 @@ needs_types: # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ # WARNING: THis will be activated again with new process release (1.1.0) - # content: ^(.|[\n\r])+$ + # content: ^[\s\S]+$ # req-Id: tool_req__docs_req_attr_rationale rationale: ^.+$ # req-Id: tool_req__docs_common_attr_security @@ -286,7 +286,7 @@ needs_types: safety: ^(QM|ASIL_B)$ # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ - content: ^(.|[\n\r])+$ + content: ^[\s\S]+$ mandatory_links: # req-Id: tool_req__docs_req_link_satisfies_allowed satisfies: stkh_req @@ -320,7 +320,7 @@ needs_types: safety: ^(QM|ASIL_B)$ # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ - content: ^(.|[\n\r])+$ + content: ^[\s\S]+$ mandatory_links: # req-Id: tool_req__docs_req_link_satisfies_allowed satisfies: feat_req @@ -348,7 +348,7 @@ needs_types: safety: ^(QM|ASIL_B)$ # req-Id: tool_req__docs_common_attr_status status: 
^(valid|invalid)$ - content: ^(.|[\n\r])+$ + content: ^[\s\S]+$ optional_links: # req-Id: tool_req__docs_req_link_satisfies_allowed # TODO: make it mandatory @@ -381,7 +381,7 @@ needs_types: safety: ^(QM|ASIL_B)$ # req-Id: tool_req__docs_common_attr_status status: ^(valid|invalid)$ - content: ^(.|[\n\r])+$ + content: ^[\s\S]+$ optional_options: codelink: ^.*$ testlink: ^.*$ @@ -728,7 +728,7 @@ needs_types: failure_effect: ^.*$ sufficient: ^(yes|no)$ status: ^(valid|invalid)$ - content: ^(.|[\n\r])+$ + content: ^[\s\S]+$ mandatory_links: violates: feat_arc_sta optional_options: @@ -748,7 +748,7 @@ needs_types: sufficient: ^(yes|no)$ status: ^(valid|invalid)$ # req-Id: tool_req__docs_saf_attrs_content - content: ^(.|[\n\r])+$ + content: ^[\s\S]+$ mandatory_links: # req-Id: tool_req__docs_saf_attrs_violates violates: feat_arc_sta @@ -775,7 +775,7 @@ needs_types: sufficient: ^(yes|no)$ status: ^(valid|invalid)$ # req-Id: tool_req__docs_saf_attrs_content - content: ^(.|[\n\r])+$ + content: ^[\s\S]+$ optional_options: # req-Id: tool_req__docs_saf_attrs_mitigation_issue mitigation_issue: ^https://github.com/.*$ @@ -803,7 +803,7 @@ needs_types: sufficient: ^(yes|no)$ status: ^(valid|invalid)$ # req-Id: tool_req__docs_saf_attrs_content - content: ^(.|[\n\r])+$ + content: ^[\s\S]+$ optional_options: # req-Id: tool_req__docs_saf_attrs_mitigation_issue mitigation_issue: ^https://github.com/.*$ @@ -830,7 +830,7 @@ needs_types: sufficient: ^(yes|no)$ status: ^(valid|invalid)$ # req-Id: tool_req__docs_saf_attrs_content - content: ^(.|[\n\r])+$ + content: ^[\s\S]+$ optional_options: # req-Id: tool_req__docs_saf_attrs_mitigation_issue mitigation_issue: ^https://github.com/.*$ diff --git a/src/extensions/score_metamodel/sn_schemas.py b/src/extensions/score_metamodel/sn_schemas.py index 0b81c992..fccad876 100644 --- a/src/extensions/score_metamodel/sn_schemas.py +++ b/src/extensions/score_metamodel/sn_schemas.py @@ -1,3 +1,15 @@ +# 
******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* """Transforms the YAML metamodel into sphinx-needs JSON schema definitions. Reads need types from the parsed metamodel (MetaModelData) and generates a @@ -12,11 +24,13 @@ import json from pathlib import Path +from typing import Any from sphinx.application import Sphinx from sphinx.config import Config from sphinx_needs import logging +from src.extensions.score_metamodel.metamodel_types import ScoreNeedType from src.extensions.score_metamodel.yaml_parser import MetaModelData # Fields whose values are lists in sphinx-needs (e.g. tags: ["safety", "security"]). @@ -37,164 +51,19 @@ def write_sn_schemas(app: Sphinx, metamodel: MetaModelData) -> None: """Build sphinx-needs schema definitions from the metamodel and write to JSON. - For every need type that has at least one constraint (mandatory/optional - fields or links), a schema entry is created with: - - 1. A **selector** that matches needs whose ``type`` equals the directive name. - 2. A **local validator** containing: - - ``required`` list for mandatory fields/links. - - ``properties`` with regex ``pattern`` constraints for field values. - - ``minItems: 1`` for mandatory links (must have at least one target). - 3. A **network validator** (currently disabled) that would check that - linked needs have the expected ``type``. 
- - The resulting JSON is written to ``<confdir>/schemas.json`` and registered - with sphinx-needs via ``config.needs_schema_definitions_from_json``. + Iterates over all need types, builds a schema for each one via + ``_build_need_type_schema``, and writes the result to + ``<confdir>/schemas.json``. """ config: Config = app.config - schemas = [] - schema_definitions = {"schemas": schemas} + schemas: list[dict[str, Any]] = [] for need_type in metamodel.needs_types: - # Extract the four constraint categories from the metamodel YAML - mandatory_fields = need_type.get("mandatory_options", {}) - optional_fields = need_type.get("optional_options", {}) - mandatory_links = need_type.get("mandatory_links", {}) - optional_links = need_type.get("optional_links", {}) - - # Skip need types that have no constraints at all - if not ( - mandatory_fields or optional_fields or mandatory_links or optional_links - ): - continue + schema = _build_need_type_schema(need_type) + if schema is not None: + schemas.append(schema) - # --- Classify link values as regex patterns vs. target type names --- - # In the metamodel YAML, a link value can be either: - # - A regex (starts with "^"), e.g. "^logic_arc_int(_op)*__.+$" - # → validated locally (the link ID must match the pattern) - # - A plain type name, e.g. "comp" - # → validated via network (the linked need must have that type) - # Multiple values are comma-separated, e.g. "comp, sw_unit" - mandatory_links_regexes = {} - mandatory_links_targets = {} - optional_links_regexes = {} - optional_links_targets = {} - value: str - field: str - for field, value in mandatory_links.items(): - link_values = [v.strip() for v in value.split(",")] - for link_value in link_values: - if link_value.startswith("^"): - if field in mandatory_links_regexes: - LOGGER.error( - "Multiple regex patterns for mandatory link field " - f"'{field}' in need type '{type_name}'. " - "Only the first one will be used in the schema." 
- ) - mandatory_links_regexes[field] = link_value - else: - mandatory_links_targets[field] = link_value - - for field, value in optional_links.items(): - link_values = [v.strip() for v in value.split(",")] - for link_value in link_values: - if link_value.startswith("^"): - if field in optional_links_regexes: - LOGGER.error( - "Multiple regex patterns for optional link field " - f"'{field}' in need type '{type_name}'. " - "Only the first one will be used in the schema." - ) - optional_links_regexes[field] = link_value - else: - optional_links_targets[field] = link_value - - # --- Build the schema entry for this need type --- - type_schema = { - "id": f"need-type-{need_type['directive']}", - "severity": "violation", - "message": "Need does not conform to S-CORE metamodel", - } - type_name = need_type["directive"] - - # Selector: only apply this schema to needs with matching type - selector = { - "properties": {"type": {"const": type_name}}, - "required": ["type"], - } - type_schema["select"] = selector - - # --- Local validation (the need's own properties) --- - type_schema["validate"] = {} - validator_local = { - "properties": {}, - "required": [], - # "unevaluatedProperties": False, - } - - # Mandatory fields: must be present AND match the regex pattern - for field, pattern in mandatory_fields.items(): - if field in IGNORE_FIELDS: - continue - validator_local["required"].append(field) - validator_local["properties"][field] = get_field_pattern_schema( - field, pattern - ) - - # Optional fields: if present, must match the regex pattern - for field, pattern in optional_fields.items(): - if field in IGNORE_FIELDS: - continue - validator_local["properties"][field] = get_field_pattern_schema( - field, pattern - ) - - # Mandatory links (regex): must have at least one entry - # TODO: regex pattern matching on link IDs is not yet enabled - for field, pattern in mandatory_links_regexes.items(): - validator_local["properties"][field] = { - "type": "array", - "minItems": 1, - 
} - validator_local["required"].append(field) - # validator_local["properties"][field] = get_array_pattern_schema(pattern) - - # Optional links (regex): allowed but not required - # TODO: regex pattern matching on link IDs is not yet enabled - for field, pattern in optional_links_regexes.items(): - validator_local["properties"][field] = { - "type": "array", - } - # validator_local["properties"][field] = get_array_pattern_schema(pattern) - - type_schema["validate"]["local"] = validator_local - - # --- Network validation (properties of linked needs) --- - # TODO: network validation is not yet enabled — the assignments to - # validator_network are commented out below. - validator_network = {} - for field, target_type in mandatory_links_targets.items(): - link_validator = { - "items": { - "local": { - "properties": {"type": {"type": "string", "const": target_type}} - } - }, - } - # validator_network[field] = link_validator - for field, target_type in optional_links_targets.items(): - link_validator = { - "items": { - "local": { - "properties": {"type": {"type": "string", "const": target_type}} - } - }, - } - # validator_network[field] = link_validator - if validator_network: - type_schema["validate"]["network"] = validator_network - - schemas.append(type_schema) + schema_definitions: dict[str, Any] = {"schemas": schemas} # Write the complete schema definitions to a JSON file in confdir schemas_output_path = Path(app.confdir) / "schemas.json" @@ -206,7 +75,147 @@ def write_sn_schemas(app: Sphinx, metamodel: MetaModelData) -> None: # config.needs_schema_definitions = schema_definitions -def get_field_pattern_schema(field: str, pattern: str): +def _classify_links( + links: dict[str, Any], type_name: str, mandatory: bool +) -> tuple[dict[str, str], dict[str, str]]: + """Classify link values into regex patterns vs. target type names. + + In the metamodel YAML, a link value can be either: + - A regex (starts with "^"), e.g. 
"^logic_arc_int(_op)*__.+$" + -> validated locally (the link ID must match the pattern) + - A plain type name, e.g. "comp" + -> validated via network (the linked need must have that type) + Multiple values are comma-separated, e.g. "comp, sw_unit". + + Returns: + A tuple of (regexes, targets) dicts, keyed by field name. + """ + label = "mandatory" if mandatory else "optional" + regexes: dict[str, str] = {} + targets: dict[str, str] = {} + + for field, value in links.items(): + link_values = [v.strip() for v in value.split(",")] + for link_value in link_values: + if link_value.startswith("^"): + if field in regexes: + LOGGER.error( + f"Multiple regex patterns for {label} link field " + f"'{field}' in need type '{type_name}'. " + "Only the first one will be used in the schema." + ) + regexes[field] = link_value + else: + targets[field] = link_value + + return regexes, targets + + +def _build_local_validator( + mandatory_fields: dict[str, str], + optional_fields: dict[str, str], + mandatory_links_regexes: dict[str, str], + optional_links_regexes: dict[str, str], +) -> dict[str, Any]: + """Build the local validator dict for a need type's schema. + + The local validator checks the need's own properties: + - Mandatory fields must be present and match their regex pattern. + - Optional fields, if present, must match their regex pattern. + - Mandatory links must have at least one entry. 
+ """ + properties: dict[str, Any] = {} + required: list[str] = [] + + # Mandatory fields: must be present AND match the regex pattern + for field, pattern in mandatory_fields.items(): + if field in IGNORE_FIELDS: + continue + required.append(field) + properties[field] = get_field_pattern_schema(field, pattern) + + # Optional fields: if present, must match the regex pattern + for field, pattern in optional_fields.items(): + if field in IGNORE_FIELDS: + continue + properties[field] = get_field_pattern_schema(field, pattern) + + # Mandatory links (regex): must have at least one entry + # TODO: regex pattern matching on link IDs is not yet enabled + for field in mandatory_links_regexes: + properties[field] = {"type": "array", "minItems": 1} + required.append(field) + + # Optional links (regex): allowed but not required + # TODO: regex pattern matching on link IDs is not yet enabled + for field in optional_links_regexes: + properties[field] = {"type": "array"} + + return { + "properties": properties, + "required": required, + # "unevaluatedProperties": False, + } + + +def _build_need_type_schema(need_type: ScoreNeedType) -> dict[str, Any] | None: + """Build a sphinx-needs schema entry for a single need type. + + Returns ``None`` if the need type has no constraints (no mandatory/optional + fields or links), meaning no schema validation is needed. 
+ + The returned dict has the sphinx-needs schema structure: + - ``select``: matches needs by their ``type`` field + - ``validate.local``: validates the need's own properties + - ``validate.network``: validates linked needs' types (NOT YET ACTIVE) + """ + mandatory_fields = need_type.get("mandatory_options", {}) + optional_fields = need_type.get("optional_options", {}) + mandatory_links = need_type.get("mandatory_links", {}) + optional_links = need_type.get("optional_links", {}) + + # Skip need types that have no constraints at all + if not (mandatory_fields or optional_fields or mandatory_links or optional_links): + return None + + type_name = need_type["directive"] + + # Classify link values as regex patterns vs. target type names. + # Note: links are still plain strings at this point (before postprocess_need_links). + mandatory_links_regexes, _ = _classify_links( + mandatory_links, type_name, mandatory=True + ) + optional_links_regexes, _ = _classify_links( + optional_links, type_name, mandatory=False + ) + + type_schema: dict[str, Any] = { + "id": f"need-type-{type_name}", + "severity": "violation", + "message": "Need does not conform to S-CORE metamodel", + # Selector: only apply this schema to needs with matching type + "select": { + "properties": {"type": {"const": type_name}}, + "required": ["type"], + }, + "validate": { + "local": _build_local_validator( + mandatory_fields, + optional_fields, + mandatory_links_regexes, + optional_links_regexes, + ), + }, + } + + # TODO: network validation is not yet enabled. + # When enabled, it would use the target type names (second return value + # of _classify_links) to check that linked needs have the expected type. + + return type_schema + + +def get_field_pattern_schema(field: str, pattern: str) -> dict[str, Any]: """Return the appropriate JSON schema for a field's regex pattern. 
Array-valued fields (like ``tags``) get an array-of-strings schema; @@ -217,7 +226,7 @@ def get_field_pattern_schema(field: str, pattern: str): return get_pattern_schema(pattern) -def get_pattern_schema(pattern: str): +def get_pattern_schema(pattern: str) -> dict[str, str]: """Return a JSON schema that validates a string against a regex pattern.""" return { "type": "string", @@ -225,7 +234,7 @@ def get_pattern_schema(pattern: str): } -def get_array_pattern_schema(pattern: str): +def get_array_pattern_schema(pattern: str) -> dict[str, Any]: """Return a JSON schema that validates an array where each item matches a regex.""" return { "type": "array", From 1a56cfe41c4df6dbfc07e91c4290a2ee20498965 Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Mon, 9 Feb 2026 08:29:56 +0000 Subject: [PATCH 210/231] Added unit tests for sn_schemas --- .../score_metamodel/tests/test_sn_schemas.py | 402 ++++++++++++++++++ 1 file changed, 402 insertions(+) create mode 100644 src/extensions/score_metamodel/tests/test_sn_schemas.py diff --git a/src/extensions/score_metamodel/tests/test_sn_schemas.py b/src/extensions/score_metamodel/tests/test_sn_schemas.py new file mode 100644 index 00000000..b4ed5c24 --- /dev/null +++ b/src/extensions/score_metamodel/tests/test_sn_schemas.py @@ -0,0 +1,402 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +# pyright: reportPrivateUsage=false +import json +from pathlib import Path +from typing import Any, cast +from unittest.mock import MagicMock, patch + +import pytest + +from src.extensions.score_metamodel.metamodel_types import ScoreNeedType +from src.extensions.score_metamodel.sn_schemas import ( + IGNORE_FIELDS, + SN_ARRAY_FIELDS, + _build_local_validator, + _build_need_type_schema, + _classify_links, + get_array_pattern_schema, + get_field_pattern_schema, + get_pattern_schema, + write_sn_schemas, +) + + +# ============================================================================= +# Tests for get_pattern_schema +# ============================================================================= + + +class TestGetPatternSchema: + def test_returns_string_type_with_pattern(self) -> None: + result = get_pattern_schema("^[A-Z]+$") + assert result == {"type": "string", "pattern": "^[A-Z]+$"} + + def test_preserves_complex_regex(self) -> None: + pattern = r"^(feat|fix|chore)\/.+$" + result = get_pattern_schema(pattern) + assert result["type"] == "string" + assert result["pattern"] == pattern + + +# ============================================================================= +# Tests for get_array_pattern_schema +# ============================================================================= + + +class TestGetArrayPatternSchema: + def test_returns_array_type_with_items(self) -> None: + result = get_array_pattern_schema("^tag_.*$") + assert result == { + "type": "array", + "items": {"type": "string", "pattern": "^tag_.*$"}, + } + + def test_items_match_get_pattern_schema(self) -> None: + pattern = "^[a-z]+$" + result = get_array_pattern_schema(pattern) + 
assert result["items"] == get_pattern_schema(pattern) + + +# ============================================================================= +# Tests for get_field_pattern_schema +# ============================================================================= + + +class TestGetFieldPatternSchema: + def test_scalar_field_returns_string_schema(self) -> None: + result = get_field_pattern_schema("title", "^.+$") + assert result == {"type": "string", "pattern": "^.+$"} + + def test_array_field_returns_array_schema(self) -> None: + for array_field in SN_ARRAY_FIELDS: + result = get_field_pattern_schema(array_field, "^[a-z]+$") + assert result["type"] == "array", f"Field '{array_field}' should be array" + assert "items" in result + + def test_unknown_field_returns_string_schema(self) -> None: + result = get_field_pattern_schema("some_custom_field", "^.*$") + assert result["type"] == "string" + + +# ============================================================================= +# Tests for _classify_links +# ============================================================================= + + +class TestClassifyLinks: + def test_regex_link_classified_as_regex(self) -> None: + links = {"parent_need": "^logic_arc_int__.+$"} + regexes, targets = _classify_links(links, "my_type", mandatory=True) + assert regexes == {"parent_need": "^logic_arc_int__.+$"} + assert targets == {} + + def test_plain_type_classified_as_target(self) -> None: + links = {"satisfies": "comp"} + regexes, targets = _classify_links(links, "my_type", mandatory=False) + assert regexes == {} + assert targets == {"satisfies": "comp"} + + def test_comma_separated_mixed_values(self) -> None: + links = {"related": "^arc_.+$, comp"} + regexes, targets = _classify_links(links, "my_type", mandatory=True) + assert regexes == {"related": "^arc_.+$"} + assert targets == {"related": "comp"} + + def test_empty_links(self) -> None: + regexes, targets = _classify_links({}, "my_type", mandatory=True) + assert regexes == {} + 
assert targets == {} + + def test_multiple_fields(self) -> None: + links = { + "satisfies": "req", + "parent": "^parent__.+$", + } + regexes, targets = _classify_links(links, "my_type", mandatory=False) + assert regexes == {"parent": "^parent__.+$"} + assert targets == {"satisfies": "req"} + + def test_multiple_regex_for_same_field_logs_error(self) -> None: + links = {"field": "^regex1$, ^regex2$"} + with patch("src.extensions.score_metamodel.sn_schemas.LOGGER") as mock_logger: + regexes, _ = _classify_links(links, "my_type", mandatory=True) + mock_logger.error.assert_called_once() + # Last regex overwrites previous ones + assert regexes == {"field": "^regex2$"} + + def test_multiple_plain_targets_last_wins(self) -> None: + links = {"field": "comp, sw_unit"} + regexes, targets = _classify_links(links, "my_type", mandatory=True) + assert regexes == {} + # Last target overwrites + assert targets == {"field": "sw_unit"} + + +# ============================================================================= +# Tests for _build_local_validator +# ============================================================================= + + +class TestBuildLocalValidator: + def test_mandatory_fields_are_required(self) -> None: + mandatory = {"status": "^(valid|draft)$"} + result = _build_local_validator(mandatory, {}, {}, {}) + assert "status" in result["required"] + assert "status" in result["properties"] + assert result["properties"]["status"]["pattern"] == "^(valid|draft)$" + + def test_optional_fields_not_required(self) -> None: + optional = {"comment": "^.*$"} + result = _build_local_validator({}, optional, {}, {}) + assert "comment" not in result["required"] + assert "comment" in result["properties"] + + def test_ignored_fields_excluded(self) -> None: + mandatory = {field: "^.*$" for field in IGNORE_FIELDS} + optional = {field: "^.*$" for field in IGNORE_FIELDS} + result = _build_local_validator(mandatory, optional, {}, {}) + for field in IGNORE_FIELDS: + assert field not in 
result["properties"] + assert field not in result["required"] + + def test_mandatory_link_regexes_required_with_min_items(self) -> None: + mandatory_link_regexes = {"satisfies": "^req__.+$"} + result = _build_local_validator({}, {}, mandatory_link_regexes, {}) + assert "satisfies" in result["required"] + assert result["properties"]["satisfies"] == {"type": "array", "minItems": 1} + + def test_optional_link_regexes_not_required(self) -> None: + optional_link_regexes = {"related": "^rel__.+$"} + result = _build_local_validator({}, {}, {}, optional_link_regexes) + assert "related" not in result["required"] + assert result["properties"]["related"] == {"type": "array"} + + def test_combined_fields_and_links(self) -> None: + mandatory = {"status": "^valid$"} + optional = {"comment": "^.*$"} + mandatory_link_re = {"satisfies": "^req__.+$"} + optional_link_re = {"related": "^rel__.+$"} + result = _build_local_validator( + mandatory, optional, mandatory_link_re, optional_link_re + ) + assert set(result["required"]) == {"status", "satisfies"} + assert set(result["properties"].keys()) == { + "status", + "comment", + "satisfies", + "related", + } + + def test_empty_inputs(self) -> None: + result = _build_local_validator({}, {}, {}, {}) + assert result["properties"] == {} + assert result["required"] == [] + + def test_array_field_in_mandatory(self) -> None: + mandatory = {"tags": "^(safety|security)$"} + result = _build_local_validator(mandatory, {}, {}, {}) + assert result["properties"]["tags"]["type"] == "array" + assert "items" in result["properties"]["tags"] + + +# ============================================================================= +# Tests for _build_need_type_schema +# ============================================================================= + + +def _make_need_type(**overrides: Any) -> ScoreNeedType: + """Helper to create a ScoreNeedType-like dict.""" + base: dict[str, Any] = { + "directive": "test_type", + "title": "Test Type", + "prefix": "TT_", + } + 
base.update(overrides) + return cast(ScoreNeedType, base) + + +class TestBuildNeedTypeSchema: + def test_returns_none_for_no_constraints(self) -> None: + need_type = _make_need_type() + assert _build_need_type_schema(need_type) is None + + def test_returns_none_for_empty_constraints(self) -> None: + need_type = _make_need_type( + mandatory_options={}, + optional_options={}, + mandatory_links={}, + optional_links={}, + ) + assert _build_need_type_schema(need_type) is None + + def test_schema_has_correct_structure(self) -> None: + need_type = _make_need_type( + mandatory_options={"status": "^valid$"}, + ) + schema = _build_need_type_schema(need_type) + assert schema is not None + assert schema["id"] == "need-type-test_type" + assert schema["severity"] == "violation" + assert "select" in schema + assert schema["select"]["properties"]["type"]["const"] == "test_type" + assert "validate" in schema + assert "local" in schema["validate"] + + def test_mandatory_fields_in_local_validator(self) -> None: + need_type = _make_need_type( + mandatory_options={"status": "^(valid|draft)$"}, + ) + schema = _build_need_type_schema(need_type) + assert schema is not None + local = schema["validate"]["local"] + assert "status" in local["required"] + assert "status" in local["properties"] + + def test_optional_fields_in_local_validator(self) -> None: + need_type = _make_need_type( + optional_options={"comment": "^.*$"}, + ) + schema = _build_need_type_schema(need_type) + assert schema is not None + local = schema["validate"]["local"] + assert "comment" not in local["required"] + assert "comment" in local["properties"] + + def test_mandatory_links_with_regex(self) -> None: + need_type = _make_need_type( + mandatory_links={"satisfies": "^req__.+$"}, + ) + schema = _build_need_type_schema(need_type) + assert schema is not None + local = schema["validate"]["local"] + assert "satisfies" in local["required"] + assert local["properties"]["satisfies"] == {"type": "array", "minItems": 1} + + def 
test_mandatory_links_with_plain_target(self) -> None: + need_type = _make_need_type( + mandatory_links={"satisfies": "comp"}, + ) + schema = _build_need_type_schema(need_type) + assert schema is not None + local = schema["validate"]["local"] + # Plain targets don't produce local validation entries + assert "satisfies" not in local["properties"] + + def test_optional_links_with_regex(self) -> None: + need_type = _make_need_type( + optional_links={"related": "^rel__.+$"}, + ) + schema = _build_need_type_schema(need_type) + assert schema is not None + local = schema["validate"]["local"] + assert "related" not in local["required"] + assert local["properties"]["related"] == {"type": "array"} + + +# ============================================================================= +# Tests for write_sn_schemas +# ============================================================================= + + +class TestWriteSnSchemas: + def test_writes_json_file(self, tmp_path: Path) -> None: + app = MagicMock() + app.confdir = str(tmp_path) + app.config = MagicMock() + + need_type: dict[str, Any] = { + "directive": "req", + "title": "Requirement", + "prefix": "REQ_", + "mandatory_options": {"status": "^valid$"}, + } + metamodel = MagicMock() + metamodel.needs_types = [need_type] + + write_sn_schemas(app, metamodel) + + output_path: Path = tmp_path / "schemas.json" + assert output_path.exists() + data = json.loads(output_path.read_text(encoding="utf-8")) + assert "schemas" in data + assert len(data["schemas"]) == 1 + assert data["schemas"][0]["id"] == "need-type-req" + + def test_sets_config_value(self, tmp_path: Path) -> None: + app = MagicMock() + app.confdir = str(tmp_path) + app.config = MagicMock() + + metamodel = MagicMock() + metamodel.needs_types = [] + + write_sn_schemas(app, metamodel) + + assert app.config.needs_schema_definitions_from_json == "schemas.json" + + def test_skips_need_types_without_constraints(self, tmp_path: Path) -> None: + app = MagicMock() + app.confdir = 
str(tmp_path) + app.config = MagicMock() + + need_type_with: dict[str, Any] = { + "directive": "req", + "title": "Requirement", + "prefix": "REQ_", + "mandatory_options": {"status": "^valid$"}, + } + need_type_without: dict[str, Any] = { + "directive": "info", + "title": "Info", + "prefix": "INF_", + } + metamodel = MagicMock() + metamodel.needs_types = [need_type_with, need_type_without] + + write_sn_schemas(app, metamodel) + + output_path: Path = tmp_path / "schemas.json" + data = json.loads(output_path.read_text(encoding="utf-8")) + assert len(data["schemas"]) == 1 + assert data["schemas"][0]["id"] == "need-type-req" + + def test_writes_valid_json_with_multiple_types(self, tmp_path: Path) -> None: + app = MagicMock() + app.confdir = str(tmp_path) + app.config = MagicMock() + + need_types: list[dict[str, Any]] = [ + { + "directive": "req", + "title": "Requirement", + "prefix": "REQ_", + "mandatory_options": {"status": "^valid$"}, + }, + { + "directive": "spec", + "title": "Specification", + "prefix": "SPEC_", + "optional_options": {"comment": "^.*$"}, + }, + ] + metamodel = MagicMock() + metamodel.needs_types = need_types + + write_sn_schemas(app, metamodel) + + output_path: Path = tmp_path / "schemas.json" + data = json.loads(output_path.read_text(encoding="utf-8")) + assert len(data["schemas"]) == 2 + ids = {s["id"] for s in data["schemas"]} + assert ids == {"need-type-req", "need-type-spec"} From 1ab312bbda34ec73696a2a42f2691e39948cf157 Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Mon, 9 Feb 2026 09:21:33 +0000 Subject: [PATCH 211/231] feat: add integration tests for schema generation against SCORE metamodel --- .../tests/test_sn_schemas_integration.py | 438 ++++++++++++++++++ 1 file changed, 438 insertions(+) create mode 100644 src/extensions/score_metamodel/tests/test_sn_schemas_integration.py diff --git a/src/extensions/score_metamodel/tests/test_sn_schemas_integration.py 
b/src/extensions/score_metamodel/tests/test_sn_schemas_integration.py new file mode 100644 index 00000000..f9253814 --- /dev/null +++ b/src/extensions/score_metamodel/tests/test_sn_schemas_integration.py @@ -0,0 +1,438 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +# pyright: reportPrivateUsage=false +"""Integration tests for schema generation against the real SCORE metamodel. + +Test Design +----------- + +Objective: + Verify that the schemas generated by ``sn_schemas.py`` from the real + ``metamodel.yaml`` correctly accept valid needs and reject invalid ones. + This proves the schema-based validation (sphinx-needs 6) is a faithful + translation of the metamodel rules previously enforced only by Python + checks in ``checks/check_options.py``. + +Approach: + 1. Load the S-Core ``metamodel.yaml`` via ``load_metamodel_data()``. + 2. Generate schemas for every need type via ``_build_need_type_schema()``. + 3. Validate sample needs against the generated schemas using + ``jsonschema_rs.Draft7Validator``, the same JSON Schema engine + sphinx-needs uses at build time. + +Test categories: + + **Structural sweep** (``TestAllSchemasStructural``) + Iterates over ALL need types from the S-Core metamodel and verifies: + - Every generated schema is a valid JSON Schema (constructable). + - Schema structure matches the sphinx-needs contract + (id, severity, select, validate.local). + - The ``select`` schema matches only the correct need type. 
+ - Fields in ``IGNORE_FIELDS`` (e.g. ``content``) are excluded. + - Every ``mandatory_options`` field appears in ``required``. + - Every ``optional_options`` field appears in ``properties`` + but NOT in ``required``. + - Regex patterns in schemas match the metamodel definitions exactly. + - Types without any constraints produce no schema. + + **Representative type tests** (``TestFeatReqSchema``, ``TestCompSchema``, + ``TestFeatSchema``) + For a curated set of need types, construct valid and invalid need + dicts and assert the schema accepts or rejects them. This covers + the constraint categories that exist in the metamodel: + + - ``feat_req``: Mandatory fields with regex patterns + (reqtype, security, safety, status), optional fields with patterns + (reqcovered, testcovered), mandatory link with plain target + (satisfies -> stkh_req, no local link validation), and ``content`` + in ``IGNORE_FIELDS``. + - ``comp``: Mandatory fields only, no mandatory links. + - ``feat``: Mandatory link with regex pattern + (includes: ``^logic_arc_int(_op)*__.+$``), producing a local + ``minItems: 1`` constraint. + +Validation helpers: + ``assert_schema_valid`` / ``assert_schema_invalid`` replicate the + two-step sphinx-needs validation: first match the ``select`` schema + (ensures the schema applies to the need's type), then validate against + ``validate.local``. + +Limitations: + - Network validation (checking linked needs' types) is not yet active + in ``sn_schemas.py`` and therefore not tested here. + - Graph checks (safety level decomposition, prohibited words) remain in + Python code and are outside the scope of schema-based validation. + - The ``content`` field is excluded via ``IGNORE_FIELDS`` because it is + not yet available in ubCode; this exclusion is explicitly tested. 
+""" + +from typing import Any, cast + +import jsonschema_rs +import pytest + +from src.extensions.score_metamodel.metamodel_types import ScoreNeedType +from src.extensions.score_metamodel.sn_schemas import ( + IGNORE_FIELDS, + _build_need_type_schema, +) +from src.extensions.score_metamodel.yaml_parser import ( + MetaModelData, + load_metamodel_data, +) + + +# ============================================================================= +# Fixtures +# ============================================================================= + + +@pytest.fixture(scope="module") +def metamodel() -> MetaModelData: + """Load the S-Core metamodel.yaml once for all tests in this module.""" + return load_metamodel_data() + + +@pytest.fixture(scope="module") +def schemas_by_type(metamodel: MetaModelData) -> dict[str, dict[str, Any]]: + """Generate sphinx-needs schemas for all need types and index by directive name.""" + result: dict[str, dict[str, Any]] = {} + for need_type in metamodel.needs_types: + schema = _build_need_type_schema(need_type) + if schema is not None: + result[need_type["directive"]] = schema + return result + + +@pytest.fixture(scope="module") +def need_types_by_directive(metamodel: MetaModelData) -> dict[str, ScoreNeedType]: + """Index need types by directive name for easy lookup.""" + return {nt["directive"]: nt for nt in metamodel.needs_types} + + +# ============================================================================= +# Helpers +# ============================================================================= + + +def assert_schema_valid(need_dict: dict[str, Any], schema: dict[str, Any]) -> None: + """Assert that a need dict passes the schema's local validator.""" + select_validator = jsonschema_rs.Draft7Validator(schema["select"]) + assert select_validator.is_valid(need_dict), ( + f"Need type '{need_dict.get('type')}' did not match schema selector" + ) + local_validator = jsonschema_rs.Draft7Validator(schema["validate"]["local"]) + # raises 
ValidationError with details on failure + local_validator.validate(need_dict) + + +def assert_schema_invalid(need_dict: dict[str, Any], schema: dict[str, Any]) -> None: + """Assert that a need dict FAILS the schema's local validator.""" + select_validator = jsonschema_rs.Draft7Validator(schema["select"]) + assert select_validator.is_valid(need_dict), ( + f"Need type '{need_dict.get('type')}' did not match schema selector" + ) + local_validator = jsonschema_rs.Draft7Validator(schema["validate"]["local"]) + assert not local_validator.is_valid(need_dict), ( + f"Expected validation to fail for need: {need_dict}" + ) + + +# ============================================================================= +# Structural sweep over all types +# ============================================================================= + + +class TestAllSchemasStructural: + """Verify every schema generated from the real metamodel is well-formed.""" + + def test_at_least_one_schema_generated( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + assert len(schemas_by_type) > 0 + + def test_all_schemas_are_valid_json_schemas( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + """Every schema's select and validate.local must be constructable.""" + for schema in schemas_by_type.values(): + jsonschema_rs.Draft7Validator(schema["select"]) + jsonschema_rs.Draft7Validator(schema["validate"]["local"]) + + def test_every_schema_has_required_structure( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + for type_name, schema in schemas_by_type.items(): + assert schema["id"] == f"need-type-{type_name}" + assert "severity" in schema + assert "select" in schema + assert "local" in schema["validate"] + + def test_select_matches_correct_type_only( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + for type_name, schema in schemas_by_type.items(): + selector = jsonschema_rs.Draft7Validator(schema["select"]) + assert selector.is_valid({"type": 
type_name}) + assert not selector.is_valid({"type": f"NOT_{type_name}"}) + + def test_ignored_fields_never_in_schemas( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + for type_name, schema in schemas_by_type.items(): + local = schema["validate"]["local"] + for field in IGNORE_FIELDS: + assert field not in local.get("properties", {}), ( + f"{type_name}: '{field}' should be ignored" + ) + assert field not in local.get("required", []), ( + f"{type_name}: '{field}' should be ignored" + ) + + def test_mandatory_options_are_required( + self, + schemas_by_type: dict[str, dict[str, Any]], + need_types_by_directive: dict[str, ScoreNeedType], + ) -> None: + for type_name, schema in schemas_by_type.items(): + need_type = need_types_by_directive[type_name] + local = schema["validate"]["local"] + for field in need_type.get("mandatory_options", {}): + if field in IGNORE_FIELDS: + continue + assert field in local["required"], ( + f"{type_name}: mandatory field '{field}' missing from required" + ) + + def test_optional_options_not_required( + self, + schemas_by_type: dict[str, dict[str, Any]], + need_types_by_directive: dict[str, ScoreNeedType], + ) -> None: + for type_name, schema in schemas_by_type.items(): + need_type = need_types_by_directive[type_name] + local = schema["validate"]["local"] + for field in need_type.get("optional_options", {}): + if field in IGNORE_FIELDS: + continue + assert field in local["properties"], ( + f"{type_name}: optional field '{field}' missing from properties" + ) + assert field not in local["required"], ( + f"{type_name}: optional field '{field}' should not be required" + ) + + def test_mandatory_option_patterns_match_metamodel( + self, + schemas_by_type: dict[str, dict[str, Any]], + need_types_by_directive: dict[str, ScoreNeedType], + ) -> None: + for type_name, schema in schemas_by_type.items(): + need_type = need_types_by_directive[type_name] + local = schema["validate"]["local"] + for field, pattern in 
need_type.get("mandatory_options", {}).items(): + if field in IGNORE_FIELDS: + continue + prop = local["properties"][field] + if prop.get("type") == "array": + assert prop["items"]["pattern"] == pattern, ( + f"{type_name}.{field}: pattern mismatch" + ) + else: + assert prop["pattern"] == pattern, ( + f"{type_name}.{field}: pattern mismatch" + ) + + def test_types_without_constraints_have_no_schema( + self, + metamodel: MetaModelData, + schemas_by_type: dict[str, dict[str, Any]], + ) -> None: + for nt in metamodel.needs_types: + directive = nt["directive"] + has_constraints = bool( + nt.get("mandatory_options") + or nt.get("optional_options") + or nt.get("mandatory_links") + or nt.get("optional_links") + ) + if not has_constraints: + assert directive not in schemas_by_type, ( + f"{directive} has no constraints but got a schema" + ) + + +# ============================================================================= +# feat_req: mandatory fields, mandatory link (plain target = no local link check) +# ============================================================================= + + +class TestFeatReqSchema: + """Integration tests for feat_req using the real metamodel.""" + + @staticmethod + def _make_valid() -> dict[str, Any]: + return { + "type": "feat_req", + "id": "feat_req__test__001", + "reqtype": "Functional", + "security": "YES", + "safety": "QM", + "status": "valid", + } + + def test_valid_need_passes( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + assert_schema_valid(self._make_valid(), schemas_by_type["feat_req"]) + + def test_missing_status_fails( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + need = self._make_valid() + del need["status"] + assert_schema_invalid(need, schemas_by_type["feat_req"]) + + def test_missing_safety_fails( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + need = self._make_valid() + del need["safety"] + assert_schema_invalid(need, schemas_by_type["feat_req"]) + + def 
test_wrong_status_pattern_fails( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + need = self._make_valid() + need["status"] = "approved" # not in ^(valid|invalid)$ + assert_schema_invalid(need, schemas_by_type["feat_req"]) + + def test_wrong_safety_pattern_fails( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + need = self._make_valid() + need["safety"] = "ASIL_D" # not in ^(QM|ASIL_B)$ + assert_schema_invalid(need, schemas_by_type["feat_req"]) + + def test_wrong_reqtype_pattern_fails( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + need = self._make_valid() + need["reqtype"] = "Performance" # not in ^(Functional|Interface|...)$ + assert_schema_invalid(need, schemas_by_type["feat_req"]) + + def test_content_not_validated( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + """content is in IGNORE_FIELDS — missing content must not fail.""" + need = self._make_valid() + # no 'content' key at all — should still pass + assert_schema_valid(need, schemas_by_type["feat_req"]) + + def test_invalid_optional_field_fails( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + need = self._make_valid() + need["reqcovered"] = "MAYBE" # not in ^(YES|NO)$ + assert_schema_invalid(need, schemas_by_type["feat_req"]) + + def test_valid_optional_field_passes( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + need = self._make_valid() + need["reqcovered"] = "YES" + assert_schema_valid(need, schemas_by_type["feat_req"]) + + def test_extra_unknown_fields_pass( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + need = self._make_valid() + need["unknown_field"] = "anything" + assert_schema_valid(need, schemas_by_type["feat_req"]) + + +# ============================================================================= +# comp: mandatory fields, no mandatory links +# ============================================================================= + + +class TestCompSchema: + """Integration 
tests for comp using the real metamodel.""" + + @staticmethod + def _make_valid() -> dict[str, Any]: + return { + "type": "comp", + "id": "comp__my_component", + "security": "YES", + "safety": "QM", + "status": "valid", + } + + def test_valid_need_passes( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + assert_schema_valid(self._make_valid(), schemas_by_type["comp"]) + + def test_missing_security_fails( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + need = self._make_valid() + del need["security"] + assert_schema_invalid(need, schemas_by_type["comp"]) + + def test_wrong_security_pattern_fails( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + need = self._make_valid() + need["security"] = "MAYBE" # not in ^(YES|NO)$ + assert_schema_invalid(need, schemas_by_type["comp"]) + + +# ============================================================================= +# feat: mandatory link with regex (includes: ^logic_arc_int(_op)*__.+$) +# ============================================================================= + + +class TestFeatSchema: + """Integration tests for feat — has a mandatory link with regex pattern.""" + + @staticmethod + def _make_valid() -> dict[str, Any]: + return { + "type": "feat", + "id": "feat__my_feature", + "security": "YES", + "safety": "QM", + "status": "valid", + "includes": ["logic_arc_int__something"], + } + + def test_valid_need_passes( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + assert_schema_valid(self._make_valid(), schemas_by_type["feat"]) + + def test_missing_mandatory_link_fails( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + need = self._make_valid() + del need["includes"] + assert_schema_invalid(need, schemas_by_type["feat"]) + + def test_empty_mandatory_link_fails( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + need = self._make_valid() + need["includes"] = [] # minItems: 1 violated + assert_schema_invalid(need, 
schemas_by_type["feat"]) From e0bdf843d1444fc9a1932088c69137a8c39e3e55 Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Mon, 9 Feb 2026 09:29:23 +0000 Subject: [PATCH 212/231] refactor: remove unused imports and clean up whitespace in test files --- src/extensions/score_metamodel/tests/test_sn_schemas.py | 3 --- .../score_metamodel/tests/test_sn_schemas_integration.py | 3 +-- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/src/extensions/score_metamodel/tests/test_sn_schemas.py b/src/extensions/score_metamodel/tests/test_sn_schemas.py index b4ed5c24..cb366858 100644 --- a/src/extensions/score_metamodel/tests/test_sn_schemas.py +++ b/src/extensions/score_metamodel/tests/test_sn_schemas.py @@ -16,8 +16,6 @@ from typing import Any, cast from unittest.mock import MagicMock, patch -import pytest - from src.extensions.score_metamodel.metamodel_types import ScoreNeedType from src.extensions.score_metamodel.sn_schemas import ( IGNORE_FIELDS, @@ -31,7 +29,6 @@ write_sn_schemas, ) - # ============================================================================= # Tests for get_pattern_schema # ============================================================================= diff --git a/src/extensions/score_metamodel/tests/test_sn_schemas_integration.py b/src/extensions/score_metamodel/tests/test_sn_schemas_integration.py index f9253814..dc54716b 100644 --- a/src/extensions/score_metamodel/tests/test_sn_schemas_integration.py +++ b/src/extensions/score_metamodel/tests/test_sn_schemas_integration.py @@ -76,7 +76,7 @@ not yet available in ubCode; this exclusion is explicitly tested. 
""" -from typing import Any, cast +from typing import Any import jsonschema_rs import pytest @@ -91,7 +91,6 @@ load_metamodel_data, ) - # ============================================================================= # Fixtures # ============================================================================= From 817498001133f28d018ca44d96bfe4ee73b97b78 Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Mon, 9 Feb 2026 09:38:48 +0000 Subject: [PATCH 213/231] feat: update documentation and comments for clarity on schema generation process --- src/extensions/score_metamodel/README.md | 80 ++++++++++++++++++++ src/extensions/score_metamodel/sn_schemas.py | 2 +- 2 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 src/extensions/score_metamodel/README.md diff --git a/src/extensions/score_metamodel/README.md b/src/extensions/score_metamodel/README.md new file mode 100644 index 00000000..2a27bb88 --- /dev/null +++ b/src/extensions/score_metamodel/README.md @@ -0,0 +1,80 @@ +# score_metamodel + +Sphinx extension that enforces the S-CORE metamodel on sphinx-needs documents. + +It reads `metamodel.yaml` (the single source of truth for all need types, fields, +links, and constraints) and validates every need in the documentation against +those rules. + +## What it does + +1. **Registers need types** with sphinx-needs (directives like `feat_req`, `comp`, + `workflow`, etc.) including their fields, links, and extra options. +2. **Generates `schemas.json`** from the metamodel so that sphinx-needs 6 can + validate needs at parse time (required fields, regex patterns, link + constraints). +3. **Runs post-build checks** that go beyond what JSON Schema can express + (graph traversals, prohibited words, ID format rules). + +## Metamodel overview + +`metamodel.yaml` defines: + +| Section | Purpose | +|---|---| +| `needs_types` | All need types (e.g. 
`feat_req`, `comp`, `document`) with their mandatory/optional fields and links | +| `needs_types_base_options` | Global optional fields applied to every type (e.g. `source_code_link`, `testlink`) | +| `needs_extra_links` | Custom link types (e.g. `satisfies`, `implements`, `mitigated_by`) | +| `prohibited_words_checks` | Forbidden words in titles/descriptions (e.g. "shall", "must") | +| `graph_checks` | Cross-need constraints (e.g. safety level decomposition rules) | + +Each need type can specify: + +- **`mandatory_options`** -- fields that must be present, with a regex pattern + the value must match (e.g. `status: ^(valid|invalid)$`). +- **`optional_options`** -- fields that, if present, must match a pattern. +- **`mandatory_links`** -- links that must have at least one target. The value + is either a plain type name (`stkh_req`) or a regex (`^logic_arc_int__.+$`). +- **`optional_links`** -- links that are allowed but not required. + +## Validation layers + +### Schema validation (sphinx-needs 6) + +`sn_schemas.py` translates the metamodel into a `schemas.json` file that +sphinx-needs evaluates at parse time. Each schema entry has: + +- **`select`** -- matches needs by their `type` field. +- **`validate.local`** -- JSON Schema checking the need's own properties + (required fields, regex patterns, mandatory links with `minItems: 1`). +- **`validate.network`** -- (not yet active) would validate linked needs' types. 
+ +### Post-build Python checks + +Checks in `checks/` run after the Sphinx build and cover rules that +JSON Schema cannot express: + +| Check | File | What it validates | +|---|---|---| +| `check_options` | `check_options.py` | Mandatory/optional field presence and patterns (legacy, overlaps with schema validation) | +| `check_extra_options` | `check_options.py` | Warns about fields not defined in the metamodel | +| `check_id_format` | `attributes_format.py` | ID structure (`<type>__<abbrev>__<element>`, part count) | +| `check_for_prohibited_words` | `attributes_format.py` | Forbidden words in titles | +| `check_metamodel_graph` | `graph_checks.py` | Cross-need constraints (e.g. ASIL_B needs must link to non-QM requirements) | +| `check_id_contains_feature` | `id_contains_feature.py` | Need IDs must contain the feature abbreviation from the file path | +| `check_standards` | `standards.py` | Standard compliance link validation | + +## File layout + +``` +score_metamodel/ + __init__.py # Sphinx extension entry point (setup, check orchestration) + metamodel.yaml # The S-CORE metamodel definition + metamodel_types.py # Type definitions (ScoreNeedType, etc.) + yaml_parser.py # Parses metamodel.yaml into MetaModelData + sn_schemas.py # Generates schemas.json for sphinx-needs 6 + log.py # CheckLogger for structured warning output + external_needs.py # External needs integration + checks/ # Post-build validation checks + tests/ # Unit and integration tests +``` diff --git a/src/extensions/score_metamodel/sn_schemas.py b/src/extensions/score_metamodel/sn_schemas.py index fccad876..a7354fc8 100644 --- a/src/extensions/score_metamodel/sn_schemas.py +++ b/src/extensions/score_metamodel/sn_schemas.py @@ -10,7 +10,7 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -"""Transforms the YAML metamodel into sphinx-needs JSON schema definitions. 
+"""Transforms the YAML metamodel into sphinx-needs >6 JSON schema definitions. Reads need types from the parsed metamodel (MetaModelData) and generates a ``schemas.json`` file that sphinx-needs uses to validate each need against From 374f4cd19385512266623a87bb3b97c4bc82bb84 Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Mon, 9 Feb 2026 13:31:58 +0000 Subject: [PATCH 214/231] feat: enhance schema validation by refining network validation for mandatory links and updating tests --- src/extensions/score_metamodel/README.md | 20 ++- src/extensions/score_metamodel/sn_schemas.py | 71 +++++++++-- .../score_metamodel/tests/test_sn_schemas.py | 118 ++++++++++++++++-- .../tests/test_sn_schemas_integration.py | 102 ++++++++++++++- 4 files changed, 287 insertions(+), 24 deletions(-) diff --git a/src/extensions/score_metamodel/README.md b/src/extensions/score_metamodel/README.md index 2a27bb88..9f47fda5 100644 --- a/src/extensions/score_metamodel/README.md +++ b/src/extensions/score_metamodel/README.md @@ -39,15 +39,29 @@ Each need type can specify: ## Validation layers -### Schema validation (sphinx-needs 6) +### Schema validation (sphinx-needs >6) `sn_schemas.py` translates the metamodel into a `schemas.json` file that sphinx-needs evaluates at parse time. Each schema entry has: - **`select`** -- matches needs by their `type` field. - **`validate.local`** -- JSON Schema checking the need's own properties - (required fields, regex patterns, mandatory links with `minItems: 1`). -- **`validate.network`** -- (not yet active) would validate linked needs' types. + (required fields, regex patterns on option values, mandatory links with + `minItems: 1`). Regex patterns on **link IDs** (e.g. checking that + `includes` entries match `^logic_arc_int(_op)*__.+$`) are not yet + validated here; the schema only enforces that at least one link exists. + ID-pattern checking is still done by the Python `validate_links()` in + `check_options.py`. 
+- **`validate.network`** -- validates that linked needs have the expected + `type` (e.g. `satisfies` targets must be `stkh_req`). Uses the + sphinx-needs `items.local` format so each linked need is checked + individually. Only **mandatory** links are checked here; optional link + type violations are left to the Python `validate_links()` check, which + treats them as informational (`treat_as_info=True`) rather than errors. + Fields that mix regex and plain targets (e.g. + `complies: std_wp, ^std_req__aspice_40__iic.*$`) are also excluded + because the `items` schema would incorrectly require all linked needs + to match the plain type. ### Post-build Python checks diff --git a/src/extensions/score_metamodel/sn_schemas.py b/src/extensions/score_metamodel/sn_schemas.py index a7354fc8..bba3a141 100644 --- a/src/extensions/score_metamodel/sn_schemas.py +++ b/src/extensions/score_metamodel/sn_schemas.py @@ -19,7 +19,7 @@ Schema structure per need type (sphinx-needs schema format): - ``select`` : matches needs by their ``type`` field - ``validate.local`` : validates the need's own properties (patterns, required) - - ``validate.network`` : validates properties of linked needs (NOT YET ACTIVE) + - ``validate.network`` : validates properties of linked needs """ import json @@ -77,7 +77,7 @@ def write_sn_schemas(app: Sphinx, metamodel: MetaModelData) -> None: def _classify_links( links: dict[str, Any], type_name: str, mandatory: bool -) -> tuple[dict[str, str], dict[str, str]]: +) -> tuple[dict[str, str], dict[str, list[str]]]: """Classify link values into regex patterns vs. target type names. In the metamodel YAML, a link value can be either: @@ -89,10 +89,11 @@ def _classify_links( Returns: A tuple of (regexes, targets) dicts, keyed by field name. + ``targets`` maps each field to a list of all allowed type names. 
""" label = "mandatory" if mandatory else "optional" regexes: dict[str, str] = {} - targets: dict[str, str] = {} + targets: dict[str, list[str]] = {} for field, value in links.items(): link_values = [v.strip() for v in value.split(",")] @@ -106,7 +107,9 @@ def _classify_links( ) regexes[field] = link_value else: - targets[field] = link_value + if field not in targets: + targets[field] = [] + targets[field].append(link_value) return regexes, targets @@ -116,6 +119,7 @@ def _build_local_validator( optional_fields: dict[str, str], mandatory_links_regexes: dict[str, str], optional_links_regexes: dict[str, str], + mandatory_links_targets: dict[str, list[str]] | None = None, ) -> dict[str, Any]: """Build the local validator dict for a need type's schema. @@ -146,6 +150,15 @@ def _build_local_validator( properties[field] = {"type": "array", "minItems": 1} required.append(field) + # Mandatory links (plain target types): must have at least one entry. + # The type of the linked need is checked via validate.network, but the + # list length constraint belongs in the local validator. + # Skip fields already handled by mandatory_links_regexes (mixed regex + plain). 
+ for field in mandatory_links_targets or {}: + if field not in properties: + properties[field] = {"type": "array", "minItems": 1} + required.append(field) + # Optional links (regex): allowed but not required # TODO: regex pattern matching on link IDs is not yet enabled for field in optional_links_regexes: @@ -167,7 +180,7 @@ def _build_need_type_schema(need_type: ScoreNeedType) -> dict[str, Any] | None: The returned dict has the sphinx-needs schema structure: - ``select``: matches needs by their ``type`` field - ``validate.local``: validates the need's own properties - - ``validate.network``: validates linked needs' types (NOT YET ACTIVE) + - ``validate.network``: validates linked needs' types """ mandatory_fields = need_type.get("mandatory_options", {}) optional_fields = need_type.get("optional_options", {}) @@ -182,13 +195,50 @@ def _build_need_type_schema(need_type: ScoreNeedType) -> dict[str, Any] | None: # Classify link values as regex patterns vs. target type names. # Note: links are still plain strings at this point (before postprocess_need_links). - mandatory_links_regexes, _ = _classify_links( + mandatory_links_regexes, mandatory_links_targets = _classify_links( mandatory_links, type_name, mandatory=True ) - optional_links_regexes, _ = _classify_links( + optional_links_regexes, optional_links_targets = _classify_links( optional_links, type_name, mandatory=False ) + # Build validate.network for link fields with plain type targets. + # The network schema uses sphinx-needs' ValidateSchemaType format: + # each entry's ``items.local`` is a JSON Schema applied to each linked need. 
+ network: dict[str, Any] = {} + + def add_network_entry(field: str, target_types: list[str]) -> None: + type_constraint: dict[str, Any] = ( + {"enum": target_types} + if len(target_types) > 1 + else {"const": target_types[0]} + ) + network[field] = { + "type": "array", + "items": { + "local": { + "properties": {"type": type_constraint}, + "required": ["type"], + } + }, + } + + # Only add network entries for *mandatory* links with exclusively plain + # type targets. Two categories are intentionally excluded: + # + # 1. Mixed regex+plain fields (e.g. "complies: std_wp, ^std_req__aspice_40__iic.*$"): + # The items schema would incorrectly require ALL linked needs to match + # the plain type, while some legitimately match the regex instead. + # + # 2. Optional links: The Python validate_links() in check_options.py treats + # optional link type violations as informational (treat_as_info=True), + # but schemas use a single severity ("violation") per need type. + # Including optional links would escalate info-level issues to errors. + # Optional link types are validated by the Python check instead. + for field, target_types in mandatory_links_targets.items(): + if field not in mandatory_links_regexes: + add_network_entry(field, target_types) + type_schema: dict[str, Any] = { "id": f"need-type-{type_name}", "severity": "violation", @@ -204,13 +254,12 @@ def _build_need_type_schema(need_type: ScoreNeedType) -> dict[str, Any] | None: optional_fields, mandatory_links_regexes, optional_links_regexes, + mandatory_links_targets, ), }, } - - # TODO: network validation is not yet enabled. - # When enabled, it would use the target type names (second return value - # of _classify_links) to check that linked needs have the expected type. 
+ if network: + type_schema["validate"]["network"] = network return type_schema diff --git a/src/extensions/score_metamodel/tests/test_sn_schemas.py b/src/extensions/score_metamodel/tests/test_sn_schemas.py index cb366858..70f51003 100644 --- a/src/extensions/score_metamodel/tests/test_sn_schemas.py +++ b/src/extensions/score_metamodel/tests/test_sn_schemas.py @@ -102,13 +102,13 @@ def test_plain_type_classified_as_target(self) -> None: links = {"satisfies": "comp"} regexes, targets = _classify_links(links, "my_type", mandatory=False) assert regexes == {} - assert targets == {"satisfies": "comp"} + assert targets == {"satisfies": ["comp"]} def test_comma_separated_mixed_values(self) -> None: links = {"related": "^arc_.+$, comp"} regexes, targets = _classify_links(links, "my_type", mandatory=True) assert regexes == {"related": "^arc_.+$"} - assert targets == {"related": "comp"} + assert targets == {"related": ["comp"]} def test_empty_links(self) -> None: regexes, targets = _classify_links({}, "my_type", mandatory=True) @@ -122,7 +122,7 @@ def test_multiple_fields(self) -> None: } regexes, targets = _classify_links(links, "my_type", mandatory=False) assert regexes == {"parent": "^parent__.+$"} - assert targets == {"satisfies": "req"} + assert targets == {"satisfies": ["req"]} def test_multiple_regex_for_same_field_logs_error(self) -> None: links = {"field": "^regex1$, ^regex2$"} @@ -132,12 +132,11 @@ def test_multiple_regex_for_same_field_logs_error(self) -> None: # Last regex overwrites previous ones assert regexes == {"field": "^regex2$"} - def test_multiple_plain_targets_last_wins(self) -> None: + def test_multiple_plain_targets_all_kept(self) -> None: links = {"field": "comp, sw_unit"} regexes, targets = _classify_links(links, "my_type", mandatory=True) assert regexes == {} - # Last target overwrites - assert targets == {"field": "sw_unit"} + assert targets == {"field": ["comp", "sw_unit"]} # 
============================================================================= @@ -206,6 +205,12 @@ def test_array_field_in_mandatory(self) -> None: assert result["properties"]["tags"]["type"] == "array" assert "items" in result["properties"]["tags"] + def test_mandatory_link_targets_required_with_min_items(self) -> None: + mandatory_link_targets = {"satisfies": ["comp", "sw_unit"]} + result = _build_local_validator({}, {}, {}, {}, mandatory_link_targets) + assert "satisfies" in result["required"] + assert result["properties"]["satisfies"] == {"type": "array", "minItems": 1} + # ============================================================================= # Tests for _build_need_type_schema @@ -287,8 +292,9 @@ def test_mandatory_links_with_plain_target(self) -> None: schema = _build_need_type_schema(need_type) assert schema is not None local = schema["validate"]["local"] - # Plain targets don't produce local validation entries - assert "satisfies" not in local["properties"] + # Mandatory plain-target links get minItems: 1 in local validator + assert "satisfies" in local["required"] + assert local["properties"]["satisfies"] == {"type": "array", "minItems": 1} def test_optional_links_with_regex(self) -> None: need_type = _make_need_type( @@ -397,3 +403,99 @@ def test_writes_valid_json_with_multiple_types(self, tmp_path: Path) -> None: assert len(data["schemas"]) == 2 ids = {s["id"] for s in data["schemas"]} assert ids == {"need-type-req", "need-type-spec"} + + +# ============================================================================= +# Tests for validate.network schema generation +# ============================================================================= + + +class TestNetworkValidation: + def test_single_mandatory_target_type(self) -> None: + need_type = _make_need_type( + mandatory_links={"satisfies": "comp"}, + ) + schema = _build_need_type_schema(need_type) + assert schema is not None + network = schema["validate"].get("network") + assert network is 
not None + assert "satisfies" in network + entry = network["satisfies"] + assert entry["type"] == "array" + assert entry["items"]["local"]["properties"]["type"]["const"] == "comp" + assert entry["items"]["local"]["required"] == ["type"] + # minItems is in local validator, not network + assert "minItems" not in entry + + def test_optional_target_types_excluded_from_network(self) -> None: + """Optional links are not validated via network schema. + + The Python validate_links() treats optional link type violations as + informational (treat_as_info=True). Since schemas use a single severity + per need type, including optional links would escalate info-level issues + to errors. + """ + need_type = _make_need_type( + optional_links={"implements": "logic_arc_int, real_arc_int_op"}, + ) + schema = _build_need_type_schema(need_type) + assert schema is not None + assert "network" not in schema["validate"] + + def test_mandatory_and_optional_combined(self) -> None: + """Only mandatory links appear in network; optional links are excluded.""" + need_type = _make_need_type( + mandatory_links={"satisfies": "comp"}, + optional_links={"implements": "logic_arc_int, real_arc_int_op"}, + ) + schema = _build_need_type_schema(need_type) + assert schema is not None + network = schema["validate"].get("network") + assert network is not None + # Only mandatory links in network + assert set(network.keys()) == {"satisfies"} + assert network["satisfies"]["items"]["local"]["properties"]["type"]["const"] == "comp" + + def test_mandatory_plain_target_gets_local_min_items(self) -> None: + need_type = _make_need_type( + mandatory_links={"satisfies": "comp"}, + ) + schema = _build_need_type_schema(need_type) + assert schema is not None + local = schema["validate"]["local"] + assert "satisfies" in local["required"] + assert local["properties"]["satisfies"] == {"type": "array", "minItems": 1} + + def test_optional_plain_target_no_local_min_items(self) -> None: + need_type = _make_need_type( + 
optional_links={"implements": "logic_arc_int"}, + ) + schema = _build_need_type_schema(need_type) + assert schema is not None + local = schema["validate"]["local"] + assert "implements" not in local.get("required", []) + + def test_no_network_when_only_regex_links(self) -> None: + need_type = _make_need_type( + mandatory_links={"includes": "^logic_arc_int__.+$"}, + ) + schema = _build_need_type_schema(need_type) + assert schema is not None + assert "network" not in schema["validate"] + + def test_mixed_regex_and_plain_skips_network(self) -> None: + """When a field mixes regex and plain targets, no network entry is generated. + + The items schema would require ALL linked needs to match the plain type, + but some legitimately match the regex instead. Validated by Python checks. + """ + need_type = _make_need_type( + optional_links={"complies": "std_wp, ^std_req__aspice_40__iic.*$"}, + ) + schema = _build_need_type_schema(need_type) + assert schema is not None + # Regex part goes to local validator + local = schema["validate"]["local"] + assert local["properties"]["complies"] == {"type": "array"} + # No network entry for mixed fields + assert "network" not in schema["validate"] diff --git a/src/extensions/score_metamodel/tests/test_sn_schemas_integration.py b/src/extensions/score_metamodel/tests/test_sn_schemas_integration.py index dc54716b..e5c010d6 100644 --- a/src/extensions/score_metamodel/tests/test_sn_schemas_integration.py +++ b/src/extensions/score_metamodel/tests/test_sn_schemas_integration.py @@ -68,8 +68,6 @@ ``validate.local``. Limitations: - - Network validation (checking linked needs' types) is not yet active - in ``sn_schemas.py`` and therefore not tested here. - Graph checks (safety level decomposition, prohibited words) remain in Python code and are outside the scope of schema-based validation. 
- The ``content`` field is excluded via ``IGNORE_FIELDS`` because it is @@ -288,6 +286,7 @@ def _make_valid() -> dict[str, Any]: "security": "YES", "safety": "QM", "status": "valid", + "satisfies": ["stkh_req__some_need"], } def test_valid_need_passes( @@ -415,6 +414,7 @@ def _make_valid() -> dict[str, Any]: "safety": "QM", "status": "valid", "includes": ["logic_arc_int__something"], + "consists_of": ["comp__some_component"], } def test_valid_need_passes( @@ -435,3 +435,101 @@ def test_empty_mandatory_link_fails( need = self._make_valid() need["includes"] = [] # minItems: 1 violated assert_schema_invalid(need, schemas_by_type["feat"]) + + +# ============================================================================= +# Network validation: plain type targets produce validate.network entries +# ============================================================================= + + +class TestNetworkValidation: + """Verify validate.network schemas for types with plain-target links.""" + + def test_mandatory_link_has_network_entry( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + """feat_req: satisfies -> stkh_req produces a network entry.""" + schema = schemas_by_type["feat_req"] + network = schema["validate"].get("network") + assert network is not None + assert "satisfies" in network + entry = network["satisfies"] + assert entry["type"] == "array" + assert entry["items"]["local"]["properties"]["type"]["const"] == "stkh_req" + assert entry["items"]["local"]["required"] == ["type"] + + def test_mandatory_link_has_local_min_items( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + """feat_req: mandatory satisfies link gets minItems: 1 in local validator.""" + schema = schemas_by_type["feat_req"] + local = schema["validate"]["local"] + assert "satisfies" in local["required"] + assert local["properties"]["satisfies"] == {"type": "array", "minItems": 1} + + def test_optional_link_excluded_from_network( + self, schemas_by_type: dict[str, 
dict[str, Any]] + ) -> None: + """tool_req: satisfies is optional, so no network entry is generated. + + Optional link type violations are treated as informational by the Python + validate_links() check (treat_as_info=True). Since schemas use a single + severity per need type, optional links are excluded from network to avoid + escalating info-level issues to errors. + """ + schema = schemas_by_type["tool_req"] + network = schema["validate"].get("network", {}) + assert "satisfies" not in network + + def test_network_validates_linked_need_type( + self, schemas_by_type: dict[str, dict[str, Any]] + ) -> None: + """The local schema inside items validates a linked need's type field.""" + schema = schemas_by_type["feat_req"] + network = schema["validate"]["network"] + local_schema = network["satisfies"]["items"]["local"] + validator = jsonschema_rs.Draft7Validator(local_schema) + # Valid linked need + assert validator.is_valid({"type": "stkh_req"}) + # Invalid linked need type + assert not validator.is_valid({"type": "comp_req"}) + + def test_all_mandatory_plain_links_have_local_and_network( + self, + schemas_by_type: dict[str, dict[str, Any]], + need_types_by_directive: dict[str, ScoreNeedType], + ) -> None: + """Structural sweep: every mandatory plain-target link has both entries. + + Fields that mix regex and plain targets are excluded from network + validation (the items schema would incorrectly require ALL linked + needs to match the plain type). 
+ """ + for type_name, schema in schemas_by_type.items(): + need_type = need_types_by_directive[type_name] + local = schema["validate"]["local"] + network = schema["validate"].get("network", {}) + for link_field, link_value in need_type.get("mandatory_links", {}).items(): + assert isinstance(link_value, str) # before postprocess_need_links + values = [v.strip() for v in link_value.split(",")] + plain_targets = [v for v in values if not v.startswith("^")] + has_regex = any(v.startswith("^") for v in values) + if not plain_targets: + continue + # Must have local minItems: 1 + assert link_field in local["required"], ( + f"{type_name}.{link_field}: missing from local required" + ) + assert local["properties"][link_field] == { + "type": "array", + "minItems": 1, + }, f"{type_name}.{link_field}: wrong local properties" + # Network type constraint only for non-mixed fields + if has_regex: + assert link_field not in network, ( + f"{type_name}.{link_field}: mixed field should NOT be in network" + ) + else: + assert link_field in network, ( + f"{type_name}.{link_field}: missing from network" + ) From ad533453b30cef22d43272ce77047c2a31599f36 Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Mon, 9 Feb 2026 13:42:18 +0000 Subject: [PATCH 215/231] refactor: simplify optional links classification and improve test assertions for clarity --- src/extensions/score_metamodel/sn_schemas.py | 2 +- src/extensions/score_metamodel/tests/test_sn_schemas.py | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/extensions/score_metamodel/sn_schemas.py b/src/extensions/score_metamodel/sn_schemas.py index bba3a141..153bae9c 100644 --- a/src/extensions/score_metamodel/sn_schemas.py +++ b/src/extensions/score_metamodel/sn_schemas.py @@ -198,7 +198,7 @@ def _build_need_type_schema(need_type: ScoreNeedType) -> dict[str, Any] | None: mandatory_links_regexes, mandatory_links_targets = _classify_links( mandatory_links, type_name, mandatory=True ) - 
optional_links_regexes, optional_links_targets = _classify_links( + optional_links_regexes, _ = _classify_links( optional_links, type_name, mandatory=False ) diff --git a/src/extensions/score_metamodel/tests/test_sn_schemas.py b/src/extensions/score_metamodel/tests/test_sn_schemas.py index 70f51003..84b534d6 100644 --- a/src/extensions/score_metamodel/tests/test_sn_schemas.py +++ b/src/extensions/score_metamodel/tests/test_sn_schemas.py @@ -454,7 +454,10 @@ def test_mandatory_and_optional_combined(self) -> None: assert network is not None # Only mandatory links in network assert set(network.keys()) == {"satisfies"} - assert network["satisfies"]["items"]["local"]["properties"]["type"]["const"] == "comp" + assert ( + network["satisfies"]["items"]["local"]["properties"]["type"]["const"] + == "comp" + ) def test_mandatory_plain_target_gets_local_min_items(self) -> None: need_type = _make_need_type( From ce37b3517880d701f3e1c3311b28cb2d5c3698c7 Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Mon, 9 Feb 2026 13:55:59 +0000 Subject: [PATCH 216/231] refactor: improve comment formatting for clarity in schema validation logic --- src/extensions/score_metamodel/sn_schemas.py | 3 ++- .../score_metamodel/tests/test_sn_schemas_integration.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/extensions/score_metamodel/sn_schemas.py b/src/extensions/score_metamodel/sn_schemas.py index 153bae9c..0f94c8f9 100644 --- a/src/extensions/score_metamodel/sn_schemas.py +++ b/src/extensions/score_metamodel/sn_schemas.py @@ -226,7 +226,8 @@ def add_network_entry(field: str, target_types: list[str]) -> None: # Only add network entries for *mandatory* links with exclusively plain # type targets. Two categories are intentionally excluded: # - # 1. Mixed regex+plain fields (e.g. "complies: std_wp, ^std_req__aspice_40__iic.*$"): + # 1. Mixed regex+plain fields (e.g. 
+ # "complies: std_wp, ^std_req__aspice_40__iic.*$"): # The items schema would incorrectly require ALL linked needs to match # the plain type, while some legitimately match the regex instead. # diff --git a/src/extensions/score_metamodel/tests/test_sn_schemas_integration.py b/src/extensions/score_metamodel/tests/test_sn_schemas_integration.py index e5c010d6..2b7f3463 100644 --- a/src/extensions/score_metamodel/tests/test_sn_schemas_integration.py +++ b/src/extensions/score_metamodel/tests/test_sn_schemas_integration.py @@ -527,7 +527,8 @@ def test_all_mandatory_plain_links_have_local_and_network( # Network type constraint only for non-mixed fields if has_regex: assert link_field not in network, ( - f"{type_name}.{link_field}: mixed field should NOT be in network" + f"{type_name}.{link_field}: mixed field should NOT be in " + "network" ) else: assert link_field in network, ( From d428a31bc777875568d61cdd69ce2a8d3123d68c Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Wed, 11 Feb 2026 08:37:09 +0000 Subject: [PATCH 217/231] feat: add logging for existing links and warnings for missing needs in schema validation --- .../score_source_code_linker/__init__.py | 58 +++++++++++-------- 1 file changed, 34 insertions(+), 24 deletions(-) diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index 6e5e07b4..5be814ee 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -318,6 +318,38 @@ def find_need(all_needs: NeedsMutable, id: str) -> NeedItem | None: return all_needs.get(id) +def _log_needs_with_existing_links(needs: NeedsMutable) -> None: + """Log needs that already have source_code_link or testlink set.""" + if LOGGER.getEffectiveLevel() >= 10: + for id, need in needs.items(): + if need.get("source_code_link"): + LOGGER.debug( + f"?? 
Need {id} already has source_code_link: " + f"{need.get('source_code_link')}" + ) + if need.get("testlink"): + LOGGER.debug( + f"?? Need {id} already has testlink: {need.get('testlink')}" + ) + + +def _warn_missing_need(source_code_links: SourceCodeLinks) -> None: + """Log warnings when a need referenced by source/test links is not found.""" + # TODO: print github annotations as in https://github.com/eclipse-score/bazel_registry/blob/7423b9996a45dd0a9ec868e06a970330ee71cf4f/tools/verify_semver_compatibility_level.py#L126-L129 + for n in source_code_links.links.CodeLinks: + LOGGER.warning( + f"{n.file}:{n.line}: Could not find {source_code_links.need} " + "in documentation [CODE LINK]", + type="score_source_code_linker", + ) + for n in source_code_links.links.TestLinks: + LOGGER.warning( + f"{n.file}:{n.line}: Could not find {source_code_links.need} " + "in documentation [TEST LINK]", + type="score_source_code_linker", + ) + + # re-qid: gd_req__req__attr_impl def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: """ @@ -339,17 +371,7 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: ) # TODO: why do we create a copy? Can we also needs_copy = needs[:]? copy(needs)? # Enabled automatically for DEBUGGING - if LOGGER.getEffectiveLevel() >= 10: - for id, need in needs.items(): - if need.get("source_code_link"): - LOGGER.debug( - f"?? Need {id} already has source_code_link: " - f"{need.get('source_code_link')}" - ) - if need.get("testlink"): - LOGGER.debug( - f"?? 
Need {id} already has testlink: {need.get('testlink')}" - ) + _log_needs_with_existing_links(needs) source_code_links_by_need = load_source_code_links_combined_json( get_cache_filename(app.outdir, "score_scl_grouped_cache.json") @@ -358,19 +380,7 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: for source_code_links in source_code_links_by_need: need = find_need(needs_copy, source_code_links.need) if need is None: - # TODO: print github annotations as in https://github.com/eclipse-score/bazel_registry/blob/7423b9996a45dd0a9ec868e06a970330ee71cf4f/tools/verify_semver_compatibility_level.py#L126-L129 - for n in source_code_links.links.CodeLinks: - LOGGER.warning( - f"{n.file}:{n.line}: Could not find {source_code_links.need} " - "in documentation [CODE LINK]", - type="score_source_code_linker", - ) - for n in source_code_links.links.TestLinks: - LOGGER.warning( - f"{n.file}:{n.line}: Could not find {source_code_links.need} " - "in documentation [TEST LINK]", - type="score_source_code_linker", - ) + _warn_missing_need(source_code_links) continue need_as_dict = cast(dict[str, object], need) From e49778e65d43155e2df337726e5982acab503887 Mon Sep 17 00:00:00 2001 From: Chidananda Swamy R <chidananda.swamy@ltts.com> Date: Wed, 11 Feb 2026 14:11:59 +0530 Subject: [PATCH 218/231] Fix grammar and broken page links in README.md (#395) Corrected typo, grammatical errors and broken page reference links in README. Signed-off-by: Chidananda Swamy R <chidananda.swamy@ltts.com> --- src/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/README.md b/src/README.md index e84f9279..61e08db9 100644 --- a/src/README.md +++ b/src/README.md @@ -30,7 +30,7 @@ It should be treated as a 'get-started' guide, giving you all needed information - Python - Git - **VSCode** (Optional) - - Several integrations and guides are development primarily with VS Code in mind. 
+ - Several integrations and guides are developed primarily with VS Code in mind. @@ -68,9 +68,9 @@ src/ ``` -Find all important bazel commands in the [project README](/README.md) +Find all important Bazel commands in the [project README](/README.md) -Find everything related to testing and how to add your on test suite [here](/tools/testing/pytest/README.md) +Find everything related to testing and how to add your own test suite [here](/src/tests/README.md) ## Developing new tools @@ -78,7 +78,7 @@ Find everything related to testing and how to add your on test suite [here](/too 2. Create a dedicated test directory 3. Include an appropriate README in markdown -> If you want to develop your own sphinx extension, check out the [extensions guide](/src/extensions/README.md) +> If you want to develop your own Sphinx extension, check out the [extensions guide](/src/extensions/README.md) ## Updating dependencies From 7330a45607efa74680c93b25829d92ff23c7d78e Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Wed, 11 Feb 2026 08:42:38 +0000 Subject: [PATCH 219/231] feat: enhance README to clarify metamodel validation in IDE with ubCode diagnostics --- src/extensions/score_metamodel/README.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/extensions/score_metamodel/README.md b/src/extensions/score_metamodel/README.md index 9f47fda5..9f1b8296 100644 --- a/src/extensions/score_metamodel/README.md +++ b/src/extensions/score_metamodel/README.md @@ -12,7 +12,10 @@ those rules. `workflow`, etc.) including their fields, links, and extra options. 2. **Generates `schemas.json`** from the metamodel so that sphinx-needs 6 can validate needs at parse time (required fields, regex patterns, link - constraints). + constraints). 
Because ubCode (the VS Code extension for sphinx-needs) + evaluates these schemas during editing, **metamodel violations are shown + as diagnostics directly in the IDE** -- catching errors early with + lightweight, fast rendering, without needing a full Sphinx build. 3. **Runs post-build checks** that go beyond what JSON Schema can express (graph traversals, prohibited words, ID format rules). @@ -63,7 +66,7 @@ sphinx-needs evaluates at parse time. Each schema entry has: because the `items` schema would incorrectly require all linked needs to match the plain type. -### Post-build Python checks +### Post-build S-Core metamodel checks Checks in `checks/` run after the Sphinx build and cover rules that JSON Schema cannot express: From 6ee8e869df9c54421bc953e15fe4b1c84f201cf8 Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Wed, 11 Feb 2026 09:07:39 +0000 Subject: [PATCH 220/231] feat: refine regex patterns for version validation in needs_types --- src/extensions/score_metamodel/metamodel.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/extensions/score_metamodel/metamodel.yaml b/src/extensions/score_metamodel/metamodel.yaml index 6fb4c36b..129a5353 100644 --- a/src/extensions/score_metamodel/metamodel.yaml +++ b/src/extensions/score_metamodel/metamodel.yaml @@ -266,8 +266,8 @@ needs_types: testcovered: ^(YES|NO)$ hash: ^.*$ # req-Id: tool_req__docs_req_attr_validity_correctness - valid_from: ^v(0|[1-9][0-9]*)\.(?:0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))?$ - valid_until: ^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))?$ + valid_from: ^v(0|[1-9]\d*)\.(0|[1-9]\d*)(\.(0|[1-9]\d*))?$ + valid_until: ^v(0|[1-9]\d*)\.(0|[1-9]\d*)(\.(0|[1-9]\d*))?$ tags: - requirement - requirement_excl_process @@ -299,8 +299,8 @@ needs_types: testcovered: ^(YES|NO)$ hash: ^.*$ # req-Id: tool_req__docs_req_attr_validity_correctness - valid_from: ^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))?$ - valid_until: 
^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))?$ + valid_from: ^v(0|[1-9]\d*)\.(0|[1-9]\d*)(\.(0|[1-9]\d*))?$ + valid_until: ^v(0|[1-9]\d*)\.(0|[1-9]\d*)(\.(0|[1-9]\d*))?$ optional_links: belongs_to: feat # for evaluation tags: From 558b210e0a2231b86d0444efc17a87d18f1291c6 Mon Sep 17 00:00:00 2001 From: Dan Calavrezo <195309321+dcalavrezo-qorix@users.noreply.github.com> Date: Thu, 12 Feb 2026 17:58:29 +0200 Subject: [PATCH 221/231] docs: uplifted version of process (#399) --- MODULE.bazel | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 18dbdc78..3881f894 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -100,12 +100,7 @@ http_file( # docs dependency # Note: requirements were last aligned with 1.2.0, # the switch to 1.3.1 is purely to drop the dependency on docs-as-code 1.x. -bazel_dep(name = "score_process", version = "1.4.2") -git_override( - module_name = "score_process", - commit = "43b3a13eae17f2e539fb8cca2beedb69717b2e12", - remote = "https://github.com/eclipse-score/process_description.git", -) +bazel_dep(name = "score_process", version = "1.4.3") # Add Linter bazel_dep(name = "rules_multitool", version = "1.9.0") From f505139d440076f0ec6ecfe80c0877b74c130b7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= <maximilian.pollak@qorix.com> Date: Fri, 13 Feb 2026 09:40:53 +0100 Subject: [PATCH 222/231] Increase Tooling version (#400) Needed to fix circular dependency broken release once. 
Will be fixed for next major release (or removed) --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index 3881f894..9949a562 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -104,7 +104,7 @@ bazel_dep(name = "score_process", version = "1.4.3") # Add Linter bazel_dep(name = "rules_multitool", version = "1.9.0") -bazel_dep(name = "score_tooling", version = "1.1.1") +bazel_dep(name = "score_tooling", version = "1.1.2-RC") multitool_root = use_extension("@rules_multitool//multitool:extension.bzl", "multitool") use_repo(multitool_root, "actionlint_hub", "multitool", "ruff_hub", "shellcheck_hub", "yamlfmt_hub") From 5517b366f160b3b691e4272b04cf0022099b85ad Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Fri, 13 Feb 2026 11:09:40 +0000 Subject: [PATCH 223/231] feat: enhance JSON schema generation to allow empty strings for optional fields --- src/extensions/score_metamodel/README.md | 121 ++++++++++++++++++ src/extensions/score_metamodel/sn_schemas.py | 40 ++++-- .../score_metamodel/tests/test_sn_schemas.py | 27 ++++ 3 files changed, 178 insertions(+), 10 deletions(-) diff --git a/src/extensions/score_metamodel/README.md b/src/extensions/score_metamodel/README.md index 9f1b8296..edd285e8 100644 --- a/src/extensions/score_metamodel/README.md +++ b/src/extensions/score_metamodel/README.md @@ -81,6 +81,127 @@ JSON Schema cannot express: | `check_id_contains_feature` | `id_contains_feature.py` | Need IDs must contain the feature abbreviation from the file path | | `check_standards` | `standards.py` | Standard compliance link validation | +### Coverage comparison + +Schema column: **yes** = implemented, **feasible** = could be added, **--** = not possible. 
+ +| Rule | Schema (`sn_schemas.py` + sphinx-needs) | S-Core metamodel (`checks/`) | Notes | +|---|:---:|:---:|---| +| ID required | yes | -- | `needs_id_required` (sphinx-needs built-in) | +| ID basic regex | yes | -- | `needs_id_regex` (sphinx-needs built-in) | +| Dead link detection | yes | -- | `allow_dead_links` (sphinx-needs built-in) | +| Mandatory field presence | yes | yes | Both enforce `required` | +| Mandatory field regex | yes | yes | Same pattern from metamodel | +| Optional field regex | yes | yes | Schema: only if field present | +| Mandatory link presence | yes | yes | Schema: `minItems: 1` in local | +| Mandatory link target type | yes | yes | Schema: `validate.network` | +| Mandatory link ID regex | feasible | yes | Can add `items.pattern` in local; TODO in code | +| Optional link target type | feasible | yes (info) | Split into separate schema with `severity: "info"` | +| Optional link ID regex | feasible | yes (info) | Same split-severity approach | +| Mixed regex+plain link type | -- | yes | `ValidateSchemaType` has no `anyOf`/`oneOf` | +| ID structure (parts count) | feasible | yes | Per-type pattern from `parts` field; cannot check file-path part | +| Prohibited words | feasible | yes | Negative lookahead regex on `title`; less precise than Python | +| Graph constraints | -- | yes | Cross-need traversals beyond JSON Schema | +| Undefined extra options | -- | yes | `unevaluatedProperties` would reject sphinx-needs internal fields | + +#### Rule explanations + +**ID required** -- +Every need directive must have a manually set ID (e.g. `.. feat_req:: feat_req__my_feature__001`). +Enforced by sphinx-needs' `needs_id_required = True` in `__init__.py`. + +**ID basic regex** -- +The ID must match `^[A-Za-z0-9_-]{6,}` (at least 6 alphanumeric/underscore/hyphen characters). +Enforced by sphinx-needs' `needs_id_regex` in `__init__.py`. The build stops if a need +has an invalid ID. 
+ +**Dead link detection** -- +A link like `satisfies: nonexistent_need_id` that points to a need that does not exist +triggers a sphinx-needs warning. Controlled per link type via `allow_dead_links` in +`needs_extra_links`. + +**Mandatory field presence** -- +A `feat_req` must have a `status` field. If it is missing, both the schema +(`"required": ["status"]`) and the Python check flag it. + +**Mandatory field regex** -- +The `status` field on `feat_req` must match `^(valid|invalid)$`. Both the schema +(`"pattern": "^(valid|invalid)$"`) and the Python check validate this. Writing +`status: approved` is rejected. + +**Optional field regex** -- +`document` has `optional_options: { author: ^.*$ }`. If `author` is present, it must +match the pattern. If absent, no error. The schema includes it in `properties` but +not in `required`. + +**Mandatory link presence** -- +`feat_req` has `mandatory_links: { satisfies: stkh_req }`. At least one target must +be provided. The schema enforces this with `"satisfies": {"type": "array", "minItems": 1}` +in `validate.local`. + +**Mandatory link target type** -- +`feat_req.satisfies` must point to a need of type `stkh_req`. The schema enforces +this with `validate.network`: each linked need is checked for +`{"type": {"const": "stkh_req"}}`. If a `feat_req` links to a `comp` via `satisfies`, +the schema rejects it. + +**Mandatory link ID regex** (feasible) -- +`feat` has `mandatory_links: { includes: ^logic_arc_int(_op)*__.+$ }`. The link +target IDs (strings like `logic_arc_int__something`) must match this regex. +Currently the schema only enforces that at least one link exists (`minItems: 1`), +not the ID pattern. *Feasible*: add `"items": {"pattern": "^logic_arc_int(_op)*__.+$"}` +to the local schema. There is a TODO in the code for this. + +**Optional link target type** (feasible) -- +`tool_req` has `optional_links: { satisfies: gd_req, stkh_req }`. If provided, targets +should be `gd_req` or `stkh_req`. 
The Python check validates this but treats violations +as informational (non-fatal). The schema currently skips this because all schema entries +use `severity: "violation"` and there is no way to set a different severity for one +rule within the same schema entry. *Feasible*: create a second schema entry for the +same need type with `severity: "info"` that only checks optional link targets. + +**Optional link ID regex** (feasible) -- +Same as above, but for regex-based link IDs on optional links (e.g. +`optional_links: { links: ^.*$ }` on `tsf`). Same severity-split approach would work. + +**Mixed regex+plain link type** (not possible) -- +`workproduct` has `optional_links: { complies: std_wp, ^std_req__aspice_40__iic.*$ }`. +A `complies` target is valid if it is either a need of type `std_wp` OR has an ID +matching the regex. The `validate.network` `items` schema applies to ALL linked needs +identically, so it cannot express "match type X *or* match regex Y". +sphinx-needs' `ValidateSchemaType` does not support `anyOf`/`oneOf`. +These mixed fields are validated only by the Python check. + +**ID structure (parts count)** (feasible) -- +`feat_req` has `parts: 3`, meaning its ID must have 3 segments separated by `__` +(e.g. `feat_req__my_feature__001`). The Python check (`check_id_format`) splits on +`__` and counts parts. *Feasible*: generate a per-type regex like +`^feat_req__[^_]+(__[^_]+){1}$` in the schema. However, the Python check also +validates that the ID contains the feature abbreviation from the file path +(`check_id_contains_feature`), which depends on runtime context and cannot be +expressed in a schema. + +**Prohibited words** (feasible) -- +The metamodel forbids words like "shall", "must", "will" in need titles (for +requirement types). The Python check splits the title into words and checks each one. +*Feasible*: add a negative lookahead regex on the `title` field, e.g. +`^(?!.*\b(shall|must|will)\b).*$`. 
This is less precise than the Python check +(which normalizes case, strips punctuation) but catches most violations. + +**Graph constraints** (not possible) -- +`graph_checks` in the metamodel define rules like "an ASIL_B need must link to at +least one non-QM requirement via `satisfies`". This requires traversing the need +graph across multiple levels, which is fundamentally beyond what JSON Schema can +express. Only the Python check (`check_metamodel_graph`) can do this. + +**Undefined extra options** (not possible) -- +The Python check (`check_extra_options`) warns when a need has fields not defined +in the metamodel (e.g. a typo like `saftey` instead of `safety`). In theory, +`unevaluatedProperties: false` could reject unknown fields. In practice, sphinx-needs +adds many internal fields to needs (e.g. `docname`, `lineno`, `is_external`, computed +fields from dynamic functions) that are not in the metamodel. Enabling this would +cause false positives on every need. + ## File layout ``` diff --git a/src/extensions/score_metamodel/sn_schemas.py b/src/extensions/score_metamodel/sn_schemas.py index 0f94c8f9..1f04ab5e 100644 --- a/src/extensions/score_metamodel/sn_schemas.py +++ b/src/extensions/score_metamodel/sn_schemas.py @@ -136,13 +136,14 @@ def _build_local_validator( if field in IGNORE_FIELDS: continue required.append(field) - properties[field] = get_field_pattern_schema(field, pattern) + properties[field] = get_field_pattern_schema(field, pattern, is_optional=False) # Optional fields: if present, must match the regex pattern + # Allow empty strings to align with Python checker behavior for field, pattern in optional_fields.items(): if field in IGNORE_FIELDS: continue - properties[field] = get_field_pattern_schema(field, pattern) + properties[field] = get_field_pattern_schema(field, pattern, is_optional=True) # Mandatory links (regex): must have at least one entry # TODO: regex pattern matching on link IDs is not yet enabled @@ -265,28 +266,47 @@ def 
add_network_entry(field: str, target_types: list[str]) -> None: return type_schema -def get_field_pattern_schema(field: str, pattern: str) -> dict[str, Any]: +def get_field_pattern_schema(field: str, pattern: str, is_optional: bool = False) -> dict[str, Any]: """Return the appropriate JSON schema for a field's regex pattern. Array-valued fields (like ``tags``) get an array-of-strings schema; scalar fields get a plain string schema. + + For optional fields, the schema allows empty strings to align with the + Python metamodel checker's behavior (which treats empty strings as absent). """ if field in SN_ARRAY_FIELDS: - return get_array_pattern_schema(pattern) - return get_pattern_schema(pattern) + return get_array_pattern_schema(pattern, is_optional=is_optional) + return get_pattern_schema(pattern, is_optional=is_optional) + +def get_pattern_schema(pattern: str, is_optional: bool = False) -> dict[str, Any]: + """Return a JSON schema that validates a string against a regex pattern. -def get_pattern_schema(pattern: str) -> dict[str, str]: - """Return a JSON schema that validates a string against a regex pattern.""" + For optional fields, allows either an empty string OR a string matching + the pattern, matching the Python checker's behavior where empty strings + are treated as "absent" and not validated. + """ + if is_optional: + # Allow empty strings for optional fields (Python checker treats "" as absent) + # Use regex alternation to match either empty string or the original pattern + return { + "type": "string", + "pattern": f"^$|{pattern}", + } return { "type": "string", "pattern": pattern, } -def get_array_pattern_schema(pattern: str) -> dict[str, Any]: - """Return a JSON schema that validates an array where each item matches a regex.""" +def get_array_pattern_schema(pattern: str, is_optional: bool = False) -> dict[str, Any]: + """Return a JSON schema that validates an array where each item matches a regex. 
+ + For optional fields, allows empty strings in the array to align with the + Python checker's behavior. + """ return { "type": "array", - "items": get_pattern_schema(pattern), + "items": get_pattern_schema(pattern, is_optional=is_optional), } diff --git a/src/extensions/score_metamodel/tests/test_sn_schemas.py b/src/extensions/score_metamodel/tests/test_sn_schemas.py index 84b534d6..ad7949eb 100644 --- a/src/extensions/score_metamodel/tests/test_sn_schemas.py +++ b/src/extensions/score_metamodel/tests/test_sn_schemas.py @@ -45,6 +45,16 @@ def test_preserves_complex_regex(self) -> None: assert result["type"] == "string" assert result["pattern"] == pattern + def test_optional_allows_empty_string(self) -> None: + result = get_pattern_schema("^https://github.com/.*$", is_optional=True) + assert result == {"type": "string", "pattern": "^$|^https://github.com/.*$"} + + def test_mandatory_does_not_allow_empty_string(self) -> None: + result = get_pattern_schema("^[A-Z]+$", is_optional=False) + assert result == {"type": "string", "pattern": "^[A-Z]+$"} + # Should not have alternation with empty string + assert "^$|" not in result.get("pattern", "") + # ============================================================================= # Tests for get_array_pattern_schema @@ -64,6 +74,11 @@ def test_items_match_get_pattern_schema(self) -> None: result = get_array_pattern_schema(pattern) assert result["items"] == get_pattern_schema(pattern) + def test_optional_array_allows_empty_string_items(self) -> None: + result = get_array_pattern_schema("^tag_.*$", is_optional=True) + assert result["type"] == "array" + assert result["items"] == {"type": "string", "pattern": "^$|^tag_.*$"} + # ============================================================================= # Tests for get_field_pattern_schema @@ -85,6 +100,16 @@ def test_unknown_field_returns_string_schema(self) -> None: result = get_field_pattern_schema("some_custom_field", "^.*$") assert result["type"] == "string" + def 
test_optional_scalar_field_allows_empty_string(self) -> None: + result = get_field_pattern_schema("mitigation_issue", "^https://github.com/.*$", is_optional=True) + assert result == {"type": "string", "pattern": "^$|^https://github.com/.*$"} + + def test_mandatory_scalar_field_does_not_allow_empty_string(self) -> None: + result = get_field_pattern_schema("status", "^(valid|invalid)$", is_optional=False) + assert result == {"type": "string", "pattern": "^(valid|invalid)$"} + # Should not have alternation with empty string + assert "^$|" not in result.get("pattern", "") + # ============================================================================= # Tests for _classify_links @@ -157,6 +182,8 @@ def test_optional_fields_not_required(self) -> None: result = _build_local_validator({}, optional, {}, {}) assert "comment" not in result["required"] assert "comment" in result["properties"] + # Optional fields should allow empty strings via pattern alternation + assert result["properties"]["comment"] == {"type": "string", "pattern": "^$|^.*$"} def test_ignored_fields_excluded(self) -> None: mandatory = {field: "^.*$" for field in IGNORE_FIELDS} From 22f830d2a9925ec6d0117270af95eba724dd7366 Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Fri, 13 Feb 2026 11:17:59 +0000 Subject: [PATCH 224/231] style: format function parameters for better readability in schema functions --- src/extensions/score_metamodel/sn_schemas.py | 4 +++- .../score_metamodel/tests/test_sn_schemas.py | 13 ++++++++++--- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/src/extensions/score_metamodel/sn_schemas.py b/src/extensions/score_metamodel/sn_schemas.py index 1f04ab5e..f5953d98 100644 --- a/src/extensions/score_metamodel/sn_schemas.py +++ b/src/extensions/score_metamodel/sn_schemas.py @@ -266,7 +266,9 @@ def add_network_entry(field: str, target_types: list[str]) -> None: return type_schema -def get_field_pattern_schema(field: str, pattern: str, is_optional: 
bool = False) -> dict[str, Any]: +def get_field_pattern_schema( + field: str, pattern: str, is_optional: bool = False +) -> dict[str, Any]: """Return the appropriate JSON schema for a field's regex pattern. Array-valued fields (like ``tags``) get an array-of-strings schema; diff --git a/src/extensions/score_metamodel/tests/test_sn_schemas.py b/src/extensions/score_metamodel/tests/test_sn_schemas.py index ad7949eb..51f5ff4c 100644 --- a/src/extensions/score_metamodel/tests/test_sn_schemas.py +++ b/src/extensions/score_metamodel/tests/test_sn_schemas.py @@ -101,11 +101,15 @@ def test_unknown_field_returns_string_schema(self) -> None: assert result["type"] == "string" def test_optional_scalar_field_allows_empty_string(self) -> None: - result = get_field_pattern_schema("mitigation_issue", "^https://github.com/.*$", is_optional=True) + result = get_field_pattern_schema( + "mitigation_issue", "^https://github.com/.*$", is_optional=True + ) assert result == {"type": "string", "pattern": "^$|^https://github.com/.*$"} def test_mandatory_scalar_field_does_not_allow_empty_string(self) -> None: - result = get_field_pattern_schema("status", "^(valid|invalid)$", is_optional=False) + result = get_field_pattern_schema( + "status", "^(valid|invalid)$", is_optional=False + ) assert result == {"type": "string", "pattern": "^(valid|invalid)$"} # Should not have alternation with empty string assert "^$|" not in result.get("pattern", "") @@ -183,7 +187,10 @@ def test_optional_fields_not_required(self) -> None: assert "comment" not in result["required"] assert "comment" in result["properties"] # Optional fields should allow empty strings via pattern alternation - assert result["properties"]["comment"] == {"type": "string", "pattern": "^$|^.*$"} + assert result["properties"]["comment"] == { + "type": "string", + "pattern": "^$|^.*$", + } def test_ignored_fields_excluded(self) -> None: mandatory = {field: "^.*$" for field in IGNORE_FIELDS} From 90745879ab5bcd75b304929c7edb4263285ed936 Mon 
Sep 17 00:00:00 2001 From: Chidananda Swamy R <chidananda.swamy@ltts.com> Date: Mon, 16 Feb 2026 15:52:26 +0530 Subject: [PATCH 225/231] Convert markdown table to reStructuredText format (#402) Replaced the markdown table with a reStructuredText table format in the description of reqid: Safety Analysis Linkage Violates" Signed-off-by: Chidananda Swamy R <chidananda.swamy@ltts.com> --- docs/internals/requirements/requirements.rst | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/docs/internals/requirements/requirements.rst b/docs/internals/requirements/requirements.rst index 816646cb..b9934e3f 100644 --- a/docs/internals/requirements/requirements.rst +++ b/docs/internals/requirements/requirements.rst @@ -997,12 +997,18 @@ Testing Docs-As-Code shall enforce that needs of type :need:`tool_req__docs_saf_types` have a `violates` links to at least one dynamic / static diagram according to the table. - | Source | Target | - | -- | -- | - | feat_saf_dfa | feat_arc_sta | - | comp_saf_dfa | comp_arc_sta | - | feat_saf_fmea | feat_arc_dyn | - | comp_saf_fmea | comp_arc_dyn | + + .. 
table:: + :widths: auto + + ============= =================== + Link Source Allowed Link Target + ============= =================== + feat_saf_dfa feat_arc_sta + comp_saf_dfa comp_arc_sta + feat_saf_fmea feat_arc_dyn + comp_saf_fmea comp_arc_dyn + ============= =================== From 15e1779957b51cdd38e6fc47903d5654d1d2bb3d Mon Sep 17 00:00:00 2001 From: Chidananda Swamy R <chidananda.swamy@ltts.com> Date: Mon, 16 Feb 2026 15:54:04 +0530 Subject: [PATCH 226/231] Update safety analysis process mapping section (#401) --- docs/internals/requirements/requirements.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/internals/requirements/requirements.rst b/docs/internals/requirements/requirements.rst index b9934e3f..55bfb953 100644 --- a/docs/internals/requirements/requirements.rst +++ b/docs/internals/requirements/requirements.rst @@ -354,9 +354,9 @@ Versioning request of the file containing the document. -------- -Mapping -------- +-------- + Mapping +-------- .. needtable:: :style: table @@ -1048,9 +1048,9 @@ Testing Docs-As-Code shall enforce that every Safety Analysis has a short description of the failure effect (e.g. failure lead to an unintended actuation of the analysed element) -------- -Mapping -------- +---------------------------------------------------------------- +Safety Analysis (DFA + FMEA) Process to Tool Requirement Mapping +---------------------------------------------------------------- .. 
needtable:: :style: table From e33bd2fb4377c1986e0a0420a30cf52cabcf28a7 Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Mon, 16 Feb 2026 13:32:56 +0000 Subject: [PATCH 227/231] refactor: remove unused needscfg_exclude_vars configuration --- src/extensions/score_sync_toml/__init__.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/extensions/score_sync_toml/__init__.py b/src/extensions/score_sync_toml/__init__.py index 72e598e6..79ebfb7a 100644 --- a/src/extensions/score_sync_toml/__init__.py +++ b/src/extensions/score_sync_toml/__init__.py @@ -59,12 +59,6 @@ def setup(app: Sphinx) -> dict[str, str | bool]: ] # TODO remove the suppress_warnings once fixed - app.config.needscfg_exclude_vars = [ - "needs_from_toml", - "needs_from_toml_table", - # "needs_schema_definitions_from_json", - ] - return { "version": "0.1", "parallel_read_safe": True, From a38e999d13a1a4a79f6287c34ec6e3c09fcad664 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20S=C3=B6ren=20Pollak?= <maximilian.pollak@qorix.com> Date: Mon, 16 Feb 2026 16:15:31 +0100 Subject: [PATCH 228/231] Fix testlinker & increase tooling version (#403) --- MODULE.bazel | 2 +- src/extensions/score_source_code_linker/testlink.py | 12 +++++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 9949a562..b358e521 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -104,7 +104,7 @@ bazel_dep(name = "score_process", version = "1.4.3") # Add Linter bazel_dep(name = "rules_multitool", version = "1.9.0") -bazel_dep(name = "score_tooling", version = "1.1.2-RC") +bazel_dep(name = "score_tooling", version = "1.1.2") multitool_root = use_extension("@rules_multitool//multitool:extension.bzl", "multitool") use_repo(multitool_root, "actionlint_hub", "multitool", "ruff_hub", "shellcheck_hub", "yamlfmt_hub") diff --git a/src/extensions/score_source_code_linker/testlink.py b/src/extensions/score_source_code_linker/testlink.py index 50066fe3..51311f3e 
100644 --- a/src/extensions/score_source_code_linker/testlink.py +++ b/src/extensions/score_source_code_linker/testlink.py @@ -93,7 +93,17 @@ class DataOfTestCase: @classmethod def from_dict(cls, data: dict[str, Any]): # type-ignore - return cls(**data) # type-ignore + return cls( + name=data["name"], + file=data["file"], + line=data["line"], + result=data["result"], + TestType=data["TestType"], + DerivationTechnique=data["DerivationTechnique"], + result_text=data["result_text"], + PartiallyVerifies=data.get("PartiallyVerifies"), + FullyVerifies=data.get("FullyVerifies"), + ) @classmethod def clean_text(cls, text: str): From eac5d5e85b3cee98ee8156a6d3adae919f547c64 Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riess@gmail.com> Date: Wed, 18 Feb 2026 15:33:41 +0000 Subject: [PATCH 229/231] feat: add schema definitions section to shared.toml --- src/extensions/score_sync_toml/shared.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/extensions/score_sync_toml/shared.toml b/src/extensions/score_sync_toml/shared.toml index 9daf6ba0..6b89b51d 100644 --- a/src/extensions/score_sync_toml/shared.toml +++ b/src/extensions/score_sync_toml/shared.toml @@ -18,3 +18,6 @@ options = true content = true parse_content = false content_required = false + +[needs] +schema_definitions_from_json="schemas.json" From 2cc5ee8548033c1c50f76da57d3d9448e746fa5a Mon Sep 17 00:00:00 2001 From: Andreas Zwinkau <95761648+a-zw@users.noreply.github.com> Date: Thu, 19 Feb 2026 10:20:50 +0100 Subject: [PATCH 230/231] Cleanup (#405) --- MODULE.bazel | 11 ++--------- docs.bzl | 22 +++------------------- scripts_bazel/BUILD | 2 +- scripts_bazel/tests/BUILD | 2 +- src/extensions/BUILD | 2 +- src/extensions/score_draw_uml_funcs/BUILD | 2 +- src/extensions/score_header_service/BUILD | 2 +- src/extensions/score_layout/BUILD | 2 +- src/extensions/score_metamodel/BUILD | 2 +- src/extensions/score_sphinx_bundle/BUILD | 2 +- src/extensions/score_sync_toml/BUILD | 2 +- src/helper_lib/BUILD | 2 
+- 12 files changed, 15 insertions(+), 38 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index b358e521..fc023985 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -18,13 +18,6 @@ module( version = "0.0.0", ) -############################################################################### -# -# Packaging dependencies -# -############################################################################### -bazel_dep(name = "rules_pkg", version = "1.1.0") - ############################################################################### # # Python version @@ -51,11 +44,11 @@ pip = use_extension("@rules_python//python/extensions:pip.bzl", "pip") pip.parse( envsubst = ["PIP_INDEX_URL"], extra_pip_args = ["--index-url=${PIP_INDEX_URL:-https://pypi.org/simple/}"], - hub_name = "pip_process", + hub_name = "docs_as_code_hub_env", python_version = PYTHON_VERSION, requirements_lock = "//src:requirements.txt", ) -use_repo(pip, "pip_process") +use_repo(pip, "docs_as_code_hub_env") # Additional Python rules provided by aspect, e.g. an improved version of bazel_dep(name = "aspect_rules_py", version = "1.4.0") diff --git a/docs.bzl b/docs.bzl index b8c45c18..59865265 100644 --- a/docs.bzl +++ b/docs.bzl @@ -42,8 +42,7 @@ Easy streamlined way for S-CORE docs-as-code. # For user-facing documentation, refer to `/README.md`. 
load("@aspect_rules_py//py:defs.bzl", "py_binary") -load("@pip_process//:requirements.bzl", "all_requirements") -load("@rules_pkg//pkg:mappings.bzl", "pkg_files", "strip_prefix") +load("@docs_as_code_hub_env//:requirements.bzl", "all_requirements") load("@rules_python//sphinxdocs:sphinx.bzl", "sphinx_build_binary", "sphinx_docs") load("@score_tooling//:defs.bzl", "score_virtualenv") load("@score_tooling//bazel/rules/rules_score:rules_score.bzl", "sphinx_module") @@ -120,7 +119,7 @@ def docs(source_dir = "docs", data = [], deps = [], scan_code = []): deps = deps, ) - pkg_files( + native.filegroup( name = "docs_sources", srcs = native.glob([ source_dir + "/**/*.png", @@ -137,7 +136,6 @@ def docs(source_dir = "docs", data = [], deps = [], scan_code = []): source_dir + "/**/*.inc", "more_docs/**/*.rst", ], allow_empty = True), - strip_prefix = strip_prefix.from_pkg(), # avoid flattening of folders visibility = ["//visibility:public"], ) @@ -259,21 +257,7 @@ def docs(source_dir = "docs", data = [], deps = [], scan_code = []): sphinx_module( name = native.module_name() + "_module", - srcs = native.glob([ - source_dir + "/**/*.rst", - source_dir + "/**/*.png", - source_dir + "/**/*.svg", - source_dir + "/**/*.md", - source_dir + "/**/*.html", - source_dir + "/**/*.css", - source_dir + "/**/*.puml", - source_dir + "/**/*.need", - source_dir + "/**/*.yaml", - source_dir + "/**/*.json", - source_dir + "/**/*.csv", - source_dir + "/**/*.inc", - "more_docs/**/*.rst", - ], allow_empty = True), + srcs = [":docs_sources"], # config = ":" + source_dir + "/conf.py", index = source_dir + "/index.rst", sphinx = "@score_tooling//bazel/rules/rules_score:score_build", diff --git a/scripts_bazel/BUILD b/scripts_bazel/BUILD index f332a4af..81c9212f 100644 --- a/scripts_bazel/BUILD +++ b/scripts_bazel/BUILD @@ -12,7 +12,7 @@ # ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_binary") 
-load("@pip_process//:requirements.bzl", "all_requirements") +load("@docs_as_code_hub_env//:requirements.bzl", "all_requirements") filegroup( name = "sources", diff --git a/scripts_bazel/tests/BUILD b/scripts_bazel/tests/BUILD index 2290a6a2..25f09278 100644 --- a/scripts_bazel/tests/BUILD +++ b/scripts_bazel/tests/BUILD @@ -11,7 +11,7 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* -load("@pip_process//:requirements.bzl", "all_requirements") +load("@docs_as_code_hub_env//:requirements.bzl", "all_requirements") load("@score_tooling//:defs.bzl", "score_py_pytest") score_py_pytest( diff --git a/src/extensions/BUILD b/src/extensions/BUILD index 4f5d6c91..471897b2 100644 --- a/src/extensions/BUILD +++ b/src/extensions/BUILD @@ -12,7 +12,7 @@ # ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_binary", "py_library") -load("@pip_process//:requirements.bzl", "all_requirements") +load("@docs_as_code_hub_env//:requirements.bzl", "all_requirements") load("@score_tooling//:defs.bzl", "score_py_pytest", "score_virtualenv") py_library( diff --git a/src/extensions/score_draw_uml_funcs/BUILD b/src/extensions/score_draw_uml_funcs/BUILD index b16000a6..65c45d71 100644 --- a/src/extensions/score_draw_uml_funcs/BUILD +++ b/src/extensions/score_draw_uml_funcs/BUILD @@ -11,7 +11,7 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_library") -load("@pip_process//:requirements.bzl", "all_requirements") +load("@docs_as_code_hub_env//:requirements.bzl", "all_requirements") filegroup( name = "all_sources", diff --git a/src/extensions/score_header_service/BUILD b/src/extensions/score_header_service/BUILD index 48185811..b96c60e8 100644 --- a/src/extensions/score_header_service/BUILD +++ b/src/extensions/score_header_service/BUILD @@ 
-12,7 +12,7 @@ # ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_library") -load("@pip_process//:requirements.bzl", "all_requirements") +load("@docs_as_code_hub_env//:requirements.bzl", "all_requirements") load("@score_tooling//:defs.bzl", "score_py_pytest") filegroup( diff --git a/src/extensions/score_layout/BUILD b/src/extensions/score_layout/BUILD index 8e21188d..fcd3be75 100644 --- a/src/extensions/score_layout/BUILD +++ b/src/extensions/score_layout/BUILD @@ -11,7 +11,7 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_library") -load("@pip_process//:requirements.bzl", "requirement") +load("@docs_as_code_hub_env//:requirements.bzl", "requirement") filegroup( name = "all_sources", diff --git a/src/extensions/score_metamodel/BUILD b/src/extensions/score_metamodel/BUILD index 4a5267e6..e2d00056 100644 --- a/src/extensions/score_metamodel/BUILD +++ b/src/extensions/score_metamodel/BUILD @@ -12,7 +12,7 @@ # ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_library") -load("@pip_process//:requirements.bzl", "all_requirements") +load("@docs_as_code_hub_env//:requirements.bzl", "all_requirements") load("@score_tooling//:defs.bzl", "score_py_pytest") filegroup( diff --git a/src/extensions/score_sphinx_bundle/BUILD b/src/extensions/score_sphinx_bundle/BUILD index 53c3b721..c8d0a0a7 100644 --- a/src/extensions/score_sphinx_bundle/BUILD +++ b/src/extensions/score_sphinx_bundle/BUILD @@ -11,7 +11,7 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_library") -load("@pip_process//:requirements.bzl", "all_requirements") +load("@docs_as_code_hub_env//:requirements.bzl", "all_requirements") filegroup( name = 
"all_sources", diff --git a/src/extensions/score_sync_toml/BUILD b/src/extensions/score_sync_toml/BUILD index fdb8acb3..34ba6584 100644 --- a/src/extensions/score_sync_toml/BUILD +++ b/src/extensions/score_sync_toml/BUILD @@ -12,7 +12,7 @@ # ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_library") -load("@pip_process//:requirements.bzl", "requirement") +load("@docs_as_code_hub_env//:requirements.bzl", "requirement") filegroup( name = "all_sources", diff --git a/src/helper_lib/BUILD b/src/helper_lib/BUILD index 545a7ea2..62b59785 100644 --- a/src/helper_lib/BUILD +++ b/src/helper_lib/BUILD @@ -11,7 +11,7 @@ # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* load("@aspect_rules_py//py:defs.bzl", "py_library") -load("@pip_process//:requirements.bzl", "all_requirements") +load("@docs_as_code_hub_env//:requirements.bzl", "all_requirements") load("@score_tooling//:defs.bzl", "score_py_pytest") filegroup( From 29fcd07cd04447f47f5a3254074b1d75f6df305b Mon Sep 17 00:00:00 2001 From: Arnaud Riess <arnaud.riesslgmail.com> Date: Fri, 20 Feb 2026 07:26:10 +0000 Subject: [PATCH 231/231] feat: add server configuration for index_on_save in shared.toml --- src/extensions/score_sync_toml/shared.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/extensions/score_sync_toml/shared.toml b/src/extensions/score_sync_toml/shared.toml index 6b89b51d..53eeb3c5 100644 --- a/src/extensions/score_sync_toml/shared.toml +++ b/src/extensions/score_sync_toml/shared.toml @@ -21,3 +21,6 @@ content_required = false [needs] schema_definitions_from_json="schemas.json" + +[server] +index_on_save = true