diff --git a/BUILD b/BUILD index ff6103ec2..c84b7796e 100644 --- a/BUILD +++ b/BUILD @@ -15,6 +15,7 @@ load("@score_tooling//:defs.bzl", "cli_helper", "copyright_checker") load("//:docs.bzl", "docs") package(default_visibility = ["//visibility:public"]) +exports_files(["pyproject.toml"]) copyright_checker( name = "copyright", diff --git a/docs.bzl b/docs.bzl index 8f728f123..f2b498e56 100644 --- a/docs.bzl +++ b/docs.bzl @@ -120,7 +120,7 @@ def _missing_requirements(deps): fail(msg) fail("This case should be unreachable?!") -def docs(source_dir = "docs", data = [], deps = [], scan_code = []): +def docs(known_good = None, source_dir = "docs", data = [], deps = [], scan_code = []): """Creates all targets related to documentation. By using this function, you'll get any and all updates for documentation targets in one place. @@ -176,33 +176,43 @@ def docs(source_dir = "docs", data = [], deps = [], scan_code = []): data_with_docs_sources = _rewrite_needs_json_to_docs_sources(data) additional_combo_sourcelinks = _rewrite_needs_json_to_sourcelinks(data) _merge_sourcelinks(name = "merged_sourcelinks", sourcelinks = [":sourcelinks_json"] + additional_combo_sourcelinks) + docs_data = data + [":sourcelinks_json"] + combo_data = data_with_docs_sources + [":merged_sourcelinks"] + + docs_env = { + "SOURCE_DIRECTORY": source_dir, + "DATA": str(data), + "ACTION": "incremental", + "SCORE_SOURCELINKS": "$(location :sourcelinks_json)", + } + docs_sources_env = { + "SOURCE_DIRECTORY": source_dir, + "DATA": str(data_with_docs_sources), + "ACTION": "incremental", + "SCORE_SOURCELINKS": "$(location :merged_sourcelinks)", + } + if known_good: + docs_env["KNOWN_GOOD"] = "$(location "+ known_good + ")" + docs_sources_env["KNOWN_GOOD"] = "$(location "+ known_good + ")" + docs_data.append(known_good) + combo_data.append(known_good) py_binary( name = "docs", tags = ["cli_help=Build documentation:\nbazel run //:docs"], srcs = ["@score_docs_as_code//src:incremental.py"], - data = data + 
[":sourcelinks_json"], + data = docs_data, deps = deps, - env = { - "SOURCE_DIRECTORY": source_dir, - "DATA": str(data), - "ACTION": "incremental", - "SCORE_SOURCELINKS": "$(location :sourcelinks_json)", - }, + env = docs_env, ) py_binary( name = "docs_combo_experimental", tags = ["cli_help=Build full documentation with all dependencies:\nbazel run //:docs_combo_experimental"], srcs = ["@score_docs_as_code//src:incremental.py"], - data = data_with_docs_sources + [":merged_sourcelinks"], + data = combo_data, deps = deps, - env = { - "SOURCE_DIRECTORY": source_dir, - "DATA": str(data_with_docs_sources), - "ACTION": "incremental", - "SCORE_SOURCELINKS": "$(location :merged_sourcelinks)", - }, + env = docs_sources_env ) py_binary( @@ -222,42 +232,27 @@ def docs(source_dir = "docs", data = [], deps = [], scan_code = []): name = "docs_check", tags = ["cli_help=Verify documentation:\nbazel run //:docs_check"], srcs = ["@score_docs_as_code//src:incremental.py"], - data = data + [":sourcelinks_json"], + data = docs_data, deps = deps, - env = { - "SOURCE_DIRECTORY": source_dir, - "DATA": str(data), - "ACTION": "check", - "SCORE_SOURCELINKS": "$(location :sourcelinks_json)", - }, + env = docs_env ) py_binary( name = "live_preview", tags = ["cli_help=Live preview documentation in the browser:\nbazel run //:live_preview"], srcs = ["@score_docs_as_code//src:incremental.py"], - data = data + [":sourcelinks_json"], + data = docs_data, deps = deps, - env = { - "SOURCE_DIRECTORY": source_dir, - "DATA": str(data), - "ACTION": "live_preview", - "SCORE_SOURCELINKS": "$(location :sourcelinks_json)", - }, + env = docs_env ) py_binary( name = "live_preview_combo_experimental", tags = ["cli_help=Live preview full documentation with all dependencies in the browser:\nbazel run //:live_preview_combo_experimental"], srcs = ["@score_docs_as_code//src:incremental.py"], - data = data_with_docs_sources + [":merged_sourcelinks"], + data = combo_data, deps = deps, - env = { - "SOURCE_DIRECTORY": 
source_dir, - "DATA": str(data_with_docs_sources), - "ACTION": "live_preview", - "SCORE_SOURCELINKS": "$(location :merged_sourcelinks)", - }, + env = docs_sources_env ) score_virtualenv( @@ -309,15 +304,19 @@ def _sourcelinks_json(name, srcs): """ output_file = name + ".json" - native.genrule( - name = name, - srcs = srcs, - outs = [output_file], - cmd = """ + cmd = """ $(location @score_docs_as_code//scripts_bazel:generate_sourcelinks) \ --output $@ \ $(SRCS) - """, + """ + + rule_srcs = srcs + + native.genrule( + name = name, + srcs = rule_srcs, + outs = [output_file], + cmd = cmd, tools = ["@score_docs_as_code//scripts_bazel:generate_sourcelinks"], visibility = ["//visibility:public"], ) diff --git a/pyproject.toml b/pyproject.toml index 41a0a8765..6aa48c6f3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,3 +22,47 @@ extend-exclude = [ "bazel-*", ".venv*/**", ] +[tool.pytest.ini_options] +log_cli = true +log_cli_level = "Debug" +log_cli_format = "[%(asctime)s.%(msecs)03d] [%(levelname)-3s] [%(name)s] %(message)s" +log_cli_date_format = "%Y-%m-%d %H:%M:%S" + +log_format = "[%(asctime)s.%(msecs)03d] [%(levelname)-3s] [%(name)s] %(message)s" +log_date_format = "%Y-%m-%d %H:%M:%S" + +log_file_level = "Debug" +log_file_format = "[%(asctime)s.%(msecs)03d] [%(levelname)-3s] [%(name)s] %(message)s" +log_file_date_format = "%Y-%m-%d %H:%M:%S" + +markers = [ + "metadata", + "test_properties(dict): Add custom properties to test XML output", +] + +norecursedirs = [ + ".*", # hidden folders like .git, .venv, .cache, etc. + "_build*", # common docs-as-code directory + "bazel-*", # Bazel output folders +] + +junit_duration_report = "call" +junit_family = "xunit1" + +filterwarnings = [ + "ignore::pytest.PytestExperimentalApiWarning", + # Silence third-party deprecations from sphinx_needs targeting Python 3.14 removals. + # We'll drop these ignores once sphinx_needs releases a fix. 
+ "ignore:.*deprecated.*Python 3\\.14.*:DeprecationWarning:sphinx_needs\\..*", + # Docutils is deprecating OptionParser in favor of argparse (0.21+). + # This one originates inside sphinx_needs.layout. + # We'll drop these ignores once sphinx/sphinx_needs releases a fix. + "ignore:^The frontend\\.OptionParser class will be replaced by a subclass of argparse\\.ArgumentParser in Docutils 0\\.21 or later\\.:DeprecationWarning:sphinx_needs\\.layout", + # This one bubbles up from stdlib optparse but is *explicitly* a Docutils message. + # We match the full message to avoid silencing unrelated optparse warnings. + # We'll drop these ignores once sphinx/sphinx_needs releases a fix. + "ignore:^The frontend\\.Option class will be removed in Docutils 0\\.21 or later\\.:DeprecationWarning:optparse", +] +pythonpath = [ + "src/extensions/", +] diff --git a/scripts_bazel/generate_sourcelinks_cli.py b/scripts_bazel/generate_sourcelinks_cli.py index 4291b97c5..809d35894 100644 --- a/scripts_bazel/generate_sourcelinks_cli.py +++ b/scripts_bazel/generate_sourcelinks_cli.py @@ -28,6 +28,7 @@ from src.extensions.score_source_code_linker.needlinks import ( store_source_code_links_json, ) +from src.helper_lib import get_runfiles_dir, parse_filename logging.basicConfig(level=logging.INFO, format="%(message)s") logger = logging.getLogger(__name__) @@ -53,11 +54,20 @@ def main(): args = parser.parse_args() all_need_references = [] - for file_path in args.files: - abs_file_path = file_path.resolve() - assert abs_file_path.exists(), abs_file_path + + all_files = args.files + + runfiles_dir = get_runfiles_dir() + for raw_file_path in all_files: + assert raw_file_path.exists(), raw_file_path + prefix, module_name, file_path, file_name = parse_filename( + raw_file_path, runfiles_dir + ) references = _extract_references_from_file( - abs_file_path.parent, Path(abs_file_path.name) + prefix=prefix, + file_name=file_name, + file_path=Path(file_path), + module_name=module_name, ) 
all_need_references.extend(references) diff --git a/scripts_bazel/tests/BUILD b/scripts_bazel/tests/BUILD index 25f092780..2e913f616 100644 --- a/scripts_bazel/tests/BUILD +++ b/scripts_bazel/tests/BUILD @@ -21,6 +21,7 @@ score_py_pytest( "//scripts_bazel:generate_sourcelinks", "//src/extensions/score_source_code_linker", ] + all_requirements, + pytest_config = "//:pyproject.toml", ) score_py_pytest( @@ -29,4 +30,5 @@ score_py_pytest( deps = [ "//scripts_bazel:merge_sourcelinks", ] + all_requirements, + pytest_config = "//:pyproject.toml", ) diff --git a/src/extensions/score_metamodel/BUILD b/src/extensions/score_metamodel/BUILD index e2d00056d..0891707c0 100644 --- a/src/extensions/score_metamodel/BUILD +++ b/src/extensions/score_metamodel/BUILD @@ -66,4 +66,5 @@ score_py_pytest( ], ) + ["tests/rst/conf.py"], deps = [":score_metamodel"], + pytest_config = "//:pyproject.toml" ) diff --git a/src/extensions/score_source_code_linker/BUILD b/src/extensions/score_source_code_linker/BUILD index 758fc786d..e3c289c66 100644 --- a/src/extensions/score_source_code_linker/BUILD +++ b/src/extensions/score_source_code_linker/BUILD @@ -75,4 +75,5 @@ score_py_pytest( ":score_source_code_linker", "//src/extensions/score_metamodel", ], + pytest_config = "//:pyproject.toml", ) diff --git a/src/extensions/score_source_code_linker/__init__.py b/src/extensions/score_source_code_linker/__init__.py index cf9843dc5..262bcc63f 100644 --- a/src/extensions/score_source_code_linker/__init__.py +++ b/src/extensions/score_source_code_linker/__init__.py @@ -58,7 +58,10 @@ find_git_root, find_ws_root, ) -from src.helper_lib.additional_functions import get_github_link +from src.helper_lib.additional_functions import ( + get_github_link, + get_module_has_from_known_good_json, +) LOGGER = get_logger(__name__) # Uncomment this to enable more verbose logging @@ -355,20 +358,35 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: source_code_links_by_need = 
load_source_code_links_combined_json( get_cache_filename(app.outdir, "score_scl_grouped_cache.json") ) - + if known_good_path := os.getenv("KNOWN_GOOD"): + module_hash_mapping = get_module_has_from_known_good_json(Path(known_good_path)) + else: + module_hash_mapping = None for source_code_links in source_code_links_by_need: need = find_need(needs_copy, source_code_links.need) if need is None: # TODO: print github annotations as in https://github.com/eclipse-score/bazel_registry/blob/7423b9996a45dd0a9ec868e06a970330ee71cf4f/tools/verify_semver_compatibility_level.py#L126-L129 for n in source_code_links.links.CodeLinks: + full_file_path = ( + f"{n.module}/{n.path}/{n.file}" + if n.module + else f"{n.path}/{n.file}" + ) LOGGER.warning( - f"{n.file}:{n.line}: Could not find {source_code_links.need} " + f"{full_file_path}:{n.line}: " + f"Could not find {source_code_links.need} " "in documentation [CODE LINK]", type="score_source_code_linker", ) for n in source_code_links.links.TestLinks: + full_file_path = ( + f"{n.module}/{n.path}/{n.file}" + if n.module + else f"{n.path}/{n.file}" + ) LOGGER.warning( - f"{n.file}:{n.line}: Could not find {source_code_links.need} " + f"{full_file_path}:{n.line}: " + f"Could not find {source_code_links.need} " "in documentation [TEST LINK]", type="score_source_code_linker", ) @@ -377,11 +395,12 @@ def inject_links_into_needs(app: Sphinx, env: BuildEnvironment) -> None: need_as_dict = cast(dict[str, object], need) need_as_dict["source_code_link"] = ", ".join( - f"{get_github_link(n)}<>{n.file}:{n.line}" + f"{get_github_link(n, module_hash_mapping)}<>{n.file}:{n.line}" for n in source_code_links.links.CodeLinks ) need_as_dict["testlink"] = ", ".join( - f"{get_github_link(n)}<>{n.name}" for n in source_code_links.links.TestLinks + f"{get_github_link(n, module_hash_mapping)}<>{n.name}" + for n in source_code_links.links.TestLinks ) # NOTE: Removing & adding the need is important to make sure diff --git 
a/src/extensions/score_source_code_linker/generate_source_code_links_json.py b/src/extensions/score_source_code_linker/generate_source_code_links_json.py index abedc2db4..532d27de5 100644 --- a/src/extensions/score_source_code_linker/generate_source_code_links_json.py +++ b/src/extensions/score_source_code_linker/generate_source_code_links_json.py @@ -20,10 +20,15 @@ import os from pathlib import Path +from sphinx_needs.logging import get_logger + from src.extensions.score_source_code_linker.needlinks import ( NeedLink, store_source_code_links_json, ) +from src.helper_lib import get_runfiles_dir, parse_filename + +LOGGER = get_logger(__name__) TAGS = [ "# " + "req-traceability:", @@ -43,34 +48,43 @@ def _extract_references_from_line(line: str): yield tag, req.strip() -def _extract_references_from_file(root: Path, file_path: Path) -> list[NeedLink]: +def _extract_references_from_file( + prefix: Path, file_name: str, file_path: Path, module_name: str +) -> list[NeedLink]: """Scan a single file for template strings and return findings.""" - assert root.is_absolute(), "Root path must be absolute" - assert not file_path.is_absolute(), "File path must be relative to the root" + # assert root.is_absolute(), f"Root path must be absolute. {root} is not" + #assert not file_path.is_absolute(), "File path must be relative to the root" # assert file_path.is_relative_to(root), ( # f"File path ({file_path}) must be relative to the root ({root})" # ) - assert (root / file_path).exists(), ( - f"File {file_path} does not exist in root {root}." - ) - + # assert (root / file_path).exists(), ( + # f"File {file_path} does not exist in root {root}." 
+ # ) findings: list[NeedLink] = [] - + if module_name: + module_name_full = str(module_name) + "+" + complete_file = prefix / module_name_full / file_path / file_name + else: + complete_file = prefix / file_path / file_name + assert complete_file is not None try: - with open(root / file_path, encoding="utf-8", errors="ignore") as f: + with open(complete_file, encoding="utf-8", errors="ignore") as f: for line_num, line in enumerate(f, 1): for tag, req in _extract_references_from_line(line): findings.append( NeedLink( - file=file_path, + file=file_name, + path=file_path, + module=module_name if module_name is not None else "", line=line_num, tag=tag, need=req, full_line=line.strip(), ) ) - except (UnicodeDecodeError, PermissionError, OSError): + except (UnicodeDecodeError, PermissionError, OSError) as e: # Skip files that can't be read as text + LOGGER.debug(f"Error reading file to parse for linked needs: \n{e}") pass return findings @@ -111,18 +125,22 @@ def find_all_need_references(search_path: Path) -> list[NeedLink]: list[FileFindings]: List of FileFindings objects containing all findings for each file that contains template strings. 
""" + runfiles_dir = get_runfiles_dir() start_time = os.times().elapsed all_need_references: list[NeedLink] = [] # Use os.walk to have better control over directory traversal for file in iterate_files_recursively(search_path): - references = _extract_references_from_file(search_path, file) + prefix, module_name, file_path, file_name = parse_filename(file, runfiles_dir) + references = _extract_references_from_file( + prefix, file_name, Path(file_path), module_name + ) all_need_references.extend(references) elapsed_time = os.times().elapsed - start_time - print( - f"DEBUG: Found {len(all_need_references)} need references " + LOGGER.debug( + f"Found {len(all_need_references)} need references " f"in {elapsed_time:.2f} seconds" ) diff --git a/src/extensions/score_source_code_linker/needlinks.py b/src/extensions/score_source_code_linker/needlinks.py index 348147292..b6068c2d6 100644 --- a/src/extensions/score_source_code_linker/needlinks.py +++ b/src/extensions/score_source_code_linker/needlinks.py @@ -23,7 +23,9 @@ class NeedLink: """Represents a single template string finding in a file.""" - file: Path + file: str + path: Path + module: str line: int tag: str need: str @@ -36,7 +38,9 @@ def DefaultNeedLink() -> NeedLink: Like this better than adding defaults to the dataclass, as it is deliberate """ return NeedLink( - file=Path("."), + file="", + path=Path(), + module="", line=0, tag="", need="", @@ -54,9 +58,19 @@ def default(self, o: object): def needlink_decoder(d: dict[str, Any]) -> NeedLink | dict[str, Any]: - if {"file", "line", "tag", "need", "full_line"} <= d.keys(): + if { + "file", + "path", + "module", + "line", + "tag", + "need", + "full_line", + } <= d.keys(): return NeedLink( - file=Path(d["file"]), + file=d["file"], + path=Path(d["path"]), + module=d["module"], line=d["line"], tag=d["tag"], need=d["need"], diff --git a/src/extensions/score_source_code_linker/testlink.py b/src/extensions/score_source_code_linker/testlink.py index ee83c7f95..0bcae48ed 
100644 --- a/src/extensions/score_source_code_linker/testlink.py +++ b/src/extensions/score_source_code_linker/testlink.py @@ -36,11 +36,13 @@ @dataclass(frozen=True, order=True) class DataForTestLink: name: str - file: Path + file: str + path: Path line: int need: str verify_type: str result: str + module: str = "" # Is "" when running for local tests result_text: str = "" @@ -57,15 +59,19 @@ def DataForTestLink_JSON_Decoder(d: dict[str, Any]) -> DataForTestLink | dict[st if { "name", "file", + "path", "line", "need", "verify_type", "result", + "module", "result_text", } <= d.keys(): return DataForTestLink( name=d["name"], - file=Path(d["file"]), + file=d["file"], + path=Path(d["path"]), + module=d["module"], line=d["line"], need=d["need"], verify_type=d["verify_type"], @@ -81,6 +87,8 @@ def DataForTestLink_JSON_Decoder(d: dict[str, Any]) -> DataForTestLink | dict[st class DataOfTestCase: name: str | None = None file: str | None = None + path: Path | None = None + module: str | None = None # Is None when running for local tests line: str | None = None result: str | None = None # passed | falied | skipped | disabled # Intentionally not snakecase to make dict parsing simple @@ -96,6 +104,8 @@ def from_dict(cls, data: dict[str, Any]): # type-ignore return cls( name=data.get("name"), file=data.get("file"), + path=data.get("path"), + module=data.get("module"), line=data.get("line"), result=data.get("result"), TestType=data.get("TestType"), @@ -165,6 +175,10 @@ def is_valid(self) -> bool: ] for field in fields: if getattr(self, field) is None: + # Module can be None when we are in a local environment, + # not reference-integration. + if field == "module": + continue # This might be a warning in the future, but for now we want be lenient.
LOGGER.info( f"TestCase: {self.name} has a None value for the field: " @@ -198,6 +212,7 @@ def parse_attributes(verify_field: str | None, verify_type: str): assert self.name is not None assert self.file is not None assert self.line is not None + assert self.path is not None assert self.result is not None assert self.result_text is not None assert self.TestType is not None @@ -206,7 +221,11 @@ def parse_attributes(verify_field: str | None, verify_type: str): for need in verify_field.split(","): yield DataForTestLink( name=self.name, # type-ignore - file=Path(self.file), # type-ignore + file=self.file, # type-ignore + path=Path(self.path), # type-ignore + module=self.module + if self.module is not None + else "", # type-ignore line=int(self.line), # type-ignore need=need.strip(), verify_type=verify_type, @@ -234,6 +253,8 @@ def DataOfTestCase_JSON_Decoder(d: dict[str, Any]) -> DataOfTestCase | dict[str, "name", "file", "line", + "path", + "module", "result", "TestType", "DerivationTechnique", @@ -244,6 +265,8 @@ def DataOfTestCase_JSON_Decoder(d: dict[str, Any]) -> DataOfTestCase | dict[str, return DataOfTestCase( name=d["name"], file=d["file"], + path=d["path"], + module=d["module"], line=d["line"], result=d["result"], TestType=d["TestType"], diff --git a/src/extensions/score_source_code_linker/tests/expected_codelink.json b/src/extensions/score_source_code_linker/tests/expected_codelink.json index 447ef8a69..62af09919 100644 --- a/src/extensions/score_source_code_linker/tests/expected_codelink.json +++ b/src/extensions/score_source_code_linker/tests/expected_codelink.json @@ -1,27 +1,35 @@ [ { - "file": "src/implementation1.py", + "file": "implementation1.py", + "path": "src", + "module": "", "line": 3, "tag":"#-----req-Id:", "need": "TREQ_ID_1", "full_line": "#-----req-Id: TREQ_ID_1" }, { - "file": "src/implementation2.py", + "file": "implementation2.py", + "path": "src", + "module": "", "line": 5, "tag":"#-----req-Id:", "need": "TREQ_ID_1", "full_line": 
"#-----req-Id: TREQ_ID_1" }, { - "file": "src/implementation1.py", + "file": "implementation1.py", + "path": "src", + "module": "", "line": 9, "tag":"#-----req-Id:", "need": "TREQ_ID_2", "full_line":"#-----req-Id: TREQ_ID_2" }, { - "file": "src/bad_implementation.py", + "file": "bad_implementation.py", + "path": "src", + "module": "", "line":2, "tag":"#-----req-Id:", "need": "TREQ_ID_200", diff --git a/src/extensions/score_source_code_linker/tests/expected_grouped.json b/src/extensions/score_source_code_linker/tests/expected_grouped.json index 256232661..e63ad3e28 100644 --- a/src/extensions/score_source_code_linker/tests/expected_grouped.json +++ b/src/extensions/score_source_code_linker/tests/expected_grouped.json @@ -4,14 +4,18 @@ "links": { "CodeLinks": [ { - "file": "src/implementation1.py", + "file": "implementation1.py", + "path": "src", + "module": "", "line": 3, "tag":"#-----req-Id:", "need": "TREQ_ID_1", "full_line": "#-----req-Id: TREQ_ID_1" }, { - "file": "src/implementation2.py", + "file": "implementation2.py", + "path": "src", + "module": "", "line": 5, "tag":"#-----req-Id:", "need": "TREQ_ID_1", @@ -22,7 +26,9 @@ "TestLinks": [ { "name": "TestRequirementsCoverage__test_system_startup_time", - "file": "src/tests/testfile_2.py", + "file": "testfile_2.py", + "path": "src/tests", + "module": "", "line": 25, "need": "TREQ_ID_1", "verify_type": "fully", @@ -37,7 +43,9 @@ "links": { "CodeLinks": [ { - "file": "src/implementation1.py", + "file": "implementation1.py", + "path": "src", + "module": "", "line": 9, "tag":"#-----req-Id:", "need": "TREQ_ID_2", @@ -48,7 +56,9 @@ { "name": "test_api_response_format", - "file": "src/testfile_1.py", + "file": "testfile_1.py", + "path": "src", + "module": "", "line": 10, "need": "TREQ_ID_2", "verify_type": "partially", @@ -57,7 +67,9 @@ }, { "name": "test_error_handling", - "file": "src/testfile_1.py", + "file": "testfile_1.py", + "path": "src", + "module": "", "line": 38, "need": "TREQ_ID_2", "verify_type": 
"partially", @@ -75,7 +87,9 @@ "TestLinks": [ { "name": "test_api_response_format", - "file": "src/testfile_1.py", + "file": "testfile_1.py", + "path": "src", + "module": "", "line": 10, "need": "TREQ_ID_3", "verify_type": "partially", @@ -84,7 +98,9 @@ }, { "name": "test_error_handling", - "file": "src/testfile_1.py", + "file": "testfile_1.py", + "path": "src", + "module": "", "line": 38, "need": "TREQ_ID_3", "verify_type": "partially", @@ -99,7 +115,9 @@ "links": { "CodeLinks": [ { - "file": "src/bad_implementation.py", + "file": "bad_implementation.py", + "path": "src", + "module": "", "line":2, "tag":"#-----req-Id:", "need": "TREQ_ID_200", diff --git a/src/extensions/score_source_code_linker/tests/expected_testlink.json b/src/extensions/score_source_code_linker/tests/expected_testlink.json index 19068a4d5..bcfaf3cbb 100644 --- a/src/extensions/score_source_code_linker/tests/expected_testlink.json +++ b/src/extensions/score_source_code_linker/tests/expected_testlink.json @@ -1,7 +1,9 @@ [ { "name": "test_api_response_format", - "file": "src/testfile_1.py", + "file": "testfile_1.py", + "path": "src", + "module": "", "line": 10, "need": "TREQ_ID_2", "verify_type": "partially", @@ -10,7 +12,9 @@ }, { "name": "test_api_response_format", - "file": "src/testfile_1.py", + "file": "testfile_1.py", + "path": "src", + "module": "", "line": 10, "need": "TREQ_ID_3", "verify_type": "partially", @@ -19,7 +23,9 @@ }, { "name": "test_error_handling", - "file": "src/testfile_1.py", + "file": "testfile_1.py", + "path": "src", + "module": "", "line": 38, "need": "TREQ_ID_2", "verify_type": "partially", @@ -28,7 +34,9 @@ }, { "name": "test_error_handling", - "file": "src/testfile_1.py", + "file": "testfile_1.py", + "path": "src", + "module": "", "line": 38, "need": "TREQ_ID_3", "verify_type": "partially", @@ -37,7 +45,9 @@ }, { "name": "TestRequirementsCoverage__test_system_startup_time", - "file": "src/tests/testfile_2.py", + "file": "testfile_2.py", + "path": "src/tests", + 
"module": "", "line": 25, "need": "TREQ_ID_1", "verify_type": "fully", diff --git a/src/extensions/score_source_code_linker/tests/test_codelink.py b/src/extensions/score_source_code_linker/tests/test_codelink.py index 29ddc7235..828c13aa6 100644 --- a/src/extensions/score_source_code_linker/tests/test_codelink.py +++ b/src/extensions/score_source_code_linker/tests/test_codelink.py @@ -127,9 +127,11 @@ def default(self, o: object): def needlink_test_decoder(d: dict[str, Any]) -> NeedLink | dict[str, Any]: - if {"file", "line", "tag", "need", "full_line"} <= d.keys(): + if {"file", "path", "module", "line", "tag", "need", "full_line"} <= d.keys(): return NeedLink( - file=Path(d["file"]), + file=d["file"], + path=Path(d["path"]), + module=d["module"], line=d["line"], tag=decode_comment(d["tag"]), need=d["need"], @@ -179,28 +181,36 @@ def sample_needlinks() -> list[NeedLink]: """Create sample NeedLink objects for testing.""" return [ NeedLink( - file=Path("src/implementation1.py"), + file="implementation1.py", + path=Path("src"), + module="", line=3, tag="#" + " req-Id:", need="TREQ_ID_1", full_line="#" + " req-Id: TREQ_ID_1", ), NeedLink( - file=Path("src/implementation2.py"), + file="implementation2.py", + path=Path("src"), + module="", line=3, tag="#" + " req-Id:", need="TREQ_ID_1", full_line="#" + " req-Id: TREQ_ID_1", ), NeedLink( - file=Path("src/implementation1.py"), + file="implementation1.py", + path=Path("src"), + module="", line=9, tag="#" + " req-Id:", need="TREQ_ID_2", full_line="#" + " req-Id: TREQ_ID_2", ), NeedLink( - file=Path("src/bad_implementation.py"), + file="bad_implementation.py", + path=Path("src"), + module="", line=2, tag="#" + " req-Id:", need="TREQ_ID_200", @@ -308,11 +318,15 @@ def test_group_by_need(sample_needlinks: list[NeedLink]) -> None: for found_link in result: if found_link.need == "TREQ_ID_1": assert len(found_link.links.CodeLinks) == 2 - assert found_link.links.CodeLinks[0].file == Path("src/implementation1.py") - assert 
found_link.links.CodeLinks[1].file == Path("src/implementation2.py") + assert found_link.links.CodeLinks[0].file == "implementation1.py" + assert found_link.links.CodeLinks[0].path == Path("src") + assert found_link.links.CodeLinks[1].file == "implementation2.py" + assert found_link.links.CodeLinks[1].path == Path("src") elif found_link.need == "TREQ_ID_2": assert len(found_link.links.CodeLinks) == 1 - assert found_link.links.CodeLinks[0].file == Path("src/implementation1.py") + assert found_link.links.CodeLinks[0].file == "implementation1.py" + assert found_link.links.CodeLinks[0].path == Path("src") + assert found_link.links.CodeLinks[0].line == 9 elif found_link.need == "TREQ_ID_200": assert len(found_link.links.CodeLinks) == 1 @@ -338,7 +352,9 @@ def test_get_github_link_with_real_repo(git_repo: Path) -> None: """Test generating GitHub link with real repository.""" # Create a needlink needlink = NeedLink( - file=Path("src/test.py"), + file="test.py", + path=Path("src"), + module="", line=42, tag="#" + " req-Id:", need="REQ_001", @@ -402,7 +418,9 @@ def test_cache_file_with_encoded_comments(temp_dir: Path) -> None: # Create needlinks with spaces in tags and full_line needlinks = [ NeedLink( - file=Path("src/test.py"), + file="test.py", + path=Path("src"), + module="", line=1, tag="#" + " req-Id:", need="TEST_001", @@ -506,21 +524,27 @@ def another_function(): # (simulating what generate_source_code_links_json would do) needlinks = [ NeedLink( - file=Path("src/implementation1.py"), + file="implementation1.py", + path=Path("src"), + module="", line=3, tag="#" + " req-Id:", need="TREQ_ID_1", full_line="#" + " req-Id: TREQ_ID_1", ), NeedLink( - file=Path("src/implementation1.py"), + file="implementation1.py", + path=Path("src"), + module="", line=8, tag="#" + " req-Id:", need="TREQ_ID_2", full_line="#" + " req-Id: TREQ_ID_2", ), NeedLink( - file=Path("src/implementation2.py"), + file="implementation2.py", + path=Path("src"), + module="", line=3, tag="#" + " 
req-Id:", need="TREQ_ID_1", @@ -551,7 +575,7 @@ def another_function(): for needlink in loaded_links: github_link = get_github_link(needlink) assert "https://github.com/test-user/test-repo/blob/" in github_link - assert f"src/{needlink.file.name}#L{needlink.line}" in github_link + assert f"{needlink.path}/{needlink.file}#L{needlink.line}" in github_link @add_test_properties( @@ -579,7 +603,9 @@ def test_multiple_commits_hash_consistency(git_repo: Path) -> None: # Test that links use the current hash needlink = NeedLink( - file=Path("new_file.py"), + file="new_file.py", + path=Path("."), + module="", line=1, tag="#" + " req-Id:", need="TEST_001", diff --git a/src/extensions/score_source_code_linker/tests/test_need_source_links.py b/src/extensions/score_source_code_linker/tests/test_need_source_links.py index df234a0b9..3ad28e6e1 100644 --- a/src/extensions/score_source_code_linker/tests/test_need_source_links.py +++ b/src/extensions/score_source_code_linker/tests/test_need_source_links.py @@ -72,7 +72,9 @@ def default(self, o: object) -> Any: @pytest.fixture def sample_needlink() -> NeedLink: return NeedLink( - file=Path("src/example.py"), + file="example.py", + path=Path("src"), + module="", line=10, tag="# req:", need="REQ_001", @@ -84,7 +86,9 @@ def sample_needlink() -> NeedLink: def sample_testlink() -> DataForTestLink: return DataForTestLink( name="test_example", - file=Path("tests/test_example.py"), + file="test_example.py", + path=Path("tests"), + module="", need="REQ_001", line=5, verify_type="partially", diff --git a/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py b/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py index 60bb98f80..5f0f06675 100644 --- a/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py +++ b/src/extensions/score_source_code_linker/tests/test_source_code_link_integration.py @@ -307,14 +307,18 @@ def 
example_source_link_text_all_ok(sphinx_base_dir: Path) -> dict[str, list[Nee return { "TREQ_ID_1": [ NeedLink( - file=Path("src/implementation2.py"), + file="implementation2.py", + path=Path("src"), + module="", line=5, tag="#" + " req-Id:", need="TREQ_ID_1", full_line="#" + " req-Id: TREQ_ID_1", ), NeedLink( - file=Path("src/implementation1.py"), + file="implementation1.py", + path=Path("src"), + module="", line=3, tag="#" + " req-Id:", need="TREQ_ID_1", @@ -323,7 +327,9 @@ def example_source_link_text_all_ok(sphinx_base_dir: Path) -> dict[str, list[Nee ], "TREQ_ID_2": [ NeedLink( - file=Path("src/implementation1.py"), + file="implementation1.py", + path=Path("src"), + module="", line=9, tag="#" + " req-Id:", need="TREQ_ID_2", @@ -340,7 +346,8 @@ def example_test_link_text_all_ok(sphinx_base_dir: Path): "TREQ_ID_1": [ DataForTestLink( name="TestRequirementsCoverage__test_system_startup_time", - file=Path("src/tests/testfile_2.py"), + file="testfile_2.py", + path=Path("src/tests"), need="TREQ_ID_1", line=25, verify_type="fully", @@ -351,7 +358,8 @@ def example_test_link_text_all_ok(sphinx_base_dir: Path): "TREQ_ID_2": [ DataForTestLink( name="test_api_response_format", - file=Path("src/testfile_1.py"), + file="testfile_1.py", + path=Path("src"), need="TREQ_ID_2", line=10, verify_type="partially", @@ -360,7 +368,8 @@ def example_test_link_text_all_ok(sphinx_base_dir: Path): ), DataForTestLink( name="test_error_handling", - file=Path("src/testfile_1.py"), + file="testfile_1.py", + path=Path("src"), need="TREQ_ID_2", line=38, verify_type="partially", @@ -371,7 +380,8 @@ def example_test_link_text_all_ok(sphinx_base_dir: Path): "TREQ_ID_3": [ DataForTestLink( name="test_api_response_format", - file=Path("src/testfile_1.py"), + file="testfile_1.py", + path=Path("src"), need="TREQ_ID_3", line=10, verify_type="partially", @@ -380,7 +390,8 @@ def example_test_link_text_all_ok(sphinx_base_dir: Path): ), DataForTestLink( name="test_error_handling", - 
file=Path("src/testfile_1.py"), + file="testfile_1.py", + path=Path("src"), need="TREQ_ID_3", line=38, verify_type="partially", @@ -397,7 +408,9 @@ def example_source_link_text_non_existent(sphinx_base_dir: Path): { "TREQ_ID_200": [ NeedLink( - file=Path("src/bad_implementation.py"), + file="bad_implementation.py", + path=Path("src"), + module="", line=2, tag="#" + " req-Id:", need="TREQ_ID_200", @@ -548,6 +561,8 @@ def test_source_link_integration_non_existent_id( try: app.build() warnings = app.warning.getvalue() + print("====================") + print("Warnings:\n", warnings) assert ( "src/bad_implementation.py:2: Could not find TREQ_ID_200 in documentation" in warnings diff --git a/src/extensions/score_source_code_linker/tests/test_testlink.py b/src/extensions/score_source_code_linker/tests/test_testlink.py index 74becef6b..fdaad7bd6 100644 --- a/src/extensions/score_source_code_linker/tests/test_testlink.py +++ b/src/extensions/score_source_code_linker/tests/test_testlink.py @@ -13,6 +13,8 @@ import json from pathlib import Path +import pytest + # This depends on the `attribute_plugin` in our tooling repository from attribute_plugin import add_test_properties # type: ignore[import-untyped] @@ -31,17 +33,58 @@ test_type="requirements-based", derivation_technique="requirements-analysis", ) -def test_testlink_serialization_roundtrip(): +def test_testlink_serialization_roundtrip_no_module(): """Ensure that Encode/Decode is reversible""" - link = DataForTestLink( + link_no_module = DataForTestLink( + name="my_test", + file="file.py", + path=Path("some"), + line=123, + need="REQ_001", + verify_type="fully", + result="passed", + result_text="All good", + ) + dumped = json.dumps(link_no_module, cls=DataForTestLink_JSON_Encoder) + loaded = json.loads(dumped, object_hook=DataForTestLink_JSON_Decoder) + + assert isinstance(loaded, DataForTestLink) + assert loaded == link_no_module + +DataForTestLinks =[ + DataForTestLink( + name="my_test", + file="file.py", + 
path=Path("some"), + line=123, + need="REQ_001", + verify_type="fully", + result="passed", + result_text="All good", + ), + DataForTestLink( name="my_test", - file=Path("some/file.py"), + file="file.py", + path=Path("some"), + module="some_module", line=123, need="REQ_001", verify_type="fully", result="passed", result_text="All good", ) +] + + + +@add_test_properties( + partially_verifies=["tool_req__docs_test_link_testcase"], + test_type="requirements-based", + derivation_technique="requirements-analysis", +) +@pytest.mark.parametrize("link", DataForTestLinks) +def test_testlink_serialization_roundtrip_with_module(link): + """Ensure that Encode/Decode is reversible""" dumped = json.dumps(link, cls=DataForTestLink_JSON_Encoder) loaded = json.loads(dumped, object_hook=DataForTestLink_JSON_Decoder) @@ -50,9 +93,10 @@ def test_testlink_serialization_roundtrip(): def test_testlink_encoder_handles_path(): - data = {"file": Path("some/thing.py")} + data = {"file": "thing.py","path": Path("some")} encoded = json.dumps(data, cls=DataForTestLink_JSON_Encoder) - assert '"file": "some/thing.py"' in encoded + assert '"file": "thing.py"' in encoded + assert '"path": "some"' in encoded @add_test_properties( @@ -93,7 +137,8 @@ def test_testcaseneed_to_dict_multiple_links(): """ case = DataOfTestCase( name="TC_01", - file="src/test.py", + file="test.py", + path=Path("src"), line="10", result="failed", TestType="unit", @@ -110,7 +155,8 @@ def test_testcaseneed_to_dict_multiple_links(): assert set(need_ids) == {"REQ-1", "REQ-2", "REQ-3"} for link in links: - assert link.file == Path("src/test.py") + assert link.file == "test.py" + assert link.path == Path("src") assert link.line == 10 assert link.name == "TC_01" assert link.result == "failed" @@ -128,7 +174,8 @@ def test_store_and_load_testlinks_roundtrip(tmp_path: Path): links = [ DataForTestLink( name="L1", - file=Path("abc.py"), + file="abc.py", + path=Path("src"), line=1, need="REQ_A", verify_type="partially", @@ -137,7 +184,9 
@@ def test_store_and_load_testlinks_roundtrip(tmp_path: Path): ), DataForTestLink( name="L2", - file=Path("def.py"), + file="def.py", + path=Path("src"), + module="some_module", line=2, need="REQ_B", verify_type="fully", diff --git a/src/extensions/score_source_code_linker/tests/test_xml_parser.py b/src/extensions/score_source_code_linker/tests/test_xml_parser.py index 95b445dde..c2f03ef80 100644 --- a/src/extensions/score_source_code_linker/tests/test_xml_parser.py +++ b/src/extensions/score_source_code_linker/tests/test_xml_parser.py @@ -90,7 +90,7 @@ def _tmp_xml_dirs( dir1 / "test.xml", name="tc_with_props", result="failed", - file="path1", + file="src/path1", line=10, props={ "PartiallyVerifies": "REQ1", @@ -102,14 +102,16 @@ def _tmp_xml_dirs( ) # File without properties - _write_test_xml(dir2 / "test.xml", name="tc_no_props", file="path2", line=20) + _write_test_xml( + dir2 / "test.xml", name="tc_no_props", file="src/path2", line=20 + ) # File with some properties that we don't care about _write_test_xml( dir3 / "test.xml", name="tc_with_extra_props", result="failed", - file="path1", + file="src/path1", line=10, props={ # Properties we do not parse should not throw an error @@ -128,7 +130,7 @@ def _tmp_xml_dirs( dir4 / "test.xml", name="tc_with_missing_props", result="failed", - file="path1", + file="src/path1", line=10, props={ # derivation_technique and test_type are missing @@ -156,6 +158,8 @@ def test_find_xml_files( root: Path dir1: Path dir2: Path + dir3: Path + dir4: Path root, dir1, dir2, dir3, dir4 = tmp_xml_dirs() found = xml_parser.find_xml_files(root) expected: set[Path] = { diff --git a/src/extensions/score_source_code_linker/xml_parser.py b/src/extensions/score_source_code_linker/xml_parser.py index d91358bdb..c7d34efb2 100644 --- a/src/extensions/score_source_code_linker/xml_parser.py +++ b/src/extensions/score_source_code_linker/xml_parser.py @@ -94,7 +94,8 @@ def read_test_xml_file(file: Path) -> tuple[list[DataOfTestCase], list[str], lis 
Returns: tuple consisting of: - list[TestCaseNeed] - - list[str] => Testcase Names that did not have the required properties. + - list[str] => Testcase Names that did not have any properties at all. + - list[str] => Testcase Names that did not have all of the req. properties. """ test_case_needs: list[DataOfTestCase] = [] non_prop_tests: list[str] = [] @@ -116,8 +117,20 @@ def read_test_xml_file(file: Path) -> tuple[list[DataOfTestCase], list[str], lis testname = "__".join([testcn, testcasename]) else: testname = testcasename - test_file = testcase.get("file") + test_file_complete = testcase.get("file", "") + # rsplit on a path without "/" yields one element; guard the unpack. + if "/" in test_file_complete: + test_path, test_file = test_file_complete.rsplit("/", maxsplit=1) + else: + test_path, test_file = None, (test_file_complete or None) line = testcase.get("line") + # Module can be None if we are not in a combo build + if "external" in str(file): + test_module = ( + str(file).split("external/")[-1].split("/")[0].removesuffix("+") + ) + else: + test_module = None # ╭──────────────────────────────────────╮ # │ Assert worldview that mandatory │ @@ -134,6 +147,8 @@ def read_test_xml_file(file: Path) -> tuple[list[DataOfTestCase], list[str], lis # "'lineNr' attribute. This is mandatory" # ) case_properties["name"] = testname + case_properties["module"] = test_module + case_properties["path"] = test_path case_properties["file"] = test_file case_properties["line"] = line case_properties["result"], case_properties["result_text"] = ( @@ -234,7 +249,7 @@ def build_test_needs_from_files( tcns: list[DataOfTestCase] = [] for file in xml_paths: # Last value can be ignored. 
The 'is_valid' function already prints infos - test_cases, tests_missing_all_props,_ = read_test_xml_file(file) + test_cases, tests_missing_all_props, _ = read_test_xml_file(file) non_prop_tests = ", ".join(n for n in tests_missing_all_props) if non_prop_tests: logger.info(f"Tests missing all properties: {non_prop_tests}") @@ -262,6 +277,7 @@ def construct_and_add_need(app: Sphinx, tn: DataOfTestCase): # and either 'Fully' or 'PartiallyVerifies' should not be None here assert tn.file is not None assert tn.name is not None + assert tn.path is not None # IDK if this is ideal or not with contextlib.suppress(BaseException): _ = add_external_need( @@ -278,7 +294,7 @@ def construct_and_add_need(app: Sphinx, tn: DataOfTestCase): else "", test_type=tn.TestType, derivation_technique=tn.DerivationTechnique, - file=tn.file, + file=tn.path / tn.file, line=tn.line, result=tn.result, # We just want the 'failed' or whatever result_text=tn.result_text if tn.result_text else "", diff --git a/src/helper_lib/BUILD b/src/helper_lib/BUILD index 62b597850..748a2a730 100644 --- a/src/helper_lib/BUILD +++ b/src/helper_lib/BUILD @@ -38,4 +38,5 @@ score_py_pytest( deps = [ ":helper_lib", ] + all_requirements, + pytest_config = "//:pyproject.toml", ) diff --git a/src/helper_lib/__init__.py b/src/helper_lib/__init__.py index 5699e478d..65b1ec5b7 100644 --- a/src/helper_lib/__init__.py +++ b/src/helper_lib/__init__.py @@ -218,3 +218,59 @@ def get_runfiles_dir() -> Path: "Have a look at README.md for instructions on how to build docs." ) return runfiles_dir + + +def parse_filename(filepath: Path, runfiles_dir: Path) -> tuple[Path, str, str, str]: + """ + Parse out the Module-Name from the filename gotten + /home/user/.cache/bazel/aksj37981712/external/score_docs_as_code+/src/tests/testfile.py + => score_docs_as_code + """ + + # COMBO BUILD + # If external is in the filepath that gets parsed => + # file is in an external module => combo build + # e.g. 
.../external/score_docs_as_code+/src/helper_lib/__init__.py + if "external" in str(filepath): + bazel_path = str(runfiles_dir.resolve()).split("/sandbox", maxsplit=1) + prefix = Path(bazel_path[0]) / "external" + filepath_split = str(filepath).removeprefix("external/").split("/", maxsplit=1) + module_name = str(filepath_split[0].removesuffix("+")) + # Split "dir/sub/file.py" into its directory part and filename. + if "/" in str(filepath_split[1]): + path_file_split = filepath_split[1].rsplit("/", maxsplit=1) + file_path = path_file_split[0] + file_name = path_file_split[1] + else: + file_path = "" + file_name = str(filepath_split[1]) + # LOCAL BUILD (external not in filepath) + # e.g. src/helper_lib/__init__.py + else: + # We have a non combo build and the file is local to this repo + # => can use the git root to find the root + + ws_root = find_ws_root() + if ws_root is None: + # Local running during `:sourcelinks_json` build + bazel_path = str(runfiles_dir.resolve()).split("/sandbox", maxsplit=1) + prefix = Path(bazel_path[0] + "/execroot/_main") + else: + # This happens in local running inside sphinx + prefix = find_git_root() + module_name = "" + if "/" in str(filepath): + path_file_split = str(filepath).rsplit("/", maxsplit=1) + file_path = path_file_split[0] + file_name = path_file_split[1] + else: + file_path = "" + file_name = str(filepath) + + # Assert worldview + assert prefix is not None + assert module_name is not None + assert file_path is not None + assert file_name is not None + + return prefix, module_name, file_path, file_name diff --git a/src/helper_lib/additional_functions.py b/src/helper_lib/additional_functions.py index 5b1ce6d98..fa791554b 100644 --- a/src/helper_lib/additional_functions.py +++ b/src/helper_lib/additional_functions.py @@ -10,7 +10,9 @@ # # SPDX-License-Identifier: Apache-2.0 # ******************************************************************************* +import json from pathlib import Path +from typing import TypedDict # Import 
types that depend on score_source_code_linker from src.extensions.score_source_code_linker.needlinks import DefaultNeedLink, NeedLink @@ -25,14 +27,46 @@ ) +class ModuleInfo(TypedDict): + hash: str + repo: str + + def get_github_link( link: NeedLink | DataForTestLink | DataOfTestCase | None = None, + known_json: dict[str, ModuleInfo] | None = None, ) -> str: if link is None: link = DefaultNeedLink() - passed_git_root = find_git_root() - if passed_git_root is None: - passed_git_root = Path() - base_url = get_github_base_url() - current_hash = get_current_git_hash(passed_git_root) - return f"{base_url}/blob/{current_hash}/{link.file}#L{link.line}" + + if known_json is not None and link.module is not None: + # Using the parsed know_good json file as source of truth + # We also have to check for link.module being + # not none as for example 'ref-int' could have links. + # And then we would not find them in the known_json + # and have to go the normal route + module_info = known_json[link.module] + current_hash = module_info["hash"] + base_url = module_info["repo"].removesuffix(".git") + else: + # Fall back to git discovery for local links + passed_git_root = find_git_root() + if passed_git_root is None: + passed_git_root = Path() + base_url = get_github_base_url() + current_hash = get_current_git_hash(passed_git_root) + + return f"{base_url}/blob/{current_hash}/{link.path}/{link.file}#L{link.line}" + + +def get_module_has_from_known_good_json(known_good_path: Path) -> dict[str, ModuleInfo]: + with open(known_good_path) as f: + known_good_json = json.load(f) # pyright: ignore[reportAny] It's a nested json we do not know the final struct of + modules: dict[str, ModuleInfo] = {} + for category in known_good_json["modules"].values(): # pyright: ignore[reportAny] These should only be strings + for module_name, module_data in category.items(): # pyright: ignore[reportAny] These should only be strings + modules[module_name] = { + "hash": module_data["hash"], + "repo": 
module_data["repo"], + } + return modules diff --git a/src/helper_lib/test_helper_lib.py b/src/helper_lib/test_helper_lib.py index 0486821a5..4ef62108b 100644 --- a/src/helper_lib/test_helper_lib.py +++ b/src/helper_lib/test_helper_lib.py @@ -16,12 +16,14 @@ from pathlib import Path import pytest +from pytest import MonkeyPatch from src.helper_lib import ( config_setdefault, get_current_git_hash, get_github_repo_info, get_runfiles_dir, + parse_filename, parse_remote_git_output, ) @@ -313,3 +315,285 @@ def test_git_root_search_not_found(tmp_path: Path, monkeypatch: pytest.MonkeyPat get_runfiles_dir() assert "Could not find git root" in str(excinfo.value) os.environ.pop("RUNFILES_DIR", None) + + +# ╭──────────────────────────────────────────────────────────────────────────────╮ +# │ GENERATED TESTS. Manually reviewed though may need further tweaking to │ +# │ catch more branches / edge cases │ +# ╰──────────────────────────────────────────────────────────────────────────────╯ + + +# COMBO BUILD TESTS (External Modules) + + +def test_external_module_basic(tmp_path: Path) -> None: + """Test parsing a file from an external module (combo build).""" + # Simulate bazel cache structure + bazel_cache = ( + tmp_path + / ".cache" + / "bazel" + / "_bazel_user" + / "58de169282104bb8c73a59023f615bcd" + ) + sandbox_base = ( + bazel_cache / "sandbox" / "linux-sandbox" / "42" / "execroot" / "_main" + ) + external_dir = sandbox_base / "external" + external_dir.mkdir(parents=True, exist_ok=True) + + runfiles_dir = ( + sandbox_base / "bazel-out" / "k8-fastbuild" / "bin" / "ide_support.runfiles" + ) + runfiles_dir.mkdir(parents=True, exist_ok=True) + + filepath = Path("external/score_docs_as_code+/src/tests/testfile.py") + + prefix, module_name, file_path, file_name = parse_filename(filepath, runfiles_dir) + + # Verify all components + assert module_name == "score_docs_as_code" + assert file_path == "src/tests" + assert file_name == "testfile.py" + + # Verify prefix points to the bazel 
cache external directory (NOT sandbox/execroot) + # The function extracts the path up to but not including /sandbox + expected_prefix = bazel_cache / "external" + assert prefix == expected_prefix + + +def test_external_module_root_file(tmp_path: Path) -> None: + """Test parsing a file at the root of an external module.""" + bazel_cache = tmp_path / ".cache" / "bazel" / "_bazel_user" / "abc123" + sandbox_base = bazel_cache / "sandbox" / "linux" / "1" / "execroot" / "_main" + external_dir = sandbox_base / "external" + external_dir.mkdir(parents=True, exist_ok=True) + + runfiles_dir = sandbox_base / "bazel-bin" / "test.runfiles" + runfiles_dir.mkdir(parents=True, exist_ok=True) + + filepath = Path("external/some_lib+/BUILD.bazel") + + prefix, module_name, file_path, file_name = parse_filename(filepath, runfiles_dir) + + assert module_name == "some_lib" + assert file_path == "" + assert file_name == "BUILD.bazel" + + # Verify prefix points to bazel cache external directory + expected_prefix = bazel_cache / "external" + assert prefix == expected_prefix + + +def test_external_module_special_chars(tmp_path: Path) -> None: + """Test parsing module names with hyphens, underscores, and multiple plus signs.""" + bazel_cache = tmp_path / ".cache" / "bazel" / "_bazel_test" / "hash123" + sandbox_base = bazel_cache / "sandbox" / "worker" / "99" / "execroot" / "_main" + external_dir = sandbox_base / "external" + external_dir.mkdir(parents=True, exist_ok=True) + + runfiles_dir = sandbox_base / "bin" / "test.runfiles" + runfiles_dir.mkdir(parents=True, exist_ok=True) + + filepath = Path("external/my-special_module+v2+/src/file.py") + + prefix, module_name, file_path, file_name = parse_filename(filepath, runfiles_dir) + + # removesuffix only removes the trailing '+' + assert module_name == "my-special_module+v2" + assert file_path == "src" + assert file_name == "file.py" + + # Verify prefix + expected_prefix = bazel_cache / "external" + assert prefix == expected_prefix + + +# LOCAL 
BUILD TESTS (With Workspace) + + +def test_local_with_workspace(tmp_path: Path, monkeypatch: MonkeyPatch) -> None: + """Test parsing a local file when workspace root is available.""" + workspace = tmp_path / "my_project" + workspace.mkdir() + git_dir = workspace / ".git" + git_dir.mkdir() + + runfiles_dir = tmp_path / "bazel-bin" / "ide_support.runfiles" + runfiles_dir.mkdir(parents=True, exist_ok=True) + + monkeypatch.setenv("BUILD_WORKSPACE_DIRECTORY", str(workspace)) + + filepath = Path("src/tests/test_example.py") + + prefix, module_name, file_path, file_name = parse_filename(filepath, runfiles_dir) + + assert module_name == "" + assert file_path == "src/tests" + assert file_name == "test_example.py" + + # Verify prefix is the git root (workspace) + assert prefix == workspace + assert prefix.exists() + assert (prefix / ".git").exists() + + +def test_local_root_file_with_workspace( + tmp_path: Path, monkeypatch: MonkeyPatch +) -> None: + """Test parsing a file at the repository root.""" + workspace = tmp_path / "repo" + workspace.mkdir() + git_dir = workspace / ".git" + git_dir.mkdir() + + runfiles_dir = tmp_path / "bazel-bin" / "build.runfiles" + runfiles_dir.mkdir(parents=True, exist_ok=True) + + monkeypatch.setenv("BUILD_WORKSPACE_DIRECTORY", str(workspace)) + + filepath = Path("BUILD.bazel") + + prefix, module_name, file_path, file_name = parse_filename(filepath, runfiles_dir) + + assert module_name == "" + assert file_path == "" + assert file_name == "BUILD.bazel" + + # Verify prefix is the workspace root + assert prefix == workspace + assert (prefix / ".git").exists() + + +# LOCAL BUILD TESTS (Without Workspace) + + +def test_local_without_workspace_uses_execroot( + tmp_path: Path, monkeypatch: MonkeyPatch +) -> None: + """Test parsing a local file when workspace root is None (uses execroot fallback). 
+ + This simulates the actual behavior where: + - BUILD_WORKSPACE_DIRECTORY is not set + - prefix = /home/user/.cache/bazel/_bazel_user/hash/execroot/_main + """ + monkeypatch.delenv("BUILD_WORKSPACE_DIRECTORY", raising=False) + + # Simulate real bazel cache structure + bazel_cache = ( + tmp_path + / ".cache" + / "bazel" + / "_bazel_maximilianp" + / "4ac366a7bf0ecac1b3be0bef35848a2a" + ) + execroot = bazel_cache / "execroot" / "_main" + execroot.mkdir(parents=True, exist_ok=True) + + # Runfiles dir is inside sandbox, but we're checking the execroot path + sandbox_base = ( + bazel_cache / "sandbox" / "linux-sandbox" / "123" / "execroot" / "_main" + ) + runfiles_dir = sandbox_base / "bazel-out" / "k8-fastbuild" / "bin" / "test.runfiles" + runfiles_dir.mkdir(parents=True, exist_ok=True) + + filepath = Path("src/helper_lib/test_helper_lib.py") + + prefix, module_name, file_path, file_name = parse_filename(filepath, runfiles_dir) + + assert module_name == "" + assert file_path == "src/helper_lib" + assert file_name == "test_helper_lib.py" + + # Verify prefix points to execroot/_main (the path before /sandbox) + expected_prefix = bazel_cache / "execroot" / "_main" + assert prefix == expected_prefix + + +def test_local_root_file_without_workspace( + tmp_path: Path, monkeypatch: MonkeyPatch +) -> None: + """Test parsing a root file when workspace is None.""" + monkeypatch.delenv("BUILD_WORKSPACE_DIRECTORY", raising=False) + + bazel_cache = tmp_path / ".cache" / "bazel" / "_bazel_test" / "xyz789" + execroot = bazel_cache / "execroot" / "_main" + execroot.mkdir(parents=True, exist_ok=True) + + sandbox_base = bazel_cache / "sandbox" / "worker" / "42" / "execroot" / "_main" + runfiles_dir = sandbox_base / "bazel-bin" / "app.runfiles" + runfiles_dir.mkdir(parents=True, exist_ok=True) + + filepath = Path("README.md") + + prefix, module_name, file_path, file_name = parse_filename(filepath, runfiles_dir) + + assert module_name == "" + assert file_path == "" + assert file_name == 
"README.md" + + # Verify prefix is execroot/_main (before sandbox) + expected_prefix = bazel_cache / "execroot" / "_main" + assert prefix == expected_prefix + + +# EDGE CASES + + +def test_path_reconstruction_external(tmp_path: Path) -> None: + """Test that we can reconstruct paths from parsed components (external).""" + bazel_cache = ( + tmp_path + / ".cache" + / "bazel" + / "_bazel_user" + / "58de169282104bb8c73a59023f615bcd" + ) + sandbox_base = bazel_cache / "sandbox" / "execroot" / "_main" + external_dir = sandbox_base / "external" + external_dir.mkdir(parents=True, exist_ok=True) + + runfiles_dir = sandbox_base / "bazel-bin" / "test.runfiles" + runfiles_dir.mkdir(parents=True, exist_ok=True) + + original_path = "src/tests/test_file.py" + filepath = Path(f"external/awesome_module+/{original_path}") + + prefix, module_name, file_path, file_name = parse_filename(filepath, runfiles_dir) + + assert module_name == "awesome_module" + reconstructed = f"{file_path}/{file_name}" if file_path else file_name + assert reconstructed == original_path + + # Verify we can construct the full path using prefix and module_name + full_path = prefix / module_name / file_path / file_name + expected_full_path = ( + bazel_cache / "external" / "awesome_module" / "src" / "tests" / "test_file.py" + ) + assert full_path == expected_full_path + + +def test_path_reconstruction_local(tmp_path: Path, monkeypatch: MonkeyPatch) -> None: + """Test that we can reconstruct paths from parsed components (local).""" + workspace = tmp_path / "workspace" + workspace.mkdir() + git_dir = workspace / ".git" + git_dir.mkdir() + + runfiles_dir = tmp_path / "bazel-bin" / "test.runfiles" + runfiles_dir.mkdir(parents=True, exist_ok=True) + + monkeypatch.setenv("BUILD_WORKSPACE_DIRECTORY", str(workspace)) + + original_path = "src/core/utils/helper.py" + filepath = Path(original_path) + + prefix, module_name, file_path, file_name = parse_filename(filepath, runfiles_dir) + + reconstructed = 
f"{file_path}/{file_name}" if file_path else file_name + assert reconstructed == original_path + assert module_name == "" + + # Verify we can construct the full path using prefix + full_path = prefix / file_path / file_name + assert full_path == workspace / "src" / "core" / "utils" / "helper.py"