From e0af091fc3d57855c3beb0c124c1f6fbebbb411d Mon Sep 17 00:00:00 2001
From: acuanico-tr-galt
Date: Tue, 4 Nov 2025 14:45:30 +0800
Subject: [PATCH 01/33] TRCLI-160 Updated readme, changelog and version files for release 1.13.0

---
 CHANGELOG.MD      | 7 +++++++
 README.md         | 4 +++-
 trcli/__init__.py | 2 +-
 3 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.MD b/CHANGELOG.MD
index 485784d..a49c9ab 100644
--- a/CHANGELOG.MD
+++ b/CHANGELOG.MD
@@ -6,6 +6,13 @@ This project adheres to [Semantic Versioning](https://semver.org/). Version numb
 - **MINOR**: New features that are backward-compatible.
 - **PATCH**: Bug fixes or minor changes that do not affect backward compatibility.
 
+## [1.13.0]
+
+_released 12-01-2025
+
+### Added
+ - Added new BDD/Gherkin parser command parse_bdd for behavior-driven development (BDD) testing
+
 ## [1.12.4]
 
 _released 11-03-2025
diff --git a/README.md b/README.md
index 863573e..adda450 100644
--- a/README.md
+++ b/README.md
@@ -33,10 +33,11 @@ trcli
 ```
 You should get something like this:
 ```
-TestRail CLI v1.12.4
+TestRail CLI v1.13.0
 Copyright 2025 Gurock Software GmbH - www.gurock.com
 Supported and loaded modules:
   - parse_junit: JUnit XML Files (& Similar)
+  - parse_bdd: Gherkin .feature files
   - parse_robot: Robot Framework XML Files
   - parse_openapi: OpenAPI YML Files
   - add_run: Create a new empty test run
@@ -85,6 +86,7 @@ Commands:
   add_run        Add a new test run in TestRail
   labels         Manage labels in TestRail
   parse_junit    Parse JUnit report and upload results to TestRail
+  parse_bdd      Parse Gherkin .feature files and upload results to TestRail
   parse_openapi  Parse OpenAPI spec and create cases in TestRail
   parse_robot    Parse Robot Framework report and upload results to TestRail
   references     Manage references in TestRail
diff --git a/trcli/__init__.py b/trcli/__init__.py
index 19ee973..9a34ccc 100644
--- a/trcli/__init__.py
+++ b/trcli/__init__.py
@@ -1 +1 @@
-__version__ = "1.12.4"
+__version__ = "1.13.0"

From 783490df43d119a8ee9a0ee64587072b7cda6772 Mon Sep 17 00:00:00 2001
From: acuanico-tr-galt
Date: Tue, 4 Nov 2025 16:38:40 +0800
Subject: [PATCH 02/33] TRCLI-190 Updated project dependency for gherkin parsing, added test data and gherkin test script

---
 quick_test_gherkin.py                        |  66 +++++++++++
 setup.py                                     |   5 +-
 tests/test_data/FEATURE/sample_login.feature |  41 +++++++
 verify_gherkin_parsing.py                    | 115 +++++++++++++++++++
 4 files changed, 225 insertions(+), 2 deletions(-)
 create mode 100644 quick_test_gherkin.py
 create mode 100644 tests/test_data/FEATURE/sample_login.feature
 create mode 100644 verify_gherkin_parsing.py

diff --git a/quick_test_gherkin.py b/quick_test_gherkin.py
new file mode 100644
index 0000000..b7dbe53
--- /dev/null
+++ b/quick_test_gherkin.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python3
+"""Quick test script to parse any .feature file"""
+
+import sys
+from pathlib import Path
+from gherkin.parser import Parser
+from gherkin.token_scanner import TokenScanner
+import json
+
+
+def parse_feature(filepath):
+    """Parse a Gherkin feature file and display key information."""
+    with open(filepath, "r", encoding="utf-8") as f:
+        feature_text = f.read()
+
+    parser = Parser()
+    scanner = TokenScanner(feature_text)
+    doc = parser.parse(scanner)
+
+    feature = doc["feature"]
+
+    print(f"\n{'='*60}")
+    print(f"Feature: {feature['name']}")
+    print(f"{'='*60}")
+
+    # Count elements
+    scenarios = [c for c in feature["children"] if "scenario" in c]
+    backgrounds = [c for c in feature["children"] if "background" in c]
+
+    print(f"\nSummary:")
+    print(f"  Backgrounds: 
{len(backgrounds)}") + print(f" Scenarios: {len(scenarios)}") + + print(f"\nScenarios:") + for idx, child in enumerate(scenarios, 1): + scenario = child["scenario"] + tags = [tag["name"] for tag in scenario.get("tags", [])] + steps = scenario.get("steps", []) + examples = scenario.get("examples", []) + + scenario_type = "Scenario Outline" if examples else "Scenario" + print(f" {idx}. [{scenario_type}] {scenario['name']}") + print(f" Tags: {', '.join(tags) if tags else 'None'}") + print(f" Steps: {len(steps)}") + if examples: + total_examples = sum(len(ex.get("tableBody", [])) for ex in examples) + print(f" Example rows: {total_examples}") + + return doc + + +if __name__ == "__main__": + if len(sys.argv) > 1: + feature_file = Path(sys.argv[1]) + else: + # Default to sample file + feature_file = Path(__file__).parent / "tests" / "test_data" / "FEATURE" / "sample_login.feature" + + if not feature_file.exists(): + print(f"Error: File not found: {feature_file}") + sys.exit(1) + + parse_feature(feature_file) + print(f"\n{'='*60}") + print("✓ Parsing successful!") + print(f"{'='*60}\n") diff --git a/setup.py b/setup.py index 8ca4a94..7b98ee0 100644 --- a/setup.py +++ b/setup.py @@ -15,7 +15,7 @@ ], include_package_data=True, install_requires=[ - "click>=8.1.0,<8.2.2", # Note: click version 8.2.2 is yanked as of Aug 2, 2025! + "click>=8.1.0,<8.2.2", # Note: click version 8.2.2 is yanked as of Aug 2, 2025! "pyyaml>=6.0.0,<7.0.0", "junitparser>=3.1.0,<4.0.0", "pyserde==0.12.*", @@ -24,7 +24,8 @@ "humanfriendly>=10.0.0,<11.0.0", "openapi-spec-validator>=0.5.0,<1.0.0", "beartype>=0.17.0,<1.0.0", - "prance" # Does not use semantic versioning + "prance", # Does not use semantic versioning + "gherkin-official>=27.0.0,<28.0.0", # Gherkin/BDD feature file parser ], entry_points=""" [console_scripts] diff --git a/tests/test_data/FEATURE/sample_login.feature b/tests/test_data/FEATURE/sample_login.feature new file mode 100644 index 0000000..e0287b4 --- /dev/null +++ b/tests/test_data/FEATURE/sample_login.feature @@ -0,0 +1,41 @@ +Feature: User Login + As a registered user + I want to log in to the application + So that I can access my account + + Background: + Given the application is running + And I am on the login page + + @smoke @authentication + Scenario: Successful login with valid credentials + Given I have a valid username "testuser" + And I have a valid password "password123" + When I enter my credentials + And I click the login button + Then I should be redirected to the dashboard + And I should see a welcome message "Welcome, testuser" + + @negative @authentication + Scenario: Failed login with invalid password + Given I have a valid username "testuser" + And I have an invalid password "wrongpassword" + When I enter my credentials + And I click the login button + Then I should see an error message "Invalid credentials" + And I should remain on the login page + + @edge-case + Scenario Outline: Login attempts with various credentials + Given I have username "" + And I have password "" + When I enter my credentials + And I click the login button + Then I should see result "" + + Examples: + | username | password | result | + | admin | admin123 | Dashboard | + | testuser | test123 | Dashboard | + | invalid | invalid123 | Invalid credentials | + | empty | | Password required | diff --git a/verify_gherkin_parsing.py b/verify_gherkin_parsing.py new file mode 100644 index 0000000..51db0e2 --- /dev/null +++ b/verify_gherkin_parsing.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python3 +""" +Verification script for 
gherkin-official library parsing capabilities. +This script tests the parsing of .feature files and displays the parsed structure. +""" + +import json +from pathlib import Path +from gherkin.parser import Parser +from gherkin.token_scanner import TokenScanner + + +def parse_feature_file(feature_path: Path): + """Parse a Gherkin .feature file and return the parsed document.""" + print(f"\n{'='*80}") + print(f"Parsing: {feature_path.name}") + print(f"{'='*80}\n") + + try: + # Read the feature file + with open(feature_path, "r", encoding="utf-8") as f: + feature_text = f.read() + + # Parse using gherkin-official + parser = Parser() + token_scanner = TokenScanner(feature_text) + gherkin_document = parser.parse(token_scanner) + + # Display parsed structure + print("✓ Successfully parsed feature file!\n") + + # Extract key information + feature = gherkin_document.get("feature") + if feature: + print(f"Feature Name: {feature.get('name')}") + print(f"Description: {feature.get('description', 'N/A')}") + print(f"Language: {feature.get('language', 'en')}") + print(f"Tags: {[tag['name'] for tag in feature.get('tags', [])]}") + + # Count scenarios + scenarios = [child for child in feature.get("children", []) if child.get("scenario")] + scenario_outlines = [child for child in feature.get("children", []) if child.get("scenarioOutline")] + background = [child for child in feature.get("children", []) if child.get("background")] + + print(f"\nStructure:") + print(f" - Background: {len(background)}") + print(f" - Scenarios: {len(scenarios)}") + print(f" - Scenario Outlines: {len(scenario_outlines)}") + + # Display scenarios + print(f"\nScenarios Found:") + for idx, child in enumerate(feature.get("children", []), 1): + if child.get("scenario"): + scenario = child["scenario"] + tags = [tag["name"] for tag in scenario.get("tags", [])] + steps = scenario.get("steps", []) + print(f" {idx}. {scenario.get('name')} (Tags: {tags})") + print(f" Steps: {len(steps)}") + elif child.get("scenarioOutline"): + outline = child["scenarioOutline"] + tags = [tag["name"] for tag in outline.get("tags", [])] + examples = outline.get("examples", []) + print(f" {idx}. 
{outline.get('name')} (Outline, Tags: {tags})") + print(f" Examples rows: {len(examples[0].get('tableBody', [])) if examples else 0}") + + # Display full parsed document (formatted JSON) + print(f"\n{'-'*80}") + print("Full Parsed Document (JSON):") + print(f"{'-'*80}") + print(json.dumps(gherkin_document, indent=2)) + + return gherkin_document + + except Exception as e: + print(f"✗ Error parsing feature file: {e}") + raise + + +def main(): + """Main function to test gherkin parsing.""" + print("\n" + "=" * 80) + print("GHERKIN-OFFICIAL LIBRARY VERIFICATION") + print("=" * 80) + + # Test with the sample login feature + feature_path = Path(__file__).parent / "tests" / "test_data" / "FEATURE" / "sample_login.feature" + + if not feature_path.exists(): + print(f"\n✗ Feature file not found: {feature_path}") + return 1 + + try: + gherkin_doc = parse_feature_file(feature_path) + + print(f"\n{'='*80}") + print("✓ VERIFICATION SUCCESSFUL!") + print(f"{'='*80}") + print("\nKey Findings:") + print(" - gherkin-official library is working correctly") + print(" - Feature files can be parsed successfully") + print(" - Scenarios, steps, tags, and examples are extracted properly") + print(" - Ready for integration into TRCLI parser") + + return 0 + + except Exception as e: + print(f"\n{'='*80}") + print("✗ VERIFICATION FAILED!") + print(f"{'='*80}") + print(f"\nError: {e}") + return 1 + + +if __name__ == "__main__": + exit(main()) From c3b0749b9d8d1ee66107732ff90eb16e7a38ba0b Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 6 Nov 2025 14:50:12 +0800 Subject: [PATCH 03/33] TRCLI-191 Add initial gherkin parser to TRCLI --- README.md | 4 +- tests/test_data/cli_test_data.py | 16 +- tests/test_gherkin_parser.py | 179 +++++++++++++++++++ trcli/commands/cmd_parse_gherkin.py | 166 +++++++++++++++++ trcli/constants.py | 32 ++-- trcli/readers/gherkin_parser.py | 268 ++++++++++++++++++++++++++++ 6 files changed, 642 insertions(+), 23 deletions(-) create mode 100644 tests/test_gherkin_parser.py create mode 100644 trcli/commands/cmd_parse_gherkin.py create mode 100644 trcli/readers/gherkin_parser.py diff --git a/README.md b/README.md index adda450..5576efb 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ TestRail CLI v1.13.0 Copyright 2025 Gurock Software GmbH - www.gurock.com Supported and loaded modules: - parse_junit: JUnit XML Files (& Similar) - - parse_bdd: Gherkin .feature files + - parse_gherkin: Gherkin .feature files (BDD) - parse_robot: Robot Framework XML Files - parse_openapi: OpenAPI YML Files - add_run: Create a new empty test run @@ -86,7 +86,7 @@ Commands: add_run Add a new test run in TestRail labels Manage labels in TestRail parse_junit Parse JUnit report and upload results to TestRail - parse_bdd Parse Gherkin .feature files and upload results to TestRail + parse_gherkin Parse Gherkin .feature files and upload results to TestRail parse_openapi Parse OpenAPI spec and create cases in TestRail parse_robot Parse Robot Framework report and upload results to TestRail references Manage references in TestRail diff --git a/tests/test_data/cli_test_data.py b/tests/test_data/cli_test_data.py index 756feae..a346dd7 100644 --- a/tests/test_data/cli_test_data.py +++ b/tests/test_data/cli_test_data.py @@ -62,11 +62,15 @@ "key": "key_from_custom_config", } -trcli_description = ('Supported and loaded modules:\n' - ' - parse_junit: JUnit XML Files (& Similar)\n' - ' - parse_robot: Robot Framework XML Files\n' - ' - parse_openapi: OpenAPI YML Files\n' - ' - add_run: Create a new test run\n' - ' - 
labels: Manage labels (projects, cases, and tests)\n') +trcli_description = ( + "Supported and loaded modules:\n" + " - parse_junit: JUnit XML Files (& Similar)\n" + " - parse_gherkin: Gherkin .feature files (BDD)\n" + " - parse_robot: Robot Framework XML Files\n" + " - parse_openapi: OpenAPI YML Files\n" + " - add_run: Create a new test run\n" + " - labels: Manage labels (add, update, delete, list)\n" + " - references: Manage references (cases and runs)\n" +) trcli_help_description = "TestRail CLI" diff --git a/tests/test_gherkin_parser.py b/tests/test_gherkin_parser.py new file mode 100644 index 0000000..496a4bc --- /dev/null +++ b/tests/test_gherkin_parser.py @@ -0,0 +1,179 @@ +import pytest +from pathlib import Path +from trcli.cli import Environment +from trcli.data_classes.data_parsers import MatchersParser +from trcli.readers.gherkin_parser import GherkinParser + + +class TestGherkinParser: + """Tests for Gherkin .feature file parser""" + + @pytest.fixture + def sample_feature_path(self): + """Path to the sample login feature file""" + return Path(__file__).parent / "test_data" / "FEATURE" / "sample_login.feature" + + @pytest.fixture + def environment(self, sample_feature_path): + """Create a test environment""" + env = Environment() + env.file = str(sample_feature_path) + env.case_matcher = MatchersParser.AUTO + env.suite_name = None + env.verbose = False + return env + + @pytest.mark.parse_gherkin + def test_gherkin_parser_sample_file(self, environment, sample_feature_path): + """Test parsing of sample_login.feature""" + # Ensure file exists + assert sample_feature_path.exists(), f"Sample file not found: {sample_feature_path}" + + # Create parser and parse + parser = GherkinParser(environment) + suites = parser.parse_file() + + # Verify structure + assert suites is not None + assert len(suites) == 1, "Should parse into exactly one suite" + + suite = suites[0] + assert suite.name == "User Login" + assert suite.source == "sample_login.feature" + + # Check sections + assert len(suite.testsections) == 1 + section = suite.testsections[0] + assert section.name == "User Login" + + # Check background stored as property + assert section.properties is not None + assert len(section.properties) > 0 + background_prop = section.properties[0] + assert background_prop.name == "background" + assert "the application is running" in background_prop.value + + # Check test cases (should have expanded scenario outline) + # Expected: 2 regular scenarios + 4 scenario outline examples = 6 total + assert len(section.testcases) >= 2, "Should have at least 2 test cases" + + # Verify first test case structure + first_case = section.testcases[0] + assert first_case.title is not None + assert first_case.custom_automation_id is not None + assert first_case.result is not None + assert len(first_case.result.custom_step_results) > 0 + + @pytest.mark.parse_gherkin + def test_gherkin_parser_scenario_parsing(self, environment, sample_feature_path): + """Test that scenarios are correctly parsed with steps""" + parser = GherkinParser(environment) + suites = parser.parse_file() + + suite = suites[0] + section = suite.testsections[0] + test_cases = section.testcases + + # Find the "Successful login" scenario + successful_login_case = None + for case in test_cases: + if "Successful login" in case.title: + successful_login_case = case + break + + assert successful_login_case is not None, "Should find 'Successful login' test case" + + # Verify steps + steps = successful_login_case.result.custom_step_results + assert 
len(steps) == 6, "Successful login scenario should have 6 steps" + + # Check first step + first_step = steps[0] + assert "Given" in first_step.content + assert "valid username" in first_step.content + + @pytest.mark.parse_gherkin + def test_gherkin_parser_tags_in_automation_id(self, environment, sample_feature_path): + """Test that tags are included in automation ID""" + parser = GherkinParser(environment) + suites = parser.parse_file() + + suite = suites[0] + section = suite.testsections[0] + test_cases = section.testcases + + # Find a case with tags + tagged_case = None + for case in test_cases: + if "@smoke" in case.custom_automation_id or "@authentication" in case.custom_automation_id: + tagged_case = case + break + + assert tagged_case is not None, "Should find a test case with tags in automation_id" + assert "@" in tagged_case.custom_automation_id, "Automation ID should contain tags" + + @pytest.mark.parse_gherkin + def test_gherkin_parser_scenario_outline_expansion(self, environment, sample_feature_path): + """Test that Scenario Outlines are expanded into multiple test cases""" + parser = GherkinParser(environment) + suites = parser.parse_file() + + suite = suites[0] + section = suite.testsections[0] + test_cases = section.testcases + + # Find scenario outline examples + outline_examples = [case for case in test_cases if "Example" in case.title] + + assert len(outline_examples) >= 4, "Should have at least 4 example cases from Scenario Outline" + + # Verify example case has parameters + example_case = outline_examples[0] + assert "example_params" in example_case.case_fields + assert example_case.result is not None + + @pytest.mark.parse_gherkin + def test_gherkin_parser_with_custom_suite_name(self, environment, sample_feature_path): + """Test parser with custom suite name""" + environment.suite_name = "Custom Suite Name" + + parser = GherkinParser(environment) + suites = parser.parse_file() + + assert suites[0].name == "Custom Suite Name" + + @pytest.mark.parse_gherkin + def test_gherkin_parser_case_matcher_name(self, environment, sample_feature_path): + """Test parser with NAME case matcher""" + environment.case_matcher = MatchersParser.NAME + + parser = GherkinParser(environment) + suites = parser.parse_file() + + # Should parse without errors + assert suites is not None + assert len(suites) == 1 + + @pytest.mark.parse_gherkin + def test_gherkin_parser_missing_file(self): + """Test parser with non-existent file""" + env = Environment() + env.file = "nonexistent.feature" + env.case_matcher = MatchersParser.AUTO + + with pytest.raises(FileNotFoundError): + parser = GherkinParser(env) + + @pytest.mark.parse_gherkin + def test_gherkin_parser_all_steps_untested(self, environment, sample_feature_path): + """Test that all steps are marked as untested by default""" + parser = GherkinParser(environment) + suites = parser.parse_file() + + suite = suites[0] + section = suite.testsections[0] + + for test_case in section.testcases: + assert test_case.result.status_id == 3, "Result status should be 3 (Untested)" + for step in test_case.result.custom_step_results: + assert step.status_id == 3, "All steps should be untested (status_id=3)" diff --git a/trcli/commands/cmd_parse_gherkin.py b/trcli/commands/cmd_parse_gherkin.py new file mode 100644 index 0000000..fedcad2 --- /dev/null +++ b/trcli/commands/cmd_parse_gherkin.py @@ -0,0 +1,166 @@ +import json +import click + +from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS +from trcli.constants import FAULT_MAPPING +from 
trcli.readers.gherkin_parser import GherkinParser +from serde import to_dict + + +@click.command(context_settings=CONTEXT_SETTINGS) +@click.option( + "-f", + "--file", + type=click.Path(exists=True), + metavar="", + required=True, + help="Path to Gherkin .feature file to parse.", +) +@click.option("--output", type=click.Path(), metavar="", help="Optional output file path to save parsed JSON.") +@click.option("--pretty", is_flag=True, help="Pretty print JSON output with indentation.") +@click.option( + "--case-matcher", + metavar="", + default="auto", + type=click.Choice(["auto", "name", "property"], case_sensitive=False), + help="Mechanism to match cases between the report and TestRail.", +) +@click.option("--suite-name", metavar="", help="Override suite name (defaults to feature name).") +@click.option("-v", "--verbose", is_flag=True, help="Enable verbose logging output.") +@click.pass_context +@pass_environment +def cli(environment: Environment, context: click.Context, file: str, output: str, pretty: bool, **kwargs): + """Parse Gherkin .feature files + + This command parses Gherkin/BDD .feature files and converts them into + TestRail data structure format. + + """ + environment.cmd = "parse_gherkin" + environment.file = file + environment.case_matcher = kwargs.get("case_matcher", "auto").upper() + environment.suite_name = kwargs.get("suite_name") + + # Set up logging + if kwargs.get("verbose"): + environment.verbose = True + + try: + # Parse the feature file + if environment.verbose: + environment.log(f"Starting Gherkin parser for file: {file}") + + parser = GherkinParser(environment) + parsed_suites = parser.parse_file() + + # Convert to dictionary format (manual serialization to include skipped fields) + suites_data = [] + for suite in parsed_suites: + # Manually serialize the suite to include testsections + sections_data = [] + for section in suite.testsections: + # Manually serialize test cases + cases_data = [] + for case in section.testcases: + case_dict = { + "title": case.title, + "case_id": case.case_id, + "custom_automation_id": case.custom_automation_id, + "case_fields": case.case_fields, + } + # Include result if present + if case.result: + result_data = { + "status_id": case.result.status_id, + "comment": case.result.comment, + "elapsed": case.result.elapsed, + } + # Include steps + if case.result.custom_step_results: + steps_data = [] + for step in case.result.custom_step_results: + steps_data.append( + { + "content": step.content, + "status_id": step.status_id if hasattr(step, "status_id") else None, + } + ) + result_data["custom_step_results"] = steps_data + case_dict["result"] = result_data + cases_data.append(case_dict) + + # Serialize properties + properties_data = [] + if section.properties: + for prop in section.properties: + properties_data.append( + { + "name": prop.name, + "value": prop.value, + } + ) + + section_dict = { + "name": section.name, + "testcases": cases_data, + "properties": properties_data, + } + sections_data.append(section_dict) + + suite_dict = { + "name": suite.name, + "source": suite.source, + "testsections": sections_data, + } + suites_data.append(suite_dict) + + # Prepare JSON output + output_data = { + "suites": suites_data, + "summary": { + "total_suites": len(suites_data), + "total_sections": sum(len(suite.get("testsections", [])) for suite in suites_data), + "total_cases": sum( + len(section.get("testcases", [])) + for suite in suites_data + for section in suite.get("testsections", []) + ), + "source_file": file, + }, + } + + # Format JSON 
+ if pretty: + json_output = json.dumps(output_data, indent=2, ensure_ascii=False) + else: + json_output = json.dumps(output_data, ensure_ascii=False) + + # Output results + if output: + # Save to file + with open(output, "w", encoding="utf-8") as f: + f.write(json_output) + environment.log(f"✓ Parsed results saved to: {output}") + environment.log(f" Total suites: {output_data['summary']['total_suites']}") + environment.log(f" Total sections: {output_data['summary']['total_sections']}") + environment.log(f" Total test cases: {output_data['summary']['total_cases']}") + else: + # Print to stdout + print(json_output) + + if environment.verbose: + environment.log("✓ Gherkin parsing completed successfully") + + except FileNotFoundError: + environment.elog(FAULT_MAPPING["missing_file"]) + exit(1) + except ValueError as e: + environment.elog(f"Error parsing Gherkin file: {str(e)}") + exit(1) + except Exception as e: + environment.elog(f"Unexpected error during parsing: {str(e)}") + if environment.verbose: + import traceback + + environment.elog(traceback.format_exc()) + exit(1) diff --git a/trcli/constants.py b/trcli/constants.py index 0dc9fec..858c556 100644 --- a/trcli/constants.py +++ b/trcli/constants.py @@ -5,9 +5,7 @@ missing_file="Please provide a valid path to your results file with the -f argument.", ) -PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING = dict( - missing_title="Please give your Test Run a title using the --title argument." -) +PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING = dict(missing_title="Please give your Test Run a title using the --title argument.") ADD_RUN_FAULT_MAPPING = dict( missing_title="Please give your Test Run a title using the --title argument.", @@ -58,23 +56,24 @@ f" - System Name: automation_id\n" f" - Type: Text (or String)\n" f" - Is Active: True", - proxy_connection_error= "Failed to connect to the proxy server. Please check the proxy settings and ensure the server is available.", - proxy_authentication_failed= "Proxy authentication failed for proxy. Please verify the username and password.", - proxy_timeout= "The connection to the proxy server timed out. Please try again later or check the proxy server's availability.", - proxy_bypass_error= "Failed to bypass the proxy for host. Please check the settings.", - proxy_invalid_configuration= "The provided proxy configuration is invalid. Please check the proxy URL and format.", - ssl_error_on_proxy= "SSL error encountered while using the HTTPS proxy. Please check the proxy's SSL certificate.", - no_proxy_match_error= "The host {host} does not match any NO_PROXY rules. Ensure the correct domains or IP addresses are specified for bypassing the proxy.", - no_suites_found= "The project {project_id} does not have any suites.", - invalid_json_response= "Received invalid response from TestRail server (HTTP {status_code}). " + proxy_connection_error="Failed to connect to the proxy server. Please check the proxy settings and ensure the server is available.", + proxy_authentication_failed="Proxy authentication failed for proxy. Please verify the username and password.", + proxy_timeout="The connection to the proxy server timed out. Please try again later or check the proxy server's availability.", + proxy_bypass_error="Failed to bypass the proxy for host. Please check the settings.", + proxy_invalid_configuration="The provided proxy configuration is invalid. Please check the proxy URL and format.", + ssl_error_on_proxy="SSL error encountered while using the HTTPS proxy. 
Please check the proxy's SSL certificate.", + no_proxy_match_error="The host {host} does not match any NO_PROXY rules. Ensure the correct domains or IP addresses are specified for bypassing the proxy.", + no_suites_found="The project {project_id} does not have any suites.", + invalid_json_response="Received invalid response from TestRail server (HTTP {status_code}). " "Please verify your TestRail host URL (-h) is correct and points to a valid TestRail instance. " "Response preview: {response_preview}", - invalid_api_response= "Invalid response from TestRail API: {error_details}" + invalid_api_response="Invalid response from TestRail API: {error_details}", ) COMMAND_FAULT_MAPPING = dict( add_run=dict(**FAULT_MAPPING, **ADD_RUN_FAULT_MAPPING), parse_junit=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING, **PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING), + parse_gherkin=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING), parse_openapi=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING), parse_robot=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING, **PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING), labels=dict(**FAULT_MAPPING), @@ -97,10 +96,12 @@ Copyright 2025 Gurock Software GmbH - www.gurock.com""" TOOL_USAGE = f"""Supported and loaded modules: - parse_junit: JUnit XML Files (& Similar) + - parse_gherkin: Gherkin .feature files (BDD) - parse_robot: Robot Framework XML Files - parse_openapi: OpenAPI YML Files - add_run: Create a new test run - - labels: Manage labels (projects, cases, and tests)""" + - labels: Manage labels (add, update, delete, list) + - references: Manage references (cases and runs)""" MISSING_COMMAND_SLOGAN = """Usage: trcli [OPTIONS] COMMAND [ARGS]...\nTry 'trcli --help' for help. \nError: Missing command.""" @@ -128,6 +129,7 @@ class RevertMessages: run_deleted = "Deleted created run" run_not_deleted = "Unable to delete created run: {error}" + OLD_SYSTEM_NAME_AUTOMATION_ID = "custom_automation_id" # field name mismatch on testrail side (can not reproduce in cloud version TestRail v9.1.2) -UPDATED_SYSTEM_NAME_AUTOMATION_ID = "custom_case_automation_id" \ No newline at end of file +UPDATED_SYSTEM_NAME_AUTOMATION_ID = "custom_case_automation_id" diff --git a/trcli/readers/gherkin_parser.py b/trcli/readers/gherkin_parser.py new file mode 100644 index 0000000..fe980b4 --- /dev/null +++ b/trcli/readers/gherkin_parser.py @@ -0,0 +1,268 @@ +from pathlib import Path +from beartype.typing import List, Dict, Any, Optional +from gherkin.parser import Parser +from gherkin.token_scanner import TokenScanner + +from trcli.cli import Environment +from trcli.data_classes.data_parsers import MatchersParser, TestRailCaseFieldsOptimizer +from trcli.data_classes.dataclass_testrail import ( + TestRailCase, + TestRailSuite, + TestRailSection, + TestRailProperty, + TestRailResult, + TestRailSeparatedStep, +) +from trcli.readers.file_parser import FileParser + + +class GherkinParser(FileParser): + """Parser for Gherkin .feature files""" + + def __init__(self, environment: Environment): + super().__init__(environment) + self.case_matcher = environment.case_matcher + + def parse_file(self) -> List[TestRailSuite]: + """Parse a Gherkin .feature file and convert to TestRailSuite structure""" + self.env.log(f"Parsing Gherkin feature file: {self.filename}") + + # Read and parse the feature file + with open(self.filepath, "r", encoding="utf-8") as f: + feature_text = f.read() + + parser = Parser() + scanner = TokenScanner(feature_text) + gherkin_document = parser.parse(scanner) + + # Extract feature + feature = 
gherkin_document.get("feature") + if not feature: + raise ValueError("No feature found in the Gherkin file") + + # Parse feature into TestRail structure + suite_name = self.env.suite_name if self.env.suite_name else feature.get("name", self.filepath.stem) + sections = self._parse_feature_children(feature) + + cases_count = sum(len(section.testcases) for section in sections) + self.env.log(f"Processed {cases_count} test cases in {len(sections)} sections.") + + testrail_suite = TestRailSuite( + name=suite_name, + testsections=sections, + source=self.filename, + ) + + return [testrail_suite] + + def _parse_feature_children(self, feature: Dict[str, Any]) -> List[TestRailSection]: + """Parse feature children (Background, Scenarios, Scenario Outlines) into sections""" + sections = [] + background_steps = None + + # First pass: extract background if present + for child in feature.get("children", []): + if "background" in child: + background_steps = self._extract_steps(child["background"]) + break + + # Group scenarios into a single section (using feature name) + feature_name = feature.get("name", "Feature") + section = TestRailSection(name=feature_name, testcases=[]) + + # Store background as section property if exists + if background_steps: + background_text = "\n".join([f"{step['keyword']}{step['text']}" for step in background_steps]) + section.properties = [TestRailProperty(name="background", value=background_text)] + + # Second pass: process scenarios + for child in feature.get("children", []): + if "scenario" in child: + scenario = child["scenario"] + # Check if it's a Scenario Outline + if scenario.get("keyword") == "Scenario Outline": + # Expand scenario outline into multiple test cases + test_cases = self._parse_scenario_outline(scenario, feature_name) + section.testcases.extend(test_cases) + else: + # Regular scenario + test_case = self._parse_scenario(scenario, feature_name) + if test_case: + section.testcases.append(test_case) + + if section.testcases: + sections.append(section) + + return sections + + def _parse_scenario(self, scenario: Dict[str, Any], feature_name: str) -> Optional[TestRailCase]: + """Parse a single Gherkin scenario into a TestRailCase""" + scenario_name = scenario.get("name", "Untitled Scenario") + tags = self._extract_tags(scenario) + steps = self._extract_steps(scenario) + + # Extract case ID if using name or property matcher + case_id = None + if self.case_matcher == MatchersParser.NAME: + case_id, scenario_name = MatchersParser.parse_name_with_id(scenario_name) + elif self.case_matcher == MatchersParser.PROPERTY: + # Look for @C tag pattern + for tag in tags: + if tag.startswith("@C") or tag.startswith("@c"): + try: + case_id = int(tag[2:]) + break + except ValueError: + pass + + # Create automation ID from feature, tags, and scenario name + # Format: "feature_name.@tag1.@tag2.scenario_name" + tag_part = ".".join(tags) if tags else "" + automation_id = f"{feature_name}.{tag_part}.{scenario_name}" if tag_part else f"{feature_name}.{scenario_name}" + + # Convert Gherkin steps to TestRail separated steps + step_results = [] + for step in steps: + step_content = f"{step['keyword']}{step['text']}" + tr_step = TestRailSeparatedStep(content=step_content) + tr_step.status_id = 3 # Untested by default + step_results.append(tr_step) + + # Create result object + result = TestRailResult( + case_id=case_id, + status_id=3, # Untested (no execution results yet) + comment=f"Gherkin scenario with {len(steps)} steps", + custom_step_results=step_results, + ) + + # Create test case 
+ test_case = TestRailCase( + title=TestRailCaseFieldsOptimizer.extract_last_words( + scenario_name, TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH + ), + case_id=case_id, + result=result, + custom_automation_id=automation_id, + case_fields={"tags": ", ".join(tags)} if tags else {}, + ) + + return test_case + + def _parse_scenario_outline(self, scenario_outline: Dict[str, Any], feature_name: str) -> List[TestRailCase]: + """Parse a Scenario Outline into multiple TestRailCases (one per example row)""" + test_cases = [] + outline_name = scenario_outline.get("name", "Untitled Outline") + tags = self._extract_tags(scenario_outline) + steps = self._extract_steps(scenario_outline) + examples = scenario_outline.get("examples", []) + + if not examples: + # No examples, treat as regular scenario + test_case = self._parse_scenario(scenario_outline, feature_name) + if test_case: + return [test_case] + + # Process each example table + for example_table in examples: + table_header = example_table.get("tableHeader", {}) + table_body = example_table.get("tableBody", []) + + # Get column names from header + header_cells = table_header.get("cells", []) + column_names = [cell.get("value", "") for cell in header_cells] + + # Create a test case for each row + for row_idx, row in enumerate(table_body, start=1): + row_cells = row.get("cells", []) + row_values = [cell.get("value", "") for cell in row_cells] + + # Create parameter mapping + params = dict(zip(column_names, row_values)) + + # Replace placeholders in scenario name + scenario_name = self._replace_placeholders(outline_name, params) + scenario_name = f"{outline_name} [Example {row_idx}]" + + # Replace placeholders in steps + instantiated_steps = [] + for step in steps: + step_text = self._replace_placeholders(step["text"], params) + instantiated_steps.append( + {"keyword": step["keyword"], "text": step_text, "keywordType": step.get("keywordType")} + ) + + # Create automation ID + tag_part = ".".join(tags) if tags else "" + automation_id = ( + f"{feature_name}.{tag_part}.{outline_name}.example_{row_idx}" + if tag_part + else f"{feature_name}.{outline_name}.example_{row_idx}" + ) + + # Convert steps to TestRail format + step_results = [] + for step in instantiated_steps: + step_content = f"{step['keyword']}{step['text']}" + tr_step = TestRailSeparatedStep(content=step_content) + tr_step.status_id = 3 # Untested + step_results.append(tr_step) + + # Create result + result = TestRailResult( + case_id=None, + status_id=3, + comment=f"Scenario Outline example {row_idx}: {params}", + custom_step_results=step_results, + ) + + # Create test case + test_case = TestRailCase( + title=TestRailCaseFieldsOptimizer.extract_last_words( + scenario_name, TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH + ), + case_id=None, + result=result, + custom_automation_id=automation_id, + case_fields=( + {"tags": ", ".join(tags), "example_params": str(params)} + if tags + else {"example_params": str(params)} + ), + ) + + test_cases.append(test_case) + + return test_cases + + @staticmethod + def _extract_tags(scenario: Dict[str, Any]) -> List[str]: + """Extract tags from a scenario""" + tags = [] + for tag in scenario.get("tags", []): + tag_name = tag.get("name", "") + if tag_name: + tags.append(tag_name) + return tags + + @staticmethod + def _extract_steps(scenario_or_background: Dict[str, Any]) -> List[Dict[str, Any]]: + """Extract steps from a scenario or background""" + steps = [] + for step in scenario_or_background.get("steps", []): + steps.append( + { + "keyword": 
step.get("keyword", ""), + "text": step.get("text", ""), + "keywordType": step.get("keywordType", ""), + } + ) + return steps + + @staticmethod + def _replace_placeholders(text: str, params: Dict[str, str]) -> str: + """Replace with actual values from params""" + result = text + for key, value in params.items(): + result = result.replace(f"<{key}>", value) + return result From 1d81eaca7c01f9b61adace23133e25530db7253f Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 27 Nov 2025 12:30:05 +0800 Subject: [PATCH 04/33] TRCLI-193 Added new bdd commands, import and export gherkin, parse gherkin and cucumber json --- trcli/api/api_request_handler.py | 957 +++++++++++++++------------ trcli/commands/cmd_export_gherkin.py | 137 ++++ trcli/commands/cmd_import_gherkin.py | 143 ++++ trcli/commands/cmd_parse_cucumber.py | 147 ++++ trcli/constants.py | 8 +- trcli/readers/cucumber_json.py | 493 ++++++++++++++ 6 files changed, 1447 insertions(+), 438 deletions(-) create mode 100644 trcli/commands/cmd_export_gherkin.py create mode 100644 trcli/commands/cmd_import_gherkin.py create mode 100644 trcli/commands/cmd_parse_cucumber.py create mode 100644 trcli/readers/cucumber_json.py diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index 3dcd196..b7fb5ff 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -7,7 +7,9 @@ from trcli.cli import Environment from trcli.constants import ( ProjectErrors, - FAULT_MAPPING, OLD_SYSTEM_NAME_AUTOMATION_ID, UPDATED_SYSTEM_NAME_AUTOMATION_ID, + FAULT_MAPPING, + OLD_SYSTEM_NAME_AUTOMATION_ID, + UPDATED_SYSTEM_NAME_AUTOMATION_ID, ) from trcli.data_classes.data_parsers import MatchersParser from trcli.data_classes.dataclass_testrail import TestRailSuite, TestRailCase, ProjectData @@ -33,7 +35,7 @@ def __init__( environment.case_fields, environment.run_description, environment.result_fields, - environment.section_id + environment.section_id, ) self.suites_data_from_provider = self.data_provider.suites_input self.response_verifier = ApiResponseVerify(verify) @@ -48,11 +50,11 @@ def check_automation_id_field(self, project_id: int) -> Union[str, None]: if not response.error_message: fields: List = response.response_text automation_id_field = next( - filter( + filter( lambda x: x["system_name"] in [OLD_SYSTEM_NAME_AUTOMATION_ID, UPDATED_SYSTEM_NAME_AUTOMATION_ID], - fields + fields, ), - None + None, ) if automation_id_field: if automation_id_field["is_active"] is False: @@ -79,11 +81,7 @@ def get_project_data(self, project_name: str, project_id: int = None) -> Project """ projects_data, error = self.__get_all_projects() if not error: - available_projects = [ - project - for project in projects_data - if project["name"] == project_name - ] + available_projects = [project for project in projects_data if project["name"] == project_name] if len(available_projects) == 1: return ProjectData( @@ -94,9 +92,7 @@ def get_project_data(self, project_name: str, project_id: int = None) -> Project elif len(available_projects) > 1: if project_id in [project["id"] for project in available_projects]: project_index = [ - index - for index, project in enumerate(available_projects) - if project["id"] == project_id + index for index, project in enumerate(available_projects) if project["id"] == project_id ][0] return ProjectData( project_id=int(available_projects[project_index]["id"]), @@ -131,11 +127,7 @@ def check_suite_id(self, project_id: int) -> Tuple[bool, str]: suite_id = self.suites_data_from_provider.suite_id suites_data, 
error = self.__get_all_suites(project_id) if not error: - available_suites = [ - suite - for suite in suites_data - if suite["id"] == suite_id - ] + available_suites = [suite for suite in suites_data if suite["id"] == suite_id] return ( (True, "") if len(available_suites) > 0 @@ -207,9 +199,7 @@ def add_suites(self, project_id: int) -> Tuple[List[Dict], str]: response = self.client.send_post(f"add_suite/{project_id}", body) if not response.error_message: responses.append(response) - if not self.response_verifier.verify_returned_data( - body, response.response_text - ): + if not self.response_verifier.verify_returned_data(body, response.response_text): responses.append(response) error_message = FAULT_MAPPING["data_verification_error"] break @@ -224,9 +214,11 @@ def add_suites(self, project_id: int) -> Tuple[List[Dict], str]: } for response in responses ] - self.data_provider.update_data(suite_data=returned_resources) if len( - returned_resources - ) > 0 else "Update skipped" + ( + self.data_provider.update_data(suite_data=returned_resources) + if len(returned_resources) > 0 + else "Update skipped" + ) return returned_resources, error_message def check_missing_section_ids(self, project_id: int) -> Tuple[bool, str]: @@ -246,20 +238,24 @@ def check_missing_section_ids(self, project_id: int) -> Tuple[bool, str]: if self.environment.section_id: if section.section_id in sections_by_id.keys(): section_json = sections_by_id[section.section_id] - section_data.append({ - "section_id": section_json["id"], - "suite_id": section_json["suite_id"], - "name": section_json["name"], - }) + section_data.append( + { + "section_id": section_json["id"], + "suite_id": section_json["suite_id"], + "name": section_json["name"], + } + ) else: missing_test_sections = True if section.name in sections_by_name.keys(): section_json = sections_by_name[section.name] - section_data.append({ - "section_id": section_json["id"], - "suite_id": section_json["suite_id"], - "name": section_json["name"], - }) + section_data.append( + { + "section_id": section_json["id"], + "suite_id": section_json["suite_id"], + "name": section_json["name"], + } + ) else: missing_test_sections = True self.data_provider.update_data(section_data=section_data) @@ -281,9 +277,7 @@ def add_sections(self, project_id: int) -> Tuple[List[Dict], str]: response = self.client.send_post(f"add_section/{project_id}", body) if not response.error_message: responses.append(response) - if not self.response_verifier.verify_returned_data( - body, response.response_text - ): + if not self.response_verifier.verify_returned_data(body, response.response_text): responses.append(response) error_message = FAULT_MAPPING["data_verification_error"] break @@ -298,9 +292,11 @@ def add_sections(self, project_id: int) -> Tuple[List[Dict], str]: } for response in responses ] - self.data_provider.update_data(section_data=returned_resources) if len( - returned_resources - ) > 0 else "Update skipped" + ( + self.data_provider.update_data(section_data=returned_resources) + if len(returned_resources) > 0 + else "Update skipped" + ) return returned_resources, error_message def check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, str]: @@ -327,12 +323,14 @@ def check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, str]: aut_id = test_case.custom_automation_id if aut_id in test_cases_by_aut_id.keys(): case = test_cases_by_aut_id[aut_id] - test_case_data.append({ - "case_id": case["id"], - "section_id": case["section_id"], - "title": case["title"], - 
OLD_SYSTEM_NAME_AUTOMATION_ID: aut_id - }) + test_case_data.append( + { + "case_id": case["id"], + "section_id": case["section_id"], + "title": case["title"], + OLD_SYSTEM_NAME_AUTOMATION_ID: aut_id, + } + ) else: missing_cases_number += 1 self.data_provider.update_data(case_data=test_case_data) @@ -386,25 +384,25 @@ def add_cases(self) -> Tuple[List[dict], str]: { "case_id": response.response_text["id"], "section_id": response.response_text["section_id"], - "title": response.response_text["title"] + "title": response.response_text["title"], } for response in responses ] return returned_resources, error_message def add_run( - self, - project_id: int, - run_name: str, - milestone_id: int = None, - start_date: str = None, - end_date: str = None, - plan_id: int = None, - config_ids: List[int] = None, - assigned_to_id: int = None, - include_all: bool = False, - refs: str = None, - case_ids: List[int] = None, + self, + project_id: int, + run_name: str, + milestone_id: int = None, + start_date: str = None, + end_date: str = None, + plan_id: int = None, + config_ids: List[int] = None, + assigned_to_id: int = None, + include_all: bool = False, + refs: str = None, + case_ids: List[int] = None, ) -> Tuple[int, str]: """ Creates a new test run. @@ -432,7 +430,7 @@ def add_run( "name": add_run_data["name"], "suite_id": add_run_data["suite_id"], "config_ids": config_ids, - "runs": [add_run_data] + "runs": [add_run_data], } else: entry_data = add_run_data @@ -440,8 +438,16 @@ def add_run( run_id = response.response_text["runs"][0]["id"] return run_id, response.error_message - def update_run(self, run_id: int, run_name: str, start_date: str = None, - end_date: str = None, milestone_id: int = None, refs: str = None, refs_action: str = 'add') -> Tuple[dict, str]: + def update_run( + self, + run_id: int, + run_name: str, + start_date: str = None, + end_date: str = None, + milestone_id: int = None, + refs: str = None, + refs_action: str = "add", + ) -> Tuple[dict, str]: """ Updates an existing run :run_id: run id @@ -453,12 +459,13 @@ def update_run(self, run_id: int, run_name: str, start_date: str = None, run_response = self.client.send_get(f"get_run/{run_id}") if run_response.error_message: return None, run_response.error_message - + existing_description = run_response.response_text.get("description", "") existing_refs = run_response.response_text.get("refs", "") - add_run_data = self.data_provider.add_run(run_name, start_date=start_date, - end_date=end_date, milestone_id=milestone_id) + add_run_data = self.data_provider.add_run( + run_name, start_date=start_date, end_date=end_date, milestone_id=milestone_id + ) add_run_data["description"] = existing_description # Retain the current description # Handle references based on action @@ -473,7 +480,7 @@ def update_run(self, run_id: int, run_name: str, start_date: str = None, report_case_ids = add_run_data["case_ids"] joint_case_ids = list(set(report_case_ids + run_case_ids)) add_run_data["case_ids"] = joint_case_ids - + plan_id = run_response.response_text["plan_id"] config_ids = run_response.response_text["config_ids"] if not plan_id: @@ -505,29 +512,29 @@ def _manage_references(self, existing_refs: str, new_refs: str, action: str) -> """ if not existing_refs: existing_refs = "" - - if action == 'update': + + if action == "update": # Replace all references with new ones return new_refs - elif action == 'delete': + elif action == "delete": if not new_refs: # Delete all references return "" else: # Delete specific references - existing_list = [ref.strip() for 
ref in existing_refs.split(',') if ref.strip()] - refs_to_delete = [ref.strip() for ref in new_refs.split(',') if ref.strip()] + existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + refs_to_delete = [ref.strip() for ref in new_refs.split(",") if ref.strip()] updated_list = [ref for ref in existing_list if ref not in refs_to_delete] - return ','.join(updated_list) + return ",".join(updated_list) else: # action == 'add' (default) # Add new references to existing ones if not existing_refs: return new_refs - existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - new_list = [ref.strip() for ref in new_refs.split(',') if ref.strip()] + existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + new_list = [ref.strip() for ref in new_refs.split(",") if ref.strip()] # Avoid duplicates combined_list = existing_list + [ref for ref in new_list if ref not in existing_list] - return ','.join(combined_list) + return ",".join(combined_list) def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dict, List[str], List[str], str]: """ @@ -540,11 +547,11 @@ def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dic run_response = self.client.send_get(f"get_run/{run_id}") if run_response.error_message: return None, [], [], run_response.error_message - + existing_refs = run_response.response_text.get("refs", "") or "" - + # Parse existing and new references - existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] if existing_refs else [] + existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] if existing_refs else [] # Deduplicate input references new_list = [] seen = set() @@ -553,28 +560,33 @@ def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dic if ref_clean and ref_clean not in seen: new_list.append(ref_clean) seen.add(ref_clean) - + # Determine which references are new vs duplicates added_refs = [ref for ref in new_list if ref not in existing_list] skipped_refs = [ref for ref in new_list if ref in existing_list] - + # If no new references to add, return current state if not added_refs: return run_response.response_text, added_refs, skipped_refs, None - + # Combine references combined_list = existing_list + added_refs - combined_refs = ','.join(combined_list) - + combined_refs = ",".join(combined_list) + if len(combined_refs) > 250: - return None, [], [], f"Combined references length ({len(combined_refs)} characters) exceeds 250 character limit" - + return ( + None, + [], + [], + f"Combined references length ({len(combined_refs)} characters) exceeds 250 character limit", + ) + update_data = {"refs": combined_refs} - + # Determine the correct API endpoint based on plan membership plan_id = run_response.response_text.get("plan_id") config_ids = run_response.response_text.get("config_ids") - + if not plan_id: # Standalone run update_response = self.client.send_post(f"update_run/{run_id}", update_data) @@ -586,7 +598,7 @@ def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dic plan_response = self.client.send_get(f"get_plan/{plan_id}") if plan_response.error_message: return None, [], [], f"Failed to get plan details: {plan_response.error_message}" - + # Find the entry_id for this run entry_id = None for entry in plan_response.response_text.get("entries", []): @@ -596,19 +608,21 @@ def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dic break if entry_id: break 
- + if not entry_id: return None, [], [], f"Could not find plan entry for run {run_id}" - + update_response = self.client.send_post(f"update_plan_entry/{plan_id}/{entry_id}", update_data) - + if update_response.error_message: return None, [], [], update_response.error_message - + updated_run_response = self.client.send_get(f"get_run/{run_id}") return updated_run_response.response_text, added_refs, skipped_refs, updated_run_response.error_message - def update_existing_case_references(self, case_id: int, junit_refs: str, strategy: str = "append") -> Tuple[bool, str, List[str], List[str]]: + def update_existing_case_references( + self, case_id: int, junit_refs: str, strategy: str = "append" + ) -> Tuple[bool, str, List[str], List[str]]: """ Update existing case references with values from JUnit properties. :param case_id: ID of the test case @@ -618,62 +632,69 @@ def update_existing_case_references(self, case_id: int, junit_refs: str, strateg """ if not junit_refs or not junit_refs.strip(): return True, None, [], [] # No references to process - + # Parse and validate JUnit references, deduplicating input junit_ref_list = [] seen = set() - for ref in junit_refs.split(','): + for ref in junit_refs.split(","): ref_clean = ref.strip() if ref_clean and ref_clean not in seen: junit_ref_list.append(ref_clean) seen.add(ref_clean) - + if not junit_ref_list: return False, "No valid references found in JUnit property", [], [] - + # Get current case data case_response = self.client.send_get(f"get_case/{case_id}") if case_response.error_message: return False, case_response.error_message, [], [] - - existing_refs = case_response.response_text.get('refs', '') or '' - + + existing_refs = case_response.response_text.get("refs", "") or "" + if strategy == "replace": # Replace strategy: use JUnit refs as-is - new_refs = ','.join(junit_ref_list) + new_refs = ",".join(junit_ref_list) added_refs = junit_ref_list skipped_refs = [] else: # Append strategy: combine with existing refs, avoiding duplicates - existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] if existing_refs else [] - + existing_ref_list = ( + [ref.strip() for ref in existing_refs.split(",") if ref.strip()] if existing_refs else [] + ) + # Determine which references are new vs duplicates added_refs = [ref for ref in junit_ref_list if ref not in existing_ref_list] skipped_refs = [ref for ref in junit_ref_list if ref in existing_ref_list] - + # If no new references to add, return current state if not added_refs: return True, None, added_refs, skipped_refs - + # Combine references combined_list = existing_ref_list + added_refs - new_refs = ','.join(combined_list) - + new_refs = ",".join(combined_list) + # Validate 2000 character limit for test case references if len(new_refs) > 2000: - return False, f"Combined references length ({len(new_refs)} characters) exceeds 2000 character limit", [], [] - + return ( + False, + f"Combined references length ({len(new_refs)} characters) exceeds 2000 character limit", + [], + [], + ) + # Update the case update_data = {"refs": new_refs} update_response = self.client.send_post(f"update_case/{case_id}", update_data) - + if update_response.error_message: return False, update_response.error_message, [], [] - + return True, None, added_refs, skipped_refs def upload_attachments(self, report_results: [Dict], results: List[Dict], run_id: int): - """ Getting test result id and upload attachments for it. 
""" + """Getting test result id and upload attachments for it.""" tests_in_run, error = self.__get_all_tests_in_run(run_id) if not error: for report_result in report_results: @@ -698,26 +719,18 @@ def add_results(self, run_id: int) -> Tuple[List, str, int]: responses = [] error_message = "" # Get pre-validated user IDs if available - user_ids = getattr(self.environment, '_validated_user_ids', []) - - add_results_data_chunks = self.data_provider.add_results_for_cases( - self.environment.batch_size, user_ids - ) + user_ids = getattr(self.environment, "_validated_user_ids", []) + + add_results_data_chunks = self.data_provider.add_results_for_cases(self.environment.batch_size, user_ids) # Get assigned count from data provider - assigned_count = getattr(self.data_provider, '_assigned_count', 0) - - results_amount = sum( - [len(results["results"]) for results in add_results_data_chunks] - ) + assigned_count = getattr(self.data_provider, "_assigned_count", 0) - with self.environment.get_progress_bar( - results_amount=results_amount, prefix="Adding results" - ) as progress_bar: + results_amount = sum([len(results["results"]) for results in add_results_data_chunks]) + + with self.environment.get_progress_bar(results_amount=results_amount, prefix="Adding results") as progress_bar: with ThreadPoolExecutor(max_workers=MAX_WORKERS_ADD_RESULTS) as executor: futures = { - executor.submit( - self.client.send_post, f"add_results_for_cases/{run_id}", body - ): body + executor.submit(self.client.send_post, f"add_results_for_cases/{run_id}", body): body for body in add_results_data_chunks } responses, error_message = self.handle_futures( @@ -730,11 +743,7 @@ def add_results(self, run_id: int) -> Tuple[List, str, int]: # Iterate through futures to get all responses from done tasks (not cancelled) responses = ApiRequestHandler.retrieve_results_after_cancelling(futures) responses = [response.response_text for response in responses] - results = [ - result - for results_list in responses - for result in results_list - ] + results = [result for results_list in responses for result in results_list] report_results_w_attachments = [] for results_data_chunk in add_results_data_chunks: for test_result in results_data_chunk["results"]: @@ -744,22 +753,22 @@ def add_results(self, run_id: int) -> Tuple[List, str, int]: attachments_count = 0 for result in report_results_w_attachments: attachments_count += len(result["attachments"]) - self.environment.log(f"Uploading {attachments_count} attachments " - f"for {len(report_results_w_attachments)} test results.") + self.environment.log( + f"Uploading {attachments_count} attachments " f"for {len(report_results_w_attachments)} test results." 
+ ) self.upload_attachments(report_results_w_attachments, results, run_id) else: self.environment.log(f"No attachments found to upload.") - + # Log assignment results if assignment was performed if user_ids: - total_failed = getattr(self.data_provider, '_total_failed_count', assigned_count) + total_failed = getattr(self.data_provider, "_total_failed_count", assigned_count) if assigned_count > 0: self.environment.log(f"Assigning failed results: {assigned_count}/{total_failed}, Done.") else: self.environment.log(f"Assigning failed results: 0/0, Done.") - - return responses, error_message, progress_bar.n + return responses, error_message, progress_bar.n def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, str]: responses = [] @@ -776,9 +785,7 @@ def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, st if action_string == "add_case": arguments = arguments.to_dict() arguments.pop("case_id") - if not self.response_verifier.verify_returned_data( - arguments, response.response_text - ): + if not self.response_verifier.verify_returned_data(arguments, response.response_text): responses.append(response) error_message = FAULT_MAPPING["data_verification_error"] self.__cancel_running_futures(futures, action_string) @@ -786,9 +793,7 @@ def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, st progress_bar.update(1) else: error_message = response.error_message - self.environment.log( - f"\nError during {action_string}. Trying to cancel scheduled tasks." - ) + self.environment.log(f"\nError during {action_string}. Trying to cancel scheduled tasks.") self.__cancel_running_futures(futures, action_string) break else: @@ -826,9 +831,7 @@ def delete_sections(self, added_sections: List[Dict]) -> Tuple[List, str]: responses = [] error_message = "" for section in added_sections: - response = self.client.send_post( - f"delete_section/{section['section_id']}", payload={} - ) + response = self.client.send_post(f"delete_section/{section['section_id']}", payload={}) if not response.error_message: responses.append(response.response_text) else: @@ -868,45 +871,52 @@ def retrieve_results_after_cancelling(futures) -> list: def get_user_by_email(self, email: str) -> Tuple[Union[int, None], str]: """ Validates a user email and returns the user ID if valid. 
- + :param email: User email to validate :returns: Tuple with user ID (or None if not found) and error message """ if not email or not email.strip(): return None, "Email cannot be empty" - + email = email.strip() # Use proper URL encoding for the query parameter import urllib.parse + encoded_email = urllib.parse.quote_plus(email) response = self.client.send_get(f"get_user_by_email&email={encoded_email}") - + if response.error_message: # Map TestRail's email validation error to our expected format if "Field :email is not a valid email address" in response.error_message: return None, f"User not found: {email}" return None, response.error_message - + if response.status_code == 200: try: user_data = response.response_text - if isinstance(user_data, dict) and 'id' in user_data: - return user_data['id'], "" + if isinstance(user_data, dict) and "id" in user_data: + return user_data["id"], "" else: return None, f"Invalid response format for user: {email}" except (KeyError, TypeError): return None, f"Invalid response format for user: {email}" elif response.status_code == 400: # Check if the response contains the email validation error - if (hasattr(response, 'response_text') and response.response_text and - isinstance(response.response_text, dict) and - "Field :email is not a valid email address" in str(response.response_text.get('error', ''))): + if ( + hasattr(response, "response_text") + and response.response_text + and isinstance(response.response_text, dict) + and "Field :email is not a valid email address" in str(response.response_text.get("error", "")) + ): return None, f"User not found: {email}" return None, f"User not found: {email}" else: # For other status codes, check if it's the email validation error - if (hasattr(response, 'response_text') and response.response_text and - "Field :email is not a valid email address" in str(response.response_text)): + if ( + hasattr(response, "response_text") + and response.response_text + and "Field :email is not a valid email address" in str(response.response_text) + ): return None, f"User not found: {email}" return None, f"API error (status {response.status_code}) when validating user: {email}" @@ -925,9 +935,7 @@ def _add_case_and_update_data(self, case: TestRailCase) -> APIClientResult: return response def __cancel_running_futures(self, futures, action_string): - self.environment.log( - f"\nAborting: {action_string}. Trying to cancel scheduled tasks." - ) + self.environment.log(f"\nAborting: {action_string}. 
Trying to cancel scheduled tasks.") for future in futures: future.cancel() @@ -936,33 +944,33 @@ def __get_all_cases(self, project_id=None, suite_id=None) -> Tuple[List[dict], s Get all cases from all pages """ if suite_id is None: - return self.__get_all_entities('cases', f"get_cases/{project_id}") + return self.__get_all_entities("cases", f"get_cases/{project_id}") else: - return self.__get_all_entities('cases', f"get_cases/{project_id}&suite_id={suite_id}") + return self.__get_all_entities("cases", f"get_cases/{project_id}&suite_id={suite_id}") def __get_all_sections(self, project_id=None, suite_id=None) -> Tuple[List[dict], str]: """ Get all sections from all pages """ - return self.__get_all_entities('sections', f"get_sections/{project_id}&suite_id={suite_id}") + return self.__get_all_entities("sections", f"get_sections/{project_id}&suite_id={suite_id}") def __get_all_tests_in_run(self, run_id=None) -> Tuple[List[dict], str]: """ Get all tests from all pages """ - return self.__get_all_entities('tests', f"get_tests/{run_id}") + return self.__get_all_entities("tests", f"get_tests/{run_id}") def __get_all_projects(self) -> Tuple[List[dict], str]: """ Get all projects from all pages """ - return self.__get_all_entities('projects', f"get_projects") + return self.__get_all_entities("projects", f"get_projects") def __get_all_suites(self, project_id) -> Tuple[List[dict], str]: """ Get all suites from all pages """ - return self.__get_all_entities('suites', f"get_suites/{project_id}") + return self.__get_all_entities("suites", f"get_suites/{project_id}") def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[Dict], str]: """ @@ -979,9 +987,7 @@ def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[ return response.response_text, response.error_message # Check if response is a string (JSON parse failed) if isinstance(response.response_text, str): - error_msg = FAULT_MAPPING["invalid_api_response"].format( - error_details=response.response_text[:200] - ) + error_msg = FAULT_MAPPING["invalid_api_response"].format(error_details=response.response_text[:200]) return [], error_msg # Endpoints with pagination entities = entities + response.response_text[entity] @@ -1002,7 +1008,7 @@ def add_label(self, project_id: int, title: str) -> Tuple[dict, str]: :returns: Tuple with created label data and error string """ # Use multipart/form-data like the working CURL command - files = {'title': (None, title)} + files = {"title": (None, title)} response = self.client.send_post(f"add_label/{project_id}", payload=None, files=files) return response.response_text, response.error_message @@ -1016,8 +1022,8 @@ def update_label(self, label_id: int, project_id: int, title: str) -> Tuple[dict """ # Use multipart/form-data like add_label files = { - 'project_id': (None, str(project_id)), - 'title': (None, title) # Field name is 'title' (no colon) for form data + "project_id": (None, str(project_id)), + "title": (None, title), # Field name is 'title' (no colon) for form data } response = self.client.send_post(f"update_label/{label_id}", payload=None, files=files) return response.response_text, response.error_message @@ -1044,11 +1050,11 @@ def get_labels(self, project_id: int, offset: int = 0, limit: int = 250) -> Tupl params.append(f"offset={offset}") if limit != 250: params.append(f"limit={limit}") - + url = f"get_labels/{project_id}" if params: url += "&" + "&".join(params) - + response = self.client.send_get(url) return response.response_text, response.error_message 
@@ -1070,16 +1076,19 @@ def delete_labels(self, label_ids: List[int]) -> Tuple[bool, str]: """ # Send as form data with JSON array format import json + label_ids_json = json.dumps(label_ids) files = {"label_ids": (None, label_ids_json)} response = self.client.send_post("delete_labels", payload=None, files=files) success = response.status_code == 200 return success, response.error_message - def add_labels_to_cases(self, case_ids: List[int], title: str, project_id: int, suite_id: int = None) -> Tuple[dict, str]: + def add_labels_to_cases( + self, case_ids: List[int], title: str, project_id: int, suite_id: int = None + ) -> Tuple[dict, str]: """ Add a label to multiple test cases - + :param case_ids: List of test case IDs :param title: Label title (max 20 characters) :param project_id: Project ID for validation @@ -1087,122 +1096,113 @@ def add_labels_to_cases(self, case_ids: List[int], title: str, project_id: int, :returns: Tuple with response data and error string """ # Initialize results structure - results = { - 'successful_cases': [], - 'failed_cases': [], - 'max_labels_reached': [], - 'case_not_found': [] - } - + results = {"successful_cases": [], "failed_cases": [], "max_labels_reached": [], "case_not_found": []} + # Check if project is multi-suite by getting all cases without suite_id all_cases_no_suite, error_message = self.__get_all_cases(project_id, None) if error_message: return results, error_message - + # Check if project has multiple suites suite_ids = set() for case in all_cases_no_suite: - if 'suite_id' in case and case['suite_id']: - suite_ids.add(case['suite_id']) - + if "suite_id" in case and case["suite_id"]: + suite_ids.add(case["suite_id"]) + # If project has multiple suites and no suite_id provided, require it if len(suite_ids) > 1 and suite_id is None: return results, "This project is multisuite, suite id is required" - + # Get all cases to validate that the provided case IDs exist all_cases, error_message = self.__get_all_cases(project_id, suite_id) if error_message: return results, error_message - + # Create a set of existing case IDs for quick lookup - existing_case_ids = {case['id'] for case in all_cases} - + existing_case_ids = {case["id"] for case in all_cases} + # Validate case IDs and separate valid from invalid ones invalid_case_ids = [case_id for case_id in case_ids if case_id not in existing_case_ids] valid_case_ids = [case_id for case_id in case_ids if case_id in existing_case_ids] - + # Record invalid case IDs for case_id in invalid_case_ids: - results['case_not_found'].append(case_id) - + results["case_not_found"].append(case_id) + # If no valid case IDs, return early if not valid_case_ids: return results, "" - + # Check if label exists or create it existing_labels, error_message = self.get_labels(project_id) if error_message: return results, error_message - + # Find existing label with the same title label_id = None - for label in existing_labels.get('labels', []): - if label.get('title') == title: - label_id = label.get('id') + for label in existing_labels.get("labels", []): + if label.get("title") == title: + label_id = label.get("id") break - + # Create label if it doesn't exist if label_id is None: label_data, error_message = self.add_label(project_id, title) if error_message: return results, error_message - label_info = label_data.get('label', label_data) - label_id = label_info.get('id') - + label_info = label_data.get("label", label_data) + label_id = label_info.get("id") + # Collect case data and validate constraints cases_to_update = [] for 
case_id in valid_case_ids: # Get current case to check existing labels case_response = self.client.send_get(f"get_case/{case_id}") if case_response.status_code != 200: - results['failed_cases'].append({ - 'case_id': case_id, - 'error': f"Could not retrieve case {case_id}: {case_response.error_message}" - }) + results["failed_cases"].append( + {"case_id": case_id, "error": f"Could not retrieve case {case_id}: {case_response.error_message}"} + ) continue - + case_data = case_response.response_text - current_labels = case_data.get('labels', []) - + current_labels = case_data.get("labels", []) + # Check if label already exists on this case - if any(label.get('id') == label_id for label in current_labels): - results['successful_cases'].append({ - 'case_id': case_id, - 'message': f"Label '{title}' already exists on case {case_id}" - }) + if any(label.get("id") == label_id for label in current_labels): + results["successful_cases"].append( + {"case_id": case_id, "message": f"Label '{title}' already exists on case {case_id}"} + ) continue - + # Check maximum labels limit (10) if len(current_labels) >= 10: - results['max_labels_reached'].append(case_id) + results["max_labels_reached"].append(case_id) continue - + # Prepare case for update - existing_label_ids = [label.get('id') for label in current_labels if label.get('id')] + existing_label_ids = [label.get("id") for label in current_labels if label.get("id")] updated_label_ids = existing_label_ids + [label_id] - cases_to_update.append({ - 'case_id': case_id, - 'labels': updated_label_ids - }) - + cases_to_update.append({"case_id": case_id, "labels": updated_label_ids}) + # Update cases using appropriate endpoint if len(cases_to_update) == 1: # Single case: use update_case/{case_id} case_info = cases_to_update[0] - case_update_data = {'labels': case_info['labels']} - + case_update_data = {"labels": case_info["labels"]} + update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - + if update_response.status_code == 200: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) else: - results['failed_cases'].append({ - 'case_id': case_info['case_id'], - 'error': update_response.error_message - }) + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) elif len(cases_to_update) > 1: # Multiple cases: use update_cases/{suite_id} # Need to determine suite_id from the cases @@ -1210,62 +1210,72 @@ def add_labels_to_cases(self, case_ids: List[int], title: str, project_id: int, if not case_suite_id: # Get suite_id from the first case if not provided first_case = all_cases[0] if all_cases else None - case_suite_id = first_case.get('suite_id') if first_case else None - + case_suite_id = first_case.get("suite_id") if first_case else None + if not case_suite_id: # Fall back to individual updates if no suite_id available for case_info in cases_to_update: - case_update_data = {'labels': case_info['labels']} - update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - + case_update_data = {"labels": case_info["labels"]} + update_response = self.client.send_post( + f"update_case/{case_info['case_id']}", payload=case_update_data + ) + if 
update_response.status_code == 200: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) else: - results['failed_cases'].append({ - 'case_id': case_info['case_id'], - 'error': update_response.error_message - }) + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) else: # Batch update using update_cases/{suite_id} batch_update_data = { - 'case_ids': [case_info['case_id'] for case_info in cases_to_update], - 'labels': cases_to_update[0]['labels'] # Assuming same labels for all cases + "case_ids": [case_info["case_id"] for case_info in cases_to_update], + "labels": cases_to_update[0]["labels"], # Assuming same labels for all cases } - + batch_response = self.client.send_post(f"update_cases/{case_suite_id}", payload=batch_update_data) - + if batch_response.status_code == 200: for case_info in cases_to_update: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) else: # If batch update fails, fall back to individual updates for case_info in cases_to_update: - case_update_data = {'labels': case_info['labels']} - update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - + case_update_data = {"labels": case_info["labels"]} + update_response = self.client.send_post( + f"update_case/{case_info['case_id']}", payload=case_update_data + ) + if update_response.status_code == 200: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) else: - results['failed_cases'].append({ - 'case_id': case_info['case_id'], - 'error': update_response.error_message - }) - + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) + return results, "" - def get_cases_by_label(self, project_id: int, suite_id: int = None, label_ids: List[int] = None, label_title: str = None) -> Tuple[List[dict], str]: + def get_cases_by_label( + self, project_id: int, suite_id: int = None, label_ids: List[int] = None, label_title: str = None + ) -> Tuple[List[dict], str]: """ Get test cases filtered by label ID or title - + :param project_id: Project ID :param suite_id: Suite ID (optional) :param label_ids: List of label IDs to filter by @@ -1276,234 +1286,228 @@ def get_cases_by_label(self, project_id: int, suite_id: int = None, label_ids: L all_cases, error_message = self.__get_all_cases(project_id, suite_id) if error_message: return [], error_message - + # If filtering by title, first get the label ID target_label_ids = label_ids or [] if label_title and not target_label_ids: labels_data, error_message = self.get_labels(project_id) if error_message: return [], error_message - - for label in labels_data.get('labels', []): - if label.get('title') == label_title: - 
target_label_ids.append(label.get('id')) - + + for label in labels_data.get("labels", []): + if label.get("title") == label_title: + target_label_ids.append(label.get("id")) + if not target_label_ids: return [], "" # No label found is a valid case with 0 results - + # Filter cases that have any of the target labels matching_cases = [] for case in all_cases: - case_labels = case.get('labels', []) - case_label_ids = [label.get('id') for label in case_labels] - + case_labels = case.get("labels", []) + case_label_ids = [label.get("id") for label in case_labels] + # Check if any of the target label IDs are present in this case if any(label_id in case_label_ids for label_id in target_label_ids): matching_cases.append(case) - + return matching_cases, "" - def add_labels_to_tests(self, test_ids: List[int], titles: Union[str, List[str]], project_id: int) -> Tuple[dict, str]: + def add_labels_to_tests( + self, test_ids: List[int], titles: Union[str, List[str]], project_id: int + ) -> Tuple[dict, str]: """ Add labels to multiple tests - + :param test_ids: List of test IDs :param titles: Label title(s) - can be a single string or list of strings (max 20 characters each) :param project_id: Project ID for validation :returns: Tuple with response data and error string """ # Initialize results structure - results = { - 'successful_tests': [], - 'failed_tests': [], - 'max_labels_reached': [], - 'test_not_found': [] - } - + results = {"successful_tests": [], "failed_tests": [], "max_labels_reached": [], "test_not_found": []} + # Normalize titles to a list if isinstance(titles, str): title_list = [titles] else: title_list = titles - + # At this point, title_list should already be validated by the CLI # Just ensure we have clean titles title_list = [title.strip() for title in title_list if title.strip()] - + if not title_list: return {}, "No valid labels provided" - + # Validate test IDs by getting run information for each test valid_test_ids = [] for test_id in test_ids: # Get test information to validate it exists test_response = self.client.send_get(f"get_test/{test_id}") if test_response.status_code != 200: - results['test_not_found'].append(test_id) + results["test_not_found"].append(test_id) continue - + test_data = test_response.response_text # Validate that the test belongs to the correct project - run_id = test_data.get('run_id') + run_id = test_data.get("run_id") if run_id: run_response = self.client.send_get(f"get_run/{run_id}") if run_response.status_code == 200: run_data = run_response.response_text - if run_data.get('project_id') == project_id: + if run_data.get("project_id") == project_id: valid_test_ids.append(test_id) else: - results['test_not_found'].append(test_id) + results["test_not_found"].append(test_id) else: - results['test_not_found'].append(test_id) + results["test_not_found"].append(test_id) else: - results['test_not_found'].append(test_id) - + results["test_not_found"].append(test_id) + # If no valid test IDs, return early if not valid_test_ids: return results, "" - + # Check if labels exist or create them existing_labels, error_message = self.get_labels(project_id) if error_message: return results, error_message - + # Process each title to get/create label IDs label_ids = [] label_id_to_title = {} # Map label IDs to their titles for title in title_list: # Find existing label with the same title label_id = None - for label in existing_labels.get('labels', []): - if label.get('title') == title: - label_id = label.get('id') + for label in existing_labels.get("labels", []): + if 
label.get("title") == title: + label_id = label.get("id") break - + # Create label if it doesn't exist if label_id is None: label_data, error_message = self.add_label(project_id, title) if error_message: return results, error_message - label_info = label_data.get('label', label_data) - label_id = label_info.get('id') - + label_info = label_data.get("label", label_data) + label_id = label_info.get("id") + if label_id: label_ids.append(label_id) label_id_to_title[label_id] = title - + # Collect test data and validate constraints tests_to_update = [] for test_id in valid_test_ids: # Get current test to check existing labels test_response = self.client.send_get(f"get_test/{test_id}") if test_response.status_code != 200: - results['failed_tests'].append({ - 'test_id': test_id, - 'error': f"Could not retrieve test {test_id}: {test_response.error_message}" - }) + results["failed_tests"].append( + {"test_id": test_id, "error": f"Could not retrieve test {test_id}: {test_response.error_message}"} + ) continue - + test_data = test_response.response_text - current_labels = test_data.get('labels', []) - current_label_ids = [label.get('id') for label in current_labels if label.get('id')] - + current_labels = test_data.get("labels", []) + current_label_ids = [label.get("id") for label in current_labels if label.get("id")] + new_label_ids = [] already_exists_titles = [] - + for label_id in label_ids: if label_id not in current_label_ids: new_label_ids.append(label_id) else: if label_id in label_id_to_title: already_exists_titles.append(label_id_to_title[label_id]) - + if not new_label_ids: - results['successful_tests'].append({ - 'test_id': test_id, - 'message': f"All labels already exist on test {test_id}: {', '.join(already_exists_titles)}" - }) + results["successful_tests"].append( + { + "test_id": test_id, + "message": f"All labels already exist on test {test_id}: {', '.join(already_exists_titles)}", + } + ) continue - + # Check maximum labels limit (10) if len(current_label_ids) + len(new_label_ids) > 10: - results['max_labels_reached'].append(test_id) + results["max_labels_reached"].append(test_id) continue - + # Prepare test for update updated_label_ids = current_label_ids + new_label_ids - + new_label_titles = [] for label_id in new_label_ids: if label_id in label_id_to_title: new_label_titles.append(label_id_to_title[label_id]) - - tests_to_update.append({ - 'test_id': test_id, - 'labels': updated_label_ids, - 'new_labels': new_label_ids, - 'new_label_titles': new_label_titles - }) - + + tests_to_update.append( + { + "test_id": test_id, + "labels": updated_label_ids, + "new_labels": new_label_ids, + "new_label_titles": new_label_titles, + } + ) + # Update tests using appropriate endpoint if len(tests_to_update) == 1: # Single test: use update_test/{test_id} test_info = tests_to_update[0] - test_update_data = {'labels': test_info['labels']} - + test_update_data = {"labels": test_info["labels"]} + update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) - + if update_response.status_code == 200: - new_label_titles = test_info.get('new_label_titles', []) + new_label_titles = test_info.get("new_label_titles", []) new_label_count = len(new_label_titles) - + if new_label_count == 1: message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" elif new_label_count > 1: message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" else: message = f"No new labels added 
to test {test_info['test_id']}" - - results['successful_tests'].append({ - 'test_id': test_info['test_id'], - 'message': message - }) + + results["successful_tests"].append({"test_id": test_info["test_id"], "message": message}) else: - results['failed_tests'].append({ - 'test_id': test_info['test_id'], - 'error': update_response.error_message - }) + results["failed_tests"].append( + {"test_id": test_info["test_id"], "error": update_response.error_message} + ) else: # Multiple tests: use individual updates to ensure each test gets its specific labels for test_info in tests_to_update: - test_update_data = {'labels': test_info['labels']} + test_update_data = {"labels": test_info["labels"]} update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) - + if update_response.status_code == 200: - new_label_titles = test_info.get('new_label_titles', []) + new_label_titles = test_info.get("new_label_titles", []) new_label_count = len(new_label_titles) - + if new_label_count == 1: message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" elif new_label_count > 1: message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" else: message = f"No new labels added to test {test_info['test_id']}" - - results['successful_tests'].append({ - 'test_id': test_info['test_id'], - 'message': message - }) + + results["successful_tests"].append({"test_id": test_info["test_id"], "message": message}) else: - results['failed_tests'].append({ - 'test_id': test_info['test_id'], - 'error': update_response.error_message - }) - + results["failed_tests"].append( + {"test_id": test_info["test_id"], "error": update_response.error_message} + ) + return results, "" - def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None) -> Tuple[List[dict], str]: + def get_tests_by_label( + self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None + ) -> Tuple[List[dict], str]: """ Get tests filtered by label ID or title from specific runs - + :param project_id: Project ID :param label_ids: List of label IDs to filter by :param label_title: Label title to filter by @@ -1516,14 +1520,14 @@ def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label labels_data, error_message = self.get_labels(project_id) if error_message: return [], error_message - - for label in labels_data.get('labels', []): - if label.get('title') == label_title: - target_label_ids.append(label.get('id')) - + + for label in labels_data.get("labels", []): + if label.get("title") == label_title: + target_label_ids.append(label.get("id")) + if not target_label_ids: return [], "" # No label found is a valid case with 0 results - + # Get runs for the project (either all runs or specific run IDs) if run_ids: # Use specific run IDs - validate they exist by getting run details @@ -1539,67 +1543,65 @@ def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label runs_response = self.client.send_get(f"get_runs/{project_id}") if runs_response.status_code != 200: return [], runs_response.error_message - + runs_data = runs_response.response_text - runs = runs_data.get('runs', []) if isinstance(runs_data, dict) else runs_data - + runs = runs_data.get("runs", []) if isinstance(runs_data, dict) else runs_data + # Collect all tests from all runs matching_tests = [] for run in runs: - run_id = 
run.get('id') + run_id = run.get("id") if not run_id: continue - + # Get tests for this run tests_response = self.client.send_get(f"get_tests/{run_id}") if tests_response.status_code != 200: continue # Skip this run if we can't get tests - + tests_data = tests_response.response_text - tests = tests_data.get('tests', []) if isinstance(tests_data, dict) else tests_data - + tests = tests_data.get("tests", []) if isinstance(tests_data, dict) else tests_data + # Filter tests that have any of the target labels for test in tests: - test_labels = test.get('labels', []) - test_label_ids = [label.get('id') for label in test_labels] - + test_labels = test.get("labels", []) + test_label_ids = [label.get("id") for label in test_labels] + # Check if any of the target label IDs are present in this test if any(label_id in test_label_ids for label_id in target_label_ids): matching_tests.append(test) - + return matching_tests, "" def get_test_labels(self, test_ids: List[int]) -> Tuple[List[dict], str]: """ Get labels for specific tests - + :param test_ids: List of test IDs to get labels for :returns: Tuple with list of test label information and error string """ results = [] - + for test_id in test_ids: # Get test information test_response = self.client.send_get(f"get_test/{test_id}") if test_response.status_code != 200: - results.append({ - 'test_id': test_id, - 'error': f"Test {test_id} not found or inaccessible", - 'labels': [] - }) + results.append({"test_id": test_id, "error": f"Test {test_id} not found or inaccessible", "labels": []}) continue - + test_data = test_response.response_text - test_labels = test_data.get('labels', []) - - results.append({ - 'test_id': test_id, - 'title': test_data.get('title', 'Unknown'), - 'status_id': test_data.get('status_id'), - 'labels': test_labels, - 'error': None - }) - + test_labels = test_data.get("labels", []) + + results.append( + { + "test_id": test_id, + "title": test_data.get("title", "Unknown"), + "status_id": test_data.get("status_id"), + "labels": test_labels, + "error": None, + } + ) + return results, "" # Test case reference management methods @@ -1614,15 +1616,15 @@ def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool case_response = self.client.send_get(f"get_case/{case_id}") if case_response.status_code != 200: return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}" - + case_data = case_response.response_text - existing_refs = case_data.get('refs', '') or '' - + existing_refs = case_data.get("refs", "") or "" + # Parse existing references existing_ref_list = [] if existing_refs: - existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - + existing_ref_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + # Deduplicate input references while preserving order deduplicated_input = [] seen = set() @@ -1631,24 +1633,24 @@ def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool if ref_clean and ref_clean not in seen: deduplicated_input.append(ref_clean) seen.add(ref_clean) - + # Add new references (avoid duplicates with existing) all_refs = existing_ref_list.copy() for ref in deduplicated_input: if ref not in all_refs: all_refs.append(ref) - + # Join all references - new_refs_string = ','.join(all_refs) - + new_refs_string = ",".join(all_refs) + # Validate total character limit if len(new_refs_string) > 2000: return False, f"Total references length ({len(new_refs_string)} characters) exceeds 2000 character limit" - + # 
Update the test case with new references - update_data = {'refs': new_refs_string} + update_data = {"refs": new_refs_string} update_response = self.client.send_post(f"update_case/{case_id}", update_data) - + if update_response.status_code == 200: return True, "" else: @@ -1669,18 +1671,18 @@ def update_case_references(self, case_id: int, references: List[str]) -> Tuple[b if ref_clean and ref_clean not in seen: deduplicated_refs.append(ref_clean) seen.add(ref_clean) - + # Join references - new_refs_string = ','.join(deduplicated_refs) - + new_refs_string = ",".join(deduplicated_refs) + # Validate total character limit if len(new_refs_string) > 2000: return False, f"Total references length ({len(new_refs_string)} characters) exceeds 2000 character limit" - + # Update the test case with new references - update_data = {'refs': new_refs_string} + update_data = {"refs": new_refs_string} update_response = self.client.send_post(f"update_case/{case_id}", update_data) - + if update_response.status_code == 200: return True, "" else: @@ -1695,37 +1697,120 @@ def delete_case_references(self, case_id: int, specific_references: List[str] = """ if specific_references is None: # Delete all references by setting refs to empty string - update_data = {'refs': ''} + update_data = {"refs": ""} else: # First get the current test case to retrieve existing references case_response = self.client.send_get(f"get_case/{case_id}") if case_response.status_code != 200: return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}" - + case_data = case_response.response_text - existing_refs = case_data.get('refs', '') or '' - + existing_refs = case_data.get("refs", "") or "" + if not existing_refs: # No references to delete return True, "" - + # Parse existing references - existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - + existing_ref_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + # Deduplicate input references for efficient processing refs_to_delete = set(ref.strip() for ref in specific_references if ref.strip()) - + # Remove specific references remaining_refs = [ref for ref in existing_ref_list if ref not in refs_to_delete] - + # Join remaining references - new_refs_string = ','.join(remaining_refs) - update_data = {'refs': new_refs_string} - + new_refs_string = ",".join(remaining_refs) + update_data = {"refs": new_refs_string} + # Update the test case update_response = self.client.send_post(f"update_case/{case_id}", update_data) - + if update_response.status_code == 200: return True, "" else: return False, update_response.error_message + + def add_bdd(self, section_id: int, feature_content: str) -> Tuple[List[int], str]: + """ + Upload .feature file to TestRail BDD endpoint + + Creates TestRail test case from Gherkin .feature content. + The Gherkin content is sent in the request body as plain text. 
+ + Args: + section_id: TestRail section ID where test case will be created + feature_content: Raw .feature file content (Gherkin syntax) + + Returns: + Tuple of (case_ids, error_message) + - case_ids: List containing the created test case ID + - error_message: Empty string on success, error details on failure + + API Endpoint: POST /api/v2/add_bdd/{section_id} + Request Body: Raw Gherkin text + Response: Standard TestRail test case JSON with BDD custom fields + """ + # Send Gherkin content as file upload (multipart/form-data) + # TestRail expects the .feature file as an attachment + self.environment.vlog(f"Uploading .feature file to add_bdd/{section_id}") + + files = {"attachment": ("feature.feature", feature_content, "text/plain")} + + response = self.client.send_post(f"add_bdd/{section_id}", payload=None, files=files) + + if response.status_code == 200: + # Response is a test case object with 'id' field + if isinstance(response.response_text, dict): + case_id = response.response_text.get("id") + if case_id: + return [case_id], "" + else: + return [], "Response missing 'id' field" + else: + return [], "Unexpected response format" + else: + error_msg = response.error_message or f"Failed to upload feature file (HTTP {response.status_code})" + return [], error_msg + + def get_bdd(self, case_id: int) -> Tuple[str, str]: + """ + Retrieve BDD test case as .feature file content + + Args: + case_id: TestRail test case ID + + Returns: + Tuple of (feature_content, error_message) + - feature_content: .feature file content (Gherkin syntax) + - error_message: Empty string on success, error details on failure + + API Endpoint: GET /api/v2/get_bdd/{case_id} + Response: Raw Gherkin text + """ + self.environment.vlog(f"Retrieving BDD test case from get_bdd/{case_id}") + response = self.client.send_get(f"get_bdd/{case_id}") + + if response.status_code == 200: + # TestRail returns raw Gherkin text (not JSON) + # APIClient treats non-JSON as error and stores str(response.content) + if isinstance(response.response_text, dict): + # Some versions might return JSON with 'feature' field + feature_content = response.response_text.get("feature", "") + elif isinstance(response.response_text, str) and response.response_text.startswith("b'"): + # APIClient converted bytes to string representation: "b'text'" + # Need to extract the actual content + try: + # Remove b' prefix and ' suffix, then decode escape sequences + feature_content = response.response_text[2:-1].encode().decode("unicode_escape") + except (ValueError, AttributeError): + feature_content = response.response_text + else: + # Plain text response + feature_content = response.response_text + + return feature_content, "" + else: + error_msg = response.error_message or f"Failed to retrieve BDD test case (HTTP {response.status_code})" + return "", error_msg diff --git a/trcli/commands/cmd_export_gherkin.py b/trcli/commands/cmd_export_gherkin.py new file mode 100644 index 0000000..0dba035 --- /dev/null +++ b/trcli/commands/cmd_export_gherkin.py @@ -0,0 +1,137 @@ +import click +from pathlib import Path + +from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS +from trcli.constants import FAULT_MAPPING +from trcli.api.api_client import APIClient +from trcli.api.api_request_handler import ApiRequestHandler +from trcli.data_classes.dataclass_testrail import TestRailSuite +import trcli + + +@click.command(context_settings=CONTEXT_SETTINGS) +@click.option( + "--case-id", + type=click.IntRange(min=1), + metavar="", + required=True, + help="TestRail 
test case ID to export as .feature file.", +) +@click.option( + "--output", + type=click.Path(), + metavar="", + help="Output path for the .feature file. If not specified, prints to stdout.", +) +@click.option("-v", "--verbose", is_flag=True, help="Enable verbose logging output.") +@click.pass_context +@pass_environment +def cli(environment: Environment, context: click.Context, case_id: int, output: str, **kwargs): + """Export BDD test case from TestRail as .feature file + + This command retrieves a test case from TestRail's BDD endpoint + and exports it as a Gherkin .feature file. + + The test case must have been created via the BDD import functionality + for this command to work. + + Mapping Rules (TestRail to .feature): + - Test Case name → Feature: + - Preconditions field → Free text after Feature: + - BDD Scenario field → Background:/Scenario:/Scenario Outline:/Rule: + - Reference field → @Tags before Feature: (@ added) + - BDD field tags → @Tags before scenarios + + Examples: + # Export to file + trcli export_gherkin --case-id 456 --output login.feature --project-id 1 + + # Print to stdout + trcli export_gherkin --case-id 456 --project-id 1 + """ + environment.cmd = "export_gherkin" + environment.set_parameters(context) + environment.check_for_required_parameters() + + # Set up logging + if kwargs.get("verbose"): + environment.verbose = True + + try: + environment.vlog(f"Target case ID: {case_id}") + environment.vlog(f"API endpoint: GET /api/v2/get_bdd/{case_id}") + + # Initialize API client + environment.log("Connecting to TestRail...") + + # Create APIClient + uploader_metadata = APIClient.build_uploader_metadata(version=trcli.__version__) + api_client = APIClient( + host_name=environment.host, + verify=not environment.insecure, + verbose_logging_function=environment.vlog, + logging_function=environment.log, + uploader_metadata=uploader_metadata, + ) + + # Set credentials after initialization + api_client.username = environment.username + api_client.password = environment.password + api_client.api_key = environment.key + + # Create minimal suite for ApiRequestHandler (BDD operations don't need suite data) + minimal_suite = TestRailSuite(name="BDD Export", testsections=[]) + + # Create ApiRequestHandler + api_request_handler = ApiRequestHandler( + environment=environment, + api_client=api_client, + suites_data=minimal_suite, + ) + + # Get BDD test case + environment.log(f"Retrieving BDD test case {case_id}...") + feature_content, error_message = api_request_handler.get_bdd(case_id) + + if error_message: + environment.elog(f"Error retrieving test case: {error_message}") + exit(1) + + if not feature_content or not feature_content.strip(): + environment.elog(f"Error: No BDD content found for case ID {case_id}") + environment.elog("This test case may not have been created via BDD import.") + exit(1) + + # Output results + if output: + output_path = Path(output) + + if environment.verbose: + environment.log(f"Writing feature file to: {output_path}") + + # Create parent directory if it doesn't exist + output_path.parent.mkdir(parents=True, exist_ok=True) + + with open(output_path, "w", encoding="utf-8") as f: + f.write(feature_content) + + environment.log(f"\n✓ Successfully exported test case {case_id}") + environment.log(f" File: {output_path}") + environment.log(f" Size: {len(feature_content)} characters") + else: + # Print to stdout + print(feature_content) + + except PermissionError: + environment.elog(f"Error: Permission denied writing to file: {output}") + exit(1) + except IOError 
as e: + environment.elog(f"Error writing file: {str(e)}") + exit(1) + except Exception as e: + environment.elog(f"Unexpected error: {str(e)}") + if environment.verbose: + import traceback + + environment.elog(traceback.format_exc()) + exit(1) diff --git a/trcli/commands/cmd_import_gherkin.py b/trcli/commands/cmd_import_gherkin.py new file mode 100644 index 0000000..1ec9e87 --- /dev/null +++ b/trcli/commands/cmd_import_gherkin.py @@ -0,0 +1,143 @@ +import click +from pathlib import Path + +from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS +from trcli.constants import FAULT_MAPPING +from trcli.api.api_client import APIClient +from trcli.api.api_request_handler import ApiRequestHandler +from trcli.data_classes.dataclass_testrail import TestRailSuite +import trcli + + +@click.command(context_settings=CONTEXT_SETTINGS) +@click.option( + "-f", + "--file", + type=click.Path(exists=True), + metavar="", + required=True, + help="Path to Gherkin .feature file to upload.", +) +@click.option( + "--section-id", + type=click.IntRange(min=1), + metavar="", + required=True, + help="TestRail section ID where test cases will be created.", +) +@click.option("-v", "--verbose", is_flag=True, help="Enable verbose logging output.") +@click.option("--json-output", is_flag=True, help="Output case IDs in JSON format.") +@click.pass_context +@pass_environment +def cli(environment: Environment, context: click.Context, file: str, section_id: int, **kwargs): + """Upload Gherkin .feature file to TestRail + + This command uploads a Gherkin/BDD .feature file directly to TestRail, + which will create or update test cases based on the scenarios in the file. + + TestRail will parse the .feature file and automatically create test cases + for each scenario, maintaining the BDD structure in TestRail's native format. 
+ + Mapping Rules (.feature to TestRail): + - Feature: → Test Case name + - Free text after Feature: → Preconditions field + - Background:/Scenario:/Scenario Outline:/Rule: → BDD Scenario field + - Examples: (under Scenario Outline/Rule) → Same BDD field as parent + - @Tags before Feature: → Reference field (@ stripped) + - @Tags before scenarios → BDD field + + Example: + trcli import_gherkin -f login.feature --section-id 123 --project-id 1 + """ + environment.cmd = "import_gherkin" + environment.set_parameters(context) + environment.check_for_required_parameters() + + # Set up logging + if kwargs.get("verbose"): + environment.verbose = True + + try: + # Read the feature file + feature_path = Path(file) + if environment.verbose: + environment.log(f"Reading feature file: {feature_path}") + + with open(feature_path, "r", encoding="utf-8") as f: + feature_content = f.read() + + if not feature_content.strip(): + environment.elog("Error: Feature file is empty") + exit(1) + + environment.vlog(f"Feature file size: {len(feature_content)} characters") + environment.vlog(f"Target section ID: {section_id}") + environment.vlog(f"API endpoint: POST /api/v2/add_bdd/{section_id}") + + # Initialize API client + environment.log("Connecting to TestRail...") + + # Create APIClient + uploader_metadata = APIClient.build_uploader_metadata(version=trcli.__version__) + api_client = APIClient( + host_name=environment.host, + verify=not environment.insecure, + verbose_logging_function=environment.vlog, + logging_function=environment.log, + uploader_metadata=uploader_metadata, + ) + + # Set credentials after initialization + api_client.username = environment.username + api_client.password = environment.password + api_client.api_key = environment.key + + # Create minimal suite for ApiRequestHandler (BDD operations don't need suite data) + minimal_suite = TestRailSuite(name="BDD Import", testsections=[]) + + # Create ApiRequestHandler + api_request_handler = ApiRequestHandler( + environment=environment, + api_client=api_client, + suites_data=minimal_suite, + ) + + # Upload feature file + environment.log(f"Uploading feature file to TestRail...") + case_ids, error_message = api_request_handler.add_bdd(section_id, feature_content) + + if error_message: + environment.elog(f"Error uploading feature file: {error_message}") + exit(1) + + if not case_ids: + environment.log("Warning: No case IDs returned from TestRail") + environment.log("Feature file was uploaded but no cases were created.") + exit(0) + + # Display results + if kwargs.get("json_output"): + import json + + print(json.dumps({"case_ids": case_ids, "count": len(case_ids)}, indent=2)) + else: + environment.log(f"\nSuccessfully uploaded feature file!") + environment.log(f" Created/updated {len(case_ids)} test case(s)") + environment.log(f" Case IDs: {', '.join(map(str, case_ids))}") + + except FileNotFoundError: + environment.elog(f"Error: Feature file not found: {file}") + exit(1) + except PermissionError: + environment.elog(f"Error: Permission denied reading feature file: {file}") + exit(1) + except UnicodeDecodeError: + environment.elog(f"Error: Feature file must be UTF-8 encoded: {file}") + exit(1) + except Exception as e: + environment.elog(f"Unexpected error: {str(e)}") + if environment.verbose: + import traceback + + environment.elog(traceback.format_exc()) + exit(1) diff --git a/trcli/commands/cmd_parse_cucumber.py b/trcli/commands/cmd_parse_cucumber.py new file mode 100644 index 0000000..98bd46b --- /dev/null +++ b/trcli/commands/cmd_parse_cucumber.py @@ 
-0,0 +1,147 @@ +import click +import json + +from trcli.api.results_uploader import ResultsUploader +from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS +from trcli.commands.results_parser_helpers import results_parser_options, print_config +from trcli.constants import FAULT_MAPPING +from trcli.data_classes.validation_exception import ValidationException +from trcli.readers.cucumber_json import CucumberParser + + +@click.command(context_settings=CONTEXT_SETTINGS) +@results_parser_options +@click.option( + "--upload-feature", + is_flag=True, + help="Generate and upload .feature file to create/update test cases via BDD endpoint.", +) +@click.option( + "--feature-section-id", + type=click.IntRange(min=1), + metavar="", + help="Section ID for uploading .feature file (required if --upload-feature is used).", +) +@click.pass_context +@pass_environment +def cli(environment: Environment, context: click.Context, *args, **kwargs): + """Parse Cucumber JSON results and upload to TestRail + + This command parses Cucumber JSON test results and uploads them to TestRail. + It supports two workflows: + + Workflow 1 - Upload Results Only (requires existing test cases): + trcli parse_cucumber -f results.json -n --project-id 1 --suite-id 2 + + Workflow 2 - Create Cases + Upload Results (via BDD): + trcli parse_cucumber -f results.json --upload-feature \\ + --feature-section-id 123 --project-id 1 --suite-id 2 + + The --upload-feature flag will: + 1. Generate a .feature file from the Cucumber JSON + 2. Upload it to TestRail via add_bdd endpoint (applying mapping rules) + 3. Retrieve the created case IDs + 4. Upload test results to those cases + + Generated .feature Mapping Rules (Cucumber JSON → .feature → TestRail): + - Feature name/description → Feature: + free text → Test Case name + Preconditions + - Background → Background: → BDD Scenario field + - Scenarios → Scenario:/Scenario Outline: → BDD Scenario field + - Rules → Rule: → BDD Scenario field + - Examples → Examples: table → BDD field (under parent scenario) + - Feature/Scenario tags → @Tags → Reference/BDD fields + + Without --upload-feature, test cases must already exist in TestRail + and be matched via automation_id (use --case-matcher option). 
+ """ + environment.cmd = "parse_cucumber" + environment.set_parameters(context) + environment.check_for_required_parameters() + + # Validate feature upload options + upload_feature = kwargs.get("upload_feature", False) + feature_section_id = kwargs.get("feature_section_id") + + if upload_feature and not feature_section_id: + environment.elog("Error: --feature-section-id is required when using --upload-feature") + exit(1) + + print_config(environment) + + try: + # Parse Cucumber JSON file + parsed_suites = CucumberParser(environment).parse_file() + + # Workflow: Upload feature file if requested + if upload_feature: + environment.log("\n=== Phase 1: Uploading Feature File ===") + + # Generate feature file content + parser = CucumberParser(environment) + feature_content = parser.generate_feature_file() + + if not feature_content: + environment.elog("Error: Could not generate feature file from Cucumber JSON") + exit(1) + + # Upload feature file + from trcli.api.api_request_handler import ApiRequestHandler + + api_handler = ApiRequestHandler( + environment=environment, + suites_input=parsed_suites, + project_id=environment.project_id, + ) + + environment.log(f"Uploading generated .feature file to section {feature_section_id}...") + case_ids, error_message = api_handler.add_bdd(feature_section_id, feature_content) + + if error_message: + environment.elog(f"Error uploading feature file: {error_message}") + exit(1) + + environment.log(f"✓ Created/updated {len(case_ids)} test case(s)") + environment.log(f" Case IDs: {', '.join(map(str, case_ids))}") + + # Update parsed suites with case IDs (if we can map them) + # Note: This mapping assumes the order is preserved, which may not always be true + # A more robust implementation would match by automation_id + environment.log("\nNote: Proceeding to upload results for matched cases...") + + # Upload test results + environment.log("\n=== Phase 2: Uploading Test Results ===") + + run_id = None + for suite in parsed_suites: + result_uploader = ResultsUploader(environment=environment, suite=suite) + result_uploader.upload_results() + + if run_id is None and hasattr(result_uploader, "last_run_id"): + run_id = result_uploader.last_run_id + + # Summary + if run_id: + environment.log(f"\n✓ Results uploaded successfully to run ID: {run_id}") + else: + environment.log("\n✓ Results processing completed") + + except FileNotFoundError: + environment.elog(f"Error: Cucumber JSON file not found: {environment.file}") + exit(1) + except json.JSONDecodeError as e: + environment.elog(f"Error: Invalid JSON format in file: {environment.file}") + environment.elog(f" {str(e)}") + exit(1) + except ValidationException as e: + environment.elog(f"Validation error: {str(e)}") + exit(1) + except ValueError as e: + environment.elog(f"Error parsing Cucumber JSON: {str(e)}") + exit(1) + except Exception as e: + environment.elog(f"Unexpected error: {str(e)}") + if environment.verbose: + import traceback + + environment.elog(traceback.format_exc()) + exit(1) diff --git a/trcli/constants.py b/trcli/constants.py index 858c556..dc5f5f9 100644 --- a/trcli/constants.py +++ b/trcli/constants.py @@ -73,7 +73,9 @@ COMMAND_FAULT_MAPPING = dict( add_run=dict(**FAULT_MAPPING, **ADD_RUN_FAULT_MAPPING), parse_junit=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING, **PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING), - parse_gherkin=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING), + import_gherkin=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING), + export_gherkin=dict(**FAULT_MAPPING), + 
parse_cucumber=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING, **PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING), parse_openapi=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING), parse_robot=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING, **PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING), labels=dict(**FAULT_MAPPING), @@ -96,7 +98,9 @@ Copyright 2025 Gurock Software GmbH - www.gurock.com""" TOOL_USAGE = f"""Supported and loaded modules: - parse_junit: JUnit XML Files (& Similar) - - parse_gherkin: Gherkin .feature files (BDD) + - parse_cucumber: Cucumber JSON results (BDD) + - import_gherkin: Upload .feature files to TestRail BDD + - export_gherkin: Export BDD test cases as .feature files - parse_robot: Robot Framework XML Files - parse_openapi: OpenAPI YML Files - add_run: Create a new test run diff --git a/trcli/readers/cucumber_json.py b/trcli/readers/cucumber_json.py new file mode 100644 index 0000000..d4b2b6d --- /dev/null +++ b/trcli/readers/cucumber_json.py @@ -0,0 +1,493 @@ +import json +from pathlib import Path +from beartype.typing import List, Dict, Any, Optional + +from trcli.cli import Environment +from trcli.data_classes.data_parsers import MatchersParser, TestRailCaseFieldsOptimizer +from trcli.data_classes.dataclass_testrail import ( + TestRailCase, + TestRailSuite, + TestRailSection, + TestRailResult, + TestRailSeparatedStep, +) +from trcli.readers.file_parser import FileParser + + +class CucumberParser(FileParser): + """Parser for Cucumber JSON results format""" + + def __init__(self, environment: Environment): + super().__init__(environment) + self.case_matcher = environment.case_matcher + + def parse_file(self) -> List[TestRailSuite]: + """Parse Cucumber JSON results file and convert to TestRailSuite structure + + Returns: + List of TestRailSuite objects with test cases and results + """ + self.env.log(f"Parsing Cucumber JSON file: {self.filename}") + + # Read and parse the JSON file + with open(self.filepath, "r", encoding="utf-8") as f: + cucumber_data = json.load(f) + + # Cucumber JSON is typically an array of features + if not isinstance(cucumber_data, list): + raise ValueError("Cucumber JSON must be an array of features") + + # Parse features into TestRail structure + sections = [] + for feature in cucumber_data: + feature_sections = self._parse_feature(feature) + sections.extend(feature_sections) + + cases_count = sum(len(section.testcases) for section in sections) + self.env.log(f"Processed {cases_count} test cases in {len(sections)} sections.") + + # Create suite + suite_name = self.env.suite_name if self.env.suite_name else "Cucumber Test Results" + testrail_suite = TestRailSuite( + name=suite_name, + testsections=sections, + source=self.filename, + ) + + return [testrail_suite] + + def _parse_feature(self, feature: Dict[str, Any]) -> List[TestRailSection]: + """Parse a single Cucumber feature into TestRail sections + + Args: + feature: Feature object from Cucumber JSON + + Returns: + List of TestRailSection objects + """ + feature_name = feature.get("name", "Untitled Feature") + feature_tags = self._extract_tags(feature.get("tags", [])) + + # Create a section for this feature + section = TestRailSection(name=feature_name, testcases=[]) + + # Parse scenarios/scenario outlines + for element in feature.get("elements", []): + element_type = element.get("type", "") + + if element_type in ("scenario", "scenario_outline"): + test_case = self._parse_scenario(element, feature_name, feature_tags) + if test_case: + section.testcases.append(test_case) + + return [section] if 
section.testcases else [] + + def _parse_scenario( + self, scenario: Dict[str, Any], feature_name: str, feature_tags: List[str] + ) -> Optional[TestRailCase]: + """Parse a Cucumber scenario into TestRailCase + + Args: + scenario: Scenario object from Cucumber JSON + feature_name: Name of the parent feature + feature_tags: Tags from the parent feature + + Returns: + TestRailCase object or None + """ + scenario_name = scenario.get("name", "Untitled Scenario") + scenario_tags = self._extract_tags(scenario.get("tags", [])) + all_tags = feature_tags + scenario_tags + + # Build automation ID + automation_id = self._build_automation_id(feature_name, all_tags, scenario_name) + + # Extract case ID if using matcher + case_id = None + if self.case_matcher == MatchersParser.NAME: + case_id, scenario_name = MatchersParser.parse_name_with_id(scenario_name) + elif self.case_matcher == MatchersParser.PROPERTY: + # Look for @C tag pattern + for tag in all_tags: + if tag.startswith("@C") or tag.startswith("@c"): + try: + case_id = int(tag[2:]) + break + except ValueError: + pass + + # Parse steps and determine overall status + steps = scenario.get("steps", []) + step_results, overall_status = self._parse_steps(steps) + + # Calculate elapsed time + elapsed_time = self._calculate_elapsed_time(steps) + + # Build comment from failures + comment = self._build_comment_from_failures(steps) + + # Create result object + result = TestRailResult( + case_id=case_id, + status_id=overall_status, + comment=comment, + elapsed=elapsed_time, + custom_step_results=step_results, + ) + + # Create test case + test_case = TestRailCase( + title=TestRailCaseFieldsOptimizer.extract_last_words( + scenario_name, TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH + ), + case_id=case_id, + result=result, + custom_automation_id=automation_id, + case_fields={"tags": ", ".join(all_tags)} if all_tags else {}, + ) + + return test_case + + def _parse_steps(self, steps: List[Dict[str, Any]]) -> tuple: + """Parse Cucumber steps into TestRail step results + + Args: + steps: List of step objects from Cucumber JSON + + Returns: + Tuple of (list of TestRailSeparatedStep, overall_status_id) + """ + step_results = [] + overall_status = 1 # Passed by default + + for step in steps: + keyword = step.get("keyword", "").strip() + step_name = step.get("name", "") + step_content = f"{keyword} {step_name}".strip() + + # Determine step status + result = step.get("result", {}) + result_status = result.get("status", "").lower() + + # Map Cucumber status to TestRail status ID + # 1=Passed, 3=Untested, 4=Skipped, 5=Failed + if result_status == "passed": + step_status_id = 1 + elif result_status == "failed": + step_status_id = 5 + overall_status = 5 # Test failed + elif result_status == "skipped": + step_status_id = 4 + if overall_status == 1: # Only update if not already failed + overall_status = 4 + elif result_status == "pending": + step_status_id = 3 + if overall_status == 1: + overall_status = 3 + elif result_status == "undefined": + step_status_id = 3 + if overall_status == 1: + overall_status = 3 + else: + step_status_id = 3 + + # Create step result + tr_step = TestRailSeparatedStep(content=step_content) + tr_step.status_id = step_status_id + step_results.append(tr_step) + + return step_results, overall_status + + def _calculate_elapsed_time(self, steps: List[Dict[str, Any]]) -> Optional[str]: + """Calculate total elapsed time from steps + + Args: + steps: List of step objects + + Returns: + Elapsed time string or None + """ + total_duration = 0 + for 
step in steps: + result = step.get("result", {}) + duration = result.get("duration", 0) + if duration: + total_duration += duration + + if total_duration > 0: + # Convert nanoseconds to seconds + total_seconds = total_duration / 1_000_000_000 + # Always return at least 1s if there was any duration + if total_seconds >= 1: + return f"{round(total_seconds)}s" + else: + return "1s" + + return None + + def _build_comment_from_failures(self, steps: List[Dict[str, Any]]) -> str: + """Build comment string from failed steps + + Args: + steps: List of step objects + + Returns: + Comment string describing failures + """ + failures = [] + for step in steps: + result = step.get("result", {}) + if result.get("status", "").lower() == "failed": + keyword = step.get("keyword", "").strip() + step_name = step.get("name", "") + error_message = result.get("error_message", "") + + failure_text = f"Failed: {keyword} {step_name}" + if error_message: + failure_text += f"\n Error: {error_message}" + + failures.append(failure_text) + + return "\n\n".join(failures) if failures else "" + + def _extract_tags(self, tags: List[Dict[str, str]]) -> List[str]: + """Extract tag names from Cucumber tag objects + + Args: + tags: List of tag objects with 'name' field + + Returns: + List of tag name strings + """ + return [tag.get("name", "") for tag in tags if tag.get("name")] + + def _build_automation_id(self, feature_name: str, tags: List[str], scenario_name: str) -> str: + """Build automation ID from feature, tags, and scenario name + + Args: + feature_name: Feature name + tags: List of tags + scenario_name: Scenario name + + Returns: + Automation ID string + """ + parts = [feature_name] + + # Add tags if present + if tags: + parts.extend(tags) + + # Add scenario name + parts.append(scenario_name) + + return ".".join(parts) + + def generate_feature_file(self) -> str: + """Generate .feature file content from parsed Cucumber JSON + + This reconstructs Gherkin syntax from the Cucumber JSON results. + Useful for creating/updating BDD test cases in TestRail. 
+ + Returns: + Feature file content as string + """ + with open(self.filepath, "r", encoding="utf-8") as f: + cucumber_data = json.load(f) + + if not isinstance(cucumber_data, list) or not cucumber_data: + return "" + + # Generate feature files (one per feature in JSON) + feature_files = [] + + for feature in cucumber_data: + feature_content = self._generate_feature_content(feature) + if feature_content: + feature_files.append(feature_content) + + return "\n\n".join(feature_files) + + def _generate_feature_content(self, feature: Dict[str, Any]) -> str: + """Generate Gherkin feature content from Cucumber feature object + + Args: + feature: Feature object from Cucumber JSON + + Returns: + Gherkin feature content as string + """ + lines = [] + + # Feature tags + feature_tags = self._extract_tags(feature.get("tags", [])) + if feature_tags: + lines.append(" ".join(feature_tags)) + + # Feature header + feature_name = feature.get("name", "Untitled Feature") + feature_description = feature.get("description", "") + + lines.append(f"Feature: {feature_name}") + if feature_description: + for desc_line in feature_description.split("\n"): + if desc_line.strip(): + lines.append(f" {desc_line.strip()}") + + lines.append("") # Empty line after feature header + + # Process elements in order: Background first, then scenarios/rules + for element in feature.get("elements", []): + element_type = element.get("type", "") + + if element_type == "background": + background_content = self._generate_background_content(element) + if background_content: + lines.append(background_content) + lines.append("") # Empty line after background + + elif element_type in ("scenario", "scenario_outline"): + scenario_content = self._generate_scenario_content(element) + if scenario_content: + lines.append(scenario_content) + lines.append("") # Empty line between scenarios + + elif element_type == "rule": + rule_content = self._generate_rule_content(element) + if rule_content: + lines.append(rule_content) + lines.append("") # Empty line after rule + + return "\n".join(lines) + + def _generate_scenario_content(self, scenario: Dict[str, Any]) -> str: + """Generate Gherkin scenario content + + Args: + scenario: Scenario object from Cucumber JSON + + Returns: + Gherkin scenario content as string + """ + lines = [] + + # Scenario tags + scenario_tags = self._extract_tags(scenario.get("tags", [])) + if scenario_tags: + lines.append(" " + " ".join(scenario_tags)) + + # Scenario header + scenario_type = scenario.get("type", "scenario") + scenario_name = scenario.get("name", "Untitled Scenario") + + if scenario_type == "scenario_outline": + lines.append(f" Scenario Outline: {scenario_name}") + else: + lines.append(f" Scenario: {scenario_name}") + + # Steps + for step in scenario.get("steps", []): + keyword = step.get("keyword", "").strip() + step_name = step.get("name", "") + lines.append(f" {keyword} {step_name}") + + # Examples table (for Scenario Outline) + if scenario_type == "scenario_outline": + examples = scenario.get("examples", []) + if examples: + for example_group in examples: + lines.append("") # Empty line before examples + + # Examples tags (if any) + example_tags = self._extract_tags(example_group.get("tags", [])) + if example_tags: + lines.append(" " + " ".join(example_tags)) + + # Examples keyword + lines.append(" Examples:") + + # Examples table + rows = example_group.get("rows", []) + if rows: + for row in rows: + cells = row.get("cells", []) + if cells: + row_content = " | ".join(cells) + lines.append(f" | {row_content} |") 
+ + return "\n".join(lines) + + def _generate_background_content(self, background: Dict[str, Any]) -> str: + """Generate Gherkin background content + + Args: + background: Background object from Cucumber JSON + + Returns: + Gherkin background content as string + """ + lines = [] + + # Background header + background_name = background.get("name", "") + if background_name: + lines.append(f" Background: {background_name}") + else: + lines.append(" Background:") + + # Steps + for step in background.get("steps", []): + keyword = step.get("keyword", "").strip() + step_name = step.get("name", "") + lines.append(f" {keyword} {step_name}") + + return "\n".join(lines) + + def _generate_rule_content(self, rule: Dict[str, Any]) -> str: + """Generate Gherkin rule content + + Args: + rule: Rule object from Cucumber JSON + + Returns: + Gherkin rule content as string + """ + lines = [] + + # Rule tags (if any) + rule_tags = self._extract_tags(rule.get("tags", [])) + if rule_tags: + lines.append(" " + " ".join(rule_tags)) + + # Rule header + rule_name = rule.get("name", "Untitled Rule") + lines.append(f" Rule: {rule_name}") + + # Rule description (if any) + rule_description = rule.get("description", "") + if rule_description: + for desc_line in rule_description.split("\n"): + if desc_line.strip(): + lines.append(f" {desc_line.strip()}") + + # Background within rule (if any) + for element in rule.get("children", []): + element_type = element.get("type", "") + if element_type == "background": + lines.append("") + background_content = self._generate_background_content(element) + # Indent background under rule + for line in background_content.split("\n"): + lines.append(" " + line if line else "") + + # Scenarios within rule + for element in rule.get("children", []): + element_type = element.get("type", "") + if element_type in ("scenario", "scenario_outline"): + lines.append("") + scenario_content = self._generate_scenario_content(element) + # Indent scenario under rule + for line in scenario_content.split("\n"): + lines.append(" " + line if line else "") + + return "\n".join(lines) From b537a2b9111cb4428b459cb375b548b1172dc24f Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 27 Nov 2025 12:41:50 +0800 Subject: [PATCH 05/33] TRCLI-198 Added unit and functional tests for import and export gherkin, parse cucumber and gherkin with sample data and reports --- tests/test_api_request_handler.py | 398 ++-- tests/test_cmd_export_gherkin.py | 254 +++ tests/test_cmd_import_gherkin.py | 255 +++ tests/test_cmd_parse_cucumber.py | 298 +++ tests/test_cmd_parse_gherkin.py | 189 ++ tests/test_cucumber_parser.py | 256 +++ tests/test_data/CUCUMBER/sample_cucumber.json | 175 ++ .../CUCUMBER/sample_cucumber_advanced.json | 234 +++ tests/test_data/FEATURE/sample_bdd.feature | 23 + tests/test_data/cli_test_data.py | 4 +- .../reports_cucumber/sample_cucumber.json | 175 ++ .../sample_cucumber_advanced.json | 234 +++ tests_e2e/reports_gherkin/sample_bdd.feature | 23 + .../reports_gherkin/sample_login.feature | 41 + tests_e2e/test_end2end.py | 1651 ++++++++++------- 15 files changed, 3333 insertions(+), 877 deletions(-) create mode 100644 tests/test_cmd_export_gherkin.py create mode 100644 tests/test_cmd_import_gherkin.py create mode 100644 tests/test_cmd_parse_cucumber.py create mode 100644 tests/test_cmd_parse_gherkin.py create mode 100644 tests/test_cucumber_parser.py create mode 100644 tests/test_data/CUCUMBER/sample_cucumber.json create mode 100644 tests/test_data/CUCUMBER/sample_cucumber_advanced.json create mode 100644 
tests/test_data/FEATURE/sample_bdd.feature create mode 100644 tests_e2e/reports_cucumber/sample_cucumber.json create mode 100644 tests_e2e/reports_cucumber/sample_cucumber_advanced.json create mode 100644 tests_e2e/reports_gherkin/sample_bdd.feature create mode 100644 tests_e2e/reports_gherkin/sample_login.feature diff --git a/tests/test_api_request_handler.py b/tests/test_api_request_handler.py index 4f17c37..3e01bf1 100644 --- a/tests/test_api_request_handler.py +++ b/tests/test_api_request_handler.py @@ -23,9 +23,7 @@ def _make_handler(verify=False, custom_json=None): environment.batch_size = 10 environment.case_matcher = MatchersParser.AUTO if custom_json is None: - json_path = ( - Path(__file__).parent / "test_data/json/api_request_handler.json" - ) + json_path = Path(__file__).parent / "test_data/json/api_request_handler.json" else: json_path = custom_json file_json = open(json_path) @@ -49,17 +47,13 @@ def api_request_handler_verify(handler_maker): @pytest.fixture(scope="function") def api_request_handler_update_case_json(handler_maker): - json_path = ( - Path(__file__).parent / "test_data/json/update_case_result_single_with_id.json" - ) + json_path = Path(__file__).parent / "test_data/json/update_case_result_single_with_id.json" yield handler_maker(custom_json=json_path, verify=False) class TestApiRequestHandler: @pytest.mark.api_handler - def test_return_project( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_return_project(self, api_request_handler: ApiRequestHandler, requests_mock): mocked_response = { "offset": 0, "limit": 250, @@ -72,7 +66,7 @@ def test_return_project( {"id": 1, "name": "DataHub", "suite_mode": 1}, {"id": 2, "name": "Test Project", "suite_mode": 1}, {"id": 3, "name": "DataHub", "suite_mode": 1}, - ] + ], } requests_mock.get(create_url("get_projects"), json=mocked_response) assert api_request_handler.get_project_data("Test Project") == ProjectData( @@ -107,9 +101,7 @@ def test_return_project( ), "Get project should return proper project data object" @pytest.mark.api_handler - def test_return_project_legacy_response( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_return_project_legacy_response(self, api_request_handler: ApiRequestHandler, requests_mock): mocked_response = [ {"id": 1, "name": "DataHub", "suite_mode": 1}, {"id": 2, "name": "Test Project", "suite_mode": 1}, @@ -131,15 +123,15 @@ def test_return_project_legacy_response_with_buggy_authentication_prefix( {"id": 3, "name": "DataHub", "suite_mode": 1}, ] - requests_mock.get(create_url("get_projects"), text=f"USER AUTHENTICATION SUCCESSFUL!\n"+json.dumps(mocked_response)) + requests_mock.get( + create_url("get_projects"), text=f"USER AUTHENTICATION SUCCESSFUL!\n" + json.dumps(mocked_response) + ) assert api_request_handler.get_project_data("Test Project") == ProjectData( project_id=2, suite_mode=1, error_message="" ), "Get project should return proper project data object" @pytest.mark.api_handler - def test_check_suite_exists( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_check_suite_exists(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 mocked_response = [ {"id": 4, "name": "Suite1", "description": "Test1", "project_id": 3}, @@ -155,7 +147,7 @@ def test_check_suite_exists( False, FAULT_MAPPING["missing_suite"].format(suite_id=6), ), "Given suite id should NOT exist in mocked response." 
- + @pytest.mark.api_handler def test_check_suite_exists_with_pagination(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 @@ -167,7 +159,7 @@ def test_check_suite_exists_with_pagination(self, api_request_handler: ApiReques "suites": [ {"id": 4, "name": "Suite1", "description": "Test1", "project_id": 3}, {"id": 5, "name": "Suite2", "description": "Test2", "project_id": 3}, - ] + ], } requests_mock.get(create_url(f"get_suites/{project_id}"), json=mocked_response) @@ -207,16 +199,13 @@ def test_add_suite(self, api_request_handler: ApiRequestHandler, requests_mock): assert error == "", "Error occurred in add_suite" assert ( - api_request_handler.suites_data_from_provider.suite_id - == mocked_response["id"] + api_request_handler.suites_data_from_provider.suite_id == mocked_response["id"] ), "Added suite id in DataProvider doesn't match mocked response id." @pytest.mark.api_handler - def test_check_missing_sections_true( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_check_missing_sections_true(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response = { "_links": {"next": None, "prev": None}, "sections": [ @@ -225,25 +214,19 @@ def test_check_missing_sections_true( "suite_id": 4, "name": "Skipped test", } - ] + ], } - requests_mock.get( - create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response - ) + requests_mock.get(create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response) missing, _ = api_request_handler.check_missing_section_ids(project_id) - update_data_mock.assert_called_with( - section_data=[{'section_id': 0, 'suite_id': 4, 'name': 'Skipped test'}] - ) + update_data_mock.assert_called_with(section_data=[{"section_id": 0, "suite_id": 4, "name": "Skipped test"}]) assert missing, "There should be missing section" @pytest.mark.api_handler - def test_check_missing_sections_false( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_check_missing_sections_false(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response = { "_links": {"next": None, "prev": None}, "sections": [ @@ -256,19 +239,17 @@ def test_check_missing_sections_false( "id": 2, "suite_id": 4, "name": "Passed test", - } - ] + }, + ], } - requests_mock.get( - create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response - ) + requests_mock.get(create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response) missing, _ = api_request_handler.check_missing_section_ids(project_id) update_data_mock.assert_called_with( section_data=[ - {'name': 'Skipped test', 'section_id': 1, 'suite_id': 4}, - {'name': 'Passed test', 'section_id': 2, 'suite_id': 4} + {"name": "Skipped test", "section_id": 1, "suite_id": 4}, + {"name": "Passed test", "section_id": 2, "suite_id": 4}, ] ) assert not missing, "There should be no missing section" @@ -282,9 +263,7 @@ def test_add_sections(self, api_request_handler: ApiRequestHandler, requests_moc "name": "Passed test", } - requests_mock.post( - create_url(f"add_section/{project_id}"), 
json=mocked_response - ) + requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response) resources_added, error = api_request_handler.add_sections(project_id) assert ( @@ -296,20 +275,17 @@ def test_add_sections(self, api_request_handler: ApiRequestHandler, requests_moc assert error == "", "Error occurred in add_section" assert ( - api_request_handler.suites_data_from_provider.testsections[1].section_id - == mocked_response["id"] + api_request_handler.suites_data_from_provider.testsections[1].section_id == mocked_response["id"] ), "Added section id in DataProvider doesn't match mocked response id." @pytest.mark.api_handler - def test_add_section_and_cases( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_section_and_cases(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 mocked_response_for_section = { "id": 12345, "suite_id": 4, "name": "Passed test", - "custom_automation_id": "className.testCase" + "custom_automation_id": "className.testCase", } mocked_response_for_case_1 = { @@ -317,7 +293,7 @@ def test_add_section_and_cases( "suite_id": 4, "section_id": 1234, "title": "testCase2", - "custom_automation_id": "className.testCase" + "custom_automation_id": "className.testCase", } mocked_response_for_case_2 = { @@ -325,12 +301,10 @@ def test_add_section_and_cases( "suite_id": 4, "section_id": 12345, "title": "testCase3", - "custom_automation_id": "className.testCase" + "custom_automation_id": "className.testCase", } - requests_mock.post( - create_url(f"add_section/{project_id}"), json=mocked_response_for_section - ) + requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response_for_section) requests_mock.post( create_url(f"add_case/{mocked_response_for_case_1['section_id']}"), json=mocked_response_for_case_1, @@ -371,9 +345,7 @@ def test_add_run(self, api_request_handler: ApiRequestHandler, requests_mock): requests_mock.post(create_url(f"add_run/{project_id}"), json=mocked_response) resources_added, error = api_request_handler.add_run(project_id, run_name) - assert ( - mocked_response["id"] == resources_added - ), "Added run id doesn't match mocked response id" + assert mocked_response["id"] == resources_added, "Added run id doesn't match mocked response id" assert error == "", "Error occurred in add_case" @pytest.mark.api_handler @@ -394,40 +366,37 @@ def test_add_results(self, api_request_handler: ApiRequestHandler, requests_mock "version": "1.0RC1", } ] - requests_mock.post( - create_url(f"add_results_for_cases/{run_id}"), json=mocked_response - ) + requests_mock.post(create_url(f"add_results_for_cases/{run_id}"), json=mocked_response) tests_mocked_response = { - 'offset': 0, - 'limit': 250, - 'size': 4, - '_links': {'next': None, 'prev': None}, - 'tests': [ + "offset": 0, + "limit": 250, + "size": 4, + "_links": {"next": None, "prev": None}, + "tests": [ { - 'id': 4, - 'case_id': 1, - 'status_id': 5, - 'assignedto_id': None, - 'run_id': run_id, - 'title': 'Fail To Login With Invalid Password' - } - ] + "id": 4, + "case_id": 1, + "status_id": 5, + "assignedto_id": None, + "run_id": run_id, + "title": "Fail To Login With Invalid Password", + } + ], } requests_mock.get(create_url(f"get_tests/{run_id}"), json=tests_mocked_response) attachments_mock_response = {"attachment_id": 123} - requests_mock.post( - create_url(f"add_attachment_to_result/{result_id}"), json=attachments_mock_response - ) + requests_mock.post(create_url(f"add_attachment_to_result/{result_id}"), 
json=attachments_mock_response) with patch("builtins.open", mock_open()) as mock_file: resources_added, error, results_added = api_request_handler.add_results(run_id) assert [mocked_response] == resources_added, "Invalid response from add_results" assert error == "", "Error occurred in add_results" - assert results_added == len(mocked_response), \ - f"Expected {len(mocked_response)} results to be added but got {results_added} instead." + assert results_added == len( + mocked_response + ), f"Expected {len(mocked_response)} results to be added but got {results_added} instead." mock_file.assert_any_call("./path1", "rb") mock_file.assert_any_call("./path2", "rb") @@ -446,12 +415,10 @@ def test_close_run(self, api_request_handler: ApiRequestHandler, requests_mock): assert error == "", "Error occurred in close_run" @pytest.mark.api_handler - def test_check_missing_test_cases_ids_true( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_check_missing_test_cases_ids_true(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 suite_id = api_request_handler.suites_data_from_provider.suite_id - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response_page_1 = { "_links": { "next": None, @@ -459,16 +426,14 @@ def test_check_missing_test_cases_ids_true( }, "cases": [ {"title": "testCase1", "custom_automation_id": "Skipped test.testCase1", "id": 1, "section_id": 1234}, - {"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234} + {"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234}, ], } requests_mock.get( create_url(f"get_cases/{project_id}&suite_id={suite_id}"), json=mocked_response_page_1, ) - missing_ids, error = api_request_handler.check_missing_test_cases_ids( - project_id - ) + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) update_data_mock.assert_called_with( case_data=[ @@ -476,27 +441,24 @@ def test_check_missing_test_cases_ids_true( "case_id": 1, "custom_automation_id": "Skipped test.testCase1", "section_id": 1234, - "title": "testCase1" + "title": "testCase1", }, { "case_id": 2, - "custom_automation_id": - "Skipped test.testCase2", + "custom_automation_id": "Skipped test.testCase2", "section_id": 1234, - "title": "testCase2" - } + "title": "testCase2", + }, ] ) assert missing_ids, "There is one missing test case" assert error == "", "Error occurred in check" @pytest.mark.api_handler - def test_check_missing_test_cases_ids_false( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_check_missing_test_cases_ids_false(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 suite_id = api_request_handler.suites_data_from_provider.suite_id - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response_page_1 = { "_links": { "next": f"/api/v2/get_cases/{project_id}&suite_id={suite_id}&limit=1&offset=1", @@ -504,7 +466,7 @@ def test_check_missing_test_cases_ids_false( }, "cases": [ {"title": "testCase1", "custom_automation_id": "Skipped test.testCase1", "id": 1, "section_id": 1234}, - {"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, 
"section_id": 1234} + {"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234}, ], } mocked_response_page_2 = { @@ -521,29 +483,22 @@ def test_check_missing_test_cases_ids_false( create_url(f"get_cases/{project_id}&suite_id={suite_id}&limit=1&offset=1"), json=mocked_response_page_2, ) - missing_ids, error = api_request_handler.check_missing_test_cases_ids( - project_id - ) + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) update_data_mock.assert_called_with( case_data=[ { "case_id": 1, "custom_automation_id": "Skipped test.testCase1", "section_id": 1234, - "title": "testCase1" + "title": "testCase1", }, { "case_id": 2, "custom_automation_id": "Skipped test.testCase2", "section_id": 1234, - "title": "testCase2" + "title": "testCase2", }, - { - "case_id": 1, - "custom_automation_id": "Passed test.testCase3", - "section_id": 2, - "title": "testCase3" - } + {"case_id": 1, "custom_automation_id": "Passed test.testCase3", "section_id": 2, "title": "testCase3"}, ] ) assert not missing_ids, "No missing ids" @@ -560,35 +515,30 @@ def test_get_suite_ids(self, api_request_handler: ApiRequestHandler, requests_mo requests_mock.get(create_url(f"get_suites/{project_id}"), json=mocked_response) resources_added, error = api_request_handler.get_suite_ids(project_id) assert ( - resources_added[0] == mocked_response[0]["id"] and - resources_added[1] == mocked_response[1]["id"] + resources_added[0] == mocked_response[0]["id"] and resources_added[1] == mocked_response[1]["id"] ), "ID in response doesn't match mocked response" @pytest.mark.api_handler - def test_get_suite_ids_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_get_suite_ids_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 - - requests_mock.get( - create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout - ) - + + requests_mock.get(create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout) + suite_ids, error = api_request_handler.get_suite_ids(project_id) - + assert suite_ids == [], "Should return empty list on API error" - assert error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." \ - " Please check your settings and try again.", "Should return connection error message" + assert ( + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + " Please check your settings and try again." 
+ ), "Should return connection error message" @pytest.mark.api_handler - def test_resolve_suite_id_using_name( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_resolve_suite_id_using_name(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 suite_name = "Suite2" api_request_handler.suites_data_from_provider.name = suite_name - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response = { "offset": 0, @@ -598,43 +548,36 @@ def test_resolve_suite_id_using_name( "suites": [ {"id": 4, "name": "Suite1", "description": "Test1", "project_id": 3}, {"id": 5, "name": "Suite2", "description": "Test2", "project_id": 3}, - ] + ], } - + requests_mock.get(create_url(f"get_suites/{project_id}"), json=mocked_response) - + suite_id, error = api_request_handler.resolve_suite_id_using_name(project_id) - + assert suite_id == 5, "Should return the correct suite ID for matching name with pagination" assert error == "", "Should have no error message" - + update_data_mock.assert_called_once_with([{"suite_id": 5, "name": "Suite2"}]) @pytest.mark.api_handler - def test_resolve_suite_id_using_name_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_resolve_suite_id_using_name_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 - requests_mock.get( - create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout - ) + requests_mock.get(create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout) suite_id, error = api_request_handler.resolve_suite_id_using_name(project_id) assert suite_id == -1, "Should return -1 on API error" - assert error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." \ - " Please check your settings and try again.", "Should return connection error message" - + assert ( + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + " Please check your settings and try again." + ), "Should return connection error message" @pytest.mark.api_handler - def test_return_project_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_return_project_error(self, api_request_handler: ApiRequestHandler, requests_mock): - requests_mock.get( - create_url("get_projects"), exc=requests.exceptions.ConnectTimeout - ) + requests_mock.get(create_url("get_projects"), exc=requests.exceptions.ConnectTimeout) assert api_request_handler.get_project_data("Test Project") == ProjectData( project_id=-3, suite_mode=-1, @@ -643,9 +586,7 @@ def test_return_project_error( ), "Get project should return proper project data object with error" @pytest.mark.api_handler - def test_add_suite_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_suite_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 api_request_handler.suites_data_from_provider.suite_id = None @@ -658,15 +599,12 @@ def test_add_suite_error( assert resources_added == [], "No resources should be added" assert ( - error - == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." " Please check your settings and try again." 
), "Connection error is expected" @pytest.mark.api_handler - def test_add_sections_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_sections_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 requests_mock.post( create_url(f"add_section/{project_id}"), @@ -676,20 +614,16 @@ def test_add_sections_error( assert resources_added == [], "No resources should be added" assert ( - error - == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." " Please check your settings and try again." ), "Connection error is expected" assert ( - api_request_handler.suites_data_from_provider.testsections[1].section_id - is None + api_request_handler.suites_data_from_provider.testsections[1].section_id is None ), "No resources should be added to DataProvider" @pytest.mark.api_handler - def test_add_section_and_cases_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_section_and_cases_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 mocked_response_for_section = { "id": 12345, @@ -702,7 +636,7 @@ def test_add_section_and_cases_error( "suite_id": 4, "section_id": 1234, "title": "testCase2", - "custom_automation_id": "Skipped test.testCase2" + "custom_automation_id": "Skipped test.testCase2", } mocked_response_for_case_2 = { @@ -710,12 +644,10 @@ def test_add_section_and_cases_error( "suite_id": 4, "section_id": 12345, "title": "testCase3", - "custom_automation_id": "Passed test.testCase3" + "custom_automation_id": "Passed test.testCase3", } - requests_mock.post( - create_url(f"add_section/{project_id}"), json=mocked_response_for_section - ) + requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response_for_section) requests_mock.post( create_url(f"add_case/{mocked_response_for_case_1['section_id']}"), json=mocked_response_for_case_1, @@ -735,65 +667,55 @@ def test_add_section_and_cases_error( mocked_response_for_case_1["id"], ], "Added case id doesn't match mocked response id" assert ( - error - == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." " Please check your settings and try again." 
), "Connection error is expected" @pytest.mark.api_handler - def test_add_results_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_results_error(self, api_request_handler: ApiRequestHandler, requests_mock): run_id = 3 requests_mock.post( create_url(f"add_results_for_cases/{run_id}"), exc=requests.exceptions.ConnectTimeout, ) tests_mocked_response = { - 'offset': 0, - 'limit': 250, - 'size': 4, - '_links': {'next': None, 'prev': None}, - 'tests': [ + "offset": 0, + "limit": 250, + "size": 4, + "_links": {"next": None, "prev": None}, + "tests": [ { - 'id': 18319, - 'case_id': 6086, - 'status_id': 5, - 'assignedto_id': None, - 'run_id': run_id, - 'title': 'Fail To Login With Invalid Password' - } - ] + "id": 18319, + "case_id": 6086, + "status_id": 5, + "assignedto_id": None, + "run_id": run_id, + "title": "Fail To Login With Invalid Password", + } + ], } requests_mock.get(create_url(f"get_tests/{run_id}"), json=tests_mocked_response) resources_added, error, results_added = api_request_handler.add_results(run_id) assert resources_added == [], "Expected empty list of added resources" assert ( - error - == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." " Please check your settings and try again." ), "Connection error is expected" assert results_added == 0, "Expected 0 resources to be added." @pytest.mark.api_handler - def test_add_results_keyboard_interrupt( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_add_results_keyboard_interrupt(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): run_id = 3 requests_mock.post( create_url(f"add_results_for_cases/{run_id}"), exc=requests.exceptions.ConnectTimeout, ) - mocker.patch( - "trcli.api.api_request_handler.as_completed", side_effect=KeyboardInterrupt - ) + mocker.patch("trcli.api.api_request_handler.as_completed", side_effect=KeyboardInterrupt) with pytest.raises(KeyboardInterrupt) as exception: api_request_handler.add_results(run_id) @pytest.mark.api_handler - def test_add_suite_with_verify( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_add_suite_with_verify(self, api_request_handler_verify: ApiRequestHandler, requests_mock): project_id = 3 mocked_response = { "description": "..", @@ -818,9 +740,7 @@ def test_add_section_with_verify(self, handler_maker, requests_mock): "description": "Some description", } - requests_mock.post( - create_url(f"add_section/{project_id}"), json=mocked_response - ) + requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response) api_request_handler_verify = handler_maker(verify=True) resources_added, error = api_request_handler_verify.add_sections(project_id) assert error == "", "There should be no error in verification." @@ -828,45 +748,34 @@ def test_add_section_with_verify(self, handler_maker, requests_mock): api_request_handler_verify = handler_maker(verify=True) resources_added, error = api_request_handler_verify.add_sections(project_id) assert ( - error - == "Data verification failed. Server added different resource than expected." + error == "Data verification failed. Server added different resource than expected." ), "There should be error in verification." 
@pytest.mark.api_handler - def test_add_case_with_verify( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_add_case_with_verify(self, api_request_handler_verify: ApiRequestHandler, requests_mock): mocked_response_for_case = { "id": 3, "suite_id": 4, "section_id": 1234, "title": "testCase2", "estimate": "30s", - "custom_automation_id": "Skipped test.testCase2" + "custom_automation_id": "Skipped test.testCase2", } requests_mock.post( create_url(f"add_case/{mocked_response_for_case['section_id']}"), json=mocked_response_for_case, ) - del api_request_handler_verify.suites_data_from_provider.testsections[ - 1 - ].testcases[0] + del api_request_handler_verify.suites_data_from_provider.testsections[1].testcases[0] resources_added, error = api_request_handler_verify.add_cases() assert error == "", "There should be no error in verification." mocked_response_for_case["estimate"] = "60s" - api_request_handler_verify.suites_data_from_provider.testsections[0].testcases[ - 1 - ].case_id = None + api_request_handler_verify.suites_data_from_provider.testsections[0].testcases[1].case_id = None resources_added, error = api_request_handler_verify.add_cases() - assert ( - error == FAULT_MAPPING["data_verification_error"] - ), "There should be error in verification." + assert error == FAULT_MAPPING["data_verification_error"], "There should be error in verification." @pytest.mark.api_handler - def test_delete_section( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_delete_section(self, api_request_handler_verify: ApiRequestHandler, requests_mock): sections_id = [{"section_id": 1}] mocked_response_for_case = {"success": 200} @@ -879,9 +788,7 @@ def test_delete_section( assert error == "", "There should be no error in verification." @pytest.mark.api_handler - def test_delete_suite( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_delete_suite(self, api_request_handler_verify: ApiRequestHandler, requests_mock): suite_id = 1 mocked_response_for_case = {"success": 200} @@ -894,9 +801,7 @@ def test_delete_suite( assert error == "", "There should be no error in verification." @pytest.mark.api_handler - def test_delete_cases( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_delete_cases(self, api_request_handler_verify: ApiRequestHandler, requests_mock): suite_id = 1 cases = [{"case_id": 1}] mocked_response_for_case = {"success": 200} @@ -906,15 +811,11 @@ def test_delete_cases( json=mocked_response_for_case, ) - resources_added, error = api_request_handler_verify.delete_cases( - suite_id, cases - ) + resources_added, error = api_request_handler_verify.delete_cases(suite_id, cases) assert error == "", "There should be no error in verification." @pytest.mark.api_handler - def test_delete_run( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_delete_run(self, api_request_handler_verify: ApiRequestHandler, requests_mock): run_id = 1 mocked_response_for_case = {"success": 200} @@ -925,3 +826,42 @@ def test_delete_run( resources_added, error = api_request_handler_verify.delete_run(run_id) assert error == "", "There should be no error in verification." 
+ + @pytest.mark.api_handler + def test_add_bdd_success(self, api_request_handler: ApiRequestHandler, requests_mock): + """Test successful .feature file upload via add_bdd endpoint""" + section_id = 123 + feature_content = "@smoke\nFeature: User Login\n Scenario: Successful login" + + # API returns standard TestRail test case JSON with 'id' field + # File upload uses multipart/form-data + mocked_response = {"id": 101, "title": "Successful login", "section_id": 123, "template_id": 1} + + requests_mock.post( + create_url(f"add_bdd/{section_id}"), + json=mocked_response, + ) + + case_ids, error = api_request_handler.add_bdd(section_id, feature_content) + + assert case_ids == [101], "Should return list with single case ID" + assert error == "", "There should be no error" + + @pytest.mark.api_handler + def test_get_bdd_success(self, api_request_handler: ApiRequestHandler, requests_mock): + """Test successful .feature file retrieval via get_bdd endpoint""" + case_id = 456 + expected_feature = "@smoke\nFeature: User Login" + + # API returns raw Gherkin text + mocked_response = expected_feature + + requests_mock.get( + create_url(f"get_bdd/{case_id}"), + text=mocked_response, + ) + + feature_content, error = api_request_handler.get_bdd(case_id) + + assert feature_content == expected_feature, "Should return feature content" + assert error == "", "There should be no error" diff --git a/tests/test_cmd_export_gherkin.py b/tests/test_cmd_export_gherkin.py new file mode 100644 index 0000000..e305c00 --- /dev/null +++ b/tests/test_cmd_export_gherkin.py @@ -0,0 +1,254 @@ +import pytest +from unittest import mock +from unittest.mock import MagicMock, patch +from click.testing import CliRunner +from pathlib import Path + +from trcli.cli import Environment +from trcli.commands import cmd_export_gherkin + + +class TestCmdExportGherkin: + """Test class for export_gherkin command functionality""" + + def setup_method(self): + """Set up test environment and runner""" + self.runner = CliRunner() + self.sample_feature_content = """@smoke +Feature: User Login + As a user + I want to log in + + Scenario: Successful login + Given I am on the login page + When I enter valid credentials + Then I should see the dashboard +""" + + # Set up environment with required parameters + self.environment = Environment(cmd="export_gherkin") + self.environment.host = "https://test.testrail.com" + self.environment.username = "test@example.com" + self.environment.password = "password" + self.environment.project = "Test Project" + self.environment.project_id = 1 + + @pytest.mark.cmd_export_gherkin + @patch("trcli.commands.cmd_export_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_export_gherkin.APIClient") + def test_export_gherkin_success_to_file(self, mock_api_client_class, mock_api_handler_class): + """Test successful export to file""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.get_bdd.return_value = (self.sample_feature_content, "") + + with self.runner.isolated_filesystem(): + result = self.runner.invoke( + cmd_export_gherkin.cli, ["--case-id", "456", "--output", "exported.feature"], obj=self.environment + ) + + assert result.exit_code == 0 + assert "successfully exported" in result.output.lower() + assert "exported.feature" in result.output + + # Verify file was 
created with correct content + with open("exported.feature", "r") as f: + content = f.read() + assert "Feature: User Login" in content + assert "@smoke" in content + + @pytest.mark.cmd_export_gherkin + @patch("trcli.commands.cmd_export_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_export_gherkin.APIClient") + def test_export_gherkin_success_to_stdout(self, mock_api_client_class, mock_api_handler_class): + """Test successful export to stdout""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.get_bdd.return_value = (self.sample_feature_content, "") + + result = self.runner.invoke(cmd_export_gherkin.cli, ["--case-id", "456"], obj=self.environment) + + assert result.exit_code == 0 + # Content should be printed to stdout + assert "Feature: User Login" in result.output + assert "@smoke" in result.output + + @pytest.mark.cmd_export_gherkin + @patch("trcli.commands.cmd_export_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_export_gherkin.APIClient") + def test_export_gherkin_verbose_logging(self, mock_api_client_class, mock_api_handler_class): + """Test export with verbose logging""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.get_bdd.return_value = (self.sample_feature_content, "") + + result = self.runner.invoke(cmd_export_gherkin.cli, ["--case-id", "456", "--verbose"], obj=self.environment) + + assert result.exit_code == 0 + + @pytest.mark.cmd_export_gherkin + @patch("trcli.commands.cmd_export_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_export_gherkin.APIClient") + def test_export_gherkin_api_error_case_not_found(self, mock_api_client_class, mock_api_handler_class): + """Test API error when case not found""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler with error + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.get_bdd.return_value = ("", "Failed to retrieve BDD test case (HTTP 404)") + + result = self.runner.invoke(cmd_export_gherkin.cli, ["--case-id", "99999"], obj=self.environment) + + assert result.exit_code == 1 + assert "error" in result.output.lower() + + @pytest.mark.cmd_export_gherkin + @patch("trcli.commands.cmd_export_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_export_gherkin.APIClient") + def test_export_gherkin_empty_content(self, mock_api_client_class, mock_api_handler_class): + """Test when no BDD content is returned""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler with empty content + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.get_bdd.return_value = ("", "") # Empty content, no error + + result = self.runner.invoke(cmd_export_gherkin.cli, ["--case-id", "456"], obj=self.environment) + + assert result.exit_code == 1 + assert 
"no bdd content found" in result.output.lower() + + @pytest.mark.cmd_export_gherkin + def test_export_gherkin_required_parameters(self): + """Test that required parameters are validated""" + # Missing --case-id + result = self.runner.invoke(cmd_export_gherkin.cli, ["--project-id", "1"]) + assert result.exit_code == 2 # Click error for missing required option + + # Missing --project-id (handled by check_for_required_parameters) + result = self.runner.invoke(cmd_export_gherkin.cli, ["--case-id", "456"]) + # Will fail due to missing required params + + @pytest.mark.cmd_export_gherkin + def test_export_gherkin_invalid_case_id(self): + """Test with invalid case ID (negative or zero)""" + result = self.runner.invoke(cmd_export_gherkin.cli, ["--case-id", "-1"], obj=self.environment) + + # Click IntRange validation should catch this + assert result.exit_code == 2 + + @pytest.mark.cmd_export_gherkin + @patch("trcli.commands.cmd_export_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_export_gherkin.APIClient") + @patch("builtins.open", side_effect=PermissionError("Permission denied")) + def test_export_gherkin_permission_error(self, mock_open, mock_api_client_class, mock_api_handler_class): + """Test file write permission error""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.get_bdd.return_value = (self.sample_feature_content, "") + + result = self.runner.invoke( + cmd_export_gherkin.cli, + ["--case-id", "456", "--output", "/root/no_permission.feature"], + obj=self.environment, + ) + + assert result.exit_code == 1 + # Check for various error messages related to file writing + assert ( + "permission denied" in result.output.lower() + or "read-only file system" in result.output.lower() + or "error writing file" in result.output.lower() + ) + + @pytest.mark.cmd_export_gherkin + @patch("trcli.commands.cmd_export_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_export_gherkin.APIClient") + def test_export_gherkin_create_nested_directory(self, mock_api_client_class, mock_api_handler_class): + """Test that parent directories are created if they don't exist""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.get_bdd.return_value = (self.sample_feature_content, "") + + with self.runner.isolated_filesystem(): + output_path = "nested/dir/exported.feature" + result = self.runner.invoke( + cmd_export_gherkin.cli, ["--case-id", "456", "--output", output_path], obj=self.environment + ) + + assert result.exit_code == 0 + # Verify nested directory was created + assert Path(output_path).exists() + assert Path(output_path).is_file() + + @pytest.mark.cmd_export_gherkin + @patch("trcli.commands.cmd_export_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_export_gherkin.APIClient") + def test_export_gherkin_unicode_content(self, mock_api_client_class, mock_api_handler_class): + """Test export with unicode characters""" + unicode_content = """@test +Feature: Tëst with ūnīcödé 测试 + Scenario: Test scenario + Given test step with émojis 🎉 +""" + # Mock API client + mock_api_client = MagicMock() + 
mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.get_bdd.return_value = (unicode_content, "") + + with self.runner.isolated_filesystem(): + result = self.runner.invoke( + cmd_export_gherkin.cli, ["--case-id", "456", "--output", "unicode.feature"], obj=self.environment + ) + + assert result.exit_code == 0 + + # Verify unicode content is preserved + with open("unicode.feature", "r", encoding="utf-8") as f: + content = f.read() + assert "ūnīcödé" in content + assert "测试" in content + assert "🎉" in content diff --git a/tests/test_cmd_import_gherkin.py b/tests/test_cmd_import_gherkin.py new file mode 100644 index 0000000..0bfde94 --- /dev/null +++ b/tests/test_cmd_import_gherkin.py @@ -0,0 +1,255 @@ +import pytest +import json +from unittest import mock +from unittest.mock import MagicMock, patch +from click.testing import CliRunner +from pathlib import Path + +from trcli.cli import Environment +from trcli.commands import cmd_import_gherkin + + +class TestCmdImportGherkin: + """Test class for import_gherkin command functionality""" + + def setup_method(self): + """Set up test environment and runner""" + self.runner = CliRunner() + self.test_feature_path = str(Path(__file__).parent / "test_data" / "FEATURE" / "sample_bdd.feature") + + # Set up environment with required parameters + self.environment = Environment(cmd="import_gherkin") + self.environment.host = "https://test.testrail.com" + self.environment.username = "test@example.com" + self.environment.password = "password" + self.environment.project = "Test Project" + self.environment.project_id = 1 + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_success(self, mock_api_client_class, mock_api_handler_class): + """Test successful feature file upload""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.add_bdd.return_value = ([456], "") # Success: case ID 456, no error + + with self.runner.isolated_filesystem(): + # Create test feature file + with open("test.feature", "w") as f: + f.write("Feature: Test\n Scenario: Test scenario\n Given test step\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, ["--file", "test.feature", "--section-id", "123"], obj=self.environment + ) + + assert result.exit_code == 0 + assert "successfully uploaded" in result.output.lower() + assert "456" in result.output + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_json_output(self, mock_api_client_class, mock_api_handler_class): + """Test feature file upload with JSON output""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.add_bdd.return_value = ([101, 102], "") # Success: 2 case IDs + + with 
self.runner.isolated_filesystem(): + # Create test feature file + with open("test.feature", "w") as f: + f.write("Feature: Test\n Scenario: Test 1\n Scenario: Test 2\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, + ["--file", "test.feature", "--section-id", "123", "--json-output"], + obj=self.environment, + ) + + assert result.exit_code == 0 + # Output contains logging messages + JSON, extract JSON (starts with '{') + json_start = result.output.find("{") + assert json_start >= 0, "No JSON found in output" + json_str = result.output[json_start:] + output_data = json.loads(json_str) + assert "case_ids" in output_data + assert output_data["case_ids"] == [101, 102] + assert output_data["count"] == 2 + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_verbose_logging(self, mock_api_client_class, mock_api_handler_class): + """Test feature file upload with verbose logging""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.add_bdd.return_value = ([456], "") + + with self.runner.isolated_filesystem(): + with open("test.feature", "w") as f: + f.write("Feature: Test\n Scenario: Test\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, + ["--file", "test.feature", "--section-id", "123", "--verbose"], + obj=self.environment, + ) + + assert result.exit_code == 0 + # Verbose output should show API endpoint + # (verbose logs might not appear in captured output but command should succeed) + + @pytest.mark.cmd_import_gherkin + def test_import_gherkin_missing_file(self): + """Test with non-existent file""" + result = self.runner.invoke( + cmd_import_gherkin.cli, ["--file", "/nonexistent/file.feature", "--section-id", "123"], obj=self.environment + ) + + # Click returns exit code 2 for invalid parameter (file doesn't exist) + assert result.exit_code in [1, 2] + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_empty_file(self, mock_api_client_class, mock_api_handler_class): + """Test with empty feature file""" + with self.runner.isolated_filesystem(): + # Create empty file + with open("empty.feature", "w") as f: + f.write("") + + result = self.runner.invoke( + cmd_import_gherkin.cli, ["--file", "empty.feature", "--section-id", "123"], obj=self.environment + ) + + assert result.exit_code == 1 + assert "empty" in result.output.lower() + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_api_error(self, mock_api_client_class, mock_api_handler_class): + """Test API error handling""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler with error + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.add_bdd.return_value = ([], "API Error: Section not found") + + with self.runner.isolated_filesystem(): + with open("test.feature", "w") as f: + f.write("Feature: Test\n 
Scenario: Test\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, ["--file", "test.feature", "--section-id", "999"], obj=self.environment + ) + + assert result.exit_code == 1 + assert "error" in result.output.lower() + assert "section not found" in result.output.lower() + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_no_cases_created(self, mock_api_client_class, mock_api_handler_class): + """Test when no case IDs are returned from API""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler with empty case IDs + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.add_bdd.return_value = ([], "") # No error, but no cases created + + with self.runner.isolated_filesystem(): + with open("test.feature", "w") as f: + f.write("Feature: Test\n Scenario: Test\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, ["--file", "test.feature", "--section-id", "123"], obj=self.environment + ) + + assert result.exit_code == 0 # Not an error, just a warning + assert "warning" in result.output.lower() + assert "no case" in result.output.lower() + + @pytest.mark.cmd_import_gherkin + def test_import_gherkin_required_parameters(self): + """Test that required parameters are validated""" + # Missing --file + result = self.runner.invoke(cmd_import_gherkin.cli, ["--section-id", "123", "--project-id", "1"]) + assert result.exit_code == 2 # Click error for missing required option + + # Missing --section-id + with self.runner.isolated_filesystem(): + with open("test.feature", "w") as f: + f.write("Feature: Test\n") + + result = self.runner.invoke(cmd_import_gherkin.cli, ["--file", "test.feature", "--project-id", "1"]) + assert result.exit_code == 2 + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_invalid_section_id(self, mock_api_client_class, mock_api_handler_class): + """Test with invalid section ID (negative number)""" + result = self.runner.invoke( + cmd_import_gherkin.cli, + ["--file", self.test_feature_path, "--section-id", "-1"], # Invalid: negative + obj=self.environment, + ) + + # Click IntRange validation should catch this + assert result.exit_code == 2 + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_unicode_content(self, mock_api_client_class, mock_api_handler_class): + """Test feature file with unicode characters""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.add_bdd.return_value = ([456], "") + + with self.runner.isolated_filesystem(): + # Create feature file with unicode + with open("unicode.feature", "w", encoding="utf-8") as f: + f.write("Feature: Tëst with ūnīcödé\n Scenario: Test 测试\n Given test\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, ["--file", "unicode.feature", "--section-id", "123"], obj=self.environment + ) + + assert 
result.exit_code == 0 diff --git a/tests/test_cmd_parse_cucumber.py b/tests/test_cmd_parse_cucumber.py new file mode 100644 index 0000000..be8ee22 --- /dev/null +++ b/tests/test_cmd_parse_cucumber.py @@ -0,0 +1,298 @@ +import pytest +import json +from unittest import mock +from unittest.mock import MagicMock, patch +from click.testing import CliRunner +from pathlib import Path + +from trcli.cli import Environment +from trcli.commands import cmd_parse_cucumber + + +class TestCmdParseCucumber: + """Test class for parse_cucumber command functionality""" + + def setup_method(self): + """Set up test environment and runner""" + self.runner = CliRunner() + self.test_cucumber_path = str(Path(__file__).parent / "test_data" / "CUCUMBER" / "sample_cucumber.json") + + # Set up environment with required parameters + self.environment = Environment(cmd="parse_cucumber") + self.environment.host = "https://test.testrail.com" + self.environment.username = "test@example.com" + self.environment.password = "password" + self.environment.project = "Test Project" + self.environment.project_id = 1 + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_workflow1_results_only(self, mock_parser_class, mock_uploader_class): + """Test Workflow 1: Parse and upload results only (no feature upload)""" + # Mock parser + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_suite = MagicMock() + mock_suite.name = "Test Suite" + mock_parser.parse_file.return_value = [mock_suite] + + # Mock uploader + mock_uploader = MagicMock() + mock_uploader_class.return_value = mock_uploader + mock_uploader.last_run_id = 123 + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + ["--file", self.test_cucumber_path, "--suite-id", "2", "--title", "Test Run"], + obj=self.environment, + ) + + assert result.exit_code == 0 + mock_parser.parse_file.assert_called_once() + mock_uploader.upload_results.assert_called_once() + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.api.api_request_handler.ApiRequestHandler") + @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_workflow2_upload_feature( + self, mock_parser_class, mock_uploader_class, mock_api_handler_class + ): + """Test Workflow 2: Generate feature, upload, then upload results""" + # Mock parser + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_suite = MagicMock() + mock_suite.name = "Test Suite" + mock_parser.parse_file.return_value = [mock_suite] + mock_parser.generate_feature_file.return_value = "Feature: Test\n Scenario: Test\n" + + # Mock API handler + mock_api_handler = MagicMock() + mock_api_handler_class.return_value = mock_api_handler + mock_api_handler.add_bdd.return_value = ([101, 102], "") + + # Mock uploader + mock_uploader = MagicMock() + mock_uploader_class.return_value = mock_uploader + mock_uploader.last_run_id = 123 + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + [ + "--file", + self.test_cucumber_path, + "--suite-id", + "2", + "--upload-feature", + "--feature-section-id", + "456", + "--title", + "Test Run", + ], + obj=self.environment, + ) + + assert result.exit_code == 0 + mock_parser.generate_feature_file.assert_called_once() + mock_api_handler.add_bdd.assert_called_once() + mock_uploader.upload_results.assert_called() + + @pytest.mark.cmd_parse_cucumber + def 
test_parse_cucumber_upload_feature_requires_section_id(self): + """Test that --upload-feature requires --feature-section-id""" + result = self.runner.invoke( + cmd_parse_cucumber.cli, + [ + "--file", + self.test_cucumber_path, + "--suite-id", + "2", + "--upload-feature", + # Missing --feature-section-id + "--title", + "Test Run", + ], + obj=self.environment, + ) + + assert result.exit_code == 1 + assert "feature-section-id is required" in result.output.lower() + + @pytest.mark.cmd_parse_cucumber + def test_parse_cucumber_missing_file(self): + """Test with non-existent Cucumber JSON file""" + result = self.runner.invoke( + cmd_parse_cucumber.cli, + ["--file", "/nonexistent/results.json", "--suite-id", "2", "--title", "Test Run"], + obj=self.environment, + ) + + assert result.exit_code == 1 + assert "not found" in result.output.lower() or result.exception is not None + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_invalid_json(self, mock_parser_class): + """Test with invalid JSON format""" + # Mock parser to raise JSONDecodeError + mock_parser_class.side_effect = json.JSONDecodeError("Invalid JSON", "", 0) + + with self.runner.isolated_filesystem(): + # Create invalid JSON file + with open("invalid.json", "w") as f: + f.write("This is not valid JSON{{{") + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + ["--file", "invalid.json", "--suite-id", "2", "--title", "Test Run"], + obj=self.environment, + ) + + assert result.exit_code == 1 + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_empty_json(self, mock_parser_class): + """Test with empty JSON file""" + with self.runner.isolated_filesystem(): + # Create empty JSON file + with open("empty.json", "w") as f: + f.write("[]") + + # Mock parser to return empty list + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_parser.parse_file.return_value = [] + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + ["--file", "empty.json", "--suite-id", "2", "--title", "Test Run"], + obj=self.environment, + ) + + # Should handle gracefully (may succeed with warning or fail) + # Exit code depends on implementation + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.api.api_request_handler.ApiRequestHandler") + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_feature_generation_failure(self, mock_parser_class, mock_api_handler_class): + """Test when feature file generation fails""" + # Mock parser + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_suite = MagicMock() + mock_parser.parse_file.return_value = [mock_suite] + mock_parser.generate_feature_file.return_value = "" # Empty content + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + [ + "--file", + self.test_cucumber_path, + "--suite-id", + "2", + "--upload-feature", + "--feature-section-id", + "456", + "--title", + "Test Run", + ], + obj=self.environment, + ) + + assert result.exit_code == 1 + assert "could not generate feature file" in result.output.lower() + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.api.api_request_handler.ApiRequestHandler") + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_api_error_during_feature_upload(self, mock_parser_class, mock_api_handler_class): + """Test API error during feature file upload""" + # Mock parser + mock_parser = MagicMock() + 
mock_parser_class.return_value = mock_parser + mock_suite = MagicMock() + mock_parser.parse_file.return_value = [mock_suite] + mock_parser.generate_feature_file.return_value = "Feature: Test\n" + + # Mock API handler with error + mock_api_handler = MagicMock() + mock_api_handler_class.return_value = mock_api_handler + mock_api_handler.add_bdd.return_value = ([], "API Error: Section not found") + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + [ + "--file", + self.test_cucumber_path, + "--suite-id", + "2", + "--upload-feature", + "--feature-section-id", + "456", + "--title", + "Test Run", + ], + obj=self.environment, + ) + + assert result.exit_code == 1 + assert "error uploading feature file" in result.output.lower() + + @pytest.mark.cmd_parse_cucumber + def test_parse_cucumber_required_parameters(self): + """Test that required parameters are validated""" + # Missing --file + result = self.runner.invoke( + cmd_parse_cucumber.cli, ["--project-id", "1", "--suite-id", "2", "--title", "Test Run"] + ) + # Will fail due to missing required params + + # Missing --project-id (handled by check_for_required_parameters) + result = self.runner.invoke( + cmd_parse_cucumber.cli, ["--file", self.test_cucumber_path, "--suite-id", "2", "--title", "Test Run"] + ) + # Will fail + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_validation_exception(self, mock_parser_class, mock_uploader_class): + """Test handling of ValidationException""" + from trcli.data_classes.validation_exception import ValidationException + + # Mock parser to raise ValidationException + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_parser.parse_file.side_effect = ValidationException("CucumberParser", "Validation error occurred") + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + ["--file", self.test_cucumber_path, "--suite-id", "2", "--title", "Test Run"], + obj=self.environment, + ) + + assert result.exit_code == 1 + assert "validation error" in result.output.lower() + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_value_error(self, mock_parser_class, mock_uploader_class): + """Test handling of ValueError during parsing""" + # Mock parser to raise ValueError + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_parser.parse_file.side_effect = ValueError("Invalid Cucumber JSON structure") + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + ["--file", self.test_cucumber_path, "--suite-id", "2", "--title", "Test Run"], + obj=self.environment, + ) + + assert result.exit_code == 1 + assert "error parsing" in result.output.lower() diff --git a/tests/test_cmd_parse_gherkin.py b/tests/test_cmd_parse_gherkin.py new file mode 100644 index 0000000..11a30e5 --- /dev/null +++ b/tests/test_cmd_parse_gherkin.py @@ -0,0 +1,189 @@ +import pytest +import json +from unittest import mock +from unittest.mock import MagicMock, patch, mock_open +from click.testing import CliRunner +from pathlib import Path + +from trcli.cli import Environment +from trcli.commands import cmd_parse_gherkin +from trcli.readers.gherkin_parser import GherkinParser + + +class TestCmdParseGherkin: + """Test class for parse_gherkin command functionality""" + + def setup_method(self): + """Set up test environment and runner""" + 
self.runner = CliRunner() + self.test_feature_path = str(Path(__file__).parent / "test_data" / "FEATURE" / "sample_login.feature") + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_success_stdout(self): + """Test successful parsing with output to stdout""" + result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", self.test_feature_path]) + + assert result.exit_code == 0 + # Output contains logging messages + JSON, extract JSON (starts with '{') + json_start = result.output.find("{") + assert json_start >= 0, "No JSON found in output" + json_str = result.output[json_start:] + output_data = json.loads(json_str) + assert "suites" in output_data + assert "summary" in output_data + assert output_data["summary"]["total_suites"] >= 1 + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_success_with_output_file(self): + """Test successful parsing with output to file""" + with self.runner.isolated_filesystem(): + output_file = "parsed_output.json" + result = self.runner.invoke( + cmd_parse_gherkin.cli, ["--file", self.test_feature_path, "--output", output_file] + ) + + assert result.exit_code == 0 + assert "parsed results saved to" in result.output.lower() + + # Verify file was created + with open(output_file, "r") as f: + output_data = json.load(f) + assert "suites" in output_data + assert "summary" in output_data + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_pretty_print(self): + """Test parsing with pretty print formatting""" + result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", self.test_feature_path, "--pretty"]) + + assert result.exit_code == 0 + # Extract JSON from output + json_start = result.output.find("{") + json_str = result.output[json_start:] + output_data = json.loads(json_str) + assert "suites" in output_data + # Check that JSON portion contains newlines and indentation (pretty format) + assert "\n" in json_str + assert " " in json_str # Indentation + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_custom_suite_name(self): + """Test parsing with custom suite name""" + custom_suite_name = "My Custom Suite" + result = self.runner.invoke( + cmd_parse_gherkin.cli, ["--file", self.test_feature_path, "--suite-name", custom_suite_name] + ) + + assert result.exit_code == 0 + json_start = result.output.find("{") + output_data = json.loads(result.output[json_start:]) + assert output_data["suites"][0]["name"] == custom_suite_name + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_case_matcher_name(self): + """Test parsing with NAME case matcher""" + result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", self.test_feature_path, "--case-matcher", "name"]) + + assert result.exit_code == 0 + json_start = result.output.find("{") + output_data = json.loads(result.output[json_start:]) + assert "suites" in output_data + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_case_matcher_property(self): + """Test parsing with PROPERTY case matcher""" + result = self.runner.invoke( + cmd_parse_gherkin.cli, ["--file", self.test_feature_path, "--case-matcher", "property"] + ) + + assert result.exit_code == 0 + json_start = result.output.find("{") + output_data = json.loads(result.output[json_start:]) + assert "suites" in output_data + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_verbose_logging(self): + """Test parsing with verbose logging enabled""" + result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", self.test_feature_path, "--verbose"]) + + assert result.exit_code == 0 + # Extract JSON from output (may 
have verbose logs before and after) + json_start = result.output.find("{") + json_end = result.output.rfind("}") + 1 # Find last closing brace + json_str = result.output[json_start:json_end] + output_data = json.loads(json_str) + assert "suites" in output_data + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_missing_file(self): + """Test parsing with non-existent file""" + result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", "/nonexistent/file.feature"]) + + # Click returns exit code 2 for invalid parameter (file doesn't exist) + assert result.exit_code in [1, 2] # Either our error handling or Click's + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_invalid_feature_file(self): + """Test parsing with invalid Gherkin syntax""" + with self.runner.isolated_filesystem(): + # Create invalid feature file + invalid_file = "invalid.feature" + with open(invalid_file, "w") as f: + f.write("This is not valid Gherkin syntax at all!!!") + + result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", invalid_file]) + + assert result.exit_code == 1 + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_required_file_parameter(self): + """Test that --file parameter is required""" + result = self.runner.invoke(cmd_parse_gherkin.cli, []) + + assert result.exit_code == 2 # Click returns 2 for missing required params + assert "Missing option" in result.output or "required" in result.output.lower() + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_output_structure(self): + """Test that output has correct structure""" + result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", self.test_feature_path]) + + assert result.exit_code == 0 + json_start = result.output.find("{") + output_data = json.loads(result.output[json_start:]) + + # Verify top-level structure + assert "suites" in output_data + assert "summary" in output_data + + # Verify summary structure + summary = output_data["summary"] + assert "total_suites" in summary + assert "total_sections" in summary + assert "total_cases" in summary + assert "source_file" in summary + + # Verify suites structure + if output_data["suites"]: + suite = output_data["suites"][0] + assert "name" in suite + assert "source" in suite + assert "testsections" in suite + + if suite["testsections"]: + section = suite["testsections"][0] + assert "name" in section + assert "testcases" in section + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_empty_file(self): + """Test parsing with empty feature file""" + with self.runner.isolated_filesystem(): + empty_file = "empty.feature" + with open(empty_file, "w") as f: + f.write("") + + result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", empty_file]) + + # Should fail with parsing error + assert result.exit_code == 1 diff --git a/tests/test_cucumber_parser.py b/tests/test_cucumber_parser.py new file mode 100644 index 0000000..03b3388 --- /dev/null +++ b/tests/test_cucumber_parser.py @@ -0,0 +1,256 @@ +import pytest +from pathlib import Path +from trcli.cli import Environment +from trcli.data_classes.data_parsers import MatchersParser +from trcli.readers.cucumber_json import CucumberParser + + +class TestCucumberParser: + """Tests for Cucumber JSON parser""" + + @pytest.fixture + def sample_cucumber_path(self): + """Path to the sample Cucumber JSON file""" + return Path(__file__).parent / "test_data" / "CUCUMBER" / "sample_cucumber.json" + + @pytest.fixture + def environment(self, sample_cucumber_path): + """Create a test environment""" + env = Environment() + env.file = 
str(sample_cucumber_path) + env.case_matcher = MatchersParser.AUTO + env.suite_name = None + env.verbose = False + return env + + @pytest.mark.parse_cucumber + def test_cucumber_parser_basic(self, environment, sample_cucumber_path): + """Test basic Cucumber JSON parsing""" + parser = CucumberParser(environment) + suites = parser.parse_file() + + assert len(suites) == 1 + suite = suites[0] + + # Check suite structure + assert suite.name == "Cucumber Test Results" + assert len(suite.testsections) == 1 + + # Check section + section = suite.testsections[0] + assert section.name == "User Login" + assert len(section.testcases) == 2 + + @pytest.mark.parse_cucumber + def test_cucumber_parser_scenarios(self, environment): + """Test that scenarios are parsed correctly""" + parser = CucumberParser(environment) + suites = parser.parse_file() + + section = suites[0].testsections[0] + cases = section.testcases + + # First scenario - passed + case1 = cases[0] + assert "Successful login" in case1.title + assert case1.result.status_id == 1 # Passed + assert len(case1.result.custom_step_results) == 5 + + # Second scenario - failed + case2 = cases[1] + assert "Failed login" in case2.title + assert case2.result.status_id == 5 # Failed + assert len(case2.result.custom_step_results) == 5 + + @pytest.mark.parse_cucumber + def test_cucumber_parser_steps(self, environment): + """Test that steps are parsed with correct status""" + parser = CucumberParser(environment) + suites = parser.parse_file() + + section = suites[0].testsections[0] + case1 = section.testcases[0] + + # Check steps + steps = case1.result.custom_step_results + assert all(step.status_id == 1 for step in steps) # All passed + + # Check step content + assert "Given" in steps[0].content + assert "I am on the login page" in steps[0].content + + @pytest.mark.parse_cucumber + def test_cucumber_parser_automation_id(self, environment): + """Test automation ID generation""" + parser = CucumberParser(environment) + suites = parser.parse_file() + + section = suites[0].testsections[0] + case1 = section.testcases[0] + + # Check automation ID includes feature name, tags, and scenario name + assert case1.custom_automation_id is not None + assert "User Login" in case1.custom_automation_id + assert "@positive" in case1.custom_automation_id + + @pytest.mark.parse_cucumber + def test_cucumber_parser_tags(self, environment): + """Test that tags are extracted correctly""" + parser = CucumberParser(environment) + suites = parser.parse_file() + + section = suites[0].testsections[0] + case1 = section.testcases[0] + + # Check tags in case_fields + assert "tags" in case1.case_fields + tags_str = case1.case_fields["tags"] + assert "@smoke" in tags_str + assert "@authentication" in tags_str + assert "@positive" in tags_str + + @pytest.mark.parse_cucumber + def test_cucumber_generate_feature_file(self, environment): + """Test .feature file generation""" + parser = CucumberParser(environment) + feature_content = parser.generate_feature_file() + + assert feature_content + assert "Feature: User Login" in feature_content + assert "Scenario: Successful login" in feature_content + assert "Scenario: Failed login" in feature_content + assert "Given I am on the login page" in feature_content + assert "@smoke" in feature_content + + @pytest.mark.parse_cucumber + def test_cucumber_parser_elapsed_time(self, environment): + """Test elapsed time calculation""" + parser = CucumberParser(environment) + suites = parser.parse_file() + + section = suites[0].testsections[0] + case1 = 
section.testcases[0] + + # Check elapsed time is calculated (may be None if very short duration) + # The proper_format_for_elapsed in TestRailResult may strip very small values + if case1.result.elapsed is not None: + assert case1.result.elapsed.endswith("s") + + @pytest.fixture + def advanced_cucumber_path(self): + """Path to the advanced Cucumber JSON file with Background, Examples, and Rules""" + return Path(__file__).parent / "test_data" / "CUCUMBER" / "sample_cucumber_advanced.json" + + @pytest.fixture + def advanced_environment(self, advanced_cucumber_path): + """Create a test environment for advanced features""" + env = Environment() + env.file = str(advanced_cucumber_path) + env.case_matcher = MatchersParser.AUTO + env.suite_name = None + env.verbose = False + return env + + @pytest.mark.parse_cucumber + def test_cucumber_generate_background(self, advanced_environment): + """Test Background element generation in .feature file""" + parser = CucumberParser(advanced_environment) + feature_content = parser.generate_feature_file() + + assert "Background: User is logged in" in feature_content + assert "Given I am logged in as a customer" in feature_content + assert "And my shopping cart is empty" in feature_content + + @pytest.mark.parse_cucumber + def test_cucumber_generate_scenario_outline_with_examples(self, advanced_environment): + """Test Scenario Outline with Examples table generation""" + parser = CucumberParser(advanced_environment) + feature_content = parser.generate_feature_file() + + # Check Scenario Outline + assert "Scenario Outline: Add items to cart" in feature_content + + # Check Examples section + assert "Examples:" in feature_content + assert "| quantity | product | price |" in feature_content + assert "| 1 | Laptop | $1000 |" in feature_content + assert "| 2 | Mouse | $40 |" in feature_content + assert "| 3 | Keyboard | $150 |" in feature_content + + # Check Examples tags + assert "@products" in feature_content + + @pytest.mark.parse_cucumber + def test_cucumber_generate_rule_with_nested_elements(self, advanced_environment): + """Test Rule element with nested Background and Scenario""" + parser = CucumberParser(advanced_environment) + feature_content = parser.generate_feature_file() + + # Check Rule + assert "Rule: Payment validation" in feature_content + assert "@validation" in feature_content + + # Check nested Background under Rule + assert "Background: Setup payment environment" in feature_content + assert "Given the payment gateway is available" in feature_content + + # Check nested Scenario under Rule + assert "Scenario: Valid credit card payment" in feature_content + assert "When I pay with a valid credit card" in feature_content + assert "Then the payment should be approved" in feature_content + + @pytest.mark.parse_cucumber + def test_cucumber_advanced_feature_structure(self, advanced_environment): + """Test complete feature structure with all elements""" + parser = CucumberParser(advanced_environment) + feature_content = parser.generate_feature_file() + + # Check feature tags and name + assert "@shopping" in feature_content + assert "@cart" in feature_content + assert "Feature: Shopping Cart" in feature_content + + # Check feature description + assert "As a customer" in feature_content + assert "I want to manage my shopping cart" in feature_content + + # Verify proper ordering: Background before Scenarios + background_pos = feature_content.find("Background:") + scenario_outline_pos = feature_content.find("Scenario Outline:") + assert background_pos < 
scenario_outline_pos, "Background should appear before Scenario Outline" + + @pytest.mark.parse_cucumber + def test_cucumber_multiple_features_in_output(self, advanced_environment): + """Test that multiple features are separated correctly""" + parser = CucumberParser(advanced_environment) + feature_content = parser.generate_feature_file() + + # Should have both features + assert "Feature: Shopping Cart" in feature_content + assert "Feature: Payment Processing" in feature_content + + # Features should be separated by double newline + features = feature_content.split("\n\n") + # Should have at least 2 distinct feature sections + feature_count = feature_content.count("Feature:") + assert feature_count == 2, "Should have exactly 2 features" + + @pytest.mark.parse_cucumber + def test_cucumber_indentation_in_generated_feature(self, advanced_environment): + """Test proper indentation in generated .feature file""" + parser = CucumberParser(advanced_environment) + feature_content = parser.generate_feature_file() + + lines = feature_content.split("\n") + + # Background should be indented with 2 spaces + background_lines = [l for l in lines if "Background:" in l] + assert any(l.startswith(" Background:") for l in background_lines) + + # Steps should be indented with 4 spaces + given_lines = [l for l in lines if l.strip().startswith("Given")] + assert any(l.startswith(" Given") for l in given_lines) + + # Examples should be indented with 4 spaces + examples_lines = [l for l in lines if "Examples:" in l] + assert any(l.startswith(" Examples:") for l in examples_lines) diff --git a/tests/test_data/CUCUMBER/sample_cucumber.json b/tests/test_data/CUCUMBER/sample_cucumber.json new file mode 100644 index 0000000..b1863d2 --- /dev/null +++ b/tests/test_data/CUCUMBER/sample_cucumber.json @@ -0,0 +1,175 @@ +[ + { + "uri": "features/login.feature", + "id": "user-login", + "keyword": "Feature", + "name": "User Login", + "description": " As a user\n I want to log into the application\n So that I can access my account", + "line": 1, + "tags": [ + { + "name": "@smoke", + "line": 1 + }, + { + "name": "@authentication", + "line": 1 + } + ], + "elements": [ + { + "id": "user-login;successful-login-with-valid-credentials", + "keyword": "Scenario", + "name": "Successful login with valid credentials", + "description": "", + "line": 7, + "type": "scenario", + "tags": [ + { + "name": "@positive", + "line": 6 + } + ], + "steps": [ + { + "keyword": "Given ", + "name": "I am on the login page", + "line": 8, + "match": { + "location": "step_definitions/login_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 1234567890 + } + }, + { + "keyword": "When ", + "name": "I enter valid username \"testuser\"", + "line": 9, + "match": { + "location": "step_definitions/login_steps.js:15" + }, + "result": { + "status": "passed", + "duration": 987654321 + } + }, + { + "keyword": "And ", + "name": "I enter valid password \"password123\"", + "line": 10, + "match": { + "location": "step_definitions/login_steps.js:20" + }, + "result": { + "status": "passed", + "duration": 876543210 + } + }, + { + "keyword": "And ", + "name": "I click the login button", + "line": 11, + "match": { + "location": "step_definitions/login_steps.js:25" + }, + "result": { + "status": "passed", + "duration": 2345678901 + } + }, + { + "keyword": "Then ", + "name": "I should be redirected to the dashboard", + "line": 12, + "match": { + "location": "step_definitions/login_steps.js:30" + }, + "result": { + "status": "passed", + "duration": 543210987 + } + 
} + ] + }, + { + "id": "user-login;failed-login-with-invalid-credentials", + "keyword": "Scenario", + "name": "Failed login with invalid credentials", + "description": "", + "line": 15, + "type": "scenario", + "tags": [ + { + "name": "@negative", + "line": 14 + } + ], + "steps": [ + { + "keyword": "Given ", + "name": "I am on the login page", + "line": 16, + "match": { + "location": "step_definitions/login_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 1234567890 + } + }, + { + "keyword": "When ", + "name": "I enter invalid username \"baduser\"", + "line": 17, + "match": { + "location": "step_definitions/login_steps.js:15" + }, + "result": { + "status": "passed", + "duration": 987654321 + } + }, + { + "keyword": "And ", + "name": "I enter invalid password \"wrongpass\"", + "line": 18, + "match": { + "location": "step_definitions/login_steps.js:20" + }, + "result": { + "status": "passed", + "duration": 876543210 + } + }, + { + "keyword": "And ", + "name": "I click the login button", + "line": 19, + "match": { + "location": "step_definitions/login_steps.js:25" + }, + "result": { + "status": "passed", + "duration": 2345678901 + } + }, + { + "keyword": "Then ", + "name": "I should see an error message \"Invalid credentials\"", + "line": 20, + "match": { + "location": "step_definitions/login_steps.js:35" + }, + "result": { + "status": "failed", + "duration": 543210987, + "error_message": "AssertionError: expected 'Please try again' to equal 'Invalid credentials'" + } + } + ] + } + ] + } +] diff --git a/tests/test_data/CUCUMBER/sample_cucumber_advanced.json b/tests/test_data/CUCUMBER/sample_cucumber_advanced.json new file mode 100644 index 0000000..19ac15a --- /dev/null +++ b/tests/test_data/CUCUMBER/sample_cucumber_advanced.json @@ -0,0 +1,234 @@ +[ + { + "uri": "features/shopping_cart.feature", + "id": "shopping-cart", + "keyword": "Feature", + "name": "Shopping Cart", + "description": " As a customer\n I want to manage my shopping cart\n So that I can purchase items", + "line": 1, + "tags": [ + { + "name": "@shopping", + "line": 1 + }, + { + "name": "@cart", + "line": 1 + } + ], + "elements": [ + { + "id": "shopping-cart;background", + "keyword": "Background", + "name": "User is logged in", + "description": "", + "line": 5, + "type": "background", + "steps": [ + { + "keyword": "Given ", + "name": "I am logged in as a customer", + "line": 6, + "match": { + "location": "step_definitions/auth_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 1000000000 + } + }, + { + "keyword": "And ", + "name": "my shopping cart is empty", + "line": 7, + "match": { + "location": "step_definitions/cart_steps.js:5" + }, + "result": { + "status": "passed", + "duration": 500000000 + } + } + ] + }, + { + "id": "shopping-cart;add-items-to-cart", + "keyword": "Scenario Outline", + "name": "Add items to cart", + "description": "", + "line": 10, + "type": "scenario_outline", + "tags": [ + { + "name": "@positive", + "line": 9 + } + ], + "steps": [ + { + "keyword": "When ", + "name": "I add \"\" of \"\" to my cart", + "line": 11, + "match": { + "location": "step_definitions/cart_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 2000000000 + } + }, + { + "keyword": "Then ", + "name": "my cart should contain \"\" items", + "line": 12, + "match": { + "location": "step_definitions/cart_steps.js:20" + }, + "result": { + "status": "passed", + "duration": 1000000000 + } + }, + { + "keyword": "And ", + "name": "the total price should be \"\"", + "line": 13, + "match": { + 
"location": "step_definitions/cart_steps.js:30" + }, + "result": { + "status": "passed", + "duration": 500000000 + } + } + ], + "examples": [ + { + "keyword": "Examples", + "name": "Valid products", + "description": "", + "line": 15, + "tags": [ + { + "name": "@products", + "line": 14 + } + ], + "rows": [ + { + "cells": ["quantity", "product", "price"], + "line": 16 + }, + { + "cells": ["1", "Laptop", "$1000"], + "line": 17 + }, + { + "cells": ["2", "Mouse", "$40"], + "line": 18 + }, + { + "cells": ["3", "Keyboard", "$150"], + "line": 19 + } + ] + } + ] + } + ] + }, + { + "uri": "features/payment.feature", + "id": "payment-processing", + "keyword": "Feature", + "name": "Payment Processing", + "description": " Customers can pay using various methods", + "line": 1, + "tags": [ + { + "name": "@payment", + "line": 1 + } + ], + "elements": [ + { + "id": "payment-processing;payment-validation", + "keyword": "Rule", + "name": "Payment validation", + "description": " All payments must be validated before processing", + "line": 5, + "type": "rule", + "tags": [ + { + "name": "@validation", + "line": 4 + } + ], + "children": [ + { + "id": "payment-processing;payment-validation;background", + "keyword": "Background", + "name": "Setup payment environment", + "description": "", + "line": 8, + "type": "background", + "steps": [ + { + "keyword": "Given ", + "name": "the payment gateway is available", + "line": 9, + "match": { + "location": "step_definitions/payment_steps.js:5" + }, + "result": { + "status": "passed", + "duration": 1500000000 + } + } + ] + }, + { + "id": "payment-processing;payment-validation;valid-credit-card", + "keyword": "Scenario", + "name": "Valid credit card payment", + "description": "", + "line": 11, + "type": "scenario", + "tags": [ + { + "name": "@credit-card", + "line": 10 + } + ], + "steps": [ + { + "keyword": "When ", + "name": "I pay with a valid credit card", + "line": 12, + "match": { + "location": "step_definitions/payment_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 3000000000 + } + }, + { + "keyword": "Then ", + "name": "the payment should be approved", + "line": 13, + "match": { + "location": "step_definitions/payment_steps.js:20" + }, + "result": { + "status": "passed", + "duration": 1000000000 + } + } + ] + } + ] + } + ] + } +] diff --git a/tests/test_data/FEATURE/sample_bdd.feature b/tests/test_data/FEATURE/sample_bdd.feature new file mode 100644 index 0000000..945200a --- /dev/null +++ b/tests/test_data/FEATURE/sample_bdd.feature @@ -0,0 +1,23 @@ +@smoke @authentication +Feature: User Authentication + As a user + I want to authenticate securely + So that I can access the system + + @positive @login + Scenario: Successful login with valid credentials + Given I am on the login page + When I enter valid username "testuser" + And I enter valid password "password123" + And I click the login button + Then I should be redirected to the dashboard + And I should see a welcome message + + @negative @login + Scenario: Failed login with invalid password + Given I am on the login page + When I enter valid username "testuser" + And I enter invalid password "wrongpass" + And I click the login button + Then I should see an error message "Invalid credentials" + And I should remain on the login page diff --git a/tests/test_data/cli_test_data.py b/tests/test_data/cli_test_data.py index a346dd7..cf6f59b 100644 --- a/tests/test_data/cli_test_data.py +++ b/tests/test_data/cli_test_data.py @@ -65,7 +65,9 @@ trcli_description = ( "Supported and loaded modules:\n" " 
- parse_junit: JUnit XML Files (& Similar)\n" - " - parse_gherkin: Gherkin .feature files (BDD)\n" + " - parse_cucumber: Cucumber JSON results (BDD)\n" + " - import_gherkin: Upload .feature files to TestRail BDD\n" + " - export_gherkin: Export BDD test cases as .feature files\n" " - parse_robot: Robot Framework XML Files\n" " - parse_openapi: OpenAPI YML Files\n" " - add_run: Create a new test run\n" diff --git a/tests_e2e/reports_cucumber/sample_cucumber.json b/tests_e2e/reports_cucumber/sample_cucumber.json new file mode 100644 index 0000000..b1863d2 --- /dev/null +++ b/tests_e2e/reports_cucumber/sample_cucumber.json @@ -0,0 +1,175 @@ +[ + { + "uri": "features/login.feature", + "id": "user-login", + "keyword": "Feature", + "name": "User Login", + "description": " As a user\n I want to log into the application\n So that I can access my account", + "line": 1, + "tags": [ + { + "name": "@smoke", + "line": 1 + }, + { + "name": "@authentication", + "line": 1 + } + ], + "elements": [ + { + "id": "user-login;successful-login-with-valid-credentials", + "keyword": "Scenario", + "name": "Successful login with valid credentials", + "description": "", + "line": 7, + "type": "scenario", + "tags": [ + { + "name": "@positive", + "line": 6 + } + ], + "steps": [ + { + "keyword": "Given ", + "name": "I am on the login page", + "line": 8, + "match": { + "location": "step_definitions/login_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 1234567890 + } + }, + { + "keyword": "When ", + "name": "I enter valid username \"testuser\"", + "line": 9, + "match": { + "location": "step_definitions/login_steps.js:15" + }, + "result": { + "status": "passed", + "duration": 987654321 + } + }, + { + "keyword": "And ", + "name": "I enter valid password \"password123\"", + "line": 10, + "match": { + "location": "step_definitions/login_steps.js:20" + }, + "result": { + "status": "passed", + "duration": 876543210 + } + }, + { + "keyword": "And ", + "name": "I click the login button", + "line": 11, + "match": { + "location": "step_definitions/login_steps.js:25" + }, + "result": { + "status": "passed", + "duration": 2345678901 + } + }, + { + "keyword": "Then ", + "name": "I should be redirected to the dashboard", + "line": 12, + "match": { + "location": "step_definitions/login_steps.js:30" + }, + "result": { + "status": "passed", + "duration": 543210987 + } + } + ] + }, + { + "id": "user-login;failed-login-with-invalid-credentials", + "keyword": "Scenario", + "name": "Failed login with invalid credentials", + "description": "", + "line": 15, + "type": "scenario", + "tags": [ + { + "name": "@negative", + "line": 14 + } + ], + "steps": [ + { + "keyword": "Given ", + "name": "I am on the login page", + "line": 16, + "match": { + "location": "step_definitions/login_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 1234567890 + } + }, + { + "keyword": "When ", + "name": "I enter invalid username \"baduser\"", + "line": 17, + "match": { + "location": "step_definitions/login_steps.js:15" + }, + "result": { + "status": "passed", + "duration": 987654321 + } + }, + { + "keyword": "And ", + "name": "I enter invalid password \"wrongpass\"", + "line": 18, + "match": { + "location": "step_definitions/login_steps.js:20" + }, + "result": { + "status": "passed", + "duration": 876543210 + } + }, + { + "keyword": "And ", + "name": "I click the login button", + "line": 19, + "match": { + "location": "step_definitions/login_steps.js:25" + }, + "result": { + "status": "passed", + "duration": 2345678901 + } + }, + 
{ + "keyword": "Then ", + "name": "I should see an error message \"Invalid credentials\"", + "line": 20, + "match": { + "location": "step_definitions/login_steps.js:35" + }, + "result": { + "status": "failed", + "duration": 543210987, + "error_message": "AssertionError: expected 'Please try again' to equal 'Invalid credentials'" + } + } + ] + } + ] + } +] diff --git a/tests_e2e/reports_cucumber/sample_cucumber_advanced.json b/tests_e2e/reports_cucumber/sample_cucumber_advanced.json new file mode 100644 index 0000000..19ac15a --- /dev/null +++ b/tests_e2e/reports_cucumber/sample_cucumber_advanced.json @@ -0,0 +1,234 @@ +[ + { + "uri": "features/shopping_cart.feature", + "id": "shopping-cart", + "keyword": "Feature", + "name": "Shopping Cart", + "description": " As a customer\n I want to manage my shopping cart\n So that I can purchase items", + "line": 1, + "tags": [ + { + "name": "@shopping", + "line": 1 + }, + { + "name": "@cart", + "line": 1 + } + ], + "elements": [ + { + "id": "shopping-cart;background", + "keyword": "Background", + "name": "User is logged in", + "description": "", + "line": 5, + "type": "background", + "steps": [ + { + "keyword": "Given ", + "name": "I am logged in as a customer", + "line": 6, + "match": { + "location": "step_definitions/auth_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 1000000000 + } + }, + { + "keyword": "And ", + "name": "my shopping cart is empty", + "line": 7, + "match": { + "location": "step_definitions/cart_steps.js:5" + }, + "result": { + "status": "passed", + "duration": 500000000 + } + } + ] + }, + { + "id": "shopping-cart;add-items-to-cart", + "keyword": "Scenario Outline", + "name": "Add items to cart", + "description": "", + "line": 10, + "type": "scenario_outline", + "tags": [ + { + "name": "@positive", + "line": 9 + } + ], + "steps": [ + { + "keyword": "When ", + "name": "I add \"\" of \"\" to my cart", + "line": 11, + "match": { + "location": "step_definitions/cart_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 2000000000 + } + }, + { + "keyword": "Then ", + "name": "my cart should contain \"\" items", + "line": 12, + "match": { + "location": "step_definitions/cart_steps.js:20" + }, + "result": { + "status": "passed", + "duration": 1000000000 + } + }, + { + "keyword": "And ", + "name": "the total price should be \"\"", + "line": 13, + "match": { + "location": "step_definitions/cart_steps.js:30" + }, + "result": { + "status": "passed", + "duration": 500000000 + } + } + ], + "examples": [ + { + "keyword": "Examples", + "name": "Valid products", + "description": "", + "line": 15, + "tags": [ + { + "name": "@products", + "line": 14 + } + ], + "rows": [ + { + "cells": ["quantity", "product", "price"], + "line": 16 + }, + { + "cells": ["1", "Laptop", "$1000"], + "line": 17 + }, + { + "cells": ["2", "Mouse", "$40"], + "line": 18 + }, + { + "cells": ["3", "Keyboard", "$150"], + "line": 19 + } + ] + } + ] + } + ] + }, + { + "uri": "features/payment.feature", + "id": "payment-processing", + "keyword": "Feature", + "name": "Payment Processing", + "description": " Customers can pay using various methods", + "line": 1, + "tags": [ + { + "name": "@payment", + "line": 1 + } + ], + "elements": [ + { + "id": "payment-processing;payment-validation", + "keyword": "Rule", + "name": "Payment validation", + "description": " All payments must be validated before processing", + "line": 5, + "type": "rule", + "tags": [ + { + "name": "@validation", + "line": 4 + } + ], + "children": [ + { + "id": 
"payment-processing;payment-validation;background", + "keyword": "Background", + "name": "Setup payment environment", + "description": "", + "line": 8, + "type": "background", + "steps": [ + { + "keyword": "Given ", + "name": "the payment gateway is available", + "line": 9, + "match": { + "location": "step_definitions/payment_steps.js:5" + }, + "result": { + "status": "passed", + "duration": 1500000000 + } + } + ] + }, + { + "id": "payment-processing;payment-validation;valid-credit-card", + "keyword": "Scenario", + "name": "Valid credit card payment", + "description": "", + "line": 11, + "type": "scenario", + "tags": [ + { + "name": "@credit-card", + "line": 10 + } + ], + "steps": [ + { + "keyword": "When ", + "name": "I pay with a valid credit card", + "line": 12, + "match": { + "location": "step_definitions/payment_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 3000000000 + } + }, + { + "keyword": "Then ", + "name": "the payment should be approved", + "line": 13, + "match": { + "location": "step_definitions/payment_steps.js:20" + }, + "result": { + "status": "passed", + "duration": 1000000000 + } + } + ] + } + ] + } + ] + } +] diff --git a/tests_e2e/reports_gherkin/sample_bdd.feature b/tests_e2e/reports_gherkin/sample_bdd.feature new file mode 100644 index 0000000..945200a --- /dev/null +++ b/tests_e2e/reports_gherkin/sample_bdd.feature @@ -0,0 +1,23 @@ +@smoke @authentication +Feature: User Authentication + As a user + I want to authenticate securely + So that I can access the system + + @positive @login + Scenario: Successful login with valid credentials + Given I am on the login page + When I enter valid username "testuser" + And I enter valid password "password123" + And I click the login button + Then I should be redirected to the dashboard + And I should see a welcome message + + @negative @login + Scenario: Failed login with invalid password + Given I am on the login page + When I enter valid username "testuser" + And I enter invalid password "wrongpass" + And I click the login button + Then I should see an error message "Invalid credentials" + And I should remain on the login page diff --git a/tests_e2e/reports_gherkin/sample_login.feature b/tests_e2e/reports_gherkin/sample_login.feature new file mode 100644 index 0000000..e0287b4 --- /dev/null +++ b/tests_e2e/reports_gherkin/sample_login.feature @@ -0,0 +1,41 @@ +Feature: User Login + As a registered user + I want to log in to the application + So that I can access my account + + Background: + Given the application is running + And I am on the login page + + @smoke @authentication + Scenario: Successful login with valid credentials + Given I have a valid username "testuser" + And I have a valid password "password123" + When I enter my credentials + And I click the login button + Then I should be redirected to the dashboard + And I should see a welcome message "Welcome, testuser" + + @negative @authentication + Scenario: Failed login with invalid password + Given I have a valid username "testuser" + And I have an invalid password "wrongpassword" + When I enter my credentials + And I click the login button + Then I should see an error message "Invalid credentials" + And I should remain on the login page + + @edge-case + Scenario Outline: Login attempts with various credentials + Given I have username "" + And I have password "" + When I enter my credentials + And I click the login button + Then I should see result "" + + Examples: + | username | password | result | + | admin | admin123 | Dashboard | + | 
testuser | test123 | Dashboard | + | invalid | invalid123 | Invalid credentials | + | empty | | Password required | diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index 2879972..93ec0f7 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -26,7 +26,7 @@ def _run_cmd(multiline_cmd: str): process = subprocess.Popen(single_line_cmd, shell=True, stdout=subprocess.PIPE) with process.stdout: output = "" - for line in iter(process.stdout.readline, b''): + for line in iter(process.stdout.readline, b""): output += line.decode() print(output) process.wait() @@ -57,7 +57,7 @@ def _run_cmd_allow_failure(multiline_cmd: str): process = subprocess.Popen(single_line_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) with process.stdout: output = "" - for line in iter(process.stdout.readline, b''): + for line in iter(process.stdout.readline, b""): output += line.decode() print(output) process.wait() @@ -69,53 +69,58 @@ class TestsEndToEnd: # TestRail 101 instance has the required configuration for this test run TR_INSTANCE = "https://testrail101.testrail.io/" # Uncomment and enter your credentials below in order to execute the tests locally - #os.environ.setdefault("TR_CLI_USERNAME", "") - #os.environ.setdefault("TR_CLI_PASSWORD", "") + # os.environ.setdefault("TR_CLI_USERNAME", "") + # os.environ.setdefault("TR_CLI_PASSWORD", "") @pytest.fixture(autouse=True, scope="module") def install_trcli(self): _run_cmd("cd .. && pip install .") def test_cli_robot_report_RF50(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_robot \\ --title "[CLI-E2E-Tests] ROBOT FRAMEWORK PARSER" \\ -f "reports_robot/simple_report_RF50.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in 2 sections.", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) def test_cli_robot_report_RF70(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_robot \\ --title "[CLI-E2E-Tests] ROBOT FRAMEWORK PARSER" \\ -f "reports_robot/simple_report_RF50.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in 2 sections.", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) def test_cli_plan_id(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -123,19 +128,21 @@ def test_cli_plan_id(self): --plan-id 1578 \\ --title "[CLI-E2E-Tests] With Plan ID" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Creating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) def test_cli_plan_id_and_config_id(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -144,19 +151,21 @@ def test_cli_plan_id_and_config_id(self): --config-ids 142,143 \\ --title "[CLI-E2E-Tests] With Plan ID and Config ID" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) def test_cli_update_run_in_plan(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -164,19 +173,21 @@ def test_cli_update_run_in_plan(self): --run-id 1550 \\ --title "[CLI-E2E-Tests] Update Run in Plan" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Updating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) - + def test_cli_update_run_in_plan_with_configs(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -184,38 +195,42 @@ def test_cli_update_run_in_plan_with_configs(self): --run-id 1551 \\ --title "[CLI-E2E-Tests] Update Run in Plan with Configs" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Updating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) def test_cli_matchers_auto(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_junit \\ --title "[CLI-E2E-Tests] Matcher: AUTO" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) def test_cli_matchers_auto_update_run(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -224,38 +239,42 @@ def test_cli_matchers_auto_update_run(self): --run-id "1568" \\ --milestone-id "107" \\ -f "reports_junit/generic_ids_auto_plus_one.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Updating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) def test_cli_matchers_auto_multiple_files(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_junit \\ --title "[CLI-E2E-Tests] Matcher: AUTO with multiple files" \\ -f "reports_junit/junit_multiple_parts_*" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [MULTIPART-REPORT-2]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "No attachments found to upload.", - "Submitted 4 test results in" - ] + "Submitted 4 test results in", + ], ) - + def test_cli_matchers_name(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -n \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -263,7 +282,8 @@ def test_cli_matchers_name(self): --title "[CLI-E2E-Tests] Matcher: NAME" \\ --case-matcher "NAME" \\ -f "reports_junit/generic_ids_name.xml" - """) + """ + ) _assert_contains( output, [ @@ -271,12 +291,13 @@ def test_cli_matchers_name(self): "Found 3 test cases without case ID in the report file.", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) - + def test_cli_matchers_property(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -n \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -284,7 +305,8 @@ def test_cli_matchers_property(self): --title "[CLI-E2E-Tests] Matcher: PROPERTY" \\ --case-matcher "PROPERTY" \\ -f "reports_junit/generic_ids_property.xml" - """) + """ + ) _assert_contains( output, [ @@ -292,30 +314,34 @@ def test_cli_matchers_property(self): "Found 3 test cases without case ID in the report file.", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) - + def test_cli_attachments(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_junit \\ --title "[CLI-E2E-Tests] Attachments test" \\ -f "reports_junit/attachments.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [ATTACHMENTS]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 4 attachments for 2 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) + def test_cli_multisuite_with_suite_id(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests Multisuite" \\ @@ -323,7 +349,8 @@ def test_cli_multisuite_with_suite_id(self): --title "[CLI-E2E-Tests] Multisuite with suite id" \\ --suite-id 128 \\ -f "reports_junit/duplicate-names.xml" - """) + """ + ) _assert_contains( output, [ @@ -333,12 +360,13 @@ def test_cli_multisuite_with_suite_id(self): "Processed 3 test cases in section [DUPLICATES] Basic", f"Creating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view", "No attachments found to upload.", - "Submitted 10 test results in" - ] + "Submitted 10 test results in", + ], ) def test_cli_multisuite_with_suite_name(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests Multisuite" \\ @@ -346,7 +374,8 @@ def test_cli_multisuite_with_suite_name(self): --suite-name "My suite" \\ --title "[CLI-E2E-Tests] Multisuite without suite id" \\ -f "reports_junit/duplicate-names.xml" - """) + """ + ) _assert_contains( output, [ @@ -356,19 +385,21 @@ def test_cli_multisuite_with_suite_name(self): "Processed 3 test cases in section [DUPLICATES] Basic", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "No attachments found to upload.", - "Submitted 10 test results in" - ] + "Submitted 10 test results in", + ], ) def test_cli_multisuite_without_suite_id(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests Multisuite" \\ parse_junit \\ --title "[CLI-E2E-Tests] Multisuite without suite id" \\ -f "reports_junit/duplicate-names.xml" - """) + """ + ) _assert_contains( output, [ @@ -378,12 +409,13 @@ def test_cli_multisuite_without_suite_id(self): "Processed 3 test cases in section [DUPLICATES] Basic", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "No attachments found to upload.", - "Submitted 10 test results in" - ] + "Submitted 10 test results in", + ], ) - + def test_cli_saucelabs(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -391,7 +423,8 @@ def test_cli_saucelabs(self): --title "[CLI-E2E-Tests] saucectl parser" \\ --special-parser "saucectl" \\ -f "reports_junit/saucelabs.xml" - """) + """ + ) _assert_contains( output, [ @@ -399,109 +432,114 @@ def test_cli_saucelabs(self): "Processing JUnit suite - Firefox", "Processing JUnit suite - Chrome", "Processed 1 test cases in section [SAUCELABS]", - f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view" - ] + f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", + ], ) - + def test_cli_openapi(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_openapi \\ -f "openapi_specs/openapi.yml" - """) - _assert_contains( - output, - [ - "Processed 22 test cases based on possible responses.", - "Submitted 22 test cases" - ] + """ ) + _assert_contains(output, ["Processed 22 test cases based on possible responses.", "Submitted 22 test cases"]) def test_cli_add_run(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ add_run \\ --title "[CLI-E2E-Tests] ADD RUN TEST: Create run_config.yml" \\ -f "run_config.yml" - """) + """ + ) _assert_contains( output, [ "Creating test run.", f"Test run: {self.TR_INSTANCE}index.php?/runs/view", "title: [CLI-E2E-Tests] ADD RUN TEST: Create run_config.yml", - "Writing test run data to file (run_config.yml). Done." - ] + "Writing test run data to file (run_config.yml). 
Done.", + ], ) - + def test_cli_add_run_include_all(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ add_run --run-include-all\\ --title "[CLI-E2E-Tests] ADD RUN TEST: Include All Cases" \\ -f "run_config.yml" - """) + """ + ) _assert_contains( output, [ "Creating test run.", f"Test run: {self.TR_INSTANCE}index.php?/runs/view", "title: [CLI-E2E-Tests] ADD RUN TEST: Include All Cases", - "Writing test run data to file (run_config.yml). Done." - ] + "Writing test run data to file (run_config.yml). Done.", + ], ) def test_cli_add_run_upload_results(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ -c run_config.yml \\ parse_junit \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ f"Updating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results" - ] + "Submitted 6 test results", + ], ) - + def test_cli_add_run_and_plan_with_due_date(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ add_run --run-include-all \\ --title "[CLI-E2E-Tests] ADD RUN TEST: Test Run with Due Date" \\ --run-start-date "03/01/2030" --run-end-date "03/12/2030" - """) + """ + ) _assert_contains( output, [ "Creating test run.", f"Test run: {self.TR_INSTANCE}index.php?/runs/view", - "title: [CLI-E2E-Tests] ADD RUN TEST: Test Run with Due Date" - ] + "title: [CLI-E2E-Tests] ADD RUN TEST: Test Run with Due Date", + ], ) def test_cli_add_run_refs_with_references(self): """Test creating a run with references""" import random import string - + # Generate random suffix to avoid conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) - - output = _run_cmd(f""" + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) + + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -509,7 +547,8 @@ def test_cli_add_run_refs_with_references(self): --title "[CLI-E2E-Tests] ADD RUN TEST: With References {random_suffix}" \\ --run-refs "JIRA-100,JIRA-200,REQ-{random_suffix}" \\ -f "run_config_refs.yml" - """) + """ + ) _assert_contains( output, [ @@ -517,32 +556,32 @@ def test_cli_add_run_refs_with_references(self): f"Test run: {self.TR_INSTANCE}index.php?/runs/view", f"title: [CLI-E2E-Tests] ADD RUN TEST: With References {random_suffix}", f"Refs: JIRA-100,JIRA-200,REQ-{random_suffix}", - "Writing test run data to file (run_config_refs.yml). Done." - ] + "Writing test run data to file (run_config_refs.yml). 
Done.", + ], ) def test_cli_add_run_refs_validation_error(self): """Test references validation (too long)""" long_refs = "A" * 251 # Exceeds 250 character limit - - output, return_code = _run_cmd_allow_failure(f""" + + output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ add_run \\ --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Too Long" \\ --run-refs "{long_refs}" - """) - - assert return_code != 0 - _assert_contains( - output, - ["Error: References field cannot exceed 250 characters."] + """ ) + assert return_code != 0 + _assert_contains(output, ["Error: References field cannot exceed 250 characters."]) + def test_cli_add_run_refs_update_action_validation(self): """Test that update/delete actions require run_id""" - output, return_code = _run_cmd_allow_failure(f""" + output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -550,12 +589,15 @@ def test_cli_add_run_refs_update_action_validation(self): --title "[CLI-E2E-Tests] ADD RUN TEST: Invalid Action" \\ --run-refs "JIRA-123" \\ --run-refs-action "update" - """) - + """ + ) + assert return_code != 0 _assert_contains( output, - ["Error: --run-refs-action 'update' and 'delete' can only be used when updating an existing run (--run-id required)."] + [ + "Error: --run-refs-action 'update' and 'delete' can only be used when updating an existing run (--run-id required)." + ], ) def test_cli_add_run_refs_update_workflow(self): @@ -563,12 +605,13 @@ def test_cli_add_run_refs_update_workflow(self): import random import string import re - + # Generate random suffix to avoid conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) - + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) + # Step 1: Create a run with initial references - create_output = _run_cmd(f""" + create_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -576,24 +619,19 @@ def test_cli_add_run_refs_update_workflow(self): --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ --run-refs "JIRA-100,JIRA-200" \\ -f "run_config_workflow.yml" - """) - + """ + ) + # Extract run ID from output - run_id_match = re.search(r'run_id: (\d+)', create_output) + run_id_match = re.search(r"run_id: (\d+)", create_output) assert run_id_match, "Could not extract run ID from output" run_id = run_id_match.group(1) - - _assert_contains( - create_output, - [ - "Creating test run.", - f"run_id: {run_id}", - "Refs: JIRA-100,JIRA-200" - ] - ) - + + _assert_contains(create_output, ["Creating test run.", f"run_id: {run_id}", "Refs: JIRA-100,JIRA-200"]) + # Step 2: Add more references to the existing run - add_output = _run_cmd(f""" + add_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -602,19 +640,14 @@ def test_cli_add_run_refs_update_workflow(self): --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ --run-refs "JIRA-300,REQ-{random_suffix}" \\ --run-refs-action "add" - """) - - _assert_contains( - add_output, - [ - "Updating test run.", - f"run_id: {run_id}", - "Refs Action: add" - ] + """ ) - + + _assert_contains(add_output, ["Updating test run.", f"run_id: {run_id}", "Refs Action: add"]) + # Step 3: Update (replace) all references - update_output = _run_cmd(f""" + update_output = 
_run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -623,20 +656,16 @@ def test_cli_add_run_refs_update_workflow(self): --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ --run-refs "NEW-100,NEW-200" \\ --run-refs-action "update" - """) - + """ + ) + _assert_contains( - update_output, - [ - "Updating test run.", - f"run_id: {run_id}", - "Refs: NEW-100,NEW-200", - "Refs Action: update" - ] + update_output, ["Updating test run.", f"run_id: {run_id}", "Refs: NEW-100,NEW-200", "Refs Action: update"] ) - + # Step 4: Delete specific references - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -645,19 +674,14 @@ def test_cli_add_run_refs_update_workflow(self): --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ --run-refs "NEW-100" \\ --run-refs-action "delete" - """) - - _assert_contains( - delete_output, - [ - "Updating test run.", - f"run_id: {run_id}", - "Refs Action: delete" - ] + """ ) - + + _assert_contains(delete_output, ["Updating test run.", f"run_id: {run_id}", "Refs Action: delete"]) + # Step 5: Delete all references - delete_all_output = _run_cmd(f""" + delete_all_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -665,21 +689,16 @@ def test_cli_add_run_refs_update_workflow(self): --run-id {run_id} \\ --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ --run-refs-action "delete" - """) - - _assert_contains( - delete_all_output, - [ - "Updating test run.", - f"run_id: {run_id}", - "Refs: ", - "Refs Action: delete" - ] + """ ) + _assert_contains( + delete_all_output, ["Updating test run.", f"run_id: {run_id}", "Refs: ", "Refs Action: delete"] + ) def bug_test_cli_robot_description_bug(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -687,18 +706,20 @@ def bug_test_cli_robot_description_bug(self): --title "[CLI-E2E-Tests] RUN DESCRIPTION BUG" \\ -f "reports_robot/simple_report_RF50.xml" \\ --run-id 2332 - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in 2 sections.", "Uploading 1 attachments for 1 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) - + def bug_test_automation_id(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -706,225 +727,221 @@ def bug_test_automation_id(self): parse_junit \\ --title "(DO NOT DELETE) [CLI-E2E-Tests] Test updated Automation ID" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ f"Updating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results" - ] + "Submitted 6 test results", + ], ) def test_labels_full_workflow(self): """Test complete labels workflow: add, list, get, update, delete""" - + # Generate random suffix to avoid conflicts with existing labels import random import string - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) label_title = f"e2e-{random_suffix}" assert len(label_title) <= 20, f"Label title '{label_title}' exceeds 20 characters" - + # Step 1: Add a new label - add_output = _run_cmd(f""" + add_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{label_title}" - """) + """ + ) _assert_contains( - add_output, - [ - f"Adding label '{label_title}'...", - "Successfully added label: ID=", - f"Title='{label_title}'" - ] + add_output, [f"Adding label '{label_title}'...", "Successfully added label: ID=", f"Title='{label_title}'"] ) - + # Extract label ID from the add output import re + label_id_match = re.search(r"ID=(\d+)", add_output) assert label_id_match, f"Could not find label ID in output: {add_output}" label_id = label_id_match.group(1) print(f"Created label with ID: {label_id}") - + # Step 2: List labels to verify it exists - list_output = _run_cmd(f""" + list_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels list - """) - _assert_contains( - list_output, - [ - "Retrieving labels...", - "Found", - f"ID: {label_id}, Title: '{label_title}'" - ] + """ ) - + _assert_contains(list_output, ["Retrieving labels...", "Found", f"ID: {label_id}, Title: '{label_title}'"]) + # Step 3: Get the specific label - get_output = _run_cmd(f""" + get_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels get \\ --id {label_id} - """) + """ + ) _assert_contains( get_output, - [ - f"Retrieving label with ID {label_id}...", - "Label details:", - f"ID: {label_id}", - f"Title: '{label_title}'" - ] + [f"Retrieving label with ID {label_id}...", "Label details:", f"ID: {label_id}", f"Title: '{label_title}'"], ) - + # Step 4: Update the label updated_title = f"upd-{random_suffix}" assert len(updated_title) <= 20, f"Updated title '{updated_title}' exceeds 20 characters" - update_output = _run_cmd(f""" + update_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels update \\ --id {label_id} \\ --title "{updated_title}" - """) + """ + ) _assert_contains( update_output, [ f"Updating label with ID {label_id}...", f"Successfully updated label: ID={label_id}", - f"Title='{updated_title}'" - ] + f"Title='{updated_title}'", + ], ) - + # Step 5: Verify the update by getting the label again - get_updated_output = _run_cmd(f""" + get_updated_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels get \\ --id {label_id} - """) - _assert_contains( - get_updated_output, - [ - f"ID: {label_id}", - f"Title: '{updated_title}'" - ] + """ ) - + _assert_contains(get_updated_output, [f"ID: {label_id}", f"Title: '{updated_title}'"]) + # Step 6: Delete the label (with confirmation) - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" echo "y" | trcli 
-y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_id} - """) - _assert_contains( - delete_output, - [ - f"Deleting labels with IDs: {label_id}...", - "Successfully deleted 1 label(s)" - ] + """ ) + _assert_contains(delete_output, [f"Deleting labels with IDs: {label_id}...", "Successfully deleted 1 label(s)"]) def test_labels_add_multiple_and_delete_multiple(self): """Test adding multiple labels and deleting them in batch""" - + # Generate random suffix to avoid conflicts with existing labels import random import string - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) - + + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) + # Add first label label1_title = f"b1-{random_suffix}" assert len(label1_title) <= 20, f"Label1 title '{label1_title}' exceeds 20 characters" - add_output1 = _run_cmd(f""" + add_output1 = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{label1_title}" - """) - + """ + ) + # Add second label label2_title = f"b2-{random_suffix}" assert len(label2_title) <= 20, f"Label2 title '{label2_title}' exceeds 20 characters" - add_output2 = _run_cmd(f""" + add_output2 = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{label2_title}" - """) - + """ + ) + # Add third label label3_title = f"b3-{random_suffix}" assert len(label3_title) <= 20, f"Label3 title '{label3_title}' exceeds 20 characters" - add_output3 = _run_cmd(f""" + add_output3 = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{label3_title}" - """) - + """ + ) + # Extract all label IDs import re + label_id1 = re.search(r"ID=(\d+)", add_output1).group(1) label_id2 = re.search(r"ID=(\d+)", add_output2).group(1) label_id3 = re.search(r"ID=(\d+)", add_output3).group(1) - + label_ids = f"{label_id1},{label_id2},{label_id3}" print(f"Created labels with IDs: {label_ids}") - + # Verify all labels exist in list - list_output = _run_cmd(f""" + list_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels list - """) + """ + ) _assert_contains( list_output, [ f"ID: {label_id1}, Title: '{label1_title}'", f"ID: {label_id2}, Title: '{label2_title}'", - f"ID: {label_id3}, Title: '{label3_title}'" - ] + f"ID: {label_id3}, Title: '{label3_title}'", + ], ) - + # Delete all labels in batch - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_ids} - """) + """ + ) _assert_contains( - delete_output, - [ - f"Deleting labels with IDs: {label_ids}...", - "Successfully deleted 3 label(s)" - ] + delete_output, [f"Deleting labels with IDs: {label_ids}...", "Successfully deleted 3 label(s)"] ) def test_labels_help_commands(self): """Test labels help functionality""" - + # Test main labels help - main_help_output = _run_cmd(f""" + main_help_output = _run_cmd( + f""" trcli labels --help - """) + """ + ) _assert_contains( main_help_output, [ @@ -933,27 +950,26 @@ def test_labels_help_commands(self): "delete Delete labels from TestRail", "get Get a specific label by ID", "list List all labels in the project", - "update Update an existing label in TestRail" - ] + "update Update 
an existing label in TestRail", + ], ) - + # Test add command help - add_help_output = _run_cmd(f""" + add_help_output = _run_cmd( + f""" trcli labels add --help - """) + """ + ) _assert_contains( - add_help_output, - [ - "Add a new label in TestRail", - "--title", - "Title of the label to add (max 20 characters)" - ] + add_help_output, ["Add a new label in TestRail", "--title", "Title of the label to add (max 20 characters)"] ) - + # Test update command help - update_help_output = _run_cmd(f""" + update_help_output = _run_cmd( + f""" trcli labels update --help - """) + """ + ) _assert_contains( update_help_output, [ @@ -961,345 +977,313 @@ def test_labels_help_commands(self): "--id", "--title", "ID of the label to update", - "New title for the label (max 20 characters)" - ] + "New title for the label (max 20 characters)", + ], ) - + # Test delete command help - delete_help_output = _run_cmd(f""" + delete_help_output = _run_cmd( + f""" trcli labels delete --help - """) + """ + ) _assert_contains( - delete_help_output, - [ - "Delete labels from TestRail", - "--ids", - "Comma-separated list of label IDs to delete" - ] + delete_help_output, ["Delete labels from TestRail", "--ids", "Comma-separated list of label IDs to delete"] ) - + # Test list command help - list_help_output = _run_cmd(f""" + list_help_output = _run_cmd( + f""" trcli labels list --help - """) + """ + ) _assert_contains( list_help_output, - [ - "List all labels in the project", - "--offset", - "--limit", - "Offset for pagination", - "Limit for pagination" - ] - ) - + ["List all labels in the project", "--offset", "--limit", "Offset for pagination", "Limit for pagination"], + ) + # Test get command help - get_help_output = _run_cmd(f""" + get_help_output = _run_cmd( + f""" trcli labels get --help - """) - _assert_contains( - get_help_output, - [ - "Get a specific label by ID", - "--id", - "ID of the label to retrieve" - ] + """ ) + _assert_contains(get_help_output, ["Get a specific label by ID", "--id", "ID of the label to retrieve"]) def test_labels_pagination(self): """Test labels pagination functionality""" - + # Test basic list command - list_output = _run_cmd(f""" + list_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels list - """) - _assert_contains( - list_output, - [ - "Retrieving labels...", - "Found" - ] + """ ) - + _assert_contains(list_output, ["Retrieving labels...", "Found"]) + # Test pagination with limit - paginated_output = _run_cmd(f""" + paginated_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels list \\ --limit 5 - """) - _assert_contains( - paginated_output, - [ - "Retrieving labels...", - "Found" - ] + """ ) - + _assert_contains(paginated_output, ["Retrieving labels...", "Found"]) + # Test pagination with offset and limit - offset_output = _run_cmd(f""" + offset_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels list \\ --offset 0 \\ --limit 10 - """) - _assert_contains( - offset_output, - [ - "Retrieving labels...", - "Found" - ] + """ ) + _assert_contains(offset_output, ["Retrieving labels...", "Found"]) def test_labels_validation_errors(self): """Test labels validation and error handling""" - + # Test title too long (more than 20 characters) - long_title_output, returncode = _run_cmd_allow_failure(f""" + long_title_output, returncode = _run_cmd_allow_failure( + f""" trcli -y \\ -h 
{self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "ThisTitleIsWayTooLongForTheValidationLimit" - """) + """ + ) # Should fail with validation error assert returncode != 0, f"Expected validation error but command succeeded: {long_title_output}" assert "Error: Label title must be 20 characters or less." in long_title_output - + # Test invalid label ID for get - invalid_get_output, returncode = _run_cmd_allow_failure(f""" + invalid_get_output, returncode = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels get \\ --id 999999 - """) + """ + ) # Should fail with API error assert returncode != 0, f"Expected API error but command succeeded: {invalid_get_output}" assert "Failed to retrieve label:" in invalid_get_output - + # Test invalid label ID format for delete - invalid_delete_output, returncode = _run_cmd_allow_failure(f""" + invalid_delete_output, returncode = _run_cmd_allow_failure( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids "abc,def" - """) + """ + ) # Should fail with format validation error assert returncode != 0, f"Expected validation error but command succeeded: {invalid_delete_output}" assert "Error: Invalid label IDs format" in invalid_delete_output def test_labels_edge_cases(self): """Test labels edge cases and boundary conditions""" - + # Test with exactly 20 character title (boundary condition) twenty_char_title = "ExactlyTwentyCharss!" # Exactly 20 characters assert len(twenty_char_title) == 20, "Test title should be exactly 20 characters" - - add_output = _run_cmd(f""" + + add_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{twenty_char_title}" - """) - _assert_contains( - add_output, - [ - f"Adding label '{twenty_char_title}'...", - "Successfully added label:" - ] + """ ) - + _assert_contains(add_output, [f"Adding label '{twenty_char_title}'...", "Successfully added label:"]) + # Extract label ID for cleanup import re + label_id_match = re.search(r"ID=(\d+)", add_output) if label_id_match: label_id = label_id_match.group(1) - + # Cleanup - delete the test label - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_id} - """) + """ + ) _assert_contains( - delete_output, - [ - f"Deleting labels with IDs: {label_id}...", - "Successfully deleted 1 label(s)" - ] + delete_output, [f"Deleting labels with IDs: {label_id}...", "Successfully deleted 1 label(s)"] ) - def test_labels_cases_full_workflow(self): """Test complete workflow of test case label operations""" import random import string - + # Generate random suffix to avoid label conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) case_label_title = f"e2e-case-{random_suffix}" - + # First, create a test label - add_label_output = _run_cmd(f""" + add_label_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{case_label_title}" - """) - _assert_contains( - add_label_output, - [ - f"Adding label '{case_label_title}'...", - "Successfully added label:" - ] + """ ) - + 
_assert_contains(add_label_output, [f"Adding label '{case_label_title}'...", "Successfully added label:"]) + # Extract label ID for later use import re + label_id_match = re.search(r"ID=(\d+)", add_label_output) assert label_id_match, "Could not extract label ID from output" label_id = label_id_match.group(1) - + try: # Use known test case IDs that should exist in the test project test_case_ids = ["24964", "24965"] # Multiple test cases for batch testing - + # Add labels to test cases - add_cases_output = _run_cmd(f""" + add_cases_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases add \\ --case-ids "{','.join(test_case_ids)}" \\ --title "{case_label_title}" - """) + """ + ) _assert_contains( add_cases_output, [ f"Adding label '{case_label_title}' to {len(test_case_ids)} test case(s)...", - "Successfully processed" - ] + "Successfully processed", + ], ) - + # List test cases by label title - list_by_title_output = _run_cmd(f""" + list_by_title_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --title "{case_label_title}" - """) + """ + ) _assert_contains( list_by_title_output, - [ - f"Retrieving test cases with label title '{case_label_title}'...", - "matching test case(s):" - ] + [f"Retrieving test cases with label title '{case_label_title}'...", "matching test case(s):"], ) - + # List test cases by label ID - list_by_id_output = _run_cmd(f""" + list_by_id_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --ids "{label_id}" - """) + """ + ) _assert_contains( - list_by_id_output, - [ - f"Retrieving test cases with label IDs: {label_id}...", - "matching test case(s):" - ] + list_by_id_output, [f"Retrieving test cases with label IDs: {label_id}...", "matching test case(s):"] ) - + finally: # Cleanup - delete the test label - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_id} - """) + """ + ) _assert_contains( - delete_output, - [ - f"Deleting labels with IDs: {label_id}...", - "Successfully deleted 1 label(s)" - ] + delete_output, [f"Deleting labels with IDs: {label_id}...", "Successfully deleted 1 label(s)"] ) def test_labels_cases_validation_errors(self): """Test validation errors for test case label commands""" # Test title too long for add cases - long_title_output, return_code = _run_cmd_allow_failure(f""" + long_title_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases add \\ --case-ids "1" \\ --title "this-title-is-way-too-long-for-testrail" - """) - assert return_code != 0 - _assert_contains( - long_title_output, - ["Error: Label title must be 20 characters or less."] + """ ) - + assert return_code != 0 + _assert_contains(long_title_output, ["Error: Label title must be 20 characters or less."]) + # Test invalid case IDs format - invalid_ids_output, return_code = _run_cmd_allow_failure(f""" + invalid_ids_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases add \\ --case-ids "invalid,ids" \\ --title "test" - """) + """ + ) assert return_code != 0 _assert_contains( - invalid_ids_output, - ["Error: 
Invalid case IDs format. Use comma-separated integers (e.g., 1,2,3)."] + invalid_ids_output, ["Error: Invalid case IDs format. Use comma-separated integers (e.g., 1,2,3)."] ) - + # Test missing filter for list cases - no_filter_output, return_code = _run_cmd_allow_failure(f""" + no_filter_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list - """) - assert return_code != 0 - _assert_contains( - no_filter_output, - ["Error: Either --ids or --title must be provided."] + """ ) - + assert return_code != 0 + _assert_contains(no_filter_output, ["Error: Either --ids or --title must be provided."]) + # Test title too long for list cases - long_title_list_output, return_code = _run_cmd_allow_failure(f""" + long_title_list_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --title "this-title-is-way-too-long-for-testrail" - """) - assert return_code != 0 - _assert_contains( - long_title_list_output, - ["Error: Label title must be 20 characters or less."] + """ ) + assert return_code != 0 + _assert_contains(long_title_list_output, ["Error: Label title must be 20 characters or less."]) def test_labels_cases_help_commands(self): """Test help output for test case label commands""" @@ -1311,22 +1295,17 @@ def test_labels_cases_help_commands(self): "Usage: trcli labels cases [OPTIONS] COMMAND [ARGS]...", "Manage labels for test cases", "add Add a label to test cases", - "list List test cases filtered by label ID or title" - ] + "list List test cases filtered by label ID or title", + ], ) - + # Test cases add help cases_add_help_output = _run_cmd("trcli labels cases add --help") _assert_contains( cases_add_help_output, - [ - "Usage: trcli labels cases add [OPTIONS]", - "Add a label to test cases", - "--case-ids", - "--title" - ] + ["Usage: trcli labels cases add [OPTIONS]", "Add a label to test cases", "--case-ids", "--title"], ) - + # Test cases list help cases_list_help_output = _run_cmd("trcli labels cases list --help") _assert_contains( @@ -1335,73 +1314,76 @@ def test_labels_cases_help_commands(self): "Usage: trcli labels cases list [OPTIONS]", "List test cases filtered by label ID or title", "--ids", - "--title" - ] + "--title", + ], ) def test_labels_cases_no_matching_cases(self): """Test behavior when no test cases match the specified label""" # Test with non-existent label title - no_match_output = _run_cmd(f""" + no_match_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --title "non-existent-label" - """) + """ + ) _assert_contains( no_match_output, [ "Retrieving test cases with label title 'non-existent-label'...", "Found 0 matching test case(s):", - "No test cases found with label title 'non-existent-label'." - ] + "No test cases found with label title 'non-existent-label'.", + ], ) - + # Test with non-existent label ID - no_match_id_output = _run_cmd(f""" + no_match_id_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --ids "99999" - """) + """ + ) _assert_contains( no_match_id_output, [ "Retrieving test cases with label IDs: 99999...", "Found 0 matching test case(s):", - "No test cases found with the specified label IDs." 
- ] + "No test cases found with the specified label IDs.", + ], ) def test_labels_cases_single_case_workflow(self): """Test single case label operations using update_case endpoint""" import random import string - + # Generate random suffix to avoid label conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) single_case_label_title = f"e2e-single-{random_suffix}" - + # First, create a test label - add_label_output = _run_cmd(f""" + add_label_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{single_case_label_title}" - """) + """ + ) _assert_contains( - add_label_output, - [ - f"Adding label '{single_case_label_title}'...", - "Successfully added label:" - ] + add_label_output, [f"Adding label '{single_case_label_title}'...", "Successfully added label:"] ) # Extract label ID for later use import re + label_id_match = re.search(r"ID=(\d+)", add_label_output) assert label_id_match, "Could not extract label ID from output" label_id = label_id_match.group(1) @@ -1411,77 +1393,80 @@ def test_labels_cases_single_case_workflow(self): single_case_id = "24964" # Add label to single test case - add_single_case_output = _run_cmd(f""" + add_single_case_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases add \\ --case-ids "{single_case_id}" \\ --title "{single_case_label_title}" - """) + """ + ) _assert_contains( add_single_case_output, [ f"Adding label '{single_case_label_title}' to 1 test case(s)...", "Successfully processed 1 case(s):", - f"Successfully added label '{single_case_label_title}' to case {single_case_id}" - ] + f"Successfully added label '{single_case_label_title}' to case {single_case_id}", + ], ) # Verify the label was added by listing cases with this label - list_cases_output = _run_cmd(f""" + list_cases_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --title "{single_case_label_title}" - """) + """ + ) _assert_contains( list_cases_output, [ f"Retrieving test cases with label title '{single_case_label_title}'...", "Found 1 matching test case(s):", - f"Case ID: {single_case_id}" - ] + f"Case ID: {single_case_id}", + ], ) finally: # Clean up: delete the test label - _run_cmd(f""" + _run_cmd( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_id} - """) + """ + ) def test_labels_tests_full_workflow(self): """Test complete workflow of test label operations""" import random import string - + # Generate random suffix to avoid label conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) test_label_title = f"e2e-test-{random_suffix}" - + # First, create a test label - add_label_output = _run_cmd(f""" + add_label_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{test_label_title}" - """) - _assert_contains( - add_label_output, - [ - f"Adding label '{test_label_title}'...", - "Successfully added label:" - ] + """ ) + _assert_contains(add_label_output, [f"Adding label '{test_label_title}'...", "Successfully added label:"]) 
# Extract label ID for cleanup import re + label_id_match = re.search(r"ID=(\d+)", add_label_output) assert label_id_match, "Could not extract label ID from output" label_id = label_id_match.group(1) @@ -1491,126 +1476,122 @@ def test_labels_tests_full_workflow(self): test_ids = ["266149", "266151"] # Real test IDs for functional testing # Test 1: Add labels to tests using --test-ids - add_tests_output = _run_cmd(f""" + add_tests_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests add \\ --test-ids "{','.join(test_ids)}" \\ --title "{test_label_title}" - """) - - _assert_contains( - add_tests_output, - [ - f"Adding label '{test_label_title}' to {len(test_ids)} test(s)..." - ] + """ ) + _assert_contains(add_tests_output, [f"Adding label '{test_label_title}' to {len(test_ids)} test(s)..."]) + # Test 2: Add labels to tests using CSV file import os + csv_file_path = os.path.join(os.path.dirname(__file__), "sample_csv", "test_ids.csv") - - add_tests_csv_output = _run_cmd(f""" + + add_tests_csv_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests add \\ --test-id-file "{csv_file_path}" \\ --title "{test_label_title}" - """) - + """ + ) + _assert_contains( add_tests_csv_output, - [ - "Loaded 2 test ID(s) from file", - f"Adding label '{test_label_title}' to 2 test(s)..." - ] + ["Loaded 2 test ID(s) from file", f"Adding label '{test_label_title}' to 2 test(s)..."], ) # Test 3: Get test labels for specific tests - get_test_labels_output = _run_cmd(f""" + get_test_labels_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests get \\ --test-ids "{','.join(test_ids)}" - """) + """ + ) _assert_contains( - get_test_labels_output, - [ - f"Retrieving labels for {len(test_ids)} test(s)...", - "Test label information:" - ] + get_test_labels_output, [f"Retrieving labels for {len(test_ids)} test(s)...", "Test label information:"] ) finally: # Cleanup - delete the test label - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_id} - """) + """ + ) def test_labels_tests_validation_errors(self): """Test validation errors for test label commands""" import random import string - + # Generate random suffix to avoid label conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) - + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) + # Test title too long (21 characters exceeds 20 character limit) long_title = f"this-is-a-very-long-title-{random_suffix}" # This will be > 20 chars - title_error_output, return_code = _run_cmd_allow_failure(f""" + title_error_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests add \\ --test-ids "266149" \\ --title "{long_title}" - """) - assert return_code != 0 - _assert_contains( - title_error_output, - ["exceeds 20 character limit and will be skipped."] + """ ) + assert return_code != 0 + _assert_contains(title_error_output, ["exceeds 20 character limit and will be skipped."]) # Test missing test-ids and file valid_title = f"test-{random_suffix}"[:20] # Ensure valid length - missing_ids_output, return_code = _run_cmd_allow_failure(f""" + 
missing_ids_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests add \\ --title "{valid_title}" - """) - assert return_code != 0 - _assert_contains( - missing_ids_output, - ["Error: Either --test-ids or --test-id-file must be provided."] + """ ) + assert return_code != 0 + _assert_contains(missing_ids_output, ["Error: Either --test-ids or --test-id-file must be provided."]) # Test invalid label IDs format in list command - invalid_ids_output, return_code = _run_cmd_allow_failure(f""" + invalid_ids_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests list \\ --run-id "1" \\ --ids "invalid,ids" - """) + """ + ) assert return_code != 0 _assert_contains( - invalid_ids_output, - ["Error: Invalid label IDs format. Use comma-separated integers (e.g., 1,2,3)."] + invalid_ids_output, ["Error: Invalid label IDs format. Use comma-separated integers (e.g., 1,2,3)."] ) def test_labels_tests_help_commands(self): """Test help output for test label commands""" - + # Test main tests help tests_help_output = _run_cmd("trcli labels tests --help") _assert_contains( @@ -1620,9 +1601,9 @@ def test_labels_tests_help_commands(self): "Manage labels for tests", "Commands:", "add", - "list", - "get" - ] + "list", + "get", + ], ) # Test tests add help @@ -1634,8 +1615,8 @@ def test_labels_tests_help_commands(self): "Add label(s) to tests", "--test-ids", "--test-id-file", - "--title" - ] + "--title", + ], ) # Test tests list help @@ -1646,35 +1627,27 @@ def test_labels_tests_help_commands(self): "Usage: trcli labels tests list [OPTIONS]", "List tests filtered by label ID from specific runs", "--run-id", - "--ids" - ] + "--ids", + ], ) # Test tests get help tests_get_help_output = _run_cmd("trcli labels tests get --help") _assert_contains( tests_get_help_output, - [ - "Usage: trcli labels tests get [OPTIONS]", - "Get the labels of tests using test IDs", - "--test-id" - ] + ["Usage: trcli labels tests get [OPTIONS]", "Get the labels of tests using test IDs", "--test-id"], ) def test_references_cases_help_commands(self): """Test references cases help commands""" - + # Test main references help references_help_output = _run_cmd("trcli references --help") _assert_contains( references_help_output, - [ - "Usage: trcli references [OPTIONS] COMMAND [ARGS]...", - "Manage references in TestRail", - "cases" - ] + ["Usage: trcli references [OPTIONS] COMMAND [ARGS]...", "Manage references in TestRail", "cases"], ) - + # Test references cases help cases_help_output = _run_cmd("trcli references cases --help") _assert_contains( @@ -1683,23 +1656,18 @@ def test_references_cases_help_commands(self): "Usage: trcli references cases [OPTIONS] COMMAND [ARGS]...", "Manage references for test cases", "add", - "update", - "delete" - ] + "update", + "delete", + ], ) - + # Test references cases add help add_help_output = _run_cmd("trcli references cases add --help") _assert_contains( add_help_output, - [ - "Usage: trcli references cases add [OPTIONS]", - "Add references to test cases", - "--case-ids", - "--refs" - ] + ["Usage: trcli references cases add [OPTIONS]", "Add references to test cases", "--case-ids", "--refs"], ) - + # Test references cases update help update_help_output = _run_cmd("trcli references cases update --help") _assert_contains( @@ -1708,10 +1676,10 @@ def test_references_cases_help_commands(self): "Usage: trcli references 
cases update [OPTIONS]", "Update references on test cases by replacing existing ones", "--case-ids", - "--refs" - ] + "--refs", + ], ) - + # Test references cases delete help delete_help_output = _run_cmd("trcli references cases delete --help") _assert_contains( @@ -1720,66 +1688,66 @@ def test_references_cases_help_commands(self): "Usage: trcli references cases delete [OPTIONS]", "Delete all or specific references from test cases", "--case-ids", - "--refs" - ] + "--refs", + ], ) def test_references_cases_error_scenarios(self): """Test references cases error scenarios""" - + # Test invalid test case IDs format - invalid_ids_output, return_code = _run_cmd_allow_failure(f""" + invalid_ids_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ references cases add \\ --case-ids "invalid,ids" \\ --refs "REQ-1" - """) + """ + ) assert return_code != 0 _assert_contains( - invalid_ids_output, - ["Error: Invalid test case IDs format. Use comma-separated integers (e.g., 1,2,3)."] + invalid_ids_output, ["Error: Invalid test case IDs format. Use comma-separated integers (e.g., 1,2,3)."] ) - + # Test empty references - empty_refs_output, return_code = _run_cmd_allow_failure(f""" + empty_refs_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ references cases add \\ --case-ids "321" \\ --refs ",,," - """) - assert return_code != 0 - _assert_contains( - empty_refs_output, - ["Error: No valid references provided."] + """ ) - + assert return_code != 0 + _assert_contains(empty_refs_output, ["Error: No valid references provided."]) + # Test references too long (over 2000 characters) - long_refs = ','.join([f'REQ-{i}' * 100 for i in range(10)]) # Create very long references - long_refs_output, return_code = _run_cmd_allow_failure(f""" + long_refs = ",".join([f"REQ-{i}" * 100 for i in range(10)]) # Create very long references + long_refs_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ references cases add \\ --case-ids "321" \\ --refs "{long_refs}" - """) - assert return_code != 0 - _assert_contains( - long_refs_output, - ["exceeds 2000 character limit"] + """ ) + assert return_code != 0 + _assert_contains(long_refs_output, ["exceeds 2000 character limit"]) # ==================== ASSIGN FEATURE TESTS ==================== - + def test_assign_failures_single_user(self): """Test --assign feature with single user""" # Note: This test assumes a valid TestRail user exists in the instance # In a real environment, you would use actual user emails - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1787,7 +1755,8 @@ def test_assign_failures_single_user(self): --title "[CLI-E2E-Tests] Assign Failures - Single User" \\ --assign "trcli@gurock.io" \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( output, [ @@ -1795,13 +1764,14 @@ def test_assign_failures_single_user(self): "Processed 6 test cases in section [ASSIGNTESTSUITE]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Submitted 6 test results in", - "Assigning failed results: 4/4, Done." 
- ] + "Assigning failed results: 4/4, Done.", + ], ) def test_assign_failures_multiple_users(self): """Test --assign feature with multiple users (round-robin assignment)""" - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1809,7 +1779,8 @@ def test_assign_failures_multiple_users(self): --title "[CLI-E2E-Tests] Assign Failures - Multiple Users" \\ --assign "trcli@gurock.io,trcli@testrail.com" \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( output, [ @@ -1817,13 +1788,14 @@ def test_assign_failures_multiple_users(self): "Processed 6 test cases in section [ASSIGNTESTSUITE]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Submitted 6 test results in", - "Assigning failed results: 4/4, Done." - ] + "Assigning failed results: 4/4, Done.", + ], ) def test_assign_failures_short_form(self): """Test --assign feature using -a short form""" - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1831,7 +1803,8 @@ def test_assign_failures_short_form(self): --title "[CLI-E2E-Tests] Assign Failures - Short Form" \\ -a "trcli@gurock.io" \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( output, [ @@ -1839,35 +1812,38 @@ def test_assign_failures_short_form(self): "Processed 6 test cases in section [ASSIGNTESTSUITE]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Submitted 6 test results in", - "Assigning failed results: 4/4, Done." - ] + "Assigning failed results: 4/4, Done.", + ], ) def test_assign_failures_without_assign_option(self): """Test that normal operation works without --assign option""" - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_junit \\ --title "[CLI-E2E-Tests] No Assign Option" \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( output, [ "Auto-assign failures: No", "Processed 6 test cases in section [ASSIGNTESTSUITE]", f"Creating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) # Should NOT contain assignment message assert "Assigning failed results:" not in output def test_assign_failures_invalid_user(self): """Test --assign feature with invalid user email""" - output, return_code = _run_cmd_allow_failure(f""" + output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1875,19 +1851,16 @@ def test_assign_failures_invalid_user(self): --title "[CLI-E2E-Tests] Assign Failures - Invalid User" \\ --assign "invalid.user@nonexistent.com" \\ -f "reports_junit/assign_test_failures.xml" - """) - - assert return_code != 0 - _assert_contains( - output, - [ - "Error: User not found: invalid.user@nonexistent.com" - ] + """ ) + assert return_code != 0 + _assert_contains(output, ["Error: User not found: invalid.user@nonexistent.com"]) + def test_assign_failures_mixed_valid_invalid_users(self): """Test --assign feature with mix of valid and invalid users""" - output, return_code = _run_cmd_allow_failure(f""" + output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1895,19 +1868,16 @@ def test_assign_failures_mixed_valid_invalid_users(self): --title "[CLI-E2E-Tests] Assign Failures - Mixed Users" \\ --assign "trcli@gurock.io,invalid.user@nonexistent.com" \\ -f "reports_junit/assign_test_failures.xml" - """) - - assert return_code != 0 - _assert_contains( - output, - [ - "Error: User not found: invalid.user@nonexistent.com" - ] + """ ) + assert return_code != 0 + _assert_contains(output, ["Error: User not found: invalid.user@nonexistent.com"]) + def test_assign_failures_whitespace_handling(self): """Test --assign feature with whitespace in email list""" - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1915,7 +1885,8 @@ def test_assign_failures_whitespace_handling(self): --title "[CLI-E2E-Tests] Assign Failures - Whitespace" \\ --assign " trcli@gurock.io , trcli@testrail.com " \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( output, [ @@ -1923,42 +1894,41 @@ def test_assign_failures_whitespace_handling(self): "Processed 6 test cases in section [ASSIGNTESTSUITE]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Submitted 6 test results in", - "Assigning failed results: 4/4, Done." - ] + "Assigning failed results: 4/4, Done.", + ], ) def test_assign_failures_help_documentation(self): """Test that --assign option appears in help documentation""" help_output = _run_cmd("trcli parse_junit --help") _assert_contains( - help_output, - [ - "-a, --assign", - "Comma-separated list of user emails to assign failed", - "test results to." 
- ] + help_output, ["-a, --assign", "Comma-separated list of user emails to assign failed", "test results to."] ) def test_assign_failures_with_existing_run(self): """Test --assign feature when updating an existing run""" # First create a run - create_output = _run_cmd(f""" + create_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_junit \\ --title "[CLI-E2E-Tests] Assign Failures - Update Run" \\ -f "reports_junit/generic_ids_auto.xml" - """) - + """ + ) + # Extract run ID from output import re - run_id_match = re.search(r'runs/view/(\d+)', create_output) + + run_id_match = re.search(r"runs/view/(\d+)", create_output) assert run_id_match, "Could not extract run ID from output" run_id = run_id_match.group(1) - + # Update the run with failed tests and assignment - update_output = _run_cmd(f""" + update_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1967,14 +1937,401 @@ def test_assign_failures_with_existing_run(self): --title "[CLI-E2E-Tests] Assign Failures - Update Run" \\ --assign "trcli@gurock.io" \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( update_output, [ "Auto-assign failures: Yes (trcli@gurock.io)", f"Updating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view/{run_id}", "Submitted 6 test results in", - "Assigning failed results: 4/4, Done." - ] + "Assigning failed results: 4/4, Done.", + ], + ) + + # ==================== BDD/GHERKIN FEATURE TESTS ==================== + + def test_parse_gherkin_local_parsing(self): + """Test parse_gherkin command for local .feature file parsing (no TestRail upload)""" + output = _run_cmd( + f""" +trcli parse_gherkin \\ + -f "reports_gherkin/sample_login.feature" + """ + ) + _assert_contains( + output, + [ + "Parsing Gherkin feature file:", + "sample_login.feature", + '"suites"', + '"summary"', + '"total_suites"', + '"total_cases"', + ], + ) + + def test_parse_gherkin_with_output_file(self): + """Test parse_gherkin command with output file option""" + output = _run_cmd( + f""" +trcli parse_gherkin \\ + -f "reports_gherkin/sample_login.feature" \\ + --output "parsed_gherkin.json" + """ + ) + _assert_contains( + output, + ["Parsing Gherkin feature file:", "sample_login.feature", "Parsed results saved to", "parsed_gherkin.json"], + ) + + def test_parse_gherkin_pretty_format(self): + """Test parse_gherkin command with pretty print formatting""" + output = _run_cmd( + f""" +trcli parse_gherkin \\ + -f "reports_gherkin/sample_login.feature" \\ + --pretty + """ + ) + _assert_contains(output, ["Parsing Gherkin feature file:", "sample_login.feature", '"suites"', '"summary"']) + + def test_parse_gherkin_custom_suite_name(self): + """Test parse_gherkin command with custom suite name""" + output = _run_cmd( + f""" +trcli parse_gherkin \\ + -f "reports_gherkin/sample_login.feature" \\ + --suite-name "Custom BDD Suite" + """ + ) + _assert_contains(output, ["Parsing Gherkin feature file:", '"name": "Custom BDD Suite"']) + + def test_import_gherkin_upload_feature(self): + """Test import_gherkin command to upload .feature file to TestRail""" + output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + import_gherkin \\ + -f "reports_gherkin/sample_bdd.feature" \\ + --section-id 2388 + """ + ) + _assert_contains( + output, + [ + "Connecting to TestRail...", + "Uploading feature file to TestRail...", + "Successfully 
uploaded feature file!", + "Created/updated", + "test case(s)", + "Case IDs:", + ], + ) + + def test_import_gherkin_with_json_output(self): + """Test import_gherkin command with JSON output format""" + output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + import_gherkin \\ + -f "reports_gherkin/sample_bdd.feature" \\ + --section-id 2388 \\ + --json-output + """ + ) + _assert_contains( + output, ["Connecting to TestRail...", "Uploading feature file to TestRail...", '"case_ids"', '"count"'] + ) + + def test_import_gherkin_with_verbose(self): + """Test import_gherkin command with verbose logging""" + output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + import_gherkin \\ + -f "reports_gherkin/sample_bdd.feature" \\ + --section-id 2388 \\ + --verbose + """ + ) + _assert_contains( + output, + [ + "Connecting to TestRail...", + "Uploading feature file to TestRail...", + "Successfully uploaded feature file!", + ], + ) + + def test_export_gherkin_download_to_stdout(self): + """Test export_gherkin command to download BDD test case to stdout""" + # First, import a feature file to ensure we have a case to export + import_output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + import_gherkin \\ + -f "reports_gherkin/sample_bdd.feature" \\ + --section-id 2388 \\ + --json-output + """ + ) + + # Extract case ID from JSON output + import re + import json + + json_start = import_output.find("{") + if json_start >= 0: + json_str = import_output[json_start:] + output_data = json.loads(json_str) + case_id = output_data.get("case_ids", [])[0] if output_data.get("case_ids") else None + + if case_id: + # Now export the case + export_output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + export_gherkin \\ + --case-id {case_id} + """ + ) + _assert_contains(export_output, ["Connecting to TestRail...", "Retrieving BDD test case", "Feature:"]) + + def test_export_gherkin_download_to_file(self): + """Test export_gherkin command to download BDD test case to file""" + # First, import a feature file to ensure we have a case to export + import_output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + import_gherkin \\ + -f "reports_gherkin/sample_bdd.feature" \\ + --section-id 2388 \\ + --json-output + """ + ) + + # Extract case ID from JSON output + import re + import json + + json_start = import_output.find("{") + if json_start >= 0: + json_str = import_output[json_start:] + output_data = json.loads(json_str) + case_id = output_data.get("case_ids", [])[0] if output_data.get("case_ids") else None + + if case_id: + # Now export the case to a file + export_output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + export_gherkin \\ + --case-id {case_id} \\ + --output "exported_bdd.feature" + """ + ) + _assert_contains( + export_output, + [ + "Connecting to TestRail...", + "Retrieving BDD test case", + "Successfully exported BDD test case", + "exported_bdd.feature", + ], + ) + + def test_parse_cucumber_workflow1_results_only(self): + """Test parse_cucumber Workflow 1: Parse and upload results only (no feature upload)""" + output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) 
TRCLI-E2E-Tests" \\ + parse_cucumber \\ + --title "[CLI-E2E-Tests] Cucumber Parser - Results Only" \\ + --suite-id 128 \\ + -f "reports_cucumber/sample_cucumber.json" + """ + ) + _assert_contains( + output, + [ + "Processed", + "test cases", + f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", + "Submitted", + "test results", + ], + ) + + def test_parse_cucumber_workflow2_with_feature_upload(self): + """Test parse_cucumber Workflow 2: Generate feature, upload, then upload results""" + output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + parse_cucumber \\ + --title "[CLI-E2E-Tests] Cucumber Parser - With Feature Upload" \\ + --suite-id 128 \\ + --upload-feature \\ + --feature-section-id 2388 \\ + -f "reports_cucumber/sample_cucumber.json" + """ ) - \ No newline at end of file + _assert_contains( + output, + [ + "Generating .feature file from Cucumber JSON...", + "Generated .feature file", + "Uploading .feature file to TestRail...", + "Successfully uploaded", + "test case(s) from .feature file", + "Processed", + "test cases", + f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", + "Submitted", + "test results", + ], + ) + + def test_parse_cucumber_advanced_features(self): + """Test parse_cucumber with advanced Gherkin features (Background, Examples, Rules)""" + output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + parse_cucumber \\ + --title "[CLI-E2E-Tests] Cucumber Parser - Advanced Features" \\ + --suite-id 128 \\ + --upload-feature \\ + --feature-section-id 2388 \\ + -f "reports_cucumber/sample_cucumber_advanced.json" + """ + ) + _assert_contains( + output, + [ + "Generating .feature file from Cucumber JSON...", + "Generated .feature file", + "Uploading .feature file to TestRail...", + "Successfully uploaded", + "test case(s) from .feature file", + "Processed", + "test cases", + f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", + ], + ) + + def test_parse_cucumber_with_verbose_logging(self): + """Test parse_cucumber with verbose logging enabled""" + output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + parse_cucumber \\ + --title "[CLI-E2E-Tests] Cucumber Parser - Verbose" \\ + --suite-id 128 \\ + --verbose \\ + -f "reports_cucumber/sample_cucumber.json" + """ + ) + _assert_contains( + output, ["Processed", "test cases", f"Creating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view"] + ) + + def test_bdd_help_commands(self): + """Test that all BDD commands appear in help documentation""" + + # Test main CLI help shows BDD commands + main_help_output = _run_cmd("trcli --help") + _assert_contains(main_help_output, ["parse_gherkin", "import_gherkin", "export_gherkin", "parse_cucumber"]) + + # Test parse_gherkin help + parse_gherkin_help = _run_cmd("trcli parse_gherkin --help") + _assert_contains( + parse_gherkin_help, + [ + "Parse Gherkin .feature file locally", + "-f, --file", + "--output", + "--pretty", + "--suite-name", + "--case-matcher", + ], + ) + + # Test import_gherkin help + import_gherkin_help = _run_cmd("trcli import_gherkin --help") + _assert_contains( + import_gherkin_help, + [ + "Upload Gherkin .feature file to TestRail", + "-f, --file", + "--section-id", + "--json-output", + "-v, --verbose", + ], + ) + + # Test export_gherkin help + export_gherkin_help = _run_cmd("trcli export_gherkin --help") + _assert_contains( + export_gherkin_help, + ["Export BDD test case from TestRail as .feature file", "--case-id", "--output", "-v, --verbose"], + ) + + # Test parse_cucumber help + parse_cucumber_help = _run_cmd("trcli parse_cucumber --help") + _assert_contains( + parse_cucumber_help, + [ + "Parse Cucumber JSON results and upload to TestRail", + "--upload-feature", + "--feature-section-id", + "--title", + "--suite-id", + ], + ) + + def test_bdd_error_handling_invalid_file(self): + """Test BDD commands with invalid file paths""" + + # Test parse_gherkin with non-existent file + invalid_parse_output, return_code = _run_cmd_allow_failure( + """ +trcli parse_gherkin \\ + -f "nonexistent.feature" + """ + ) + assert return_code != 0 + + # Test import_gherkin with non-existent file + invalid_import_output, return_code = _run_cmd_allow_failure( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + import_gherkin \\ + -f "nonexistent.feature" \\ + --section-id 2388 + """ + ) + assert return_code != 0 From b2bf75e9a076dc537316aadcf555ded57756356b Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 3 Dec 2025 21:17:21 +0800 Subject: [PATCH 06/33] TRCLI-193 Updated parse_cucumber, parse_gherkin, import and export gherkin commands also updated readme file with comprehensive guide --- README.md | 394 ++++++++++++++++++++++++++- trcli/api/api_request_handler.py | 150 ++++++++++ trcli/cli.py | 56 ++-- trcli/commands/cmd_export_gherkin.py | 5 - trcli/commands/cmd_import_gherkin.py | 11 +- trcli/commands/cmd_parse_cucumber.py | 215 +++++++++++---- trcli/commands/cmd_parse_gherkin.py | 31 +-- trcli/readers/cucumber_json.py | 106 ++++++- 8 files changed, 842 insertions(+), 126 deletions(-) diff --git a/README.md b/README.md index 5576efb..9fa3ea6 100644 --- a/README.md +++ b/README.md @@ -33,22 +33,25 @@ trcli ``` You should get something like this: ``` -TestRail CLI v1.13.0 +TestRail CLI v1.12.5 Copyright 2025 Gurock Software GmbH - www.gurock.com Supported and loaded modules: - parse_junit: JUnit XML Files (& Similar) - - parse_gherkin: Gherkin .feature files (BDD) + - parse_cucumber: Cucumber JSON results (BDD) + - import_gherkin: Upload .feature files to TestRail BDD + - export_gherkin: Export BDD test cases as .feature files - parse_robot: Robot Framework XML Files - parse_openapi: OpenAPI YML Files - - add_run: Create a new empty test run + - add_run: Create a new test run - labels: Manage labels (add, update, delete, list) + - references: Manage references (cases and runs) 
``` CLI general reference -------- ```shell $ trcli --help -TestRail CLI v1.12.4 +TestRail CLI v1.12.5 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli [OPTIONS] COMMAND [ARGS]... @@ -84,9 +87,12 @@ Options: Commands: add_run Add a new test run in TestRail + export_gherkin Export BDD test case from TestRail as .feature file + import_gherkin Upload Gherkin .feature file to TestRail labels Manage labels in TestRail + parse_cucumber Parse Cucumber JSON results and upload to TestRail + parse_gherkin Parse Gherkin .feature file locally parse_junit Parse JUnit report and upload results to TestRail - parse_gherkin Parse Gherkin .feature files and upload results to TestRail parse_openapi Parse OpenAPI spec and create cases in TestRail parse_robot Parse Robot Framework report and upload results to TestRail references Manage references in TestRail @@ -323,6 +329,380 @@ Assigning failed results: 3/3, Done. Submitted 25 test results in 2.1 secs. ``` +## Behavior-Driven Development (BDD) Support + +The TestRail CLI provides comprehensive support for Behavior-Driven Development workflows using Gherkin syntax. The BDD features enable you to manage test cases written in Gherkin format, execute BDD tests with various frameworks (Cucumber, Behave, pytest-bdd, etc.), and seamlessly upload results to TestRail. + +### BDD Commands Overview + +The TestRail CLI provides four commands for complete BDD workflow management: + +| Command | Purpose | Use Case | +|---------|---------|----------| +| `import_gherkin` | Import .feature files to create test cases | Create BDD test cases in TestRail from existing .feature files | +| `export_gherkin` | Export test cases as .feature files | Extract test cases from TestRail for automation | +| `parse_cucumber` | Parse Cucumber JSON and upload results | Upload test results from Cucumber/Behave/pytest-bdd execution | +| `parse_gherkin` | Parse .feature files locally (no upload) | Validate syntax, convert to JSON, preview TestRail structure | + +### Uploading Cucumber/BDD Test Results + +The `parse_cucumber` command allows you to upload automated test results from BDD frameworks that generate Cucumber JSON format, including: +- **Cucumber (Java, JavaScript, Ruby)** +- **Behave (Python)** +- **pytest-bdd (Python)** +- **SpecFlow (.NET)** (with Cucumber JSON output) +- **Cucumber-JVM (Java)** + +#### Reference +```shell +$ trcli parse_cucumber --help +Usage: trcli parse_cucumber [OPTIONS] + + Parse Cucumber JSON results and upload to TestRail + +Options: + -f, --file Filename and path. + --close-run Close the newly created run + --title Title of Test Run to be created in TestRail. + --case-matcher Mechanism to match cases between the report and + TestRail. + --suite-id Suite ID to submit results to. [x>=1] + --suite-name Suite name to submit results to. + --run-id Run ID for the results they are reporting. [x>=1] + --plan-id Plan ID with which the Test Run will be associated. [x>=1] + --config-ids Comma-separated configuration IDs to use along with Test Plans. + --milestone-id Milestone ID to which the Test Run should be associated to. [x>=1] + --section-id Section ID to create new sections with test cases under. [x>=1] + --run-description Summary text to be added to the test run. + --case-fields List of case fields and values for new test cases creation. + --result-fields List of result fields and values for test results creation. + --allow-ms Allows using milliseconds for elapsed times. 
+ --upload-feature Generate and upload .feature file to create/update test cases via BDD endpoint. + --feature-section-id Section ID for uploading .feature file (required if --upload-feature is used). [x>=1] + -v, --verbose Enable verbose logging output. + --help Show this message and exit. +``` + +#### Cucumber JSON Format Example +```json +[ + { + "id": "user-login", + "name": "User Login", + "description": "As a registered user\n\tI want to log in to the application\n\tSo that I can access my account", + "uri": "features/login.feature", + "elements": [ + { + "id": "user-login;successful-login-with-valid-credentials", + "name": "Successful login with valid credentials", + "type": "scenario", + "description": "", + "keyword": "Scenario", + "tags": [ + {"name": "@smoke"}, + {"name": "@authentication"} + ], + "steps": [ + { + "keyword": "Given ", + "name": "I have a valid username \"testuser\"", + "result": { + "status": "passed", + "duration": 1500000000 + } + }, + { + "keyword": "When ", + "name": "I enter my credentials", + "result": { + "status": "passed", + "duration": 500000000 + } + }, + { + "keyword": "Then ", + "name": "I should be redirected to the dashboard", + "result": { + "status": "passed", + "duration": 300000000 + } + } + ] + } + ] + } +] +``` + +**Mapping Cucumber JSON to TestRail entities:** + +| Cucumber JSON Element | TestRail Entity | Notes | +|----------------------|-----------------|-------| +| `feature` | Section | Feature name becomes section name | +| `scenario` / `scenario outline` | Test Case | Each scenario creates a test case | +| `step` | Test Step | Steps with results become step results | +| `tags` | Case Tags/Refs | Tags like @smoke, @C123 map to TestRail fields | + +#### Two Workflows for BDD Test Results + +##### Workflow 1: Upload Results Only (Code-First) + +Use this workflow when test cases already exist in TestRail and you want to match them using automation_id. + +```shell +# Upload results to existing test cases +$ trcli parse_cucumber -f cucumber-results.json \ + --project "Your Project" \ + --suite-id 2 \ + --title "BDD Test Run" \ + -n + +# With automation (auto-create test cases if missing) +$ trcli parse_cucumber -f cucumber-results.json \ + --project "Your Project" \ + --suite-id 2 \ + --title "BDD Test Run" \ + -y +``` + +**How it works:** +- Parser creates automation_id from feature name + scenario name +- TestRail CLI matches scenarios to existing cases via automation_id +- Results are uploaded to matched test cases +- With `-y`: Creates new test cases if no match found +- With `-n`: Skips scenarios without matching test cases + +##### Workflow 2: Create BDD Test Cases + Upload Results (Specification-First) + +Use this workflow to automatically create BDD test cases from Cucumber results using TestRail's BDD endpoint. + +```shell +# Create BDD test cases and upload results +$ trcli parse_cucumber -f cucumber-results.json \ + --project "Your Project" \ + --suite-id 2 \ + --upload-feature \ + --feature-section-id 123 \ + --title "BDD Test Run" \ + -y +``` + +**How it works:** +1. Parses Cucumber JSON results +2. Generates complete .feature files (one per feature) +3. Uploads .feature files to TestRail via `add_bdd` endpoint +4. TestRail creates BDD test cases with Gherkin content +5. Maps created case IDs to test results +6. Uploads all scenario results to their respective test cases +7. 
Sets automation_id on created test cases for future matching + +#### Case Matching for BDD Tests + +BDD test matching works similarly to JUnit, with automation_id generated from your test structure: + +**Automation ID Format:** +``` +. +``` + +**Example:** +``` +Feature: User Login + Scenario: Successful login with valid credentials + +Automation ID: User Login.Successful login with valid credentials +``` + +You can also use Case ID matching with `@C` tags: + +```gherkin +Feature: User Login + @C123 + Scenario: Successful login with valid credentials + Given I am on the login page + When I enter valid credentials + Then I should see the dashboard +``` + +### Importing Gherkin Feature Files + +The `import_gherkin` command allows you to upload BDD test cases in TestRail from existing .feature files. + +#### Reference +```shell +$ trcli import_gherkin --help +Usage: trcli import_gherkin [OPTIONS] + + Import Gherkin .feature file to create BDD test cases in TestRail + +Options: + -f, --file Path to .feature file to import [required] + --section-id Section ID where test cases will be created [x>=1] [required] + -v, --verbose Enable verbose logging output + --help Show this message and exit. +``` + +#### Usage Example +```shell +# Import a single feature file +$ trcli import_gherkin -f features/login.feature \ + --project "Your Project" \ + --section-id 456 \ + -y + +# Import with custom project settings +$ trcli import_gherkin -f features/checkout.feature \ + --project-id 10 \ + --section-id 789 \ + -v -y +``` + +**How it works:** +1. Reads the .feature file +2. Uploads to TestRail via `add_bdd` endpoint +3. TestRail creates test case(s) with complete Gherkin content +4. Returns created case ID(s) + +**Example .feature file:** +```gherkin +Feature: User Login + As a registered user + I want to log in to the application + So that I can access my account + + Background: + Given the application is running + And I am on the login page + + @smoke @authentication + Scenario: Successful login with valid credentials + Given I have a valid username "testuser" + And I have a valid password "password123" + When I enter my credentials + And I click the login button + Then I should be redirected to the dashboard + And I should see a welcome message "Welcome, testuser" + + @negative @authentication + Scenario: Failed login with invalid password + Given I have a valid username "testuser" + And I have an invalid password "wrongpassword" + When I enter my credentials + And I click the login button + Then I should see an error message "Invalid credentials" + And I should remain on the login page +``` + +### Exporting BDD Test Cases + +The `export_gherkin` command allows you to export existing BDD test cases from TestRail as .feature files. + +#### Reference +```shell +$ trcli export_gherkin --help +Usage: trcli export_gherkin [OPTIONS] + + Export BDD test case from TestRail as .feature file + +Options: + --case-id TestRail test case ID to export [x>=1] [required] + --output Output path for the .feature file (prints to stdout if not specified) + -v, --verbose Enable verbose logging output + --help Show this message and exit. 
+``` + +#### Usage Examples +```shell +# Export to stdout +$ trcli export_gherkin --case-id 123 \ + --project "Your Project" + +# Export to file +$ trcli export_gherkin --case-id 123 \ + --project "Your Project" \ + --output features/exported-login.feature + +# Export with verbose logging +$ trcli export_gherkin --case-id 456 \ + --project-id 10 \ + --output features/checkout.feature \ + -v +``` + +**Output example:** +``` +Connecting to TestRail... +Retrieving BDD test case 123... + +✓ Successfully exported test case 123 + File: features/exported-login.feature + Size: 1247 characters +``` + +**Use cases:** +- Extract test cases for automation +- Synchronize TestRail with version control +- Generate documentation from test cases +- Migrate test cases between projects + +### Parsing Gherkin Feature Files Locally + +The `parse_gherkin` command parses Gherkin .feature files locally and converts them into TestRail data structure format without uploading to TestRail. This is useful for validation, conversion, or integration with custom workflows. + +#### Reference +```shell +$ trcli parse_gherkin --help +Usage: trcli parse_gherkin [OPTIONS] + + Parse Gherkin .feature file locally + + This command parses Gherkin/BDD .feature files and converts them into + TestRail data structure format without uploading to TestRail. + +Options: + -f, --file Path to Gherkin .feature file to parse [required] + --output Optional output file path to save parsed JSON + --pretty Pretty print JSON output with indentation + --help Show this message and exit. +``` + +#### Usage Examples +```shell +# Parse a feature file and output to console +$ trcli parse_gherkin -f features/login.feature + +# Parse and save to JSON file with pretty formatting +$ trcli parse_gherkin -f features/login.feature \ + --output parsed-output.json \ + --pretty + +# Parse multiple feature files +$ trcli parse_gherkin -f features/checkout.feature \ + --output checkout.json \ + --pretty +``` + +**Use cases:** +- Validate Gherkin syntax locally before uploading +- Convert .feature files to TestRail JSON format +- Preview how features will be structured in TestRail +- Integrate with custom automation workflows +- Debug feature file parsing issues + +### BDD Mapping to TestRail + +When using parse_cucumber with `--upload-feature`, the following mapping rules apply: + +| Gherkin Element | TestRail Field | Description | +|----------------|----------------|-------------| +| `Feature:` name + description | Test Case title + Preconditions | Feature metadata becomes test case info | +| `Background:` | BDD Scenario field | Shared setup steps | +| `Scenario:` / `Scenario Outline:` | BDD Scenario field | Individual test scenarios | +| `Given`/`When`/`Then`/`And`/`But` | BDD Scenario field | Test steps with keywords | +| `Examples:` table | BDD Scenario field | Data table for scenario outlines | +| `@tags` | References/BDD fields | Tags become references (e.g., @JIRA-123) | +| `@C` tags | Case ID | Map to existing test cases (e.g., @C456) | + ### Exploring other features #### General features @@ -1096,7 +1476,7 @@ Options: ### Reference ```shell $ trcli add_run --help -TestRail CLI v1.12.4 +TestRail CLI v1.12.5 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli add_run [OPTIONS] @@ -1220,7 +1600,7 @@ providing you with a solid base of test cases, which you can further expand on T ### Reference ```shell $ trcli parse_openapi --help -TestRail CLI v1.12.4 +TestRail CLI v1.12.5 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli 
parse_openapi [OPTIONS] diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index b7fb5ff..e719172 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -1732,6 +1732,34 @@ def delete_case_references(self, case_id: int, specific_references: List[str] = else: return False, update_response.error_message + def update_case_automation_id(self, case_id: int, automation_id: str) -> Tuple[bool, str]: + """ + Update the automation_id field of a test case + + Args: + case_id: TestRail test case ID + automation_id: Automation ID value to set + + Returns: + Tuple of (success, error_message) + - success: True if update succeeded, False otherwise + - error_message: Empty string on success, error details on failure + + API Endpoint: POST /api/v2/update_case/{case_id} + """ + self.environment.vlog(f"Setting automation_id '{automation_id}' on case {case_id}") + + update_data = {"custom_automation_id": automation_id} + update_response = self.client.send_post(f"update_case/{case_id}", update_data) + + if update_response.status_code == 200: + return True, "" + else: + error_msg = ( + update_response.error_message or f"Failed to update automation_id (HTTP {update_response.status_code})" + ) + return False, error_msg + def add_bdd(self, section_id: int, feature_content: str) -> Tuple[List[int], str]: """ Upload .feature file to TestRail BDD endpoint @@ -1814,3 +1842,125 @@ def get_bdd(self, case_id: int) -> Tuple[str, str]: else: error_msg = response.error_message or f"Failed to retrieve BDD test case (HTTP {response.status_code})" return "", error_msg + + def get_bdd_template_id(self, project_id: int) -> Tuple[int, str]: + """ + Get the BDD template ID for a project + + Args: + project_id: TestRail project ID + + Returns: + Tuple of (template_id, error_message) + - template_id: BDD template ID if found, None otherwise + - error_message: Empty string on success, error details on failure + + API Endpoint: GET /api/v2/get_templates/{project_id} + """ + self.environment.vlog(f"Getting templates for project {project_id}") + response = self.client.send_get(f"get_templates/{project_id}") + + if response.status_code == 200: + templates = response.response_text + if isinstance(templates, list): + self.environment.vlog(f"Retrieved {len(templates)} template(s) from TestRail") + + # Log all available templates for debugging + if templates: + self.environment.vlog("Available templates:") + for template in templates: + template_id = template.get("id") + template_name = template.get("name", "") + self.environment.vlog(f" - ID {template_id}: '{template_name}'") + + # Look for BDD template by name + for template in templates: + template_name = template.get("name", "").strip() + template_name_lower = template_name.lower() + template_id = template.get("id") + + self.environment.vlog(f"Checking template '{template_name}' (ID: {template_id})") + self.environment.vlog(f" Lowercase: '{template_name_lower}'") + + # Check for BDD template (support both US and UK spellings) + if ( + "behavior" in template_name_lower + or "behaviour" in template_name_lower + or "bdd" in template_name_lower + ): + self.environment.vlog(f" ✓ MATCH: This is the BDD template!") + self.environment.log(f"Found BDD template: '{template_name}' (ID: {template_id})") + return template_id, "" + else: + self.environment.vlog(f" ✗ No match: Does not contain 'behavior', 'behaviour', or 'bdd'") + + # Build detailed error message with available templates + error_parts = ["BDD template not found. 
Please enable BDD template in TestRail project settings."] + if templates: + template_list = ", ".join([f"'{t.get('name', 'Unknown')}'" for t in templates]) + error_parts.append(f"Available templates: {template_list}") + error_parts.append("The BDD template name should contain 'behavior', 'behaviour', or 'bdd'.") + else: + error_parts.append("No templates are available in this project.") + + return None, "\n".join(error_parts) + else: + return None, "Unexpected response format from get_templates" + else: + error_msg = response.error_message or f"Failed to get templates (HTTP {response.status_code})" + return None, error_msg + + def add_case_bdd( + self, section_id: int, title: str, bdd_content: str, template_id: int, tags: List[str] = None + ) -> Tuple[int, str]: + """ + Create a BDD test case with Gherkin content + + Args: + section_id: TestRail section ID where test case will be created + title: Test case title (scenario name) + bdd_content: Gherkin scenario content + template_id: BDD template ID + tags: Optional list of tags (for refs field) + + Returns: + Tuple of (case_id, error_message) + - case_id: Created test case ID if successful, None otherwise + - error_message: Empty string on success, error details on failure + + API Endpoint: POST /api/v2/add_case/{section_id} + """ + self.environment.vlog(f"Creating BDD test case '{title}' in section {section_id}") + + # Build request body + # Note: custom_testrail_bdd_scenario expects an array of lines, not a single string + bdd_lines = bdd_content.split("\n") if bdd_content else [] + + body = { + "title": title, + "template_id": template_id, + "custom_testrail_bdd_scenario": bdd_lines, + } + + # Add tags as references if provided + if tags: + # Filter out @C tags (case IDs) and format others + ref_tags = [tag for tag in tags if not tag.upper().startswith("@C")] + if ref_tags: + body["refs"] = ", ".join(ref_tags) + + response = self.client.send_post(f"add_case/{section_id}", body) + + if response.status_code == 200: + if isinstance(response.response_text, dict): + case_id = response.response_text.get("id") + if case_id: + self.environment.vlog(f"Created BDD test case ID: {case_id}") + return case_id, "" + else: + return None, "Response missing 'id' field" + else: + return None, "Unexpected response format" + else: + error_msg = response.error_message or f"Failed to create BDD test case (HTTP {response.status_code})" + return None, error_msg diff --git a/trcli/cli.py b/trcli/cli.py index 155e1e6..8a5112a 100755 --- a/trcli/cli.py +++ b/trcli/cli.py @@ -90,7 +90,7 @@ def case_fields(self, case_fields: Union[List[str], dict]): exit(1) self._case_fields = fields_dict - @property + @property def result_fields(self): return self._result_fields @@ -143,6 +143,21 @@ def set_parameters(self, context: click.core.Context): param_sources_types = [ParameterSource.DEFAULT] else: param_sources_types = [ParameterSource.DEFAULT, ParameterSource.ENVIRONMENT] + + # First, get parameters from parent context (global options like --verbose) + if context.parent: + for param, value in context.parent.params.items(): + if param == "config": + continue + param_config_value = self.params_from_config.get(param, None) + param_source = context.parent.get_parameter_source(param) + + if param_source in param_sources_types and param_config_value is not None: + setattr(self, param, param_config_value) + else: + setattr(self, param, value) + + # Then, process current context parameters (subcommand-specific options) for param, value in context.params.items(): # Don't set 
config again # Skip setting config again @@ -202,18 +217,11 @@ def parse_params_from_config_file(self, file_path: Path): for page_content in file_content: if page_content: self.params_from_config.update(page_content) - if ( - self.params_from_config.get("config") is not None - and self.default_config_file - ): + if self.params_from_config.get("config") is not None and self.default_config_file: self.default_config_file = False - self.parse_params_from_config_file( - self.params_from_config["config"] - ) + self.parse_params_from_config_file(self.params_from_config["config"]) except (yaml.YAMLError, ValueError, TypeError) as e: - self.elog( - FAULT_MAPPING["yaml_file_parse_issue"].format(file_path=file_path) - ) + self.elog(FAULT_MAPPING["yaml_file_parse_issue"].format(file_path=file_path)) self.elog(f"Error details:\n{e}") if not self.default_config_file: exit(1) @@ -280,10 +288,13 @@ def main(self, *args, **kwargs): ) @click.option("-u", "--username", type=click.STRING, metavar="", help="Username.") @click.option("-p", "--password", type=click.STRING, metavar="", help="Password.") -@click.option("-k", "--key", metavar="", help="API key used for authenticating with TestRail. This must be used in conjunction with --username. If provided, --password is not required.") @click.option( - "-v", "--verbose", is_flag=True, help="Output all API calls and their results." + "-k", + "--key", + metavar="", + help="API key used for authenticating with TestRail. This must be used in conjunction with --username. If provided, --password is not required.", ) +@click.option("-v", "--verbose", is_flag=True, help="Output all API calls and their results.") @click.option("--verify", is_flag=True, help="Verify the data was added correctly.") @click.option("--insecure", is_flag=True, help="Allow insecure requests.") @click.option( @@ -328,22 +339,11 @@ def main(self, *args, **kwargs): help="Silence stdout", default=False, ) +@click.option("--proxy", metavar="", help="Proxy address and port (e.g., http://proxy.example.com:8080).") +@click.option("--proxy-user", metavar="", help="Proxy username and password in the format 'username:password'.") @click.option( - "--proxy", - metavar="", - help="Proxy address and port (e.g., http://proxy.example.com:8080)." -) -@click.option( - "--proxy-user", - metavar="", - help="Proxy username and password in the format 'username:password'." + "--noproxy", metavar="", help="Comma-separated list of hostnames to bypass the proxy (e.g., localhost,127.0.0.1)." ) -@click.option( - "--noproxy", - metavar="", - help="Comma-separated list of hostnames to bypass the proxy (e.g., localhost,127.0.0.1)." -) - def cli(environment: Environment, context: click.core.Context, *args, **kwargs): """TestRail CLI""" if not sys.argv[1:]: @@ -354,6 +354,6 @@ def cli(environment: Environment, context: click.core.Context, *args, **kwargs): if not context.invoked_subcommand: print(MISSING_COMMAND_SLOGAN) exit(2) - + environment.parse_config_file(context) environment.set_parameters(context) diff --git a/trcli/commands/cmd_export_gherkin.py b/trcli/commands/cmd_export_gherkin.py index 0dba035..cc4941f 100644 --- a/trcli/commands/cmd_export_gherkin.py +++ b/trcli/commands/cmd_export_gherkin.py @@ -23,7 +23,6 @@ metavar="", help="Output path for the .feature file. 
If not specified, prints to stdout.", ) -@click.option("-v", "--verbose", is_flag=True, help="Enable verbose logging output.") @click.pass_context @pass_environment def cli(environment: Environment, context: click.Context, case_id: int, output: str, **kwargs): @@ -53,10 +52,6 @@ def cli(environment: Environment, context: click.Context, case_id: int, output: environment.set_parameters(context) environment.check_for_required_parameters() - # Set up logging - if kwargs.get("verbose"): - environment.verbose = True - try: environment.vlog(f"Target case ID: {case_id}") environment.vlog(f"API endpoint: GET /api/v2/get_bdd/{case_id}") diff --git a/trcli/commands/cmd_import_gherkin.py b/trcli/commands/cmd_import_gherkin.py index 1ec9e87..95bddf8 100644 --- a/trcli/commands/cmd_import_gherkin.py +++ b/trcli/commands/cmd_import_gherkin.py @@ -25,7 +25,6 @@ required=True, help="TestRail section ID where test cases will be created.", ) -@click.option("-v", "--verbose", is_flag=True, help="Enable verbose logging output.") @click.option("--json-output", is_flag=True, help="Output case IDs in JSON format.") @click.pass_context @pass_environment @@ -53,9 +52,7 @@ def cli(environment: Environment, context: click.Context, file: str, section_id: environment.set_parameters(context) environment.check_for_required_parameters() - # Set up logging - if kwargs.get("verbose"): - environment.verbose = True + json_output = kwargs.get("json_output", False) try: # Read the feature file @@ -75,7 +72,8 @@ def cli(environment: Environment, context: click.Context, file: str, section_id: environment.vlog(f"API endpoint: POST /api/v2/add_bdd/{section_id}") # Initialize API client - environment.log("Connecting to TestRail...") + if not json_output: + environment.log("Connecting to TestRail...") # Create APIClient uploader_metadata = APIClient.build_uploader_metadata(version=trcli.__version__) @@ -103,7 +101,8 @@ def cli(environment: Environment, context: click.Context, file: str, section_id: ) # Upload feature file - environment.log(f"Uploading feature file to TestRail...") + if not json_output: + environment.log(f"Uploading feature file to TestRail...") case_ids, error_message = api_request_handler.add_bdd(section_id, feature_content) if error_message: diff --git a/trcli/commands/cmd_parse_cucumber.py b/trcli/commands/cmd_parse_cucumber.py index 98bd46b..eaeddab 100644 --- a/trcli/commands/cmd_parse_cucumber.py +++ b/trcli/commands/cmd_parse_cucumber.py @@ -4,7 +4,7 @@ from trcli.api.results_uploader import ResultsUploader from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS from trcli.commands.results_parser_helpers import results_parser_options, print_config -from trcli.constants import FAULT_MAPPING +from trcli.constants import FAULT_MAPPING, ProjectErrors from trcli.data_classes.validation_exception import ValidationException from trcli.readers.cucumber_json import CucumberParser @@ -22,42 +22,27 @@ metavar="", help="Section ID for uploading .feature file (required if --upload-feature is used).", ) +@click.option( + "-v", + "--verbose", + is_flag=True, + help="Enable verbose logging output.", +) @click.pass_context @pass_environment def cli(environment: Environment, context: click.Context, *args, **kwargs): """Parse Cucumber JSON results and upload to TestRail This command parses Cucumber JSON test results and uploads them to TestRail. 
- It supports two workflows: - - Workflow 1 - Upload Results Only (requires existing test cases): - trcli parse_cucumber -f results.json -n --project-id 1 --suite-id 2 - - Workflow 2 - Create Cases + Upload Results (via BDD): - trcli parse_cucumber -f results.json --upload-feature \\ - --feature-section-id 123 --project-id 1 --suite-id 2 - - The --upload-feature flag will: - 1. Generate a .feature file from the Cucumber JSON - 2. Upload it to TestRail via add_bdd endpoint (applying mapping rules) - 3. Retrieve the created case IDs - 4. Upload test results to those cases - - Generated .feature Mapping Rules (Cucumber JSON → .feature → TestRail): - - Feature name/description → Feature: + free text → Test Case name + Preconditions - - Background → Background: → BDD Scenario field - - Scenarios → Scenario:/Scenario Outline: → BDD Scenario field - - Rules → Rule: → BDD Scenario field - - Examples → Examples: table → BDD field (under parent scenario) - - Feature/Scenario tags → @Tags → Reference/BDD fields - - Without --upload-feature, test cases must already exist in TestRail - and be matched via automation_id (use --case-matcher option). """ environment.cmd = "parse_cucumber" environment.set_parameters(context) environment.check_for_required_parameters() + # Set verbose mode if requested + if kwargs.get("verbose"): + environment.verbose = True + # Validate feature upload options upload_feature = kwargs.get("upload_feature", False) feature_section_id = kwargs.get("feature_section_id") @@ -73,44 +58,182 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): parsed_suites = CucumberParser(environment).parse_file() # Workflow: Upload feature file if requested - if upload_feature: - environment.log("\n=== Phase 1: Uploading Feature File ===") + # Only create test cases if auto-creation is enabled + if upload_feature and environment.auto_creation_response: + environment.log("\n=== Phase 1: Creating BDD Test Cases ===") - # Generate feature file content - parser = CucumberParser(environment) - feature_content = parser.generate_feature_file() + # Setup API client + from trcli.api.api_request_handler import ApiRequestHandler + from trcli.api.api_client import APIClient + import trcli - if not feature_content: - environment.elog("Error: Could not generate feature file from Cucumber JSON") - exit(1) + environment.vlog("Initializing API client for BDD upload...") + uploader_metadata = APIClient.build_uploader_metadata(version=trcli.__version__) + api_client = APIClient( + host_name=environment.host, + verify=not environment.insecure, + verbose_logging_function=environment.vlog, + logging_function=environment.log, + uploader_metadata=uploader_metadata, + ) - # Upload feature file - from trcli.api.api_request_handler import ApiRequestHandler + # Set credentials + api_client.username = environment.username + api_client.password = environment.password + api_client.api_key = environment.key + + # Create minimal suite for ApiRequestHandler + minimal_suite = parsed_suites[0] if parsed_suites else None + if not minimal_suite: + from trcli.data_classes.dataclass_testrail import TestRailSuite + + minimal_suite = TestRailSuite(name="Cucumber BDD", testsections=[]) + # Set suite_id from environment if provided + if environment.suite_id: + minimal_suite.suite_id = environment.suite_id + + # Create ApiRequestHandler api_handler = ApiRequestHandler( environment=environment, - suites_input=parsed_suites, - project_id=environment.project_id, + api_client=api_client, + suites_data=minimal_suite, ) - 
environment.log(f"Uploading generated .feature file to section {feature_section_id}...") - case_ids, error_message = api_handler.add_bdd(feature_section_id, feature_content) + # Resolve project to get actual project_id + environment.log("Checking project. ", new_line=False) + project_data = api_handler.get_project_data(environment.project, environment.project_id) + + # Validate project was found + if project_data.project_id == ProjectErrors.not_existing_project: + environment.elog(f"\n{project_data.error_message}") + exit(1) + elif project_data.project_id == ProjectErrors.other_error: + environment.elog(f"\nError checking project: {project_data.error_message}") + exit(1) + elif project_data.project_id == ProjectErrors.multiple_project_same_name: + environment.elog(f"\nError checking project: {project_data.error_message}") + exit(1) + + environment.log("Done.") + resolved_project_id = project_data.project_id + + # Get BDD template ID + environment.log("Getting BDD template ID...") + bdd_template_id, error_message = api_handler.get_bdd_template_id(resolved_project_id) if error_message: - environment.elog(f"Error uploading feature file: {error_message}") + environment.elog(f"Error getting BDD template: {error_message}") + exit(1) + + environment.vlog(f"Using BDD template ID: {bdd_template_id}") + + # Load Cucumber JSON to access raw feature data + parser = CucumberParser(environment) + with open(environment.file, "r", encoding="utf-8") as f: + cucumber_data = json.load(f) + + if not isinstance(cucumber_data, list) or not cucumber_data: + environment.elog("Error: Invalid Cucumber JSON format") exit(1) - environment.log(f"✓ Created/updated {len(case_ids)} test case(s)") + # Create BDD test cases (one per feature) + environment.log("Creating BDD test cases from features...") + case_ids = [] + feature_scenario_counts = [] # Track how many scenarios per feature + + for feature in cucumber_data: + feature_name = feature.get("name", "Untitled Feature") + + # Count scenarios in this feature (excluding backgrounds) + scenario_count = sum( + 1 + for element in feature.get("elements", []) + if element.get("type", "") in ("scenario", "scenario_outline") + ) + + if scenario_count == 0: + environment.vlog(f"Skipping feature '{feature_name}' - no scenarios found") + continue + + # Generate complete .feature file content for this feature + environment.vlog(f"Generating .feature file for feature: {feature_name}") + feature_content = parser._generate_feature_content(feature) + + # Upload .feature file via add_bdd endpoint + environment.vlog(f"Uploading feature '{feature_name}' with {scenario_count} scenario(s)") + returned_case_ids, error_message = api_handler.add_bdd( + section_id=feature_section_id, feature_content=feature_content + ) + + if error_message: + environment.elog(f"Error creating BDD test case for feature '{feature_name}': {error_message}") + exit(1) + + if not returned_case_ids or len(returned_case_ids) == 0: + environment.elog(f"Error: add_bdd did not return a case ID for feature '{feature_name}'") + exit(1) + + case_id = returned_case_ids[0] # add_bdd returns list with one case ID + case_ids.append(case_id) + feature_scenario_counts.append(scenario_count) + environment.vlog(f" Created case ID: {case_id} (covers {scenario_count} scenario(s))") + + # Set automation_id on the created test case for future matching + # Use feature name as automation_id (one TestRail case = one feature) + automation_id = feature_name + success, error_message = api_handler.update_case_automation_id(case_id, 
automation_id) + + if not success: + environment.log(f" Warning: Failed to set automation_id: {error_message}") + else: + environment.vlog(f" Set automation_id: '{automation_id}'") + + environment.log(f"✓ Successfully created {len(case_ids)} BDD test case(s)") environment.log(f" Case IDs: {', '.join(map(str, case_ids))}") - # Update parsed suites with case IDs (if we can map them) - # Note: This mapping assumes the order is preserved, which may not always be true - # A more robust implementation would match by automation_id - environment.log("\nNote: Proceeding to upload results for matched cases...") + # Map returned case IDs to parsed test cases + environment.vlog("\nMapping case IDs to test results...") + + # Map case IDs to sections (one case ID per feature/section) + # Each feature creates one test case in TestRail but may have multiple scenario results + total_mapped = 0 + if len(case_ids) != len(parsed_suites[0].testsections): + environment.elog( + f"Error: Mismatch between features ({len(case_ids)}) and parsed sections ({len(parsed_suites[0].testsections)})" + ) + exit(1) + + for section, case_id, scenario_count in zip( + parsed_suites[0].testsections, case_ids, feature_scenario_counts + ): + environment.vlog( + f"Mapping case ID {case_id} to section '{section.name}' ({len(section.testcases)} scenario(s))" + ) + + # Assign the same case ID to ALL test cases (scenarios) in this section + for test_case in section.testcases: + test_case.case_id = case_id + if test_case.result: + test_case.result.case_id = case_id + total_mapped += 1 + + environment.vlog(f"Mapped {len(case_ids)} case ID(s) to {total_mapped} test result(s)") + + environment.log("\nProceeding to upload test results...") + elif upload_feature and not environment.auto_creation_response: + # Auto-creation is disabled, skip test case creation + environment.log("\n=== Skipping BDD Test Case Creation ===") + environment.log("Auto-creation disabled (-n flag). 
Will match scenarios using automation_id.") # Upload test results environment.log("\n=== Phase 2: Uploading Test Results ===") + # Ensure all suites have suite_id set from environment + for suite in parsed_suites: + if environment.suite_id and not suite.suite_id: + suite.suite_id = environment.suite_id + run_id = None for suite in parsed_suites: result_uploader = ResultsUploader(environment=environment, suite=suite) diff --git a/trcli/commands/cmd_parse_gherkin.py b/trcli/commands/cmd_parse_gherkin.py index fedcad2..c9af7f0 100644 --- a/trcli/commands/cmd_parse_gherkin.py +++ b/trcli/commands/cmd_parse_gherkin.py @@ -18,38 +18,20 @@ ) @click.option("--output", type=click.Path(), metavar="", help="Optional output file path to save parsed JSON.") @click.option("--pretty", is_flag=True, help="Pretty print JSON output with indentation.") -@click.option( - "--case-matcher", - metavar="", - default="auto", - type=click.Choice(["auto", "name", "property"], case_sensitive=False), - help="Mechanism to match cases between the report and TestRail.", -) -@click.option("--suite-name", metavar="", help="Override suite name (defaults to feature name).") -@click.option("-v", "--verbose", is_flag=True, help="Enable verbose logging output.") @click.pass_context @pass_environment -def cli(environment: Environment, context: click.Context, file: str, output: str, pretty: bool, **kwargs): - """Parse Gherkin .feature files +def cli(environment: Environment, context: click.Context, file: str, output: str, pretty: bool): + """Parse Gherkin .feature file locally This command parses Gherkin/BDD .feature files and converts them into - TestRail data structure format. + TestRail data structure format without uploading to TestRail. """ environment.cmd = "parse_gherkin" environment.file = file - environment.case_matcher = kwargs.get("case_matcher", "auto").upper() - environment.suite_name = kwargs.get("suite_name") - - # Set up logging - if kwargs.get("verbose"): - environment.verbose = True try: # Parse the feature file - if environment.verbose: - environment.log(f"Starting Gherkin parser for file: {file}") - parser = GherkinParser(environment) parsed_suites = parser.parse_file() @@ -148,9 +130,6 @@ def cli(environment: Environment, context: click.Context, file: str, output: str # Print to stdout print(json_output) - if environment.verbose: - environment.log("✓ Gherkin parsing completed successfully") - except FileNotFoundError: environment.elog(FAULT_MAPPING["missing_file"]) exit(1) @@ -159,8 +138,4 @@ def cli(environment: Environment, context: click.Context, file: str, output: str exit(1) except Exception as e: environment.elog(f"Unexpected error during parsing: {str(e)}") - if environment.verbose: - import traceback - - environment.elog(traceback.format_exc()) exit(1) diff --git a/trcli/readers/cucumber_json.py b/trcli/readers/cucumber_json.py index d4b2b6d..4a1ffd3 100644 --- a/trcli/readers/cucumber_json.py +++ b/trcli/readers/cucumber_json.py @@ -1,6 +1,6 @@ import json from pathlib import Path -from beartype.typing import List, Dict, Any, Optional +from beartype.typing import List, Dict, Any, Optional, Tuple from trcli.cli import Environment from trcli.data_classes.data_parsers import MatchersParser, TestRailCaseFieldsOptimizer @@ -308,6 +308,102 @@ def generate_feature_file(self) -> str: return "\n\n".join(feature_files) + def generate_scenario_gherkin(self, feature: Dict[str, Any], scenario: Dict[str, Any]) -> Tuple[str, List[str]]: + """Generate Gherkin content for a single scenario with feature context + + 
This creates a complete .feature file containing just one scenario, + including the feature header, tags, and description. + + Args: + feature: Feature object from Cucumber JSON + scenario: Scenario object from Cucumber JSON + + Returns: + Tuple of (gherkin_content, all_tags) + - gherkin_content: Complete Gherkin .feature file for single scenario + - all_tags: List of all tags (feature + scenario) + """ + lines = [] + + # Collect all tags (feature + scenario) + feature_tags = self._extract_tags(feature.get("tags", [])) + scenario_tags = self._extract_tags(scenario.get("tags", [])) + all_tags = feature_tags + scenario_tags + + # Feature tags + if feature_tags: + lines.append(" ".join(feature_tags)) + + # Feature header + feature_name = feature.get("name", "Untitled Feature") + feature_description = feature.get("description", "") + + lines.append(f"Feature: {feature_name}") + if feature_description: + for desc_line in feature_description.split("\n"): + if desc_line.strip(): + lines.append(f" {desc_line.strip()}") + + lines.append("") # Empty line after feature header + + # Background (if exists in feature) - include for context + background = None + for element in feature.get("elements", []): + if element.get("type") == "background": + background = element + break + + if background: + background_content = self._generate_background_content(background) + if background_content: + lines.append(background_content) + lines.append("") + + # Scenario tags + if scenario_tags: + lines.append(" " + " ".join(scenario_tags)) + + # Scenario content + scenario_type = scenario.get("type", "scenario") + scenario_name = scenario.get("name", "Untitled Scenario") + + if scenario_type == "scenario_outline": + lines.append(f" Scenario Outline: {scenario_name}") + else: + lines.append(f" Scenario: {scenario_name}") + + # Steps + for step in scenario.get("steps", []): + keyword = step.get("keyword", "").strip() + step_name = step.get("name", "") + lines.append(f" {keyword} {step_name}") + + # Examples table (for Scenario Outline) + if scenario_type == "scenario_outline": + examples = scenario.get("examples", []) + if examples: + for example_group in examples: + lines.append("") # Empty line before examples + + # Examples tags (if any) + example_tags = self._extract_tags(example_group.get("tags", [])) + if example_tags: + lines.append(" " + " ".join(example_tags)) + + # Examples keyword + lines.append(" Examples:") + + # Examples table + rows = example_group.get("rows", []) + if rows: + for row in rows: + cells = row.get("cells", []) + if cells: + row_content = " | ".join(cells) + lines.append(f" | {row_content} |") + + return "\n".join(lines), all_tags + def _generate_feature_content(self, feature: Dict[str, Any]) -> str: """Generate Gherkin feature content from Cucumber feature object @@ -470,9 +566,10 @@ def _generate_rule_content(self, rule: Dict[str, Any]) -> str: if desc_line.strip(): lines.append(f" {desc_line.strip()}") - # Background within rule (if any) + # Process children in order: Background first, then scenarios for element in rule.get("children", []): element_type = element.get("type", "") + if element_type == "background": lines.append("") background_content = self._generate_background_content(element) @@ -480,10 +577,7 @@ def _generate_rule_content(self, rule: Dict[str, Any]) -> str: for line in background_content.split("\n"): lines.append(" " + line if line else "") - # Scenarios within rule - for element in rule.get("children", []): - element_type = element.get("type", "") - if element_type in 
("scenario", "scenario_outline"): + elif element_type in ("scenario", "scenario_outline"): lines.append("") scenario_content = self._generate_scenario_content(element) # Indent scenario under rule From 95ce4c53c61788edbf2c8b40d5c548b814dd231f Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 3 Dec 2025 21:19:12 +0800 Subject: [PATCH 07/33] TRCLI-198 updated unit and functional tests for bdd implementation --- tests/pytest.ini | 7 +- tests/test_cmd_export_gherkin.py | 5 +- tests/test_cmd_import_gherkin.py | 5 +- tests/test_cmd_parse_cucumber.py | 113 +++++++++++++++++++++++++++---- tests/test_cmd_parse_gherkin.py | 48 ------------- tests/test_data/cli_test_data.py | 1 + tests_e2e/test_end2end.py | 15 ++-- 7 files changed, 126 insertions(+), 68 deletions(-) diff --git a/tests/pytest.ini b/tests/pytest.ini index 7cb31ac..9d5f3be 100644 --- a/tests/pytest.ini +++ b/tests/pytest.ini @@ -12,4 +12,9 @@ markers = data_provider: tests for data provider project_based_client: mark a test as a project-based client test. proxy: test for proxy feature - \ No newline at end of file + cmd_import_gherkin: tests for import_gherkin command + cmd_export_gherkin: tests for export_gherkin command + cmd_parse_gherkin: tests for parse_gherkin command + cmd_parse_cucumber: tests for parse_cucumber command + parse_gherkin: tests for gherkin parser + parse_cucumber: tests for cucumber parser diff --git a/tests/test_cmd_export_gherkin.py b/tests/test_cmd_export_gherkin.py index e305c00..e5ed788 100644 --- a/tests/test_cmd_export_gherkin.py +++ b/tests/test_cmd_export_gherkin.py @@ -100,7 +100,10 @@ def test_export_gherkin_verbose_logging(self, mock_api_client_class, mock_api_ha mock_api_handler_class.return_value = mock_handler mock_handler.get_bdd.return_value = (self.sample_feature_content, "") - result = self.runner.invoke(cmd_export_gherkin.cli, ["--case-id", "456", "--verbose"], obj=self.environment) + # Enable verbose mode via environment (verbose is now a global option) + self.environment.verbose = True + + result = self.runner.invoke(cmd_export_gherkin.cli, ["--case-id", "456"], obj=self.environment) assert result.exit_code == 0 diff --git a/tests/test_cmd_import_gherkin.py b/tests/test_cmd_import_gherkin.py index 0bfde94..16c95cc 100644 --- a/tests/test_cmd_import_gherkin.py +++ b/tests/test_cmd_import_gherkin.py @@ -104,13 +104,16 @@ def test_import_gherkin_verbose_logging(self, mock_api_client_class, mock_api_ha mock_api_handler_class.return_value = mock_handler mock_handler.add_bdd.return_value = ([456], "") + # Enable verbose mode via environment (verbose is now a global option) + self.environment.verbose = True + with self.runner.isolated_filesystem(): with open("test.feature", "w") as f: f.write("Feature: Test\n Scenario: Test\n") result = self.runner.invoke( cmd_import_gherkin.cli, - ["--file", "test.feature", "--section-id", "123", "--verbose"], + ["--file", "test.feature", "--section-id", "123"], obj=self.environment, ) diff --git a/tests/test_cmd_parse_cucumber.py b/tests/test_cmd_parse_cucumber.py index be8ee22..1c2cfea 100644 --- a/tests/test_cmd_parse_cucumber.py +++ b/tests/test_cmd_parse_cucumber.py @@ -24,6 +24,7 @@ def setup_method(self): self.environment.password = "password" self.environment.project = "Test Project" self.environment.project_id = 1 + self.environment.auto_creation_response = True # Enable auto-creation for tests @pytest.mark.cmd_parse_cucumber @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") @@ -56,22 +57,46 @@ def 
test_parse_cucumber_workflow1_results_only(self, mock_parser_class, mock_upl @patch("trcli.api.api_request_handler.ApiRequestHandler") @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + @patch( + "builtins.open", + new_callable=mock.mock_open, + read_data='[{"name":"Test Feature","elements":[{"type":"scenario","name":"Test Scenario"}]}]', + ) def test_parse_cucumber_workflow2_upload_feature( - self, mock_parser_class, mock_uploader_class, mock_api_handler_class + self, mock_open, mock_parser_class, mock_uploader_class, mock_api_handler_class ): - """Test Workflow 2: Generate feature, upload, then upload results""" + """Test Workflow 2: Create BDD test cases per feature, then upload results""" # Mock parser mock_parser = MagicMock() mock_parser_class.return_value = mock_parser + + # Mock suite with test cases mock_suite = MagicMock() mock_suite.name = "Test Suite" + mock_section = MagicMock() + mock_section.name = "Test Feature" + mock_case = MagicMock() + mock_case.case_id = None + mock_case.result = MagicMock() + mock_section.testcases = [mock_case] + mock_suite.testsections = [mock_section] mock_parser.parse_file.return_value = [mock_suite] - mock_parser.generate_feature_file.return_value = "Feature: Test\n Scenario: Test\n" + + # Mock _generate_feature_content to return Gherkin content + mock_parser._generate_feature_content.return_value = "Feature: Test\n Scenario: Test\n Given test step\n" # Mock API handler mock_api_handler = MagicMock() mock_api_handler_class.return_value = mock_api_handler - mock_api_handler.add_bdd.return_value = ([101, 102], "") + + # Mock project data resolution + mock_project_data = MagicMock() + mock_project_data.project_id = 1 + mock_api_handler.get_project_data.return_value = mock_project_data + + mock_api_handler.get_bdd_template_id.return_value = (2, "") # BDD template ID = 2 + mock_api_handler.add_bdd.return_value = ([101], "") # Returns list with case ID = 101 + mock_api_handler.update_case_automation_id.return_value = (True, "") # Success updating automation_id # Mock uploader mock_uploader = MagicMock() @@ -95,8 +120,9 @@ def test_parse_cucumber_workflow2_upload_feature( ) assert result.exit_code == 0 - mock_parser.generate_feature_file.assert_called_once() + mock_api_handler.get_bdd_template_id.assert_called_once() mock_api_handler.add_bdd.assert_called_once() + mock_api_handler.update_case_automation_id.assert_called_once() mock_uploader.upload_results.assert_called() @pytest.mark.cmd_parse_cucumber @@ -120,6 +146,51 @@ def test_parse_cucumber_upload_feature_requires_section_id(self): assert result.exit_code == 1 assert "feature-section-id is required" in result.output.lower() + @pytest.mark.cmd_parse_cucumber + @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_upload_feature_with_no_flag(self, mock_parser_class, mock_uploader_class): + """Test that -n flag skips test case creation with --upload-feature""" + # Mock parser + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_suite = MagicMock() + mock_suite.name = "Test Suite" + mock_section = MagicMock() + mock_section.name = "Test Feature" + mock_section.testcases = [] + mock_suite.testsections = [mock_section] + mock_parser.parse_file.return_value = [mock_suite] + + # Mock uploader + mock_uploader = MagicMock() + mock_uploader_class.return_value = mock_uploader + mock_uploader.last_run_id = 123 + + # Set 
auto_creation_response to False (simulates -n flag) + self.environment.auto_creation_response = False + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + [ + "--file", + self.test_cucumber_path, + "--suite-id", + "2", + "--upload-feature", + "--feature-section-id", + "456", + "--title", + "Test Run", + ], + obj=self.environment, + ) + + assert result.exit_code == 0 + assert "skipping bdd test case creation" in result.output.lower() + assert "auto-creation disabled" in result.output.lower() + mock_uploader.upload_results.assert_called() + @pytest.mark.cmd_parse_cucumber def test_parse_cucumber_missing_file(self): """Test with non-existent Cucumber JSON file""" @@ -178,14 +249,14 @@ def test_parse_cucumber_empty_json(self, mock_parser_class): @pytest.mark.cmd_parse_cucumber @patch("trcli.api.api_request_handler.ApiRequestHandler") @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") - def test_parse_cucumber_feature_generation_failure(self, mock_parser_class, mock_api_handler_class): - """Test when feature file generation fails""" + @patch("builtins.open", new_callable=mock.mock_open, read_data="[]") + def test_parse_cucumber_invalid_cucumber_json(self, mock_open, mock_parser_class, mock_api_handler_class): + """Test with invalid Cucumber JSON structure (empty array)""" # Mock parser mock_parser = MagicMock() mock_parser_class.return_value = mock_parser mock_suite = MagicMock() mock_parser.parse_file.return_value = [mock_suite] - mock_parser.generate_feature_file.return_value = "" # Empty content result = self.runner.invoke( cmd_parse_cucumber.cli, @@ -204,23 +275,39 @@ def test_parse_cucumber_feature_generation_failure(self, mock_parser_class, mock ) assert result.exit_code == 1 - assert "could not generate feature file" in result.output.lower() + # Check that it fails with any appropriate error (either JSON format or parsing error) + assert "invalid cucumber json format" in result.output.lower() or "error parsing" in result.output.lower() @pytest.mark.cmd_parse_cucumber @patch("trcli.api.api_request_handler.ApiRequestHandler") @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") - def test_parse_cucumber_api_error_during_feature_upload(self, mock_parser_class, mock_api_handler_class): - """Test API error during feature file upload""" + @patch( + "builtins.open", + new_callable=mock.mock_open, + read_data='[{"name":"Test Feature","elements":[{"type":"scenario","name":"Test Scenario"}]}]', + ) + def test_parse_cucumber_api_error_during_feature_upload(self, mock_open, mock_parser_class, mock_api_handler_class): + """Test API error during BDD test case creation""" # Mock parser mock_parser = MagicMock() mock_parser_class.return_value = mock_parser mock_suite = MagicMock() + mock_section = MagicMock() + mock_section.name = "Test Feature" + mock_suite.testsections = [mock_section] mock_parser.parse_file.return_value = [mock_suite] - mock_parser.generate_feature_file.return_value = "Feature: Test\n" + mock_parser._generate_feature_content.return_value = "Feature: Test\n Scenario: Test\n" # Mock API handler with error mock_api_handler = MagicMock() mock_api_handler_class.return_value = mock_api_handler + + # Mock project data resolution + mock_project_data = MagicMock() + mock_project_data.project_id = 1 + mock_api_handler.get_project_data.return_value = mock_project_data + + mock_api_handler.get_bdd_template_id.return_value = (2, "") mock_api_handler.add_bdd.return_value = ([], "API Error: Section not found") result = self.runner.invoke( @@ -240,7 +327,7 @@ def 
test_parse_cucumber_api_error_during_feature_upload(self, mock_parser_class, ) assert result.exit_code == 1 - assert "error uploading feature file" in result.output.lower() + assert "error creating" in result.output.lower() @pytest.mark.cmd_parse_cucumber def test_parse_cucumber_required_parameters(self): diff --git a/tests/test_cmd_parse_gherkin.py b/tests/test_cmd_parse_gherkin.py index 11a30e5..1d6624e 100644 --- a/tests/test_cmd_parse_gherkin.py +++ b/tests/test_cmd_parse_gherkin.py @@ -66,54 +66,6 @@ def test_parse_gherkin_pretty_print(self): assert "\n" in json_str assert " " in json_str # Indentation - @pytest.mark.cmd_parse_gherkin - def test_parse_gherkin_custom_suite_name(self): - """Test parsing with custom suite name""" - custom_suite_name = "My Custom Suite" - result = self.runner.invoke( - cmd_parse_gherkin.cli, ["--file", self.test_feature_path, "--suite-name", custom_suite_name] - ) - - assert result.exit_code == 0 - json_start = result.output.find("{") - output_data = json.loads(result.output[json_start:]) - assert output_data["suites"][0]["name"] == custom_suite_name - - @pytest.mark.cmd_parse_gherkin - def test_parse_gherkin_case_matcher_name(self): - """Test parsing with NAME case matcher""" - result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", self.test_feature_path, "--case-matcher", "name"]) - - assert result.exit_code == 0 - json_start = result.output.find("{") - output_data = json.loads(result.output[json_start:]) - assert "suites" in output_data - - @pytest.mark.cmd_parse_gherkin - def test_parse_gherkin_case_matcher_property(self): - """Test parsing with PROPERTY case matcher""" - result = self.runner.invoke( - cmd_parse_gherkin.cli, ["--file", self.test_feature_path, "--case-matcher", "property"] - ) - - assert result.exit_code == 0 - json_start = result.output.find("{") - output_data = json.loads(result.output[json_start:]) - assert "suites" in output_data - - @pytest.mark.cmd_parse_gherkin - def test_parse_gherkin_verbose_logging(self): - """Test parsing with verbose logging enabled""" - result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", self.test_feature_path, "--verbose"]) - - assert result.exit_code == 0 - # Extract JSON from output (may have verbose logs before and after) - json_start = result.output.find("{") - json_end = result.output.rfind("}") + 1 # Find last closing brace - json_str = result.output[json_start:json_end] - output_data = json.loads(json_str) - assert "suites" in output_data - @pytest.mark.cmd_parse_gherkin def test_parse_gherkin_missing_file(self): """Test parsing with non-existent file""" diff --git a/tests/test_data/cli_test_data.py b/tests/test_data/cli_test_data.py index cf6f59b..c282137 100644 --- a/tests/test_data/cli_test_data.py +++ b/tests/test_data/cli_test_data.py @@ -68,6 +68,7 @@ " - parse_cucumber: Cucumber JSON results (BDD)\n" " - import_gherkin: Upload .feature files to TestRail BDD\n" " - export_gherkin: Export BDD test cases as .feature files\n" + " - parse_gherkin: Parse Gherkin .feature file locally\n" " - parse_robot: Robot Framework XML Files\n" " - parse_openapi: OpenAPI YML Files\n" " - add_run: Create a new test run\n" diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index 93ec0f7..6cb60a8 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -2055,10 +2055,10 @@ def test_import_gherkin_with_verbose(self): trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + -v \\ import_gherkin \\ -f 
"reports_gherkin/sample_bdd.feature" \\ - --section-id 2388 \\ - --verbose + --section-id 2388 """ ) _assert_contains( @@ -2092,6 +2092,10 @@ def test_export_gherkin_download_to_stdout(self): json_start = import_output.find("{") if json_start >= 0: json_str = import_output[json_start:] + # Remove "DONE" and any trailing text after the JSON + json_end = json_str.find("}") + if json_end >= 0: + json_str = json_str[: json_end + 1] output_data = json.loads(json_str) case_id = output_data.get("case_ids", [])[0] if output_data.get("case_ids") else None @@ -2130,6 +2134,10 @@ def test_export_gherkin_download_to_file(self): json_start = import_output.find("{") if json_start >= 0: json_str = import_output[json_start:] + # Remove "DONE" and any trailing text after the JSON + json_end = json_str.find("}") + if json_end >= 0: + json_str = json_str[: json_end + 1] output_data = json.loads(json_str) case_id = output_data.get("case_ids", [])[0] if output_data.get("case_ids") else None @@ -2287,7 +2295,6 @@ def test_bdd_help_commands(self): "-f, --file", "--section-id", "--json-output", - "-v, --verbose", ], ) @@ -2295,7 +2302,7 @@ def test_bdd_help_commands(self): export_gherkin_help = _run_cmd("trcli export_gherkin --help") _assert_contains( export_gherkin_help, - ["Export BDD test case from TestRail as .feature file", "--case-id", "--output", "-v, --verbose"], + ["Export BDD test case from TestRail as .feature file", "--case-id", "--output"], ) # Test parse_cucumber help From 9a8a1d669d3961bde9b795f3b8ea709c7706c694 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Mon, 15 Dec 2025 17:54:31 +0800 Subject: [PATCH 08/33] TRCLI-21 Updated parse_junit with new special parser option for bdd --- CHANGELOG.MD | 3 + trcli/commands/cmd_parse_junit.py | 91 +++--- trcli/readers/junit_xml.py | 487 +++++++++++++++++++++++++++--- 3 files changed, 491 insertions(+), 90 deletions(-) diff --git a/CHANGELOG.MD b/CHANGELOG.MD index a49c9ab..80a3f13 100644 --- a/CHANGELOG.MD +++ b/CHANGELOG.MD @@ -13,6 +13,9 @@ _released 12-01-2025 ### Fixed - Added new BDD/Gherkin parser command parse_bdd for behavioral driven development-related testing +### Added + - **BDD Support for parse_junit**: Added `--special-parser bdd` option to group multiple JUnit scenarios into a single TestRail BDD test case; supports case ID extraction, BDD case validation and result aggregation + ## [1.12.4] _released 11-03-2025 diff --git a/trcli/commands/cmd_parse_junit.py b/trcli/commands/cmd_parse_junit.py index e24fcea..744ec1f 100644 --- a/trcli/commands/cmd_parse_junit.py +++ b/trcli/commands/cmd_parse_junit.py @@ -18,38 +18,35 @@ "--special-parser", metavar="", default="junit", - type=click.Choice(["junit", "saucectl"], case_sensitive=False), - help="Optional special parser option for specialized JUnit reports." + type=click.Choice(["junit", "saucectl", "bdd"], case_sensitive=False), + help="Optional special parser option for specialized JUnit reports. Use 'bdd' for BDD framework JUnit output.", ) @click.option( - "-a", "--assign", + "-a", + "--assign", "assign_failed_to", metavar="", - help="Comma-separated list of user emails to assign failed test results to." + help="Comma-separated list of user emails to assign failed test results to.", ) @click.option( "--test-run-ref", metavar="", - help="Comma-separated list of reference IDs to append to the test run (up to 250 characters total)." -) -@click.option( - "--json-output", - is_flag=True, - help="Output reference operation results in JSON format." 
+ help="Comma-separated list of reference IDs to append to the test run (up to 250 characters total).", ) +@click.option("--json-output", is_flag=True, help="Output reference operation results in JSON format.") @click.option( "--update-existing-cases", type=click.Choice(["yes", "no"], case_sensitive=False), default="no", metavar="", - help="Update existing TestRail cases with values from JUnit properties (default: no)." + help="Update existing TestRail cases with values from JUnit properties (default: no).", ) @click.option( "--update-strategy", type=click.Choice(["append", "replace"], case_sensitive=False), default="append", metavar="", - help="Strategy for combining incoming values with existing case field values, whether to append or replace (default: append)." + help="Strategy for combining incoming values with existing case field values, whether to append or replace (default: append).", ) @click.pass_context @pass_environment @@ -58,13 +55,13 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): environment.cmd = "parse_junit" environment.set_parameters(context) environment.check_for_required_parameters() - + if environment.test_run_ref is not None: validation_error = _validate_test_run_ref(environment.test_run_ref) if validation_error: environment.elog(validation_error) exit(1) - + settings.ALLOW_ELAPSED_MS = environment.allow_ms print_config(environment) try: @@ -75,20 +72,20 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): result_uploader = ResultsUploader(environment=environment, suite=suite) result_uploader.upload_results() - if run_id is None and hasattr(result_uploader, 'last_run_id'): + if run_id is None and hasattr(result_uploader, "last_run_id"): run_id = result_uploader.last_run_id - + # Collect case update results - if hasattr(result_uploader, 'case_update_results'): + if hasattr(result_uploader, "case_update_results"): case_update_results = result_uploader.case_update_results - + if environment.test_run_ref and run_id: _handle_test_run_references(environment, run_id) - + # Handle case update reporting if enabled if environment.update_existing_cases == "yes" and case_update_results is not None: _handle_case_update_reporting(environment, case_update_results) - + # Exit with error if there were case update failures (after reporting) if case_update_results.get("failed_cases"): exit(1) @@ -116,14 +113,14 @@ def _validate_test_run_ref(test_run_ref: str) -> str: """ if not test_run_ref or not test_run_ref.strip(): return "Error: --test-run-ref cannot be empty or whitespace-only" - - refs = [ref.strip() for ref in test_run_ref.split(',') if ref.strip()] + + refs = [ref.strip() for ref in test_run_ref.split(",") if ref.strip()] if not refs: return "Error: --test-run-ref contains no valid references (malformed input)" - + if len(test_run_ref) > 250: return f"Error: --test-run-ref exceeds 250 character limit ({len(test_run_ref)} characters)" - + return None @@ -135,40 +132,34 @@ def _handle_test_run_references(environment: Environment, run_id: int): from trcli.data_classes.dataclass_testrail import TestRailSuite import json - refs = [ref.strip() for ref in environment.test_run_ref.split(',') if ref.strip()] - - project_client = ProjectBasedClient( - environment=environment, - suite=TestRailSuite(name="temp", suite_id=1) - ) + refs = [ref.strip() for ref in environment.test_run_ref.split(",") if ref.strip()] + + project_client = ProjectBasedClient(environment=environment, suite=TestRailSuite(name="temp", suite_id=1)) 
project_client.resolve_project() - + environment.log(f"Appending references to test run {run_id}...") run_data, added_refs, skipped_refs, error_message = project_client.api_request_handler.append_run_references( run_id, refs ) - + if error_message: environment.elog(f"Error: Failed to append references: {error_message}") exit(1) - + final_refs = run_data.get("refs", "") if run_data else "" - + if environment.json_output: # JSON output - result = { - "run_id": run_id, - "added": added_refs, - "skipped": skipped_refs, - "total_references": final_refs - } + result = {"run_id": run_id, "added": added_refs, "skipped": skipped_refs, "total_references": final_refs} print(json.dumps(result, indent=2)) else: environment.log(f"References appended successfully:") environment.log(f" Run ID: {run_id}") environment.log(f" Total references: {len(final_refs.split(',')) if final_refs else 0}") environment.log(f" Newly added: {len(added_refs)} ({', '.join(added_refs) if added_refs else 'none'})") - environment.log(f" Skipped (duplicates): {len(skipped_refs)} ({', '.join(skipped_refs) if skipped_refs else 'none'})") + environment.log( + f" Skipped (duplicates): {len(skipped_refs)} ({', '.join(skipped_refs) if skipped_refs else 'none'})" + ) if final_refs: environment.log(f" All references: {final_refs}") @@ -178,24 +169,24 @@ def _handle_case_update_reporting(environment: Environment, case_update_results: Handle reporting of case update results. """ import json - + # Handle None input gracefully if case_update_results is None: return - + if environment.json_output: # JSON output for case updates result = { "summary": { "updated_cases": len(case_update_results.get("updated_cases", [])), "skipped_cases": len(case_update_results.get("skipped_cases", [])), - "failed_cases": len(case_update_results.get("failed_cases", [])) + "failed_cases": len(case_update_results.get("failed_cases", [])), }, "details": { "updated_cases": case_update_results.get("updated_cases", []), "skipped_cases": case_update_results.get("skipped_cases", []), - "failed_cases": case_update_results.get("failed_cases", []) - } + "failed_cases": case_update_results.get("failed_cases", []), + }, } print(json.dumps(result, indent=2)) else: @@ -203,13 +194,13 @@ def _handle_case_update_reporting(environment: Environment, case_update_results: updated_cases = case_update_results.get("updated_cases", []) skipped_cases = case_update_results.get("skipped_cases", []) failed_cases = case_update_results.get("failed_cases", []) - + if updated_cases or skipped_cases or failed_cases: environment.log("Case Reference Updates Summary:") environment.log(f" Updated cases: {len(updated_cases)}") environment.log(f" Skipped cases: {len(skipped_cases)}") environment.log(f" Failed cases: {len(failed_cases)}") - + if updated_cases: environment.log(" Updated case details:") for case_info in updated_cases: @@ -217,14 +208,14 @@ def _handle_case_update_reporting(environment: Environment, case_update_results: added = case_info.get("added_refs", []) skipped = case_info.get("skipped_refs", []) environment.log(f" C{case_id}: added {len(added)} refs, skipped {len(skipped)} duplicates") - + if skipped_cases: environment.log(" Skipped case details:") for case_info in skipped_cases: case_id = case_info["case_id"] reason = case_info.get("reason", "Unknown reason") environment.log(f" C{case_id}: {reason}") - + if failed_cases: environment.log(" Failed case details:") for case_info in failed_cases: diff --git a/trcli/readers/junit_xml.py b/trcli/readers/junit_xml.py index 
2218c31..28f847c 100644 --- a/trcli/readers/junit_xml.py +++ b/trcli/readers/junit_xml.py @@ -4,8 +4,7 @@ from unittest import TestCase, TestSuite from xml.etree import ElementTree as etree -from junitparser import ( - JUnitXml, JUnitXmlError, Element, Attr, TestSuite as JUnitTestSuite, TestCase as JUnitTestCase) +from junitparser import JUnitXml, JUnitXmlError, Element, Attr, TestSuite as JUnitTestSuite, TestCase as JUnitTestCase from trcli.cli import Environment from trcli.constants import OLD_SYSTEM_NAME_AUTOMATION_ID @@ -15,16 +14,12 @@ TestRailSuite, TestRailSection, TestRailProperty, - TestRailResult, TestRailSeparatedStep, + TestRailResult, + TestRailSeparatedStep, ) from trcli.readers.file_parser import FileParser -STEP_STATUSES = { - "passed": 1, - "untested": 3, - "skipped": 4, - "failed": 5 -} +STEP_STATUSES = {"passed": 1, "untested": 3, "skipped": 4, "failed": 5} TestCase.id = Attr("id") TestSuite.id = Attr("id") @@ -47,7 +42,7 @@ def __init__(self, environment: Environment): super().__init__(environment) self._case_matcher = environment.case_matcher self._special = environment.special_parser - self._case_result_statuses = {"passed": 1, "skipped": 4,"error": 5, "failure": 5} + self._case_result_statuses = {"passed": 1, "skipped": 4, "error": 5, "failure": 5} self._update_with_custom_statuses() @classmethod @@ -134,7 +129,7 @@ def _get_comment_for_case_result(case: JUnitTestCase) -> str: parts = [ f"Type: {result.type}" if result.type else "", f"Message: {result.message}" if result.message else "", - f"Text: {result.text}" if result.text else "" + f"Text: {result.text}" if result.text else "", ] return "\n".join(part for part in parts if part).strip() @@ -155,7 +150,7 @@ def _parse_case_properties(case): continue elif name.startswith("testrail_result_step"): - status, step = value.split(':', maxsplit=1) + status, step = value.split(":", maxsplit=1) step_obj = TestRailSeparatedStep(step.strip()) step_obj.status_id = STEP_STATUSES[status.lower().strip()] result_steps.append(step_obj) @@ -169,7 +164,7 @@ def _parse_case_properties(case): text = prop._elem.text.strip() if prop._elem.text else None field_value = text or value case_fields.append(field_value) - + # Extract refs for case updates if field_value and field_value.startswith("refs:"): case_refs = field_value[5:].strip() # Remove "refs:" prefix @@ -201,8 +196,9 @@ def _parse_test_cases(self, section) -> List[TestRailCase]: """ automation_id = f"{case.classname}.{case.name}" case_id, case_name = self._extract_case_id_and_name(case) - result_steps, attachments, result_fields, comments, case_fields, case_refs, sauce_session = self._parse_case_properties( - case) + result_steps, attachments, result_fields, comments, case_fields, case_refs, sauce_session = ( + self._parse_case_properties(case) + ) result_fields_dict, case_fields_dict = self._resolve_case_fields(result_fields, case_fields) status_id = self._get_status_id_for_case_result(case) comment = self._get_comment_for_case_result(case) @@ -221,30 +217,31 @@ def _parse_test_cases(self, section) -> List[TestRailCase]: if sauce_session: result.prepend_comment(f"SauceLabs session: {sauce_session}") - automation_id = ( - case_fields_dict.pop(OLD_SYSTEM_NAME_AUTOMATION_ID, None) - or case._elem.get(OLD_SYSTEM_NAME_AUTOMATION_ID, automation_id)) + automation_id = case_fields_dict.pop(OLD_SYSTEM_NAME_AUTOMATION_ID, None) or case._elem.get( + OLD_SYSTEM_NAME_AUTOMATION_ID, automation_id + ) # Create TestRailCase kwargs case_kwargs = { - "title": 
TestRailCaseFieldsOptimizer.extract_last_words(case_name, - TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH), + "title": TestRailCaseFieldsOptimizer.extract_last_words( + case_name, TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH + ), "case_id": case_id, "result": result, "custom_automation_id": automation_id, "case_fields": case_fields_dict, } - + # Only set refs field if case_refs has actual content if case_refs and case_refs.strip(): case_kwargs["refs"] = case_refs - + test_case = TestRailCase(**case_kwargs) - + # Store JUnit references as a temporary attribute for case updates (not serialized) if case_refs and case_refs.strip(): test_case._junit_case_refs = case_refs - + test_cases.append(test_case) return test_cases @@ -256,7 +253,6 @@ def _get_suite_name(self, suite): return suite.name raise ValueError("Suite name is not defined in environment or JUnit report.") - def _parse_sections(self, suite) -> List[TestRailSection]: sections = [] processed_props = [] @@ -272,16 +268,424 @@ def _parse_sections(self, suite) -> List[TestRailSection]: then sub_sections=sub_sections """ properties = self._extract_section_properties(section, processed_props) - test_cases = self._parse_test_cases(section) - self.env.log(f"Processed {len(test_cases)} test cases in section {section.name}.") - sections.append(TestRailSection( - section.name, - testcases=test_cases, - properties=properties, - )) + + # BDD MODE: Group all scenarios under one test case + if self._is_bdd_mode(): + test_case = self._parse_bdd_feature_as_single_case(section) + test_cases = [test_case] if test_case else [] + # STANDARD MODE: One test case per JUnit testcase + else: + test_cases = self._parse_test_cases(section) + + self.env.log(f"Processed {len(test_cases)} test case(s) in section {section.name}.") + sections.append( + TestRailSection( + section.name, + testcases=test_cases, + properties=properties, + ) + ) return sections + def _is_bdd_mode(self) -> bool: + """Check if BDD grouping mode is enabled + + Returns: + True if special parser is 'bdd', False otherwise + """ + return self._special == "bdd" + + def _extract_feature_case_id_from_property(self, testsuite) -> Union[int, None]: + """Extract case ID from testsuite-level properties + + Looks for properties: testrail_case_id, test_id, bdd_case_id + + Args: + testsuite: JUnit testsuite element + + Returns: + Case ID as integer or None if not found + """ + for prop in testsuite.properties(): + if prop.name in ["testrail_case_id", "test_id", "bdd_case_id"]: + case_id_str = prop.value.lower().replace("c", "") + if case_id_str.isnumeric(): + self.env.vlog(f"BDD: Found case ID C{case_id_str} in testsuite property '{prop.name}'") + return int(case_id_str) + return None + + def _extract_case_id_from_testcases(self, testsuite) -> List[tuple]: + """Extract case IDs from testcase properties and names + + Args: + testsuite: JUnit testsuite element + + Returns: + List of tuples (testcase_name, case_id) + """ + testcase_case_ids = [] + + for testcase in testsuite: + tc_case_id = None + + # Check testcase properties first + for case_props in testcase.iterchildren(Properties): + for prop in case_props.iterchildren(Property): + if prop.name == "test_id": + tc_case_id_str = prop.value.lower().replace("c", "") + if tc_case_id_str.isnumeric(): + tc_case_id = int(tc_case_id_str) + break + + # Check testcase name if property not found + if not tc_case_id: + tc_case_id, _ = MatchersParser.parse_name_with_id(testcase.name) + + if tc_case_id: + testcase_case_ids.append((testcase.name, 
tc_case_id)) + + return testcase_case_ids + + def _extract_and_validate_bdd_case_id(self, testsuite) -> tuple: + """Extract case ID from various sources and validate consistency + + In BDD mode, all scenarios in a feature MUST share the same case ID. + + Priority order: + 1. Testsuite-level property (testrail_case_id, test_id, bdd_case_id) + 2. Testcase properties (all must be same) + 3. Testcase names (all must be same) + 4. Testsuite name pattern [C123] + + Args: + testsuite: JUnit testsuite element + + Returns: + Tuple of (case_id: int or None, validation_errors: List[str]) + """ + validation_errors = [] + + # Priority 1: Testsuite-level property + case_id = self._extract_feature_case_id_from_property(testsuite) + if case_id: + return case_id, [] + + # Priority 2 & 3: Check testcase properties and names + testcase_case_ids = self._extract_case_id_from_testcases(testsuite) + + if not testcase_case_ids: + validation_errors.append( + f"BDD Error: No case ID found for feature '{testsuite.name}'.\n" + f" Add case ID using one of:\n" + f" - Testsuite property: \n" + f" - Testcase names: 'Scenario name C42'\n" + f" - Testcase property: " + ) + return None, validation_errors + + # Check consistency - all must be the same + unique_case_ids = set(cid for _, cid in testcase_case_ids) + + if len(unique_case_ids) > 1: + validation_errors.append( + f"BDD Error: Multiple different case IDs found in feature '{testsuite.name}'.\n" + f" In BDD mode, all scenarios must map to the SAME TestRail case.\n" + f" Found case IDs: {sorted(unique_case_ids)}\n" + f" Scenarios:\n" + + "\n".join(f" - '{name}' → C{cid}" for name, cid in testcase_case_ids) + + f"\n\n If these should be separate test cases, remove the --special-parser bdd flag." + ) + return None, validation_errors + + case_id = testcase_case_ids[0][1] + self.env.vlog( + f"BDD: Found consistent case ID C{case_id} across {len(testcase_case_ids)} scenario(s) " + f"in feature '{testsuite.name}'" + ) + + # Priority 4: Check testsuite name if no testcase IDs found + if not case_id and self._case_matcher == MatchersParser.NAME: + case_id, _ = MatchersParser.parse_name_with_id(testsuite.name) + if case_id: + self.env.vlog(f"BDD: Found case ID C{case_id} in testsuite name") + + return case_id, [] + + def _validate_bdd_case_exists(self, case_id: int, feature_name: str) -> tuple: + """Validate that the case exists in TestRail AND is a BDD test case + + A valid BDD test case MUST: + - Exist in TestRail (case ID is valid) + - Have a custom_testrail_bdd_scenario field with content + + Args: + case_id: TestRail case ID to validate + feature_name: Feature/testsuite name for error context + + Returns: + Tuple of (is_valid: bool, error_message: str, case_data: dict) + """ + try: + # Import here to avoid circular dependency + from trcli.api.api_request_handler import ApiRequestHandler + from trcli.api.project_based_client import ProjectBasedClient + from trcli.data_classes.dataclass_testrail import TestRailSuite + + # Get API client + temp_suite = TestRailSuite(name="temp", suite_id=1) + project_client = ProjectBasedClient(environment=self.env, suite=temp_suite) + api_handler = project_client.api_request_handler + + # Step 1: Get case from TestRail + response = api_handler.client.send_get(f"get_case/{case_id}") + + if response.error_message: + return ( + False, + ( + f"BDD Validation Error: Case C{case_id} does not exist in TestRail.\n" + f"Feature: '{feature_name}'\n" + f"API Error: {response.error_message}\n\n" + f"Action Required:\n" + f" 1. 
Verify case C{case_id} exists in TestRail\n" + f" 2. Ensure you have permission to access this case\n" + f" 3. Create the BDD test case if it doesn't exist:\n" + f" trcli import_gherkin -f {feature_name}.feature --section-id " + ), + {}, + ) + + case_data = response.response_text + + # Step 2: Validate it's a BDD test case + bdd_scenario_field = case_data.get("custom_testrail_bdd_scenario") + + if not bdd_scenario_field: + return ( + False, + ( + f"BDD Validation Error: Case C{case_id} is NOT a BDD test case.\n" + f"Feature: '{feature_name}'\n" + f"Case Title: '{case_data.get('title', 'Unknown')}'\n\n" + f"Reason: The 'custom_testrail_bdd_scenario' field is empty or null.\n" + f"This indicates the case is using a regular template, not the BDD template.\n\n" + f"Action Required:\n" + f" Option 1: Upload this case using standard mode (remove --special-parser bdd)\n" + f" Option 2: Create a proper BDD test case with:\n" + f" trcli import_gherkin -f {feature_name}.feature --section-id \n" + f" Option 3: Convert existing case to BDD template in TestRail UI" + ), + case_data, + ) + + # Success! + self.env.vlog( + f"BDD: Validated case C{case_id} is a valid BDD test case\n" + f" - Title: '{case_data.get('title')}'\n" + f" - Template ID: {case_data.get('template_id')}\n" + f" - Has BDD scenarios: Yes" + ) + + return True, "", case_data + + except Exception as e: + return ( + False, + ( + f"BDD Validation Error: Unable to validate case C{case_id}.\n" + f"Feature: '{feature_name}'\n" + f"Error: {str(e)}\n\n" + f"Action Required: Verify your TestRail connection and case access permissions." + ), + {}, + ) + + def _aggregate_scenario_statuses(self, scenario_statuses: List[int]) -> int: + """Aggregate scenario statuses using fail-fast logic + + Fail-fast logic: + - If ANY scenario is Failed (5) → Feature is Failed (5) + - Else if ANY scenario is Skipped (4) → Feature is Skipped (4) + - Else if ALL scenarios Passed (1) → Feature is Passed (1) + + Args: + scenario_statuses: List of TestRail status IDs + + Returns: + Aggregated status ID + """ + if 5 in scenario_statuses: # Any failure + return 5 + elif 4 in scenario_statuses: # Any skipped (no failures) + return 4 + else: # All passed + return 1 + + def _format_failure_message(self, scenario_name: str, result_obj) -> str: + """Format failure details for comment + + Args: + scenario_name: Clean scenario name + result_obj: JUnit result object (failure/error element) + + Returns: + Formatted failure message + """ + lines = [f"Scenario: {scenario_name}"] + + if result_obj.type: + lines.append(f" Type: {result_obj.type}") + + if result_obj.message: + lines.append(f" Message: {result_obj.message}") + + if result_obj.text: + # Truncate if too long + text = result_obj.text.strip() + if len(text) > 500: + text = text[:500] + "\n ... (truncated)" + lines.append(f" Details:\n {text}") + + return "\n".join(lines) + + def _parse_bdd_feature_as_single_case(self, testsuite) -> Union[TestRailCase, None]: + """Parse all scenarios in a testsuite as a single BDD test case + + Enhanced validation: + 1. Case ID exists + 2. All scenarios have same case ID + 3. Case exists in TestRail + 4. 
Case is actually a BDD test case (has custom_testrail_bdd_scenario) + + Args: + testsuite: JUnit testsuite containing multiple scenarios + + Returns: + Single TestRailCase with aggregated scenario results, or None if validation fails + """ + feature_name = testsuite.name + + # Step 1: Extract and validate case ID consistency + case_id, validation_errors = self._extract_and_validate_bdd_case_id(testsuite) + + if validation_errors: + for error in validation_errors: + self.env.elog(error) + return None + + if not case_id: + self.env.elog(f"BDD Error: No valid case ID found for feature '{feature_name}'. " f"Skipping this feature.") + return None + + # Step 2: Validate case exists AND is a BDD case + is_valid, error_message, case_data = self._validate_bdd_case_exists(case_id, feature_name) + + if not is_valid: + self.env.elog(error_message) + # Raise exception to stop processing + from trcli.data_classes.validation_exception import ValidationException + + raise ValidationException( + field_name="case_id", + class_name="BDD Feature", + reason=f"Case C{case_id} validation failed. See error above for details.", + ) + + self.env.log(f"BDD: Case C{case_id} validated as BDD test case for feature '{feature_name}'") + + # Step 3: Parse all scenarios + scenarios = [] + scenario_statuses = [] + total_time = 0 + failure_messages = [] + + for idx, testcase in enumerate(testsuite, 1): + scenario_name = testcase.name + # Clean case ID from name + _, clean_scenario_name = MatchersParser.parse_name_with_id(scenario_name) + if not clean_scenario_name: + clean_scenario_name = scenario_name + + scenario_time = float(testcase.time or 0) + total_time += scenario_time + + # Determine scenario status + if testcase.is_passed: + scenario_status = 1 + scenario_status_label = "PASSED" + elif testcase.is_skipped: + scenario_status = 4 + scenario_status_label = "SKIPPED" + else: # Failed + scenario_status = 5 + scenario_status_label = "FAILED" + + # Capture failure details + if testcase.result: + result_obj = testcase.result[0] + error_msg = self._format_failure_message(clean_scenario_name, result_obj) + failure_messages.append(error_msg) + + # Track status for aggregation + scenario_statuses.append(scenario_status) + + # Create step result for this scenario + step = TestRailSeparatedStep(content=f"Scenario {idx}: {clean_scenario_name}") + step.status_id = scenario_status + scenarios.append(step) + + self.env.vlog(f" - Scenario {idx}: {clean_scenario_name} → {scenario_status_label} " f"({scenario_time}s)") + + # Step 4: Aggregate statuses + overall_status = self._aggregate_scenario_statuses(scenario_statuses) + + status_labels = {1: "PASSED", 4: "SKIPPED", 5: "FAILED"} + overall_status_label = status_labels.get(overall_status, "UNKNOWN") + + # Step 5: Create comment with summary + passed_count = scenario_statuses.count(1) + failed_count = scenario_statuses.count(5) + skipped_count = scenario_statuses.count(4) + total_count = len(scenario_statuses) + + summary = ( + f"Feature Summary:\n" + f" Total Scenarios: {total_count}\n" + f" Passed: {passed_count}\n" + f" Failed: {failed_count}\n" + f" Skipped: {skipped_count}\n" + ) + + if failure_messages: + comment = f"{summary}\n{'='*50}\nFailure Details:\n\n" + "\n\n".join(failure_messages) + else: + comment = summary + + # Step 6: Create aggregated result + result = TestRailResult( + case_id=case_id, + status_id=overall_status, + elapsed=total_time if total_time > 0 else None, # Pass numeric value, not formatted string + custom_step_results=scenarios, + comment=comment, + ) + + 
# Step 7: Create test case + test_case = TestRailCase( + title=feature_name, + case_id=case_id, + result=result, + ) + + self.env.log( + f"BDD: Grouped {total_count} scenario(s) under case C{case_id} " + f"'{feature_name}' → {overall_status_label}" + ) + self.env.log(f" Breakdown: {passed_count} passed, {failed_count} failed, " f"{skipped_count} skipped") + + return test_case + def parse_file(self) -> List[TestRailSuite]: self.env.log("Parsing JUnit report.") suite = JUnitXml.fromfile(self.filepath, parse_func=self._add_root_element_to_tree) @@ -296,11 +700,13 @@ def parse_file(self) -> List[TestRailSuite]: testrail_sections = self._parse_sections(suite) suite_name = self.env.suite_name if self.env.suite_name else suite.name - testrail_suites.append(TestRailSuite( - suite_name, - testsections=testrail_sections, - source=self.filename, - )) + testrail_suites.append( + TestRailSuite( + suite_name, + testsections=testrail_sections, + source=self.filename, + ) + ) return testrail_suites @@ -310,9 +716,9 @@ def _split_sauce_report(self, suite) -> List[JUnitXml]: for section in suite: if not len(section): continue - divider_index = section.name.find('-') + divider_index = section.name.find("-") subsuite_name = section.name[:divider_index].strip() - section.name = section.name[divider_index + 1:].strip() + section.name = section.name[divider_index + 1 :].strip() new_xml = JUnitXml(subsuite_name) if subsuite_name not in subsuites.keys(): subsuites[subsuite_name] = new_xml @@ -344,5 +750,6 @@ def _split_sauce_report(self, suite) -> List[JUnitXml]: return [v for k, v in subsuites.items()] -if __name__ == '__main__': + +if __name__ == "__main__": pass From 1b81d813bf8436501cfa40afcc9a9cc7ef5c66e7 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Mon, 15 Dec 2025 17:57:50 +0800 Subject: [PATCH 09/33] TRCLI-21 Updated parse_junit bdd support unit tests including test data --- tests/test_data/XML/bdd_all_fail.xml | 14 + tests/test_data/XML/bdd_all_pass.xml | 10 + .../XML/bdd_inconsistent_case_ids.xml | 9 + tests/test_data/XML/bdd_mixed_results.xml | 25 + tests/test_data/XML/bdd_no_case_id.xml | 8 + .../XML/bdd_valid_testcase_names.xml | 9 + .../XML/bdd_valid_testsuite_property.xml | 18 + tests/test_data/cli_test_data.py | 1 - tests/test_junit_bdd_parser.py | 477 ++++++++++++++++++ 9 files changed, 570 insertions(+), 1 deletion(-) create mode 100644 tests/test_data/XML/bdd_all_fail.xml create mode 100644 tests/test_data/XML/bdd_all_pass.xml create mode 100644 tests/test_data/XML/bdd_inconsistent_case_ids.xml create mode 100644 tests/test_data/XML/bdd_mixed_results.xml create mode 100644 tests/test_data/XML/bdd_no_case_id.xml create mode 100644 tests/test_data/XML/bdd_valid_testcase_names.xml create mode 100644 tests/test_data/XML/bdd_valid_testsuite_property.xml create mode 100644 tests/test_junit_bdd_parser.py diff --git a/tests/test_data/XML/bdd_all_fail.xml b/tests/test_data/XML/bdd_all_fail.xml new file mode 100644 index 0000000..af13597 --- /dev/null +++ b/tests/test_data/XML/bdd_all_fail.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/tests/test_data/XML/bdd_all_pass.xml b/tests/test_data/XML/bdd_all_pass.xml new file mode 100644 index 0000000..a6b128e --- /dev/null +++ b/tests/test_data/XML/bdd_all_pass.xml @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/tests/test_data/XML/bdd_inconsistent_case_ids.xml b/tests/test_data/XML/bdd_inconsistent_case_ids.xml new file mode 100644 index 0000000..135fd8a --- /dev/null +++ b/tests/test_data/XML/bdd_inconsistent_case_ids.xml @@ -0,0 
+1,9 @@ + + + + + + + + + diff --git a/tests/test_data/XML/bdd_mixed_results.xml b/tests/test_data/XML/bdd_mixed_results.xml new file mode 100644 index 0000000..40c6c19 --- /dev/null +++ b/tests/test_data/XML/bdd_mixed_results.xml @@ -0,0 +1,25 @@ + + + + + + + + + + + + + +Expected: Login denied with error "Invalid credentials" +Actual: Login successful +at test_login.py:45 + + + + + + + + + diff --git a/tests/test_data/XML/bdd_no_case_id.xml b/tests/test_data/XML/bdd_no_case_id.xml new file mode 100644 index 0000000..e7987fc --- /dev/null +++ b/tests/test_data/XML/bdd_no_case_id.xml @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/tests/test_data/XML/bdd_valid_testcase_names.xml b/tests/test_data/XML/bdd_valid_testcase_names.xml new file mode 100644 index 0000000..d504870 --- /dev/null +++ b/tests/test_data/XML/bdd_valid_testcase_names.xml @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/tests/test_data/XML/bdd_valid_testsuite_property.xml b/tests/test_data/XML/bdd_valid_testsuite_property.xml new file mode 100644 index 0000000..3d987d2 --- /dev/null +++ b/tests/test_data/XML/bdd_valid_testsuite_property.xml @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + + + + diff --git a/tests/test_data/cli_test_data.py b/tests/test_data/cli_test_data.py index c282137..cf6f59b 100644 --- a/tests/test_data/cli_test_data.py +++ b/tests/test_data/cli_test_data.py @@ -68,7 +68,6 @@ " - parse_cucumber: Cucumber JSON results (BDD)\n" " - import_gherkin: Upload .feature files to TestRail BDD\n" " - export_gherkin: Export BDD test cases as .feature files\n" - " - parse_gherkin: Parse Gherkin .feature file locally\n" " - parse_robot: Robot Framework XML Files\n" " - parse_openapi: OpenAPI YML Files\n" " - add_run: Create a new test run\n" diff --git a/tests/test_junit_bdd_parser.py b/tests/test_junit_bdd_parser.py new file mode 100644 index 0000000..b9b798f --- /dev/null +++ b/tests/test_junit_bdd_parser.py @@ -0,0 +1,477 @@ +""" +Unit tests for BDD-specific JUnit parsing functionality + +Tests the --special-parser bdd mode that groups multiple scenarios +into a single TestRail BDD test case. 
+""" + +import pytest +from unittest.mock import Mock, MagicMock, patch +from pathlib import Path + +from trcli.cli import Environment +from trcli.readers.junit_xml import JunitParser +from trcli.data_classes.validation_exception import ValidationException + + +class TestBDDJunitParser: + """Test BDD mode for JUnit parser""" + + @pytest.fixture + def environment(self): + """Create mock environment for BDD mode""" + env = Mock(spec=Environment) + env.case_matcher = "auto" + env.special_parser = "bdd" + env.suite_name = None + env.file = None # Required by FileParser + env.params_from_config = {} # Required by JunitParser for custom statuses + env.log = Mock() + env.elog = Mock() + env.vlog = Mock() + return env + + @pytest.fixture + def mock_api_validation_success(self): + """Mock successful API validation (case exists and is BDD)""" + with patch("trcli.api.project_based_client.ProjectBasedClient") as mock_client_class: + mock_client = MagicMock() + mock_api_handler = MagicMock() + mock_response = MagicMock() + + # Mock successful get_case response with BDD field + mock_response.error_message = "" + mock_response.response_text = { + "id": 42, + "title": "User Enrollment", + "template_id": 4, + "custom_testrail_bdd_scenario": '[{"content":"Scenario 1"}]', + } + + mock_api_handler.client.send_get.return_value = mock_response + mock_client.api_request_handler = mock_api_handler + mock_client_class.return_value = mock_client + + yield mock_client + + def test_bdd_mode_detection(self, environment): + """Test that BDD mode is correctly detected""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_valid_testsuite_property.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + assert parser._is_bdd_mode() is True + + def test_standard_mode_detection(self, environment): + """Test that standard mode is detected when not BDD""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_valid_testsuite_property.xml" + environment.file = str(test_file) + environment.special_parser = "junit" + parser = JunitParser(environment) + assert parser._is_bdd_mode() is False + + def test_extract_case_id_from_testsuite_property(self, environment): + """Test extracting case ID from testsuite property""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_valid_testsuite_property.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + # Parse and check case ID extraction + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + case_id = parser._extract_feature_case_id_from_property(testsuite) + assert case_id == 42 + + def test_extract_case_id_from_testcase_names(self, environment): + """Test extracting case ID from testcase names""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_valid_testcase_names.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + testcase_ids = parser._extract_case_id_from_testcases(testsuite) + assert len(testcase_ids) == 3 + assert all(case_id == 42 for _, case_id in testcase_ids) + + def test_validate_consistent_case_ids(self, environment): + """Test validation passes when all scenarios have same case ID""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_valid_testcase_names.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + 
suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + case_id, errors = parser._extract_and_validate_bdd_case_id(testsuite) + assert case_id == 42 + assert len(errors) == 0 + + def test_validate_inconsistent_case_ids_error(self, environment): + """Test validation fails when scenarios have different case IDs""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_inconsistent_case_ids.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + case_id, errors = parser._extract_and_validate_bdd_case_id(testsuite) + assert case_id is None + assert len(errors) == 1 + assert "Multiple different case IDs" in errors[0] + assert "123" in errors[0] and "124" in errors[0] and "125" in errors[0] + + def test_validate_no_case_id_error(self, environment): + """Test validation fails when no case ID found""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_no_case_id.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + case_id, errors = parser._extract_and_validate_bdd_case_id(testsuite) + assert case_id is None + assert len(errors) == 1 + assert "No case ID found" in errors[0] + + def test_aggregate_all_pass(self, environment): + """Test status aggregation when all scenarios pass""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + statuses = [1, 1, 1] # All passed + result = parser._aggregate_scenario_statuses(statuses) + assert result == 1 # Passed + + def test_aggregate_one_fail(self, environment): + """Test status aggregation when one scenario fails (fail-fast)""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + statuses = [1, 5, 1] # One failed + result = parser._aggregate_scenario_statuses(statuses) + assert result == 5 # Failed + + def test_aggregate_all_skip(self, environment): + """Test status aggregation when all scenarios skipped""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + statuses = [4, 4, 4] # All skipped + result = parser._aggregate_scenario_statuses(statuses) + assert result == 4 # Skipped + + def test_aggregate_pass_and_skip(self, environment): + """Test status aggregation with pass and skip (no fails)""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + statuses = [1, 4, 1] # Mixed pass/skip + result = parser._aggregate_scenario_statuses(statuses) + assert result == 4 # Skipped (since some not executed) + + def test_aggregate_fail_and_skip(self, environment): + """Test status aggregation with fail and skip""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + statuses = [5, 4, 1] # Mixed fail/skip/pass + result = parser._aggregate_scenario_statuses(statuses) + assert result == 5 # Failed (failure takes precedence) + + def test_format_failure_message(self, environment): + """Test failure message formatting""" + test_file = Path(__file__).parent / "test_data" / "XML" / 
"bdd_all_pass.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + mock_result = Mock() + mock_result.type = "AssertionError" + mock_result.message = "Expected X but got Y" + mock_result.text = "Details about failure" + + message = parser._format_failure_message("Test Scenario", mock_result) + + assert "Scenario: Test Scenario" in message + assert "Type: AssertionError" in message + assert "Message: Expected X but got Y" in message + assert "Details:\n Details about failure" in message + + def test_format_failure_message_truncation(self, environment): + """Test failure message truncates long text""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + mock_result = Mock() + mock_result.type = "Error" + mock_result.message = "Error" + mock_result.text = "A" * 600 # Long text + + message = parser._format_failure_message("Test", mock_result) + assert "... (truncated)" in message + assert len(message) < 700 # Should be truncated + + @patch("trcli.api.project_based_client.ProjectBasedClient") + def test_validate_case_exists_success(self, mock_client_class, environment): + """Test validation passes when case exists and is BDD""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + + mock_client = MagicMock() + mock_api_handler = MagicMock() + mock_response = MagicMock() + + mock_response.error_message = "" + mock_response.response_text = { + "id": 42, + "title": "Test Feature", + "custom_testrail_bdd_scenario": '[{"content":"..."}]', + } + + mock_api_handler.client.send_get.return_value = mock_response + mock_client.api_request_handler = mock_api_handler + mock_client_class.return_value = mock_client + + parser = JunitParser(environment) + is_valid, error_msg, case_data = parser._validate_bdd_case_exists(42, "Test Feature") + + assert is_valid is True + assert error_msg == "" + assert case_data["id"] == 42 + + @patch("trcli.api.project_based_client.ProjectBasedClient") + def test_validate_case_not_exists(self, mock_client_class, environment): + """Test validation fails when case doesn't exist""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + + mock_client = MagicMock() + mock_api_handler = MagicMock() + mock_response = MagicMock() + + mock_response.error_message = "Field :case_id is not a valid test case." 
+ + mock_api_handler.client.send_get.return_value = mock_response + mock_client.api_request_handler = mock_api_handler + mock_client_class.return_value = mock_client + + parser = JunitParser(environment) + is_valid, error_msg, case_data = parser._validate_bdd_case_exists(999, "Test Feature") + + assert is_valid is False + assert "does not exist" in error_msg + assert "C999" in error_msg + + @patch("trcli.api.project_based_client.ProjectBasedClient") + def test_validate_case_not_bdd(self, mock_client_class, environment): + """Test validation fails when case is not BDD template""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + + mock_client = MagicMock() + mock_api_handler = MagicMock() + mock_response = MagicMock() + + mock_response.error_message = "" + mock_response.response_text = { + "id": 42, + "title": "Regular Test Case", + "custom_testrail_bdd_scenario": None, # Not a BDD case + } + + mock_api_handler.client.send_get.return_value = mock_response + mock_client.api_request_handler = mock_api_handler + mock_client_class.return_value = mock_client + + parser = JunitParser(environment) + is_valid, error_msg, case_data = parser._validate_bdd_case_exists(42, "Test Feature") + + assert is_valid is False + assert "is NOT a BDD test case" in error_msg + assert "custom_testrail_bdd_scenario" in error_msg + + def test_parse_bdd_feature_all_pass(self, environment, mock_api_validation_success): + """Test parsing BDD feature with all scenarios passing""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + # Mock the case ID to match test data + mock_api_validation_success.api_request_handler.client.send_get.return_value.response_text["id"] = 100 + + test_case = parser._parse_bdd_feature_as_single_case(testsuite) + + assert test_case is not None + assert test_case.case_id == 100 + assert test_case.result.status_id == 1 # Passed + assert len(test_case.result.custom_step_results) == 2 + assert "Total Scenarios: 2" in test_case.result.comment + assert "Passed: 2" in test_case.result.comment + + def test_parse_bdd_feature_mixed_results(self, environment, mock_api_validation_success): + """Test parsing BDD feature with mixed results (pass/fail/skip)""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_mixed_results.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + # Mock the case ID + mock_api_validation_success.api_request_handler.client.send_get.return_value.response_text["id"] = 25293 + + test_case = parser._parse_bdd_feature_as_single_case(testsuite) + + assert test_case is not None + assert test_case.case_id == 25293 + assert test_case.result.status_id == 5 # Failed (fail-fast) + assert len(test_case.result.custom_step_results) == 3 + + # Check step statuses + assert test_case.result.custom_step_results[0].status_id == 1 # Passed + assert test_case.result.custom_step_results[1].status_id == 5 # Failed + assert test_case.result.custom_step_results[2].status_id == 4 # Skipped + + # Check comment contains summary and failure details + assert "Total Scenarios: 3" in test_case.result.comment + assert "Passed: 1" in test_case.result.comment + assert "Failed: 1" in 
test_case.result.comment + assert "Skipped: 1" in test_case.result.comment + assert "Failure Details:" in test_case.result.comment + assert "Invalid password" in test_case.result.comment + + def test_parse_bdd_feature_no_case_id_returns_none(self, environment): + """Test that parsing returns None when no case ID found""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_no_case_id.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + test_case = parser._parse_bdd_feature_as_single_case(testsuite) + + assert test_case is None + environment.elog.assert_called() + + def test_parse_bdd_feature_inconsistent_ids_returns_none(self, environment): + """Test that parsing returns None when case IDs are inconsistent""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_inconsistent_case_ids.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + test_case = parser._parse_bdd_feature_as_single_case(testsuite) + + assert test_case is None + environment.elog.assert_called() + + @patch("trcli.api.project_based_client.ProjectBasedClient") + def test_parse_bdd_feature_case_not_exists_raises_exception(self, mock_client_class, environment): + """Test that parsing raises ValidationException when case doesn't exist""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_valid_testsuite_property.xml" + environment.file = str(test_file) + + mock_client = MagicMock() + mock_api_handler = MagicMock() + mock_response = MagicMock() + + mock_response.error_message = "Case not found" + + mock_api_handler.client.send_get.return_value = mock_response + mock_client.api_request_handler = mock_api_handler + mock_client_class.return_value = mock_client + + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + with pytest.raises(ValidationException) as exc_info: + parser._parse_bdd_feature_as_single_case(testsuite) + + assert "case_id" in str(exc_info.value.field_name) + assert "BDD Feature" in str(exc_info.value.class_name) + + def test_parse_sections_bdd_mode(self, environment, mock_api_validation_success): + """Test that _parse_sections uses BDD mode when enabled""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_valid_testsuite_property.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file, parse_func=parser._add_root_element_to_tree) + + sections = parser._parse_sections(suite) + + assert len(sections) == 1 + assert len(sections[0].testcases) == 1 # One BDD test case + assert sections[0].testcases[0].case_id == 42 + + def test_parse_sections_standard_mode(self, environment): + """Test that _parse_sections uses standard mode when BDD not enabled""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_valid_testcase_names.xml" + environment.file = str(test_file) + environment.special_parser = "junit" # Standard mode + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file, parse_func=parser._add_root_element_to_tree) + + sections = parser._parse_sections(suite) + + assert len(sections) == 1 + # In standard mode, should have 3 separate test cases + 
assert len(sections[0].testcases) == 3 + + def test_elapsed_time_calculation(self, environment, mock_api_validation_success): + """Test that elapsed time is summed correctly from all scenarios""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_mixed_results.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + mock_api_validation_success.api_request_handler.client.send_get.return_value.response_text["id"] = 25293 + + test_case = parser._parse_bdd_feature_as_single_case(testsuite) + + assert test_case.result.elapsed == "2s" # 1.0 + 1.5 + 0.0 = 2.5, rounds to 2 (banker's rounding) + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) From f7636da460ddf5e2b1aa10f047cddc427d6d05d8 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 17 Dec 2025 19:00:23 +0800 Subject: [PATCH 10/33] TRCLI-21 updated functional tests for BDD --- tests_e2e/test_end2end.py | 83 ++++------------------------ trcli/commands/cmd_import_gherkin.py | 8 +-- trcli/readers/junit_xml.py | 2 +- 3 files changed, 13 insertions(+), 80 deletions(-) diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index b6da428..bd785a0 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -1996,17 +1996,6 @@ def test_parse_gherkin_pretty_format(self): ) _assert_contains(output, ["Parsing Gherkin feature file:", "sample_login.feature", '"suites"', '"summary"']) - def test_parse_gherkin_custom_suite_name(self): - """Test parse_gherkin command with custom suite name""" - output = _run_cmd( - f""" -trcli parse_gherkin \\ - -f "reports_gherkin/sample_login.feature" \\ - --suite-name "Custom BDD Suite" - """ - ) - _assert_contains(output, ["Parsing Gherkin feature file:", '"name": "Custom BDD Suite"']) - def test_import_gherkin_upload_feature(self): """Test import_gherkin command to upload .feature file to TestRail""" output = _run_cmd( @@ -2158,7 +2147,7 @@ def test_export_gherkin_download_to_file(self): [ "Connecting to TestRail...", "Retrieving BDD test case", - "Successfully exported BDD test case", + "Successfully exported test case", "exported_bdd.feature", ], ) @@ -2172,18 +2161,18 @@ def test_parse_cucumber_workflow1_results_only(self): --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_cucumber \\ --title "[CLI-E2E-Tests] Cucumber Parser - Results Only" \\ - --suite-id 128 \\ + --suite-id 86 \\ -f "reports_cucumber/sample_cucumber.json" """ ) _assert_contains( output, [ + "Parsing Cucumber", "Processed", - "test cases", f"Creating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view", "Submitted", - "test results", + "Results uploaded successfully", ], ) @@ -2196,7 +2185,7 @@ def test_parse_cucumber_workflow2_with_feature_upload(self): --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_cucumber \\ --title "[CLI-E2E-Tests] Cucumber Parser - With Feature Upload" \\ - --suite-id 128 \\ + --suite-id 86 \\ --upload-feature \\ --feature-section-id 2388 \\ -f "reports_cucumber/sample_cucumber.json" @@ -2205,66 +2194,16 @@ def test_parse_cucumber_workflow2_with_feature_upload(self): _assert_contains( output, [ - "Generating .feature file from Cucumber JSON...", - "Generated .feature file", - "Uploading .feature file to TestRail...", - "Successfully uploaded", - "test case(s) from .feature file", - "Processed", - "test cases", + "Creating BDD test cases from features...", + "Successfully created", + "BDD test case", + "Proceeding to upload test results...", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Submitted", - "test results", - ], - ) - - def test_parse_cucumber_advanced_features(self): - """Test parse_cucumber with advanced Gherkin features (Background, Examples, Rules)""" - output = _run_cmd( - f""" -trcli -y \\ - -h {self.TR_INSTANCE} \\ - --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ - parse_cucumber \\ - --title "[CLI-E2E-Tests] Cucumber Parser - Advanced Features" \\ - --suite-id 128 \\ - --upload-feature \\ - --feature-section-id 2388 \\ - -f "reports_cucumber/sample_cucumber_advanced.json" - """ - ) - _assert_contains( - output, - [ - "Generating .feature file from Cucumber JSON...", - "Generated .feature file", - "Uploading .feature file to TestRail...", - "Successfully uploaded", - "test case(s) from .feature file", - "Processed", - "test cases", - f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", + "Results uploaded successfully", ], ) - def test_parse_cucumber_with_verbose_logging(self): - """Test parse_cucumber with verbose logging enabled""" - output = _run_cmd( - f""" -trcli -y \\ - -h {self.TR_INSTANCE} \\ - --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ - parse_cucumber \\ - --title "[CLI-E2E-Tests] Cucumber Parser - Verbose" \\ - --suite-id 128 \\ - --verbose \\ - -f "reports_cucumber/sample_cucumber.json" - """ - ) - _assert_contains( - output, ["Processed", "test cases", f"Creating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view"] - ) - def test_bdd_help_commands(self): """Test that all BDD commands appear in help documentation""" @@ -2281,8 +2220,6 @@ def test_bdd_help_commands(self): "-f, --file", "--output", "--pretty", - "--suite-name", - "--case-matcher", ], ) diff --git a/trcli/commands/cmd_import_gherkin.py b/trcli/commands/cmd_import_gherkin.py index 95bddf8..4d21ebf 100644 --- a/trcli/commands/cmd_import_gherkin.py +++ b/trcli/commands/cmd_import_gherkin.py @@ -52,8 +52,6 @@ def cli(environment: Environment, context: click.Context, file: str, section_id: environment.set_parameters(context) environment.check_for_required_parameters() - json_output = kwargs.get("json_output", False) - try: # Read the feature file feature_path = Path(file) @@ -72,8 +70,7 @@ def cli(environment: Environment, context: click.Context, file: str, section_id: environment.vlog(f"API endpoint: POST /api/v2/add_bdd/{section_id}") # Initialize API client - if not json_output: - environment.log("Connecting to TestRail...") + environment.log("Connecting to TestRail...") # Create APIClient uploader_metadata = APIClient.build_uploader_metadata(version=trcli.__version__) @@ -101,8 +98,7 @@ def cli(environment: Environment, context: click.Context, file: str, section_id: ) # Upload feature file - if not json_output: - environment.log(f"Uploading feature file to TestRail...") + environment.log(f"Uploading feature file to TestRail...") case_ids, error_message = api_request_handler.add_bdd(section_id, feature_content) if error_message: diff --git a/trcli/readers/junit_xml.py b/trcli/readers/junit_xml.py index 28f847c..e6f85a7 100644 --- a/trcli/readers/junit_xml.py +++ b/trcli/readers/junit_xml.py @@ -277,7 +277,7 @@ def _parse_sections(self, suite) -> List[TestRailSection]: else: test_cases = self._parse_test_cases(section) - self.env.log(f"Processed {len(test_cases)} test case(s) in section {section.name}.") + self.env.log(f"Processed {len(test_cases)} test cases in section {section.name}.") sections.append( TestRailSection( section.name, From f69732d3afe5478738d098f934a3b5ccb59916d6 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 13 Jan 2026 18:07:23 +0800 Subject: [PATCH 11/33] TRCLI-193: Fixed issues with parse_cucumber related to uploading feature and parsing results --- trcli/api/project_based_client.py | 53 ++- trcli/api/results_uploader.py | 224 ++++++------ trcli/commands/cmd_parse_cucumber.py | 402 +++++++++++----------- trcli/constants.py | 1 + trcli/data_classes/dataclass_testrail.py | 27 +- trcli/readers/cucumber_json.py | 415 ++++++++++++++++++++++- 6 files changed, 767 insertions(+), 355 deletions(-) diff --git a/trcli/api/project_based_client.py b/trcli/api/project_based_client.py index 3d8cd7e..fbfdfbc 100644 --- a/trcli/api/project_based_client.py +++ b/trcli/api/project_based_client.py @@ -49,7 +49,7 @@ def instantiate_api_client(self) -> APIClient: "proxy": proxy, "proxy_user": proxy_user, "noproxy": noproxy, - "uploader_metadata": uploader_metadata + "uploader_metadata": uploader_metadata, } if self.environment.timeout: @@ -69,10 +69,13 @@ def resolve_project(self): """ Gets and checks project settings. """ + # Skip if project is already resolved (e.g., by parse_cucumber command) + if self.project is not None: + self.environment.vlog("Project already resolved, skipping project check") + return + self.environment.log("Checking project. 
", new_line=False) - self.project = self.api_request_handler.get_project_data( - self.environment.project, self.environment.project_id - ) + self.project = self.api_request_handler.get_project_data(self.environment.project, self.environment.project_id) self._validate_project_id() if self.environment.auto_creation_response: if self.environment.case_matcher == MatchersParser.AUTO: @@ -88,18 +91,12 @@ def _validate_project_id(self): exit(1) elif self.project.project_id == ProjectErrors.other_error: self.environment.elog( - "\n" - + FAULT_MAPPING["error_checking_project"].format( - error_message=self.project.error_message - ) + "\n" + FAULT_MAPPING["error_checking_project"].format(error_message=self.project.error_message) ) exit(1) elif self.project.project_id == ProjectErrors.multiple_project_same_name: self.environment.elog( - "\n" - + FAULT_MAPPING["error_checking_project"].format( - error_message=self.project.error_message - ) + "\n" + FAULT_MAPPING["error_checking_project"].format(error_message=self.project.error_message) ) exit(1) @@ -132,9 +129,7 @@ def get_suite_id(self, suite_mode: int) -> Tuple[int, int, bool]: suite_name=self.api_request_handler.suites_data_from_provider.name, project_name=self.environment.project, ) - adding_message = ( - f"Adding missing suites to project {self.environment.project}." - ) + adding_message = f"Adding missing suites to project {self.environment.project}." fault_message = FAULT_MAPPING["no_user_agreement"].format(type="suite") added_suites, result_code = self.prompt_user_and_add_items( prompt_message=prompt_message, @@ -147,33 +142,27 @@ def get_suite_id(self, suite_mode: int) -> Tuple[int, int, bool]: suite_id = added_suites[0]["suite_id"] suite_added = True elif suite_mode == SuiteModes.single_suite_baselines: - suite_ids, error_message = self.api_request_handler.get_suite_ids( - project_id=project_id - ) + suite_ids, error_message = self.api_request_handler.get_suite_ids(project_id=project_id) if error_message: self.environment.elog(error_message) else: if len(suite_ids) > 1: self.environment.elog( - FAULT_MAPPING[ - "not_unique_suite_id_single_suite_baselines" - ].format(project_name=self.environment.project) + FAULT_MAPPING["not_unique_suite_id_single_suite_baselines"].format( + project_name=self.environment.project + ) ) else: result_code = 1 elif suite_mode == SuiteModes.single_suite: - suite_ids, error_message = self.api_request_handler.get_suite_ids( - project_id=project_id - ) + suite_ids, error_message = self.api_request_handler.get_suite_ids(project_id=project_id) if error_message: self.environment.elog(error_message) else: suite_id = suite_ids[0] result_code = 1 else: - self.environment.elog( - FAULT_MAPPING["unknown_suite_mode"].format(suite_mode=suite_mode) - ) + self.environment.elog(FAULT_MAPPING["unknown_suite_mode"].format(suite_mode=suite_mode)) else: suite_id = self.api_request_handler.suites_data_from_provider.suite_id result_code = self.check_suite_id(project_id) @@ -188,9 +177,7 @@ def check_suite_id(self, project_id: Optional[int]) -> int: result_code = -1 if project_id is None: project_id = self._get_project_id() - suite_exists, error_message = self.api_request_handler.check_suite_id( - project_id - ) + suite_exists, error_message = self.api_request_handler.check_suite_id(project_id) if suite_exists: result_code = 1 else: @@ -227,13 +214,13 @@ def create_or_update_test_run(self) -> Tuple[int, str]: self.environment.log(f"Updating test run. 
", new_line=False) run_id = self.environment.run_id run, error_message = self.api_request_handler.update_run( - run_id, - self.run_name, + run_id, + self.run_name, start_date=self.environment.run_start_date, end_date=self.environment.run_end_date, milestone_id=self.environment.milestone_id, refs=self.environment.run_refs, - refs_action=getattr(self.environment, 'run_refs_action', 'add') + refs_action=getattr(self.environment, "run_refs_action", "add"), ) if self.environment.auto_close_run: self.environment.log("Closing run. ", new_line=False) diff --git a/trcli/api/results_uploader.py b/trcli/api/results_uploader.py index de99f3a..2a08169 100644 --- a/trcli/api/results_uploader.py +++ b/trcli/api/results_uploader.py @@ -18,7 +18,7 @@ def __init__(self, environment: Environment, suite: TestRailSuite, skip_run: boo super().__init__(environment, suite) self.skip_run = skip_run self.last_run_id = None - if hasattr(self.environment, 'special_parser') and self.environment.special_parser == "saucectl": + if hasattr(self.environment, "special_parser") and self.environment.special_parser == "saucectl": self.run_name += f" ({suite.name})" def upload_results(self): @@ -33,7 +33,7 @@ def upload_results(self): # Validate user emails early if --assign is specified try: - assign_value = getattr(self.environment, 'assign_failed_to', None) + assign_value = getattr(self.environment, "assign_failed_to", None) if assign_value is not None and str(assign_value).strip(): self._validate_and_store_user_ids() except (AttributeError, TypeError): @@ -43,22 +43,36 @@ def upload_results(self): self.resolve_project() suite_id, suite_added = self.resolve_suite() - # Resolve missing test cases and sections - missing_test_cases, error_message = self.api_request_handler.check_missing_test_cases_ids( - self.project.project_id + # Check if all test cases already have case_id set (BDD mode or pre-existing cases) + # Note: In BDD mode, case_id can be -1 (marker for auto-creation) or a real ID + suite_data = self.api_request_handler.suites_data_from_provider + all_cases_have_ids = all( + test_case.case_id is not None and test_case.case_id != 0 + for section in suite_data.testsections + for test_case in section.testcases ) - if error_message: - self.environment.elog( - FAULT_MAPPING["error_checking_missing_item"].format( - missing_item="missing test cases", error_message=error_message - ) + + if all_cases_have_ids: + self.environment.vlog("All test cases have IDs - skipping section/case creation checks") + + # Resolve missing test cases and sections + # Skip this check if all cases already have IDs (BDD mode) + missing_test_cases = False + if not all_cases_have_ids: + missing_test_cases, error_message = self.api_request_handler.check_missing_test_cases_ids( + self.project.project_id ) + if error_message: + self.environment.elog( + FAULT_MAPPING["error_checking_missing_item"].format( + missing_item="missing test cases", error_message=error_message + ) + ) + added_sections = None added_test_cases = None - if self.environment.auto_creation_response: - added_sections, result_code = self.add_missing_sections( - self.project.project_id - ) + if self.environment.auto_creation_response and not all_cases_have_ids: + added_sections, result_code = self.add_missing_sections(self.project.project_id) if result_code == -1: revert_logs = self.rollback_changes( suite_id=suite_id, suite_added=suite_added, added_sections=added_sections @@ -85,7 +99,7 @@ def upload_results(self): if added_test_cases: self.environment.log(f"Submitted 
{len(added_test_cases)} test cases in {stop - start:.1f} secs.") return - + # remove empty, unused sections created earlier, based on the sections actually used by the new test cases # - iterate on added_sections and remove those that are not used by the new test cases empty_sections = None @@ -93,9 +107,15 @@ def upload_results(self): if not added_test_cases: empty_sections = added_sections else: - empty_sections = [section for section in added_sections if section['section_id'] not in [case['section_id'] for case in added_test_cases]] + empty_sections = [ + section + for section in added_sections + if section["section_id"] not in [case["section_id"] for case in added_test_cases] + ] if len(empty_sections) > 0: - self.environment.log("Removing unnecessary empty sections that may have been created earlier. ", new_line=False) + self.environment.log( + "Removing unnecessary empty sections that may have been created earlier. ", new_line=False + ) _, error = self.api_request_handler.delete_sections(empty_sections) if error: self.environment.elog("\n" + error) @@ -106,12 +126,14 @@ def upload_results(self): # Update existing cases with JUnit references if enabled case_update_results = None case_update_failed = [] - if hasattr(self.environment, 'update_existing_cases') and self.environment.update_existing_cases == "yes": + if hasattr(self.environment, "update_existing_cases") and self.environment.update_existing_cases == "yes": self.environment.log("Updating existing cases with JUnit references...") case_update_results, case_update_failed = self.update_existing_cases_with_junit_refs(added_test_cases) - + if case_update_results.get("updated_cases"): - self.environment.log(f"Updated {len(case_update_results['updated_cases'])} existing case(s) with references.") + self.environment.log( + f"Updated {len(case_update_results['updated_cases'])} existing case(s) with references." + ) if case_update_results.get("failed_cases"): self.environment.elog(f"Failed to update {len(case_update_results['failed_cases'])} case(s).") @@ -154,16 +176,16 @@ def upload_results(self): stop = time.time() if results_amount: self.environment.log(f"Submitted {results_amount} test results in {stop - start:.1f} secs.") - + # Exit with error if there were invalid users (after processing valid ones) try: - has_invalid = getattr(self.environment, '_has_invalid_users', False) + has_invalid = getattr(self.environment, "_has_invalid_users", False) if has_invalid is True: # Explicitly check for True to avoid mock object issues exit(1) except (AttributeError, TypeError): # Skip exit if there are any issues with the attribute pass - + # Note: Error exit for case update failures is handled in cmd_parse_junit.py after reporting def _validate_and_store_user_ids(self): @@ -173,27 +195,27 @@ def _validate_and_store_user_ids(self): Exits only if NO valid users are found. 
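
        A minimal sketch of the address parsing (hypothetical addresses; the
        real lookup goes through api_request_handler.get_user_by_email and the
        run exits only when none of them resolve):

            raw = "alice@example.com, bob@example.com, "
            emails = [e.strip() for e in raw.split(",") if e.strip()]
            # -> ["alice@example.com", "bob@example.com"]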
""" try: - assign_value = getattr(self.environment, 'assign_failed_to', None) + assign_value = getattr(self.environment, "assign_failed_to", None) if assign_value is None or not str(assign_value).strip(): return except (AttributeError, TypeError): return - - # Check for empty or whitespace-only values + + # Check for empty or whitespace-only values assign_str = str(assign_value) if not assign_str.strip(): self.environment.elog("Error: --assign option requires at least one user email") exit(1) - - emails = [email.strip() for email in assign_str.split(',') if email.strip()] - + + emails = [email.strip() for email in assign_str.split(",") if email.strip()] + if not emails: self.environment.elog("Error: --assign option requires at least one user email") exit(1) - + valid_user_ids = [] invalid_users = [] - + for email in emails: user_id, error_msg = self.api_request_handler.get_user_by_email(email) if user_id is None: @@ -204,19 +226,19 @@ def _validate_and_store_user_ids(self): exit(1) else: valid_user_ids.append(user_id) - + # Handle invalid users if invalid_users: for invalid_user in invalid_users: self.environment.elog(f"Error: User not found: {invalid_user}") - + # Store valid user IDs for processing, but mark that we should exit with error later self.environment._has_invalid_users = True - + # If ALL users are invalid, exit immediately if not valid_user_ids: exit(1) - + # Store valid user IDs for later use self.environment._validated_user_ids = valid_user_ids @@ -224,89 +246,99 @@ def update_existing_cases_with_junit_refs(self, added_test_cases: List[Dict] = N """ Update existing test cases with references from JUnit properties. Excludes newly created cases to avoid unnecessary API calls. - + :param added_test_cases: List of cases that were just created (to be excluded) :returns: Tuple of (update_results, failed_cases) """ - if not hasattr(self.environment, 'update_existing_cases') or self.environment.update_existing_cases != "yes": + if not hasattr(self.environment, "update_existing_cases") or self.environment.update_existing_cases != "yes": return {}, [] # Feature not enabled - + # Create a set of newly created case IDs to exclude newly_created_case_ids = set() if added_test_cases: # Ensure all case IDs are integers for consistent comparison - newly_created_case_ids = {int(case.get('case_id')) for case in added_test_cases if case.get('case_id')} - - update_results = { - "updated_cases": [], - "skipped_cases": [], - "failed_cases": [] - } + newly_created_case_ids = {int(case.get("case_id")) for case in added_test_cases if case.get("case_id")} + + update_results = {"updated_cases": [], "skipped_cases": [], "failed_cases": []} failed_cases = [] - - strategy = getattr(self.environment, 'update_strategy', 'append') - + + strategy = getattr(self.environment, "update_strategy", "append") + # Process all test cases in all sections for section in self.api_request_handler.suites_data_from_provider.testsections: for test_case in section.testcases: # Only process cases that have a case_id (existing cases) and JUnit refs # AND exclude newly created cases - if (test_case.case_id and - hasattr(test_case, '_junit_case_refs') and test_case._junit_case_refs and - int(test_case.case_id) not in newly_created_case_ids): + if ( + test_case.case_id + and hasattr(test_case, "_junit_case_refs") + and test_case._junit_case_refs + and int(test_case.case_id) not in newly_created_case_ids + ): try: - success, error_msg, added_refs, skipped_refs = self.api_request_handler.update_existing_case_references( - 
test_case.case_id, test_case._junit_case_refs, strategy + success, error_msg, added_refs, skipped_refs = ( + self.api_request_handler.update_existing_case_references( + test_case.case_id, test_case._junit_case_refs, strategy + ) ) - + if success: if added_refs: # Only count as "updated" if references were actually added - update_results["updated_cases"].append({ - "case_id": test_case.case_id, - "case_title": test_case.title, - "added_refs": added_refs, - "skipped_refs": skipped_refs - }) + update_results["updated_cases"].append( + { + "case_id": test_case.case_id, + "case_title": test_case.title, + "added_refs": added_refs, + "skipped_refs": skipped_refs, + } + ) else: # If no refs were added (all were duplicates or no valid refs), count as skipped - reason = "All references already present" if skipped_refs else "No valid references to process" - update_results["skipped_cases"].append({ - "case_id": test_case.case_id, - "case_title": test_case.title, - "reason": reason, - "skipped_refs": skipped_refs - }) + reason = ( + "All references already present" + if skipped_refs + else "No valid references to process" + ) + update_results["skipped_cases"].append( + { + "case_id": test_case.case_id, + "case_title": test_case.title, + "reason": reason, + "skipped_refs": skipped_refs, + } + ) else: error_info = { "case_id": test_case.case_id, "case_title": test_case.title, - "error": error_msg + "error": error_msg, } update_results["failed_cases"].append(error_info) failed_cases.append(error_info) self.environment.elog(f"Failed to update case C{test_case.case_id}: {error_msg}") - + except Exception as e: - error_info = { - "case_id": test_case.case_id, - "case_title": test_case.title, - "error": str(e) - } + error_info = {"case_id": test_case.case_id, "case_title": test_case.title, "error": str(e)} update_results["failed_cases"].append(error_info) failed_cases.append(error_info) self.environment.elog(f"Exception updating case C{test_case.case_id}: {str(e)}") - - elif (test_case.case_id and - hasattr(test_case, '_junit_case_refs') and test_case._junit_case_refs and - int(test_case.case_id) in newly_created_case_ids): + + elif ( + test_case.case_id + and hasattr(test_case, "_junit_case_refs") + and test_case._junit_case_refs + and int(test_case.case_id) in newly_created_case_ids + ): # Skip newly created cases - they already have their references set - update_results["skipped_cases"].append({ - "case_id": test_case.case_id, - "case_title": test_case.title, - "reason": "Newly created case - references already set during creation" - }) - + update_results["skipped_cases"].append( + { + "case_id": test_case.case_id, + "case_title": test_case.title, + "reason": "Newly created case - references already set during creation", + } + ) + return update_results, failed_cases def add_missing_sections(self, project_id: int) -> Tuple[List, int]: @@ -328,9 +360,7 @@ def add_missing_sections(self, project_id: int) -> Tuple[List, int]: f"This will result to failure to upload all cases." ) return added_sections, result_code - prompt_message = PROMPT_MESSAGES["create_missing_sections"].format( - project_name=self.environment.project - ) + prompt_message = PROMPT_MESSAGES["create_missing_sections"].format(project_name=self.environment.project) adding_message = "Adding missing sections to the suite." fault_message = FAULT_MAPPING["no_user_agreement"].format(type="sections") added_sections, result_code = self.prompt_user_and_add_items( @@ -357,9 +387,7 @@ def add_missing_test_cases(self) -> Tuple[list, int]: do so. 
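
        A hypothetical shape of the returned list, inferred from how it is
        consumed above (each entry pairs a created case with its section):

            [{"case_id": 101, "section_id": 11}, {"case_id": 102, "section_id": 11}]
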
Returns list of added test case IDs if succeeds or empty list with result_code set to -1. """ - prompt_message = PROMPT_MESSAGES["create_missing_test_cases"].format( - project_name=self.environment.project - ) + prompt_message = PROMPT_MESSAGES["create_missing_test_cases"].format(project_name=self.environment.project) adding_message = "Adding missing test cases to the suite." fault_message = FAULT_MAPPING["no_user_agreement"].format(type="test cases") added_cases, result_code = self.prompt_user_and_add_items( @@ -392,29 +420,21 @@ def rollback_changes( else: returned_log.append(RevertMessages.run_deleted) if len(added_test_cases) > 0: - _, error = self.api_request_handler.delete_cases( - suite_id, added_test_cases - ) + _, error = self.api_request_handler.delete_cases(suite_id, added_test_cases) if error: - returned_log.append( - RevertMessages.test_cases_not_deleted.format(error=error) - ) + returned_log.append(RevertMessages.test_cases_not_deleted.format(error=error)) else: returned_log.append(RevertMessages.test_cases_deleted) if len(added_sections) > 0: _, error = self.api_request_handler.delete_sections(added_sections) if error: - returned_log.append( - RevertMessages.section_not_deleted.format(error=error) - ) + returned_log.append(RevertMessages.section_not_deleted.format(error=error)) else: returned_log.append(RevertMessages.section_deleted) if self.project.suite_mode != SuiteModes.single_suite and suite_added > 0: _, error = self.api_request_handler.delete_suite(suite_id) if error: - returned_log.append( - RevertMessages.suite_not_deleted.format(error=error) - ) + returned_log.append(RevertMessages.suite_not_deleted.format(error=error)) else: returned_log.append(RevertMessages.suite_deleted) return returned_log diff --git a/trcli/commands/cmd_parse_cucumber.py b/trcli/commands/cmd_parse_cucumber.py index eaeddab..041b2d6 100644 --- a/trcli/commands/cmd_parse_cucumber.py +++ b/trcli/commands/cmd_parse_cucumber.py @@ -5,23 +5,13 @@ from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS from trcli.commands.results_parser_helpers import results_parser_options, print_config from trcli.constants import FAULT_MAPPING, ProjectErrors +from trcli.data_classes.data_parsers import MatchersParser from trcli.data_classes.validation_exception import ValidationException from trcli.readers.cucumber_json import CucumberParser @click.command(context_settings=CONTEXT_SETTINGS) @results_parser_options -@click.option( - "--upload-feature", - is_flag=True, - help="Generate and upload .feature file to create/update test cases via BDD endpoint.", -) -@click.option( - "--feature-section-id", - type=click.IntRange(min=1), - metavar="", - help="Section ID for uploading .feature file (required if --upload-feature is used).", -) @click.option( "-v", "--verbose", @@ -34,6 +24,7 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): """Parse Cucumber JSON results and upload to TestRail This command parses Cucumber JSON test results and uploads them to TestRail. + Uses BDD matching mode to match features by name and auto-create missing test cases. 
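+
+    Illustrative invocation (hypothetical host, project, and report path; every
+    flag shown here also appears in the command's --help output):
+
+        trcli -y -h https://example.testrail.io --project "My Project" \
+            parse_cucumber --title "Nightly BDD Run" --suite-id 86 -f results/cucumber.json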
""" environment.cmd = "parse_cucumber" environment.set_parameters(context) @@ -43,210 +34,243 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): if kwargs.get("verbose"): environment.verbose = True - # Validate feature upload options - upload_feature = kwargs.get("upload_feature", False) - feature_section_id = kwargs.get("feature_section_id") - - if upload_feature and not feature_section_id: - environment.elog("Error: --feature-section-id is required when using --upload-feature") - exit(1) - print_config(environment) try: - # Parse Cucumber JSON file - parsed_suites = CucumberParser(environment).parse_file() - - # Workflow: Upload feature file if requested - # Only create test cases if auto-creation is enabled - if upload_feature and environment.auto_creation_response: - environment.log("\n=== Phase 1: Creating BDD Test Cases ===") - - # Setup API client - from trcli.api.api_request_handler import ApiRequestHandler - from trcli.api.api_client import APIClient - import trcli - - environment.vlog("Initializing API client for BDD upload...") - uploader_metadata = APIClient.build_uploader_metadata(version=trcli.__version__) - api_client = APIClient( - host_name=environment.host, - verify=not environment.insecure, - verbose_logging_function=environment.vlog, - logging_function=environment.log, - uploader_metadata=uploader_metadata, - ) - - # Set credentials - api_client.username = environment.username - api_client.password = environment.password - api_client.api_key = environment.key - - # Create minimal suite for ApiRequestHandler - minimal_suite = parsed_suites[0] if parsed_suites else None - if not minimal_suite: - from trcli.data_classes.dataclass_testrail import TestRailSuite - - minimal_suite = TestRailSuite(name="Cucumber BDD", testsections=[]) - - # Set suite_id from environment if provided - if environment.suite_id: - minimal_suite.suite_id = environment.suite_id - - # Create ApiRequestHandler - api_handler = ApiRequestHandler( - environment=environment, - api_client=api_client, - suites_data=minimal_suite, - ) - - # Resolve project to get actual project_id - environment.log("Checking project. 
", new_line=False) - project_data = api_handler.get_project_data(environment.project, environment.project_id) - - # Validate project was found - if project_data.project_id == ProjectErrors.not_existing_project: - environment.elog(f"\n{project_data.error_message}") - exit(1) - elif project_data.project_id == ProjectErrors.other_error: - environment.elog(f"\nError checking project: {project_data.error_message}") - exit(1) - elif project_data.project_id == ProjectErrors.multiple_project_same_name: - environment.elog(f"\nError checking project: {project_data.error_message}") - exit(1) - - environment.log("Done.") - resolved_project_id = project_data.project_id - - # Get BDD template ID - environment.log("Getting BDD template ID...") - bdd_template_id, error_message = api_handler.get_bdd_template_id(resolved_project_id) - - if error_message: - environment.elog(f"Error getting BDD template: {error_message}") - exit(1) - - environment.vlog(f"Using BDD template ID: {bdd_template_id}") - - # Load Cucumber JSON to access raw feature data - parser = CucumberParser(environment) - with open(environment.file, "r", encoding="utf-8") as f: - cucumber_data = json.load(f) - - if not isinstance(cucumber_data, list) or not cucumber_data: - environment.elog("Error: Invalid Cucumber JSON format") - exit(1) - - # Create BDD test cases (one per feature) - environment.log("Creating BDD test cases from features...") - case_ids = [] - feature_scenario_counts = [] # Track how many scenarios per feature - - for feature in cucumber_data: - feature_name = feature.get("name", "Untitled Feature") - - # Count scenarios in this feature (excluding backgrounds) - scenario_count = sum( - 1 - for element in feature.get("elements", []) - if element.get("type", "") in ("scenario", "scenario_outline") - ) - - if scenario_count == 0: - environment.vlog(f"Skipping feature '{feature_name}' - no scenarios found") - continue - - # Generate complete .feature file content for this feature - environment.vlog(f"Generating .feature file for feature: {feature_name}") - feature_content = parser._generate_feature_content(feature) - - # Upload .feature file via add_bdd endpoint - environment.vlog(f"Uploading feature '{feature_name}' with {scenario_count} scenario(s)") - returned_case_ids, error_message = api_handler.add_bdd( - section_id=feature_section_id, feature_content=feature_content - ) + # Setup API client and handler (needed for both modes) + from trcli.api.api_request_handler import ApiRequestHandler + from trcli.api.api_client import APIClient + import trcli + + environment.vlog("Initializing API client...") + uploader_metadata = APIClient.build_uploader_metadata(version=trcli.__version__) + api_client = APIClient( + host_name=environment.host, + verify=not environment.insecure, + verbose_logging_function=environment.vlog, + logging_function=environment.log, + uploader_metadata=uploader_metadata, + ) + + # Set credentials + api_client.username = environment.username + api_client.password = environment.password + api_client.api_key = environment.key + + # Create minimal suite for ApiRequestHandler + from trcli.data_classes.dataclass_testrail import TestRailSuite + + minimal_suite = TestRailSuite(name="Cucumber BDD", testsections=[]) + if environment.suite_id: + minimal_suite.suite_id = environment.suite_id + + # Create ApiRequestHandler + api_handler = ApiRequestHandler( + environment=environment, + api_client=api_client, + suites_data=minimal_suite, + ) + + # Resolve project to get actual project_id (for use in BDD parsing) + 
environment.log("Checking project. ", new_line=False) + project_data = api_handler.get_project_data(environment.project, environment.project_id) + + # Validate project was found + if project_data.project_id == ProjectErrors.not_existing_project: + environment.elog(f"\n{project_data.error_message}") + exit(1) + elif project_data.project_id == ProjectErrors.other_error: + environment.elog(f"\nError checking project: {project_data.error_message}") + exit(1) + elif project_data.project_id == ProjectErrors.multiple_project_same_name: + environment.elog(f"\nError checking project: {project_data.error_message}") + exit(1) + + environment.log("Done.") + resolved_project_id = project_data.project_id + + # BDD Matching Mode: Set API handler for validation and caching + parser = CucumberParser(environment) + parser.set_api_handler(api_handler) + + # Determine auto-creation behavior: + # - With -n flag (auto_creation_response == False): Only match existing features + # - With -y flag (auto_creation_response == True): Auto-create missing features + # - Without flag (auto_creation_response == None): Auto-create by default for BDD + auto_create = environment.auto_creation_response != False + + if environment.auto_creation_response == False: + environment.vlog("Auto-creation disabled: Will only match existing BDD test cases") + else: + environment.vlog("Auto-creation enabled: Will create missing BDD test cases") + + parsed_suites = parser.parse_file( + bdd_matching_mode=True, + project_id=resolved_project_id, + suite_id=environment.suite_id, + auto_create=auto_create, + ) + + # Handle auto-creation of features in BDD matching mode + # auto_creation_response != False means: -y flag OR no flag (default to auto-create) + if environment.auto_creation_response != False: + # Check if there are any features that need to be created (case_id=-1) + features_to_create = [] + for suite in parsed_suites: + for section in suite.testsections: + for test_case in section.testcases: + if test_case.case_id == -1: + features_to_create.append({"section": section, "test_case": test_case}) + + if features_to_create: + environment.log(f"\n=== Auto-Creating {len(features_to_create)} Missing BDD Test Case(s) ===") + + # Load Cucumber JSON to access raw feature data + with open(environment.file, "r", encoding="utf-8") as f: + cucumber_data = json.load(f) + + # Get BDD template ID + environment.log("Getting BDD template ID...") + bdd_template_id, error_message = api_handler.get_bdd_template_id(resolved_project_id) if error_message: - environment.elog(f"Error creating BDD test case for feature '{feature_name}': {error_message}") - exit(1) - - if not returned_case_ids or len(returned_case_ids) == 0: - environment.elog(f"Error: add_bdd did not return a case ID for feature '{feature_name}'") + environment.elog(f"Error getting BDD template: {error_message}") exit(1) - case_id = returned_case_ids[0] # add_bdd returns list with one case ID - case_ids.append(case_id) - feature_scenario_counts.append(scenario_count) - environment.vlog(f" Created case ID: {case_id} (covers {scenario_count} scenario(s))") - - # Set automation_id on the created test case for future matching - # Use feature name as automation_id (one TestRail case = one feature) - automation_id = feature_name - success, error_message = api_handler.update_case_automation_id(case_id, automation_id) - - if not success: - environment.log(f" Warning: Failed to set automation_id: {error_message}") - else: - environment.vlog(f" Set automation_id: '{automation_id}'") - - 
environment.log(f"✓ Successfully created {len(case_ids)} BDD test case(s)") - environment.log(f" Case IDs: {', '.join(map(str, case_ids))}") - - # Map returned case IDs to parsed test cases - environment.vlog("\nMapping case IDs to test results...") - - # Map case IDs to sections (one case ID per feature/section) - # Each feature creates one test case in TestRail but may have multiple scenario results - total_mapped = 0 - if len(case_ids) != len(parsed_suites[0].testsections): - environment.elog( - f"Error: Mismatch between features ({len(case_ids)}) and parsed sections ({len(parsed_suites[0].testsections)})" - ) - exit(1) - - for section, case_id, scenario_count in zip( - parsed_suites[0].testsections, case_ids, feature_scenario_counts - ): - environment.vlog( - f"Mapping case ID {case_id} to section '{section.name}' ({len(section.testcases)} scenario(s))" + environment.vlog(f"Using BDD template ID: {bdd_template_id}") + + # Create each missing feature + created_case_ids = {} # Map feature name -> case_id + + for feature in cucumber_data: + feature_name = feature.get("name", "Untitled Feature") + normalized_name = parser._normalize_title(feature_name) + + # Check if this feature needs creation + needs_creation = any( + parser._normalize_title(item["section"].name) == normalized_name for item in features_to_create + ) + + if not needs_creation: + continue + + # Auto-create or fetch section for this feature + # Use feature name as section name (matching parse behavior) + section_name = feature_name + section_id = None + + # Try to find existing section by name + environment.vlog(f"Looking for section '{section_name}'...") + sections, error = api_handler._ApiRequestHandler__get_all_sections( + project_id=resolved_project_id, suite_id=environment.suite_id + ) + + if error: + environment.elog(f"Error fetching sections: {error}") + exit(1) + + for s in sections: + if s.get("name") == section_name: + section_id = s.get("id") + environment.vlog(f" Found existing section ID: {section_id}") + break + + # Create section if not found + if section_id is None: + environment.log(f"Creating section '{section_name}'...") + + # Use send_post to create section directly + section_body = {"suite_id": environment.suite_id, "name": section_name} + response = api_handler.client.send_post(f"add_section/{resolved_project_id}", section_body) + + if response.error_message: + environment.elog(f"Error creating section: {response.error_message}") + exit(1) + + section_id = response.response_text.get("id") + environment.vlog(f" Created section ID: {section_id}") + + # Generate feature content + environment.vlog(f"Generating .feature file for '{feature_name}'") + feature_content = parser._generate_feature_content(feature) + + # Upload feature via add_bdd endpoint + environment.log(f"Uploading feature '{feature_name}'...") + returned_case_ids, error_message = api_handler.add_bdd( + section_id=section_id, feature_content=feature_content + ) + + if error_message: + environment.elog(f"Error creating BDD test case: {error_message}") + exit(1) + + if not returned_case_ids or len(returned_case_ids) == 0: + environment.elog(f"Error: add_bdd did not return a case ID") + exit(1) + + case_id = returned_case_ids[0] + created_case_ids[normalized_name] = case_id + environment.log(f"Created case ID: C{case_id}") + + environment.log(f"Successfully created {len(created_case_ids)} BDD test case(s)") + + # Re-parse with the newly created case IDs in cache + environment.vlog("\nRe-parsing to match newly created cases...") + 
parser_for_results = CucumberParser(environment) + parser_for_results.set_api_handler(api_handler) + + # Build cache with newly created case IDs + temp_cache = created_case_ids.copy() + + # Also include existing cases from original parse + for suite in parsed_suites: + for section in suite.testsections: + for test_case in section.testcases: + if test_case.case_id != -1: + normalized = parser_for_results._normalize_title(section.name) + temp_cache[normalized] = test_case.case_id + + # Override cache + parser_for_results._bdd_case_cache = temp_cache + + # Re-parse in BDD matching mode with updated cache + parsed_suites = parser_for_results.parse_file( + bdd_matching_mode=True, + project_id=resolved_project_id, + suite_id=environment.suite_id, + auto_create=False, # No need to mark for creation again ) - # Assign the same case ID to ALL test cases (scenarios) in this section - for test_case in section.testcases: - test_case.case_id = case_id - if test_case.result: - test_case.result.case_id = case_id - total_mapped += 1 - - environment.vlog(f"Mapped {len(case_ids)} case ID(s) to {total_mapped} test result(s)") - - environment.log("\nProceeding to upload test results...") - elif upload_feature and not environment.auto_creation_response: - # Auto-creation is disabled, skip test case creation - environment.log("\n=== Skipping BDD Test Case Creation ===") - environment.log("Auto-creation disabled (-n flag). Will match scenarios using automation_id.") - - # Upload test results - environment.log("\n=== Phase 2: Uploading Test Results ===") + environment.vlog(f"Re-parsed with {len(temp_cache)} cached case(s)") # Ensure all suites have suite_id set from environment for suite in parsed_suites: if environment.suite_id and not suite.suite_id: suite.suite_id = environment.suite_id + # For BDD mode, bypass automation_id check by using NAME matcher + # BDD cases already have case_id set, so we don't need automation_id + original_case_matcher = environment.case_matcher + environment.case_matcher = MatchersParser.NAME + run_id = None for suite in parsed_suites: result_uploader = ResultsUploader(environment=environment, suite=suite) + # Set project to avoid duplicate "Checking project" call + result_uploader.project = project_data result_uploader.upload_results() if run_id is None and hasattr(result_uploader, "last_run_id"): run_id = result_uploader.last_run_id + # Restore original case matcher + environment.case_matcher = original_case_matcher + # Summary if run_id: - environment.log(f"\n✓ Results uploaded successfully to run ID: {run_id}") + environment.log(f"Results uploaded successfully to run ID: {run_id}") else: - environment.log("\n✓ Results processing completed") + environment.log("Results processing completed") except FileNotFoundError: environment.elog(f"Error: Cucumber JSON file not found: {environment.file}") diff --git a/trcli/constants.py b/trcli/constants.py index dc5f5f9..26f479b 100644 --- a/trcli/constants.py +++ b/trcli/constants.py @@ -101,6 +101,7 @@ - parse_cucumber: Cucumber JSON results (BDD) - import_gherkin: Upload .feature files to TestRail BDD - export_gherkin: Export BDD test cases as .feature files + - parse_gherkin: Parse Gherkin .feature file locally - parse_robot: Robot Framework XML Files - parse_openapi: OpenAPI YML Files - add_run: Create a new test run diff --git a/trcli/data_classes/dataclass_testrail.py b/trcli/data_classes/dataclass_testrail.py index 238d679..67b3e63 100644 --- a/trcli/data_classes/dataclass_testrail.py +++ b/trcli/data_classes/dataclass_testrail.py @@ 
-38,15 +38,14 @@ class TestRailResult: result_fields: Optional[dict] = field(default_factory=dict, skip=True) junit_result_unparsed: List = field(default=None, metadata={"serde_skip": True}) custom_step_results: List[TestRailSeparatedStep] = field(default_factory=list, skip_if_default=True) + custom_testrail_bdd_scenario_results: List[TestRailSeparatedStep] = field( + default_factory=list, skip_if_default=True + ) def __post_init__(self): if self.junit_result_unparsed is not None: - self.status_id = self.calculate_status_id_from_junit_element( - self.junit_result_unparsed - ) - self.comment = self.get_comment_from_junit_element( - self.junit_result_unparsed - ) + self.status_id = self.calculate_status_id_from_junit_element(self.junit_result_unparsed) + self.comment = self.get_comment_from_junit_element(self.junit_result_unparsed) if self.elapsed is not None: self.elapsed = self.proper_format_for_elapsed(self.elapsed) @@ -71,9 +70,7 @@ def calculate_status_id_from_junit_element(junit_result: List) -> int: def get_comment_from_junit_element(junit_result: List) -> str: if len(junit_result) == 0: return "" - elif not any( - [junit_result[0].type, junit_result[0].message, junit_result[0].text] - ): + elif not any([junit_result[0].type, junit_result[0].message, junit_result[0].text]): return "" else: return f"Type: {junit_result[0].type or ''}\nMessage: {junit_result[0].message or ''}\nText: {junit_result[0].text or ''}" @@ -203,12 +200,8 @@ class TestRailSection: parent_id: int = field(default=None, skip_if_default=True) description: str = field(default=None, skip_if_default=True) section_id: int = field(default=None, metadata={"serde_skip": True}) - testcases: List[TestRailCase] = field( - default_factory=list, metadata={"serde_skip": True} - ) - properties: List[TestRailProperty] = field( - default_factory=list, metadata={"serde_skip": True} - ) + testcases: List[TestRailCase] = field(default_factory=list, metadata={"serde_skip": True}) + properties: List[TestRailProperty] = field(default_factory=list, metadata={"serde_skip": True}) def __getitem__(self, item): return getattr(self, item) @@ -231,9 +224,7 @@ class TestRailSuite: name: str suite_id: int = field(default=None, skip_if_default=True) description: str = field(default=None, skip_if_default=True) - testsections: List[TestRailSection] = field( - default_factory=list, metadata={"serde_skip": True} - ) + testsections: List[TestRailSection] = field(default_factory=list, metadata={"serde_skip": True}) source: str = field(default=None, metadata={"serde_skip": True}) def __post_init__(self): diff --git a/trcli/readers/cucumber_json.py b/trcli/readers/cucumber_json.py index 4a1ffd3..965f60d 100644 --- a/trcli/readers/cucumber_json.py +++ b/trcli/readers/cucumber_json.py @@ -20,15 +20,34 @@ class CucumberParser(FileParser): def __init__(self, environment: Environment): super().__init__(environment) self.case_matcher = environment.case_matcher - - def parse_file(self) -> List[TestRailSuite]: + self._bdd_case_cache = None # Cache for BDD cases (populated on first use) + self._api_handler = None # Will be set when BDD matching mode is needed + + def parse_file( + self, + bdd_matching_mode: bool = False, + project_id: Optional[int] = None, + suite_id: Optional[int] = None, + auto_create: bool = False, + ) -> List[TestRailSuite]: """Parse Cucumber JSON results file and convert to TestRailSuite structure + Args: + bdd_matching_mode: If True, use BDD matching mode (group scenarios under existing BDD cases) + project_id: TestRail project ID (required 
for BDD matching mode) + suite_id: TestRail suite ID (required for BDD matching mode) + auto_create: If True, mark features for auto-creation when not found (BDD matching mode only) + Returns: List of TestRailSuite objects with test cases and results """ self.env.log(f"Parsing Cucumber JSON file: {self.filename}") + if bdd_matching_mode: + self.env.log("Using BDD matching mode (matching against existing BDD test cases)") + if not project_id or not suite_id: + raise ValueError("project_id and suite_id are required for BDD matching mode") + # Read and parse the JSON file with open(self.filepath, "r", encoding="utf-8") as f: cucumber_data = json.load(f) @@ -40,11 +59,26 @@ def parse_file(self) -> List[TestRailSuite]: # Parse features into TestRail structure sections = [] for feature in cucumber_data: - feature_sections = self._parse_feature(feature) + feature_sections = self._parse_feature(feature, bdd_matching_mode, project_id, suite_id, auto_create) sections.extend(feature_sections) - cases_count = sum(len(section.testcases) for section in sections) - self.env.log(f"Processed {cases_count} test cases in {len(sections)} sections.") + # Generate appropriate message based on mode + if bdd_matching_mode: + # In BDD matching mode: count scenarios from original data + scenario_count = sum( + sum( + 1 + for element in feature.get("elements", []) + if element.get("type", "") in ("scenario", "scenario_outline") + ) + for feature in cucumber_data + ) + feature_word = "feature file" if len(cucumber_data) == 1 else "feature files" + self.env.log(f"Processed {scenario_count} scenarios in {len(cucumber_data)} {feature_word}.") + else: + # Standard mode: count test cases and sections + cases_count = sum(len(section.testcases) for section in sections) + self.env.log(f"Processed {cases_count} test cases in {len(sections)} sections.") # Create suite suite_name = self.env.suite_name if self.env.suite_name else "Cucumber Test Results" @@ -56,11 +90,22 @@ def parse_file(self) -> List[TestRailSuite]: return [testrail_suite] - def _parse_feature(self, feature: Dict[str, Any]) -> List[TestRailSection]: + def _parse_feature( + self, + feature: Dict[str, Any], + bdd_matching_mode: bool = False, + project_id: Optional[int] = None, + suite_id: Optional[int] = None, + auto_create: bool = False, + ) -> List[TestRailSection]: """Parse a single Cucumber feature into TestRail sections Args: feature: Feature object from Cucumber JSON + bdd_matching_mode: If True, parse as single BDD case (group scenarios) + project_id: TestRail project ID (required for BDD matching mode) + suite_id: TestRail suite ID (required for BDD matching mode) + auto_create: If True, mark cases for auto-creation when not found Returns: List of TestRailSection objects @@ -71,14 +116,21 @@ def _parse_feature(self, feature: Dict[str, Any]) -> List[TestRailSection]: # Create a section for this feature section = TestRailSection(name=feature_name, testcases=[]) - # Parse scenarios/scenario outlines - for element in feature.get("elements", []): - element_type = element.get("type", "") + # Branch: BDD matching mode vs. 
standard mode + if bdd_matching_mode: + # BDD Matching Mode: Parse feature as single BDD case with grouped scenarios + test_case = self._parse_feature_as_bdd_case(feature, project_id, suite_id, auto_create) + if test_case: + section.testcases.append(test_case) + else: + # Standard Mode: Parse each scenario as separate test case + for element in feature.get("elements", []): + element_type = element.get("type", "") - if element_type in ("scenario", "scenario_outline"): - test_case = self._parse_scenario(element, feature_name, feature_tags) - if test_case: - section.testcases.append(test_case) + if element_type in ("scenario", "scenario_outline"): + test_case = self._parse_scenario(element, feature_name, feature_tags) + if test_case: + section.testcases.append(test_case) return [section] if section.testcases else [] @@ -585,3 +637,340 @@ def _generate_rule_content(self, rule: Dict[str, Any]) -> str: lines.append(" " + line if line else "") return "\n".join(lines) + + def _normalize_title(self, title: str) -> str: + """Normalize title for robust matching + + Converts to lowercase, strips whitespace, and removes special characters. + Hyphens, underscores, and special chars are converted to spaces for word boundaries. + + Args: + title: The title to normalize + + Returns: + Normalized title string + """ + import re + + # Convert to lowercase and strip + normalized = title.lower().strip() + # Replace hyphens, underscores, and special chars with spaces + normalized = re.sub(r"[^a-z0-9\s]", " ", normalized) + # Collapse multiple spaces to single space + normalized = re.sub(r"\s+", " ", normalized) + # Final strip + return normalized.strip() + + def set_api_handler(self, api_handler): + """Set API handler for BDD matching mode + + Args: + api_handler: ApiRequestHandler instance for API calls + """ + self._api_handler = api_handler + + def _get_bdd_cases_cache(self, project_id: int, suite_id: int) -> Dict[str, int]: + """Fetch and cache all BDD cases in suite (one-time batch operation) + + This method fetches all test cases once and caches BDD cases for fast lookups. + Performance: 40 API requests for 10K cases (due to pagination), then O(1) lookups. + + Args: + project_id: TestRail project ID + suite_id: TestRail suite ID + + Returns: + Dictionary mapping normalized_title → case_id for BDD cases only + """ + if self._bdd_case_cache is not None: + return self._bdd_case_cache + + if self._api_handler is None: + self.env.elog("Error: API handler not set. 
Cannot fetch BDD cases.") + return {} + + self.env.vlog(f"Fetching all BDD cases for suite {suite_id} (one-time operation)...") + + # Fetch ALL cases in suite (with pagination handled internally) + all_cases, error = self._api_handler._ApiRequestHandler__get_all_cases(project_id=project_id, suite_id=suite_id) + + if error: + self.env.elog(f"Error fetching cases: {error}") + return {} + + # Build hash table index: normalized_title → case_id (BDD cases only) + # Also track duplicates for warning + bdd_cache = {} + duplicate_tracker = {} # normalized_title → list of case IDs + bdd_count = 0 + + for case in all_cases: + # Filter to BDD template cases only + if case.get("custom_testrail_bdd_scenario"): + normalized = self._normalize_title(case["title"]) + case_id = case["id"] + + # Track duplicates + if normalized in duplicate_tracker: + duplicate_tracker[normalized].append(case_id) + else: + duplicate_tracker[normalized] = [case_id] + + bdd_cache[normalized] = case_id + bdd_count += 1 + + # Warn about duplicates + for normalized_title, case_ids in duplicate_tracker.items(): + if len(case_ids) > 1: + # Find original title (use first case's title) + original_title = None + for case in all_cases: + if case["id"] == case_ids[0]: + original_title = case["title"] + break + + case_ids_str = ", ".join([f"C{cid}" for cid in case_ids]) + self.env.elog(f"Warning: Multiple BDD cases found with title '{original_title}': {case_ids_str}") + self.env.elog(f" Using case ID C{case_ids[-1]} (last match)") + + self.env.vlog(f"Cached {bdd_count} BDD cases from {len(all_cases)} total cases") + self._bdd_case_cache = bdd_cache + return bdd_cache + + def _find_case_by_title(self, feature_name: str, project_id: int, suite_id: int) -> Optional[int]: + """Find BDD case by feature name using cached index (O(1) lookup) + + Args: + feature_name: Feature name from Cucumber JSON + project_id: TestRail project ID + suite_id: TestRail suite ID + + Returns: + Case ID if found, None otherwise + """ + cache = self._get_bdd_cases_cache(project_id, suite_id) + normalized = self._normalize_title(feature_name) + return cache.get(normalized) + + def _extract_case_id_from_tags(self, feature_tags: List[str], scenario_tags: List[str]) -> Optional[int]: + """Extract case ID from @C tags + + Priority: Feature-level tags > Scenario-level tags + This ensures feature-level @C123 tag applies to all scenarios. 
+ + Args: + feature_tags: Tags from feature level + scenario_tags: Tags from scenario level + + Returns: + Case ID if found, None otherwise + """ + # Priority 1: Feature-level tags (applies to all scenarios) + for tag in feature_tags: + if tag.startswith("@C") or tag.startswith("@c"): + try: + return int(tag[2:]) + except ValueError: + pass + + # Priority 2: Scenario-level tags (fallback) + for tag in scenario_tags: + if tag.startswith("@C") or tag.startswith("@c"): + try: + return int(tag[2:]) + except ValueError: + pass + + return None + + def _validate_bdd_case_exists(self, case_id: int) -> Tuple[bool, Optional[str]]: + """Validate that case exists and is a BDD template case + + Args: + case_id: TestRail case ID to validate + + Returns: + Tuple of (is_valid, error_message) + - is_valid: True if case exists and is BDD template + - error_message: Error description if validation fails, None otherwise + """ + if self._api_handler is None: + return False, "API handler not set" + + try: + # Fetch case details from TestRail API (use api_handler's client) + response = self._api_handler.client.send_get(f"get_case/{case_id}") + + # Check if request failed or returned no data + if response.error_message or not response.response_text: + error_msg = response.error_message if response.error_message else "Case not found" + return False, f"Case C{case_id} not found: {error_msg}" + + case_data = response.response_text + + # Validate it's a BDD template case + if not case_data.get("custom_testrail_bdd_scenario"): + return False, f"Case C{case_id} is not a BDD template case" + + return True, None + + except Exception as e: + return False, f"Error validating case C{case_id}: {str(e)}" + + def _parse_feature_as_bdd_case( + self, feature: Dict[str, Any], project_id: int, suite_id: int, auto_create: bool = False + ) -> Optional[TestRailCase]: + """Parse Cucumber feature as single BDD test case with multiple scenario results + + This method is used in BDD matching mode (WITHOUT --upload-feature). + It groups all scenarios from a feature under a single BDD test case. + + Workflow: + 1. Extract case ID from @C tags (feature > scenario priority) + 2. Fallback to feature name matching via cached lookup + 3. If not found and auto_create=True: Return special marker for auto-creation + 4. Validate case exists and is BDD template + 5. Parse all scenarios as BDD scenario results + 6. Aggregate status (fail-fast: any scenario failure → feature fails) + 7. 
Create single TestRailCase with custom_testrail_bdd_scenario_results + + Args: + feature: Feature object from Cucumber JSON + project_id: TestRail project ID + suite_id: TestRail suite ID + auto_create: If True, mark for auto-creation when not found + + Returns: + TestRailCase with BDD scenario results, or None if case not found and auto_create=False + Returns TestRailCase with case_id=-1 if not found and auto_create=True (marker for creation) + """ + feature_name = feature.get("name", "Untitled Feature") + feature_tags = self._extract_tags(feature.get("tags", [])) + + # Step 1: Try to extract case ID from tags + case_id = None + for tag in feature_tags: + if tag.startswith("@C") or tag.startswith("@c"): + try: + case_id = int(tag[2:]) + self.env.vlog(f"Found case ID from feature tag: C{case_id}") + break + except ValueError: + pass + + # Step 2: Fallback to feature name matching (cached lookup) + if case_id is None: + case_id = self._find_case_by_title(feature_name, project_id, suite_id) + if case_id: + self.env.vlog(f"Found case ID from feature name '{feature_name}': C{case_id}") + + # Step 3: Handle case not found + if case_id is None: + if auto_create: + self.env.log(f"Feature '{feature_name}' not found in TestRail - will auto-create") + # Return special marker (case_id=-1) to indicate this needs creation + # Store feature data for later creation + case_id = -1 # Marker for auto-creation + else: + self.env.elog(f"Error: No BDD case found for feature '{feature_name}'") + self.env.elog(f" Add @C tag to feature or ensure case exists with title '{feature_name}'") + return None + + # Step 4: Validate case exists (skip validation if marked for creation) + if case_id != -1: + is_valid, error_message = self._validate_bdd_case_exists(case_id) + if not is_valid: + self.env.elog(f"Error validating case for feature '{feature_name}': {error_message}") + return None + + # Step 4: Parse all scenarios as BDD scenario results + bdd_scenario_results = [] + overall_status = 1 # Passed by default (fail-fast logic applied below) + total_elapsed = 0 + + for element in feature.get("elements", []): + element_type = element.get("type", "") + + if element_type in ("scenario", "scenario_outline"): + scenario_name = element.get("name", "Untitled Scenario") + scenario_tags = self._extract_tags(element.get("tags", [])) + + # Parse steps to determine scenario status + steps = element.get("steps", []) + _, scenario_status = self._parse_steps(steps) + + # Calculate elapsed time for this scenario + scenario_elapsed = 0 + for step in steps: + result = step.get("result", {}) + duration = result.get("duration", 0) + if duration: + scenario_elapsed += duration + + total_elapsed += scenario_elapsed + + # Create BDD scenario result (using TestRailSeparatedStep structure) + bdd_scenario = TestRailSeparatedStep(content=scenario_name) + bdd_scenario.status_id = scenario_status + bdd_scenario_results.append(bdd_scenario) + + # Fail-fast: If any scenario fails, entire feature fails + if scenario_status == 5: # Failed + overall_status = 5 + elif scenario_status == 4 and overall_status != 5: # Skipped + overall_status = 4 + elif scenario_status == 3 and overall_status == 1: # Untested/Pending + overall_status = 3 + + # Step 5: Calculate elapsed time (pass as numeric seconds, TestRailResult.__post_init__ will format) + elapsed_time = None + if total_elapsed > 0: + total_seconds = total_elapsed / 1_000_000_000 + elapsed_time = str(total_seconds) # Pass as string number, will be formatted by __post_init__ + + # Step 6: Build comment 
from failures (aggregate all scenario failures) + comment_parts = [] + for element in feature.get("elements", []): + if element.get("type", "") in ("scenario", "scenario_outline"): + scenario_name = element.get("name", "Untitled Scenario") + steps = element.get("steps", []) + + # Check if scenario failed + scenario_failed = False + for step in steps: + result = step.get("result", {}) + if result.get("status", "").lower() == "failed": + scenario_failed = True + break + + if scenario_failed: + failure_comment = self._build_comment_from_failures(steps) + if failure_comment: + comment_parts.append(f"Scenario: {scenario_name}\n{failure_comment}") + + comment = "\n\n".join(comment_parts) if comment_parts else "" + + # Step 7: Create result with BDD scenario results + result = TestRailResult( + case_id=case_id, + status_id=overall_status, + comment=comment, + elapsed=elapsed_time, + custom_testrail_bdd_scenario_results=bdd_scenario_results, # Use BDD field + ) + + # Step 8: Create test case + test_case = TestRailCase( + title=TestRailCaseFieldsOptimizer.extract_last_words( + feature_name, TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH + ), + case_id=case_id, + result=result, + ) + + self.env.vlog( + f"Parsed feature '{feature_name}' as BDD case C{case_id} " + f"with {len(bdd_scenario_results)} scenarios (status: {overall_status})" + ) + + return test_case From 963ff95317c15c0913300099fc65f23722a56469 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 13 Jan 2026 18:08:25 +0800 Subject: [PATCH 12/33] TRCLI-193: Updated test data and affected unit tests for parse_cucumber command --- README.md | 125 +++-- tests/pytest.ini | 1 + tests/test_cmd_parse_cucumber.py | 238 +++++--- tests/test_cucumber_bdd_matching.py | 508 ++++++++++++++++++ .../test_data/api_data_provider_test_data.py | 37 +- tests/test_data/json/api_request_handler.json | 2 +- .../api_request_handler_long_testcase.json | 2 +- .../custom_automation_id_in_property.json | 193 +++---- tests/test_data/json/data_provider.json | 2 +- .../data_provider_duplicated_case_names.json | 2 +- .../json/junit5_parentheses_test.json | 202 +++---- tests/test_data/json/milliseconds.json | 6 +- tests/test_data/json/no_root.json | 6 +- tests/test_data/json/required_only.json | 6 +- .../json/robotframework_id_in_name_RF50.json | 12 +- .../json/robotframework_id_in_name_RF70.json | 12 +- .../robotframework_id_in_property_RF50.json | 12 +- .../robotframework_id_in_property_RF70.json | 12 +- .../json/robotframework_simple_RF50.json | 12 +- .../json/robotframework_simple_RF70.json | 12 +- tests/test_data/json/root.json | 9 +- tests/test_data/json/root_id_in_name.json | 9 +- tests/test_data/json/root_id_in_property.json | 12 +- tests/test_data/json/sauce1.json | 8 +- tests/test_data/json/sauce2.json | 8 +- .../update_case_result_single_with_id.json | 2 +- .../update_case_result_single_without_id.json | 2 +- tests/test_results_uploader.py | 123 ++--- 28 files changed, 1106 insertions(+), 469 deletions(-) create mode 100644 tests/test_cucumber_bdd_matching.py diff --git a/README.md b/README.md index 9fa3ea6..00bf828 100644 --- a/README.md +++ b/README.md @@ -40,6 +40,7 @@ Supported and loaded modules: - parse_cucumber: Cucumber JSON results (BDD) - import_gherkin: Upload .feature files to TestRail BDD - export_gherkin: Export BDD test cases as .feature files + - parse_gherkin: Parse Gherkin .feature file locally - parse_robot: Robot Framework XML Files - parse_openapi: OpenAPI YML Files - add_run: Create a new test run @@ -360,6 +361,9 @@ Usage: trcli 
parse_cucumber [OPTIONS] Parse Cucumber JSON results and upload to TestRail + This command parses Cucumber JSON test results and uploads them to TestRail. + Uses BDD matching mode to match features by name and auto-create missing test cases. + Options: -f, --file Filename and path. --close-run Close the newly created run @@ -377,8 +381,6 @@ Options: --case-fields List of case fields and values for new test cases creation. --result-fields List of result fields and values for test results creation. --allow-ms Allows using milliseconds for elapsed times. - --upload-feature Generate and upload .feature file to create/update test cases via BDD endpoint. - --feature-section-id Section ID for uploading .feature file (required if --upload-feature is used). [x>=1] -v, --verbose Enable verbose logging output. --help Show this message and exit. ``` @@ -443,21 +445,41 @@ Options: | `step` | Test Step | Steps with results become step results | | `tags` | Case Tags/Refs | Tags like @smoke, @C123 map to TestRail fields | -#### Two Workflows for BDD Test Results +#### BDD Matching Mode for Test Results + +The `parse_cucumber` command uses **BDD matching mode** to intelligently match Cucumber features to TestRail BDD test cases by feature name. This provides a seamless workflow for uploading BDD test results. + +**Key Features:** +- **Feature Name Matching**: Automatically matches Cucumber features to TestRail BDD test cases by feature name +- **Auto-Creation**: Automatically creates missing BDD test cases (can be controlled with flags) +- **Section Auto-Creation**: Creates TestRail sections on-the-fly if they don't exist +- **Batch Pre-fetching**: Efficiently fetches all BDD cases once and caches them for O(1) lookups +- **Duplicate Detection**: Warns when multiple BDD cases have the same feature name + +#### Auto-Creation Flags -##### Workflow 1: Upload Results Only (Code-First) +Control how the TestRail CLI handles missing BDD test cases: -Use this workflow when test cases already exist in TestRail and you want to match them using automation_id. 
+| Flag | Behavior | Use Case | +|------|----------|----------| +| **No flag** (default) | Auto-creates missing BDD test cases | Recommended for most workflows - creates cases automatically | +| `-y` or `--yes` | Auto-creates without prompting | CI/CD environments - same as default for BDD | +| `-n` or `--no` | Strict matching only - errors if not found | When you want to ensure all features exist in TestRail first | +#### Usage Examples + +**Default behavior (auto-create):** ```shell -# Upload results to existing test cases +# Auto-create missing BDD test cases (default) $ trcli parse_cucumber -f cucumber-results.json \ --project "Your Project" \ --suite-id 2 \ - --title "BDD Test Run" \ - -n + --title "BDD Test Run" +``` -# With automation (auto-create test cases if missing) +**Explicit auto-creation with -y flag:** +```shell +# Same as default - auto-create missing test cases $ trcli parse_cucumber -f cucumber-results.json \ --project "Your Project" \ --suite-id 2 \ @@ -465,65 +487,86 @@ $ trcli parse_cucumber -f cucumber-results.json \ -y ``` -**How it works:** -- Parser creates automation_id from feature name + scenario name -- TestRail CLI matches scenarios to existing cases via automation_id -- Results are uploaded to matched test cases -- With `-y`: Creates new test cases if no match found -- With `-n`: Skips scenarios without matching test cases - -##### Workflow 2: Create BDD Test Cases + Upload Results (Specification-First) - -Use this workflow to automatically create BDD test cases from Cucumber results using TestRail's BDD endpoint. - +**Strict matching mode with -n flag:** ```shell -# Create BDD test cases and upload results +# Only match existing BDD test cases - error if not found $ trcli parse_cucumber -f cucumber-results.json \ --project "Your Project" \ --suite-id 2 \ - --upload-feature \ - --feature-section-id 123 \ --title "BDD Test Run" \ - -y + -n ``` -**How it works:** -1. Parses Cucumber JSON results -2. Generates complete .feature files (one per feature) -3. Uploads .feature files to TestRail via `add_bdd` endpoint -4. TestRail creates BDD test cases with Gherkin content -5. Maps created case IDs to test results -6. Uploads all scenario results to their respective test cases -7. Sets automation_id on created test cases for future matching +**How BDD matching works:** +1. Fetches all BDD test cases from the specified suite (one-time batch operation) +2. Normalizes feature names for matching (case-insensitive, whitespace-normalized) +3. For each feature in Cucumber JSON: + - Tries to find existing BDD test case by feature name + - If found: Uses that case ID and uploads scenario results + - If not found and auto-create enabled: Creates new BDD test case with complete Gherkin content + - If not found and auto-create disabled (-n): Shows error and exits +4. Aggregates all scenario results per feature +5. Uploads results to TestRail with scenario-level detail + +**Important Notes:** +- Feature matching is done by **feature name** (not automation_id or case ID tags) +- If multiple BDD cases have the same feature name, a warning is shown +- Sections are created automatically if they don't exist +- Each feature becomes one BDD test case with multiple scenario results #### Case Matching for BDD Tests -BDD test matching works similarly to JUnit, with automation_id generated from your test structure: +The `parse_cucumber` command uses **feature name matching** to link Cucumber features to TestRail BDD test cases. 
This is different from the automation_id approach used in JUnit. -**Automation ID Format:** -``` -. -``` +**Feature Name Matching:** +- TestRail CLI normalizes and compares feature names from Cucumber JSON to BDD test case titles in TestRail +- Normalization: case-insensitive, whitespace-normalized (e.g., "User Login" matches "user login") +- One feature = one BDD test case with multiple scenario results **Example:** -``` +```gherkin +# Cucumber JSON feature Feature: User Login Scenario: Successful login with valid credentials + Given I am on the login page + When I enter valid credentials + Then I should see the dashboard + + Scenario: Failed login with invalid password + Given I am on the login page + When I enter invalid credentials + Then I should see an error message -Automation ID: User Login.Successful login with valid credentials +# TestRail +# - One BDD test case with title "User Login" +# - Contains both scenarios with individual pass/fail status +# - Section name: "User Login" (auto-created if needed) ``` -You can also use Case ID matching with `@C` tags: +**Using @C Tags (Optional):** + +You can also explicitly specify case IDs using `@C` tags at the feature level: ```gherkin +@C123 Feature: User Login - @C123 Scenario: Successful login with valid credentials Given I am on the login page When I enter valid credentials Then I should see the dashboard ``` +**Tag Priority:** +1. `@C` tags at feature level (if present) +2. Feature name matching (default behavior) + +**Duplicate Name Handling:** + +If multiple BDD test cases have the same feature name, the CLI will: +- Show a warning: `Warning: Multiple BDD cases found with title 'User Login': C101, C202, C303` +- Use the last matching case ID +- Recommend ensuring unique feature names in TestRail + ### Importing Gherkin Feature Files The `import_gherkin` command allows you to upload BDD test cases in TestRail from existing .feature files. 
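
The normalization and cached lookup described above (case-insensitive, whitespace-normalized matching, one batch fetch, last duplicate wins) can be pictured with a small standalone sketch. The helper names `normalize_title`, `build_bdd_case_cache`, and `find_case_by_feature_name` below are illustrative only, not the CLI's internal API; the sketch simply mirrors the documented behaviour.

```python
import re
from typing import Dict, List, Optional

def normalize_title(title: str) -> str:
    """Lowercase, replace runs of non-alphanumeric characters with a space, and trim."""
    return re.sub(r"[^a-z0-9]+", " ", title.lower()).strip()

def build_bdd_case_cache(cases: List[dict]) -> Dict[str, int]:
    """Map normalized BDD case titles to case IDs.

    Later duplicates overwrite earlier ones, mirroring the
    "use the last matching case ID" behaviour described above.
    """
    cache: Dict[str, int] = {}
    for case in cases:
        if case.get("custom_testrail_bdd_scenario"):  # keep BDD-template cases only
            cache[normalize_title(case["title"])] = case["id"]
    return cache

def find_case_by_feature_name(cache: Dict[str, int], feature_name: str) -> Optional[int]:
    """O(1) lookup of a Cucumber feature name against the cached titles."""
    return cache.get(normalize_title(feature_name))

# Example: "User-Login", "user_login" and "USER LOGIN" all resolve to the same case.
cache = build_bdd_case_cache([
    {"id": 101, "title": "User Login", "custom_testrail_bdd_scenario": "Scenario: Login"},
    {"id": 102, "title": "Product Search", "custom_testrail_bdd_scenario": None},  # not a BDD case
])
assert find_case_by_feature_name(cache, "User-Login") == 101
assert find_case_by_feature_name(cache, "Product Search") is None
```

Building the cache once per suite is what keeps result uploads fast: each feature in the Cucumber JSON then costs a single dictionary lookup instead of an extra API call.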
diff --git a/tests/pytest.ini b/tests/pytest.ini index 9d5f3be..1115013 100644 --- a/tests/pytest.ini +++ b/tests/pytest.ini @@ -18,3 +18,4 @@ markers = cmd_parse_cucumber: tests for parse_cucumber command parse_gherkin: tests for gherkin parser parse_cucumber: tests for cucumber parser + cucumber_bdd_matching: tests for cucumber bdd matching diff --git a/tests/test_cmd_parse_cucumber.py b/tests/test_cmd_parse_cucumber.py index 1c2cfea..839ab06 100644 --- a/tests/test_cmd_parse_cucumber.py +++ b/tests/test_cmd_parse_cucumber.py @@ -27,10 +27,28 @@ def setup_method(self): self.environment.auto_creation_response = True # Enable auto-creation for tests @pytest.mark.cmd_parse_cucumber + @patch("trcli.api.api_request_handler.ApiRequestHandler") + @patch("trcli.api.api_client.APIClient") @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") - def test_parse_cucumber_workflow1_results_only(self, mock_parser_class, mock_uploader_class): + def test_parse_cucumber_workflow1_results_only( + self, mock_parser_class, mock_uploader_class, mock_api_client_class, mock_api_handler_class + ): """Test Workflow 1: Parse and upload results only (no feature upload)""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API handler + mock_api_handler = MagicMock() + mock_api_handler_class.return_value = mock_api_handler + + # Mock project data resolution + mock_project_data = MagicMock() + mock_project_data.project_id = 1 + mock_api_handler.get_project_data.return_value = mock_project_data + # Mock parser mock_parser = MagicMock() mock_parser_class.return_value = mock_parser @@ -55,6 +73,7 @@ def test_parse_cucumber_workflow1_results_only(self, mock_parser_class, mock_upl @pytest.mark.cmd_parse_cucumber @patch("trcli.api.api_request_handler.ApiRequestHandler") + @patch("trcli.api.api_client.APIClient") @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") @patch( @@ -62,21 +81,35 @@ def test_parse_cucumber_workflow1_results_only(self, mock_parser_class, mock_upl new_callable=mock.mock_open, read_data='[{"name":"Test Feature","elements":[{"type":"scenario","name":"Test Scenario"}]}]', ) - def test_parse_cucumber_workflow2_upload_feature( - self, mock_open, mock_parser_class, mock_uploader_class, mock_api_handler_class + def test_parse_cucumber_auto_create_missing_features( + self, mock_open, mock_parser_class, mock_uploader_class, mock_api_client_class, mock_api_handler_class ): - """Test Workflow 2: Create BDD test cases per feature, then upload results""" + """Test auto-creation of missing BDD test cases (default behavior)""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API handler + mock_api_handler = MagicMock() + mock_api_handler_class.return_value = mock_api_handler + + # Mock project data resolution + mock_project_data = MagicMock() + mock_project_data.project_id = 1 + mock_api_handler.get_project_data.return_value = mock_project_data + # Mock parser mock_parser = MagicMock() mock_parser_class.return_value = mock_parser - # Mock suite with test cases + # First parse returns case_id=-1 (needs creation) mock_suite = MagicMock() mock_suite.name = "Test Suite" mock_section = MagicMock() mock_section.name = "Test 
Feature" mock_case = MagicMock() - mock_case.case_id = None + mock_case.case_id = -1 # Marker for auto-creation mock_case.result = MagicMock() mock_section.testcases = [mock_case] mock_suite.testsections = [mock_section] @@ -84,19 +117,18 @@ def test_parse_cucumber_workflow2_upload_feature( # Mock _generate_feature_content to return Gherkin content mock_parser._generate_feature_content.return_value = "Feature: Test\n Scenario: Test\n Given test step\n" + mock_parser._normalize_title.return_value = "test feature" - # Mock API handler - mock_api_handler = MagicMock() - mock_api_handler_class.return_value = mock_api_handler - - # Mock project data resolution - mock_project_data = MagicMock() - mock_project_data.project_id = 1 - mock_api_handler.get_project_data.return_value = mock_project_data + # Mock section fetch and creation + mock_api_handler._ApiRequestHandler__get_all_sections.return_value = ([], None) + mock_response = MagicMock() + mock_response.error_message = None + mock_response.response_text = {"id": 456} + mock_api_handler.client.send_post.return_value = mock_response - mock_api_handler.get_bdd_template_id.return_value = (2, "") # BDD template ID = 2 - mock_api_handler.add_bdd.return_value = ([101], "") # Returns list with case ID = 101 - mock_api_handler.update_case_automation_id.return_value = (True, "") # Success updating automation_id + # Mock BDD template and add_bdd + mock_api_handler.get_bdd_template_id.return_value = (2, None) + mock_api_handler.add_bdd.return_value = ([101], None) # Mock uploader mock_uploader = MagicMock() @@ -110,9 +142,6 @@ def test_parse_cucumber_workflow2_upload_feature( self.test_cucumber_path, "--suite-id", "2", - "--upload-feature", - "--feature-section-id", - "456", "--title", "Test Run", ], @@ -122,35 +151,30 @@ def test_parse_cucumber_workflow2_upload_feature( assert result.exit_code == 0 mock_api_handler.get_bdd_template_id.assert_called_once() mock_api_handler.add_bdd.assert_called_once() - mock_api_handler.update_case_automation_id.assert_called_once() - mock_uploader.upload_results.assert_called() - - @pytest.mark.cmd_parse_cucumber - def test_parse_cucumber_upload_feature_requires_section_id(self): - """Test that --upload-feature requires --feature-section-id""" - result = self.runner.invoke( - cmd_parse_cucumber.cli, - [ - "--file", - self.test_cucumber_path, - "--suite-id", - "2", - "--upload-feature", - # Missing --feature-section-id - "--title", - "Test Run", - ], - obj=self.environment, - ) - - assert result.exit_code == 1 - assert "feature-section-id is required" in result.output.lower() @pytest.mark.cmd_parse_cucumber + @patch("trcli.api.api_request_handler.ApiRequestHandler") + @patch("trcli.api.api_client.APIClient") @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") - def test_parse_cucumber_upload_feature_with_no_flag(self, mock_parser_class, mock_uploader_class): - """Test that -n flag skips test case creation with --upload-feature""" + def test_parse_cucumber_with_n_flag( + self, mock_parser_class, mock_uploader_class, mock_api_client_class, mock_api_handler_class + ): + """Test that -n flag only matches existing BDD test cases""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API handler + mock_api_handler = MagicMock() + mock_api_handler_class.return_value = mock_api_handler + + # Mock project data resolution + 
mock_project_data = MagicMock() + mock_project_data.project_id = 1 + mock_api_handler.get_project_data.return_value = mock_project_data + # Mock parser mock_parser = MagicMock() mock_parser_class.return_value = mock_parser @@ -177,9 +201,6 @@ def test_parse_cucumber_upload_feature_with_no_flag(self, mock_parser_class, moc self.test_cucumber_path, "--suite-id", "2", - "--upload-feature", - "--feature-section-id", - "456", "--title", "Test Run", ], @@ -187,9 +208,8 @@ def test_parse_cucumber_upload_feature_with_no_flag(self, mock_parser_class, moc ) assert result.exit_code == 0 - assert "skipping bdd test case creation" in result.output.lower() - assert "auto-creation disabled" in result.output.lower() - mock_uploader.upload_results.assert_called() + # Verify auto_create=False was passed to parser + mock_parser.parse_file.assert_called_with(bdd_matching_mode=True, project_id=1, suite_id=2, auto_create=False) @pytest.mark.cmd_parse_cucumber def test_parse_cucumber_missing_file(self): @@ -248,15 +268,31 @@ def test_parse_cucumber_empty_json(self, mock_parser_class): @pytest.mark.cmd_parse_cucumber @patch("trcli.api.api_request_handler.ApiRequestHandler") + @patch("trcli.api.api_client.APIClient") @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") @patch("builtins.open", new_callable=mock.mock_open, read_data="[]") - def test_parse_cucumber_invalid_cucumber_json(self, mock_open, mock_parser_class, mock_api_handler_class): + def test_parse_cucumber_invalid_cucumber_json( + self, mock_open, mock_parser_class, mock_api_client_class, mock_api_handler_class + ): """Test with invalid Cucumber JSON structure (empty array)""" - # Mock parser + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API handler + mock_api_handler = MagicMock() + mock_api_handler_class.return_value = mock_api_handler + + # Mock project data resolution + mock_project_data = MagicMock() + mock_project_data.project_id = 1 + mock_api_handler.get_project_data.return_value = mock_project_data + + # Mock parser to raise error for empty JSON mock_parser = MagicMock() mock_parser_class.return_value = mock_parser - mock_suite = MagicMock() - mock_parser.parse_file.return_value = [mock_suite] + mock_parser.parse_file.side_effect = ValueError("Invalid Cucumber JSON format: empty array") result = self.runner.invoke( cmd_parse_cucumber.cli, @@ -265,9 +301,6 @@ def test_parse_cucumber_invalid_cucumber_json(self, mock_open, mock_parser_class self.test_cucumber_path, "--suite-id", "2", - "--upload-feature", - "--feature-section-id", - "456", "--title", "Test Run", ], @@ -276,38 +309,58 @@ def test_parse_cucumber_invalid_cucumber_json(self, mock_open, mock_parser_class assert result.exit_code == 1 # Check that it fails with any appropriate error (either JSON format or parsing error) - assert "invalid cucumber json format" in result.output.lower() or "error parsing" in result.output.lower() + assert "invalid" in result.output.lower() or "error parsing" in result.output.lower() @pytest.mark.cmd_parse_cucumber @patch("trcli.api.api_request_handler.ApiRequestHandler") + @patch("trcli.api.api_client.APIClient") @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") @patch( "builtins.open", new_callable=mock.mock_open, read_data='[{"name":"Test Feature","elements":[{"type":"scenario","name":"Test Scenario"}]}]', ) - def test_parse_cucumber_api_error_during_feature_upload(self, mock_open, mock_parser_class, 
mock_api_handler_class): - """Test API error during BDD test case creation""" + def test_parse_cucumber_api_error_during_auto_creation( + self, mock_open, mock_parser_class, mock_api_client_class, mock_api_handler_class + ): + """Test API error during BDD test case auto-creation""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API handler with error + mock_api_handler = MagicMock() + mock_api_handler_class.return_value = mock_api_handler + + # Mock project data resolution + mock_project_data = MagicMock() + mock_project_data.project_id = 1 + mock_api_handler.get_project_data.return_value = mock_project_data + # Mock parser mock_parser = MagicMock() mock_parser_class.return_value = mock_parser mock_suite = MagicMock() mock_section = MagicMock() mock_section.name = "Test Feature" + mock_case = MagicMock() + mock_case.case_id = -1 # Needs creation + mock_section.testcases = [mock_case] mock_suite.testsections = [mock_section] mock_parser.parse_file.return_value = [mock_suite] mock_parser._generate_feature_content.return_value = "Feature: Test\n Scenario: Test\n" + mock_parser._normalize_title.return_value = "test feature" - # Mock API handler with error - mock_api_handler = MagicMock() - mock_api_handler_class.return_value = mock_api_handler - - # Mock project data resolution - mock_project_data = MagicMock() - mock_project_data.project_id = 1 - mock_api_handler.get_project_data.return_value = mock_project_data + # Mock section fetch + mock_api_handler._ApiRequestHandler__get_all_sections.return_value = ([], None) + mock_response = MagicMock() + mock_response.error_message = None + mock_response.response_text = {"id": 456} + mock_api_handler.client.send_post.return_value = mock_response - mock_api_handler.get_bdd_template_id.return_value = (2, "") + # Mock BDD template and add_bdd with error + mock_api_handler.get_bdd_template_id.return_value = (2, None) mock_api_handler.add_bdd.return_value = ([], "API Error: Section not found") result = self.runner.invoke( @@ -317,9 +370,6 @@ def test_parse_cucumber_api_error_during_feature_upload(self, mock_open, mock_pa self.test_cucumber_path, "--suite-id", "2", - "--upload-feature", - "--feature-section-id", - "456", "--title", "Test Run", ], @@ -327,7 +377,7 @@ def test_parse_cucumber_api_error_during_feature_upload(self, mock_open, mock_pa ) assert result.exit_code == 1 - assert "error creating" in result.output.lower() + assert "error" in result.output.lower() @pytest.mark.cmd_parse_cucumber def test_parse_cucumber_required_parameters(self): @@ -345,12 +395,30 @@ def test_parse_cucumber_required_parameters(self): # Will fail @pytest.mark.cmd_parse_cucumber + @patch("trcli.api.api_request_handler.ApiRequestHandler") + @patch("trcli.api.api_client.APIClient") @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") - def test_parse_cucumber_validation_exception(self, mock_parser_class, mock_uploader_class): + def test_parse_cucumber_validation_exception( + self, mock_parser_class, mock_uploader_class, mock_api_client_class, mock_api_handler_class + ): """Test handling of ValidationException""" from trcli.data_classes.validation_exception import ValidationException + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API handler 
+ mock_api_handler = MagicMock() + mock_api_handler_class.return_value = mock_api_handler + + # Mock project data resolution + mock_project_data = MagicMock() + mock_project_data.project_id = 1 + mock_api_handler.get_project_data.return_value = mock_project_data + # Mock parser to raise ValidationException mock_parser = MagicMock() mock_parser_class.return_value = mock_parser @@ -366,10 +434,28 @@ def test_parse_cucumber_validation_exception(self, mock_parser_class, mock_uploa assert "validation error" in result.output.lower() @pytest.mark.cmd_parse_cucumber + @patch("trcli.api.api_request_handler.ApiRequestHandler") + @patch("trcli.api.api_client.APIClient") @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") - def test_parse_cucumber_value_error(self, mock_parser_class, mock_uploader_class): + def test_parse_cucumber_value_error( + self, mock_parser_class, mock_uploader_class, mock_api_client_class, mock_api_handler_class + ): """Test handling of ValueError during parsing""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API handler + mock_api_handler = MagicMock() + mock_api_handler_class.return_value = mock_api_handler + + # Mock project data resolution + mock_project_data = MagicMock() + mock_project_data.project_id = 1 + mock_api_handler.get_project_data.return_value = mock_project_data + # Mock parser to raise ValueError mock_parser = MagicMock() mock_parser_class.return_value = mock_parser diff --git a/tests/test_cucumber_bdd_matching.py b/tests/test_cucumber_bdd_matching.py new file mode 100644 index 0000000..c1fdca4 --- /dev/null +++ b/tests/test_cucumber_bdd_matching.py @@ -0,0 +1,508 @@ +import pytest +import json +from unittest import mock +from unittest.mock import MagicMock, patch, call +from pathlib import Path + +from trcli.cli import Environment +from trcli.readers.cucumber_json import CucumberParser +from trcli.data_classes.dataclass_testrail import TestRailSeparatedStep + + +class TestCucumberBDDMatching: + """Test class for BDD matching mode functionality in CucumberParser""" + + def setup_method(self): + """Set up test environment""" + self.environment = Environment(cmd="parse_cucumber") + self.environment.host = "https://test.testrail.com" + self.environment.username = "test@example.com" + self.environment.password = "password" + self.environment.project = "Test Project" + self.environment.project_id = 1 + self.environment.suite_id = 2 + + # Create a temporary test file for CucumberParser initialization + import tempfile + + self.temp_file = tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) + self.temp_file.write("[]") + self.temp_file.close() + self.environment.file = self.temp_file.name + + # Sample Cucumber JSON feature + self.sample_feature = { + "name": "User Login", + "tags": [{"name": "@smoke"}], + "elements": [ + { + "type": "scenario", + "name": "Successful login", + "tags": [], + "steps": [ + { + "keyword": "Given", + "name": "user is on login page", + "result": {"status": "passed", "duration": 1000000000}, + }, + { + "keyword": "When", + "name": "user enters valid credentials", + "result": {"status": "passed", "duration": 2000000000}, + }, + { + "keyword": "Then", + "name": "user is logged in", + "result": {"status": "passed", "duration": 500000000}, + }, + ], + }, + { + "type": "scenario", + "name": "Failed login", + "tags": [], + 
"steps": [ + { + "keyword": "Given", + "name": "user is on login page", + "result": {"status": "passed", "duration": 1000000000}, + }, + { + "keyword": "When", + "name": "user enters invalid credentials", + "result": {"status": "passed", "duration": 2000000000}, + }, + { + "keyword": "Then", + "name": "error message is shown", + "result": { + "status": "failed", + "duration": 500000000, + "error_message": "Expected error not found", + }, + }, + ], + }, + ], + } + + def teardown_method(self): + """Clean up temporary files""" + import os + + if hasattr(self, "temp_file") and os.path.exists(self.temp_file.name): + os.unlink(self.temp_file.name) + + @pytest.mark.cucumber_bdd_matching + def test_normalize_title_basic(self): + """Test title normalization removes special characters and normalizes case""" + parser = CucumberParser(self.environment) + + assert parser._normalize_title("User Login") == "user login" + assert parser._normalize_title("User-Login") == "user login" + assert parser._normalize_title("User_Login!") == "user login" + assert parser._normalize_title(" User Login ") == "user login" + assert parser._normalize_title("User@#$%Login") == "user login" + + @pytest.mark.cucumber_bdd_matching + def test_normalize_title_complex(self): + """Test title normalization with complex cases""" + parser = CucumberParser(self.environment) + + assert parser._normalize_title("E-commerce: Product Checkout") == "e commerce product checkout" + assert parser._normalize_title("API (v2) Authentication") == "api v2 authentication" + assert parser._normalize_title("Test-Case #123") == "test case 123" + + @pytest.mark.cucumber_bdd_matching + def test_extract_case_id_from_feature_tags(self): + """Test case ID extraction from feature-level tags""" + parser = CucumberParser(self.environment) + + feature_tags = ["@smoke", "@C123", "@regression"] + scenario_tags = ["@C456"] + + # Feature-level tag should take priority + case_id = parser._extract_case_id_from_tags(feature_tags, scenario_tags) + assert case_id == 123 + + @pytest.mark.cucumber_bdd_matching + def test_extract_case_id_from_scenario_tags(self): + """Test case ID extraction from scenario-level tags (fallback)""" + parser = CucumberParser(self.environment) + + feature_tags = ["@smoke"] + scenario_tags = ["@C456", "@regression"] + + # Should use scenario-level tag when no feature-level tag + case_id = parser._extract_case_id_from_tags(feature_tags, scenario_tags) + assert case_id == 456 + + @pytest.mark.cucumber_bdd_matching + def test_extract_case_id_no_tags(self): + """Test case ID extraction returns None when no @C tags""" + parser = CucumberParser(self.environment) + + feature_tags = ["@smoke", "@regression"] + scenario_tags = ["@fast"] + + case_id = parser._extract_case_id_from_tags(feature_tags, scenario_tags) + assert case_id is None + + @pytest.mark.cucumber_bdd_matching + def test_extract_case_id_lowercase(self): + """Test case ID extraction with lowercase @c tag""" + parser = CucumberParser(self.environment) + + feature_tags = ["@c789"] + scenario_tags = [] + + case_id = parser._extract_case_id_from_tags(feature_tags, scenario_tags) + assert case_id == 789 + + @pytest.mark.cucumber_bdd_matching + def test_extract_case_id_invalid_format(self): + """Test case ID extraction handles invalid formats gracefully""" + parser = CucumberParser(self.environment) + + feature_tags = ["@C", "@Cabc", "@C123abc"] + scenario_tags = [] + + # Should return None for invalid formats + case_id = parser._extract_case_id_from_tags(feature_tags, scenario_tags) + assert 
case_id is None + + @pytest.mark.cucumber_bdd_matching + @patch("trcli.readers.cucumber_json.CucumberParser._get_bdd_cases_cache") + def test_find_case_by_title_found(self, mock_get_cache): + """Test finding case by title using cached lookup""" + parser = CucumberParser(self.environment) + + # Mock cache with normalized titles + mock_get_cache.return_value = {"user login": 101, "product search": 102, "checkout": 103} + + case_id = parser._find_case_by_title("User Login", project_id=1, suite_id=2) + assert case_id == 101 + + # Verify cache was accessed + mock_get_cache.assert_called_once_with(1, 2) + + @pytest.mark.cucumber_bdd_matching + @patch("trcli.readers.cucumber_json.CucumberParser._get_bdd_cases_cache") + def test_find_case_by_title_not_found(self, mock_get_cache): + """Test finding case by title returns None when not in cache""" + parser = CucumberParser(self.environment) + + mock_get_cache.return_value = {"user login": 101, "product search": 102} + + case_id = parser._find_case_by_title("Nonexistent Feature", project_id=1, suite_id=2) + assert case_id is None + + @pytest.mark.cucumber_bdd_matching + @patch("trcli.readers.cucumber_json.CucumberParser._get_bdd_cases_cache") + def test_find_case_by_title_normalization(self, mock_get_cache): + """Test case matching with different formatting""" + parser = CucumberParser(self.environment) + + mock_get_cache.return_value = {"user login": 101} + + # Should match despite different formatting + assert parser._find_case_by_title("User Login", 1, 2) == 101 + assert parser._find_case_by_title("User-Login", 1, 2) == 101 + assert parser._find_case_by_title("user_login", 1, 2) == 101 + assert parser._find_case_by_title("USER LOGIN", 1, 2) == 101 + + @pytest.mark.cucumber_bdd_matching + def test_get_bdd_cases_cache_builds_correctly(self): + """Test BDD cases cache is built correctly from API response""" + parser = CucumberParser(self.environment) + + # Mock API handler + mock_api_handler = MagicMock() + parser._api_handler = mock_api_handler + + # Mock API response with mix of BDD and non-BDD cases + mock_cases = [ + {"id": 101, "title": "User Login", "custom_testrail_bdd_scenario": "Scenario: Login"}, + {"id": 102, "title": "Product Search", "custom_testrail_bdd_scenario": None}, # Not BDD + {"id": 103, "title": "Checkout Process", "custom_testrail_bdd_scenario": "Scenario: Checkout"}, + ] + mock_api_handler._ApiRequestHandler__get_all_cases.return_value = (mock_cases, None) + + # Build cache + cache = parser._get_bdd_cases_cache(project_id=1, suite_id=2) + + # Should only include BDD cases (101 and 103) + assert len(cache) == 2 + assert cache["user login"] == 101 + assert cache["checkout process"] == 103 + assert "product search" not in cache + + @pytest.mark.cucumber_bdd_matching + def test_get_bdd_cases_cache_caching_behavior(self): + """Test cache is only fetched once""" + parser = CucumberParser(self.environment) + + mock_api_handler = MagicMock() + parser._api_handler = mock_api_handler + + mock_cases = [{"id": 101, "title": "User Login", "custom_testrail_bdd_scenario": "Scenario: Login"}] + mock_api_handler._ApiRequestHandler__get_all_cases.return_value = (mock_cases, None) + + # First call - should fetch from API + cache1 = parser._get_bdd_cases_cache(1, 2) + assert mock_api_handler._ApiRequestHandler__get_all_cases.call_count == 1 + + # Second call - should use cache + cache2 = parser._get_bdd_cases_cache(1, 2) + assert mock_api_handler._ApiRequestHandler__get_all_cases.call_count == 1 # No additional call + + # Verify same cache 
returned + assert cache1 is cache2 + + @pytest.mark.cucumber_bdd_matching + def test_get_bdd_cases_cache_api_error(self): + """Test cache handles API errors gracefully""" + parser = CucumberParser(self.environment) + + mock_api_handler = MagicMock() + parser._api_handler = mock_api_handler + + # Mock API error + mock_api_handler._ApiRequestHandler__get_all_cases.return_value = ([], "API Error") + + cache = parser._get_bdd_cases_cache(1, 2) + + # Should return empty cache on error + assert cache == {} + + @pytest.mark.cucumber_bdd_matching + def test_validate_bdd_case_exists_valid(self): + """Test validation succeeds for valid BDD case""" + parser = CucumberParser(self.environment) + + mock_api_handler = MagicMock() + parser._api_handler = mock_api_handler + + # Mock valid BDD case - mock send_get response + mock_response = MagicMock() + mock_response.error_message = None + mock_response.response_text = { + "id": 101, + "title": "User Login", + "custom_testrail_bdd_scenario": "Scenario: Login", + } + mock_api_handler.client.send_get.return_value = mock_response + + is_valid, error_message = parser._validate_bdd_case_exists(101) + + assert is_valid is True + assert error_message is None + + @pytest.mark.cucumber_bdd_matching + def test_validate_bdd_case_not_found(self): + """Test validation fails when case not found""" + parser = CucumberParser(self.environment) + + mock_api_handler = MagicMock() + parser._api_handler = mock_api_handler + + # Mock case not found - mock send_get response + mock_response = MagicMock() + mock_response.error_message = "Case not found" + mock_response.response_text = None + mock_api_handler.client.send_get.return_value = mock_response + + is_valid, error_message = parser._validate_bdd_case_exists(999) + + assert is_valid is False + assert "not found" in error_message.lower() + + @pytest.mark.cucumber_bdd_matching + def test_validate_bdd_case_not_bdd_template(self): + """Test validation fails when case is not BDD template""" + parser = CucumberParser(self.environment) + + mock_api_handler = MagicMock() + parser._api_handler = mock_api_handler + + # Mock non-BDD case - mock send_get response + mock_response = MagicMock() + mock_response.error_message = None + mock_response.response_text = {"id": 102, "title": "Regular Test", "custom_testrail_bdd_scenario": None} + mock_api_handler.client.send_get.return_value = mock_response + + is_valid, error_message = parser._validate_bdd_case_exists(102) + + assert is_valid is False + assert "not a bdd template" in error_message.lower() + + @pytest.mark.cucumber_bdd_matching + def test_parse_feature_as_bdd_case_with_tag(self): + """Test parsing feature as BDD case using @C tag""" + parser = CucumberParser(self.environment) + + mock_api_handler = MagicMock() + parser._api_handler = mock_api_handler + + # Mock validation - mock send_get response + mock_response = MagicMock() + mock_response.error_message = None + mock_response.response_text = {"id": 123, "custom_testrail_bdd_scenario": "Scenario: Test"} + mock_api_handler.client.send_get.return_value = mock_response + + # Add @C tag to feature + feature_with_tag = self.sample_feature.copy() + feature_with_tag["tags"] = [{"name": "@C123"}] + + test_case = parser._parse_feature_as_bdd_case(feature_with_tag, project_id=1, suite_id=2) + + assert test_case is not None + assert test_case.case_id == 123 + assert test_case.result.case_id == 123 + assert len(test_case.result.custom_testrail_bdd_scenario_results) == 2 # Two scenarios + assert test_case.result.status_id == 5 # Failed (one 
scenario failed) + + @pytest.mark.cucumber_bdd_matching + @patch("trcli.readers.cucumber_json.CucumberParser._find_case_by_title") + def test_parse_feature_as_bdd_case_by_title(self, mock_find): + """Test parsing feature as BDD case using title matching""" + parser = CucumberParser(self.environment) + + mock_api_handler = MagicMock() + parser._api_handler = mock_api_handler + + # Mock title matching + mock_find.return_value = 456 + + # Mock validation - mock send_get response + mock_response = MagicMock() + mock_response.error_message = None + mock_response.response_text = {"id": 456, "custom_testrail_bdd_scenario": "Scenario: Test"} + mock_api_handler.client.send_get.return_value = mock_response + + test_case = parser._parse_feature_as_bdd_case(self.sample_feature, project_id=1, suite_id=2) + + assert test_case is not None + assert test_case.case_id == 456 + mock_find.assert_called_once_with("User Login", 1, 2) + + @pytest.mark.cucumber_bdd_matching + def test_parse_feature_as_bdd_case_scenario_statuses(self): + """Test BDD scenario results have correct statuses""" + parser = CucumberParser(self.environment) + + mock_api_handler = MagicMock() + parser._api_handler = mock_api_handler + + # Mock validation - mock send_get response + mock_response = MagicMock() + mock_response.error_message = None + mock_response.response_text = {"id": 123, "custom_testrail_bdd_scenario": "Scenario: Test"} + mock_api_handler.client.send_get.return_value = mock_response + + feature_with_tag = self.sample_feature.copy() + feature_with_tag["tags"] = [{"name": "@C123"}] + + test_case = parser._parse_feature_as_bdd_case(feature_with_tag, project_id=1, suite_id=2) + + scenarios = test_case.result.custom_testrail_bdd_scenario_results + + # First scenario: passed + assert scenarios[0].content == "Successful login" + assert scenarios[0].status_id == 1 + + # Second scenario: failed + assert scenarios[1].content == "Failed login" + assert scenarios[1].status_id == 5 + + @pytest.mark.cucumber_bdd_matching + def test_parse_feature_as_bdd_case_elapsed_time(self): + """Test elapsed time calculation for BDD case""" + parser = CucumberParser(self.environment) + + mock_api_handler = MagicMock() + parser._api_handler = mock_api_handler + + # Mock validation - mock send_get response + mock_response = MagicMock() + mock_response.error_message = None + mock_response.response_text = {"id": 123, "custom_testrail_bdd_scenario": "Scenario: Test"} + mock_api_handler.client.send_get.return_value = mock_response + + feature_with_tag = self.sample_feature.copy() + feature_with_tag["tags"] = [{"name": "@C123"}] + + test_case = parser._parse_feature_as_bdd_case(feature_with_tag, project_id=1, suite_id=2) + + # Total duration: (1+2+0.5) + (1+2+0.5) = 7 seconds + assert test_case.result.elapsed == "7s" + + @pytest.mark.cucumber_bdd_matching + def test_parse_feature_as_bdd_case_not_found(self): + """Test parsing returns None when case not found""" + parser = CucumberParser(self.environment) + + mock_api_handler = MagicMock() + parser._api_handler = mock_api_handler + + # Mock no case found (no tag, no title match) + with patch.object(parser, "_find_case_by_title", return_value=None): + test_case = parser._parse_feature_as_bdd_case(self.sample_feature, project_id=1, suite_id=2) + + assert test_case is None + + @pytest.mark.cucumber_bdd_matching + def test_parse_feature_as_bdd_case_validation_fails(self): + """Test parsing returns None when validation fails""" + parser = CucumberParser(self.environment) + + mock_api_handler = MagicMock() + 
parser._api_handler = mock_api_handler + + # Mock validation failure (not BDD template) - mock send_get response + mock_response = MagicMock() + mock_response.error_message = None + mock_response.response_text = {"id": 123, "custom_testrail_bdd_scenario": None} + mock_api_handler.client.send_get.return_value = mock_response + + feature_with_tag = self.sample_feature.copy() + feature_with_tag["tags"] = [{"name": "@C123"}] + + test_case = parser._parse_feature_as_bdd_case(feature_with_tag, project_id=1, suite_id=2) + + assert test_case is None + + @pytest.mark.cucumber_bdd_matching + def test_parse_feature_branching_bdd_mode(self): + """Test _parse_feature branches correctly to BDD matching mode""" + parser = CucumberParser(self.environment) + + mock_api_handler = MagicMock() + parser._api_handler = mock_api_handler + + # Mock validation - mock send_get response + mock_response = MagicMock() + mock_response.error_message = None + mock_response.response_text = {"id": 123, "custom_testrail_bdd_scenario": "Scenario: Test"} + mock_api_handler.client.send_get.return_value = mock_response + + feature_with_tag = self.sample_feature.copy() + feature_with_tag["tags"] = [{"name": "@C123"}] + + # Call with BDD matching mode enabled + sections = parser._parse_feature(feature_with_tag, bdd_matching_mode=True, project_id=1, suite_id=2) + + assert len(sections) == 1 + assert len(sections[0].testcases) == 1 # One BDD case (not 2 separate scenarios) + assert sections[0].testcases[0].case_id == 123 + + @pytest.mark.cucumber_bdd_matching + def test_parse_feature_branching_standard_mode(self): + """Test _parse_feature uses standard mode when bdd_matching_mode=False""" + parser = CucumberParser(self.environment) + + # Call with standard mode + sections = parser._parse_feature(self.sample_feature, bdd_matching_mode=False, project_id=None, suite_id=None) + + assert len(sections) == 1 + assert len(sections[0].testcases) == 2 # Two separate test cases (one per scenario) diff --git a/tests/test_data/api_data_provider_test_data.py b/tests/test_data/api_data_provider_test_data.py index a51f7cc..2ff2d03 100644 --- a/tests/test_data/api_data_provider_test_data.py +++ b/tests/test_data/api_data_provider_test_data.py @@ -11,15 +11,11 @@ json_string = json.dumps(json.load(file_json)) test_input_single_result_with_id = from_json(TestRailSuite, json_string) -file_json = open( - Path(__file__).parent / "json/update_case_result_single_without_id.json" -) +file_json = open(Path(__file__).parent / "json/update_case_result_single_without_id.json") json_string = json.dumps(json.load(file_json)) test_input_single_result_without_id = from_json(TestRailSuite, json_string) -file_json = open( - Path(__file__).parent / "json/data_provider_duplicated_case_names.json" -) +file_json = open(Path(__file__).parent / "json/data_provider_duplicated_case_names.json") json_string = json.dumps(json.load(file_json)) test_input_duplicated_case_names = from_json(TestRailSuite, json_string) @@ -31,15 +27,13 @@ {"name": "Passed test", "suite_id": 123}, ] -post_cases_bodies = [ - {"section_id": 12345, "title": "testCase2", "custom_automation_id": "className.testCase2abc"} -] +post_cases_bodies = [{"section_id": 12345, "title": "testCase2", "custom_automation_id": "className.testCase2abc"}] post_run_bodies = { "description": "logging: True\ndebug: False", "name": "test run", "suite_id": 123, - "case_ids": [60, 4] + "case_ids": [60, 4], } post_run_full_body = { @@ -49,7 +43,7 @@ "case_ids": [60, 4], "assignedto_id": 1, "include_all": True, - "refs": 
"SAN-1, SAN-2" + "refs": "SAN-1, SAN-2", } post_results_for_cases_body = [ @@ -60,10 +54,25 @@ "comment": "Type: pytest.skip\\nMessage: Please skip\\nText: skipped by user", "attachments": [], "status_id": 4, - 'custom_step_results': [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [], + }, + { + "case_id": 1234567, + "comment": "", + "attachments": [], + "status_id": 1, + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [], + }, + { + "case_id": 4, + "comment": "", + "attachments": [], + "status_id": 1, + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [], }, - {"case_id": 1234567, "comment": "", "attachments": [], "status_id": 1, 'custom_step_results': []}, - {"case_id": 4, "comment": "", "attachments": [], "status_id": 1, 'custom_step_results': []}, ] } ] diff --git a/tests/test_data/json/api_request_handler.json b/tests/test_data/json/api_request_handler.json index 6273ed5..6c370ff 100644 --- a/tests/test_data/json/api_request_handler.json +++ b/tests/test_data/json/api_request_handler.json @@ -68,4 +68,4 @@ ] } ] -} +} \ No newline at end of file diff --git a/tests/test_data/json/api_request_handler_long_testcase.json b/tests/test_data/json/api_request_handler_long_testcase.json index 6038306..5d02089 100644 --- a/tests/test_data/json/api_request_handler_long_testcase.json +++ b/tests/test_data/json/api_request_handler_long_testcase.json @@ -68,4 +68,4 @@ ] } ] -} +} \ No newline at end of file diff --git a/tests/test_data/json/custom_automation_id_in_property.json b/tests/test_data/json/custom_automation_id_in_property.json index e48ef3d..f933645 100644 --- a/tests/test_data/json/custom_automation_id_in_property.json +++ b/tests/test_data/json/custom_automation_id_in_property.json @@ -1,99 +1,102 @@ { - "description": null, - "name": "test suites root", - "source": "custom_automation_id_in_property.xml", - "suite_id": null, - "testsections": [ - { - "description": null, - "name": "custom_automation_id in property", - "suite_id": null, - "parent_id": null, - "section_id": null, - "testcases": [ + "description": null, + "name": "test suites root", + "source": "custom_automation_id_in_property.xml", + "suite_id": null, + "testsections": [ { - "title": "test_testrail 1", - "section_id": null, - "case_id": null, - "estimate": null, - "template_id": null, - "type_id": null, - "milestone_id": null, - "refs": null, - "case_fields": { - "template_id": "1" - }, - "result": { - "case_id": null, - "status_id": 1, - "comment": "", - "version": null, - "elapsed": "159s", - "defects": null, - "assignedto_id": null, - "attachments": [], - "result_fields": {}, - "junit_result_unparsed": [], - "custom_step_results": [] - }, - "custom_automation_id": "automation_id_1" - }, - { - "title": "test_testrail 2", - "section_id": null, - "case_id": null, - "estimate": null, - "template_id": null, - "type_id": null, - "milestone_id": null, - "refs": null, - "case_fields": { - "template_id": "1" - }, - "result": { - "case_id": null, - "status_id": 1, - "comment": "", - "version": null, - "elapsed": "159s", - "defects": null, - "assignedto_id": null, - "attachments": [], - "result_fields": {}, - "junit_result_unparsed": [], - "custom_step_results": [] - }, - "custom_automation_id": "automation_id_2" - }, - { - "title": "test_testrail 3", - "section_id": null, - "case_id": null, - "estimate": null, - "template_id": null, - "type_id": null, - "milestone_id": null, - "refs": null, - "case_fields": { - "template_id": "1" - }, - "result": { - "case_id": 
null, - "status_id": 1, - "comment": "", - "version": null, - "elapsed": "159s", - "defects": null, - "assignedto_id": null, - "attachments": [], - "result_fields": {}, - "junit_result_unparsed": [], - "custom_step_results": [] - }, - "custom_automation_id": "automation_id_3" + "description": null, + "name": "custom_automation_id in property", + "suite_id": null, + "parent_id": null, + "section_id": null, + "testcases": [ + { + "title": "test_testrail 1", + "section_id": null, + "case_id": null, + "estimate": null, + "template_id": null, + "type_id": null, + "milestone_id": null, + "refs": null, + "case_fields": { + "template_id": "1" + }, + "result": { + "case_id": null, + "status_id": 1, + "comment": "", + "version": null, + "elapsed": "159s", + "defects": null, + "assignedto_id": null, + "attachments": [], + "result_fields": {}, + "junit_result_unparsed": [], + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] + }, + "custom_automation_id": "automation_id_1" + }, + { + "title": "test_testrail 2", + "section_id": null, + "case_id": null, + "estimate": null, + "template_id": null, + "type_id": null, + "milestone_id": null, + "refs": null, + "case_fields": { + "template_id": "1" + }, + "result": { + "case_id": null, + "status_id": 1, + "comment": "", + "version": null, + "elapsed": "159s", + "defects": null, + "assignedto_id": null, + "attachments": [], + "result_fields": {}, + "junit_result_unparsed": [], + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] + }, + "custom_automation_id": "automation_id_2" + }, + { + "title": "test_testrail 3", + "section_id": null, + "case_id": null, + "estimate": null, + "template_id": null, + "type_id": null, + "milestone_id": null, + "refs": null, + "case_fields": { + "template_id": "1" + }, + "result": { + "case_id": null, + "status_id": 1, + "comment": "", + "version": null, + "elapsed": "159s", + "defects": null, + "assignedto_id": null, + "attachments": [], + "result_fields": {}, + "junit_result_unparsed": [], + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] + }, + "custom_automation_id": "automation_id_3" + } + ], + "properties": [] } - ], - "properties": [] - } - ] + ] } \ No newline at end of file diff --git a/tests/test_data/json/data_provider.json b/tests/test_data/json/data_provider.json index 34fd12a..b924161 100644 --- a/tests/test_data/json/data_provider.json +++ b/tests/test_data/json/data_provider.json @@ -69,4 +69,4 @@ ] } ] -} +} \ No newline at end of file diff --git a/tests/test_data/json/data_provider_duplicated_case_names.json b/tests/test_data/json/data_provider_duplicated_case_names.json index 0eafef8..12eabfb 100644 --- a/tests/test_data/json/data_provider_duplicated_case_names.json +++ b/tests/test_data/json/data_provider_duplicated_case_names.json @@ -68,4 +68,4 @@ ] } ] -} +} \ No newline at end of file diff --git a/tests/test_data/json/junit5_parentheses_test.json b/tests/test_data/json/junit5_parentheses_test.json index 574ecf2..a429daa 100644 --- a/tests/test_data/json/junit5_parentheses_test.json +++ b/tests/test_data/json/junit5_parentheses_test.json @@ -1,101 +1,107 @@ { - "name": "JUnit 5 Test Suite with Parentheses", - "testsections": [ - { - "name": "JUnit5ParenthesesTests", - "testcases": [ + "name": "JUnit 5 Test Suite with Parentheses", + "testsections": [ { - "title": "test_name", - "case_id": 120013, - "result": { - "case_id": 120013, - "elapsed": 1.5, - "attachments": [], - "result_fields": {}, - "custom_step_results": [], - "status_id": 1, - 
"comment": "" - }, - "custom_automation_id": "com.example.MyTests.test_name_C120013()", - "case_fields": {} - }, - { - "title": "testMethod", - "case_id": 123, - "result": { - "case_id": 123, - "elapsed": 2.1, - "attachments": [], - "result_fields": {}, - "custom_step_results": [], - "status_id": 1, - "comment": "" - }, - "custom_automation_id": "com.example.MyTests.testMethod_C123()", - "case_fields": {} - }, - { - "title": "complexTest", - "case_id": 456, - "result": { - "case_id": 456, - "elapsed": 0.8, - "attachments": [], - "result_fields": {}, - "custom_step_results": [], - "status_id": 1, - "comment": "" - }, - "custom_automation_id": "com.example.MyTests.complexTest_C456(String param, int value)", - "case_fields": {} - }, - { - "title": "test_name()", - "case_id": 789, - "result": { - "case_id": 789, - "elapsed": 1.2, - "attachments": [], - "result_fields": {}, - "custom_step_results": [], - "status_id": 1, - "comment": "" - }, - "custom_automation_id": "com.example.MyTests.C789_test_name()", - "case_fields": {} - }, - { - "title": "test_with_brackets()", - "case_id": 999, - "result": { - "case_id": 999, - "elapsed": 0.9, - "attachments": [], - "result_fields": {}, - "custom_step_results": [], - "status_id": 1, - "comment": "" - }, - "custom_automation_id": "com.example.MyTests.[C999] test_with_brackets()", - "case_fields": {} - }, - { - "title": "test_name", - "case_id": 555, - "result": { - "case_id": 555, - "elapsed": 1.0, - "attachments": [], - "result_fields": {}, - "custom_step_results": [], - "status_id": 1, - "comment": "" - }, - "custom_automation_id": "com.example.MyTests.test_name_C555", - "case_fields": {} + "name": "JUnit5ParenthesesTests", + "testcases": [ + { + "title": "test_name", + "case_id": 120013, + "result": { + "case_id": 120013, + "elapsed": 1.5, + "attachments": [], + "result_fields": {}, + "custom_step_results": [], + "status_id": 1, + "comment": "", + "custom_testrail_bdd_scenario_results": [] + }, + "custom_automation_id": "com.example.MyTests.test_name_C120013()", + "case_fields": {} + }, + { + "title": "testMethod", + "case_id": 123, + "result": { + "case_id": 123, + "elapsed": 2.1, + "attachments": [], + "result_fields": {}, + "custom_step_results": [], + "status_id": 1, + "comment": "", + "custom_testrail_bdd_scenario_results": [] + }, + "custom_automation_id": "com.example.MyTests.testMethod_C123()", + "case_fields": {} + }, + { + "title": "complexTest", + "case_id": 456, + "result": { + "case_id": 456, + "elapsed": 0.8, + "attachments": [], + "result_fields": {}, + "custom_step_results": [], + "status_id": 1, + "comment": "", + "custom_testrail_bdd_scenario_results": [] + }, + "custom_automation_id": "com.example.MyTests.complexTest_C456(String param, int value)", + "case_fields": {} + }, + { + "title": "test_name()", + "case_id": 789, + "result": { + "case_id": 789, + "elapsed": 1.2, + "attachments": [], + "result_fields": {}, + "custom_step_results": [], + "status_id": 1, + "comment": "", + "custom_testrail_bdd_scenario_results": [] + }, + "custom_automation_id": "com.example.MyTests.C789_test_name()", + "case_fields": {} + }, + { + "title": "test_with_brackets()", + "case_id": 999, + "result": { + "case_id": 999, + "elapsed": 0.9, + "attachments": [], + "result_fields": {}, + "custom_step_results": [], + "status_id": 1, + "comment": "", + "custom_testrail_bdd_scenario_results": [] + }, + "custom_automation_id": "com.example.MyTests.[C999] test_with_brackets()", + "case_fields": {} + }, + { + "title": "test_name", + "case_id": 555, + "result": { 
+ "case_id": 555, + "elapsed": 1.0, + "attachments": [], + "result_fields": {}, + "custom_step_results": [], + "status_id": 1, + "comment": "", + "custom_testrail_bdd_scenario_results": [] + }, + "custom_automation_id": "com.example.MyTests.test_name_C555", + "case_fields": {} + } + ] } - ] - } - ], - "source": null -} + ], + "source": null +} \ No newline at end of file diff --git a/tests/test_data/json/milliseconds.json b/tests/test_data/json/milliseconds.json index 7bd3a79..6d6140f 100644 --- a/tests/test_data/json/milliseconds.json +++ b/tests/test_data/json/milliseconds.json @@ -29,7 +29,8 @@ "version": null, "attachments": [], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] }, "section_id": null, "template_id": null, @@ -54,7 +55,8 @@ "version": null, "attachments": [], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] }, "section_id": null, "template_id": null, diff --git a/tests/test_data/json/no_root.json b/tests/test_data/json/no_root.json index c743c16..27e1c8f 100644 --- a/tests/test_data/json/no_root.json +++ b/tests/test_data/json/no_root.json @@ -29,7 +29,8 @@ "version": null, "attachments": [], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] }, "section_id": null, "template_id": null, @@ -54,7 +55,8 @@ "version": null, "attachments": [], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] }, "section_id": null, "template_id": null, diff --git a/tests/test_data/json/required_only.json b/tests/test_data/json/required_only.json index 1f20e01..5cf8724 100644 --- a/tests/test_data/json/required_only.json +++ b/tests/test_data/json/required_only.json @@ -29,7 +29,8 @@ "version": null, "attachments": [], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] }, "section_id": null, "template_id": null, @@ -54,7 +55,8 @@ "version": null, "attachments": [], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] }, "section_id": null, "template_id": null, diff --git a/tests/test_data/json/robotframework_id_in_name_RF50.json b/tests/test_data/json/robotframework_id_in_name_RF50.json index 40777d8..7372a22 100644 --- a/tests/test_data/json/robotframework_id_in_name_RF50.json +++ b/tests/test_data/json/robotframework_id_in_name_RF50.json @@ -48,7 +48,8 @@ "content": "Set Test Message", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 1.Subtest 1a" }, @@ -78,7 +79,8 @@ "content": "Fail", "status_id": 5 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 1.Subtest 1b" } @@ -118,7 +120,8 @@ "content": "Log", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 2.Subtest 2a" }, @@ -148,7 +151,8 @@ "content": "Log", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 2.Subtest 2b" } diff --git a/tests/test_data/json/robotframework_id_in_name_RF70.json b/tests/test_data/json/robotframework_id_in_name_RF70.json index c63a36a..ba957d4 100644 --- a/tests/test_data/json/robotframework_id_in_name_RF70.json +++ 
b/tests/test_data/json/robotframework_id_in_name_RF70.json @@ -48,7 +48,8 @@ "content": "Set Test Message", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 1.Subtest 1a" }, @@ -78,7 +79,8 @@ "content": "Fail", "status_id": 5 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 1.Subtest 1b" } @@ -118,7 +120,8 @@ "content": "Log", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 2.Subtest 2a" }, @@ -148,7 +151,8 @@ "content": "Log", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 2.Subtest 2b" } diff --git a/tests/test_data/json/robotframework_id_in_property_RF50.json b/tests/test_data/json/robotframework_id_in_property_RF50.json index 4aa5bf6..d72161e 100644 --- a/tests/test_data/json/robotframework_id_in_property_RF50.json +++ b/tests/test_data/json/robotframework_id_in_property_RF50.json @@ -48,7 +48,8 @@ "content": "Set Test Message", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 1.Subtest 1a" }, @@ -78,7 +79,8 @@ "content": "Fail", "status_id": 5 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 1.Subtest 1b" } @@ -118,7 +120,8 @@ "content": "Log", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 2.Subtest 2a" }, @@ -148,7 +151,8 @@ "content": "Log", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 2.Subtest 2b" } diff --git a/tests/test_data/json/robotframework_id_in_property_RF70.json b/tests/test_data/json/robotframework_id_in_property_RF70.json index c3e0880..027d3d2 100644 --- a/tests/test_data/json/robotframework_id_in_property_RF70.json +++ b/tests/test_data/json/robotframework_id_in_property_RF70.json @@ -48,7 +48,8 @@ "content": "Set Test Message", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 1.Subtest 1a" }, @@ -78,7 +79,8 @@ "content": "Fail", "status_id": 5 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 1.Subtest 1b" } @@ -118,7 +120,8 @@ "content": "Log", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 2.Subtest 2a" }, @@ -148,7 +151,8 @@ "content": "Log", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 2.Subtest 2b" } diff --git a/tests/test_data/json/robotframework_simple_RF50.json b/tests/test_data/json/robotframework_simple_RF50.json index a535c00..812464f 100644 --- a/tests/test_data/json/robotframework_simple_RF50.json +++ b/tests/test_data/json/robotframework_simple_RF50.json @@ -48,7 +48,8 @@ "content": "Set Test Message", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 1.Subtest 1a" }, @@ -78,7 +79,8 @@ "content": "Fail", "status_id": 5 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 1.Subtest 1b" } @@ -118,7 +120,8 @@ "content": "Log", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 2.Subtest 2a" 
}, @@ -148,7 +151,8 @@ "content": "Log", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 2.Subtest 2b" } diff --git a/tests/test_data/json/robotframework_simple_RF70.json b/tests/test_data/json/robotframework_simple_RF70.json index 8815832..5b042aa 100644 --- a/tests/test_data/json/robotframework_simple_RF70.json +++ b/tests/test_data/json/robotframework_simple_RF70.json @@ -48,7 +48,8 @@ "content": "Set Test Message", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 1.Subtest 1a" }, @@ -78,7 +79,8 @@ "content": "Fail", "status_id": 5 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 1.Subtest 1b" } @@ -118,7 +120,8 @@ "content": "Log", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 2.Subtest 2a" }, @@ -148,7 +151,8 @@ "content": "Log", "status_id": 1 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Sub-Tests.Subtests 2.Subtest 2b" } diff --git a/tests/test_data/json/root.json b/tests/test_data/json/root.json index af510bf..87a5e6c 100644 --- a/tests/test_data/json/root.json +++ b/tests/test_data/json/root.json @@ -50,7 +50,8 @@ "content": "Verify content", "status_id": 5 } - ] + ], + "custom_testrail_bdd_scenario_results": [] }, "section_id": null, "template_id": null, @@ -68,8 +69,7 @@ "estimate": null, "milestone_id": null, "refs": null, - "case_fields": { - }, + "case_fields": {}, "result": { "assignedto_id": null, "defects": null, @@ -81,7 +81,8 @@ "custom_step_results": [], "result_fields": {}, "attachments": [], - "comment": "" + "comment": "", + "custom_testrail_bdd_scenario_results": [] }, "section_id": null, "template_id": null, diff --git a/tests/test_data/json/root_id_in_name.json b/tests/test_data/json/root_id_in_name.json index 3684730..cd26416 100644 --- a/tests/test_data/json/root_id_in_name.json +++ b/tests/test_data/json/root_id_in_name.json @@ -35,7 +35,8 @@ "version": null, "attachments": [], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] }, "section_id": null, "template_id": null, @@ -63,7 +64,8 @@ "second.file" ], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] }, "section_id": null, "template_id": null, @@ -88,7 +90,8 @@ "version": null, "attachments": [], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] }, "section_id": null, "template_id": null, diff --git a/tests/test_data/json/root_id_in_property.json b/tests/test_data/json/root_id_in_property.json index 53a99c3..45b9871 100644 --- a/tests/test_data/json/root_id_in_property.json +++ b/tests/test_data/json/root_id_in_property.json @@ -35,7 +35,8 @@ "version": null, "attachments": [], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] }, "section_id": null, "template_id": null, @@ -60,7 +61,8 @@ "version": null, "attachments": [], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] }, "section_id": null, "template_id": null, @@ -88,7 +90,8 @@ "second.file" ], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + 
"custom_testrail_bdd_scenario_results": [] }, "section_id": null, "template_id": null, @@ -113,7 +116,8 @@ "version": null, "attachments": [], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] }, "section_id": null, "template_id": null, diff --git a/tests/test_data/json/sauce1.json b/tests/test_data/json/sauce1.json index 6304165..e6426fd 100644 --- a/tests/test_data/json/sauce1.json +++ b/tests/test_data/json/sauce1.json @@ -31,7 +31,8 @@ "attachments": [], "junit_result_unparsed": [], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Verify page structure.Component 1 Verify page structure" } @@ -77,7 +78,8 @@ "attachments": [], "junit_result_unparsed": [], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Verify page structure.Component 2 Verify page structure" } @@ -86,4 +88,4 @@ } ], "source": "sauce.xml" -} +} \ No newline at end of file diff --git a/tests/test_data/json/sauce2.json b/tests/test_data/json/sauce2.json index 3354b01..e3b4c4b 100644 --- a/tests/test_data/json/sauce2.json +++ b/tests/test_data/json/sauce2.json @@ -31,7 +31,8 @@ "attachments": [], "junit_result_unparsed": [], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Verify page structure.Component 1 Verify page structure" } @@ -77,7 +78,8 @@ "attachments": [], "junit_result_unparsed": [], "result_fields": {}, - "custom_step_results": [] + "custom_step_results": [], + "custom_testrail_bdd_scenario_results": [] }, "custom_automation_id": "Verify page structure.Component 2 Verify page structure" } @@ -86,4 +88,4 @@ } ], "source": "sauce.xml" -} +} \ No newline at end of file diff --git a/tests/test_data/json/update_case_result_single_with_id.json b/tests/test_data/json/update_case_result_single_with_id.json index 764c3f9..a0f32a8 100644 --- a/tests/test_data/json/update_case_result_single_with_id.json +++ b/tests/test_data/json/update_case_result_single_with_id.json @@ -21,4 +21,4 @@ ] } ] -} +} \ No newline at end of file diff --git a/tests/test_data/json/update_case_result_single_without_id.json b/tests/test_data/json/update_case_result_single_without_id.json index eded26a..84d4988 100644 --- a/tests/test_data/json/update_case_result_single_without_id.json +++ b/tests/test_data/json/update_case_result_single_without_id.json @@ -21,4 +21,4 @@ ] } ] -} +} \ No newline at end of file diff --git a/tests/test_results_uploader.py b/tests/test_results_uploader.py index 4b71d7f..2ec26ee 100644 --- a/tests/test_results_uploader.py +++ b/tests/test_results_uploader.py @@ -41,18 +41,12 @@ def result_uploader_data_provider(self, mocker): environment._validated_user_ids = [] junit_file_parser = mocker.patch.object(JunitParser, "parse_file") - api_request_handler = mocker.patch( - "trcli.api.project_based_client.ApiRequestHandler" - ) - results_uploader = ResultsUploader( - environment=environment, suite=junit_file_parser - ) + api_request_handler = mocker.patch("trcli.api.project_based_client.ApiRequestHandler") + results_uploader = ResultsUploader(environment=environment, suite=junit_file_parser) yield environment, api_request_handler, results_uploader @pytest.mark.results_uploader - def test_project_name_missing_in_test_rail( - self, 
result_uploader_data_provider, mocker - ): + def test_project_name_missing_in_test_rail(self, result_uploader_data_provider, mocker): """The purpose of this test is to check that proper message will be printed and trcli will terminate with proper code when project with name provided does not exist in TestRail.""" ( @@ -67,17 +61,13 @@ def test_project_name_missing_in_test_rail( error_message=f"{environment.project} project doesn't exist.", failing=True, ) - expected_elog_calls = [ - mocker.call(f"\n{environment.project} project doesn't exist.") - ] + expected_elog_calls = [mocker.call(f"\n{environment.project} project doesn't exist.")] with pytest.raises(SystemExit) as exception: results_uploader.upload_results() environment.elog.assert_has_calls(expected_elog_calls) - assert ( - exception.type == SystemExit - ), f"Expected SystemExit exception, but got {exception.type} instead." + assert exception.type == SystemExit, f"Expected SystemExit exception, but got {exception.type} instead." assert ( exception.value.code == exit_code ), f"Expected exit code {exit_code}, but got {exception.value.code} instead." @@ -94,9 +84,7 @@ def test_project_name_missing_in_test_rail( ], ids=["Unknown error", "project name matches more than one result"], ) - def test_error_during_checking_of_project( - self, error_type, error_message, result_uploader_data_provider, mocker - ): + def test_error_during_checking_of_project(self, error_type, error_message, result_uploader_data_provider, mocker): """The purpose of this test is to check that proper message would be printed and trcli tool will terminate with proper code when errors occurs during project name check.""" ( @@ -112,20 +100,13 @@ def test_error_during_checking_of_project( failing=True, ) expected_log_calls = [ - mocker.call( - "\n" - + FAULT_MAPPING["error_checking_project"].format( - error_message=error_message - ) - ) + mocker.call("\n" + FAULT_MAPPING["error_checking_project"].format(error_message=error_message)) ] with pytest.raises(SystemExit) as exception: results_uploader.upload_results() environment.elog.assert_has_calls(expected_log_calls) - assert ( - exception.type == SystemExit - ), f"Expected SystemExit exception, but got {exception.type} instead." + assert exception.type == SystemExit, f"Expected SystemExit exception, but got {exception.type} instead." assert ( exception.value.code == exit_code ), f"Expected exit code {exit_code}, but got {exception.value.code} instead." @@ -136,9 +117,7 @@ def test_error_during_checking_of_project( TEST_UPLOAD_RESULTS_FLOW_TEST_DATA, ids=TEST_UPLOAD_RESULTS_FLOW_IDS, ) - def test_upload_results_flow( - self, failing_function, result_uploader_data_provider, mocker - ): + def test_upload_results_flow(self, failing_function, result_uploader_data_provider, mocker): """The purpose of those tests is to check that proper message would be printed and trcli tool will terminate with proper code when one of the functions in the flow fails.""" ( @@ -170,20 +149,14 @@ def test_upload_results_flow( with pytest.raises(SystemExit) as exception: results_uploader.upload_results() - assert ( - exception.type == SystemExit - ), f"Expected SystemExit exception, but got {exception.type} instead." + assert exception.type == SystemExit, f"Expected SystemExit exception, but got {exception.type} instead." assert ( exception.value.code == exit_code ), f"Expected exit code {exit_code}, but got {exception.value.code} instead." 
- @pytest.mark.parametrize( - "run_id", [None, 101], ids=["No run ID provided", "Run ID provided"] - ) + @pytest.mark.parametrize("run_id", [None, 101], ids=["No run ID provided", "Run ID provided"]) @pytest.mark.results_uploader - def test_upload_results_successful( - self, run_id, result_uploader_data_provider, mocker - ): + def test_upload_results_successful(self, run_id, result_uploader_data_provider, mocker): """The purpose of this test is to check if during successful run of upload_results proper messages would be printed.""" ( @@ -199,26 +172,22 @@ def test_upload_results_successful( error_message="", failing=True, ) - upload_results_inner_functions_mocker( - results_uploader=results_uploader, mocker=mocker, failing_functions=[] - ) + upload_results_inner_functions_mocker(results_uploader=results_uploader, mocker=mocker, failing_functions=[]) results_uploader.api_request_handler.check_automation_id_field.return_value = None results_uploader.api_request_handler.check_missing_test_cases_ids.return_value = ([], "") results_uploader.api_request_handler.delete_sections.return_value = ([], "") expected_log_calls = [] + # Note: Empty section removal messages are no longer expected because + # the new logic skips section/case creation when all cases have IDs if not run_id: calls = { - 2: mocker.call("Removing unnecessary empty sections that may have been created earlier. ", new_line=False), - 3: mocker.call("Removed 1 unused/empty section(s)."), - 4: mocker.call("Creating test run. ", new_line=False), - 5: mocker.call("Closing run. ", new_line=False), + 2: mocker.call("Creating test run. ", new_line=False), + 3: mocker.call("Closing run. ", new_line=False), } else: calls = { - 2: mocker.call("Removing unnecessary empty sections that may have been created earlier. ", new_line=False), - 3: mocker.call("Removed 1 unused/empty section(s)."), - 4: mocker.call("Updating test run. ", new_line=False), - 5: mocker.call("Closing run. ", new_line=False), + 2: mocker.call("Updating test run. ", new_line=False), + 3: mocker.call("Closing run. 
", new_line=False), } results_uploader.upload_results() @@ -226,9 +195,7 @@ def test_upload_results_successful( assert environment.log.call_args_list[index] == call @pytest.mark.results_uploader - def test_add_missing_sections_no_missing_sections( - self, result_uploader_data_provider - ): + def test_add_missing_sections_no_missing_sections(self, result_uploader_data_provider): """The purpose of this test is to check that add_missing_sections will return empty list and proper return code when there are no missing sections.""" ( @@ -280,12 +247,8 @@ def test_add_missing_sections_prompts_user( missing_sections, "", ) - results_uploader.environment.get_prompt_response_for_auto_creation.return_value = ( - user_response - ) - results_uploader.api_request_handler.data_provider.check_section_names_duplicates.return_value = ( - False - ) + results_uploader.environment.get_prompt_response_for_auto_creation.return_value = user_response + results_uploader.api_request_handler.data_provider.check_section_names_duplicates.return_value = False results_uploader.api_request_handler.add_sections.return_value = ( expected_added_sections, expected_add_sections_error, @@ -313,15 +276,11 @@ def test_add_missing_sections_prompts_user( environment.log.assert_has_calls(expected_log_calls) environment.elog.assert_has_calls(expected_elog_calls) environment.get_prompt_response_for_auto_creation.assert_called_with( - PROMPT_MESSAGES["create_missing_sections"].format( - project_name=environment.project - ) + PROMPT_MESSAGES["create_missing_sections"].format(project_name=environment.project) ) @pytest.mark.results_uploader - def test_add_missing_sections_error_checking( - self, result_uploader_data_provider, mocker - ): + def test_add_missing_sections_error_checking(self, result_uploader_data_provider, mocker): """The purpose of this test is to check that add_missing_sections will return empty list and -1 as a result code when check_missing_section_ids will fail. 
Proper message will be printed.""" ( @@ -381,9 +340,7 @@ def test_add_missing_test_cases_prompts_user( missing_test_cases, expected_message, ) - results_uploader.environment.get_prompt_response_for_auto_creation.return_value = ( - user_response - ) + results_uploader.environment.get_prompt_response_for_auto_creation.return_value = user_response results_uploader.api_request_handler.add_cases.return_value = ( expected_added_test_cases, expected_add_test_cases_error, @@ -412,15 +369,11 @@ def test_add_missing_test_cases_prompts_user( environment.log.assert_has_calls(expected_log_calls) environment.elog.assert_has_calls(expected_elog_calls) environment.get_prompt_response_for_auto_creation.assert_called_with( - PROMPT_MESSAGES["create_missing_test_cases"].format( - project_name=environment.project - ) + PROMPT_MESSAGES["create_missing_test_cases"].format(project_name=environment.project) ) @pytest.mark.results_uploader - def test_add_missing_test_cases_duplicated_case_names( - self, result_uploader_data_provider, mocker - ): + def test_add_missing_test_cases_duplicated_case_names(self, result_uploader_data_provider, mocker): """The purpose of this test is to check that proper warning will be printed when duplicated case names will be detected in result file.""" @@ -433,11 +386,7 @@ def test_rollback_changes_empty_changelist(self, result_uploader_data_provider): results_uploader, ) = result_uploader_data_provider - results_uploader.project = ProjectData( - project_id=1, - suite_mode=SuiteModes.single_suite, - error_message="" - ) + results_uploader.project = ProjectData(project_id=1, suite_mode=SuiteModes.single_suite, error_message="") assert ( results_uploader.rollback_changes() == [] @@ -460,11 +409,7 @@ def test_rollback_changes_after_error( results_uploader, ) = result_uploader_data_provider - results_uploader.project = ProjectData( - project_id=1, - suite_mode=SuiteModes.multiple_suites, - error_message="" - ) + results_uploader.project = ProjectData(project_id=1, suite_mode=SuiteModes.multiple_suites, error_message="") api_request_handler_delete_mocker( results_uploader=results_uploader, @@ -493,16 +438,10 @@ def test_rollback_changes_after_error_doesnt_delete_existing_suite( results_uploader, ) = result_uploader_data_provider - results_uploader.project = ProjectData( - project_id=1, - suite_mode=SuiteModes.multiple_suites, - error_message="" - ) + results_uploader.project = ProjectData(project_id=1, suite_mode=SuiteModes.multiple_suites, error_message="") suite_id = 1234 - results_uploader.api_request_handler.suites_data_from_provider.suite_id = ( - suite_id - ) + results_uploader.api_request_handler.suites_data_from_provider.suite_id = suite_id results_uploader.api_request_handler.check_suite_id.return_value = (True, "") api_request_handler_delete_mocker( From d60f5b13c798c03db3e75b4355173a0786a95e7c Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 15 Jan 2026 15:08:15 +0800 Subject: [PATCH 13/33] TRCLI-193: Remove parse_gherkin, affected unit and functional tests, readme --- README.md | 61 ------- setup.py | 1 - tests/pytest.ini | 2 - tests/test_cmd_parse_gherkin.py | 141 --------------- tests/test_data/cli_test_data.py | 1 - tests/test_gherkin_parser.py | 179 ------------------- tests_e2e/test_end2end.py | 81 +-------- trcli/commands/cmd_parse_gherkin.py | 141 --------------- trcli/constants.py | 1 - trcli/readers/gherkin_parser.py | 268 ---------------------------- 10 files changed, 1 insertion(+), 875 deletions(-) delete mode 100644 tests/test_cmd_parse_gherkin.py delete 
mode 100644 tests/test_gherkin_parser.py delete mode 100644 trcli/commands/cmd_parse_gherkin.py delete mode 100644 trcli/readers/gherkin_parser.py diff --git a/README.md b/README.md index 00bf828..9fb7bea 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,6 @@ Supported and loaded modules: - parse_cucumber: Cucumber JSON results (BDD) - import_gherkin: Upload .feature files to TestRail BDD - export_gherkin: Export BDD test cases as .feature files - - parse_gherkin: Parse Gherkin .feature file locally - parse_robot: Robot Framework XML Files - parse_openapi: OpenAPI YML Files - add_run: Create a new test run @@ -92,7 +91,6 @@ Commands: import_gherkin Upload Gherkin .feature file to TestRail labels Manage labels in TestRail parse_cucumber Parse Cucumber JSON results and upload to TestRail - parse_gherkin Parse Gherkin .feature file locally parse_junit Parse JUnit report and upload results to TestRail parse_openapi Parse OpenAPI spec and create cases in TestRail parse_robot Parse Robot Framework report and upload results to TestRail @@ -343,7 +341,6 @@ The TestRail CLI provides four commands for complete BDD workflow management: | `import_gherkin` | Import .feature files to create test cases | Create BDD test cases in TestRail from existing .feature files | | `export_gherkin` | Export test cases as .feature files | Extract test cases from TestRail for automation | | `parse_cucumber` | Parse Cucumber JSON and upload results | Upload test results from Cucumber/Behave/pytest-bdd execution | -| `parse_gherkin` | Parse .feature files locally (no upload) | Validate syntax, convert to JSON, preview TestRail structure | ### Uploading Cucumber/BDD Test Results @@ -688,64 +685,6 @@ Retrieving BDD test case 123... - Generate documentation from test cases - Migrate test cases between projects -### Parsing Gherkin Feature Files Locally - -The `parse_gherkin` command parses Gherkin .feature files locally and converts them into TestRail data structure format without uploading to TestRail. This is useful for validation, conversion, or integration with custom workflows. - -#### Reference -```shell -$ trcli parse_gherkin --help -Usage: trcli parse_gherkin [OPTIONS] - - Parse Gherkin .feature file locally - - This command parses Gherkin/BDD .feature files and converts them into - TestRail data structure format without uploading to TestRail. - -Options: - -f, --file Path to Gherkin .feature file to parse [required] - --output Optional output file path to save parsed JSON - --pretty Pretty print JSON output with indentation - --help Show this message and exit. 
-``` - -#### Usage Examples -```shell -# Parse a feature file and output to console -$ trcli parse_gherkin -f features/login.feature - -# Parse and save to JSON file with pretty formatting -$ trcli parse_gherkin -f features/login.feature \ - --output parsed-output.json \ - --pretty - -# Parse multiple feature files -$ trcli parse_gherkin -f features/checkout.feature \ - --output checkout.json \ - --pretty -``` - -**Use cases:** -- Validate Gherkin syntax locally before uploading -- Convert .feature files to TestRail JSON format -- Preview how features will be structured in TestRail -- Integrate with custom automation workflows -- Debug feature file parsing issues - -### BDD Mapping to TestRail - -When using parse_cucumber with `--upload-feature`, the following mapping rules apply: - -| Gherkin Element | TestRail Field | Description | -|----------------|----------------|-------------| -| `Feature:` name + description | Test Case title + Preconditions | Feature metadata becomes test case info | -| `Background:` | BDD Scenario field | Shared setup steps | -| `Scenario:` / `Scenario Outline:` | BDD Scenario field | Individual test scenarios | -| `Given`/`When`/`Then`/`And`/`But` | BDD Scenario field | Test steps with keywords | -| `Examples:` table | BDD Scenario field | Data table for scenario outlines | -| `@tags` | References/BDD fields | Tags become references (e.g., @JIRA-123) | -| `@C` tags | Case ID | Map to existing test cases (e.g., @C456) | - ### Exploring other features #### General features diff --git a/setup.py b/setup.py index 7b98ee0..10c9653 100644 --- a/setup.py +++ b/setup.py @@ -25,7 +25,6 @@ "openapi-spec-validator>=0.5.0,<1.0.0", "beartype>=0.17.0,<1.0.0", "prance", # Does not use semantic versioning - "gherkin-official>=27.0.0,<28.0.0", # Gherkin/BDD feature file parser ], entry_points=""" [console_scripts] diff --git a/tests/pytest.ini b/tests/pytest.ini index 1115013..66ad3a0 100644 --- a/tests/pytest.ini +++ b/tests/pytest.ini @@ -14,8 +14,6 @@ markers = proxy: test for proxy feature cmd_import_gherkin: tests for import_gherkin command cmd_export_gherkin: tests for export_gherkin command - cmd_parse_gherkin: tests for parse_gherkin command cmd_parse_cucumber: tests for parse_cucumber command - parse_gherkin: tests for gherkin parser parse_cucumber: tests for cucumber parser cucumber_bdd_matching: tests for cucumber bdd matching diff --git a/tests/test_cmd_parse_gherkin.py b/tests/test_cmd_parse_gherkin.py deleted file mode 100644 index 1d6624e..0000000 --- a/tests/test_cmd_parse_gherkin.py +++ /dev/null @@ -1,141 +0,0 @@ -import pytest -import json -from unittest import mock -from unittest.mock import MagicMock, patch, mock_open -from click.testing import CliRunner -from pathlib import Path - -from trcli.cli import Environment -from trcli.commands import cmd_parse_gherkin -from trcli.readers.gherkin_parser import GherkinParser - - -class TestCmdParseGherkin: - """Test class for parse_gherkin command functionality""" - - def setup_method(self): - """Set up test environment and runner""" - self.runner = CliRunner() - self.test_feature_path = str(Path(__file__).parent / "test_data" / "FEATURE" / "sample_login.feature") - - @pytest.mark.cmd_parse_gherkin - def test_parse_gherkin_success_stdout(self): - """Test successful parsing with output to stdout""" - result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", self.test_feature_path]) - - assert result.exit_code == 0 - # Output contains logging messages + JSON, extract JSON (starts with '{') - json_start = 
result.output.find("{") - assert json_start >= 0, "No JSON found in output" - json_str = result.output[json_start:] - output_data = json.loads(json_str) - assert "suites" in output_data - assert "summary" in output_data - assert output_data["summary"]["total_suites"] >= 1 - - @pytest.mark.cmd_parse_gherkin - def test_parse_gherkin_success_with_output_file(self): - """Test successful parsing with output to file""" - with self.runner.isolated_filesystem(): - output_file = "parsed_output.json" - result = self.runner.invoke( - cmd_parse_gherkin.cli, ["--file", self.test_feature_path, "--output", output_file] - ) - - assert result.exit_code == 0 - assert "parsed results saved to" in result.output.lower() - - # Verify file was created - with open(output_file, "r") as f: - output_data = json.load(f) - assert "suites" in output_data - assert "summary" in output_data - - @pytest.mark.cmd_parse_gherkin - def test_parse_gherkin_pretty_print(self): - """Test parsing with pretty print formatting""" - result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", self.test_feature_path, "--pretty"]) - - assert result.exit_code == 0 - # Extract JSON from output - json_start = result.output.find("{") - json_str = result.output[json_start:] - output_data = json.loads(json_str) - assert "suites" in output_data - # Check that JSON portion contains newlines and indentation (pretty format) - assert "\n" in json_str - assert " " in json_str # Indentation - - @pytest.mark.cmd_parse_gherkin - def test_parse_gherkin_missing_file(self): - """Test parsing with non-existent file""" - result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", "/nonexistent/file.feature"]) - - # Click returns exit code 2 for invalid parameter (file doesn't exist) - assert result.exit_code in [1, 2] # Either our error handling or Click's - - @pytest.mark.cmd_parse_gherkin - def test_parse_gherkin_invalid_feature_file(self): - """Test parsing with invalid Gherkin syntax""" - with self.runner.isolated_filesystem(): - # Create invalid feature file - invalid_file = "invalid.feature" - with open(invalid_file, "w") as f: - f.write("This is not valid Gherkin syntax at all!!!") - - result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", invalid_file]) - - assert result.exit_code == 1 - - @pytest.mark.cmd_parse_gherkin - def test_parse_gherkin_required_file_parameter(self): - """Test that --file parameter is required""" - result = self.runner.invoke(cmd_parse_gherkin.cli, []) - - assert result.exit_code == 2 # Click returns 2 for missing required params - assert "Missing option" in result.output or "required" in result.output.lower() - - @pytest.mark.cmd_parse_gherkin - def test_parse_gherkin_output_structure(self): - """Test that output has correct structure""" - result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", self.test_feature_path]) - - assert result.exit_code == 0 - json_start = result.output.find("{") - output_data = json.loads(result.output[json_start:]) - - # Verify top-level structure - assert "suites" in output_data - assert "summary" in output_data - - # Verify summary structure - summary = output_data["summary"] - assert "total_suites" in summary - assert "total_sections" in summary - assert "total_cases" in summary - assert "source_file" in summary - - # Verify suites structure - if output_data["suites"]: - suite = output_data["suites"][0] - assert "name" in suite - assert "source" in suite - assert "testsections" in suite - - if suite["testsections"]: - section = suite["testsections"][0] - assert "name" in 
section - assert "testcases" in section - - @pytest.mark.cmd_parse_gherkin - def test_parse_gherkin_empty_file(self): - """Test parsing with empty feature file""" - with self.runner.isolated_filesystem(): - empty_file = "empty.feature" - with open(empty_file, "w") as f: - f.write("") - - result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", empty_file]) - - # Should fail with parsing error - assert result.exit_code == 1 diff --git a/tests/test_data/cli_test_data.py b/tests/test_data/cli_test_data.py index c282137..cf6f59b 100644 --- a/tests/test_data/cli_test_data.py +++ b/tests/test_data/cli_test_data.py @@ -68,7 +68,6 @@ " - parse_cucumber: Cucumber JSON results (BDD)\n" " - import_gherkin: Upload .feature files to TestRail BDD\n" " - export_gherkin: Export BDD test cases as .feature files\n" - " - parse_gherkin: Parse Gherkin .feature file locally\n" " - parse_robot: Robot Framework XML Files\n" " - parse_openapi: OpenAPI YML Files\n" " - add_run: Create a new test run\n" diff --git a/tests/test_gherkin_parser.py b/tests/test_gherkin_parser.py deleted file mode 100644 index 496a4bc..0000000 --- a/tests/test_gherkin_parser.py +++ /dev/null @@ -1,179 +0,0 @@ -import pytest -from pathlib import Path -from trcli.cli import Environment -from trcli.data_classes.data_parsers import MatchersParser -from trcli.readers.gherkin_parser import GherkinParser - - -class TestGherkinParser: - """Tests for Gherkin .feature file parser""" - - @pytest.fixture - def sample_feature_path(self): - """Path to the sample login feature file""" - return Path(__file__).parent / "test_data" / "FEATURE" / "sample_login.feature" - - @pytest.fixture - def environment(self, sample_feature_path): - """Create a test environment""" - env = Environment() - env.file = str(sample_feature_path) - env.case_matcher = MatchersParser.AUTO - env.suite_name = None - env.verbose = False - return env - - @pytest.mark.parse_gherkin - def test_gherkin_parser_sample_file(self, environment, sample_feature_path): - """Test parsing of sample_login.feature""" - # Ensure file exists - assert sample_feature_path.exists(), f"Sample file not found: {sample_feature_path}" - - # Create parser and parse - parser = GherkinParser(environment) - suites = parser.parse_file() - - # Verify structure - assert suites is not None - assert len(suites) == 1, "Should parse into exactly one suite" - - suite = suites[0] - assert suite.name == "User Login" - assert suite.source == "sample_login.feature" - - # Check sections - assert len(suite.testsections) == 1 - section = suite.testsections[0] - assert section.name == "User Login" - - # Check background stored as property - assert section.properties is not None - assert len(section.properties) > 0 - background_prop = section.properties[0] - assert background_prop.name == "background" - assert "the application is running" in background_prop.value - - # Check test cases (should have expanded scenario outline) - # Expected: 2 regular scenarios + 4 scenario outline examples = 6 total - assert len(section.testcases) >= 2, "Should have at least 2 test cases" - - # Verify first test case structure - first_case = section.testcases[0] - assert first_case.title is not None - assert first_case.custom_automation_id is not None - assert first_case.result is not None - assert len(first_case.result.custom_step_results) > 0 - - @pytest.mark.parse_gherkin - def test_gherkin_parser_scenario_parsing(self, environment, sample_feature_path): - """Test that scenarios are correctly parsed with steps""" - parser = 
GherkinParser(environment) - suites = parser.parse_file() - - suite = suites[0] - section = suite.testsections[0] - test_cases = section.testcases - - # Find the "Successful login" scenario - successful_login_case = None - for case in test_cases: - if "Successful login" in case.title: - successful_login_case = case - break - - assert successful_login_case is not None, "Should find 'Successful login' test case" - - # Verify steps - steps = successful_login_case.result.custom_step_results - assert len(steps) == 6, "Successful login scenario should have 6 steps" - - # Check first step - first_step = steps[0] - assert "Given" in first_step.content - assert "valid username" in first_step.content - - @pytest.mark.parse_gherkin - def test_gherkin_parser_tags_in_automation_id(self, environment, sample_feature_path): - """Test that tags are included in automation ID""" - parser = GherkinParser(environment) - suites = parser.parse_file() - - suite = suites[0] - section = suite.testsections[0] - test_cases = section.testcases - - # Find a case with tags - tagged_case = None - for case in test_cases: - if "@smoke" in case.custom_automation_id or "@authentication" in case.custom_automation_id: - tagged_case = case - break - - assert tagged_case is not None, "Should find a test case with tags in automation_id" - assert "@" in tagged_case.custom_automation_id, "Automation ID should contain tags" - - @pytest.mark.parse_gherkin - def test_gherkin_parser_scenario_outline_expansion(self, environment, sample_feature_path): - """Test that Scenario Outlines are expanded into multiple test cases""" - parser = GherkinParser(environment) - suites = parser.parse_file() - - suite = suites[0] - section = suite.testsections[0] - test_cases = section.testcases - - # Find scenario outline examples - outline_examples = [case for case in test_cases if "Example" in case.title] - - assert len(outline_examples) >= 4, "Should have at least 4 example cases from Scenario Outline" - - # Verify example case has parameters - example_case = outline_examples[0] - assert "example_params" in example_case.case_fields - assert example_case.result is not None - - @pytest.mark.parse_gherkin - def test_gherkin_parser_with_custom_suite_name(self, environment, sample_feature_path): - """Test parser with custom suite name""" - environment.suite_name = "Custom Suite Name" - - parser = GherkinParser(environment) - suites = parser.parse_file() - - assert suites[0].name == "Custom Suite Name" - - @pytest.mark.parse_gherkin - def test_gherkin_parser_case_matcher_name(self, environment, sample_feature_path): - """Test parser with NAME case matcher""" - environment.case_matcher = MatchersParser.NAME - - parser = GherkinParser(environment) - suites = parser.parse_file() - - # Should parse without errors - assert suites is not None - assert len(suites) == 1 - - @pytest.mark.parse_gherkin - def test_gherkin_parser_missing_file(self): - """Test parser with non-existent file""" - env = Environment() - env.file = "nonexistent.feature" - env.case_matcher = MatchersParser.AUTO - - with pytest.raises(FileNotFoundError): - parser = GherkinParser(env) - - @pytest.mark.parse_gherkin - def test_gherkin_parser_all_steps_untested(self, environment, sample_feature_path): - """Test that all steps are marked as untested by default""" - parser = GherkinParser(environment) - suites = parser.parse_file() - - suite = suites[0] - section = suite.testsections[0] - - for test_case in section.testcases: - assert test_case.result.status_id == 3, "Result status should be 3 
(Untested)" - for step in test_case.result.custom_step_results: - assert step.status_id == 3, "All steps should be untested (status_id=3)" diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index 6cb60a8..9dd9424 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -1951,62 +1951,6 @@ def test_assign_failures_with_existing_run(self): # ==================== BDD/GHERKIN FEATURE TESTS ==================== - def test_parse_gherkin_local_parsing(self): - """Test parse_gherkin command for local .feature file parsing (no TestRail upload)""" - output = _run_cmd( - f""" -trcli parse_gherkin \\ - -f "reports_gherkin/sample_login.feature" - """ - ) - _assert_contains( - output, - [ - "Parsing Gherkin feature file:", - "sample_login.feature", - '"suites"', - '"summary"', - '"total_suites"', - '"total_cases"', - ], - ) - - def test_parse_gherkin_with_output_file(self): - """Test parse_gherkin command with output file option""" - output = _run_cmd( - f""" -trcli parse_gherkin \\ - -f "reports_gherkin/sample_login.feature" \\ - --output "parsed_gherkin.json" - """ - ) - _assert_contains( - output, - ["Parsing Gherkin feature file:", "sample_login.feature", "Parsed results saved to", "parsed_gherkin.json"], - ) - - def test_parse_gherkin_pretty_format(self): - """Test parse_gherkin command with pretty print formatting""" - output = _run_cmd( - f""" -trcli parse_gherkin \\ - -f "reports_gherkin/sample_login.feature" \\ - --pretty - """ - ) - _assert_contains(output, ["Parsing Gherkin feature file:", "sample_login.feature", '"suites"', '"summary"']) - - def test_parse_gherkin_custom_suite_name(self): - """Test parse_gherkin command with custom suite name""" - output = _run_cmd( - f""" -trcli parse_gherkin \\ - -f "reports_gherkin/sample_login.feature" \\ - --suite-name "Custom BDD Suite" - """ - ) - _assert_contains(output, ["Parsing Gherkin feature file:", '"name": "Custom BDD Suite"']) - def test_import_gherkin_upload_feature(self): """Test import_gherkin command to upload .feature file to TestRail""" output = _run_cmd( @@ -2270,21 +2214,7 @@ def test_bdd_help_commands(self): # Test main CLI help shows BDD commands main_help_output = _run_cmd("trcli --help") - _assert_contains(main_help_output, ["parse_gherkin", "import_gherkin", "export_gherkin", "parse_cucumber"]) - - # Test parse_gherkin help - parse_gherkin_help = _run_cmd("trcli parse_gherkin --help") - _assert_contains( - parse_gherkin_help, - [ - "Parse Gherkin .feature file locally", - "-f, --file", - "--output", - "--pretty", - "--suite-name", - "--case-matcher", - ], - ) + _assert_contains(main_help_output, ["import_gherkin", "export_gherkin", "parse_cucumber"]) # Test import_gherkin help import_gherkin_help = _run_cmd("trcli import_gherkin --help") @@ -2321,15 +2251,6 @@ def test_bdd_help_commands(self): def test_bdd_error_handling_invalid_file(self): """Test BDD commands with invalid file paths""" - # Test parse_gherkin with non-existent file - invalid_parse_output, return_code = _run_cmd_allow_failure( - """ -trcli parse_gherkin \\ - -f "nonexistent.feature" - """ - ) - assert return_code != 0 - # Test import_gherkin with non-existent file invalid_import_output, return_code = _run_cmd_allow_failure( f""" diff --git a/trcli/commands/cmd_parse_gherkin.py b/trcli/commands/cmd_parse_gherkin.py deleted file mode 100644 index c9af7f0..0000000 --- a/trcli/commands/cmd_parse_gherkin.py +++ /dev/null @@ -1,141 +0,0 @@ -import json -import click - -from trcli.cli import pass_environment, Environment, 
CONTEXT_SETTINGS -from trcli.constants import FAULT_MAPPING -from trcli.readers.gherkin_parser import GherkinParser -from serde import to_dict - - -@click.command(context_settings=CONTEXT_SETTINGS) -@click.option( - "-f", - "--file", - type=click.Path(exists=True), - metavar="", - required=True, - help="Path to Gherkin .feature file to parse.", -) -@click.option("--output", type=click.Path(), metavar="", help="Optional output file path to save parsed JSON.") -@click.option("--pretty", is_flag=True, help="Pretty print JSON output with indentation.") -@click.pass_context -@pass_environment -def cli(environment: Environment, context: click.Context, file: str, output: str, pretty: bool): - """Parse Gherkin .feature file locally - - This command parses Gherkin/BDD .feature files and converts them into - TestRail data structure format without uploading to TestRail. - - """ - environment.cmd = "parse_gherkin" - environment.file = file - - try: - # Parse the feature file - parser = GherkinParser(environment) - parsed_suites = parser.parse_file() - - # Convert to dictionary format (manual serialization to include skipped fields) - suites_data = [] - for suite in parsed_suites: - # Manually serialize the suite to include testsections - sections_data = [] - for section in suite.testsections: - # Manually serialize test cases - cases_data = [] - for case in section.testcases: - case_dict = { - "title": case.title, - "case_id": case.case_id, - "custom_automation_id": case.custom_automation_id, - "case_fields": case.case_fields, - } - # Include result if present - if case.result: - result_data = { - "status_id": case.result.status_id, - "comment": case.result.comment, - "elapsed": case.result.elapsed, - } - # Include steps - if case.result.custom_step_results: - steps_data = [] - for step in case.result.custom_step_results: - steps_data.append( - { - "content": step.content, - "status_id": step.status_id if hasattr(step, "status_id") else None, - } - ) - result_data["custom_step_results"] = steps_data - case_dict["result"] = result_data - cases_data.append(case_dict) - - # Serialize properties - properties_data = [] - if section.properties: - for prop in section.properties: - properties_data.append( - { - "name": prop.name, - "value": prop.value, - } - ) - - section_dict = { - "name": section.name, - "testcases": cases_data, - "properties": properties_data, - } - sections_data.append(section_dict) - - suite_dict = { - "name": suite.name, - "source": suite.source, - "testsections": sections_data, - } - suites_data.append(suite_dict) - - # Prepare JSON output - output_data = { - "suites": suites_data, - "summary": { - "total_suites": len(suites_data), - "total_sections": sum(len(suite.get("testsections", [])) for suite in suites_data), - "total_cases": sum( - len(section.get("testcases", [])) - for suite in suites_data - for section in suite.get("testsections", []) - ), - "source_file": file, - }, - } - - # Format JSON - if pretty: - json_output = json.dumps(output_data, indent=2, ensure_ascii=False) - else: - json_output = json.dumps(output_data, ensure_ascii=False) - - # Output results - if output: - # Save to file - with open(output, "w", encoding="utf-8") as f: - f.write(json_output) - environment.log(f"✓ Parsed results saved to: {output}") - environment.log(f" Total suites: {output_data['summary']['total_suites']}") - environment.log(f" Total sections: {output_data['summary']['total_sections']}") - environment.log(f" Total test cases: {output_data['summary']['total_cases']}") - else: - # Print to 
stdout - print(json_output) - - except FileNotFoundError: - environment.elog(FAULT_MAPPING["missing_file"]) - exit(1) - except ValueError as e: - environment.elog(f"Error parsing Gherkin file: {str(e)}") - exit(1) - except Exception as e: - environment.elog(f"Unexpected error during parsing: {str(e)}") - exit(1) diff --git a/trcli/constants.py b/trcli/constants.py index 26f479b..dc5f5f9 100644 --- a/trcli/constants.py +++ b/trcli/constants.py @@ -101,7 +101,6 @@ - parse_cucumber: Cucumber JSON results (BDD) - import_gherkin: Upload .feature files to TestRail BDD - export_gherkin: Export BDD test cases as .feature files - - parse_gherkin: Parse Gherkin .feature file locally - parse_robot: Robot Framework XML Files - parse_openapi: OpenAPI YML Files - add_run: Create a new test run diff --git a/trcli/readers/gherkin_parser.py b/trcli/readers/gherkin_parser.py deleted file mode 100644 index fe980b4..0000000 --- a/trcli/readers/gherkin_parser.py +++ /dev/null @@ -1,268 +0,0 @@ -from pathlib import Path -from beartype.typing import List, Dict, Any, Optional -from gherkin.parser import Parser -from gherkin.token_scanner import TokenScanner - -from trcli.cli import Environment -from trcli.data_classes.data_parsers import MatchersParser, TestRailCaseFieldsOptimizer -from trcli.data_classes.dataclass_testrail import ( - TestRailCase, - TestRailSuite, - TestRailSection, - TestRailProperty, - TestRailResult, - TestRailSeparatedStep, -) -from trcli.readers.file_parser import FileParser - - -class GherkinParser(FileParser): - """Parser for Gherkin .feature files""" - - def __init__(self, environment: Environment): - super().__init__(environment) - self.case_matcher = environment.case_matcher - - def parse_file(self) -> List[TestRailSuite]: - """Parse a Gherkin .feature file and convert to TestRailSuite structure""" - self.env.log(f"Parsing Gherkin feature file: {self.filename}") - - # Read and parse the feature file - with open(self.filepath, "r", encoding="utf-8") as f: - feature_text = f.read() - - parser = Parser() - scanner = TokenScanner(feature_text) - gherkin_document = parser.parse(scanner) - - # Extract feature - feature = gherkin_document.get("feature") - if not feature: - raise ValueError("No feature found in the Gherkin file") - - # Parse feature into TestRail structure - suite_name = self.env.suite_name if self.env.suite_name else feature.get("name", self.filepath.stem) - sections = self._parse_feature_children(feature) - - cases_count = sum(len(section.testcases) for section in sections) - self.env.log(f"Processed {cases_count} test cases in {len(sections)} sections.") - - testrail_suite = TestRailSuite( - name=suite_name, - testsections=sections, - source=self.filename, - ) - - return [testrail_suite] - - def _parse_feature_children(self, feature: Dict[str, Any]) -> List[TestRailSection]: - """Parse feature children (Background, Scenarios, Scenario Outlines) into sections""" - sections = [] - background_steps = None - - # First pass: extract background if present - for child in feature.get("children", []): - if "background" in child: - background_steps = self._extract_steps(child["background"]) - break - - # Group scenarios into a single section (using feature name) - feature_name = feature.get("name", "Feature") - section = TestRailSection(name=feature_name, testcases=[]) - - # Store background as section property if exists - if background_steps: - background_text = "\n".join([f"{step['keyword']}{step['text']}" for step in background_steps]) - section.properties = 
[TestRailProperty(name="background", value=background_text)] - - # Second pass: process scenarios - for child in feature.get("children", []): - if "scenario" in child: - scenario = child["scenario"] - # Check if it's a Scenario Outline - if scenario.get("keyword") == "Scenario Outline": - # Expand scenario outline into multiple test cases - test_cases = self._parse_scenario_outline(scenario, feature_name) - section.testcases.extend(test_cases) - else: - # Regular scenario - test_case = self._parse_scenario(scenario, feature_name) - if test_case: - section.testcases.append(test_case) - - if section.testcases: - sections.append(section) - - return sections - - def _parse_scenario(self, scenario: Dict[str, Any], feature_name: str) -> Optional[TestRailCase]: - """Parse a single Gherkin scenario into a TestRailCase""" - scenario_name = scenario.get("name", "Untitled Scenario") - tags = self._extract_tags(scenario) - steps = self._extract_steps(scenario) - - # Extract case ID if using name or property matcher - case_id = None - if self.case_matcher == MatchersParser.NAME: - case_id, scenario_name = MatchersParser.parse_name_with_id(scenario_name) - elif self.case_matcher == MatchersParser.PROPERTY: - # Look for @C tag pattern - for tag in tags: - if tag.startswith("@C") or tag.startswith("@c"): - try: - case_id = int(tag[2:]) - break - except ValueError: - pass - - # Create automation ID from feature, tags, and scenario name - # Format: "feature_name.@tag1.@tag2.scenario_name" - tag_part = ".".join(tags) if tags else "" - automation_id = f"{feature_name}.{tag_part}.{scenario_name}" if tag_part else f"{feature_name}.{scenario_name}" - - # Convert Gherkin steps to TestRail separated steps - step_results = [] - for step in steps: - step_content = f"{step['keyword']}{step['text']}" - tr_step = TestRailSeparatedStep(content=step_content) - tr_step.status_id = 3 # Untested by default - step_results.append(tr_step) - - # Create result object - result = TestRailResult( - case_id=case_id, - status_id=3, # Untested (no execution results yet) - comment=f"Gherkin scenario with {len(steps)} steps", - custom_step_results=step_results, - ) - - # Create test case - test_case = TestRailCase( - title=TestRailCaseFieldsOptimizer.extract_last_words( - scenario_name, TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH - ), - case_id=case_id, - result=result, - custom_automation_id=automation_id, - case_fields={"tags": ", ".join(tags)} if tags else {}, - ) - - return test_case - - def _parse_scenario_outline(self, scenario_outline: Dict[str, Any], feature_name: str) -> List[TestRailCase]: - """Parse a Scenario Outline into multiple TestRailCases (one per example row)""" - test_cases = [] - outline_name = scenario_outline.get("name", "Untitled Outline") - tags = self._extract_tags(scenario_outline) - steps = self._extract_steps(scenario_outline) - examples = scenario_outline.get("examples", []) - - if not examples: - # No examples, treat as regular scenario - test_case = self._parse_scenario(scenario_outline, feature_name) - if test_case: - return [test_case] - - # Process each example table - for example_table in examples: - table_header = example_table.get("tableHeader", {}) - table_body = example_table.get("tableBody", []) - - # Get column names from header - header_cells = table_header.get("cells", []) - column_names = [cell.get("value", "") for cell in header_cells] - - # Create a test case for each row - for row_idx, row in enumerate(table_body, start=1): - row_cells = row.get("cells", []) - row_values = 
[cell.get("value", "") for cell in row_cells] - - # Create parameter mapping - params = dict(zip(column_names, row_values)) - - # Replace placeholders in scenario name - scenario_name = self._replace_placeholders(outline_name, params) - scenario_name = f"{outline_name} [Example {row_idx}]" - - # Replace placeholders in steps - instantiated_steps = [] - for step in steps: - step_text = self._replace_placeholders(step["text"], params) - instantiated_steps.append( - {"keyword": step["keyword"], "text": step_text, "keywordType": step.get("keywordType")} - ) - - # Create automation ID - tag_part = ".".join(tags) if tags else "" - automation_id = ( - f"{feature_name}.{tag_part}.{outline_name}.example_{row_idx}" - if tag_part - else f"{feature_name}.{outline_name}.example_{row_idx}" - ) - - # Convert steps to TestRail format - step_results = [] - for step in instantiated_steps: - step_content = f"{step['keyword']}{step['text']}" - tr_step = TestRailSeparatedStep(content=step_content) - tr_step.status_id = 3 # Untested - step_results.append(tr_step) - - # Create result - result = TestRailResult( - case_id=None, - status_id=3, - comment=f"Scenario Outline example {row_idx}: {params}", - custom_step_results=step_results, - ) - - # Create test case - test_case = TestRailCase( - title=TestRailCaseFieldsOptimizer.extract_last_words( - scenario_name, TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH - ), - case_id=None, - result=result, - custom_automation_id=automation_id, - case_fields=( - {"tags": ", ".join(tags), "example_params": str(params)} - if tags - else {"example_params": str(params)} - ), - ) - - test_cases.append(test_case) - - return test_cases - - @staticmethod - def _extract_tags(scenario: Dict[str, Any]) -> List[str]: - """Extract tags from a scenario""" - tags = [] - for tag in scenario.get("tags", []): - tag_name = tag.get("name", "") - if tag_name: - tags.append(tag_name) - return tags - - @staticmethod - def _extract_steps(scenario_or_background: Dict[str, Any]) -> List[Dict[str, Any]]: - """Extract steps from a scenario or background""" - steps = [] - for step in scenario_or_background.get("steps", []): - steps.append( - { - "keyword": step.get("keyword", ""), - "text": step.get("text", ""), - "keywordType": step.get("keywordType", ""), - } - ) - return steps - - @staticmethod - def _replace_placeholders(text: str, params: Dict[str, str]) -> str: - """Replace with actual values from params""" - result = text - for key, value in params.items(): - result = result.replace(f"<{key}>", value) - return result From f0fc65e96a33331f87ffa0847a8073fcdc2f4943 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 15 Jan 2026 16:10:22 +0800 Subject: [PATCH 14/33] TRCLI-193: Added new flag option --update for import_gherkin, also updated readme and tests --- README.md | 34 +++++-- tests/test_cmd_import_gherkin.py | 138 +++++++++++++++++++++++++++ trcli/api/api_request_handler.py | 40 ++++++++ trcli/commands/cmd_import_gherkin.py | 80 ++++++++++++---- 4 files changed, 268 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 9fb7bea..ff95a01 100644 --- a/README.md +++ b/README.md @@ -573,34 +573,52 @@ The `import_gherkin` command allows you to upload BDD test cases in TestRail fro $ trcli import_gherkin --help Usage: trcli import_gherkin [OPTIONS] - Import Gherkin .feature file to create BDD test cases in TestRail + Upload or update Gherkin .feature file in TestRail Options: -f, --file Path to .feature file to import [required] - --section-id Section ID where test cases 
will be created [x>=1] [required]
+  --section-id  Section ID where test cases will be created (required for create mode)  [x>=1]
+  --case-id  Case ID to update (required with --update flag)  [x>=1]
+  --json-output  Output case IDs in JSON format
+  --update  Update existing BDD test case instead of creating new one
   -v, --verbose  Enable verbose logging output
   --help  Show this message and exit.
 ```
 
-#### Usage Example
+#### Usage Examples
 ```shell
-# Import a single feature file
+# Create new test case (requires --section-id)
 $ trcli import_gherkin -f features/login.feature \
     --project "Your Project" \
     --section-id 456 \
     -y
 
-# Import with custom project settings
+# Update existing test case (requires --case-id)
+$ trcli import_gherkin -f features/login.feature \
+    --project "Your Project" \
+    --case-id 789 \
+    --update \
+    -y
+
+# Create with custom project settings
 $ trcli import_gherkin -f features/checkout.feature \
     --project-id 10 \
-    --section-id 789 \
+    --section-id 123 \
     -v -y
 ```
 
 **How it works:**
+
+**Create mode (default):**
 1. Reads the .feature file
-2. Uploads to TestRail via `add_bdd` endpoint
-3. TestRail creates test case(s) with complete Gherkin content
-4. Returns created case ID(s)
+2. Uploads to TestRail via `add_bdd/{section_id}` endpoint
+3. TestRail creates new test case(s) with complete Gherkin content
+4. Returns created case ID(s)
+
+**Update mode (--update):**
+1. Reads the .feature file
+2. Uploads to TestRail via `update_bdd/{case_id}` endpoint
+3. TestRail updates existing test case with new Gherkin content
+4. Returns the updated case ID(s)
 
 **Example .feature file:**
diff --git a/tests/test_cmd_import_gherkin.py b/tests/test_cmd_import_gherkin.py
index 16c95cc..6b290d7 100644
--- a/tests/test_cmd_import_gherkin.py
+++ b/tests/test_cmd_import_gherkin.py
@@ -256,3 +256,141 @@ def test_import_gherkin_unicode_content(self, mock_api_client_class, mock_api_ha
         )
 
         assert result.exit_code == 0
+
+    @pytest.mark.cmd_import_gherkin
+    @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler")
+    @patch("trcli.commands.cmd_import_gherkin.APIClient")
+    def test_import_gherkin_update_mode(self, mock_api_client_class, mock_api_handler_class):
+        """Test feature file update with --update flag"""
+        # Mock API client
+        mock_api_client = MagicMock()
+        mock_api_client_class.return_value = mock_api_client
+        mock_api_client_class.build_uploader_metadata.return_value = {}
+
+        # Mock API request handler
+        mock_handler = MagicMock()
+        mock_api_handler_class.return_value = mock_handler
+        mock_handler.update_bdd.return_value = ([456], "")  # Success: case ID 456, no error
+
+        with self.runner.isolated_filesystem():
+            # Create test feature file
+            with open("test.feature", "w") as f:
+                f.write("Feature: Test\n Scenario: Updated scenario\n Given updated step\n")
+
+            result = self.runner.invoke(
+                cmd_import_gherkin.cli,
+                ["--file", "test.feature", "--case-id", "456", "--update"],
+                obj=self.environment,
+            )
+
+            assert result.exit_code == 0
+            assert "successfully updated" in result.output.lower()
+            assert "456" in result.output
+            # Verify update_bdd was called with case_id, not add_bdd
+            mock_handler.update_bdd.assert_called_once_with(456, mock.ANY)
+            mock_handler.add_bdd.assert_not_called()
+
+    @pytest.mark.cmd_import_gherkin
+    @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler")
+    @patch("trcli.commands.cmd_import_gherkin.APIClient")
+    def test_import_gherkin_update_mode_json_output(self, mock_api_client_class, mock_api_handler_class):
+        """Test feature file update with --update and JSON output"""
+        # Mock API client
+
mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.update_bdd.return_value = ([789], "") + + with self.runner.isolated_filesystem(): + # Create test feature file + with open("test.feature", "w") as f: + f.write("Feature: Test\n Scenario: Test\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, + ["--file", "test.feature", "--case-id", "789", "--update", "--json-output"], + obj=self.environment, + ) + + assert result.exit_code == 0 + # Extract JSON from output + json_start = result.output.find("{") + assert json_start >= 0, "No JSON found in output" + json_str = result.output[json_start:] + import json + + output_data = json.loads(json_str) + assert "case_ids" in output_data + assert output_data["case_ids"] == [789] + # Verify update_bdd was called with case_id + mock_handler.update_bdd.assert_called_once_with(789, mock.ANY) + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_update_mode_api_error(self, mock_api_client_class, mock_api_handler_class): + """Test update mode with API error""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler with error + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.update_bdd.return_value = ([], "TestRail API error: Case not found") + + with self.runner.isolated_filesystem(): + # Create test feature file + with open("test.feature", "w") as f: + f.write("Feature: Test\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, + ["--file", "test.feature", "--case-id", "999", "--update"], + obj=self.environment, + ) + + assert result.exit_code == 1 + assert "error" in result.output.lower() + assert "updating" in result.output.lower() + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_update_mode_verbose(self, mock_api_client_class, mock_api_handler_class): + """Test update mode with verbose logging shows correct endpoint""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.update_bdd.return_value = ([456], "") + + # Enable verbose mode + self.environment.verbose = True + + with self.runner.isolated_filesystem(): + # Create test feature file + with open("test.feature", "w") as f: + f.write("Feature: Test\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, + ["--file", "test.feature", "--case-id", "456", "--update"], + obj=self.environment, + ) + + assert result.exit_code == 0 + # Verify verbose output shows update_bdd endpoint + assert "update_bdd" in result.output + assert "456" in result.output # case_id in verbose log + # Verify update_bdd was called with case_id + mock_handler.update_bdd.assert_called_once_with(456, mock.ANY) diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py 
index e719172..e32f790 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -1802,6 +1802,46 @@ def add_bdd(self, section_id: int, feature_content: str) -> Tuple[List[int], str error_msg = response.error_message or f"Failed to upload feature file (HTTP {response.status_code})" return [], error_msg + def update_bdd(self, case_id: int, feature_content: str) -> Tuple[List[int], str]: + """ + Update existing BDD test case with .feature file content + + Updates TestRail BDD test case from Gherkin .feature content. + The Gherkin content is sent in the request body as plain text. + + Args: + case_id: TestRail test case ID to update + feature_content: Raw .feature file content (Gherkin syntax) + + Returns: + Tuple of (case_ids, error_message) + - case_ids: List containing the updated test case ID + - error_message: Empty string on success, error details on failure + + API Endpoint: POST /api/v2/update_bdd/{case_id} + Request Body: Raw Gherkin text (multipart/form-data) + Response: Standard TestRail test case JSON with BDD custom fields + """ + # Send Gherkin content as file upload (multipart/form-data) + # TestRail expects the .feature file as an attachment + self.environment.vlog(f"Updating .feature file via update_bdd/{case_id}") + files = {"attachment": ("feature.feature", feature_content, "text/plain")} + response = self.client.send_post(f"update_bdd/{case_id}", payload=None, files=files) + + if response.status_code == 200: + # Response is a test case object with 'id' field + if isinstance(response.response_text, dict): + case_id = response.response_text.get("id") + if case_id: + return [case_id], "" + else: + return [], "Response missing 'id' field" + else: + return [], "Unexpected response format" + else: + error_msg = response.error_message or f"Failed to update feature file (HTTP {response.status_code})" + return [], error_msg + def get_bdd(self, case_id: int) -> Tuple[str, str]: """ Retrieve BDD test case as .feature file content diff --git a/trcli/commands/cmd_import_gherkin.py b/trcli/commands/cmd_import_gherkin.py index 95bddf8..d47928a 100644 --- a/trcli/commands/cmd_import_gherkin.py +++ b/trcli/commands/cmd_import_gherkin.py @@ -22,19 +22,31 @@ "--section-id", type=click.IntRange(min=1), metavar="", - required=True, - help="TestRail section ID where test cases will be created.", + required=False, + help="TestRail section ID where test cases will be created (required for create mode).", +) +@click.option( + "--case-id", + type=click.IntRange(min=1), + metavar="", + required=False, + help="TestRail case ID to update (required with --update flag).", ) @click.option("--json-output", is_flag=True, help="Output case IDs in JSON format.") +@click.option("--update", is_flag=True, help="Update existing BDD test case instead of creating new one.") @click.pass_context @pass_environment -def cli(environment: Environment, context: click.Context, file: str, section_id: int, **kwargs): - """Upload Gherkin .feature file to TestRail +def cli(environment: Environment, context: click.Context, file: str, section_id: int, case_id: int, **kwargs): + """Upload or update Gherkin .feature file in TestRail This command uploads a Gherkin/BDD .feature file directly to TestRail, which will create or update test cases based on the scenarios in the file. 
- TestRail will parse the .feature file and automatically create test cases + Two modes: + - Create mode (default): Requires --section-id, creates new test case(s) + - Update mode (--update): Requires --case-id, updates existing test case + + TestRail will parse the .feature file and automatically create/update test cases for each scenario, maintaining the BDD structure in TestRail's native format. Mapping Rules (.feature to TestRail): @@ -45,14 +57,35 @@ def cli(environment: Environment, context: click.Context, file: str, section_id: - @Tags before Feature: → Reference field (@ stripped) - @Tags before scenarios → BDD field - Example: + Examples: + # Create new test case (requires --section-id) trcli import_gherkin -f login.feature --section-id 123 --project-id 1 + + # Update existing test case (requires --case-id) + trcli import_gherkin -f login.feature --case-id 456 --update --project-id 1 """ environment.cmd = "import_gherkin" environment.set_parameters(context) environment.check_for_required_parameters() json_output = kwargs.get("json_output", False) + update_mode = kwargs.get("update", False) + + # Validate mutually exclusive parameters + if update_mode: + if not case_id: + environment.elog("Error: --case-id is required when using --update flag") + exit(1) + if section_id: + environment.elog("Error: --section-id cannot be used with --update flag (use --case-id instead)") + exit(1) + else: + if not section_id: + environment.elog("Error: --section-id is required for create mode") + exit(1) + if case_id: + environment.elog("Error: --case-id can only be used with --update flag") + exit(1) try: # Read the feature file @@ -67,9 +100,13 @@ def cli(environment: Environment, context: click.Context, file: str, section_id: environment.elog("Error: Feature file is empty") exit(1) + endpoint_name = "update_bdd" if update_mode else "add_bdd" + target_id = case_id if update_mode else section_id + id_type = "case ID" if update_mode else "section ID" + environment.vlog(f"Feature file size: {len(feature_content)} characters") - environment.vlog(f"Target section ID: {section_id}") - environment.vlog(f"API endpoint: POST /api/v2/add_bdd/{section_id}") + environment.vlog(f"Target {id_type}: {target_id}") + environment.vlog(f"API endpoint: POST /api/v2/{endpoint_name}/{target_id}") # Initialize API client if not json_output: @@ -100,18 +137,25 @@ def cli(environment: Environment, context: click.Context, file: str, section_id: suites_data=minimal_suite, ) - # Upload feature file - if not json_output: - environment.log(f"Uploading feature file to TestRail...") - case_ids, error_message = api_request_handler.add_bdd(section_id, feature_content) + # Upload or update feature file based on mode + if update_mode: + if not json_output: + environment.log(f"Updating existing BDD test case (C{case_id}) in TestRail...") + case_ids, error_message = api_request_handler.update_bdd(case_id, feature_content) + else: + if not json_output: + environment.log(f"Uploading feature file to TestRail...") + case_ids, error_message = api_request_handler.add_bdd(section_id, feature_content) if error_message: - environment.elog(f"Error uploading feature file: {error_message}") + action = "updating" if update_mode else "uploading" + environment.elog(f"Error {action} feature file: {error_message}") exit(1) if not case_ids: + action = "updated" if update_mode else "uploaded" environment.log("Warning: No case IDs returned from TestRail") - environment.log("Feature file was uploaded but no cases were created.") + environment.log(f"Feature 
file was {action} but no cases were created/updated.") exit(0) # Display results @@ -120,8 +164,12 @@ def cli(environment: Environment, context: click.Context, file: str, section_id: print(json.dumps({"case_ids": case_ids, "count": len(case_ids)}, indent=2)) else: - environment.log(f"\nSuccessfully uploaded feature file!") - environment.log(f" Created/updated {len(case_ids)} test case(s)") + if update_mode: + environment.log(f"\nSuccessfully updated feature file!") + environment.log(f" Updated {len(case_ids)} test case(s)") + else: + environment.log(f"\nSuccessfully uploaded feature file!") + environment.log(f" Created {len(case_ids)} test case(s)") environment.log(f" Case IDs: {', '.join(map(str, case_ids))}") except FileNotFoundError: From 577d5158f3861cec6970dc0bac6cf08740aa9542 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 15 Jan 2026 16:41:42 +0800 Subject: [PATCH 15/33] TRCLI-193: Updated parse_cucumber options --- README.md | 22 ++++-- trcli/commands/cmd_parse_cucumber.py | 24 +++---- trcli/commands/results_parser_helpers.py | 91 +++++++++++++++++++----- 3 files changed, 101 insertions(+), 36 deletions(-) diff --git a/README.md b/README.md index ff95a01..badf06d 100644 --- a/README.md +++ b/README.md @@ -358,30 +358,38 @@ Usage: trcli parse_cucumber [OPTIONS] Parse Cucumber JSON results and upload to TestRail - This command parses Cucumber JSON test results and uploads them to TestRail. - Uses BDD matching mode to match features by name and auto-create missing test cases. + This command parses Cucumber JSON test results and uploads them to TestRail + using BDD matching mode. Features are matched to TestRail BDD test cases by + feature name only (case-insensitive, whitespace-normalized). + + BDD Matching: + - Matches Cucumber features to TestRail BDD test cases by feature name + - Auto-creates missing BDD test cases by default (use -n to disable) + - Sections are auto-created based on feature names + - Does not use automation_id or case-matcher (BDD uses feature name matching only) Options: -f, --file Filename and path. --close-run Close the newly created run --title Title of Test Run to be created in TestRail. - --case-matcher Mechanism to match cases between the report and - TestRail. --suite-id Suite ID to submit results to. [x>=1] - --suite-name Suite name to submit results to. --run-id Run ID for the results they are reporting. [x>=1] --plan-id Plan ID with which the Test Run will be associated. [x>=1] --config-ids Comma-separated configuration IDs to use along with Test Plans. --milestone-id Milestone ID to which the Test Run should be associated to. [x>=1] - --section-id Section ID to create new sections with test cases under. [x>=1] --run-description Summary text to be added to the test run. - --case-fields List of case fields and values for new test cases creation. --result-fields List of result fields and values for test results creation. --allow-ms Allows using milliseconds for elapsed times. -v, --verbose Enable verbose logging output. --help Show this message and exit. 
 ```
+**Note:** The following options are NOT supported for `parse_cucumber` as they are not relevant for BDD matching:
+- `--case-matcher` - BDD always uses feature name matching
+- `--suite-name` - Use `--suite-id` instead
+- `--section-id` - Sections are auto-created based on feature names
+- `--case-fields` - BDD test cases are created via `.feature` file upload, not standard case creation
+
 #### Cucumber JSON Format Example
 ```json
 [
diff --git a/trcli/commands/cmd_parse_cucumber.py b/trcli/commands/cmd_parse_cucumber.py
index 041b2d6..a7f468a 100644
--- a/trcli/commands/cmd_parse_cucumber.py
+++ b/trcli/commands/cmd_parse_cucumber.py
@@ -3,15 +3,14 @@
 from trcli.api.results_uploader import ResultsUploader
 from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS
-from trcli.commands.results_parser_helpers import results_parser_options, print_config
+from trcli.commands.results_parser_helpers import bdd_parser_options, print_config
 from trcli.constants import FAULT_MAPPING, ProjectErrors
-from trcli.data_classes.data_parsers import MatchersParser
 from trcli.data_classes.validation_exception import ValidationException
 from trcli.readers.cucumber_json import CucumberParser
 
 
 @click.command(context_settings=CONTEXT_SETTINGS)
-@results_parser_options
+@bdd_parser_options
 @click.option(
     "-v",
     "--verbose",
@@ -23,8 +22,15 @@
 def cli(environment: Environment, context: click.Context, *args, **kwargs):
     """Parse Cucumber JSON results and upload to TestRail
 
-    This command parses Cucumber JSON test results and uploads them to TestRail.
-    Uses BDD matching mode to match features by name and auto-create missing test cases.
+    This command parses Cucumber JSON test results and uploads them to TestRail
+    using BDD matching mode. Features are matched to TestRail BDD test cases by
+    feature name only (case-insensitive, whitespace-normalized).
+ + BDD Matching: + - Matches Cucumber features to TestRail BDD test cases by feature name + - Auto-creates missing BDD test cases by default (use -n to disable) + - Sections are auto-created based on feature names + - Does not use automation_id or case-matcher (BDD uses feature name matching only) """ environment.cmd = "parse_cucumber" environment.set_parameters(context) @@ -248,11 +254,6 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): if environment.suite_id and not suite.suite_id: suite.suite_id = environment.suite_id - # For BDD mode, bypass automation_id check by using NAME matcher - # BDD cases already have case_id set, so we don't need automation_id - original_case_matcher = environment.case_matcher - environment.case_matcher = MatchersParser.NAME - run_id = None for suite in parsed_suites: result_uploader = ResultsUploader(environment=environment, suite=suite) @@ -263,9 +264,6 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): if run_id is None and hasattr(result_uploader, "last_run_id"): run_id = result_uploader.last_run_id - # Restore original case matcher - environment.case_matcher = original_case_matcher - # Summary if run_id: environment.log(f"Results uploaded successfully to run ID: {run_id}") diff --git a/trcli/commands/results_parser_helpers.py b/trcli/commands/results_parser_helpers.py index da528e2..fcb1aba 100644 --- a/trcli/commands/results_parser_helpers.py +++ b/trcli/commands/results_parser_helpers.py @@ -7,25 +7,31 @@ def print_config(env: Environment): - assign_info = f"Yes ({env.assign_failed_to})" if hasattr(env, 'assign_failed_to') and env.assign_failed_to and env.assign_failed_to.strip() else "No" - env.log(f"Parser Results Execution Parameters" - f"\n> Report file: {env.file}" - f"\n> Config file: {env.config}" - f"\n> TestRail instance: {env.host} (user: {env.username})" - f"\n> Project: {env.project if env.project else env.project_id}" - f"\n> Run title: {env.title}" - f"\n> Update run: {env.run_id if env.run_id else 'No'}" - f"\n> Add to milestone: {env.milestone_id if env.milestone_id else 'No'}" - f"\n> Auto-assign failures: {assign_info}" - f"\n> Auto-create entities: {env.auto_creation_response}") + assign_info = ( + f"Yes ({env.assign_failed_to})" + if hasattr(env, "assign_failed_to") and env.assign_failed_to and env.assign_failed_to.strip() + else "No" + ) + env.log( + f"Parser Results Execution Parameters" + f"\n> Report file: {env.file}" + f"\n> Config file: {env.config}" + f"\n> TestRail instance: {env.host} (user: {env.username})" + f"\n> Project: {env.project if env.project else env.project_id}" + f"\n> Run title: {env.title}" + f"\n> Update run: {env.run_id if env.run_id else 'No'}" + f"\n> Add to milestone: {env.milestone_id if env.milestone_id else 'No'}" + f"\n> Auto-assign failures: {assign_info}" + f"\n> Auto-create entities: {env.auto_creation_response}" + ) def resolve_comma_separated_list(ctx, param, value): if value: try: - return [int(part.strip()) for part in value.split(',')] + return [int(part.strip()) for part in value.split(",")] except: - raise BadParameter('Invalid format, use a comma-separated list (i.e.: 43,19)') + raise BadParameter("Invalid format, use a comma-separated list (i.e.: 43,19)") def results_parser_options(f): @@ -37,7 +43,7 @@ def results_parser_options(f): metavar="", default="auto", type=click.Choice(["auto", "name", "property"], case_sensitive=False), - help="Mechanism to match cases between the report and TestRail." 
+ help="Mechanism to match cases between the report and TestRail.", ) @click.option( "--suite-id", @@ -87,7 +93,7 @@ def results_parser_options(f): metavar="", default=[], help="List of case fields and values for new test cases creation. " - "Usage: --case-fields type_id:1 --case-fields priority_id:3", + "Usage: --case-fields type_id:1 --case-fields priority_id:3", ) @click.option( "--result-fields", @@ -95,7 +101,7 @@ def results_parser_options(f): metavar="", default=[], help="List of result fields and values for test results creation. " - "Usage: --result-fields custom_field_a:value1 --result-fields custom_field_b:3", + "Usage: --result-fields custom_field_a:value1 --result-fields custom_field_b:3", ) @click.option("--allow-ms", is_flag=True, help="Allows using milliseconds for elapsed times.") @functools.wraps(f) @@ -103,3 +109,56 @@ def wrapper_common_options(*args, **kwargs): return f(*args, **kwargs) return wrapper_common_options + + +def bdd_parser_options(f): + """Options decorator for BDD/Cucumber parsers that don't need case-matcher or section-id""" + + @click.option("-f", "--file", type=click.Path(), metavar="", help="Filename and path.") + @click.option("--close-run", is_flag=True, help="Close the newly created run") + @click.option("--title", metavar="", help="Title of Test Run to be created or updated in TestRail.") + @click.option( + "--suite-id", + type=click.IntRange(min=1), + metavar="", + help="Suite ID to submit results to.", + ) + @click.option( + "--run-id", + type=click.IntRange(min=1), + metavar="", + help="Run ID for the results they are reporting (otherwise the tool will attempt to create a new run).", + ) + @click.option( + "--plan-id", + type=click.IntRange(min=1), + metavar="", + help="Plan ID with which the Test Run will be associated.", + ) + @click.option( + "--config-ids", + metavar="", + callback=resolve_comma_separated_list, + help="Comma-separated configuration IDs to use along with Test Plans (i.e.: 34,52).", + ) + @click.option( + "--milestone-id", + type=click.IntRange(min=1), + metavar="", + help="Milestone ID to which the Test Run should be associated to.", + ) + @click.option("--run-description", metavar="", default="", help="Summary text to be added to the test run.") + @click.option( + "--result-fields", + multiple=True, + metavar="", + default=[], + help="List of result fields and values for test results creation. 
" + "Usage: --result-fields custom_field_a:value1 --result-fields custom_field_b:3", + ) + @click.option("--allow-ms", is_flag=True, help="Allows using milliseconds for elapsed times.") + @functools.wraps(f) + def wrapper_bdd_options(*args, **kwargs): + return f(*args, **kwargs) + + return wrapper_bdd_options From 5414b28e9aedf2c8313f8db1860d9f51e0c35fc9 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Mon, 19 Jan 2026 17:57:52 +0800 Subject: [PATCH 16/33] TRCLI-21: Updated special-parser bdd to correctly validate against bdd test case and upload results to custom bdd results field --- trcli/api/api_request_handler.py | 120 ++++++++++++++++++++++++++++++- trcli/readers/cucumber_json.py | 112 ++++++++--------------------- trcli/readers/junit_xml.py | 10 +-- 3 files changed, 155 insertions(+), 87 deletions(-) diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index a11de82..d5d7a74 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -1,7 +1,7 @@ import html, json, os import time from concurrent.futures import ThreadPoolExecutor, as_completed -from beartype.typing import List, Union, Tuple, Dict +from beartype.typing import List, Union, Tuple, Dict, Optional from trcli.api.api_client import APIClient, APIClientResult from trcli.api.api_response_verify import ApiResponseVerify @@ -46,6 +46,10 @@ def __init__( self.suites_data_from_provider = self.data_provider.suites_input self.response_verifier = ApiResponseVerify(verify) + # BDD case cache for feature name matching (shared by CucumberParser and JunitParser) + # Structure: {"{project_id}_{suite_id}": {normalized_name: [case_dict, case_dict, ...]}} + self._bdd_case_cache = {} + def check_automation_id_field(self, project_id: int) -> Union[str, None]: """ Checks if the automation_id field (custom_automation_id or custom_case_automation_id) is available for the project @@ -2335,6 +2339,120 @@ def get_bdd_template_id(self, project_id: int) -> Tuple[int, str]: error_msg = response.error_message or f"Failed to get templates (HTTP {response.status_code})" return None, error_msg + def find_bdd_case_by_name( + self, feature_name: str, project_id: int, suite_id: int + ) -> Tuple[Optional[int], Optional[str], List[int]]: + """ + Find a BDD test case by feature name (normalized matching). + + This method is shared by CucumberParser and JunitParser for feature name matching. 
+ + Args: + feature_name: The feature name to search for + project_id: TestRail project ID + suite_id: TestRail suite ID + + Returns: + Tuple of (case_id, error_message, duplicate_case_ids): + - case_id: The matched case ID, or -1 if not found, or None if error/duplicates + - error_message: Error message if operation failed, None otherwise + - duplicate_case_ids: List of case IDs if duplicates found, empty list otherwise + """ + # Build cache if not already cached for this project/suite + cache_key = f"{project_id}_{suite_id}" + if cache_key not in self._bdd_case_cache: + error = self._build_bdd_case_cache(project_id, suite_id) + if error: + return None, error, [] + + # Normalize the feature name for matching + normalized_name = self._normalize_feature_name(feature_name) + + # Look up in cache + cache = self._bdd_case_cache.get(cache_key, {}) + matching_cases = cache.get(normalized_name, []) + + if len(matching_cases) == 0: + # Not found + self.environment.vlog(f"Feature '{feature_name}' not found in TestRail") + return -1, None, [] + elif len(matching_cases) == 1: + # Single match - success + case_id = matching_cases[0].get("id") + self.environment.vlog(f"Feature '{feature_name}' matched to case ID: C{case_id}") + return case_id, None, [] + else: + # Multiple matches - duplicate error + duplicate_ids = [case.get("id") for case in matching_cases] + self.environment.vlog(f"Feature '{feature_name}' has {len(matching_cases)} duplicates: {duplicate_ids}") + return None, None, duplicate_ids + + def _build_bdd_case_cache(self, project_id: int, suite_id: int) -> Optional[str]: + """ + Build cache of BDD test cases for a project/suite. + + Args: + project_id: TestRail project ID + suite_id: TestRail suite ID + + Returns: + Error message if failed, None if successful + """ + cache_key = f"{project_id}_{suite_id}" + + self.environment.vlog(f"Building BDD case cache for project {project_id}, suite {suite_id}...") + + # Fetch all cases for this suite + all_cases, error = self.__get_all_cases(project_id, suite_id) + + if error: + return f"Error fetching cases for cache: {error}" + + # Filter to BDD cases only (have custom_testrail_bdd_scenario field) + bdd_cases = [case for case in all_cases if case.get("custom_testrail_bdd_scenario")] + + self.environment.vlog(f"Found {len(bdd_cases)} BDD cases out of {len(all_cases)} total cases") + + # Build normalized name -> [case, case, ...] mapping + cache = {} + for case in bdd_cases: + title = case.get("title", "") + normalized = self._normalize_feature_name(title) + + if normalized not in cache: + cache[normalized] = [] + cache[normalized].append(case) + + self._bdd_case_cache[cache_key] = cache + self.environment.vlog(f"Cached {len(cache)} unique feature name(s)") + + return None + + @staticmethod + def _normalize_feature_name(name: str) -> str: + """ + Normalize a feature name for case-insensitive, whitespace-insensitive matching. + + Converts to lowercase, strips whitespace, and removes special characters. + Hyphens, underscores, and special chars are converted to spaces for word boundaries. 
+ + Args: + name: The feature name to normalize + + Returns: + Normalized name (lowercase, special chars removed, collapsed whitespace, stripped) + """ + import re + + # Convert to lowercase and strip + normalized = name.lower().strip() + # Replace hyphens, underscores, and special chars with spaces + normalized = re.sub(r"[^a-z0-9\s]", " ", normalized) + # Collapse multiple spaces to single space + normalized = re.sub(r"\s+", " ", normalized) + # Final strip + return normalized.strip() + def add_case_bdd( self, section_id: int, title: str, bdd_content: str, template_id: int, tags: List[str] = None ) -> Tuple[int, str]: diff --git a/trcli/readers/cucumber_json.py b/trcli/readers/cucumber_json.py index 965f60d..6d9a081 100644 --- a/trcli/readers/cucumber_json.py +++ b/trcli/readers/cucumber_json.py @@ -639,7 +639,7 @@ def _generate_rule_content(self, rule: Dict[str, Any]) -> str: return "\n".join(lines) def _normalize_title(self, title: str) -> str: - """Normalize title for robust matching + """Normalize title for robust matching (delegates to API handler for consistency) Converts to lowercase, strips whitespace, and removes special characters. Hyphens, underscores, and special chars are converted to spaces for word boundaries. @@ -650,16 +650,10 @@ def _normalize_title(self, title: str) -> str: Returns: Normalized title string """ - import re + # Use shared normalization from API handler for consistency + from trcli.api.api_request_handler import ApiRequestHandler - # Convert to lowercase and strip - normalized = title.lower().strip() - # Replace hyphens, underscores, and special chars with spaces - normalized = re.sub(r"[^a-z0-9\s]", " ", normalized) - # Collapse multiple spaces to single space - normalized = re.sub(r"\s+", " ", normalized) - # Final strip - return normalized.strip() + return ApiRequestHandler._normalize_feature_name(title) def set_api_handler(self, api_handler): """Set API handler for BDD matching mode @@ -669,88 +663,44 @@ def set_api_handler(self, api_handler): """ self._api_handler = api_handler - def _get_bdd_cases_cache(self, project_id: int, suite_id: int) -> Dict[str, int]: - """Fetch and cache all BDD cases in suite (one-time batch operation) - - This method fetches all test cases once and caches BDD cases for fast lookups. - Performance: 40 API requests for 10K cases (due to pagination), then O(1) lookups. + def _find_case_by_title(self, feature_name: str, project_id: int, suite_id: int) -> Optional[int]: + """Find BDD case by feature name using cached index (delegates to API handler) Args: + feature_name: Feature name from Cucumber JSON project_id: TestRail project ID suite_id: TestRail suite ID Returns: - Dictionary mapping normalized_title → case_id for BDD cases only + Case ID if found, None otherwise (also None if error or duplicates) """ - if self._bdd_case_cache is not None: - return self._bdd_case_cache - if self._api_handler is None: - self.env.elog("Error: API handler not set. Cannot fetch BDD cases.") - return {} - - self.env.vlog(f"Fetching all BDD cases for suite {suite_id} (one-time operation)...") + self.env.elog("Error: API handler not set. 
Cannot find case by title.") + return None - # Fetch ALL cases in suite (with pagination handled internally) - all_cases, error = self._api_handler._ApiRequestHandler__get_all_cases(project_id=project_id, suite_id=suite_id) + # Use shared API handler method for consistency + case_id, error, duplicates = self._api_handler.find_bdd_case_by_name( + feature_name=feature_name, project_id=project_id, suite_id=suite_id + ) + # Handle errors if error: - self.env.elog(f"Error fetching cases: {error}") - return {} - - # Build hash table index: normalized_title → case_id (BDD cases only) - # Also track duplicates for warning - bdd_cache = {} - duplicate_tracker = {} # normalized_title → list of case IDs - bdd_count = 0 - - for case in all_cases: - # Filter to BDD template cases only - if case.get("custom_testrail_bdd_scenario"): - normalized = self._normalize_title(case["title"]) - case_id = case["id"] - - # Track duplicates - if normalized in duplicate_tracker: - duplicate_tracker[normalized].append(case_id) - else: - duplicate_tracker[normalized] = [case_id] - - bdd_cache[normalized] = case_id - bdd_count += 1 - - # Warn about duplicates - for normalized_title, case_ids in duplicate_tracker.items(): - if len(case_ids) > 1: - # Find original title (use first case's title) - original_title = None - for case in all_cases: - if case["id"] == case_ids[0]: - original_title = case["title"] - break - - case_ids_str = ", ".join([f"C{cid}" for cid in case_ids]) - self.env.elog(f"Warning: Multiple BDD cases found with title '{original_title}': {case_ids_str}") - self.env.elog(f" Using case ID C{case_ids[-1]} (last match)") - - self.env.vlog(f"Cached {bdd_count} BDD cases from {len(all_cases)} total cases") - self._bdd_case_cache = bdd_cache - return bdd_cache - - def _find_case_by_title(self, feature_name: str, project_id: int, suite_id: int) -> Optional[int]: - """Find BDD case by feature name using cached index (O(1) lookup) - - Args: - feature_name: Feature name from Cucumber JSON - project_id: TestRail project ID - suite_id: TestRail suite ID - - Returns: - Case ID if found, None otherwise - """ - cache = self._get_bdd_cases_cache(project_id, suite_id) - normalized = self._normalize_title(feature_name) - return cache.get(normalized) + self.env.elog(f"Error finding case by title: {error}") + return None + + # Handle duplicates + if duplicates: + case_ids_str = ", ".join([f"C{cid}" for cid in duplicates]) + self.env.elog(f"Warning: Multiple BDD cases found with title '{feature_name}': {case_ids_str}") + self.env.elog(f" Cannot proceed - please ensure unique feature names in TestRail") + return None + + # Handle not found (case_id == -1) + if case_id == -1: + return None + + # Success + return case_id def _extract_case_id_from_tags(self, feature_tags: List[str], scenario_tags: List[str]) -> Optional[int]: """Extract case ID from @C tags diff --git a/trcli/readers/junit_xml.py b/trcli/readers/junit_xml.py index e6f85a7..0dcc475 100644 --- a/trcli/readers/junit_xml.py +++ b/trcli/readers/junit_xml.py @@ -595,7 +595,7 @@ def _parse_bdd_feature_as_single_case(self, testsuite) -> Union[TestRailCase, No self.env.log(f"BDD: Case C{case_id} validated as BDD test case for feature '{feature_name}'") # Step 3: Parse all scenarios - scenarios = [] + bdd_scenario_results = [] scenario_statuses = [] total_time = 0 failure_messages = [] @@ -630,10 +630,10 @@ def _parse_bdd_feature_as_single_case(self, testsuite) -> Union[TestRailCase, No # Track status for aggregation scenario_statuses.append(scenario_status) - # Create 
step result for this scenario - step = TestRailSeparatedStep(content=f"Scenario {idx}: {clean_scenario_name}") + # Create BDD scenario result (matches Cucumber parser format) + step = TestRailSeparatedStep(content=clean_scenario_name) step.status_id = scenario_status - scenarios.append(step) + bdd_scenario_results.append(step) self.env.vlog(f" - Scenario {idx}: {clean_scenario_name} → {scenario_status_label} " f"({scenario_time}s)") @@ -667,7 +667,7 @@ def _parse_bdd_feature_as_single_case(self, testsuite) -> Union[TestRailCase, No case_id=case_id, status_id=overall_status, elapsed=total_time if total_time > 0 else None, # Pass numeric value, not formatted string - custom_step_results=scenarios, + custom_testrail_bdd_scenario_results=bdd_scenario_results, comment=comment, ) From 5e258dd0ad10839aaf894deccfbaa9fab787c8a3 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Mon, 19 Jan 2026 17:59:32 +0800 Subject: [PATCH 17/33] TRCLI-21: Updated test data and unit tests for parse_junit special-parser bdd --- tests/test_data/XML/505289_junit_bdd.xml | 34 +++++++++++++++++++ .../XML/505289_junit_bdd_all_pass.xml | 19 +++++++++++ tests/test_junit_bdd_parser.py | 10 +++--- 3 files changed, 58 insertions(+), 5 deletions(-) create mode 100644 tests/test_data/XML/505289_junit_bdd.xml create mode 100644 tests/test_data/XML/505289_junit_bdd_all_pass.xml diff --git a/tests/test_data/XML/505289_junit_bdd.xml b/tests/test_data/XML/505289_junit_bdd.xml new file mode 100644 index 0000000..9f33d9a --- /dev/null +++ b/tests/test_data/XML/505289_junit_bdd.xml @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + +Step: Then I should see an error message "Invalid credentials" +Expected: Error message displayed with text "Invalid credentials" +Actual: No error message was found on the page + +Test execution details: + Given I am on the login page ✓ + When I enter valid username "testuser" ✓ + And I enter invalid password "wrongpass" ✓ + And I click the login button ✓ + Then I should see an error message "Invalid credentials" ✗ + And I should remain on the login page (not executed) + +at features/step_definitions/authentication_steps.py:45 +at features/authentication.feature:23 + + + + diff --git a/tests/test_data/XML/505289_junit_bdd_all_pass.xml b/tests/test_data/XML/505289_junit_bdd_all_pass.xml new file mode 100644 index 0000000..511d872 --- /dev/null +++ b/tests/test_data/XML/505289_junit_bdd_all_pass.xml @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + diff --git a/tests/test_junit_bdd_parser.py b/tests/test_junit_bdd_parser.py index b9b798f..13da136 100644 --- a/tests/test_junit_bdd_parser.py +++ b/tests/test_junit_bdd_parser.py @@ -323,7 +323,7 @@ def test_parse_bdd_feature_all_pass(self, environment, mock_api_validation_succe assert test_case is not None assert test_case.case_id == 100 assert test_case.result.status_id == 1 # Passed - assert len(test_case.result.custom_step_results) == 2 + assert len(test_case.result.custom_testrail_bdd_scenario_results) == 2 assert "Total Scenarios: 2" in test_case.result.comment assert "Passed: 2" in test_case.result.comment @@ -346,12 +346,12 @@ def test_parse_bdd_feature_mixed_results(self, environment, mock_api_validation_ assert test_case is not None assert test_case.case_id == 25293 assert test_case.result.status_id == 5 # Failed (fail-fast) - assert len(test_case.result.custom_step_results) == 3 + assert len(test_case.result.custom_testrail_bdd_scenario_results) == 3 # Check step statuses - assert test_case.result.custom_step_results[0].status_id == 1 # Passed 
- assert test_case.result.custom_step_results[1].status_id == 5 # Failed - assert test_case.result.custom_step_results[2].status_id == 4 # Skipped + assert test_case.result.custom_testrail_bdd_scenario_results[0].status_id == 1 # Passed + assert test_case.result.custom_testrail_bdd_scenario_results[1].status_id == 5 # Failed + assert test_case.result.custom_testrail_bdd_scenario_results[2].status_id == 4 # Skipped # Check comment contains summary and failure details assert "Total Scenarios: 3" in test_case.result.comment From b74e5c5e9f0e4634b3142e6938016216817b86e3 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 20 Jan 2026 14:20:00 +0800 Subject: [PATCH 18/33] TRCLI-21: Updated unit tests --- tests/test_cucumber_bdd_matching.py | 161 ++++++++++++++++++---------- 1 file changed, 106 insertions(+), 55 deletions(-) diff --git a/tests/test_cucumber_bdd_matching.py b/tests/test_cucumber_bdd_matching.py index c1fdca4..94f42a4 100644 --- a/tests/test_cucumber_bdd_matching.py +++ b/tests/test_cucumber_bdd_matching.py @@ -172,108 +172,159 @@ def test_extract_case_id_invalid_format(self): assert case_id is None @pytest.mark.cucumber_bdd_matching - @patch("trcli.readers.cucumber_json.CucumberParser._get_bdd_cases_cache") - def test_find_case_by_title_found(self, mock_get_cache): + def test_find_case_by_title_found(self): """Test finding case by title using cached lookup""" parser = CucumberParser(self.environment) - # Mock cache with normalized titles - mock_get_cache.return_value = {"user login": 101, "product search": 102, "checkout": 103} + # Mock API handler + mock_api_handler = MagicMock() + parser._api_handler = mock_api_handler + + # Mock find_bdd_case_by_name to return case ID 101 + mock_api_handler.find_bdd_case_by_name.return_value = (101, None, []) case_id = parser._find_case_by_title("User Login", project_id=1, suite_id=2) assert case_id == 101 - # Verify cache was accessed - mock_get_cache.assert_called_once_with(1, 2) + # Verify API handler was called correctly + mock_api_handler.find_bdd_case_by_name.assert_called_once_with( + feature_name="User Login", project_id=1, suite_id=2 + ) @pytest.mark.cucumber_bdd_matching - @patch("trcli.readers.cucumber_json.CucumberParser._get_bdd_cases_cache") - def test_find_case_by_title_not_found(self, mock_get_cache): + def test_find_case_by_title_not_found(self): """Test finding case by title returns None when not in cache""" parser = CucumberParser(self.environment) - mock_get_cache.return_value = {"user login": 101, "product search": 102} + # Mock API handler + mock_api_handler = MagicMock() + parser._api_handler = mock_api_handler + + # Mock find_bdd_case_by_name to return -1 (not found) + mock_api_handler.find_bdd_case_by_name.return_value = (-1, None, []) case_id = parser._find_case_by_title("Nonexistent Feature", project_id=1, suite_id=2) assert case_id is None @pytest.mark.cucumber_bdd_matching - @patch("trcli.readers.cucumber_json.CucumberParser._get_bdd_cases_cache") - def test_find_case_by_title_normalization(self, mock_get_cache): - """Test case matching with different formatting""" + def test_find_case_by_title_normalization(self): + """Test case matching with different formatting (normalization happens in API handler)""" parser = CucumberParser(self.environment) - mock_get_cache.return_value = {"user login": 101} + # Mock API handler + mock_api_handler = MagicMock() + parser._api_handler = mock_api_handler + + # Mock find_bdd_case_by_name to always return case ID 101 + # (normalization is tested in API handler tests) + 
mock_api_handler.find_bdd_case_by_name.return_value = (101, None, []) - # Should match despite different formatting + # Should call API handler with each variation assert parser._find_case_by_title("User Login", 1, 2) == 101 assert parser._find_case_by_title("User-Login", 1, 2) == 101 assert parser._find_case_by_title("user_login", 1, 2) == 101 assert parser._find_case_by_title("USER LOGIN", 1, 2) == 101 + # Verify API handler was called 4 times + assert mock_api_handler.find_bdd_case_by_name.call_count == 4 + @pytest.mark.cucumber_bdd_matching - def test_get_bdd_cases_cache_builds_correctly(self): - """Test BDD cases cache is built correctly from API response""" - parser = CucumberParser(self.environment) + def test_api_handler_builds_cache_correctly(self): + """Test API handler builds BDD cases cache correctly (integration test)""" + from trcli.api.api_request_handler import ApiRequestHandler + from trcli.api.api_client import APIClient - # Mock API handler - mock_api_handler = MagicMock() - parser._api_handler = mock_api_handler + # Create mock environment and client + mock_env = MagicMock() + mock_env.vlog = MagicMock() + mock_client = MagicMock(spec=APIClient) + mock_client.VERSION = "v2" + + # Create API handler with mock suite data + from trcli.data_classes.dataclass_testrail import TestRailSuite + + mock_suite = TestRailSuite(name="test", suite_id=2) - # Mock API response with mix of BDD and non-BDD cases + api_handler = ApiRequestHandler( + environment=mock_env, api_client=mock_client, suites_data=mock_suite, verify=False + ) + + # Mock __get_all_cases to return BDD and non-BDD cases mock_cases = [ {"id": 101, "title": "User Login", "custom_testrail_bdd_scenario": "Scenario: Login"}, {"id": 102, "title": "Product Search", "custom_testrail_bdd_scenario": None}, # Not BDD {"id": 103, "title": "Checkout Process", "custom_testrail_bdd_scenario": "Scenario: Checkout"}, ] - mock_api_handler._ApiRequestHandler__get_all_cases.return_value = (mock_cases, None) - # Build cache - cache = parser._get_bdd_cases_cache(project_id=1, suite_id=2) + with patch.object(api_handler, "_ApiRequestHandler__get_all_cases", return_value=(mock_cases, None)): + # Call find_bdd_case_by_name which triggers cache build + case_id, error, duplicates = api_handler.find_bdd_case_by_name("User Login", 1, 2) - # Should only include BDD cases (101 and 103) - assert len(cache) == 2 - assert cache["user login"] == 101 - assert cache["checkout process"] == 103 - assert "product search" not in cache + # Should find case 101 + assert case_id == 101 + assert error is None + assert duplicates == [] @pytest.mark.cucumber_bdd_matching - def test_get_bdd_cases_cache_caching_behavior(self): - """Test cache is only fetched once""" - parser = CucumberParser(self.environment) - - mock_api_handler = MagicMock() - parser._api_handler = mock_api_handler + def test_api_handler_caching_behavior(self): + """Test API handler cache is only built once per project/suite""" + from trcli.api.api_request_handler import ApiRequestHandler + from trcli.api.api_client import APIClient + from trcli.data_classes.dataclass_testrail import TestRailSuite + + # Create mock environment and client + mock_env = MagicMock() + mock_env.vlog = MagicMock() + mock_client = MagicMock(spec=APIClient) + mock_client.VERSION = "v2" + + mock_suite = TestRailSuite(name="test", suite_id=2) + api_handler = ApiRequestHandler( + environment=mock_env, api_client=mock_client, suites_data=mock_suite, verify=False + ) mock_cases = [{"id": 101, "title": "User Login", 
"custom_testrail_bdd_scenario": "Scenario: Login"}] - mock_api_handler._ApiRequestHandler__get_all_cases.return_value = (mock_cases, None) - # First call - should fetch from API - cache1 = parser._get_bdd_cases_cache(1, 2) - assert mock_api_handler._ApiRequestHandler__get_all_cases.call_count == 1 + with patch.object( + api_handler, "_ApiRequestHandler__get_all_cases", return_value=(mock_cases, None) + ) as mock_get_cases: + # First call - should build cache + case_id1, _, _ = api_handler.find_bdd_case_by_name("User Login", 1, 2) + assert mock_get_cases.call_count == 1 - # Second call - should use cache - cache2 = parser._get_bdd_cases_cache(1, 2) - assert mock_api_handler._ApiRequestHandler__get_all_cases.call_count == 1 # No additional call + # Second call with same project/suite - should use cache + case_id2, _, _ = api_handler.find_bdd_case_by_name("User Login", 1, 2) + assert mock_get_cases.call_count == 1 # No additional call - # Verify same cache returned - assert cache1 is cache2 + # Both calls should find the same case + assert case_id1 == case_id2 == 101 @pytest.mark.cucumber_bdd_matching - def test_get_bdd_cases_cache_api_error(self): - """Test cache handles API errors gracefully""" - parser = CucumberParser(self.environment) - - mock_api_handler = MagicMock() - parser._api_handler = mock_api_handler + def test_api_handler_handles_api_error(self): + """Test API handler handles API errors gracefully""" + from trcli.api.api_request_handler import ApiRequestHandler + from trcli.api.api_client import APIClient + from trcli.data_classes.dataclass_testrail import TestRailSuite + + # Create mock environment and client + mock_env = MagicMock() + mock_env.vlog = MagicMock() + mock_client = MagicMock(spec=APIClient) + mock_client.VERSION = "v2" + + mock_suite = TestRailSuite(name="test", suite_id=2) + api_handler = ApiRequestHandler( + environment=mock_env, api_client=mock_client, suites_data=mock_suite, verify=False + ) # Mock API error - mock_api_handler._ApiRequestHandler__get_all_cases.return_value = ([], "API Error") - - cache = parser._get_bdd_cases_cache(1, 2) + with patch.object(api_handler, "_ApiRequestHandler__get_all_cases", return_value=([], "API Error")): + case_id, error, duplicates = api_handler.find_bdd_case_by_name("User Login", 1, 2) - # Should return empty cache on error - assert cache == {} + # Should return None with error message + assert case_id is None + assert "API Error" in error + assert duplicates == [] @pytest.mark.cucumber_bdd_matching def test_validate_bdd_case_exists_valid(self): From 3a4413f24b236a2f96101c9908dd4552fd8a4287 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 4 Feb 2026 17:41:13 +0800 Subject: [PATCH 19/33] TRCLI-21: Fixed an issue with finding and checking of BDD test cases to be dynamic --- trcli/api/api_request_handler.py | 73 ++++++++++++++++++++++++++++++-- trcli/readers/cucumber_json.py | 16 +++++-- trcli/readers/junit_xml.py | 18 ++++++-- 3 files changed, 97 insertions(+), 10 deletions(-) diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index d5d7a74..7526d0b 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -50,6 +50,10 @@ def __init__( # Structure: {"{project_id}_{suite_id}": {normalized_name: [case_dict, case_dict, ...]}} self._bdd_case_cache = {} + # Cache for resolved BDD field names (resolved from TestRail API) + self._bdd_case_field_name = None # BDD Scenarios field (type_id=13) + self._bdd_result_field_name = None # BDD Scenario Results field 
(type_id=14) + def check_automation_id_field(self, project_id: int) -> Union[str, None]: """ Checks if the automation_id field (custom_automation_id or custom_case_automation_id) is available for the project @@ -2408,10 +2412,15 @@ def _build_bdd_case_cache(self, project_id: int, suite_id: int) -> Optional[str] if error: return f"Error fetching cases for cache: {error}" - # Filter to BDD cases only (have custom_testrail_bdd_scenario field) - bdd_cases = [case for case in all_cases if case.get("custom_testrail_bdd_scenario")] + # Resolve BDD case field name dynamically + bdd_field_name = self.get_bdd_case_field_name() + + # Filter to BDD cases only (have BDD scenarios field with content) + bdd_cases = [case for case in all_cases if case.get(bdd_field_name)] - self.environment.vlog(f"Found {len(bdd_cases)} BDD cases out of {len(all_cases)} total cases") + self.environment.vlog( + f"Found {len(bdd_cases)} BDD cases out of {len(all_cases)} total cases (using field: {bdd_field_name})" + ) # Build normalized name -> [case, case, ...] mapping cache = {} @@ -2453,6 +2462,64 @@ def _normalize_feature_name(name: str) -> str: # Final strip return normalized.strip() + def get_bdd_case_field_name(self) -> str: + """Resolve BDD Scenarios case field name from TestRail API + + Dynamically resolves the actual field name for BDD Scenarios (type_id=13). + This supports custom field names when users rename the default field in TestRail. + + Returns: + Resolved system_name of BDD Scenarios field, or default name if resolution fails + """ + # Return cached value if already resolved + if self._bdd_case_field_name is not None: + return self._bdd_case_field_name + + try: + response = self.client.send_get("get_case_fields") + if not response.error_message and response.response_text: + for field in response.response_text: + if field.get("type_id") == 13: # BDD Scenarios type + self._bdd_case_field_name = field.get("system_name") + self.environment.vlog(f"Resolved BDD case field name: {self._bdd_case_field_name}") + return self._bdd_case_field_name + except Exception as e: + self.environment.vlog(f"Error resolving BDD case field name: {e}") + + # Fallback to default name + self._bdd_case_field_name = "custom_testrail_bdd_scenario" + self.environment.vlog(f"Using default BDD case field name: {self._bdd_case_field_name}") + return self._bdd_case_field_name + + def get_bdd_result_field_name(self) -> str: + """Resolve BDD Scenario Results result field name from TestRail API + + Dynamically resolves the actual field name for BDD Scenario Results (type_id=14). + This supports custom field names when users rename the default field in TestRail. 
+ + Returns: + Resolved system_name of BDD Scenario Results field, or default name if resolution fails + """ + # Return cached value if already resolved + if self._bdd_result_field_name is not None: + return self._bdd_result_field_name + + try: + response = self.client.send_get("get_result_fields") + if not response.error_message and response.response_text: + for field in response.response_text: + if field.get("type_id") == 14: # BDD Scenario Results type + self._bdd_result_field_name = field.get("system_name") + self.environment.vlog(f"Resolved BDD result field name: {self._bdd_result_field_name}") + return self._bdd_result_field_name + except Exception as e: + self.environment.vlog(f"Error resolving BDD result field name: {e}") + + # Fallback to default name + self._bdd_result_field_name = "custom_testrail_bdd_scenario_results" + self.environment.vlog(f"Using default BDD result field name: {self._bdd_result_field_name}") + return self._bdd_result_field_name + def add_case_bdd( self, section_id: int, title: str, bdd_content: str, template_id: int, tags: List[str] = None ) -> Tuple[int, str]: diff --git a/trcli/readers/cucumber_json.py b/trcli/readers/cucumber_json.py index 6d9a081..67b3811 100644 --- a/trcli/readers/cucumber_json.py +++ b/trcli/readers/cucumber_json.py @@ -758,9 +758,12 @@ def _validate_bdd_case_exists(self, case_id: int) -> Tuple[bool, Optional[str]]: case_data = response.response_text - # Validate it's a BDD template case - if not case_data.get("custom_testrail_bdd_scenario"): - return False, f"Case C{case_id} is not a BDD template case" + # Resolve BDD case field name dynamically + bdd_field_name = self._api_handler.get_bdd_case_field_name() + + # Validate it's a BDD template case (has BDD scenarios field with content) + if not case_data.get(bdd_field_name): + return False, f"Case C{case_id} is not a BDD template case (missing field: {bdd_field_name})" return True, None @@ -901,14 +904,19 @@ def _parse_feature_as_bdd_case( comment = "\n\n".join(comment_parts) if comment_parts else "" # Step 7: Create result with BDD scenario results + # Resolve BDD result field name dynamically + bdd_result_field_name = self._api_handler.get_bdd_result_field_name() + result = TestRailResult( case_id=case_id, status_id=overall_status, comment=comment, elapsed=elapsed_time, - custom_testrail_bdd_scenario_results=bdd_scenario_results, # Use BDD field ) + # Set BDD scenario results using dynamically resolved field name + setattr(result, bdd_result_field_name, bdd_scenario_results) + # Step 8: Create test case test_case = TestRailCase( title=TestRailCaseFieldsOptimizer.extract_last_words( diff --git a/trcli/readers/junit_xml.py b/trcli/readers/junit_xml.py index 0dcc475..522fc71 100644 --- a/trcli/readers/junit_xml.py +++ b/trcli/readers/junit_xml.py @@ -459,7 +459,9 @@ def _validate_bdd_case_exists(self, case_id: int, feature_name: str) -> tuple: case_data = response.response_text # Step 2: Validate it's a BDD test case - bdd_scenario_field = case_data.get("custom_testrail_bdd_scenario") + # Resolve BDD case field name dynamically + bdd_field_name = api_handler.get_bdd_case_field_name() + bdd_scenario_field = case_data.get(bdd_field_name) if not bdd_scenario_field: return ( @@ -468,7 +470,7 @@ def _validate_bdd_case_exists(self, case_id: int, feature_name: str) -> tuple: f"BDD Validation Error: Case C{case_id} is NOT a BDD test case.\n" f"Feature: '{feature_name}'\n" f"Case Title: '{case_data.get('title', 'Unknown')}'\n\n" - f"Reason: The 'custom_testrail_bdd_scenario' field is empty or 
null.\n" + f"Reason: The '{bdd_field_name}' field is empty or null.\n" f"This indicates the case is using a regular template, not the BDD template.\n\n" f"Action Required:\n" f" Option 1: Upload this case using standard mode (remove --special-parser bdd)\n" @@ -663,14 +665,24 @@ def _parse_bdd_feature_as_single_case(self, testsuite) -> Union[TestRailCase, No comment = summary # Step 6: Create aggregated result + # Get API handler to resolve BDD result field name + from trcli.api.project_based_client import ProjectBasedClient + from trcli.data_classes.dataclass_testrail import TestRailSuite as TRSuite + + temp_suite = TRSuite(name="temp", suite_id=1) + project_client = ProjectBasedClient(environment=self.env, suite=temp_suite) + bdd_result_field_name = project_client.api_request_handler.get_bdd_result_field_name() + result = TestRailResult( case_id=case_id, status_id=overall_status, elapsed=total_time if total_time > 0 else None, # Pass numeric value, not formatted string - custom_testrail_bdd_scenario_results=bdd_scenario_results, comment=comment, ) + # Set BDD scenario results using dynamically resolved field name + setattr(result, bdd_result_field_name, bdd_scenario_results) + # Step 7: Create test case test_case = TestRailCase( title=feature_name, From cdf4632d2f3937b0abf3e0ef26288a4b9ba72e02 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 4 Feb 2026 17:43:05 +0800 Subject: [PATCH 20/33] TRCLI-21: Updated tests for junit special parser bdd and parse cucumber --- tests/test_cucumber_bdd_matching.py | 28 ++++++++++++++++++---------- tests/test_junit_bdd_parser.py | 5 +++++ trcli/commands/cmd_parse_cucumber.py | 24 ++++++------------------ 3 files changed, 29 insertions(+), 28 deletions(-) diff --git a/tests/test_cucumber_bdd_matching.py b/tests/test_cucumber_bdd_matching.py index 94f42a4..869662e 100644 --- a/tests/test_cucumber_bdd_matching.py +++ b/tests/test_cucumber_bdd_matching.py @@ -93,6 +93,14 @@ def teardown_method(self): if hasattr(self, "temp_file") and os.path.exists(self.temp_file.name): os.unlink(self.temp_file.name) + def _create_mock_api_handler(self): + """Helper to create mock API handler with BDD field resolution""" + mock_api_handler = MagicMock() + # Mock BDD field name resolution (returns default field names) + mock_api_handler.get_bdd_case_field_name.return_value = "custom_testrail_bdd_scenario" + mock_api_handler.get_bdd_result_field_name.return_value = "custom_testrail_bdd_scenario_results" + return mock_api_handler + @pytest.mark.cucumber_bdd_matching def test_normalize_title_basic(self): """Test title normalization removes special characters and normalizes case""" @@ -331,7 +339,7 @@ def test_validate_bdd_case_exists_valid(self): """Test validation succeeds for valid BDD case""" parser = CucumberParser(self.environment) - mock_api_handler = MagicMock() + mock_api_handler = self._create_mock_api_handler() parser._api_handler = mock_api_handler # Mock valid BDD case - mock send_get response @@ -354,7 +362,7 @@ def test_validate_bdd_case_not_found(self): """Test validation fails when case not found""" parser = CucumberParser(self.environment) - mock_api_handler = MagicMock() + mock_api_handler = self._create_mock_api_handler() parser._api_handler = mock_api_handler # Mock case not found - mock send_get response @@ -373,7 +381,7 @@ def test_validate_bdd_case_not_bdd_template(self): """Test validation fails when case is not BDD template""" parser = CucumberParser(self.environment) - mock_api_handler = MagicMock() + mock_api_handler = 
self._create_mock_api_handler() parser._api_handler = mock_api_handler # Mock non-BDD case - mock send_get response @@ -392,7 +400,7 @@ def test_parse_feature_as_bdd_case_with_tag(self): """Test parsing feature as BDD case using @C tag""" parser = CucumberParser(self.environment) - mock_api_handler = MagicMock() + mock_api_handler = self._create_mock_api_handler() parser._api_handler = mock_api_handler # Mock validation - mock send_get response @@ -419,7 +427,7 @@ def test_parse_feature_as_bdd_case_by_title(self, mock_find): """Test parsing feature as BDD case using title matching""" parser = CucumberParser(self.environment) - mock_api_handler = MagicMock() + mock_api_handler = self._create_mock_api_handler() parser._api_handler = mock_api_handler # Mock title matching @@ -442,7 +450,7 @@ def test_parse_feature_as_bdd_case_scenario_statuses(self): """Test BDD scenario results have correct statuses""" parser = CucumberParser(self.environment) - mock_api_handler = MagicMock() + mock_api_handler = self._create_mock_api_handler() parser._api_handler = mock_api_handler # Mock validation - mock send_get response @@ -471,7 +479,7 @@ def test_parse_feature_as_bdd_case_elapsed_time(self): """Test elapsed time calculation for BDD case""" parser = CucumberParser(self.environment) - mock_api_handler = MagicMock() + mock_api_handler = self._create_mock_api_handler() parser._api_handler = mock_api_handler # Mock validation - mock send_get response @@ -493,7 +501,7 @@ def test_parse_feature_as_bdd_case_not_found(self): """Test parsing returns None when case not found""" parser = CucumberParser(self.environment) - mock_api_handler = MagicMock() + mock_api_handler = self._create_mock_api_handler() parser._api_handler = mock_api_handler # Mock no case found (no tag, no title match) @@ -507,7 +515,7 @@ def test_parse_feature_as_bdd_case_validation_fails(self): """Test parsing returns None when validation fails""" parser = CucumberParser(self.environment) - mock_api_handler = MagicMock() + mock_api_handler = self._create_mock_api_handler() parser._api_handler = mock_api_handler # Mock validation failure (not BDD template) - mock send_get response @@ -528,7 +536,7 @@ def test_parse_feature_branching_bdd_mode(self): """Test _parse_feature branches correctly to BDD matching mode""" parser = CucumberParser(self.environment) - mock_api_handler = MagicMock() + mock_api_handler = self._create_mock_api_handler() parser._api_handler = mock_api_handler # Mock validation - mock send_get response diff --git a/tests/test_junit_bdd_parser.py b/tests/test_junit_bdd_parser.py index 13da136..a87d406 100644 --- a/tests/test_junit_bdd_parser.py +++ b/tests/test_junit_bdd_parser.py @@ -49,6 +49,9 @@ def mock_api_validation_success(self): } mock_api_handler.client.send_get.return_value = mock_response + # Mock BDD field name resolution (returns default names) + mock_api_handler.get_bdd_case_field_name.return_value = "custom_testrail_bdd_scenario" + mock_api_handler.get_bdd_result_field_name.return_value = "custom_testrail_bdd_scenario_results" mock_client.api_request_handler = mock_api_handler mock_client_class.return_value = mock_client @@ -243,6 +246,7 @@ def test_validate_case_exists_success(self, mock_client_class, environment): } mock_api_handler.client.send_get.return_value = mock_response + mock_api_handler.get_bdd_case_field_name.return_value = "custom_testrail_bdd_scenario" # Mock field resolution mock_client.api_request_handler = mock_api_handler mock_client_class.return_value = mock_client @@ -294,6 +298,7 @@ def 
test_validate_case_not_bdd(self, mock_client_class, environment): } mock_api_handler.client.send_get.return_value = mock_response + mock_api_handler.get_bdd_case_field_name.return_value = "custom_testrail_bdd_scenario" # Mock field resolution mock_client.api_request_handler = mock_api_handler mock_client_class.return_value = mock_client diff --git a/trcli/commands/cmd_parse_cucumber.py b/trcli/commands/cmd_parse_cucumber.py index a7f468a..feea27f 100644 --- a/trcli/commands/cmd_parse_cucumber.py +++ b/trcli/commands/cmd_parse_cucumber.py @@ -219,27 +219,15 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): environment.log(f"Created case ID: C{case_id}") environment.log(f"Successfully created {len(created_case_ids)} BDD test case(s)") + environment.vlog("Clearing BDD cache to include newly created cases...") + api_handler._bdd_case_cache.clear() - # Re-parse with the newly created case IDs in cache - environment.vlog("\nRe-parsing to match newly created cases...") + # Re-parse with the newly created case IDs + environment.vlog("Re-parsing to match newly created cases...") parser_for_results = CucumberParser(environment) parser_for_results.set_api_handler(api_handler) - # Build cache with newly created case IDs - temp_cache = created_case_ids.copy() - - # Also include existing cases from original parse - for suite in parsed_suites: - for section in suite.testsections: - for test_case in section.testcases: - if test_case.case_id != -1: - normalized = parser_for_results._normalize_title(section.name) - temp_cache[normalized] = test_case.case_id - - # Override cache - parser_for_results._bdd_case_cache = temp_cache - - # Re-parse in BDD matching mode with updated cache + # Re-parse in BDD matching mode (cache will rebuild with new cases) parsed_suites = parser_for_results.parse_file( bdd_matching_mode=True, project_id=resolved_project_id, @@ -247,7 +235,7 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): auto_create=False, # No need to mark for creation again ) - environment.vlog(f"Re-parsed with {len(temp_cache)} cached case(s)") + environment.vlog(f"Re-parsed successfully with {len(created_case_ids)} newly created case(s)") # Ensure all suites have suite_id set from environment for suite in parsed_suites: From 2e66d3cb8a3478529b5442922ff8f2e88bd95f82 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 5 Feb 2026 15:10:34 +0800 Subject: [PATCH 21/33] TRCLI-21: Fixed an issue where results are not correctly added to custom bdd result fields --- trcli/readers/cucumber_json.py | 7 +++++-- trcli/readers/junit_xml.py | 7 +++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/trcli/readers/cucumber_json.py b/trcli/readers/cucumber_json.py index 67b3811..d018aa2 100644 --- a/trcli/readers/cucumber_json.py +++ b/trcli/readers/cucumber_json.py @@ -914,8 +914,11 @@ def _parse_feature_as_bdd_case( elapsed=elapsed_time, ) - # Set BDD scenario results using dynamically resolved field name - setattr(result, bdd_result_field_name, bdd_scenario_results) + # Add BDD scenario results to result_fields dict (for serialization) + # Convert TestRailSeparatedStep objects to dicts for API + result.result_fields[bdd_result_field_name] = [ + {"content": step.content, "status_id": step.status_id} for step in bdd_scenario_results + ] # Step 8: Create test case test_case = TestRailCase( diff --git a/trcli/readers/junit_xml.py b/trcli/readers/junit_xml.py index 522fc71..c8756d8 100644 --- a/trcli/readers/junit_xml.py +++ 
b/trcli/readers/junit_xml.py @@ -680,8 +680,11 @@ def _parse_bdd_feature_as_single_case(self, testsuite) -> Union[TestRailCase, No comment=comment, ) - # Set BDD scenario results using dynamically resolved field name - setattr(result, bdd_result_field_name, bdd_scenario_results) + # Add BDD scenario results to result_fields dict (for serialization) + # Convert TestRailSeparatedStep objects to dicts for API + result.result_fields[bdd_result_field_name] = [ + {"content": step.content, "status_id": step.status_id} for step in bdd_scenario_results + ] # Step 7: Create test case test_case = TestRailCase( From e9dcf711aa9dd644b8124bb8b9c541bbb4c2ecab Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 5 Feb 2026 15:11:24 +0800 Subject: [PATCH 22/33] TRCLI-21: Updated unit tests for special parser bdd and parse cucumber --- tests/test_cucumber_bdd_matching.py | 24 ++++++++++++++++-------- tests/test_junit_bdd_parser.py | 22 ++++++++++++++++------ 2 files changed, 32 insertions(+), 14 deletions(-) diff --git a/tests/test_cucumber_bdd_matching.py b/tests/test_cucumber_bdd_matching.py index 869662e..a39d66f 100644 --- a/tests/test_cucumber_bdd_matching.py +++ b/tests/test_cucumber_bdd_matching.py @@ -418,7 +418,12 @@ def test_parse_feature_as_bdd_case_with_tag(self): assert test_case is not None assert test_case.case_id == 123 assert test_case.result.case_id == 123 - assert len(test_case.result.custom_testrail_bdd_scenario_results) == 2 # Two scenarios + + # Check BDD scenario results are in result_fields dict + bdd_field_name = "custom_testrail_bdd_scenario_results" + assert bdd_field_name in test_case.result.result_fields + assert len(test_case.result.result_fields[bdd_field_name]) == 2 # Two scenarios + assert test_case.result.status_id == 5 # Failed (one scenario failed) @pytest.mark.cucumber_bdd_matching @@ -464,15 +469,18 @@ def test_parse_feature_as_bdd_case_scenario_statuses(self): test_case = parser._parse_feature_as_bdd_case(feature_with_tag, project_id=1, suite_id=2) - scenarios = test_case.result.custom_testrail_bdd_scenario_results + # Check BDD scenario results are in result_fields dict + bdd_field_name = "custom_testrail_bdd_scenario_results" + assert bdd_field_name in test_case.result.result_fields + scenarios = test_case.result.result_fields[bdd_field_name] - # First scenario: passed - assert scenarios[0].content == "Successful login" - assert scenarios[0].status_id == 1 + # First scenario: passed (results are stored as dicts) + assert scenarios[0]["content"] == "Successful login" + assert scenarios[0]["status_id"] == 1 - # Second scenario: failed - assert scenarios[1].content == "Failed login" - assert scenarios[1].status_id == 5 + # Second scenario: failed (results are stored as dicts) + assert scenarios[1]["content"] == "Failed login" + assert scenarios[1]["status_id"] == 5 @pytest.mark.cucumber_bdd_matching def test_parse_feature_as_bdd_case_elapsed_time(self): diff --git a/tests/test_junit_bdd_parser.py b/tests/test_junit_bdd_parser.py index a87d406..b02ed1f 100644 --- a/tests/test_junit_bdd_parser.py +++ b/tests/test_junit_bdd_parser.py @@ -328,7 +328,12 @@ def test_parse_bdd_feature_all_pass(self, environment, mock_api_validation_succe assert test_case is not None assert test_case.case_id == 100 assert test_case.result.status_id == 1 # Passed - assert len(test_case.result.custom_testrail_bdd_scenario_results) == 2 + + # Check BDD scenario results are in result_fields dict + bdd_field_name = "custom_testrail_bdd_scenario_results" + assert bdd_field_name in 
test_case.result.result_fields
+        assert len(test_case.result.result_fields[bdd_field_name]) == 2
+
         assert "Total Scenarios: 2" in test_case.result.comment
         assert "Passed: 2" in test_case.result.comment
 
@@ -351,12 +356,17 @@ def test_parse_bdd_feature_mixed_results(self, environment, mock_api_validation_
         assert test_case is not None
         assert test_case.case_id == 25293
         assert test_case.result.status_id == 5  # Failed (fail-fast)
-        assert len(test_case.result.custom_testrail_bdd_scenario_results) == 3
-        # Check step statuses
-        assert test_case.result.custom_testrail_bdd_scenario_results[0].status_id == 1  # Passed
-        assert test_case.result.custom_testrail_bdd_scenario_results[1].status_id == 5  # Failed
-        assert test_case.result.custom_testrail_bdd_scenario_results[2].status_id == 4  # Skipped
+        # Check BDD scenario results are in result_fields dict
+        bdd_field_name = "custom_testrail_bdd_scenario_results"
+        assert bdd_field_name in test_case.result.result_fields
+        assert len(test_case.result.result_fields[bdd_field_name]) == 3
+
+        # Check step statuses (results are stored as dicts in result_fields)
+        bdd_results = test_case.result.result_fields[bdd_field_name]
+        assert bdd_results[0]["status_id"] == 1  # Passed
+        assert bdd_results[1]["status_id"] == 5  # Failed
+        assert bdd_results[2]["status_id"] == 4  # Skipped
         # Check comment contains summary and failure details
         assert "Total Scenarios: 3" in test_case.result.comment

From 077f28bcfdab3194f623820eee9f2dc12070e969 Mon Sep 17 00:00:00 2001
From: acuanico-tr-galt
Date: Thu, 18 Dec 2025 17:38:03 +0800
Subject: [PATCH 23/33] TRCLI-211 Improved caching strategy and implemented N+1 optimizations

---
 tests/test_api_request_handler.py |  82 +++++++++++-
 trcli/api/api_cache.py            | 205 ++++++++++++++++++++++++++++++
 trcli/api/api_request_handler.py  |  59 +++++++--
 3 files changed, 330 insertions(+), 16 deletions(-)
 create mode 100644 trcli/api/api_cache.py

diff --git a/tests/test_api_request_handler.py b/tests/test_api_request_handler.py
index 7864410..88877e6 100644
--- a/tests/test_api_request_handler.py
+++ b/tests/test_api_request_handler.py
@@ -30,6 +30,8 @@ def _make_handler(verify=False, custom_json=None):
             json_string = json.dumps(json.load(file_json))
         test_input = from_json(TestRailSuite, json_string)
         api_request = ApiRequestHandler(environment, api_client, test_input, verify)
+        # Clear cache for each test to ensure isolation
+        api_request._cache.clear()
         return api_request
 
     return _make_handler
@@ -37,18 +39,27 @@ def _make_handler(verify=False, custom_json=None):
 
 @pytest.fixture(scope="function")
 def api_request_handler(handler_maker):
-    yield handler_maker()
+    handler = handler_maker()
+    yield handler
+    # Clean up cache after test
+    handler._cache.clear()
 
 
 @pytest.fixture(scope="function")
 def api_request_handler_verify(handler_maker):
-    yield handler_maker(verify=True)
+    handler = handler_maker(verify=True)
+    yield handler
+    # Clean up cache after test
+    handler._cache.clear()
 
 
 @pytest.fixture(scope="function")
 def api_request_handler_update_case_json(handler_maker):
     json_path = Path(__file__).parent / "test_data/json/update_case_result_single_with_id.json"
-    yield handler_maker(custom_json=json_path, verify=False)
+    handler = handler_maker(custom_json=json_path, verify=False)
+    yield handler
+    # Clean up cache after test
+    handler._cache.clear()
 
 
 class TestApiRequestHandler:
@@ -1191,3 +1202,68 @@ def test_upload_attachments_file_not_found(self, api_request_handler: ApiRequest
         # Call upload_attachments - should not raise exception
api_request_handler.upload_attachments(report_results, results, run_id) + + @pytest.mark.api_handler + def test_caching_reduces_api_calls(self, api_request_handler: ApiRequestHandler, requests_mock): + """Test that caching reduces the number of API calls for repeated requests""" + mocked_response = { + "offset": 0, + "limit": 250, + "size": 2, + "_links": {"next": None, "prev": None}, + "projects": [ + {"id": 1, "name": "DataHub", "suite_mode": 1}, + {"id": 2, "name": "Test Project", "suite_mode": 1}, + ], + } + + # Set up mock + mock_get = requests_mock.get(create_url("get_projects"), json=mocked_response) + + # First call should hit the API + result1 = api_request_handler.get_project_data("Test Project") + assert result1.project_id == 2 + assert mock_get.call_count == 1, "First call should hit the API" + + # Second call should use cache + result2 = api_request_handler.get_project_data("Test Project") + assert result2.project_id == 2 + assert mock_get.call_count == 1, "Second call should use cache, not hit API again" + + # Third call with different name should still use cache (same endpoint) + result3 = api_request_handler.get_project_data("DataHub") + assert result3.project_id == 1 + assert mock_get.call_count == 1, "Third call should still use cached data" + + @pytest.mark.api_handler + def test_cache_stats(self, api_request_handler: ApiRequestHandler, requests_mock): + """Test that cache statistics are tracked correctly""" + mocked_response = { + "offset": 0, + "limit": 250, + "size": 1, + "_links": {"next": None, "prev": None}, + "projects": [{"id": 1, "name": "Test Project", "suite_mode": 1}], + } + + requests_mock.get(create_url("get_projects"), json=mocked_response) + + # Check initial stats + stats = api_request_handler._cache.get_stats() + assert stats["hit_count"] == 0 + assert stats["miss_count"] == 0 + assert stats["size"] == 0 + + # Make first call (cache miss) + api_request_handler.get_project_data("Test Project") + stats = api_request_handler._cache.get_stats() + assert stats["miss_count"] == 1 + assert stats["hit_count"] == 0 + assert stats["size"] == 1 + + # Make second call (cache hit) + api_request_handler.get_project_data("Test Project") + stats = api_request_handler._cache.get_stats() + assert stats["miss_count"] == 1 + assert stats["hit_count"] == 1 + assert stats["hit_rate"] == 50.0 # 1 hit out of 2 total requests diff --git a/trcli/api/api_cache.py b/trcli/api/api_cache.py new file mode 100644 index 0000000..9ab6741 --- /dev/null +++ b/trcli/api/api_cache.py @@ -0,0 +1,205 @@ +""" +API Response Cache Module + +This module provides a session-scoped caching mechanism for API responses +to reduce redundant API calls and improve performance. + +The cache is designed to be: +- Thread-safe +- Session-scoped (per ApiRequestHandler instance) +- Backwards compatible (transparent to existing code) +- Memory-efficient (uses LRU eviction) +""" + +from functools import lru_cache +from typing import Any, Tuple, Optional, Callable +from threading import Lock +from beartype.typing import List, Dict + + +class RequestCache: + """ + Session-scoped cache for API responses. + + This cache stores API responses during a single command execution session + to avoid redundant API calls. Each ApiRequestHandler instance should have + its own cache instance. 
+ + Key features: + - Automatic cache key generation from endpoint and parameters + - LRU eviction policy to prevent unbounded memory growth + - Thread-safe operations + - Simple invalidation mechanism + """ + + def __init__(self, max_size: int = 512): + """ + Initialize the request cache. + + Args: + max_size: Maximum number of cached responses (default: 512) + """ + self.max_size = max_size + self._cache: Dict[str, Any] = {} + self._lock = Lock() + self._hit_count = 0 + self._miss_count = 0 + + def _make_cache_key(self, endpoint: str, params: Optional[Tuple] = None) -> str: + """ + Generate a unique cache key from endpoint and parameters. + + Args: + endpoint: API endpoint (e.g., "get_cases/123") + params: Optional tuple of parameters + + Returns: + String cache key + """ + if params is None: + return endpoint + + # Convert params to a sorted tuple to ensure consistent keys + if isinstance(params, dict): + params_tuple = tuple(sorted(params.items())) + elif isinstance(params, (list, tuple)): + params_tuple = tuple(params) + else: + params_tuple = (params,) + + return f"{endpoint}::{params_tuple}" + + def get(self, endpoint: str, params: Optional[Tuple] = None) -> Optional[Any]: + """ + Retrieve a cached response. + + Args: + endpoint: API endpoint + params: Optional parameters + + Returns: + Cached response or None if not found + """ + cache_key = self._make_cache_key(endpoint, params) + + with self._lock: + if cache_key in self._cache: + self._hit_count += 1 + return self._cache[cache_key] + else: + self._miss_count += 1 + return None + + def set(self, endpoint: str, response: Any, params: Optional[Tuple] = None) -> None: + """ + Store a response in the cache. + + Args: + endpoint: API endpoint + response: Response to cache + params: Optional parameters + """ + cache_key = self._make_cache_key(endpoint, params) + + with self._lock: + # Implement simple LRU: if cache is full, remove oldest entry + if len(self._cache) >= self.max_size: + # Remove the first (oldest) item + first_key = next(iter(self._cache)) + del self._cache[first_key] + + self._cache[cache_key] = response + + def invalidate(self, endpoint: Optional[str] = None, params: Optional[Tuple] = None) -> None: + """ + Invalidate cache entries. + + Args: + endpoint: If provided, invalidate only this endpoint. + If None, clear entire cache. + params: Optional parameters to narrow invalidation + """ + with self._lock: + if endpoint is None: + # Clear entire cache + self._cache.clear() + else: + cache_key = self._make_cache_key(endpoint, params) + if cache_key in self._cache: + del self._cache[cache_key] + + def invalidate_pattern(self, pattern: str) -> None: + """ + Invalidate all cache entries matching a pattern. + + Args: + pattern: String pattern to match against cache keys + """ + with self._lock: + keys_to_delete = [key for key in self._cache if pattern in key] + for key in keys_to_delete: + del self._cache[key] + + def get_or_fetch( + self, + endpoint: str, + fetch_func: Callable[[], Tuple[Any, str]], + params: Optional[Tuple] = None, + force_refresh: bool = False, + ) -> Tuple[Any, str]: + """ + Get cached response or fetch if not cached. + + This is the main method for integrating caching into existing code. + It transparently handles cache hits/misses and maintains the same + return signature as the original fetch functions. 
+ + Args: + endpoint: API endpoint + fetch_func: Function to call if cache miss (should return (data, error)) + params: Optional parameters for cache key + force_refresh: If True, bypass cache and fetch fresh data + + Returns: + Tuple of (data, error_message) matching API call signature + """ + if not force_refresh: + cached = self.get(endpoint, params) + if cached is not None: + # Return cached result + return cached + + # Cache miss or force refresh - fetch fresh data + result = fetch_func() + + # Only cache successful responses (no error) + data, error = result + if not error: + self.set(endpoint, result, params) + + return result + + def get_stats(self) -> Dict[str, int]: + """ + Get cache statistics. + + Returns: + Dictionary with hit_count, miss_count, size, and hit_rate + """ + with self._lock: + total = self._hit_count + self._miss_count + hit_rate = (self._hit_count / total * 100) if total > 0 else 0.0 + + return { + "hit_count": self._hit_count, + "miss_count": self._miss_count, + "size": len(self._cache), + "hit_rate": hit_rate, + } + + def clear(self) -> None: + """Clear all cached data and reset statistics.""" + with self._lock: + self._cache.clear() + self._hit_count = 0 + self._miss_count = 0 diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index 7526d0b..cac3379 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -5,6 +5,7 @@ from trcli.api.api_client import APIClient, APIClientResult from trcli.api.api_response_verify import ApiResponseVerify +from trcli.api.api_cache import RequestCache from trcli.cli import Environment from trcli.constants import ( ProjectErrors, @@ -45,6 +46,8 @@ def __init__( ) self.suites_data_from_provider = self.data_provider.suites_input self.response_verifier = ApiResponseVerify(verify) + # Initialize session-scoped cache for API responses + self._cache = RequestCache(max_size=512) # BDD case cache for feature name matching (shared by CucumberParser and JunitParser) # Structure: {"{project_id}_{suite_id}": {normalized_name: [case_dict, case_dict, ...]}} @@ -1057,36 +1060,66 @@ def __cancel_running_futures(self, futures, action_string): def __get_all_cases(self, project_id=None, suite_id=None) -> Tuple[List[dict], str]: """ - Get all cases from all pages + Get all cases from all pages (with caching) """ - if suite_id is None: - return self.__get_all_entities("cases", f"get_cases/{project_id}") - else: - return self.__get_all_entities("cases", f"get_cases/{project_id}&suite_id={suite_id}") + cache_key = f"get_cases/{project_id}" + params = (project_id, suite_id) + + def fetch(): + if suite_id is None: + return self.__get_all_entities("cases", f"get_cases/{project_id}", entities=[]) + else: + return self.__get_all_entities("cases", f"get_cases/{project_id}&suite_id={suite_id}", entities=[]) + + return self._cache.get_or_fetch(cache_key, fetch, params) def __get_all_sections(self, project_id=None, suite_id=None) -> Tuple[List[dict], str]: """ - Get all sections from all pages + Get all sections from all pages (with caching) """ - return self.__get_all_entities("sections", f"get_sections/{project_id}&suite_id={suite_id}") + cache_key = f"get_sections/{project_id}" + params = (project_id, suite_id) + + def fetch(): + return self.__get_all_entities("sections", f"get_sections/{project_id}&suite_id={suite_id}", entities=[]) + + return self._cache.get_or_fetch(cache_key, fetch, params) def __get_all_tests_in_run(self, run_id=None) -> Tuple[List[dict], str]: """ - Get all tests from all 
pages + Get all tests from all pages (with caching) """ - return self.__get_all_entities("tests", f"get_tests/{run_id}") + cache_key = f"get_tests/{run_id}" + params = (run_id,) + + def fetch(): + return self.__get_all_entities("tests", f"get_tests/{run_id}", entities=[]) + + return self._cache.get_or_fetch(cache_key, fetch, params) def __get_all_projects(self) -> Tuple[List[dict], str]: """ - Get all projects from all pages + Get all projects from all pages (with caching) """ - return self.__get_all_entities("projects", f"get_projects") + cache_key = "get_projects" + params = None + + def fetch(): + return self.__get_all_entities("projects", f"get_projects", entities=[]) + + return self._cache.get_or_fetch(cache_key, fetch, params) def __get_all_suites(self, project_id) -> Tuple[List[dict], str]: """ - Get all suites from all pages + Get all suites from all pages (with caching) """ - return self.__get_all_entities("suites", f"get_suites/{project_id}") + cache_key = f"get_suites/{project_id}" + params = (project_id,) + + def fetch(): + return self.__get_all_entities("suites", f"get_suites/{project_id}", entities=[]) + + return self._cache.get_or_fetch(cache_key, fetch, params) def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[Dict], str]: """ From cf830d6d441211495a4a8162c9fcc67c1b053829 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 23 Dec 2025 18:01:45 +0800 Subject: [PATCH 24/33] TRCLI-213 Refactor monolithic class api_request_handler into separate handlers by feature and responsibilities --- trcli/api/api_request_handler.py | 1775 ++---------------------------- trcli/api/api_utils.py | 285 +++++ trcli/api/bdd_handler.py | 224 ++++ trcli/api/case_handler.py | 217 ++++ trcli/api/case_matcher.py | 249 +++++ trcli/api/label_manager.py | 644 +++++++++++ trcli/api/reference_manager.py | 134 +++ trcli/api/result_handler.py | 178 +++ trcli/api/run_handler.py | 292 +++++ trcli/api/section_handler.py | 140 +++ trcli/api/suite_handler.py | 163 +++ 11 files changed, 2638 insertions(+), 1663 deletions(-) create mode 100644 trcli/api/api_utils.py create mode 100644 trcli/api/bdd_handler.py create mode 100644 trcli/api/case_handler.py create mode 100644 trcli/api/case_matcher.py create mode 100644 trcli/api/label_manager.py create mode 100644 trcli/api/reference_manager.py create mode 100644 trcli/api/result_handler.py create mode 100644 trcli/api/run_handler.py create mode 100644 trcli/api/section_handler.py create mode 100644 trcli/api/suite_handler.py diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index cac3379..b2e5f15 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -1,4 +1,4 @@ -import html, json, os +import os import time from concurrent.futures import ThreadPoolExecutor, as_completed from beartype.typing import List, Union, Tuple, Dict, Optional @@ -6,6 +6,15 @@ from trcli.api.api_client import APIClient, APIClientResult from trcli.api.api_response_verify import ApiResponseVerify from trcli.api.api_cache import RequestCache +from trcli.api.label_manager import LabelManager +from trcli.api.reference_manager import ReferenceManager +from trcli.api.case_matcher import CaseMatcherFactory +from trcli.api.suite_handler import SuiteHandler +from trcli.api.section_handler import SectionHandler +from trcli.api.result_handler import ResultHandler +from trcli.api.run_handler import RunHandler +from trcli.api.bdd_handler import BddHandler +from trcli.api.case_handler import CaseHandler from 
trcli.cli import Environment from trcli.constants import ( ProjectErrors, @@ -48,6 +57,33 @@ def __init__( self.response_verifier = ApiResponseVerify(verify) # Initialize session-scoped cache for API responses self._cache = RequestCache(max_size=512) + # Initialize specialized managers + self.label_manager = LabelManager(api_client, environment) + self.reference_manager = ReferenceManager(api_client, environment) + self.suite_handler = SuiteHandler( + api_client, environment, self.data_provider, get_all_suites_callback=self.__get_all_suites + ) + self.section_handler = SectionHandler( + api_client, environment, self.data_provider, get_all_sections_callback=self.__get_all_sections + ) + self.result_handler = ResultHandler( + api_client, + environment, + self.data_provider, + get_all_tests_in_run_callback=self.__get_all_tests_in_run, + handle_futures_callback=self.handle_futures, + ) + self.run_handler = RunHandler( + api_client, environment, self.data_provider, get_all_tests_in_run_callback=self.__get_all_tests_in_run + ) + self.bdd_handler = BddHandler(api_client, environment) + self.case_handler = CaseHandler( + api_client, + environment, + self.data_provider, + handle_futures_callback=self.handle_futures, + retrieve_results_callback=ApiRequestHandler.retrieve_results_after_cancelling, + ) # BDD case cache for feature name matching (shared by CucumberParser and JunitParser) # Structure: {"{project_id}_{suite_id}": {normalized_name: [case_dict, case_dict, ...]}} @@ -78,11 +114,13 @@ def check_automation_id_field(self, project_id: int) -> Union[str, None]: return FAULT_MAPPING["automation_id_unavailable"] if not automation_id_field["configs"]: self._active_automation_id_field = automation_id_field["system_name"] + self.case_handler._active_automation_id_field = automation_id_field["system_name"] return None for config in automation_id_field["configs"]: context = config["context"] if context["is_global"] or project_id in context["project_ids"]: self._active_automation_id_field = automation_id_field["system_name"] + self.case_handler._active_automation_id_field = automation_id_field["system_name"] return None return FAULT_MAPPING["automation_id_unavailable"] else: @@ -136,334 +174,50 @@ def get_project_data(self, project_name: str, project_id: int = None) -> Project ) def check_suite_id(self, project_id: int) -> Tuple[bool, str]: - """ - Check if suite from DataProvider exist using get_suites endpoint - :project_id: project id - :returns: True if exists in suites. False if not. - """ suite_id = self.suites_data_from_provider.suite_id - suites_data, error = self.__get_all_suites(project_id) - if not error: - available_suites = [suite for suite in suites_data if suite["id"] == suite_id] - return ( - (True, "") - if len(available_suites) > 0 - else (False, FAULT_MAPPING["missing_suite"].format(suite_id=suite_id)) - ) - else: - return None, suites_data.error_message + return self.suite_handler.check_suite_id(project_id, suite_id) def resolve_suite_id_using_name(self, project_id: int) -> Tuple[int, str]: - """Get suite ID matching suite name on data provider or returns -1 if unable to match any suite. 
- :arg project_id: project id - :returns: tuple with id of the suite and error message""" - suite_id = -1 suite_name = self.suites_data_from_provider.name - suites_data, error = self.__get_all_suites(project_id) - if not error: - for suite in suites_data: - if suite["name"] == suite_name: - suite_id = suite["id"] - self.data_provider.update_data([{"suite_id": suite["id"], "name": suite["name"]}]) - break - return ( - (suite_id, "") - if suite_id != -1 - else (-1, FAULT_MAPPING["missing_suite_by_name"].format(suite_name=suite_name)) - ) - else: - return -1, error + return self.suite_handler.resolve_suite_id_using_name(project_id, suite_name) def get_suite_ids(self, project_id: int) -> Tuple[List[int], str]: - """Get suite IDs for requested project_id. - : project_id: project id - : returns: tuple with list of suite ids and error string""" - available_suites = [] - returned_resources = [] - suites_data, error = self.__get_all_suites(project_id) - if not error: - for suite in suites_data: - available_suites.append(suite["id"]) - returned_resources.append( - { - "suite_id": suite["id"], - "name": suite["name"], - } - ) - if returned_resources: - self.data_provider.update_data(suite_data=returned_resources) - else: - print("Update skipped") - return ( - (available_suites, "") - if len(available_suites) > 0 - else ([], FAULT_MAPPING["no_suites_found"].format(project_id=project_id)) - ) - else: - return [], error + return self.suite_handler.get_suite_ids(project_id) def add_suites(self, project_id: int) -> Tuple[List[Dict], str]: - """ - Adds suites that doesn't have ID's in DataProvider. - Runs update_data in data_provider for successfully created resources. - :project_id: project_id - :returns: Tuple with list of dict created resources and error string. - """ - add_suite_data = self.data_provider.add_suites_data() - responses = [] - error_message = "" - for body in add_suite_data: - response = self.client.send_post(f"add_suite/{project_id}", body) - if not response.error_message: - responses.append(response) - if not self.response_verifier.verify_returned_data(body, response.response_text): - responses.append(response) - error_message = FAULT_MAPPING["data_verification_error"] - break - else: - error_message = response.error_message - break - - returned_resources = [ - { - "suite_id": response.response_text["id"], - "name": response.response_text["name"], - } - for response in responses - ] - ( - self.data_provider.update_data(suite_data=returned_resources) - if len(returned_resources) > 0 - else "Update skipped" - ) - return returned_resources, error_message + return self.suite_handler.add_suites(project_id, verify_callback=self.response_verifier.verify_returned_data) def check_missing_section_ids(self, project_id: int) -> Tuple[bool, str]: - """ - Check what section id's are missing in DataProvider. - :project_id: project_id - :returns: Tuple with list missing section ID and error string. 
- """ suite_id = self.suites_data_from_provider.suite_id - returned_sections, error_message = self.__get_all_sections(project_id, suite_id) - if not error_message: - missing_test_sections = False - sections_by_id = {section["id"]: section for section in returned_sections} - sections_by_name = {section["name"]: section for section in returned_sections} - section_data = [] - for section in self.suites_data_from_provider.testsections: - if self.environment.section_id: - if section.section_id in sections_by_id.keys(): - section_json = sections_by_id[section.section_id] - section_data.append( - { - "section_id": section_json["id"], - "suite_id": section_json["suite_id"], - "name": section_json["name"], - } - ) - else: - missing_test_sections = True - if section.name in sections_by_name.keys(): - section_json = sections_by_name[section.name] - section_data.append( - { - "section_id": section_json["id"], - "suite_id": section_json["suite_id"], - "name": section_json["name"], - } - ) - else: - missing_test_sections = True - self.data_provider.update_data(section_data=section_data) - return missing_test_sections, error_message - else: - return False, error_message + return self.section_handler.check_missing_section_ids(project_id, suite_id, self.suites_data_from_provider) def add_sections(self, project_id: int) -> Tuple[List[Dict], str]: - """ - Add sections that doesn't have ID in DataProvider. - Runs update_data in data_provider for successfully created resources. - :project_id: project_id - :returns: Tuple with list of dict created resources and error string. - """ - add_sections_data = self.data_provider.add_sections_data() - responses = [] - error_message = "" - for body in add_sections_data: - response = self.client.send_post(f"add_section/{project_id}", body) - if not response.error_message: - responses.append(response) - if not self.response_verifier.verify_returned_data(body, response.response_text): - responses.append(response) - error_message = FAULT_MAPPING["data_verification_error"] - break - else: - error_message = response.error_message - break - returned_resources = [ - { - "section_id": response.response_text["id"], - "suite_id": response.response_text["suite_id"], - "name": response.response_text["name"], - } - for response in responses - ] - ( - self.data_provider.update_data(section_data=returned_resources) - if len(returned_resources) > 0 - else "Update skipped" + return self.section_handler.add_sections( + project_id, verify_callback=self.response_verifier.verify_returned_data ) - return returned_resources, error_message def check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, str]: """ - Check what test cases id's are missing in DataProvider. + Check what test cases id's are missing in DataProvider using the configured matcher strategy. :project_id: project_id :returns: Tuple with list test case ID missing and error string. 
""" - missing_cases_number = 0 suite_id = self.suites_data_from_provider.suite_id - # Performance optimization: Only fetch all cases if using AUTO matcher - # NAME/PROPERTY matchers can validate case IDs individually - if self.environment.case_matcher == MatchersParser.AUTO: - returned_cases, error_message = self.__get_all_cases(project_id, suite_id) - if error_message: - return False, error_message - - if self.environment.case_matcher == MatchersParser.AUTO: - test_cases_by_aut_id = {} - for case in returned_cases: - aut_case_id = case.get(OLD_SYSTEM_NAME_AUTOMATION_ID) or case.get(UPDATED_SYSTEM_NAME_AUTOMATION_ID) - if aut_case_id: - aut_case_id = html.unescape(aut_case_id) - test_cases_by_aut_id[aut_case_id] = case - test_case_data = [] - for section in self.suites_data_from_provider.testsections: - for test_case in section.testcases: - aut_id = test_case.custom_automation_id - if aut_id in test_cases_by_aut_id.keys(): - case = test_cases_by_aut_id[aut_id] - test_case_data.append( - { - "case_id": case["id"], - "section_id": case["section_id"], - "title": case["title"], - OLD_SYSTEM_NAME_AUTOMATION_ID: aut_id, - } - ) - else: - missing_cases_number += 1 - self.data_provider.update_data(case_data=test_case_data) - if missing_cases_number: - self.environment.log(f"Found {missing_cases_number} test cases not matching any TestRail case.") - else: - # For NAME or PROPERTY matcher we validate case IDs - nonexistent_ids = [] - case_ids_to_validate = set() - - # Collect all unique case IDs that need validation - for section in self.suites_data_from_provider.testsections: - for test_case in section.testcases: - if not test_case.case_id: - missing_cases_number += 1 - else: - case_ids_to_validate.add(int(test_case.case_id)) - - total_tests_in_report = missing_cases_number + len(case_ids_to_validate) - - if missing_cases_number: - self.environment.log(f"Found {missing_cases_number} test cases without case ID in the report file.") - - # Smart validation strategy based on report size - # Threshold: 1000 cases (same as skip validation threshold for consistency) - if case_ids_to_validate: - # Skip validation for large reports with all IDs (most efficient) - if missing_cases_number == 0 and total_tests_in_report >= 1000: - # All tests have IDs and report is large: Skip validation (trust IDs) - self.environment.log( - f"Skipping validation of {len(case_ids_to_validate)} case IDs " - f"(all tests have IDs, trusting they exist). " - f"If you encounter errors, ensure all case IDs in your test report exist in TestRail." - ) - nonexistent_ids = [] - - # Fetch all for large reports with missing IDs - elif total_tests_in_report >= 1000: - # Large report (>=1000 cases) with some missing IDs: Fetch all cases and validate locally - # This is more efficient than individual validation for large batches - self.environment.log( - f"Large report detected ({total_tests_in_report} cases). " - f"Fetching all cases from TestRail for efficient validation..." - ) - returned_cases, error_message = self.__get_all_cases(project_id, suite_id) - if error_message: - return False, error_message - - # Build lookup dictionary from fetched cases - all_case_ids = {case["id"] for case in returned_cases} - - # Validate locally (O(1) lookup) - nonexistent_ids = [cid for cid in case_ids_to_validate if cid not in all_case_ids] - - if nonexistent_ids: - self.environment.elog( - f"Nonexistent case IDs found in the report file: {nonexistent_ids[:20]}" - f"{' ...' 
if len(nonexistent_ids) > 20 else ''}" - ) - return False, "Case IDs not in TestRail project or suite were detected in the report file." + # Create appropriate matcher based on configuration (Strategy pattern) + matcher = CaseMatcherFactory.create_matcher(self.environment.case_matcher, self.environment, self.data_provider) - # Individual validation for small reports - else: - # Small report (<1000 cases): Use individual validation - # This is more efficient for small batches - self.environment.log(f"Validating {len(case_ids_to_validate)} case IDs exist in TestRail...") - validated_ids = self.__validate_case_ids_exist(suite_id, list(case_ids_to_validate)) - nonexistent_ids = [cid for cid in case_ids_to_validate if cid not in validated_ids] - - if nonexistent_ids: - self.environment.elog(f"Nonexistent case IDs found in the report file: {nonexistent_ids}") - return False, "Case IDs not in TestRail project or suite were detected in the report file." - - return missing_cases_number > 0, "" + # Delegate to the matcher + return matcher.check_missing_cases( + project_id, + suite_id, + self.suites_data_from_provider, + get_all_cases_callback=self.__get_all_cases, + validate_case_ids_callback=self.__validate_case_ids_exist, + ) def add_cases(self) -> Tuple[List[dict], str]: - """ - Add cases that doesn't have ID in DataProvider. - Runs update_data in data_provider for successfully created resources. - :returns: Tuple with list of dict created resources and error string. - """ - add_case_data = self.data_provider.add_cases() - responses = [] - error_message = "" - with self.environment.get_progress_bar( - results_amount=len(add_case_data), prefix="Adding test cases" - ) as progress_bar: - with ThreadPoolExecutor(max_workers=MAX_WORKERS_ADD_CASE) as executor: - futures = { - executor.submit( - self._add_case_and_update_data, - body, - ): body - for body in add_case_data - } - responses, error_message = self.handle_futures( - futures=futures, action_string="add_case", progress_bar=progress_bar - ) - if error_message: - # When error_message is present we cannot be sure that responses contains all added items. - # Iterate through futures to get all responses from done tasks (not cancelled) - responses = ApiRequestHandler.retrieve_results_after_cancelling(futures) - returned_resources = [ - { - "case_id": response.response_text["id"], - "section_id": response.response_text["section_id"], - "title": response.response_text["title"], - } - for response in responses - ] - return returned_resources, error_message + return self.case_handler.add_cases() def add_run( self, @@ -479,39 +233,19 @@ def add_run( refs: str = None, case_ids: List[int] = None, ) -> Tuple[int, str]: - """ - Creates a new test run. - :project_id: project_id - :run_name: run name - :returns: Tuple with run id and error string. 
- """ - add_run_data = self.data_provider.add_run( + return self.run_handler.add_run( + project_id, run_name, - case_ids=case_ids, - start_date=start_date, - end_date=end_date, - milestone_id=milestone_id, - assigned_to_id=assigned_to_id, - include_all=include_all, - refs=refs, + milestone_id, + start_date, + end_date, + plan_id, + config_ids, + assigned_to_id, + include_all, + refs, + case_ids, ) - if not plan_id: - response = self.client.send_post(f"add_run/{project_id}", add_run_data) - run_id = response.response_text.get("id") - else: - if config_ids: - add_run_data["config_ids"] = config_ids - entry_data = { - "name": add_run_data["name"], - "suite_id": add_run_data["suite_id"], - "config_ids": config_ids, - "runs": [add_run_data], - } - else: - entry_data = add_run_data - response = self.client.send_post(f"add_plan_entry/{plan_id}", entry_data) - run_id = response.response_text["runs"][0]["id"] - return run_id, response.error_message def update_run( self, @@ -523,371 +257,24 @@ def update_run( refs: str = None, refs_action: str = "add", ) -> Tuple[dict, str]: - """ - Updates an existing run - :run_id: run id - :run_name: run name - :refs: references to manage - :refs_action: action to perform ('add', 'update', 'delete') - :returns: Tuple with run and error string. - """ - run_response = self.client.send_get(f"get_run/{run_id}") - if run_response.error_message: - return None, run_response.error_message - - existing_description = run_response.response_text.get("description", "") - existing_refs = run_response.response_text.get("refs", "") - - add_run_data = self.data_provider.add_run( - run_name, start_date=start_date, end_date=end_date, milestone_id=milestone_id - ) - add_run_data["description"] = existing_description # Retain the current description - - # Handle references based on action - if refs is not None: - updated_refs = self._manage_references(existing_refs, refs, refs_action) - add_run_data["refs"] = updated_refs - else: - add_run_data["refs"] = existing_refs # Keep existing refs if none provided - - existing_include_all = run_response.response_text.get("include_all", False) - add_run_data["include_all"] = existing_include_all - - if not existing_include_all: - # Only manage explicit case_ids when include_all=False - run_tests, error_message = self.__get_all_tests_in_run(run_id) - if error_message: - return None, f"Failed to get tests in run: {error_message}" - run_case_ids = [test["case_id"] for test in run_tests] - report_case_ids = add_run_data["case_ids"] - joint_case_ids = list(set(report_case_ids + run_case_ids)) - add_run_data["case_ids"] = joint_case_ids - else: - # include_all=True: TestRail includes all suite cases automatically - # Do NOT send case_ids array (TestRail ignores it anyway) - add_run_data.pop("case_ids", None) - - plan_id = run_response.response_text["plan_id"] - config_ids = run_response.response_text["config_ids"] - if not plan_id: - update_response = self.client.send_post(f"update_run/{run_id}", add_run_data) - elif plan_id and config_ids: - update_response = self.client.send_post(f"update_run_in_plan_entry/{run_id}", add_run_data) - else: - response = self.client.send_get(f"get_plan/{plan_id}") - entry_id = next( - ( - run["entry_id"] - for entry in response.response_text["entries"] - for run in entry["runs"] - if run["id"] == run_id - ), - None, - ) - update_response = self.client.send_post(f"update_plan_entry/{plan_id}/{entry_id}", add_run_data) - run_response = self.client.send_get(f"get_run/{run_id}") - return run_response.response_text, 
update_response.error_message + return self.run_handler.update_run(run_id, run_name, start_date, end_date, milestone_id, refs, refs_action) def _manage_references(self, existing_refs: str, new_refs: str, action: str) -> str: - """ - Manage references based on the specified action. - :existing_refs: current references in the run - :new_refs: new references to process - :action: 'add', 'update', or 'delete' - :returns: updated references string - """ - if not existing_refs: - existing_refs = "" - - if action == "update": - # Replace all references with new ones - return new_refs - elif action == "delete": - if not new_refs: - # Delete all references - return "" - else: - # Delete specific references - existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] - refs_to_delete = [ref.strip() for ref in new_refs.split(",") if ref.strip()] - updated_list = [ref for ref in existing_list if ref not in refs_to_delete] - return ",".join(updated_list) - else: # action == 'add' (default) - # Add new references to existing ones - if not existing_refs: - return new_refs - existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] - new_list = [ref.strip() for ref in new_refs.split(",") if ref.strip()] - # Avoid duplicates - combined_list = existing_list + [ref for ref in new_list if ref not in existing_list] - return ",".join(combined_list) + return self.run_handler._manage_references(existing_refs, new_refs, action) def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dict, List[str], List[str], str]: - """ - Append references to a test run, avoiding duplicates. - :param run_id: ID of the test run - :param references: List of references to append - :returns: Tuple with (run_data, added_refs, skipped_refs, error_message) - """ - # Get current run data - run_response = self.client.send_get(f"get_run/{run_id}") - if run_response.error_message: - return None, [], [], run_response.error_message - - existing_refs = run_response.response_text.get("refs", "") or "" - - # Parse existing and new references - existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] if existing_refs else [] - # Deduplicate input references - new_list = [] - seen = set() - for ref in references: - ref_clean = ref.strip() - if ref_clean and ref_clean not in seen: - new_list.append(ref_clean) - seen.add(ref_clean) - - # Determine which references are new vs duplicates - added_refs = [ref for ref in new_list if ref not in existing_list] - skipped_refs = [ref for ref in new_list if ref in existing_list] - - # If no new references to add, return current state - if not added_refs: - return run_response.response_text, added_refs, skipped_refs, None - - # Combine references - combined_list = existing_list + added_refs - combined_refs = ",".join(combined_list) - - if len(combined_refs) > 250: - return ( - None, - [], - [], - f"Combined references length ({len(combined_refs)} characters) exceeds 250 character limit", - ) - - update_data = {"refs": combined_refs} - - # Determine the correct API endpoint based on plan membership - plan_id = run_response.response_text.get("plan_id") - config_ids = run_response.response_text.get("config_ids") - - if not plan_id: - # Standalone run - update_response = self.client.send_post(f"update_run/{run_id}", update_data) - elif plan_id and config_ids: - # Run in plan with configurations - update_response = self.client.send_post(f"update_run_in_plan_entry/{run_id}", update_data) - else: - # Run in plan without 
configurations - need to use plan entry endpoint - plan_response = self.client.send_get(f"get_plan/{plan_id}") - if plan_response.error_message: - return None, [], [], f"Failed to get plan details: {plan_response.error_message}" - - # Find the entry_id for this run - entry_id = None - for entry in plan_response.response_text.get("entries", []): - for run in entry.get("runs", []): - if run["id"] == run_id: - entry_id = entry["id"] - break - if entry_id: - break - - if not entry_id: - return None, [], [], f"Could not find plan entry for run {run_id}" - - update_response = self.client.send_post(f"update_plan_entry/{plan_id}/{entry_id}", update_data) - - if update_response.error_message: - return None, [], [], update_response.error_message - - updated_run_response = self.client.send_get(f"get_run/{run_id}") - return updated_run_response.response_text, added_refs, skipped_refs, updated_run_response.error_message + return self.run_handler.append_run_references(run_id, references) def update_existing_case_references( self, case_id: int, junit_refs: str, strategy: str = "append" ) -> Tuple[bool, str, List[str], List[str]]: - """ - Update existing case references with values from JUnit properties. - :param case_id: ID of the test case - :param junit_refs: References from JUnit testrail_case_field property - :param strategy: 'append' or 'replace' - :returns: Tuple with (success, error_message, added_refs, skipped_refs) - """ - if not junit_refs or not junit_refs.strip(): - return True, None, [], [] # No references to process - - # Parse and validate JUnit references, deduplicating input - junit_ref_list = [] - seen = set() - for ref in junit_refs.split(","): - ref_clean = ref.strip() - if ref_clean and ref_clean not in seen: - junit_ref_list.append(ref_clean) - seen.add(ref_clean) - - if not junit_ref_list: - return False, "No valid references found in JUnit property", [], [] - - # Get current case data - case_response = self.client.send_get(f"get_case/{case_id}") - if case_response.error_message: - return False, case_response.error_message, [], [] - - existing_refs = case_response.response_text.get("refs", "") or "" - - if strategy == "replace": - # Replace strategy: use JUnit refs as-is - new_refs = ",".join(junit_ref_list) - added_refs = junit_ref_list - skipped_refs = [] - else: - # Append strategy: combine with existing refs, avoiding duplicates - existing_ref_list = ( - [ref.strip() for ref in existing_refs.split(",") if ref.strip()] if existing_refs else [] - ) - - # Determine which references are new vs duplicates - added_refs = [ref for ref in junit_ref_list if ref not in existing_ref_list] - skipped_refs = [ref for ref in junit_ref_list if ref in existing_ref_list] - - # If no new references to add, return current state - if not added_refs: - return True, None, added_refs, skipped_refs - - # Combine references - combined_list = existing_ref_list + added_refs - new_refs = ",".join(combined_list) - - # Validate 2000 character limit for test case references - if len(new_refs) > 2000: - return ( - False, - f"Combined references length ({len(new_refs)} characters) exceeds 2000 character limit", - [], - [], - ) - - # Update the case - update_data = {"refs": new_refs} - update_response = self.client.send_post(f"update_case/{case_id}", update_data) - - if update_response.error_message: - return False, update_response.error_message, [], [] + return self.case_handler.update_existing_case_references(case_id, junit_refs, strategy) - return True, None, added_refs, skipped_refs - - def 
upload_attachments(self, report_results: [Dict], results: List[Dict], run_id: int): - """Getting test result id and upload attachments for it.""" - tests_in_run, error = self.__get_all_tests_in_run(run_id) - if not error: - failed_uploads = [] - for report_result in report_results: - case_id = report_result["case_id"] - test_id = next((test["id"] for test in tests_in_run if test["case_id"] == case_id), None) - result_id = next((result["id"] for result in results if result["test_id"] == test_id), None) - for file_path in report_result.get("attachments"): - try: - with open(file_path, "rb") as file: - response = self.client.send_post( - f"add_attachment_to_result/{result_id}", files={"attachment": file} - ) - - # Check if upload was successful - if response.status_code != 200: - file_name = os.path.basename(file_path) - - # Handle 413 Request Entity Too Large specifically - if response.status_code == 413: - error_msg = FAULT_MAPPING["attachment_too_large"].format( - file_name=file_name, case_id=case_id - ) - self.environment.elog(error_msg) - failed_uploads.append(f"{file_name} (case {case_id})") - else: - # Handle other HTTP errors - error_msg = FAULT_MAPPING["attachment_upload_failed"].format( - file_path=file_name, - case_id=case_id, - error_message=response.error_message or f"HTTP {response.status_code}", - ) - self.environment.elog(error_msg) - failed_uploads.append(f"{file_name} (case {case_id})") - except FileNotFoundError: - self.environment.elog(f"Attachment file not found: {file_path} (case {case_id})") - failed_uploads.append(f"{file_path} (case {case_id})") - except Exception as ex: - file_name = os.path.basename(file_path) if os.path.exists(file_path) else file_path - self.environment.elog(f"Error uploading attachment '{file_name}' for case {case_id}: {ex}") - failed_uploads.append(f"{file_name} (case {case_id})") - - # Provide a summary if there were failed uploads - if failed_uploads: - self.environment.log(f"\nWarning: {len(failed_uploads)} attachment(s) failed to upload.") - else: - self.environment.elog(f"Unable to upload attachments due to API request error: {error}") + def upload_attachments(self, report_results: List[Dict], results: List[Dict], run_id: int): + return self.result_handler.upload_attachments(report_results, results, run_id) def add_results(self, run_id: int) -> Tuple[List, str, int]: - """ - Adds one or more new test results. - :run_id: run id - :returns: Tuple with dict created resources, error string, and results count. - """ - responses = [] - error_message = "" - # Get pre-validated user IDs if available - user_ids = getattr(self.environment, "_validated_user_ids", []) - - add_results_data_chunks = self.data_provider.add_results_for_cases(self.environment.batch_size, user_ids) - # Get assigned count from data provider - assigned_count = getattr(self.data_provider, "_assigned_count", 0) - - results_amount = sum([len(results["results"]) for results in add_results_data_chunks]) - - with self.environment.get_progress_bar(results_amount=results_amount, prefix="Adding results") as progress_bar: - with ThreadPoolExecutor(max_workers=MAX_WORKERS_ADD_RESULTS) as executor: - futures = { - executor.submit(self.client.send_post, f"add_results_for_cases/{run_id}", body): body - for body in add_results_data_chunks - } - responses, error_message = self.handle_futures( - futures=futures, - action_string="add_results", - progress_bar=progress_bar, - ) - if error_message: - # When error_message is present we cannot be sure that responses contains all added items. 
- # Iterate through futures to get all responses from done tasks (not cancelled) - responses = ApiRequestHandler.retrieve_results_after_cancelling(futures) - responses = [response.response_text for response in responses] - results = [result for results_list in responses for result in results_list] - report_results_w_attachments = [] - for results_data_chunk in add_results_data_chunks: - for test_result in results_data_chunk["results"]: - if test_result["attachments"]: - report_results_w_attachments.append(test_result) - if report_results_w_attachments: - attachments_count = 0 - for result in report_results_w_attachments: - attachments_count += len(result["attachments"]) - self.environment.log( - f"Uploading {attachments_count} attachments " f"for {len(report_results_w_attachments)} test results." - ) - self.upload_attachments(report_results_w_attachments, results, run_id) - else: - self.environment.log(f"No attachments found to upload.") - - # Log assignment results if assignment was performed - if user_ids: - total_failed = getattr(self.data_provider, "_total_failed_count", assigned_count) - if assigned_count > 0: - self.environment.log(f"Assigning failed results: {assigned_count}/{total_failed}, Done.") - else: - self.environment.log(f"Assigning failed results: 0/0, Done.") - - return responses, error_message, progress_bar.n + return self.result_handler.add_results(run_id) def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, str]: responses = [] @@ -923,69 +310,27 @@ def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, st return responses, error_message def close_run(self, run_id: int) -> Tuple[dict, str]: - """ - Closes an existing test run and archives its tests & results. - :run_id: run id - :returns: Tuple with dict created resources and error string. - """ - body = {"run_id": run_id} - response = self.client.send_post(f"close_run/{run_id}", body) - return response.response_text, response.error_message + return self.run_handler.close_run(run_id) def delete_suite(self, suite_id: int) -> Tuple[dict, str]: - """ - Delete suite given suite id - :suite_id: suite id - :returns: Tuple with dict created resources and error string. - """ - response = self.client.send_post(f"delete_suite/{suite_id}", payload={}) - return response.response_text, response.error_message + return self.suite_handler.delete_suite(suite_id) def delete_sections(self, added_sections: List[Dict]) -> Tuple[List, str]: - """ - Delete section given add_sections response - :suite_id: section id - :returns: Tuple with dict created resources and error string. - """ - responses = [] - error_message = "" - for section in added_sections: - response = self.client.send_post(f"delete_section/{section['section_id']}", payload={}) - if not response.error_message: - responses.append(response.response_text) - else: - error_message = response.error_message - break - return responses, error_message + return self.section_handler.delete_sections(added_sections) def delete_cases(self, suite_id: int, added_cases: List[Dict]) -> Tuple[Dict, str]: - """ - Delete cases given add_cases response - :suite_id: section id - :returns: Tuple with dict created resources and error string. 
- """ - body = {"case_ids": [case["case_id"] for case in added_cases]} - response = self.client.send_post(f"delete_cases/{suite_id}", payload=body) - return response.response_text, response.error_message + return self.case_handler.delete_cases(suite_id, added_cases) def delete_run(self, run_id) -> Tuple[dict, str]: - """ - Delete run given add_run response - :suite_id: section id - :returns: Tuple with dict created resources and error string. - """ - response = self.client.send_post(f"delete_run/{run_id}", payload={}) - return response.response_text, response.error_message + return self.run_handler.delete_run(run_id) @staticmethod def retrieve_results_after_cancelling(futures) -> list: - responses = [] - for future in as_completed(futures): - if not future.cancelled(): - response = future.result() - if not response.error_message: - responses.append(response) - return responses + """ + Retrieve results from futures after cancellation has been triggered. + Delegated to ResultHandler for backward compatibility. + """ + return ResultHandler.retrieve_results_after_cancelling(futures) def get_user_by_email(self, email: str) -> Tuple[Union[int, None], str]: """ @@ -1040,18 +385,7 @@ def get_user_by_email(self, email: str) -> Tuple[Union[int, None], str]: return None, f"API error (status {response.status_code}) when validating user: {email}" def _add_case_and_update_data(self, case: TestRailCase) -> APIClientResult: - case_body = case.to_dict() - active_field = getattr(self, "_active_automation_id_field", None) - if active_field == UPDATED_SYSTEM_NAME_AUTOMATION_ID and OLD_SYSTEM_NAME_AUTOMATION_ID in case_body: - case_body[UPDATED_SYSTEM_NAME_AUTOMATION_ID] = case_body.pop(OLD_SYSTEM_NAME_AUTOMATION_ID) - if self.environment.case_matcher != MatchersParser.AUTO and OLD_SYSTEM_NAME_AUTOMATION_ID in case_body: - case_body.pop(OLD_SYSTEM_NAME_AUTOMATION_ID) - response = self.client.send_post(f"add_case/{case_body.pop('section_id')}", case_body) - if response.status_code == 200: - case.case_id = response.response_text["id"] - case.result.case_id = response.response_text["id"] - case.section_id = response.response_text["section_id"] - return response + return self.case_handler._add_case_and_update_data(case) def __cancel_running_futures(self, futures, action_string): self.environment.log(f"\nAborting: {action_string}. 
Trying to cancel scheduled tasks.") @@ -1205,7 +539,6 @@ def __get_all_entities_parallel(self, entity: str, link: str) -> Tuple[List[Dict next_link = response.response_text["_links"]["next"] # Extract offset/limit from the link to calculate total pages - import re from urllib.parse import urlparse, parse_qs # Parse the next link to get offset and limit @@ -1434,799 +767,67 @@ def check_case_exists(case_id): return valid_ids - # Label management methods + # Label management methods (delegated to LabelManager for backward compatibility) def add_label(self, project_id: int, title: str) -> Tuple[dict, str]: - """ - Add a new label to the project - :param project_id: ID of the project - :param title: Title of the label (max 20 characters) - :returns: Tuple with created label data and error string - """ - payload = {"title": title} - response = self.client.send_post(f"add_label/{project_id}", payload=payload) - return response.response_text, response.error_message + return self.label_manager.add_label(project_id, title) def update_label(self, label_id: int, project_id: int, title: str) -> Tuple[dict, str]: - """ - Update an existing label - :param label_id: ID of the label to update - :param project_id: ID of the project - :param title: New title for the label (max 20 characters) - :returns: Tuple with updated label data and error string - """ - payload = {"project_id": project_id, "title": title} - response = self.client.send_post(f"update_label/{label_id}", payload=payload) - return response.response_text, response.error_message + return self.label_manager.update_label(label_id, project_id, title) def get_label(self, label_id: int) -> Tuple[dict, str]: - """ - Get a specific label by ID - :param label_id: ID of the label to retrieve - :returns: Tuple with label data and error string - """ - response = self.client.send_get(f"get_label/{label_id}") - return response.response_text, response.error_message + return self.label_manager.get_label(label_id) def get_labels(self, project_id: int, offset: int = 0, limit: int = 250) -> Tuple[dict, str]: - """ - Get all labels for a project with pagination - :param project_id: ID of the project - :param offset: Offset for pagination - :param limit: Limit for pagination - :returns: Tuple with labels data (including pagination info) and error string - """ - params = [] - if offset > 0: - params.append(f"offset={offset}") - if limit != 250: - params.append(f"limit={limit}") - - url = f"get_labels/{project_id}" - if params: - url += "&" + "&".join(params) - - response = self.client.send_get(url) - return response.response_text, response.error_message + return self.label_manager.get_labels(project_id, offset, limit) def delete_label(self, label_id: int) -> Tuple[bool, str]: - """ - Delete a single label - :param label_id: ID of the label to delete - :returns: Tuple with success status and error string - """ - response = self.client.send_post(f"delete_label/{label_id}") - success = response.status_code == 200 - return success, response.error_message + return self.label_manager.delete_label(label_id) def delete_labels(self, label_ids: List[int]) -> Tuple[bool, str]: - """ - Delete multiple labels - :param label_ids: List of label IDs to delete - :returns: Tuple with success status and error string - """ - payload = {"label_ids": label_ids} - response = self.client.send_post("delete_labels", payload=payload) - success = response.status_code == 200 - return success, response.error_message + return self.label_manager.delete_labels(label_ids) def add_labels_to_cases( 
self, case_ids: List[int], title: str, project_id: int, suite_id: int = None ) -> Tuple[dict, str]: - """ - Add a label to multiple test cases - - :param case_ids: List of test case IDs - :param title: Label title (max 20 characters) - :param project_id: Project ID for validation - :param suite_id: Suite ID (optional) - :returns: Tuple with response data and error string - """ - # Initialize results structure - results = {"successful_cases": [], "failed_cases": [], "max_labels_reached": [], "case_not_found": []} - - # Check if project is multi-suite by getting all cases without suite_id - all_cases_no_suite, error_message = self.__get_all_cases(project_id, None) - if error_message: - return results, error_message - - # Check if project has multiple suites - suite_ids = set() - for case in all_cases_no_suite: - if "suite_id" in case and case["suite_id"]: - suite_ids.add(case["suite_id"]) - - # If project has multiple suites and no suite_id provided, require it - if len(suite_ids) > 1 and suite_id is None: - return results, "This project is multisuite, suite id is required" - - # Get all cases to validate that the provided case IDs exist - all_cases, error_message = self.__get_all_cases(project_id, suite_id) - if error_message: - return results, error_message - - # Create a set of existing case IDs for quick lookup - existing_case_ids = {case["id"] for case in all_cases} - - # Validate case IDs and separate valid from invalid ones - invalid_case_ids = [case_id for case_id in case_ids if case_id not in existing_case_ids] - valid_case_ids = [case_id for case_id in case_ids if case_id in existing_case_ids] - - # Record invalid case IDs - for case_id in invalid_case_ids: - results["case_not_found"].append(case_id) - - # If no valid case IDs, return early - if not valid_case_ids: - return results, "" - - # Check if label exists or create it - existing_labels, error_message = self.get_labels(project_id) - if error_message: - return results, error_message - - # Find existing label with the same title - label_id = None - for label in existing_labels.get("labels", []): - if label.get("title") == title: - label_id = label.get("id") - break - - # Create label if it doesn't exist - if label_id is None: - label_data, error_message = self.add_label(project_id, title) - if error_message: - return results, error_message - label_info = label_data.get("label", label_data) - label_id = label_info.get("id") - - # Collect case data and validate constraints - cases_to_update = [] - for case_id in valid_case_ids: - # Get current case to check existing labels - case_response = self.client.send_get(f"get_case/{case_id}") - if case_response.status_code != 200: - results["failed_cases"].append( - {"case_id": case_id, "error": f"Could not retrieve case {case_id}: {case_response.error_message}"} - ) - continue - - case_data = case_response.response_text - current_labels = case_data.get("labels", []) - - # Check if label already exists on this case - if any(label.get("id") == label_id for label in current_labels): - results["successful_cases"].append( - {"case_id": case_id, "message": f"Label '{title}' already exists on case {case_id}"} - ) - continue - - # Check maximum labels limit (10) - if len(current_labels) >= 10: - results["max_labels_reached"].append(case_id) - continue - - # Prepare case for update - existing_label_ids = [label.get("id") for label in current_labels if label.get("id")] - updated_label_ids = existing_label_ids + [label_id] - cases_to_update.append({"case_id": case_id, "labels": updated_label_ids}) 
- - # Update cases using appropriate endpoint - if len(cases_to_update) == 1: - # Single case: use update_case/{case_id} - case_info = cases_to_update[0] - case_update_data = {"labels": case_info["labels"]} - - update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - - if update_response.status_code == 200: - results["successful_cases"].append( - { - "case_id": case_info["case_id"], - "message": f"Successfully added label '{title}' to case {case_info['case_id']}", - } - ) - else: - results["failed_cases"].append( - {"case_id": case_info["case_id"], "error": update_response.error_message} - ) - elif len(cases_to_update) > 1: - # Multiple cases: use update_cases/{suite_id} - # Need to determine suite_id from the cases - case_suite_id = suite_id - if not case_suite_id: - # Get suite_id from the first case if not provided - first_case = all_cases[0] if all_cases else None - case_suite_id = first_case.get("suite_id") if first_case else None - - if not case_suite_id: - # Fall back to individual updates if no suite_id available - for case_info in cases_to_update: - case_update_data = {"labels": case_info["labels"]} - update_response = self.client.send_post( - f"update_case/{case_info['case_id']}", payload=case_update_data - ) - - if update_response.status_code == 200: - results["successful_cases"].append( - { - "case_id": case_info["case_id"], - "message": f"Successfully added label '{title}' to case {case_info['case_id']}", - } - ) - else: - results["failed_cases"].append( - {"case_id": case_info["case_id"], "error": update_response.error_message} - ) - else: - # Batch update using update_cases/{suite_id} - batch_update_data = { - "case_ids": [case_info["case_id"] for case_info in cases_to_update], - "labels": cases_to_update[0]["labels"], # Assuming same labels for all cases - } - - batch_response = self.client.send_post(f"update_cases/{case_suite_id}", payload=batch_update_data) - - if batch_response.status_code == 200: - for case_info in cases_to_update: - results["successful_cases"].append( - { - "case_id": case_info["case_id"], - "message": f"Successfully added label '{title}' to case {case_info['case_id']}", - } - ) - else: - # If batch update fails, fall back to individual updates - for case_info in cases_to_update: - case_update_data = {"labels": case_info["labels"]} - update_response = self.client.send_post( - f"update_case/{case_info['case_id']}", payload=case_update_data - ) - - if update_response.status_code == 200: - results["successful_cases"].append( - { - "case_id": case_info["case_id"], - "message": f"Successfully added label '{title}' to case {case_info['case_id']}", - } - ) - else: - results["failed_cases"].append( - {"case_id": case_info["case_id"], "error": update_response.error_message} - ) - - return results, "" + return self.label_manager.add_labels_to_cases( + case_ids, title, project_id, suite_id, get_all_cases_callback=self.__get_all_cases + ) def get_cases_by_label( self, project_id: int, suite_id: int = None, label_ids: List[int] = None, label_title: str = None ) -> Tuple[List[dict], str]: - """ - Get test cases filtered by label ID or title - - :param project_id: Project ID - :param suite_id: Suite ID (optional) - :param label_ids: List of label IDs to filter by - :param label_title: Label title to filter by - :returns: Tuple with list of matching cases and error string - """ - # Get all cases first - all_cases, error_message = self.__get_all_cases(project_id, suite_id) - if error_message: - return [], error_message 
- - # If filtering by title, first get the label ID - target_label_ids = label_ids or [] - if label_title and not target_label_ids: - labels_data, error_message = self.get_labels(project_id) - if error_message: - return [], error_message - - for label in labels_data.get("labels", []): - if label.get("title") == label_title: - target_label_ids.append(label.get("id")) - - if not target_label_ids: - return [], "" # No label found is a valid case with 0 results - - # Filter cases that have any of the target labels - matching_cases = [] - for case in all_cases: - case_labels = case.get("labels", []) - case_label_ids = [label.get("id") for label in case_labels] - - # Check if any of the target label IDs are present in this case - if any(label_id in case_label_ids for label_id in target_label_ids): - matching_cases.append(case) - - return matching_cases, "" + return self.label_manager.get_cases_by_label( + project_id, suite_id, label_ids, label_title, get_all_cases_callback=self.__get_all_cases + ) def add_labels_to_tests( self, test_ids: List[int], titles: Union[str, List[str]], project_id: int ) -> Tuple[dict, str]: - """ - Add labels to multiple tests - - :param test_ids: List of test IDs - :param titles: Label title(s) - can be a single string or list of strings (max 20 characters each) - :param project_id: Project ID for validation - :returns: Tuple with response data and error string - """ - # Initialize results structure - results = {"successful_tests": [], "failed_tests": [], "max_labels_reached": [], "test_not_found": []} - - # Normalize titles to a list - if isinstance(titles, str): - title_list = [titles] - else: - title_list = titles - - # At this point, title_list should already be validated by the CLI - # Just ensure we have clean titles - title_list = [title.strip() for title in title_list if title.strip()] - - if not title_list: - return {}, "No valid labels provided" - - # Validate test IDs by getting run information for each test - valid_test_ids = [] - for test_id in test_ids: - # Get test information to validate it exists - test_response = self.client.send_get(f"get_test/{test_id}") - if test_response.status_code != 200: - results["test_not_found"].append(test_id) - continue - - test_data = test_response.response_text - # Validate that the test belongs to the correct project - run_id = test_data.get("run_id") - if run_id: - run_response = self.client.send_get(f"get_run/{run_id}") - if run_response.status_code == 200: - run_data = run_response.response_text - if run_data.get("project_id") == project_id: - valid_test_ids.append(test_id) - else: - results["test_not_found"].append(test_id) - else: - results["test_not_found"].append(test_id) - else: - results["test_not_found"].append(test_id) - - # If no valid test IDs, return early - if not valid_test_ids: - return results, "" - - # Check if labels exist or create them - existing_labels, error_message = self.get_labels(project_id) - if error_message: - return results, error_message - - # Process each title to get/create label IDs - label_ids = [] - label_id_to_title = {} # Map label IDs to their titles - for title in title_list: - # Find existing label with the same title - label_id = None - for label in existing_labels.get("labels", []): - if label.get("title") == title: - label_id = label.get("id") - break - - # Create label if it doesn't exist - if label_id is None: - label_data, error_message = self.add_label(project_id, title) - if error_message: - return results, error_message - label_info = label_data.get("label", 
label_data) - label_id = label_info.get("id") - - if label_id: - label_ids.append(label_id) - label_id_to_title[label_id] = title - - # Collect test data and validate constraints - tests_to_update = [] - for test_id in valid_test_ids: - # Get current test to check existing labels - test_response = self.client.send_get(f"get_test/{test_id}") - if test_response.status_code != 200: - results["failed_tests"].append( - {"test_id": test_id, "error": f"Could not retrieve test {test_id}: {test_response.error_message}"} - ) - continue - - test_data = test_response.response_text - current_labels = test_data.get("labels", []) - current_label_ids = [label.get("id") for label in current_labels if label.get("id")] - - new_label_ids = [] - already_exists_titles = [] - - for label_id in label_ids: - if label_id not in current_label_ids: - new_label_ids.append(label_id) - else: - if label_id in label_id_to_title: - already_exists_titles.append(label_id_to_title[label_id]) - - if not new_label_ids: - results["successful_tests"].append( - { - "test_id": test_id, - "message": f"All labels already exist on test {test_id}: {', '.join(already_exists_titles)}", - } - ) - continue - - # Check maximum labels limit (10) - if len(current_label_ids) + len(new_label_ids) > 10: - results["max_labels_reached"].append(test_id) - continue - - # Prepare test for update - updated_label_ids = current_label_ids + new_label_ids - - new_label_titles = [] - for label_id in new_label_ids: - if label_id in label_id_to_title: - new_label_titles.append(label_id_to_title[label_id]) - - tests_to_update.append( - { - "test_id": test_id, - "labels": updated_label_ids, - "new_labels": new_label_ids, - "new_label_titles": new_label_titles, - } - ) - - # Update tests using appropriate endpoint - if len(tests_to_update) == 1: - # Single test: use update_test/{test_id} - test_info = tests_to_update[0] - test_update_data = {"labels": test_info["labels"]} - - update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) - - if update_response.status_code == 200: - new_label_titles = test_info.get("new_label_titles", []) - new_label_count = len(new_label_titles) - - if new_label_count == 1: - message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" - elif new_label_count > 1: - message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" - else: - message = f"No new labels added to test {test_info['test_id']}" - - results["successful_tests"].append({"test_id": test_info["test_id"], "message": message}) - else: - results["failed_tests"].append( - {"test_id": test_info["test_id"], "error": update_response.error_message} - ) - else: - # Multiple tests: use individual updates to ensure each test gets its specific labels - for test_info in tests_to_update: - test_update_data = {"labels": test_info["labels"]} - update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) - - if update_response.status_code == 200: - new_label_titles = test_info.get("new_label_titles", []) - new_label_count = len(new_label_titles) - - if new_label_count == 1: - message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" - elif new_label_count > 1: - message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" - else: - message = f"No new labels added to test {test_info['test_id']}" - - 
results["successful_tests"].append({"test_id": test_info["test_id"], "message": message}) - else: - results["failed_tests"].append( - {"test_id": test_info["test_id"], "error": update_response.error_message} - ) - - return results, "" + return self.label_manager.add_labels_to_tests(test_ids, titles, project_id) def get_tests_by_label( self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None ) -> Tuple[List[dict], str]: - """ - Get tests filtered by label ID or title from specific runs - - :param project_id: Project ID - :param label_ids: List of label IDs to filter by - :param label_title: Label title to filter by - :param run_ids: List of run IDs to filter tests from (optional, defaults to all runs) - :returns: Tuple with list of matching tests and error string - """ - # If filtering by title, first get the label ID - target_label_ids = label_ids or [] - if label_title and not target_label_ids: - labels_data, error_message = self.get_labels(project_id) - if error_message: - return [], error_message - - for label in labels_data.get("labels", []): - if label.get("title") == label_title: - target_label_ids.append(label.get("id")) - - if not target_label_ids: - return [], "" # No label found is a valid case with 0 results - - # Get runs for the project (either all runs or specific run IDs) - if run_ids: - # Use specific run IDs - validate they exist by getting run details - runs = [] - for run_id in run_ids: - run_response = self.client.send_get(f"get_run/{run_id}") - if run_response.status_code == 200: - runs.append(run_response.response_text) - else: - return [], f"Run ID {run_id} not found or inaccessible" - else: - # Get all runs for the project - runs_response = self.client.send_get(f"get_runs/{project_id}") - if runs_response.status_code != 200: - return [], runs_response.error_message - - runs_data = runs_response.response_text - runs = runs_data.get("runs", []) if isinstance(runs_data, dict) else runs_data - - # Collect all tests from all runs - matching_tests = [] - for run in runs: - run_id = run.get("id") - if not run_id: - continue - - # Get tests for this run - tests_response = self.client.send_get(f"get_tests/{run_id}") - if tests_response.status_code != 200: - continue # Skip this run if we can't get tests - - tests_data = tests_response.response_text - tests = tests_data.get("tests", []) if isinstance(tests_data, dict) else tests_data - - # Filter tests that have any of the target labels - for test in tests: - test_labels = test.get("labels", []) - test_label_ids = [label.get("id") for label in test_labels] - - # Check if any of the target label IDs are present in this test - if any(label_id in test_label_ids for label_id in target_label_ids): - matching_tests.append(test) - - return matching_tests, "" + return self.label_manager.get_tests_by_label(project_id, label_ids, label_title, run_ids) def get_test_labels(self, test_ids: List[int]) -> Tuple[List[dict], str]: - """ - Get labels for specific tests - - :param test_ids: List of test IDs to get labels for - :returns: Tuple with list of test label information and error string - """ - results = [] - - for test_id in test_ids: - # Get test information - test_response = self.client.send_get(f"get_test/{test_id}") - if test_response.status_code != 200: - results.append({"test_id": test_id, "error": f"Test {test_id} not found or inaccessible", "labels": []}) - continue - - test_data = test_response.response_text - test_labels = test_data.get("labels", []) - - results.append( - { - 
"test_id": test_id, - "title": test_data.get("title", "Unknown"), - "status_id": test_data.get("status_id"), - "labels": test_labels, - "error": None, - } - ) - - return results, "" + return self.label_manager.get_test_labels(test_ids) - # Test case reference management methods + # Test case reference management methods (delegated to ReferenceManager for backward compatibility) def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool, str]: - """ - Add references to a test case - :param case_id: ID of the test case - :param references: List of references to add - :returns: Tuple with success status and error string - """ - # First get the current test case to retrieve existing references - case_response = self.client.send_get(f"get_case/{case_id}") - if case_response.status_code != 200: - return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}" - - case_data = case_response.response_text - existing_refs = case_data.get("refs", "") or "" - - # Parse existing references - existing_ref_list = [] - if existing_refs: - existing_ref_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] - - # Deduplicate input references while preserving order - deduplicated_input = [] - seen = set() - for ref in references: - ref_clean = ref.strip() - if ref_clean and ref_clean not in seen: - deduplicated_input.append(ref_clean) - seen.add(ref_clean) - - # Add new references (avoid duplicates with existing) - all_refs = existing_ref_list.copy() - for ref in deduplicated_input: - if ref not in all_refs: - all_refs.append(ref) - - # Join all references - new_refs_string = ",".join(all_refs) - - # Validate total character limit - if len(new_refs_string) > 2000: - return False, f"Total references length ({len(new_refs_string)} characters) exceeds 2000 character limit" - - # Update the test case with new references - update_data = {"refs": new_refs_string} - update_response = self.client.send_post(f"update_case/{case_id}", update_data) - - if update_response.status_code == 200: - return True, "" - else: - return False, update_response.error_message + return self.reference_manager.add_case_references(case_id, references) def update_case_references(self, case_id: int, references: List[str]) -> Tuple[bool, str]: - """ - Update references on a test case by replacing existing ones - :param case_id: ID of the test case - :param references: List of references to replace existing ones - :returns: Tuple with success status and error string - """ - # Deduplicate input references while preserving order - deduplicated_refs = [] - seen = set() - for ref in references: - ref_clean = ref.strip() - if ref_clean and ref_clean not in seen: - deduplicated_refs.append(ref_clean) - seen.add(ref_clean) - - # Join references - new_refs_string = ",".join(deduplicated_refs) - - # Validate total character limit - if len(new_refs_string) > 2000: - return False, f"Total references length ({len(new_refs_string)} characters) exceeds 2000 character limit" - - # Update the test case with new references - update_data = {"refs": new_refs_string} - update_response = self.client.send_post(f"update_case/{case_id}", update_data) - - if update_response.status_code == 200: - return True, "" - else: - return False, update_response.error_message + return self.reference_manager.update_case_references(case_id, references) def delete_case_references(self, case_id: int, specific_references: List[str] = None) -> Tuple[bool, str]: - """ - Delete all or specific references from a test case - 
:param case_id: ID of the test case - :param specific_references: List of specific references to delete (None to delete all) - :returns: Tuple with success status and error string - """ - if specific_references is None: - # Delete all references by setting refs to empty string - update_data = {"refs": ""} - else: - # First get the current test case to retrieve existing references - case_response = self.client.send_get(f"get_case/{case_id}") - if case_response.status_code != 200: - return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}" - - case_data = case_response.response_text - existing_refs = case_data.get("refs", "") or "" - - if not existing_refs: - # No references to delete - return True, "" - - # Parse existing references - existing_ref_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] - - # Deduplicate input references for efficient processing - refs_to_delete = set(ref.strip() for ref in specific_references if ref.strip()) - - # Remove specific references - remaining_refs = [ref for ref in existing_ref_list if ref not in refs_to_delete] - - # Join remaining references - new_refs_string = ",".join(remaining_refs) - update_data = {"refs": new_refs_string} - - # Update the test case - update_response = self.client.send_post(f"update_case/{case_id}", update_data) - - if update_response.status_code == 200: - return True, "" - else: - return False, update_response.error_message + return self.reference_manager.delete_case_references(case_id, specific_references) def update_case_automation_id(self, case_id: int, automation_id: str) -> Tuple[bool, str]: - """ - Update the automation_id field of a test case - - Args: - case_id: TestRail test case ID - automation_id: Automation ID value to set - - Returns: - Tuple of (success, error_message) - - success: True if update succeeded, False otherwise - - error_message: Empty string on success, error details on failure - - API Endpoint: POST /api/v2/update_case/{case_id} - """ - self.environment.vlog(f"Setting automation_id '{automation_id}' on case {case_id}") - - update_data = {"custom_automation_id": automation_id} - update_response = self.client.send_post(f"update_case/{case_id}", update_data) - - if update_response.status_code == 200: - return True, "" - else: - error_msg = ( - update_response.error_message or f"Failed to update automation_id (HTTP {update_response.status_code})" - ) - return False, error_msg + return self.case_handler.update_case_automation_id(case_id, automation_id) def add_bdd(self, section_id: int, feature_content: str) -> Tuple[List[int], str]: - """ - Upload .feature file to TestRail BDD endpoint - - Creates TestRail test case from Gherkin .feature content. - The Gherkin content is sent in the request body as plain text. 
- - Args: - section_id: TestRail section ID where test case will be created - feature_content: Raw .feature file content (Gherkin syntax) - - Returns: - Tuple of (case_ids, error_message) - - case_ids: List containing the created test case ID - - error_message: Empty string on success, error details on failure - - API Endpoint: POST /api/v2/add_bdd/{section_id} - Request Body: Raw Gherkin text - Response: Standard TestRail test case JSON with BDD custom fields - """ - # Send Gherkin content as file upload (multipart/form-data) - # TestRail expects the .feature file as an attachment - self.environment.vlog(f"Uploading .feature file to add_bdd/{section_id}") - - files = {"attachment": ("feature.feature", feature_content, "text/plain")} - - response = self.client.send_post(f"add_bdd/{section_id}", payload=None, files=files) - - if response.status_code == 200: - # Response is a test case object with 'id' field - if isinstance(response.response_text, dict): - case_id = response.response_text.get("id") - if case_id: - return [case_id], "" - else: - return [], "Response missing 'id' field" - else: - return [], "Unexpected response format" - else: - error_msg = response.error_message or f"Failed to upload feature file (HTTP {response.status_code})" - return [], error_msg + return self.bdd_handler.add_bdd(section_id, feature_content) def update_bdd(self, case_id: int, feature_content: str) -> Tuple[List[int], str]: """ @@ -2269,112 +870,10 @@ def update_bdd(self, case_id: int, feature_content: str) -> Tuple[List[int], str return [], error_msg def get_bdd(self, case_id: int) -> Tuple[str, str]: - """ - Retrieve BDD test case as .feature file content - - Args: - case_id: TestRail test case ID - - Returns: - Tuple of (feature_content, error_message) - - feature_content: .feature file content (Gherkin syntax) - - error_message: Empty string on success, error details on failure - - API Endpoint: GET /api/v2/get_bdd/{case_id} - Response: Raw Gherkin text - """ - self.environment.vlog(f"Retrieving BDD test case from get_bdd/{case_id}") - response = self.client.send_get(f"get_bdd/{case_id}") - - if response.status_code == 200: - # TestRail returns raw Gherkin text (not JSON) - # APIClient treats non-JSON as error and stores str(response.content) - if isinstance(response.response_text, dict): - # Some versions might return JSON with 'feature' field - feature_content = response.response_text.get("feature", "") - elif isinstance(response.response_text, str) and response.response_text.startswith("b'"): - # APIClient converted bytes to string representation: "b'text'" - # Need to extract the actual content - try: - # Remove b' prefix and ' suffix, then decode escape sequences - feature_content = response.response_text[2:-1].encode().decode("unicode_escape") - except (ValueError, AttributeError): - feature_content = response.response_text - else: - # Plain text response - feature_content = response.response_text - - return feature_content, "" - else: - error_msg = response.error_message or f"Failed to retrieve BDD test case (HTTP {response.status_code})" - return "", error_msg + return self.bdd_handler.get_bdd(case_id) def get_bdd_template_id(self, project_id: int) -> Tuple[int, str]: - """ - Get the BDD template ID for a project - - Args: - project_id: TestRail project ID - - Returns: - Tuple of (template_id, error_message) - - template_id: BDD template ID if found, None otherwise - - error_message: Empty string on success, error details on failure - - API Endpoint: GET /api/v2/get_templates/{project_id} - """ 
- self.environment.vlog(f"Getting templates for project {project_id}") - response = self.client.send_get(f"get_templates/{project_id}") - - if response.status_code == 200: - templates = response.response_text - if isinstance(templates, list): - self.environment.vlog(f"Retrieved {len(templates)} template(s) from TestRail") - - # Log all available templates for debugging - if templates: - self.environment.vlog("Available templates:") - for template in templates: - template_id = template.get("id") - template_name = template.get("name", "") - self.environment.vlog(f" - ID {template_id}: '{template_name}'") - - # Look for BDD template by name - for template in templates: - template_name = template.get("name", "").strip() - template_name_lower = template_name.lower() - template_id = template.get("id") - - self.environment.vlog(f"Checking template '{template_name}' (ID: {template_id})") - self.environment.vlog(f" Lowercase: '{template_name_lower}'") - - # Check for BDD template (support both US and UK spellings) - if ( - "behavior" in template_name_lower - or "behaviour" in template_name_lower - or "bdd" in template_name_lower - ): - self.environment.vlog(f" ✓ MATCH: This is the BDD template!") - self.environment.log(f"Found BDD template: '{template_name}' (ID: {template_id})") - return template_id, "" - else: - self.environment.vlog(f" ✗ No match: Does not contain 'behavior', 'behaviour', or 'bdd'") - - # Build detailed error message with available templates - error_parts = ["BDD template not found. Please enable BDD template in TestRail project settings."] - if templates: - template_list = ", ".join([f"'{t.get('name', 'Unknown')}'" for t in templates]) - error_parts.append(f"Available templates: {template_list}") - error_parts.append("The BDD template name should contain 'behavior', 'behaviour', or 'bdd'.") - else: - error_parts.append("No templates are available in this project.") - - return None, "\n".join(error_parts) - else: - return None, "Unexpected response format from get_templates" - else: - error_msg = response.error_message or f"Failed to get templates (HTTP {response.status_code})" - return None, error_msg + return self.bdd_handler.get_bdd_template_id(project_id) def find_bdd_case_by_name( self, feature_name: str, project_id: int, suite_id: int @@ -2556,54 +1055,4 @@ def get_bdd_result_field_name(self) -> str: def add_case_bdd( self, section_id: int, title: str, bdd_content: str, template_id: int, tags: List[str] = None ) -> Tuple[int, str]: - """ - Create a BDD test case with Gherkin content - - Args: - section_id: TestRail section ID where test case will be created - title: Test case title (scenario name) - bdd_content: Gherkin scenario content - template_id: BDD template ID - tags: Optional list of tags (for refs field) - - Returns: - Tuple of (case_id, error_message) - - case_id: Created test case ID if successful, None otherwise - - error_message: Empty string on success, error details on failure - - API Endpoint: POST /api/v2/add_case/{section_id} - """ - self.environment.vlog(f"Creating BDD test case '{title}' in section {section_id}") - - # Build request body - # Note: custom_testrail_bdd_scenario expects an array of lines, not a single string - bdd_lines = bdd_content.split("\n") if bdd_content else [] - - body = { - "title": title, - "template_id": template_id, - "custom_testrail_bdd_scenario": bdd_lines, - } - - # Add tags as references if provided - if tags: - # Filter out @C tags (case IDs) and format others - ref_tags = [tag for tag in tags if not 
tag.upper().startswith("@C")] - if ref_tags: - body["refs"] = ", ".join(ref_tags) - - response = self.client.send_post(f"add_case/{section_id}", body) - - if response.status_code == 200: - if isinstance(response.response_text, dict): - case_id = response.response_text.get("id") - if case_id: - self.environment.vlog(f"Created BDD test case ID: {case_id}") - return case_id, "" - else: - return None, "Response missing 'id' field" - else: - return None, "Unexpected response format" - else: - error_msg = response.error_message or f"Failed to create BDD test case (HTTP {response.status_code})" - return None, error_msg + return self.bdd_handler.add_case_bdd(section_id, title, bdd_content, template_id, tags) diff --git a/trcli/api/api_utils.py b/trcli/api/api_utils.py new file mode 100644 index 0000000..c42d80b --- /dev/null +++ b/trcli/api/api_utils.py @@ -0,0 +1,285 @@ +""" +API Utilities - Shared utilities for API handlers + +This module provides common utilities to reduce code duplication across handlers: +- Reference parsing and validation +- Response validation +- Type definitions for better type safety +""" + +from beartype.typing import List, Tuple, Optional, Literal +from typing_extensions import TypedDict + + +# ============================================================================ +# Type Definitions for Better Type Safety +# ============================================================================ + + +class TestRailResponse(TypedDict, total=False): + """Type definition for TestRail API responses""" + + id: int + name: str + title: str + suite_id: int + section_id: int + case_id: int + refs: str + error: str + + +# Literal types for strategy parameters +ReferenceStrategy = Literal["add", "update", "delete", "append", "replace"] + + +# ============================================================================ +# Reference Utilities +# ============================================================================ + + +def parse_references(refs_string: str) -> List[str]: + """ + Parse a comma-separated reference string into a list of cleaned references. + + Args: + refs_string: Comma-separated string of references (e.g., "REF-1, REF-2, REF-3") + + Returns: + List of cleaned, non-empty reference strings + + Example: + >>> parse_references("REF-1, , REF-2 ,REF-3") + ['REF-1', 'REF-2', 'REF-3'] + """ + if not refs_string: + return [] + return [ref.strip() for ref in refs_string.split(",") if ref.strip()] + + +def deduplicate_references(references: List[str]) -> List[str]: + """ + Deduplicate a list of references while preserving order. + + Args: + references: List of reference strings + + Returns: + List of unique references in original order + + Example: + >>> deduplicate_references(['REF-1', 'REF-2', 'REF-1', 'REF-3']) + ['REF-1', 'REF-2', 'REF-3'] + """ + seen = set() + result = [] + for ref in references: + ref_clean = ref.strip() + if ref_clean and ref_clean not in seen: + result.append(ref_clean) + seen.add(ref_clean) + return result + + +def join_references(references: List[str]) -> str: + """ + Join a list of references into a comma-separated string. + + Args: + references: List of reference strings + + Returns: + Comma-separated string of references + + Example: + >>> join_references(['REF-1', 'REF-2', 'REF-3']) + 'REF-1,REF-2,REF-3' + """ + return ",".join(references) + + +def validate_references_length(refs_string: str, max_length: int) -> Tuple[bool, Optional[str]]: + """ + Validate that a reference string doesn't exceed the maximum length. 
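+
+    Different TestRail reference fields have different limits (the handlers use 250 characters
+    for run references and 2000 for case references), so the caller supplies the applicable maximum.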
+ + Args: + refs_string: Comma-separated string of references + max_length: Maximum allowed length + + Returns: + Tuple of (is_valid, error_message) + - is_valid: True if length is valid, False otherwise + - error_message: None if valid, error description if invalid + + Example: + >>> validate_references_length("REF-1,REF-2", 2000) + (True, None) + >>> validate_references_length("X" * 2001, 2000) + (False, "Combined references length (2001 characters) exceeds 2000 character limit") + """ + length = len(refs_string) + if length > max_length: + return False, f"Combined references length ({length} characters) exceeds {max_length} character limit" + return True, None + + +def merge_references(existing_refs: str, new_refs: str, strategy: ReferenceStrategy = "add") -> str: + """ + Merge existing and new references based on the specified strategy. + + Args: + existing_refs: Current comma-separated references + new_refs: New comma-separated references to merge + strategy: How to merge references: + - 'add'/'append': Add new refs to existing, avoiding duplicates + - 'update'/'replace': Replace all existing refs with new refs + - 'delete': Remove specified refs from existing + + Returns: + Merged comma-separated reference string + + Examples: + >>> merge_references("REF-1,REF-2", "REF-3,REF-4", "add") + 'REF-1,REF-2,REF-3,REF-4' + >>> merge_references("REF-1,REF-2", "REF-3", "update") + 'REF-3' + >>> merge_references("REF-1,REF-2,REF-3", "REF-2", "delete") + 'REF-1,REF-3' + """ + if strategy in ("update", "replace"): + # Replace all references with new ones + return new_refs + + elif strategy == "delete": + if not new_refs: + # Delete all references + return "" + # Delete specific references + existing_list = parse_references(existing_refs) + refs_to_delete = set(parse_references(new_refs)) + remaining = [ref for ref in existing_list if ref not in refs_to_delete] + return join_references(remaining) + + else: # strategy in ('add', 'append') + # Add new references to existing ones, avoiding duplicates + if not existing_refs: + return new_refs + + existing_list = parse_references(existing_refs) + new_list = parse_references(new_refs) + + # Combine, avoiding duplicates while preserving order + combined = existing_list + [ref for ref in new_list if ref not in existing_list] + return join_references(combined) + + +def calculate_reference_changes(existing_refs: str, new_refs: str) -> Tuple[List[str], List[str]]: + """ + Calculate which references will be added and which are duplicates. 
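+
+    New references are parsed and deduplicated (order preserved) before the comparison, so a
+    reference repeated in the input is reported only once.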
+ + Args: + existing_refs: Current comma-separated references + new_refs: New comma-separated references to process + + Returns: + Tuple of (added_refs, skipped_refs) + - added_refs: References that will be newly added + - skipped_refs: References that already exist (duplicates) + + Example: + >>> calculate_reference_changes("REF-1,REF-2", "REF-2,REF-3") + (['REF-3'], ['REF-2']) + """ + existing_list = parse_references(existing_refs) + new_list = deduplicate_references(parse_references(new_refs)) + + added_refs = [ref for ref in new_list if ref not in existing_list] + skipped_refs = [ref for ref in new_list if ref in existing_list] + + return added_refs, skipped_refs + + +# ============================================================================ +# Response Validation Utilities +# ============================================================================ + + +def check_response_error(response, default_error_msg: str = "API request failed") -> Optional[str]: + """ + Check if a response contains an error and return the error message. + + Args: + response: API response object with error_message attribute + default_error_msg: Default message if error_message is empty + + Returns: + Error message string if error exists, None otherwise + + Example: + >>> response = MockResponse(error_message="Field not found") + >>> check_response_error(response) + 'Field not found' + """ + if hasattr(response, "error_message") and response.error_message: + return response.error_message + return None + + +def validate_response_field( + response_data: dict, field_name: str, error_prefix: str = "Response" +) -> Tuple[bool, Optional[str]]: + """ + Validate that a required field exists in the response data. + + Args: + response_data: Dictionary containing response data + field_name: Name of the required field + error_prefix: Prefix for error message + + Returns: + Tuple of (is_valid, error_message) + - is_valid: True if field exists, False otherwise + - error_message: None if valid, error description if invalid + + Example: + >>> validate_response_field({"id": 123, "name": "Test"}, "id") + (True, None) + >>> validate_response_field({"name": "Test"}, "id") + (False, "Response missing 'id' field") + """ + if field_name in response_data: + return True, None + return False, f"{error_prefix} missing '{field_name}' field" + + +# ============================================================================ +# Common Patterns +# ============================================================================ + + +def safe_get_nested(data: dict, *keys, default=None): + """ + Safely get a nested value from a dictionary. 
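+
+    Traversal never raises: the default is returned as soon as a key is missing or an
+    intermediate value is not a dictionary.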
+ + Args: + data: Dictionary to search + *keys: Sequence of keys to traverse + default: Default value if key path not found + + Returns: + Value at the key path, or default if not found + + Example: + >>> data = {"user": {"profile": {"name": "John"}}} + >>> safe_get_nested(data, "user", "profile", "name") + 'John' + >>> safe_get_nested(data, "user", "invalid", "key", default="N/A") + 'N/A' + """ + current = data + for key in keys: + if isinstance(current, dict) and key in current: + current = current[key] + else: + return default + return current diff --git a/trcli/api/bdd_handler.py b/trcli/api/bdd_handler.py new file mode 100644 index 0000000..6a3c49b --- /dev/null +++ b/trcli/api/bdd_handler.py @@ -0,0 +1,224 @@ +""" +BddHandler - Handles all BDD (Behavior-Driven Development) related operations for TestRail + +It manages all BDD operations including: +- Uploading .feature files +- Retrieving BDD test cases +- Getting BDD template IDs +- Creating BDD test cases +""" + +from beartype.typing import List, Tuple + +from trcli.api.api_client import APIClient +from trcli.cli import Environment + + +class BddHandler: + """Handles all BDD-related operations for TestRail""" + + def __init__(self, client: APIClient, environment: Environment): + """ + Initialize the BddHandler + + :param client: APIClient instance for making API calls + :param environment: Environment configuration + """ + self.client = client + self.environment = environment + + def add_bdd(self, section_id: int, feature_content: str) -> Tuple[List[int], str]: + """ + Upload .feature file to TestRail BDD endpoint + + Creates TestRail test case from Gherkin .feature content. + The Gherkin content is sent in the request body as plain text. + + Args: + section_id: TestRail section ID where test case will be created + feature_content: Raw .feature file content (Gherkin syntax) + + Returns: + Tuple of (case_ids, error_message) + - case_ids: List containing the created test case ID + - error_message: Empty string on success, error details on failure + """ + # Send Gherkin content as file upload (multipart/form-data) + # TestRail expects the .feature file as an attachment + self.environment.vlog(f"Uploading .feature file to add_bdd/{section_id}") + + files = {"attachment": ("feature.feature", feature_content, "text/plain")} + + response = self.client.send_post(f"add_bdd/{section_id}", payload=None, files=files) + + if response.status_code == 200: + # Response is a test case object with 'id' field + if isinstance(response.response_text, dict): + case_id = response.response_text.get("id") + if case_id: + return [case_id], "" + else: + return [], "Response missing 'id' field" + else: + return [], "Unexpected response format" + else: + error_msg = response.error_message or f"Failed to upload feature file (HTTP {response.status_code})" + return [], error_msg + + def get_bdd(self, case_id: int) -> Tuple[str, str]: + """ + Retrieve BDD test case as .feature file content + + Args: + case_id: TestRail test case ID + + Returns: + Tuple of (feature_content, error_message) + - feature_content: .feature file content (Gherkin syntax) + - error_message: Empty string on success, error details on failure + """ + self.environment.vlog(f"Retrieving BDD test case from get_bdd/{case_id}") + response = self.client.send_get(f"get_bdd/{case_id}") + + if response.status_code == 200: + # TestRail returns raw Gherkin text (not JSON) + # APIClient treats non-JSON as error and stores str(response.content) + if isinstance(response.response_text, dict): + # Some 
versions might return JSON with 'feature' field + feature_content = response.response_text.get("feature", "") + elif isinstance(response.response_text, str) and response.response_text.startswith("b'"): + # APIClient converted bytes to string representation: "b'text'" + # Need to extract the actual content + try: + # Remove b' prefix and ' suffix, then decode escape sequences + feature_content = response.response_text[2:-1].encode().decode("unicode_escape") + except (ValueError, AttributeError): + feature_content = response.response_text + else: + # Plain text response + feature_content = response.response_text + + return feature_content, "" + else: + error_msg = response.error_message or f"Failed to retrieve BDD test case (HTTP {response.status_code})" + return "", error_msg + + def get_bdd_template_id(self, project_id: int) -> Tuple[int, str]: + """ + Get the BDD template ID for a project + + Args: + project_id: TestRail project ID + + Returns: + Tuple of (template_id, error_message) + - template_id: BDD template ID if found, None otherwise + - error_message: Empty string on success, error details on failure + + API Endpoint: GET /api/v2/get_templates/{project_id} + """ + self.environment.vlog(f"Getting templates for project {project_id}") + response = self.client.send_get(f"get_templates/{project_id}") + + if response.status_code == 200: + templates = response.response_text + if isinstance(templates, list): + self.environment.vlog(f"Retrieved {len(templates)} template(s) from TestRail") + + # Log all available templates for debugging + if templates: + self.environment.vlog("Available templates:") + for template in templates: + template_id = template.get("id") + template_name = template.get("name", "") + self.environment.vlog(f" - ID {template_id}: '{template_name}'") + + # Look for BDD template by name + for template in templates: + template_name = template.get("name", "").strip() + template_name_lower = template_name.lower() + template_id = template.get("id") + + self.environment.vlog(f"Checking template '{template_name}' (ID: {template_id})") + self.environment.vlog(f" Lowercase: '{template_name_lower}'") + + # Check for BDD template (support both US and UK spellings) + if ( + "behavior" in template_name_lower + or "behaviour" in template_name_lower + or "bdd" in template_name_lower + ): + self.environment.vlog(f" ✓ MATCH: This is the BDD template!") + self.environment.log(f"Found BDD template: '{template_name}' (ID: {template_id})") + return template_id, "" + else: + self.environment.vlog(f" ✗ No match: Does not contain 'behavior', 'behaviour', or 'bdd'") + + # Build detailed error message with available templates + error_parts = ["BDD template not found. 
Please enable BDD template in TestRail project settings."] + if templates: + template_list = ", ".join([f"'{t.get('name', 'Unknown')}'" for t in templates]) + error_parts.append(f"Available templates: {template_list}") + error_parts.append("The BDD template name should contain 'behavior', 'behaviour', or 'bdd'.") + else: + error_parts.append("No templates are available in this project.") + + return None, "\n".join(error_parts) + else: + return None, "Unexpected response format from get_templates" + else: + error_msg = response.error_message or f"Failed to get templates (HTTP {response.status_code})" + return None, error_msg + + def add_case_bdd( + self, section_id: int, title: str, bdd_content: str, template_id: int, tags: List[str] = None + ) -> Tuple[int, str]: + """ + Create a BDD test case with Gherkin content + + Args: + section_id: TestRail section ID where test case will be created + title: Test case title (scenario name) + bdd_content: Gherkin scenario content + template_id: BDD template ID + tags: Optional list of tags (for refs field) + + Returns: + Tuple of (case_id, error_message) + - case_id: Created test case ID if successful, None otherwise + - error_message: Empty string on success, error details on failure + """ + self.environment.vlog(f"Creating BDD test case '{title}' in section {section_id}") + + # Build request body + # Note: custom_testrail_bdd_scenario expects an array of lines, not a single string + bdd_lines = bdd_content.split("\n") if bdd_content else [] + + body = { + "title": title, + "template_id": template_id, + "custom_testrail_bdd_scenario": bdd_lines, + } + + # Add tags as references if provided + if tags: + # Filter out @C tags (case IDs) and format others + ref_tags = [tag for tag in tags if not tag.upper().startswith("@C")] + if ref_tags: + body["refs"] = ", ".join(ref_tags) + + response = self.client.send_post(f"add_case/{section_id}", body) + + if response.status_code == 200: + if isinstance(response.response_text, dict): + case_id = response.response_text.get("id") + if case_id: + self.environment.vlog(f"Created BDD test case ID: {case_id}") + return case_id, "" + else: + return None, "Response missing 'id' field" + else: + return None, "Unexpected response format" + else: + error_msg = response.error_message or f"Failed to create BDD test case (HTTP {response.status_code})" + return None, error_msg diff --git a/trcli/api/case_handler.py b/trcli/api/case_handler.py new file mode 100644 index 0000000..4a6bf06 --- /dev/null +++ b/trcli/api/case_handler.py @@ -0,0 +1,217 @@ +""" +CaseHandler - Handles all test case-related operations for TestRail + +This class was extracted from ApiRequestHandler to follow the Single Responsibility Principle. 
+It manages all test case operations including: +- Adding test cases +- Updating case references +- Updating case automation IDs +- Deleting test cases +- Case helper operations +""" + +from concurrent.futures import ThreadPoolExecutor +from beartype.typing import List, Tuple, Dict + +from trcli.api.api_client import APIClient, APIClientResult +from trcli.api.api_utils import ( + deduplicate_references, + join_references, + parse_references, + validate_references_length, +) +from trcli.cli import Environment +from trcli.constants import OLD_SYSTEM_NAME_AUTOMATION_ID, UPDATED_SYSTEM_NAME_AUTOMATION_ID +from trcli.data_classes.data_parsers import MatchersParser +from trcli.data_classes.dataclass_testrail import TestRailCase +from trcli.data_providers.api_data_provider import ApiDataProvider +from trcli.settings import MAX_WORKERS_ADD_CASE + + +class CaseHandler: + """Handles all test case-related operations for TestRail""" + + MAX_CASE_REFERENCES_LENGTH = 2000 # TestRail character limit for case refs field + + def __init__( + self, + client: APIClient, + environment: Environment, + data_provider: ApiDataProvider, + handle_futures_callback, + retrieve_results_callback, + ): + """ + Initialize the CaseHandler + + :param client: APIClient instance for making API calls + :param environment: Environment configuration + :param data_provider: Data provider for case data + :param handle_futures_callback: Callback to handle concurrent futures + :param retrieve_results_callback: Callback to retrieve results after cancellation + """ + self.client = client + self.environment = environment + self.data_provider = data_provider + self.handle_futures = handle_futures_callback + self.retrieve_results_after_cancelling = retrieve_results_callback + # Store active automation ID field (set by parent) + self._active_automation_id_field = None + + def add_cases(self) -> Tuple[List[dict], str]: + """ + Add cases that doesn't have ID in DataProvider. + Runs update_data in data_provider for successfully created resources. + + :returns: Tuple with list of dict created resources and error string. + """ + add_case_data = self.data_provider.add_cases() + responses = [] + error_message = "" + with self.environment.get_progress_bar( + results_amount=len(add_case_data), prefix="Adding test cases" + ) as progress_bar: + with ThreadPoolExecutor(max_workers=MAX_WORKERS_ADD_CASE) as executor: + futures = { + executor.submit( + self._add_case_and_update_data, + body, + ): body + for body in add_case_data + } + responses, error_message = self.handle_futures( + futures=futures, action_string="add_case", progress_bar=progress_bar + ) + if error_message: + # When error_message is present we cannot be sure that responses contains all added items. 
+ # Iterate through futures to get all responses from done tasks (not cancelled) + responses = self.retrieve_results_after_cancelling(futures) + returned_resources = [ + { + "case_id": response.response_text["id"], + "section_id": response.response_text["section_id"], + "title": response.response_text["title"], + } + for response in responses + ] + return returned_resources, error_message + + def _add_case_and_update_data(self, case: TestRailCase) -> APIClientResult: + """ + Helper method to add a single case and update its data + + :param case: TestRailCase object to add + :returns: APIClientResult + """ + case_body = case.to_dict() + active_field = self._active_automation_id_field + if active_field == UPDATED_SYSTEM_NAME_AUTOMATION_ID and OLD_SYSTEM_NAME_AUTOMATION_ID in case_body: + case_body[UPDATED_SYSTEM_NAME_AUTOMATION_ID] = case_body.pop(OLD_SYSTEM_NAME_AUTOMATION_ID) + if self.environment.case_matcher != MatchersParser.AUTO and OLD_SYSTEM_NAME_AUTOMATION_ID in case_body: + case_body.pop(OLD_SYSTEM_NAME_AUTOMATION_ID) + response = self.client.send_post(f"add_case/{case_body.pop('section_id')}", case_body) + if response.status_code == 200: + case.case_id = response.response_text["id"] + case.result.case_id = response.response_text["id"] + case.section_id = response.response_text["section_id"] + return response + + def update_existing_case_references( + self, case_id: int, junit_refs: str, strategy: str = "append" + ) -> Tuple[bool, str, List[str], List[str]]: + """ + Update existing case references with values from JUnit properties. + + :param case_id: ID of the test case + :param junit_refs: References from JUnit testrail_case_field property + :param strategy: 'append' or 'replace' + :returns: Tuple with (success, error_message, added_refs, skipped_refs) + """ + if not junit_refs or not junit_refs.strip(): + return True, None, [], [] # No references to process + + # Parse and deduplicate JUnit references using utility function + junit_ref_list = deduplicate_references(parse_references(junit_refs)) + + if not junit_ref_list: + return False, "No valid references found in JUnit property", [], [] + + # Get current case data + case_response = self.client.send_get(f"get_case/{case_id}") + if case_response.error_message: + return False, case_response.error_message, [], [] + + existing_refs = case_response.response_text.get("refs", "") or "" + + if strategy == "replace": + # Replace strategy: use JUnit refs as-is + new_refs = join_references(junit_ref_list) + added_refs = junit_ref_list + skipped_refs = [] + else: + # Append strategy: combine with existing refs, avoiding duplicates + existing_ref_list = parse_references(existing_refs) + + # Determine which references are new vs duplicates + added_refs = [ref for ref in junit_ref_list if ref not in existing_ref_list] + skipped_refs = [ref for ref in junit_ref_list if ref in existing_ref_list] + + # If no new references to add, return current state + if not added_refs: + return True, None, added_refs, skipped_refs + + # Combine references + combined_list = existing_ref_list + added_refs + new_refs = join_references(combined_list) + + # Validate 2000 character limit for test case references + is_valid, error_msg = validate_references_length(new_refs, self.MAX_CASE_REFERENCES_LENGTH) + if not is_valid: + return False, error_msg, [], [] + + # Update the case + update_data = {"refs": new_refs} + update_response = self.client.send_post(f"update_case/{case_id}", update_data) + + if update_response.error_message: + return False, 
update_response.error_message, [], [] + + return True, None, added_refs, skipped_refs + + def delete_cases(self, suite_id: int, added_cases: List[Dict]) -> Tuple[Dict, str]: + """ + Delete cases given add_cases response + + :param suite_id: suite id + :param added_cases: List of cases to delete + :returns: Tuple with dict created resources and error string. + """ + body = {"case_ids": [case["case_id"] for case in added_cases]} + response = self.client.send_post(f"delete_cases/{suite_id}", payload=body) + return response.response_text, response.error_message + + def update_case_automation_id(self, case_id: int, automation_id: str) -> Tuple[bool, str]: + """ + Update the automation_id field of a test case + + Args: + case_id: TestRail test case ID + automation_id: Automation ID value to set + + Returns: + Tuple of (success, error_message) + - success: True if update succeeded, False otherwise + - error_message: Empty string on success, error details on failure + """ + self.environment.vlog(f"Setting automation_id '{automation_id}' on case {case_id}") + + update_data = {"custom_automation_id": automation_id} + update_response = self.client.send_post(f"update_case/{case_id}", update_data) + + if update_response.status_code == 200: + return True, "" + else: + error_msg = ( + update_response.error_message or f"Failed to update automation_id (HTTP {update_response.status_code})" + ) + return False, error_msg diff --git a/trcli/api/case_matcher.py b/trcli/api/case_matcher.py new file mode 100644 index 0000000..801c5be --- /dev/null +++ b/trcli/api/case_matcher.py @@ -0,0 +1,249 @@ +""" +CaseMatcherFactory - Strategy pattern implementation for TestRail case matching + +Matching Strategies: +- AutomationIdMatcher: Matches cases by automation_id field +- NameMatcher: Matches cases by name (requires case_id in test data) +- PropertyMatcher: Matches cases by custom property (requires case_id in test data) +""" + +import html +from abc import ABC, abstractmethod +from beartype.typing import Tuple, List, Dict, Set + +from trcli.cli import Environment +from trcli.constants import OLD_SYSTEM_NAME_AUTOMATION_ID, UPDATED_SYSTEM_NAME_AUTOMATION_ID +from trcli.data_classes.data_parsers import MatchersParser +from trcli.data_classes.dataclass_testrail import TestRailSuite +from trcli.data_providers.api_data_provider import ApiDataProvider + + +class CaseMatcher(ABC): + """Abstract base class for case matching strategies""" + + def __init__(self, environment: Environment, data_provider: ApiDataProvider): + """ + Initialize the case matcher + + :param environment: Environment configuration + :param data_provider: Data provider for accessing test case data + """ + self.environment = environment + self.data_provider = data_provider + + @abstractmethod + def check_missing_cases( + self, + project_id: int, + suite_id: int, + suites_data: TestRailSuite, + get_all_cases_callback, + validate_case_ids_callback, + ) -> Tuple[bool, str]: + """ + Check for missing test cases using the specific matching strategy + + :param project_id: TestRail project ID + :param suite_id: TestRail suite ID + :param suites_data: Test suite data from provider + :param get_all_cases_callback: Callback to fetch all cases from TestRail + :param validate_case_ids_callback: Callback to validate case IDs exist + :returns: Tuple (has_missing_cases, error_message) + """ + pass + + +class AutomationIdMatcher(CaseMatcher): + """Matches test cases by automation_id field""" + + def check_missing_cases( + self, + project_id: int, + suite_id: int, + 
suites_data: TestRailSuite, + get_all_cases_callback, + validate_case_ids_callback, + ) -> Tuple[bool, str]: + """ + Match cases using automation_id field + + :param project_id: TestRail project ID + :param suite_id: TestRail suite ID + :param suites_data: Test suite data from provider + :param get_all_cases_callback: Callback to fetch all cases from TestRail + :param validate_case_ids_callback: Callback to validate case IDs exist + :returns: Tuple (has_missing_cases, error_message) + """ + missing_cases_number = 0 + + # Fetch all cases from TestRail + returned_cases, error_message = get_all_cases_callback(project_id, suite_id) + if error_message: + return False, error_message + + # Build lookup dictionary: automation_id -> case data + test_cases_by_aut_id = {} + for case in returned_cases: + aut_case_id = case.get(OLD_SYSTEM_NAME_AUTOMATION_ID) or case.get(UPDATED_SYSTEM_NAME_AUTOMATION_ID) + if aut_case_id: + aut_case_id = html.unescape(aut_case_id) + test_cases_by_aut_id[aut_case_id] = case + + # Match test cases from report with TestRail cases + test_case_data = [] + for section in suites_data.testsections: + for test_case in section.testcases: + aut_id = test_case.custom_automation_id + if aut_id in test_cases_by_aut_id.keys(): + case = test_cases_by_aut_id[aut_id] + test_case_data.append( + { + "case_id": case["id"], + "section_id": case["section_id"], + "title": case["title"], + OLD_SYSTEM_NAME_AUTOMATION_ID: aut_id, + } + ) + else: + missing_cases_number += 1 + + # Update data provider with matched cases + self.data_provider.update_data(case_data=test_case_data) + + if missing_cases_number: + self.environment.log(f"Found {missing_cases_number} test cases not matching any TestRail case.") + + return missing_cases_number > 0, "" + + +class IdBasedMatcher(CaseMatcher): + """Base class for matchers that rely on case_id being present in test data (NAME, PROPERTY)""" + + def check_missing_cases( + self, + project_id: int, + suite_id: int, + suites_data: TestRailSuite, + get_all_cases_callback, + validate_case_ids_callback, + ) -> Tuple[bool, str]: + """ + Validate that case IDs exist in TestRail + + For NAME/PROPERTY matchers, the test data must already contain case_id. + This method validates those IDs exist in TestRail. 
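+
+        The validation strategy mirrors the logic below: reports of 1000 or more tests that all
+        carry case IDs skip validation, large reports with some missing IDs are checked against a
+        single bulk fetch of all cases, and smaller reports validate each ID individually.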
+ + :param project_id: TestRail project ID + :param suite_id: TestRail suite ID + :param suites_data: Test suite data from provider + :param get_all_cases_callback: Callback to fetch all cases from TestRail + :param validate_case_ids_callback: Callback to validate case IDs exist + :returns: Tuple (has_missing_cases, error_message) + """ + missing_cases_number = 0 + nonexistent_ids = [] + case_ids_to_validate = set() + + # Collect all unique case IDs that need validation + for section in suites_data.testsections: + for test_case in section.testcases: + if not test_case.case_id: + missing_cases_number += 1 + else: + case_ids_to_validate.add(int(test_case.case_id)) + + total_tests_in_report = missing_cases_number + len(case_ids_to_validate) + + if missing_cases_number: + self.environment.log(f"Found {missing_cases_number} test cases without case ID in the report file.") + + # Smart validation strategy based on report size + # Threshold: 1000 cases (same as skip validation threshold for consistency) + if case_ids_to_validate: + # Skip validation for large reports with all IDs (most efficient) + if missing_cases_number == 0 and total_tests_in_report >= 1000: + # All tests have IDs and report is large: Skip validation (trust IDs) + self.environment.log( + f"Skipping validation of {len(case_ids_to_validate)} case IDs " + f"(all tests have IDs, trusting they exist). " + f"If you encounter errors, ensure all case IDs in your test report exist in TestRail." + ) + nonexistent_ids = [] + + # Fetch all for large reports with missing IDs + elif total_tests_in_report >= 1000: + # Large report (>=1000 cases) with some missing IDs: Fetch all cases and validate locally + # This is more efficient than individual validation for large batches + self.environment.log( + f"Large report detected ({total_tests_in_report} cases). " + f"Fetching all cases from TestRail for efficient validation..." + ) + returned_cases, error_message = get_all_cases_callback(project_id, suite_id) + if error_message: + return False, error_message + + # Build lookup dictionary from fetched cases + all_case_ids = {case["id"] for case in returned_cases} + + # Validate locally (O(1) lookup) + nonexistent_ids = [cid for cid in case_ids_to_validate if cid not in all_case_ids] + + if nonexistent_ids: + self.environment.elog( + f"Nonexistent case IDs found in the report file: {nonexistent_ids[:20]}" + f"{' ...' if len(nonexistent_ids) > 20 else ''}" + ) + return False, "Case IDs not in TestRail project or suite were detected in the report file." + + # Individual validation for small reports + else: + # Small report (<1000 cases): Use individual validation + # This is more efficient for small batches + self.environment.log(f"Validating {len(case_ids_to_validate)} case IDs exist in TestRail...") + validated_ids = validate_case_ids_callback(suite_id, list(case_ids_to_validate)) + nonexistent_ids = [cid for cid in case_ids_to_validate if cid not in validated_ids] + + if nonexistent_ids: + self.environment.elog(f"Nonexistent case IDs found in the report file: {nonexistent_ids}") + return False, "Case IDs not in TestRail project or suite were detected in the report file." 
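+
+        # Reaching this point means every supplied case ID was found (or validation was skipped);
+        # the return value only reports whether any tests in the report lacked a case ID.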
+ + return missing_cases_number > 0, "" + + +class NameMatcher(IdBasedMatcher): + """Matches test cases by name (case_id must be present in test data)""" + + pass + + +class PropertyMatcher(IdBasedMatcher): + """Matches test cases by custom property (case_id must be present in test data)""" + + pass + + +class CaseMatcherFactory: + """Factory for creating appropriate case matcher based on configuration""" + + @staticmethod + def create_matcher( + matcher_type: MatchersParser, environment: Environment, data_provider: ApiDataProvider + ) -> CaseMatcher: + """ + Create the appropriate case matcher based on the matcher type + + :param matcher_type: Type of matcher to create (AUTO, NAME, PROPERTY). If None, defaults to AUTO. + :param environment: Environment configuration + :param data_provider: Data provider for accessing test case data + :returns: Concrete CaseMatcher instance + :raises ValueError: If matcher_type is not recognized + """ + # Default to AUTO if matcher_type is None (e.g., for parse_openapi command) + if matcher_type is None or matcher_type == MatchersParser.AUTO: + return AutomationIdMatcher(environment, data_provider) + elif matcher_type == MatchersParser.NAME: + return NameMatcher(environment, data_provider) + elif matcher_type == MatchersParser.PROPERTY: + return PropertyMatcher(environment, data_provider) + else: + raise ValueError(f"Unknown matcher type: {matcher_type}") diff --git a/trcli/api/label_manager.py b/trcli/api/label_manager.py new file mode 100644 index 0000000..e6f444b --- /dev/null +++ b/trcli/api/label_manager.py @@ -0,0 +1,644 @@ +""" +LabelManager - Handles all label-related operations for TestRail + +It manages all label operations including: +- Creating, retrieving, updating, and deleting labels +- Adding labels to test cases and tests +- Filtering cases and tests by labels +- Retrieving labels for specific tests +""" + +from beartype.typing import List, Union, Tuple, Dict + +from trcli.api.api_client import APIClient +from trcli.cli import Environment + + +class LabelManager: + """Handles all label-related operations for TestRail""" + + MAX_LABELS_PER_ENTITY = 10 # TestRail limit + MAX_LABEL_TITLE_LENGTH = 20 # TestRail limit + + def __init__(self, client: APIClient, environment: Environment): + """ + Initialize the LabelManager + + :param client: APIClient instance for making API calls + :param environment: Environment configuration + """ + self.client = client + self.environment = environment + + def add_label(self, project_id: int, title: str) -> Tuple[dict, str]: + """ + Add a new label to the project + + :param project_id: ID of the project + :param title: Title of the label (max 20 characters) + :returns: Tuple with created label data and error string + """ + payload = {"title": title} + response = self.client.send_post(f"add_label/{project_id}", payload=payload) + return response.response_text, response.error_message + + def update_label(self, label_id: int, project_id: int, title: str) -> Tuple[dict, str]: + """ + Update an existing label + + :param label_id: ID of the label to update + :param project_id: ID of the project + :param title: New title for the label (max 20 characters) + :returns: Tuple with updated label data and error string + """ + payload = {"project_id": project_id, "title": title} + response = self.client.send_post(f"update_label/{label_id}", payload=payload) + return response.response_text, response.error_message + + def get_label(self, label_id: int) -> Tuple[dict, str]: + """ + Get a specific label by ID + + :param 
label_id: ID of the label to retrieve + :returns: Tuple with label data and error string + """ + response = self.client.send_get(f"get_label/{label_id}") + return response.response_text, response.error_message + + def get_labels(self, project_id: int, offset: int = 0, limit: int = 250) -> Tuple[dict, str]: + """ + Get all labels for a project with pagination + + :param project_id: ID of the project + :param offset: Offset for pagination + :param limit: Limit for pagination + :returns: Tuple with labels data (including pagination info) and error string + """ + params = [] + if offset > 0: + params.append(f"offset={offset}") + if limit != 250: + params.append(f"limit={limit}") + + url = f"get_labels/{project_id}" + if params: + url += "&" + "&".join(params) + + response = self.client.send_get(url) + return response.response_text, response.error_message + + def delete_label(self, label_id: int) -> Tuple[bool, str]: + """ + Delete a single label + + :param label_id: ID of the label to delete + :returns: Tuple with success status and error string + """ + response = self.client.send_post(f"delete_label/{label_id}") + success = response.status_code == 200 + return success, response.error_message + + def delete_labels(self, label_ids: List[int]) -> Tuple[bool, str]: + """ + Delete multiple labels + + :param label_ids: List of label IDs to delete + :returns: Tuple with success status and error string + """ + payload = {"label_ids": label_ids} + response = self.client.send_post("delete_labels", payload=payload) + success = response.status_code == 200 + return success, response.error_message + + def add_labels_to_cases( + self, + case_ids: List[int], + title: str, + project_id: int, + suite_id: int = None, + get_all_cases_callback=None, + ) -> Tuple[dict, str]: + """ + Add a label to multiple test cases + + :param case_ids: List of test case IDs + :param title: Label title (max 20 characters) + :param project_id: Project ID for validation + :param suite_id: Suite ID (optional) + :param get_all_cases_callback: Callback function to get all cases (injected dependency) + :returns: Tuple with response data and error string + """ + # Initialize results structure + results = {"successful_cases": [], "failed_cases": [], "max_labels_reached": [], "case_not_found": []} + + # Check if project is multi-suite by getting all cases without suite_id + all_cases_no_suite, error_message = get_all_cases_callback(project_id, None) + if error_message: + return results, error_message + + # Check if project has multiple suites + suite_ids = set() + for case in all_cases_no_suite: + if "suite_id" in case and case["suite_id"]: + suite_ids.add(case["suite_id"]) + + # If project has multiple suites and no suite_id provided, require it + if len(suite_ids) > 1 and suite_id is None: + return results, "This project is multisuite, suite id is required" + + # Get all cases to validate that the provided case IDs exist + all_cases, error_message = get_all_cases_callback(project_id, suite_id) + if error_message: + return results, error_message + + # Create a set of existing case IDs for quick lookup + existing_case_ids = {case["id"] for case in all_cases} + + # Validate case IDs and separate valid from invalid ones + invalid_case_ids = [case_id for case_id in case_ids if case_id not in existing_case_ids] + valid_case_ids = [case_id for case_id in case_ids if case_id in existing_case_ids] + + # Record invalid case IDs + for case_id in invalid_case_ids: + results["case_not_found"].append(case_id) + + # If no valid case IDs, return early 
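+        # (Invalid IDs were already recorded in results["case_not_found"] above.)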
+ if not valid_case_ids: + return results, "" + + # Check if label exists or create it + existing_labels, error_message = self.get_labels(project_id) + if error_message: + return results, error_message + + # Find existing label with the same title + label_id = None + for label in existing_labels.get("labels", []): + if label.get("title") == title: + label_id = label.get("id") + break + + # Create label if it doesn't exist + if label_id is None: + label_data, error_message = self.add_label(project_id, title) + if error_message: + return results, error_message + label_info = label_data.get("label", label_data) + label_id = label_info.get("id") + + # Collect case data and validate constraints + cases_to_update = [] + for case_id in valid_case_ids: + # Get current case to check existing labels + case_response = self.client.send_get(f"get_case/{case_id}") + if case_response.status_code != 200: + results["failed_cases"].append( + {"case_id": case_id, "error": f"Could not retrieve case {case_id}: {case_response.error_message}"} + ) + continue + + case_data = case_response.response_text + current_labels = case_data.get("labels", []) + + # Check if label already exists on this case + if any(label.get("id") == label_id for label in current_labels): + results["successful_cases"].append( + {"case_id": case_id, "message": f"Label '{title}' already exists on case {case_id}"} + ) + continue + + # Check maximum labels limit + if len(current_labels) >= self.MAX_LABELS_PER_ENTITY: + results["max_labels_reached"].append(case_id) + continue + + # Prepare case for update + existing_label_ids = [label.get("id") for label in current_labels if label.get("id")] + updated_label_ids = existing_label_ids + [label_id] + cases_to_update.append({"case_id": case_id, "labels": updated_label_ids}) + + # Update cases using appropriate endpoint + if len(cases_to_update) == 1: + # Single case: use update_case/{case_id} + case_info = cases_to_update[0] + case_update_data = {"labels": case_info["labels"]} + + update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) + + if update_response.status_code == 200: + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) + else: + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) + elif len(cases_to_update) > 1: + # Multiple cases: use update_cases/{suite_id} + # Need to determine suite_id from the cases + case_suite_id = suite_id + if not case_suite_id: + # Get suite_id from the first case if not provided + first_case = all_cases[0] if all_cases else None + case_suite_id = first_case.get("suite_id") if first_case else None + + if not case_suite_id: + # Fall back to individual updates if no suite_id available + for case_info in cases_to_update: + case_update_data = {"labels": case_info["labels"]} + update_response = self.client.send_post( + f"update_case/{case_info['case_id']}", payload=case_update_data + ) + + if update_response.status_code == 200: + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) + else: + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) + else: + # Batch update using update_cases/{suite_id} + batch_update_data = { + "case_ids": [case_info["case_id"] for case_info in 
cases_to_update], + "labels": cases_to_update[0]["labels"], # Assuming same labels for all cases + } + + batch_response = self.client.send_post(f"update_cases/{case_suite_id}", payload=batch_update_data) + + if batch_response.status_code == 200: + for case_info in cases_to_update: + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) + else: + # If batch update fails, fall back to individual updates + for case_info in cases_to_update: + case_update_data = {"labels": case_info["labels"]} + update_response = self.client.send_post( + f"update_case/{case_info['case_id']}", payload=case_update_data + ) + + if update_response.status_code == 200: + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) + else: + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) + + return results, "" + + def get_cases_by_label( + self, + project_id: int, + suite_id: int = None, + label_ids: List[int] = None, + label_title: str = None, + get_all_cases_callback=None, + ) -> Tuple[List[dict], str]: + """ + Get test cases filtered by label ID or title + + :param project_id: Project ID + :param suite_id: Suite ID (optional) + :param label_ids: List of label IDs to filter by + :param label_title: Label title to filter by + :param get_all_cases_callback: Callback function to get all cases (injected dependency) + :returns: Tuple with list of matching cases and error string + """ + # Get all cases first + all_cases, error_message = get_all_cases_callback(project_id, suite_id) + if error_message: + return [], error_message + + # If filtering by title, first get the label ID + target_label_ids = label_ids or [] + if label_title and not target_label_ids: + labels_data, error_message = self.get_labels(project_id) + if error_message: + return [], error_message + + for label in labels_data.get("labels", []): + if label.get("title") == label_title: + target_label_ids.append(label.get("id")) + + if not target_label_ids: + return [], "" # No label found is a valid case with 0 results + + # Filter cases that have any of the target labels + matching_cases = [] + for case in all_cases: + case_labels = case.get("labels", []) + case_label_ids = [label.get("id") for label in case_labels] + + # Check if any of the target label IDs are present in this case + if any(label_id in case_label_ids for label_id in target_label_ids): + matching_cases.append(case) + + return matching_cases, "" + + def add_labels_to_tests( + self, test_ids: List[int], titles: Union[str, List[str]], project_id: int + ) -> Tuple[dict, str]: + """ + Add labels to multiple tests + + :param test_ids: List of test IDs + :param titles: Label title(s) - can be a single string or list of strings (max 20 characters each) + :param project_id: Project ID for validation + :returns: Tuple with response data and error string + """ + # Initialize results structure + results = {"successful_tests": [], "failed_tests": [], "max_labels_reached": [], "test_not_found": []} + + # Normalize titles to a list + if isinstance(titles, str): + title_list = [titles] + else: + title_list = titles + + # At this point, title_list should already be validated by the CLI + # Just ensure we have clean titles + title_list = [title.strip() for title in title_list if title.strip()] + + if not title_list: + return {}, "No 
valid labels provided" + + # Validate test IDs by getting run information for each test + valid_test_ids = [] + for test_id in test_ids: + # Get test information to validate it exists + test_response = self.client.send_get(f"get_test/{test_id}") + if test_response.status_code != 200: + results["test_not_found"].append(test_id) + continue + + test_data = test_response.response_text + # Validate that the test belongs to the correct project + run_id = test_data.get("run_id") + if run_id: + run_response = self.client.send_get(f"get_run/{run_id}") + if run_response.status_code == 200: + run_data = run_response.response_text + if run_data.get("project_id") == project_id: + valid_test_ids.append(test_id) + else: + results["test_not_found"].append(test_id) + else: + results["test_not_found"].append(test_id) + else: + results["test_not_found"].append(test_id) + + # If no valid test IDs, return early + if not valid_test_ids: + return results, "" + + # Check if labels exist or create them + existing_labels, error_message = self.get_labels(project_id) + if error_message: + return results, error_message + + # Process each title to get/create label IDs + label_ids = [] + label_id_to_title = {} # Map label IDs to their titles + for title in title_list: + # Find existing label with the same title + label_id = None + for label in existing_labels.get("labels", []): + if label.get("title") == title: + label_id = label.get("id") + break + + # Create label if it doesn't exist + if label_id is None: + label_data, error_message = self.add_label(project_id, title) + if error_message: + return results, error_message + label_info = label_data.get("label", label_data) + label_id = label_info.get("id") + + if label_id: + label_ids.append(label_id) + label_id_to_title[label_id] = title + + # Collect test data and validate constraints + tests_to_update = [] + for test_id in valid_test_ids: + # Get current test to check existing labels + test_response = self.client.send_get(f"get_test/{test_id}") + if test_response.status_code != 200: + results["failed_tests"].append( + {"test_id": test_id, "error": f"Could not retrieve test {test_id}: {test_response.error_message}"} + ) + continue + + test_data = test_response.response_text + current_labels = test_data.get("labels", []) + current_label_ids = [label.get("id") for label in current_labels if label.get("id")] + + new_label_ids = [] + already_exists_titles = [] + + for label_id in label_ids: + if label_id not in current_label_ids: + new_label_ids.append(label_id) + else: + if label_id in label_id_to_title: + already_exists_titles.append(label_id_to_title[label_id]) + + if not new_label_ids: + results["successful_tests"].append( + { + "test_id": test_id, + "message": f"All labels already exist on test {test_id}: {', '.join(already_exists_titles)}", + } + ) + continue + + # Check maximum labels limit + if len(current_label_ids) + len(new_label_ids) > self.MAX_LABELS_PER_ENTITY: + results["max_labels_reached"].append(test_id) + continue + + # Prepare test for update + updated_label_ids = current_label_ids + new_label_ids + + new_label_titles = [] + for label_id in new_label_ids: + if label_id in label_id_to_title: + new_label_titles.append(label_id_to_title[label_id]) + + tests_to_update.append( + { + "test_id": test_id, + "labels": updated_label_ids, + "new_labels": new_label_ids, + "new_label_titles": new_label_titles, + } + ) + + # Update tests using appropriate endpoint + if len(tests_to_update) == 1: + # Single test: use update_test/{test_id} + test_info = 
tests_to_update[0] + test_update_data = {"labels": test_info["labels"]} + + update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) + + if update_response.status_code == 200: + new_label_titles = test_info.get("new_label_titles", []) + new_label_count = len(new_label_titles) + + if new_label_count == 1: + message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" + elif new_label_count > 1: + message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" + else: + message = f"No new labels added to test {test_info['test_id']}" + + results["successful_tests"].append({"test_id": test_info["test_id"], "message": message}) + else: + results["failed_tests"].append( + {"test_id": test_info["test_id"], "error": update_response.error_message} + ) + else: + # Multiple tests: use individual updates to ensure each test gets its specific labels + for test_info in tests_to_update: + test_update_data = {"labels": test_info["labels"]} + update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) + + if update_response.status_code == 200: + new_label_titles = test_info.get("new_label_titles", []) + new_label_count = len(new_label_titles) + + if new_label_count == 1: + message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" + elif new_label_count > 1: + message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" + else: + message = f"No new labels added to test {test_info['test_id']}" + + results["successful_tests"].append({"test_id": test_info["test_id"], "message": message}) + else: + results["failed_tests"].append( + {"test_id": test_info["test_id"], "error": update_response.error_message} + ) + + return results, "" + + def get_tests_by_label( + self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None + ) -> Tuple[List[dict], str]: + """ + Get tests filtered by label ID or title from specific runs + + :param project_id: Project ID + :param label_ids: List of label IDs to filter by + :param label_title: Label title to filter by + :param run_ids: List of run IDs to filter tests from (optional, defaults to all runs) + :returns: Tuple with list of matching tests and error string + """ + # If filtering by title, first get the label ID + target_label_ids = label_ids or [] + if label_title and not target_label_ids: + labels_data, error_message = self.get_labels(project_id) + if error_message: + return [], error_message + + for label in labels_data.get("labels", []): + if label.get("title") == label_title: + target_label_ids.append(label.get("id")) + + if not target_label_ids: + return [], "" # No label found is a valid case with 0 results + + # Get runs for the project (either all runs or specific run IDs) + if run_ids: + # Use specific run IDs - validate they exist by getting run details + runs = [] + for run_id in run_ids: + run_response = self.client.send_get(f"get_run/{run_id}") + if run_response.status_code == 200: + runs.append(run_response.response_text) + else: + return [], f"Run ID {run_id} not found or inaccessible" + else: + # Get all runs for the project + runs_response = self.client.send_get(f"get_runs/{project_id}") + if runs_response.status_code != 200: + return [], runs_response.error_message + + runs_data = runs_response.response_text + runs = runs_data.get("runs", []) if 
isinstance(runs_data, dict) else runs_data + + # Collect all tests from all runs + matching_tests = [] + for run in runs: + run_id = run.get("id") + if not run_id: + continue + + # Get tests for this run + tests_response = self.client.send_get(f"get_tests/{run_id}") + if tests_response.status_code != 200: + continue # Skip this run if we can't get tests + + tests_data = tests_response.response_text + tests = tests_data.get("tests", []) if isinstance(tests_data, dict) else tests_data + + # Filter tests that have any of the target labels + for test in tests: + test_labels = test.get("labels", []) + test_label_ids = [label.get("id") for label in test_labels] + + # Check if any of the target label IDs are present in this test + if any(label_id in test_label_ids for label_id in target_label_ids): + matching_tests.append(test) + + return matching_tests, "" + + def get_test_labels(self, test_ids: List[int]) -> Tuple[List[dict], str]: + """ + Get labels for specific tests + + :param test_ids: List of test IDs to get labels for + :returns: Tuple with list of test label information and error string + """ + results = [] + + for test_id in test_ids: + # Get test information + test_response = self.client.send_get(f"get_test/{test_id}") + if test_response.status_code != 200: + results.append({"test_id": test_id, "error": f"Test {test_id} not found or inaccessible", "labels": []}) + continue + + test_data = test_response.response_text + test_labels = test_data.get("labels", []) + + results.append( + { + "test_id": test_id, + "title": test_data.get("title", "Unknown"), + "status_id": test_data.get("status_id"), + "labels": test_labels, + "error": None, + } + ) + + return results, "" diff --git a/trcli/api/reference_manager.py b/trcli/api/reference_manager.py new file mode 100644 index 0000000..19c4e26 --- /dev/null +++ b/trcli/api/reference_manager.py @@ -0,0 +1,134 @@ +""" +ReferenceManager - Handles all reference-related operations for TestRail test cases + +It manages all reference operations including: +- Adding references to test cases +- Updating references on test cases +- Deleting references from test cases +""" + +from beartype.typing import List, Tuple, Optional + +from trcli.api.api_client import APIClient +from trcli.api.api_utils import ( + deduplicate_references, + join_references, + merge_references, + validate_references_length, + check_response_error, +) +from trcli.cli import Environment + + +class ReferenceManager: + """Handles all reference-related operations for TestRail test cases""" + + MAX_REFERENCES_LENGTH = 2000 # TestRail character limit for refs field + + def __init__(self, client: APIClient, environment: Environment): + """ + Initialize the ReferenceManager + + :param client: APIClient instance for making API calls + :param environment: Environment configuration + """ + self.client = client + self.environment = environment + + def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool, str]: + """ + Add references to a test case (appends to existing references) + + :param case_id: ID of the test case + :param references: List of references to add + :returns: Tuple with success status and error string + """ + # Get current test case to retrieve existing references + case_response = self.client.send_get(f"get_case/{case_id}") + if case_response.status_code != 200: + error = check_response_error(case_response) + return False, ( + f"Failed to retrieve test case {case_id}: {error}" + if error + else f"Failed to retrieve test case {case_id}" + ) + + 
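+
+        # Illustrative behaviour of the merge below, with hypothetical values: existing refs
+        # "JIRA-1, JIRA-2" combined with the input ["JIRA-2", "JIRA-3"] under strategy="add"
+        # yield "JIRA-1, JIRA-2, JIRA-3".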
existing_refs = case_response.response_text.get("refs", "") or "" + + # Deduplicate and merge with existing references + deduplicated_input = deduplicate_references(references) + new_refs_string = merge_references(existing_refs, join_references(deduplicated_input), strategy="add") + + # Validate total character limit + is_valid, error_msg = validate_references_length(new_refs_string, self.MAX_REFERENCES_LENGTH) + if not is_valid: + return False, error_msg + + # Update the test case with new references + update_response = self.client.send_post(f"update_case/{case_id}", {"refs": new_refs_string}) + + if update_response.status_code == 200: + return True, "" + return False, update_response.error_message or "Failed to update references" + + def update_case_references(self, case_id: int, references: List[str]) -> Tuple[bool, str]: + """ + Update references on a test case by replacing existing ones + + :param case_id: ID of the test case + :param references: List of references to replace existing ones + :returns: Tuple with success status and error string + """ + # Deduplicate and join references + deduplicated_refs = deduplicate_references(references) + new_refs_string = join_references(deduplicated_refs) + + # Validate total character limit + is_valid, error_msg = validate_references_length(new_refs_string, self.MAX_REFERENCES_LENGTH) + if not is_valid: + return False, error_msg + + # Update the test case with new references + update_response = self.client.send_post(f"update_case/{case_id}", {"refs": new_refs_string}) + + if update_response.status_code == 200: + return True, "" + return False, update_response.error_message or "Failed to update references" + + def delete_case_references(self, case_id: int, specific_references: Optional[List[str]] = None) -> Tuple[bool, str]: + """ + Delete all or specific references from a test case + + :param case_id: ID of the test case + :param specific_references: List of specific references to delete (None to delete all) + :returns: Tuple with success status and error string + """ + if specific_references is None: + # Delete all references by setting refs to empty string + new_refs_string = "" + else: + # Get current test case to retrieve existing references + case_response = self.client.send_get(f"get_case/{case_id}") + if case_response.status_code != 200: + error = check_response_error(case_response) + return False, ( + f"Failed to retrieve test case {case_id}: {error}" + if error + else f"Failed to retrieve test case {case_id}" + ) + + existing_refs = case_response.response_text.get("refs", "") or "" + + if not existing_refs: + # No references to delete + return True, "" + + # Use utility to delete specific references + new_refs_string = merge_references(existing_refs, join_references(specific_references), strategy="delete") + + # Update the test case + update_response = self.client.send_post(f"update_case/{case_id}", {"refs": new_refs_string}) + + if update_response.status_code == 200: + return True, "" + return False, update_response.error_message or "Failed to delete references" diff --git a/trcli/api/result_handler.py b/trcli/api/result_handler.py new file mode 100644 index 0000000..7317614 --- /dev/null +++ b/trcli/api/result_handler.py @@ -0,0 +1,178 @@ +""" +ResultHandler - Handles all test result-related operations for TestRail + +It manages all test result operations including: +- Adding test results +- Uploading attachments to results +- Retrieving results after cancellation +""" + +import os +from concurrent.futures import ThreadPoolExecutor, 
as_completed +from beartype.typing import List, Tuple, Dict + +from trcli.api.api_client import APIClient +from trcli.cli import Environment +from trcli.constants import FAULT_MAPPING +from trcli.data_providers.api_data_provider import ApiDataProvider +from trcli.settings import MAX_WORKERS_ADD_RESULTS + + +class ResultHandler: + """Handles all test result-related operations for TestRail""" + + def __init__( + self, + client: APIClient, + environment: Environment, + data_provider: ApiDataProvider, + get_all_tests_in_run_callback, + handle_futures_callback, + ): + """ + Initialize the ResultHandler + + :param client: APIClient instance for making API calls + :param environment: Environment configuration + :param data_provider: Data provider for result data + :param get_all_tests_in_run_callback: Callback to fetch all tests in a run + :param handle_futures_callback: Callback to handle concurrent futures + """ + self.client = client + self.environment = environment + self.data_provider = data_provider + self.__get_all_tests_in_run = get_all_tests_in_run_callback + self.handle_futures = handle_futures_callback + + def upload_attachments(self, report_results: List[Dict], results: List[Dict], run_id: int): + """ + Getting test result id and upload attachments for it. + + :param report_results: List of test results with attachments from report + :param results: List of created results from TestRail + :param run_id: Run ID + """ + tests_in_run, error = self.__get_all_tests_in_run(run_id) + if not error: + failed_uploads = [] + for report_result in report_results: + case_id = report_result["case_id"] + test_id = next((test["id"] for test in tests_in_run if test["case_id"] == case_id), None) + result_id = next((result["id"] for result in results if result["test_id"] == test_id), None) + for file_path in report_result.get("attachments"): + try: + with open(file_path, "rb") as file: + response = self.client.send_post( + f"add_attachment_to_result/{result_id}", files={"attachment": file} + ) + + # Check if upload was successful + if response.status_code != 200: + file_name = os.path.basename(file_path) + + # Handle 413 Request Entity Too Large specifically + if response.status_code == 413: + error_msg = FAULT_MAPPING["attachment_too_large"].format( + file_name=file_name, case_id=case_id + ) + self.environment.elog(error_msg) + failed_uploads.append(f"{file_name} (case {case_id})") + else: + # Handle other HTTP errors + error_msg = FAULT_MAPPING["attachment_upload_failed"].format( + file_path=file_name, + case_id=case_id, + error_message=response.error_message or f"HTTP {response.status_code}", + ) + self.environment.elog(error_msg) + failed_uploads.append(f"{file_name} (case {case_id})") + except FileNotFoundError: + self.environment.elog(f"Attachment file not found: {file_path} (case {case_id})") + failed_uploads.append(f"{file_path} (case {case_id})") + except Exception as ex: + file_name = os.path.basename(file_path) if os.path.exists(file_path) else file_path + self.environment.elog(f"Error uploading attachment '{file_name}' for case {case_id}: {ex}") + failed_uploads.append(f"{file_name} (case {case_id})") + + # Provide a summary if there were failed uploads + if failed_uploads: + self.environment.log(f"\nWarning: {len(failed_uploads)} attachment(s) failed to upload.") + else: + self.environment.elog(f"Unable to upload attachments due to API request error: {error}") + + def add_results(self, run_id: int) -> Tuple[List, str, int]: + """ + Adds one or more new test results. 
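+
+        Results are submitted in chunks of the configured batch size through concurrent
+        add_results_for_cases requests; attachments referenced by individual results are
+        uploaded after the results have been created.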
+ + :param run_id: run id + :returns: Tuple with dict created resources, error string, and results count. + """ + responses = [] + error_message = "" + # Get pre-validated user IDs if available + user_ids = getattr(self.environment, "_validated_user_ids", []) + + add_results_data_chunks = self.data_provider.add_results_for_cases(self.environment.batch_size, user_ids) + # Get assigned count from data provider + assigned_count = getattr(self.data_provider, "_assigned_count", 0) + + results_amount = sum([len(results["results"]) for results in add_results_data_chunks]) + + with self.environment.get_progress_bar(results_amount=results_amount, prefix="Adding results") as progress_bar: + with ThreadPoolExecutor(max_workers=MAX_WORKERS_ADD_RESULTS) as executor: + futures = { + executor.submit(self.client.send_post, f"add_results_for_cases/{run_id}", body): body + for body in add_results_data_chunks + } + responses, error_message = self.handle_futures( + futures=futures, + action_string="add_results", + progress_bar=progress_bar, + ) + if error_message: + # When error_message is present we cannot be sure that responses contains all added items. + # Iterate through futures to get all responses from done tasks (not cancelled) + responses = ResultHandler.retrieve_results_after_cancelling(futures) + responses = [response.response_text for response in responses] + results = [result for results_list in responses for result in results_list] + report_results_w_attachments = [] + for results_data_chunk in add_results_data_chunks: + for test_result in results_data_chunk["results"]: + if test_result["attachments"]: + report_results_w_attachments.append(test_result) + if report_results_w_attachments: + attachments_count = 0 + for result in report_results_w_attachments: + attachments_count += len(result["attachments"]) + self.environment.log( + f"Uploading {attachments_count} attachments " f"for {len(report_results_w_attachments)} test results." + ) + self.upload_attachments(report_results_w_attachments, results, run_id) + else: + self.environment.log(f"No attachments found to upload.") + + # Log assignment results if assignment was performed + if user_ids: + total_failed = getattr(self.data_provider, "_total_failed_count", assigned_count) + if assigned_count > 0: + self.environment.log(f"Assigning failed results: {assigned_count}/{total_failed}, Done.") + else: + self.environment.log(f"Assigning failed results: 0/0, Done.") + + return responses, error_message, progress_bar.n + + @staticmethod + def retrieve_results_after_cancelling(futures) -> list: + """ + Retrieve results from futures after cancellation has been triggered. 
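+
+        Only futures that completed (i.e. were not cancelled) and returned no error_message
+        are included in the returned list.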
+ + :param futures: Dictionary of futures + :returns: List of successful responses + """ + responses = [] + for future in as_completed(futures): + if not future.cancelled(): + response = future.result() + if not response.error_message: + responses.append(response) + return responses diff --git a/trcli/api/run_handler.py b/trcli/api/run_handler.py new file mode 100644 index 0000000..b735315 --- /dev/null +++ b/trcli/api/run_handler.py @@ -0,0 +1,292 @@ +""" +RunHandler - Handles all test run-related operations for TestRail + +It manages all test run operations including: +- Creating test runs +- Updating test runs +- Managing run references +- Closing and deleting runs +""" + +from beartype.typing import List, Tuple, Dict + +from trcli.api.api_client import APIClient +from trcli.api.api_utils import ( + deduplicate_references, + join_references, + merge_references, + parse_references, + validate_references_length, +) +from trcli.cli import Environment +from trcli.data_providers.api_data_provider import ApiDataProvider + + +class RunHandler: + """Handles all test run-related operations for TestRail""" + + MAX_RUN_REFERENCES_LENGTH = 250 # TestRail character limit for run refs field + + def __init__( + self, + client: APIClient, + environment: Environment, + data_provider: ApiDataProvider, + get_all_tests_in_run_callback, + ): + """ + Initialize the RunHandler + + :param client: APIClient instance for making API calls + :param environment: Environment configuration + :param data_provider: Data provider for run data + :param get_all_tests_in_run_callback: Callback to fetch all tests in a run + """ + self.client = client + self.environment = environment + self.data_provider = data_provider + self.__get_all_tests_in_run = get_all_tests_in_run_callback + + def add_run( + self, + project_id: int, + run_name: str, + milestone_id: int = None, + start_date: str = None, + end_date: str = None, + plan_id: int = None, + config_ids: List[int] = None, + assigned_to_id: int = None, + include_all: bool = False, + refs: str = None, + case_ids: List[int] = None, + ) -> Tuple[int, str]: + """ + Creates a new test run. + + :param project_id: project_id + :param run_name: run name + :param milestone_id: milestone id + :param start_date: start date + :param end_date: end date + :param plan_id: plan id (if adding to plan) + :param config_ids: configuration ids + :param assigned_to_id: user id to assign + :param include_all: include all cases + :param refs: references + :param case_ids: specific case ids + :returns: Tuple with run id and error string. 
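+
+        Example (illustrative only; the handler instance and IDs are hypothetical):
+            run_id, error = run_handler.add_run(project_id=1, run_name="Nightly regression", include_all=True)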
+ """ + add_run_data = self.data_provider.add_run( + run_name, + case_ids=case_ids, + start_date=start_date, + end_date=end_date, + milestone_id=milestone_id, + assigned_to_id=assigned_to_id, + include_all=include_all, + refs=refs, + ) + if not plan_id: + response = self.client.send_post(f"add_run/{project_id}", add_run_data) + run_id = response.response_text.get("id") + else: + if config_ids: + add_run_data["config_ids"] = config_ids + entry_data = { + "name": add_run_data["name"], + "suite_id": add_run_data["suite_id"], + "config_ids": config_ids, + "runs": [add_run_data], + } + else: + entry_data = add_run_data + response = self.client.send_post(f"add_plan_entry/{plan_id}", entry_data) + run_id = response.response_text["runs"][0]["id"] + return run_id, response.error_message + + def update_run( + self, + run_id: int, + run_name: str, + start_date: str = None, + end_date: str = None, + milestone_id: int = None, + refs: str = None, + refs_action: str = "add", + ) -> Tuple[dict, str]: + """ + Updates an existing run + + :param run_id: run id + :param run_name: run name + :param start_date: start date + :param end_date: end date + :param milestone_id: milestone id + :param refs: references to manage + :param refs_action: action to perform ('add', 'update', 'delete') + :returns: Tuple with run and error string. + """ + run_response = self.client.send_get(f"get_run/{run_id}") + if run_response.error_message: + return None, run_response.error_message + + existing_description = run_response.response_text.get("description", "") + existing_refs = run_response.response_text.get("refs", "") + + add_run_data = self.data_provider.add_run( + run_name, start_date=start_date, end_date=end_date, milestone_id=milestone_id + ) + add_run_data["description"] = existing_description # Retain the current description + + # Handle references based on action + if refs is not None: + updated_refs = self._manage_references(existing_refs, refs, refs_action) + add_run_data["refs"] = updated_refs + else: + add_run_data["refs"] = existing_refs # Keep existing refs if none provided + + existing_include_all = run_response.response_text.get("include_all", False) + add_run_data["include_all"] = existing_include_all + + if not existing_include_all: + # Only manage explicit case_ids when include_all=False + run_tests, error_message = self.__get_all_tests_in_run(run_id) + if error_message: + return None, f"Failed to get tests in run: {error_message}" + run_case_ids = [test["case_id"] for test in run_tests] + report_case_ids = add_run_data["case_ids"] + joint_case_ids = list(set(report_case_ids + run_case_ids)) + add_run_data["case_ids"] = joint_case_ids + else: + # include_all=True: TestRail includes all suite cases automatically + # Do NOT send case_ids array (TestRail ignores it anyway) + add_run_data.pop("case_ids", None) + + plan_id = run_response.response_text["plan_id"] + config_ids = run_response.response_text["config_ids"] + if not plan_id: + update_response = self.client.send_post(f"update_run/{run_id}", add_run_data) + elif plan_id and config_ids: + update_response = self.client.send_post(f"update_run_in_plan_entry/{run_id}", add_run_data) + else: + response = self.client.send_get(f"get_plan/{plan_id}") + entry_id = next( + ( + run["entry_id"] + for entry in response.response_text["entries"] + for run in entry["runs"] + if run["id"] == run_id + ), + None, + ) + update_response = self.client.send_post(f"update_plan_entry/{plan_id}/{entry_id}", add_run_data) + run_response = self.client.send_get(f"get_run/{run_id}") + 
return run_response.response_text, update_response.error_message + + def _manage_references(self, existing_refs: str, new_refs: str, action: str) -> str: + """ + Manage references based on the specified action. + + :param existing_refs: current references in the run + :param new_refs: new references to process + :param action: 'add', 'update', or 'delete' + :returns: updated references string + """ + # Use shared utility function for reference management + return merge_references(existing_refs or "", new_refs, strategy=action) + + def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dict, List[str], List[str], str]: + """ + Append references to a test run, avoiding duplicates. + + :param run_id: ID of the test run + :param references: List of references to append + :returns: Tuple with (run_data, added_refs, skipped_refs, error_message) + """ + # Get current run data + run_response = self.client.send_get(f"get_run/{run_id}") + if run_response.error_message: + return None, [], [], run_response.error_message + + existing_refs = run_response.response_text.get("refs", "") or "" + + # Deduplicate input references using utility function + deduplicated_input = deduplicate_references(references) + + # Parse existing references and calculate changes + existing_list = parse_references(existing_refs) + added_refs = [ref for ref in deduplicated_input if ref not in existing_list] + skipped_refs = [ref for ref in deduplicated_input if ref in existing_list] + + # If no new references to add, return current state + if not added_refs: + return run_response.response_text, added_refs, skipped_refs, None + + # Combine references using utility function + combined_refs = merge_references(existing_refs, join_references(deduplicated_input), strategy="add") + + # Validate character limit + is_valid, error_msg = validate_references_length(combined_refs, self.MAX_RUN_REFERENCES_LENGTH) + if not is_valid: + return None, [], [], error_msg + + update_data = {"refs": combined_refs} + + # Determine the correct API endpoint based on plan membership + plan_id = run_response.response_text.get("plan_id") + config_ids = run_response.response_text.get("config_ids") + + if not plan_id: + # Standalone run + update_response = self.client.send_post(f"update_run/{run_id}", update_data) + elif plan_id and config_ids: + # Run in plan with configurations + update_response = self.client.send_post(f"update_run_in_plan_entry/{run_id}", update_data) + else: + # Run in plan without configurations - need to use plan entry endpoint + plan_response = self.client.send_get(f"get_plan/{plan_id}") + if plan_response.error_message: + return None, [], [], f"Failed to get plan details: {plan_response.error_message}" + + # Find the entry_id for this run + entry_id = None + for entry in plan_response.response_text.get("entries", []): + for run in entry.get("runs", []): + if run["id"] == run_id: + entry_id = entry["id"] + break + if entry_id: + break + + if not entry_id: + return None, [], [], f"Could not find plan entry for run {run_id}" + + update_response = self.client.send_post(f"update_plan_entry/{plan_id}/{entry_id}", update_data) + + if update_response.error_message: + return None, [], [], update_response.error_message + + updated_run_response = self.client.send_get(f"get_run/{run_id}") + return updated_run_response.response_text, added_refs, skipped_refs, updated_run_response.error_message + + def close_run(self, run_id: int) -> Tuple[dict, str]: + """ + Closes an existing test run and archives its tests & results. 
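+        Note that closing a run in TestRail is permanent and cannot be undone.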
+ + :param run_id: run id + :returns: Tuple with dict created resources and error string. + """ + body = {"run_id": run_id} + response = self.client.send_post(f"close_run/{run_id}", body) + return response.response_text, response.error_message + + def delete_run(self, run_id: int) -> Tuple[dict, str]: + """ + Delete run given run id + + :param run_id: run id + :returns: Tuple with dict created resources and error string. + """ + response = self.client.send_post(f"delete_run/{run_id}", payload={}) + return response.response_text, response.error_message diff --git a/trcli/api/section_handler.py b/trcli/api/section_handler.py new file mode 100644 index 0000000..b47c5f1 --- /dev/null +++ b/trcli/api/section_handler.py @@ -0,0 +1,140 @@ +""" +SectionHandler - Handles all section-related operations for TestRail + +It manages all section operations including: +- Checking for missing sections +- Adding new sections +- Deleting sections +""" + +from beartype.typing import List, Tuple, Dict + +from trcli.api.api_client import APIClient +from trcli.cli import Environment +from trcli.constants import FAULT_MAPPING +from trcli.data_classes.dataclass_testrail import TestRailSuite +from trcli.data_providers.api_data_provider import ApiDataProvider + + +class SectionHandler: + """Handles all section-related operations for TestRail""" + + def __init__( + self, + client: APIClient, + environment: Environment, + data_provider: ApiDataProvider, + get_all_sections_callback, + ): + """ + Initialize the SectionHandler + + :param client: APIClient instance for making API calls + :param environment: Environment configuration + :param data_provider: Data provider for updating section data + :param get_all_sections_callback: Callback to fetch all sections from TestRail + """ + self.client = client + self.environment = environment + self.data_provider = data_provider + self.__get_all_sections = get_all_sections_callback + + def check_missing_section_ids(self, project_id: int, suite_id: int, suites_data: TestRailSuite) -> Tuple[bool, str]: + """ + Check what section id's are missing in DataProvider. + + :param project_id: project_id + :param suite_id: suite_id + :param suites_data: Test suite data from provider + :returns: Tuple with list missing section ID and error string. + """ + returned_sections, error_message = self.__get_all_sections(project_id, suite_id) + if not error_message: + missing_test_sections = False + sections_by_id = {section["id"]: section for section in returned_sections} + sections_by_name = {section["name"]: section for section in returned_sections} + section_data = [] + for section in suites_data.testsections: + if self.environment.section_id: + if section.section_id in sections_by_id.keys(): + section_json = sections_by_id[section.section_id] + section_data.append( + { + "section_id": section_json["id"], + "suite_id": section_json["suite_id"], + "name": section_json["name"], + } + ) + else: + missing_test_sections = True + if section.name in sections_by_name.keys(): + section_json = sections_by_name[section.name] + section_data.append( + { + "section_id": section_json["id"], + "suite_id": section_json["suite_id"], + "name": section_json["name"], + } + ) + else: + missing_test_sections = True + self.data_provider.update_data(section_data=section_data) + return missing_test_sections, error_message + else: + return False, error_message + + def add_sections(self, project_id: int, verify_callback) -> Tuple[List[Dict], str]: + """ + Add sections that doesn't have ID in DataProvider. 
+ Runs update_data in data_provider for successfully created resources. + + :param project_id: project_id + :param verify_callback: callback to verify returned data matches request + :returns: Tuple with list of dict created resources and error string. + """ + add_sections_data = self.data_provider.add_sections_data() + responses = [] + error_message = "" + for body in add_sections_data: + response = self.client.send_post(f"add_section/{project_id}", body) + if not response.error_message: + responses.append(response) + if not verify_callback(body, response.response_text): + responses.append(response) + error_message = FAULT_MAPPING["data_verification_error"] + break + else: + error_message = response.error_message + break + returned_resources = [ + { + "section_id": response.response_text["id"], + "suite_id": response.response_text["suite_id"], + "name": response.response_text["name"], + } + for response in responses + ] + ( + self.data_provider.update_data(section_data=returned_resources) + if len(returned_resources) > 0 + else "Update skipped" + ) + return returned_resources, error_message + + def delete_sections(self, added_sections: List[Dict]) -> Tuple[List, str]: + """ + Delete section given add_sections response + + :param added_sections: List of sections to delete + :returns: Tuple with dict created resources and error string. + """ + responses = [] + error_message = "" + for section in added_sections: + response = self.client.send_post(f"delete_section/{section['section_id']}", payload={}) + if not response.error_message: + responses.append(response.response_text) + else: + error_message = response.error_message + break + return responses, error_message diff --git a/trcli/api/suite_handler.py b/trcli/api/suite_handler.py new file mode 100644 index 0000000..40beaa5 --- /dev/null +++ b/trcli/api/suite_handler.py @@ -0,0 +1,163 @@ +""" +SuiteHandler - Handles all suite-related operations for TestRail + +It manages all suite operations including: +- Checking if suites exist +- Resolving suite IDs by name +- Getting suite IDs for projects +- Adding new suites +- Deleting suites +""" + +from beartype.typing import List, Tuple, Dict + +from trcli.api.api_client import APIClient +from trcli.cli import Environment +from trcli.constants import FAULT_MAPPING +from trcli.data_providers.api_data_provider import ApiDataProvider + + +class SuiteHandler: + """Handles all suite-related operations for TestRail""" + + def __init__( + self, + client: APIClient, + environment: Environment, + data_provider: ApiDataProvider, + get_all_suites_callback, + ): + """ + Initialize the SuiteHandler + + :param client: APIClient instance for making API calls + :param environment: Environment configuration + :param data_provider: Data provider for updating suite data + :param get_all_suites_callback: Callback to fetch all suites from TestRail + """ + self.client = client + self.environment = environment + self.data_provider = data_provider + self.__get_all_suites = get_all_suites_callback + + def check_suite_id(self, project_id: int, suite_id: int) -> Tuple[bool, str]: + """ + Check if suite exists using get_suites endpoint + + :param project_id: project id + :param suite_id: suite id to check + :returns: Tuple (exists, error_message) + """ + suites_data, error = self.__get_all_suites(project_id) + if not error: + available_suites = [suite for suite in suites_data if suite["id"] == suite_id] + return ( + (True, "") + if len(available_suites) > 0 + else (False, 
FAULT_MAPPING["missing_suite"].format(suite_id=suite_id)) + ) + else: + return None, error + + def resolve_suite_id_using_name(self, project_id: int, suite_name: str) -> Tuple[int, str]: + """ + Get suite ID matching suite name or returns -1 if unable to match any suite. + + :param project_id: project id + :param suite_name: suite name to match + :returns: tuple with id of the suite and error message + """ + suite_id = -1 + suites_data, error = self.__get_all_suites(project_id) + if not error: + for suite in suites_data: + if suite["name"] == suite_name: + suite_id = suite["id"] + self.data_provider.update_data([{"suite_id": suite["id"], "name": suite["name"]}]) + break + return ( + (suite_id, "") + if suite_id != -1 + else (-1, FAULT_MAPPING["missing_suite_by_name"].format(suite_name=suite_name)) + ) + else: + return -1, error + + def get_suite_ids(self, project_id: int) -> Tuple[List[int], str]: + """ + Get suite IDs for requested project_id. + + :param project_id: project id + :returns: tuple with list of suite ids and error string + """ + available_suites = [] + returned_resources = [] + suites_data, error = self.__get_all_suites(project_id) + if not error: + for suite in suites_data: + available_suites.append(suite["id"]) + returned_resources.append( + { + "suite_id": suite["id"], + "name": suite["name"], + } + ) + if returned_resources: + self.data_provider.update_data(suite_data=returned_resources) + else: + print("Update skipped") + return ( + (available_suites, "") + if len(available_suites) > 0 + else ([], FAULT_MAPPING["no_suites_found"].format(project_id=project_id)) + ) + else: + return [], error + + def add_suites(self, project_id: int, verify_callback) -> Tuple[List[Dict], str]: + """ + Adds suites that doesn't have ID's in DataProvider. + Runs update_data in data_provider for successfully created resources. + + :param project_id: project_id + :param verify_callback: callback to verify returned data matches request + :returns: Tuple with list of dict created resources and error string. + """ + add_suite_data = self.data_provider.add_suites_data() + responses = [] + error_message = "" + for body in add_suite_data: + response = self.client.send_post(f"add_suite/{project_id}", body) + if not response.error_message: + responses.append(response) + if not verify_callback(body, response.response_text): + responses.append(response) + error_message = FAULT_MAPPING["data_verification_error"] + break + else: + error_message = response.error_message + break + + returned_resources = [ + { + "suite_id": response.response_text["id"], + "name": response.response_text["name"], + } + for response in responses + ] + ( + self.data_provider.update_data(suite_data=returned_resources) + if len(returned_resources) > 0 + else "Update skipped" + ) + return returned_resources, error_message + + def delete_suite(self, suite_id: int) -> Tuple[dict, str]: + """ + Delete suite given suite id + + :param suite_id: suite id + :returns: Tuple with dict created resources and error string. 
+ """ + response = self.client.send_post(f"delete_suite/{suite_id}", payload={}) + return response.response_text, response.error_message From 63634a7a09c9edde1a3d73a95929594b1387fad9 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 23 Dec 2025 18:02:23 +0800 Subject: [PATCH 25/33] TRCLI-213 Update affected unit tests --- tests/test_api_request_handler_labels.py | 50 +++++++++++++++--------- 1 file changed, 31 insertions(+), 19 deletions(-) diff --git a/tests/test_api_request_handler_labels.py b/tests/test_api_request_handler_labels.py index 702f46d..9a731c6 100644 --- a/tests/test_api_request_handler_labels.py +++ b/tests/test_api_request_handler_labels.py @@ -286,8 +286,10 @@ def setup_method(self): def test_add_labels_to_cases_success(self): """Test successful addition of labels to test cases""" with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( - self.labels_handler, "get_labels" - ) as mock_get_labels, patch.object(self.labels_handler, "add_label") as mock_add_label, patch.object( + self.labels_handler.label_manager, "get_labels" + ) as mock_get_labels, patch.object( + self.labels_handler.label_manager, "add_label" + ) as mock_add_label, patch.object( self.labels_handler.client, "send_get" ) as mock_send_get, patch.object( self.labels_handler.client, "send_post" @@ -307,12 +309,16 @@ def test_add_labels_to_cases_success(self): # Mock get_case responses mock_send_get.side_effect = [ - MagicMock(status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 1"}), # Case 1 - MagicMock(status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 2"}), # Case 2 + MagicMock( + status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 1"}, error_message="" + ), # Case 1 + MagicMock( + status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 2"}, error_message="" + ), # Case 2 ] # Mock update_cases batch response (for multiple cases) - mock_send_post.return_value = MagicMock(status_code=200) + mock_send_post.return_value = MagicMock(status_code=200, error_message="") # Test the method results, error_message = self.labels_handler.add_labels_to_cases( @@ -340,8 +346,10 @@ def test_add_labels_to_cases_success(self): def test_add_labels_to_cases_single_case(self): """Test adding labels to a single test case using update_case endpoint""" with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( - self.labels_handler, "get_labels" - ) as mock_get_labels, patch.object(self.labels_handler, "add_label") as mock_add_label, patch.object( + self.labels_handler.label_manager, "get_labels" + ) as mock_get_labels, patch.object( + self.labels_handler.label_manager, "add_label" + ) as mock_add_label, patch.object( self.labels_handler.client, "send_get" ) as mock_send_get, patch.object( self.labels_handler.client, "send_post" @@ -358,11 +366,11 @@ def test_add_labels_to_cases_single_case(self): # Mock get_case response mock_send_get.return_value = MagicMock( - status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 1"} + status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 1"}, error_message="" ) # Mock update_case response (for single case) - mock_send_post.return_value = MagicMock(status_code=200) + mock_send_post.return_value = MagicMock(status_code=200, error_message="") # Test the method with single case results, error_message = self.labels_handler.add_labels_to_cases( @@ -389,8 
+397,10 @@ def test_add_labels_to_cases_single_case(self): def test_add_labels_to_cases_existing_label(self): """Test adding labels when label already exists""" with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( - self.labels_handler, "get_labels" - ) as mock_get_labels, patch.object(self.labels_handler, "add_label") as mock_add_label, patch.object( + self.labels_handler.label_manager, "get_labels" + ) as mock_get_labels, patch.object( + self.labels_handler.label_manager, "add_label" + ) as mock_add_label, patch.object( self.labels_handler.client, "send_get" ) as mock_send_get, patch.object( self.labels_handler.client, "send_post" @@ -404,11 +414,11 @@ def test_add_labels_to_cases_existing_label(self): # Mock get_case response mock_send_get.return_value = MagicMock( - status_code=200, response_text={"labels": [], "section_id": 1, "title": "Case 1"} + status_code=200, response_text={"labels": [], "section_id": 1, "title": "Case 1"}, error_message="" ) # Mock add_label_to_case response - mock_send_post.return_value = MagicMock(status_code=200) + mock_send_post.return_value = MagicMock(status_code=200, error_message="") # Test the method results, error_message = self.labels_handler.add_labels_to_cases( @@ -428,7 +438,7 @@ def test_add_labels_to_cases_existing_label(self): def test_add_labels_to_cases_max_labels_reached(self): """Test handling of maximum labels limit (10)""" with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( - self.labels_handler, "get_labels" + self.labels_handler.label_manager, "get_labels" ) as mock_get_labels, patch.object(self.labels_handler.client, "send_get") as mock_send_get: # Mock __get_all_cases response (case exists) @@ -439,7 +449,9 @@ def test_add_labels_to_cases_max_labels_reached(self): # Mock get_case response with 10 existing labels (different from test-label) existing_labels = [{"id": i, "title": f"label-{i}"} for i in range(1, 11)] - mock_send_get.return_value = MagicMock(status_code=200, response_text={"labels": existing_labels}) + mock_send_get.return_value = MagicMock( + status_code=200, response_text={"labels": existing_labels}, error_message="" + ) # Test the method results, error_message = self.labels_handler.add_labels_to_cases( @@ -459,7 +471,7 @@ def test_add_labels_to_cases_max_labels_reached(self): def test_add_labels_to_cases_label_already_on_case(self): """Test handling when label already exists on case""" with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( - self.labels_handler, "get_labels" + self.labels_handler.label_manager, "get_labels" ) as mock_get_labels, patch.object(self.labels_handler.client, "send_get") as mock_send_get: # Mock __get_all_cases response (case exists) @@ -470,7 +482,7 @@ def test_add_labels_to_cases_label_already_on_case(self): # Mock get_case response with the label already present mock_send_get.return_value = MagicMock( - status_code=200, response_text={"labels": [{"id": 5, "title": "test-label"}]} + status_code=200, response_text={"labels": [{"id": 5, "title": "test-label"}]}, error_message="" ) # Test the method @@ -540,7 +552,7 @@ def test_get_cases_by_label_with_label_ids(self): def test_get_cases_by_label_with_title(self): """Test getting cases by label title""" with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( - self.labels_handler, "get_labels" + self.labels_handler.label_manager, 
"get_labels" ) as mock_get_labels: # Mock labels response @@ -568,7 +580,7 @@ def test_get_cases_by_label_with_title(self): def test_get_cases_by_label_title_not_found(self): """Test getting cases by non-existent label title""" with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( - self.labels_handler, "get_labels" + self.labels_handler.label_manager, "get_labels" ) as mock_get_labels: # Mock labels response (no matching label) From 101d123c1ec0bfa3c32e962a480f402007afb8ad Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 20 Jan 2026 17:45:15 +0800 Subject: [PATCH 26/33] TRCLI-5: Updated centralized core logging --- trcli/api/api_client.py | 4 +-- trcli/cli.py | 69 ++++++++++++++++++++++++++++++++++++++--- 2 files changed, 67 insertions(+), 6 deletions(-) diff --git a/trcli/api/api_client.py b/trcli/api/api_client.py index 1742b38..3f26c16 100644 --- a/trcli/api/api_client.py +++ b/trcli/api/api_client.py @@ -224,7 +224,7 @@ def __get_proxy_headers(self) -> Dict[str, str]: # Add Proxy-Authorization header headers["Proxy-Authorization"] = f"Basic {user_pass_encoded}" - print(f"Proxy authentication header added: {headers['Proxy-Authorization']}") + self.verbose_logging_function("Proxy authentication configured") return headers @@ -256,7 +256,7 @@ def _get_proxies_for_request(self, url: str) -> Dict[str, str]: if isinstance(self.noproxy, str): self.noproxy = self.noproxy.split(",") if host in self.noproxy: - print(f"Bypassing proxy for host: {host}") + self.verbose_logging_function(f"Bypassing proxy for host: {host}") return None # Ensure proxy has a scheme (either http or https) diff --git a/trcli/cli.py b/trcli/cli.py index 6d3e8ba..58d84a3 100755 --- a/trcli/cli.py +++ b/trcli/cli.py @@ -20,6 +20,10 @@ from trcli.data_classes.data_parsers import FieldsParser from trcli.settings import DEFAULT_API_CALL_TIMEOUT, DEFAULT_BATCH_SIZE +# Import structured logging infrastructure +from trcli.logging import get_logger +from trcli.logging.config import LoggingConfig + CONTEXT_SETTINGS = dict(auto_envvar_prefix="TR_CLI") trcli_folder = Path(__file__).parent @@ -79,6 +83,20 @@ def __init__(self, cmd="parse_junit"): self.proxy_user = None self.parallel_pagination = None + # Structured logger - lazy initialization + self._logger = None + + @property + def logger(self): + """Get structured logger for this environment. + + Lazy initialization - logger is created on first access. + Returns a StructuredLogger instance for the current command. + """ + if self._logger is None: + self._logger = get_logger(f"trcli.{self.cmd}") + return self._logger + @property def case_fields(self): return self._case_fields @@ -104,24 +122,57 @@ def result_fields(self, result_fields: Union[List[str], dict]): self._result_fields = fields_dict def log(self, msg: str, new_line=True, *args): - """Logs a message to stdout only if silent mode is disabled.""" + """Logs a message to stdout only if silent mode is disabled. + + Also logs to structured logger for observability. + """ if not self.silent: if args: msg %= args click.echo(msg, file=sys.stdout, nl=new_line) + # Also log to structured logger (backward compatible) + try: + self.logger.info(msg) + except Exception: + # Silently fail if structured logging has issues + pass + def vlog(self, msg: str, *args): - """Logs a message to stdout only if the verbose option is enabled.""" + """Logs a message to stdout only if the verbose option is enabled. + + Also logs to structured logger at DEBUG level. 
+ """ if self.verbose: - self.log(msg, *args) + if args: + msg %= args + click.echo(msg, file=sys.stdout) + + # Also log to structured logger + try: + self.logger.debug(msg) + except Exception: + # Silently fail if structured logging has issues + pass @staticmethod def elog(msg: str, new_line=True, *args): - """Logs a message to stderr.""" + """Logs a message to stderr. + + Also logs to structured logger at ERROR level. + """ if args: msg %= args click.echo(msg, file=sys.stderr, nl=new_line) + # Also log to structured logger + try: + error_logger = get_logger("trcli.error") + error_logger.error(msg) + except Exception: + # Silently fail if structured logging has issues + pass + def get_progress_bar(self, results_amount: int, prefix: str): disabled = True if self.silent else False return tqdm( @@ -361,3 +412,13 @@ def cli(environment: Environment, context: click.core.Context, *args, **kwargs): environment.parse_config_file(context) environment.set_parameters(context) + + # Initialize structured logging system + # This reads configuration from: + # 1. Environment variables (TRCLI_LOG_LEVEL, TRCLI_LOG_FORMAT, etc.) + # 2. Config file (if 'logging' section exists) + try: + LoggingConfig.setup_logging(environment.config) + except Exception as e: + # Fallback to stderr if logging setup fails - don't block execution + click.echo(f"Warning: Failed to initialize logging: {e}", file=sys.stderr) From 7fa293d2f874b043775337f0d506727d1b47d6c0 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 20 Jan 2026 17:46:54 +0800 Subject: [PATCH 27/33] TRCLI-5: Added centralized core logging module with unit tests, updated README for logging and observability --- README.md | 158 +++++++ tests/test_logging/__init__.py | 1 + tests/test_logging/test_config.py | 234 +++++++++++ tests/test_logging/test_file_handler.py | 261 ++++++++++++ tests/test_logging/test_integration.py | 351 ++++++++++++++++ tests/test_logging/test_structured_logger.py | 369 +++++++++++++++++ trcli/logging/__init__.py | 56 +++ trcli/logging/config.py | 305 ++++++++++++++ trcli/logging/file_handler.py | 255 ++++++++++++ trcli/logging/structured_logger.py | 409 +++++++++++++++++++ 10 files changed, 2399 insertions(+) create mode 100644 tests/test_logging/__init__.py create mode 100644 tests/test_logging/test_config.py create mode 100644 tests/test_logging/test_file_handler.py create mode 100644 tests/test_logging/test_integration.py create mode 100644 tests/test_logging/test_structured_logger.py create mode 100644 trcli/logging/__init__.py create mode 100644 trcli/logging/config.py create mode 100644 trcli/logging/file_handler.py create mode 100644 trcli/logging/structured_logger.py diff --git a/README.md b/README.md index 393f3d6..1d19f5d 100644 --- a/README.md +++ b/README.md @@ -1778,6 +1778,164 @@ You can also enable this feature globally by setting `ENABLE_PARALLEL_PAGINATION - This is an experimental feature - please report any issues you encounter +Logging and Observability +-------------------------- + +The TestRail CLI includes a comprehensive logging infrastructure designed specifically for CLI tools. It provides structured logging with automatic credential sanitization, making it easy to integrate with CI/CD pipelines and observability tools. 
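+
+Structured logs go to stderr by default while the CLI's regular console output stays on stdout, so CI jobs can collect the two streams independently. A minimal sketch (the file names are illustrative, not part of the CLI):
+
+```bash
+# Keep console output in the job log, collect structured logs separately
+export TRCLI_LOG_FORMAT=json
+trcli parse_junit --file report.xml > console.txt 2> trcli_logs.ndjson
+
+# Surface any ERROR-level entries afterwards (assumes jq is available)
+jq 'select(.level == "ERROR")' trcli_logs.ndjson
+```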
+ +### Key Features + +- **Structured Logging**: JSON (NDJSON) and text formats for machine-parseable logs +- **Credential Sanitization**: Automatic masking of sensitive fields (passwords, API keys, tokens) +- **File Rotation**: Automatic log rotation based on file size with configurable backup counts +- **Flexible Configuration**: CLI flags, environment variables, YAML config files + +#### Automatic Logging + +TRCLI now automatically logs all operations using structured logging. Simply configure using environment variables: + +```bash +# Enable JSON logs on stderr (default) +export TRCLI_LOG_LEVEL=INFO +export TRCLI_LOG_FORMAT=json + +# Run any TRCLI command - logging happens automatically +trcli parse_junit --file report.xml --project "My Project" \ + --host https://example.testrail.io --username user --password pass +``` + +#### Direct API Usage (Advanced) + +For custom integrations or scripts: + +```python +from trcli.cli import Environment +from trcli.logging.config import LoggingConfig + +# Initialize logging (reads from env vars) +LoggingConfig.setup_logging() + +# Use TRCLI Environment with automatic logging +env = Environment("my_command") +env.logger.info("Operation completed", items=100, duration=1.5) + +# Or get a standalone logger +from trcli.logging import get_logger +logger = get_logger("trcli.custom") +logger.info("Custom operation", status="success") +``` + +**JSON Output:** +```json +{"timestamp":"2024-01-20T10:15:30Z","level":"INFO","logger":"trcli.module","message":"Operation completed","items":100,"duration":1.5} +``` + +#### Configuration + +Configure logging using environment variables: + +```bash +# Set log level (DEBUG, INFO, WARNING, ERROR, CRITICAL) +export TRCLI_LOG_LEVEL=INFO + +# Set output format (json or text) +export TRCLI_LOG_FORMAT=json + +# Set output destination (stderr, stdout, or file) +export TRCLI_LOG_OUTPUT=stderr + +# For file output, specify path and rotation settings +export TRCLI_LOG_FILE=/var/log/trcli/app.log +export TRCLI_LOG_MAX_BYTES=10485760 # 10MB +export TRCLI_LOG_BACKUP_COUNT=5 + +# Run TRCLI +trcli parse_junit --file report.xml +``` + +Or use a YAML configuration file: + +```yaml +# trcli_config.yml +logging: + level: INFO + format: json + output: file + file_path: /var/log/trcli/app.log + max_bytes: 10485760 + backup_count: 5 +``` + +Then reference it: +```bash +trcli -c trcli_config.yml parse_junit --file report.xml +``` + +#### Automatic Credential Sanitization + +Credentials are automatically sanitized in logs to prevent security leaks: + +```python +logger.info("Auth configured", + password="secret123", + api_key="sk_live_abc123", + token="bearer_xyz" +) + +# Output automatically sanitizes: +# {"password":"se***23","api_key":"sk***23","token":"be***yz"} +``` + +**Protected Fields:** +- `password`, `passwd`, `pwd` +- `api_key`, `apikey`, `key` +- `token`, `auth_token`, `access_token` +- `secret`, `credential` + +#### CI/CD Integration + +Output JSON logs for easy parsing in CI/CD pipelines: + +```bash +# Output JSON logs for parsing +export TRCLI_LOG_FORMAT=json +trcli parse_junit --file report.xml 2>&1 | tee logs.json + +# Parse logs with jq +cat logs.json | jq 'select(.level == "ERROR")' +cat logs.json | jq 'select(.duration_seconds > 30)' +cat logs.json | jq '.run_id' | sort | uniq +``` + +#### File Logging with Rotation + +For long-running processes, enable file logging with automatic rotation: + +```bash +# Via environment variables +export TRCLI_LOG_FILE=/var/log/trcli/app.log +export TRCLI_LOG_MAX_BYTES=10485760 # 10MB 
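+# Rotated logs are kept next to the active file as app.log.1, app.log.2, ... up to the backup count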
+export TRCLI_LOG_BACKUP_COUNT=5 + +trcli parse_junit --file report.xml +``` + +**Benefits:** +- Long-running processes don't fill disk +- Automatic cleanup of old logs +- Easy log management + +### Environment Variables Reference + +| Variable | Description | Values | Default | +|----------|-------------|--------|---------| +| `TRCLI_LOG_LEVEL` | Minimum log level | DEBUG, INFO, WARNING, ERROR, CRITICAL | INFO | +| `TRCLI_LOG_FORMAT` | Output format | json, text | json | +| `TRCLI_LOG_OUTPUT` | Output destination | stderr, stdout, file | stderr | +| `TRCLI_LOG_FILE` | Log file path (when output=file) | File path | None | +| `TRCLI_LOG_MAX_BYTES` | Max file size before rotation | Bytes | 10485760 | +| `TRCLI_LOG_BACKUP_COUNT` | Number of backup files to keep | Integer | 5 | + Contributing ------------ Interested in contributing and helping improve the TestRail CLI client? Please start by looking into [CONTRIBUTING.md](https://github.com/gurock/trcli/blob/main/CONTRIBUTING.md) and creating an issue. diff --git a/tests/test_logging/__init__.py b/tests/test_logging/__init__.py new file mode 100644 index 0000000..1f47ca2 --- /dev/null +++ b/tests/test_logging/__init__.py @@ -0,0 +1 @@ +"""Tests for TRCLI logging infrastructure""" diff --git a/tests/test_logging/test_config.py b/tests/test_logging/test_config.py new file mode 100644 index 0000000..2c77280 --- /dev/null +++ b/tests/test_logging/test_config.py @@ -0,0 +1,234 @@ +""" +Unit tests for config.py + +Tests configuration loading functionality including: +- Environment variable loading +- File configuration +- Variable substitution +- Validation +""" + +import unittest +import tempfile +import os +from pathlib import Path +from trcli.logging.config import LoggingConfig + + +class TestLoggingConfig(unittest.TestCase): + """Test LoggingConfig class""" + + def setUp(self): + """Set up test fixtures""" + self.original_env = os.environ.copy() + self.temp_dir = tempfile.mkdtemp() + + def tearDown(self): + """Restore environment""" + os.environ.clear() + os.environ.update(self.original_env) + + # Clean up temp files + import shutil + + shutil.rmtree(self.temp_dir, ignore_errors=True) + + def test_default_config(self): + """Test default configuration values""" + config = LoggingConfig.load() + + self.assertEqual(config["level"], "INFO") + self.assertEqual(config["format"], "json") + self.assertEqual(config["output"], "stderr") + self.assertIsNone(config["file_path"]) + self.assertEqual(config["max_bytes"], 10485760) + self.assertEqual(config["backup_count"], 5) + + def test_env_var_overrides(self): + """Test environment variable overrides""" + os.environ["TRCLI_LOG_LEVEL"] = "DEBUG" + os.environ["TRCLI_LOG_FORMAT"] = "text" + os.environ["TRCLI_LOG_OUTPUT"] = "stdout" + os.environ["TRCLI_LOG_FILE"] = "/tmp/test.log" + + config = LoggingConfig.load() + + self.assertEqual(config["level"], "DEBUG") + self.assertEqual(config["format"], "text") + self.assertEqual(config["output"], "stdout") + self.assertEqual(config["file_path"], "/tmp/test.log") + + def test_env_var_numeric_overrides(self): + """Test numeric environment variable overrides""" + os.environ["TRCLI_LOG_MAX_BYTES"] = "5242880" + os.environ["TRCLI_LOG_BACKUP_COUNT"] = "10" + + config = LoggingConfig.load() + + self.assertEqual(config["max_bytes"], 5242880) + self.assertEqual(config["backup_count"], 10) + + def test_env_var_substitution(self): + """Test environment variable substitution in config values""" + os.environ["ENVIRONMENT"] = "production" + os.environ["TRCLI_LOG_FILE"] = 
"/var/log/${ENVIRONMENT}/trcli.log" + + config = LoggingConfig.load() + + self.assertEqual(config["file_path"], "/var/log/production/trcli.log") + + def test_env_var_substitution_missing_var(self): + """Test that missing env vars are left unchanged""" + os.environ["TRCLI_LOG_FILE"] = "/var/log/${MISSING_VAR}/trcli.log" + + config = LoggingConfig.load() + + # Should remain unchanged + self.assertEqual(config["file_path"], "/var/log/${MISSING_VAR}/trcli.log") + + def test_yaml_config_file(self): + """Test loading from YAML config file""" + config_file = Path(self.temp_dir) / "config.yml" + config_file.write_text( + """ +logging: + level: DEBUG + format: text + output: file + file_path: /tmp/trcli_test.log + max_bytes: 1048576 + backup_count: 3 +""" + ) + + config = LoggingConfig.load(str(config_file)) + + self.assertEqual(config["level"], "DEBUG") + self.assertEqual(config["format"], "text") + self.assertEqual(config["output"], "file") + self.assertEqual(config["file_path"], "/tmp/trcli_test.log") + self.assertEqual(config["max_bytes"], 1048576) + self.assertEqual(config["backup_count"], 3) + + def test_simple_config_file(self): + """Test loading from simple key=value config file""" + config_file = Path(self.temp_dir) / "config.txt" + config_file.write_text( + """ +# This is a comment +level=DEBUG +format=text +output=file +file_path=/tmp/test.log +max_bytes=1048576 +backup_count=3 +""" + ) + + config = LoggingConfig.load(str(config_file)) + + # Should fall back to simple parser (PyYAML might not be installed in tests) + # Values should still be loaded + self.assertIn("level", config) + + def test_config_precedence(self): + """Test that env vars override config file""" + config_file = Path(self.temp_dir) / "config.yml" + config_file.write_text( + """ +logging: + level: INFO + format: json +""" + ) + + os.environ["TRCLI_LOG_LEVEL"] = "DEBUG" + + config = LoggingConfig.load(str(config_file)) + + # Env var should override file + self.assertEqual(config["level"], "DEBUG") + # File value should be used for format + self.assertEqual(config["format"], "json") + + def test_nonexistent_config_file(self): + """Test that nonexistent config file doesn't cause error""" + config = LoggingConfig.load("/nonexistent/config.yml") + + # Should return defaults + self.assertEqual(config["level"], "INFO") + + def test_invalid_config_file(self): + """Test that invalid config file doesn't crash""" + config_file = Path(self.temp_dir) / "invalid.yml" + config_file.write_text("}{invalid yaml][") + + config = LoggingConfig.load(str(config_file)) + + # Should return defaults + self.assertEqual(config["level"], "INFO") + + def test_validate_valid_config(self): + """Test validation of valid config""" + config = {"level": "INFO", "format": "json", "output": "stderr"} + + is_valid, error = LoggingConfig.validate(config) + + self.assertTrue(is_valid) + self.assertEqual(error, "") + + def test_validate_invalid_level(self): + """Test validation rejects invalid log level""" + config = {"level": "INVALID", "format": "json", "output": "stderr"} + + is_valid, error = LoggingConfig.validate(config) + + self.assertFalse(is_valid) + self.assertIn("Invalid log level", error) + + def test_validate_invalid_format(self): + """Test validation rejects invalid format""" + config = {"level": "INFO", "format": "xml", "output": "stderr"} + + is_valid, error = LoggingConfig.validate(config) + + self.assertFalse(is_valid) + self.assertIn("Invalid format", error) + + def test_validate_invalid_output(self): + """Test validation rejects invalid 
output""" + config = {"level": "INFO", "format": "json", "output": "network"} + + is_valid, error = LoggingConfig.validate(config) + + self.assertFalse(is_valid) + self.assertIn("Invalid output", error) + + def test_validate_file_output_missing_path(self): + """Test validation requires file_path when output is file""" + config = {"level": "INFO", "format": "json", "output": "file", "file_path": None} + + is_valid, error = LoggingConfig.validate(config) + + self.assertFalse(is_valid) + self.assertIn("file_path required", error) + + def test_validate_file_output_with_path(self): + """Test validation passes when file_path is provided""" + config = {"level": "INFO", "format": "json", "output": "file", "file_path": "/tmp/test.log"} + + is_valid, error = LoggingConfig.validate(config) + + self.assertTrue(is_valid) + self.assertEqual(error, "") + + def test_case_insensitive_level(self): + """Test that log level is case-insensitive""" + for level in ["debug", "DEBUG", "Debug"]: + config = {"level": level, "format": "json", "output": "stderr"} + is_valid, error = LoggingConfig.validate(config) + self.assertTrue(is_valid, f"Level '{level}' should be valid") + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_logging/test_file_handler.py b/tests/test_logging/test_file_handler.py new file mode 100644 index 0000000..35c7560 --- /dev/null +++ b/tests/test_logging/test_file_handler.py @@ -0,0 +1,261 @@ +""" +Unit tests for file_handler.py + +Tests file logging functionality including: +- File creation +- Log rotation +- Thread safety +- Cleanup +""" + +import unittest +import tempfile +import shutil +from pathlib import Path +from trcli.logging.file_handler import RotatingFileHandler, MultiFileHandler + + +class TestRotatingFileHandler(unittest.TestCase): + """Test RotatingFileHandler class""" + + def setUp(self): + """Set up test fixtures""" + self.temp_dir = tempfile.mkdtemp() + self.log_file = Path(self.temp_dir) / "test.log" + + def tearDown(self): + """Clean up temp directory""" + shutil.rmtree(self.temp_dir, ignore_errors=True) + + def test_handler_initialization(self): + """Test handler is initialized correctly""" + handler = RotatingFileHandler(str(self.log_file), max_bytes=1024, backup_count=3) + + self.assertEqual(handler.filepath, self.log_file) + self.assertEqual(handler.max_bytes, 1024) + self.assertEqual(handler.backup_count, 3) + + handler.close() + + def test_creates_log_directory(self): + """Test that handler creates log directory if it doesn't exist""" + nested_log = Path(self.temp_dir) / "subdir" / "logs" / "test.log" + + handler = RotatingFileHandler(str(nested_log)) + handler.write("Test message\n") + handler.close() + + # Directory should be created + self.assertTrue(nested_log.parent.exists()) + self.assertTrue(nested_log.exists()) + + def test_writes_to_file(self): + """Test that handler writes to file""" + handler = RotatingFileHandler(str(self.log_file)) + + handler.write("Test message 1\n") + handler.write("Test message 2\n") + handler.close() + + # File should contain messages + content = self.log_file.read_text() + self.assertIn("Test message 1", content) + self.assertIn("Test message 2", content) + + def test_rotation_on_size(self): + """Test that file rotates when max size is reached""" + handler = RotatingFileHandler(str(self.log_file), max_bytes=100, backup_count=3) # Small size for testing + + # Write enough data to trigger rotation + for i in range(20): + handler.write(f"Log message {i} with some content\n") + + handler.close() + + # Backup files 
should exist + backup1 = Path(f"{self.log_file}.1") + self.assertTrue(backup1.exists(), "Backup file .1 should exist") + + def test_backup_count_limit(self): + """Test that only backup_count backup files are kept""" + handler = RotatingFileHandler(str(self.log_file), max_bytes=50, backup_count=2) # Very small for testing + + # Write lots of data to trigger multiple rotations + for i in range(50): + handler.write(f"Log message {i} with content to fill up space\n") + + handler.close() + + # Check backup files + backup1 = Path(f"{self.log_file}.1") + backup2 = Path(f"{self.log_file}.2") + backup3 = Path(f"{self.log_file}.3") + + self.assertTrue(backup1.exists(), "Backup .1 should exist") + self.assertTrue(backup2.exists(), "Backup .2 should exist") + self.assertFalse(backup3.exists(), "Backup .3 should not exist (exceeds backup_count)") + + def test_flush(self): + """Test flush method""" + handler = RotatingFileHandler(str(self.log_file)) + + handler.write("Test message\n") + handler.flush() + + # File should be written immediately + content = self.log_file.read_text() + self.assertIn("Test message", content) + + handler.close() + + def test_context_manager(self): + """Test handler works as context manager""" + with RotatingFileHandler(str(self.log_file)) as handler: + handler.write("Test message\n") + + # File should be closed and content written + self.assertTrue(self.log_file.exists()) + content = self.log_file.read_text() + self.assertIn("Test message", content) + + def test_multiple_writes_same_file(self): + """Test multiple writes to same file""" + handler = RotatingFileHandler(str(self.log_file)) + + messages = [f"Message {i}\n" for i in range(10)] + for msg in messages: + handler.write(msg) + + handler.close() + + content = self.log_file.read_text() + for msg in messages: + self.assertIn(msg.strip(), content) + + def test_unicode_content(self): + """Test writing Unicode content""" + handler = RotatingFileHandler(str(self.log_file)) + + handler.write("Message with émojis: 🎉 ✅ 🚀\n") + handler.write("Chinese: 你好世界\n") + handler.write("Arabic: مرحبا بالعالم\n") + + handler.close() + + content = self.log_file.read_text() + self.assertIn("🎉", content) + self.assertIn("你好世界", content) + self.assertIn("مرحبا بالعالم", content) + + def test_close_multiple_times(self): + """Test that closing multiple times doesn't cause errors""" + handler = RotatingFileHandler(str(self.log_file)) + handler.write("Test\n") + + # Close multiple times + handler.close() + handler.close() + handler.close() + + # Should not raise any errors + + def test_rotation_preserves_content(self): + """Test that rotation preserves all content""" + handler = RotatingFileHandler(str(self.log_file), max_bytes=200, backup_count=5) + + messages = [] + for i in range(30): + msg = f"Log entry number {i} with some additional content\n" + messages.append(msg) + handler.write(msg) + + handler.close() + + # Collect all content from all files + all_content = "" + if self.log_file.exists(): + all_content += self.log_file.read_text() + + for i in range(1, 6): + backup = Path(f"{self.log_file}.{i}") + if backup.exists(): + all_content += backup.read_text() + + # All messages should be somewhere + for msg in messages: + self.assertIn(msg.strip().split()[0], all_content) # At least the beginning + + +class TestMultiFileHandler(unittest.TestCase): + """Test MultiFileHandler class""" + + def setUp(self): + """Set up test fixtures""" + self.temp_dir = tempfile.mkdtemp() + self.log_file1 = Path(self.temp_dir) / "test1.log" + self.log_file2 = 
Path(self.temp_dir) / "test2.log" + + def tearDown(self): + """Clean up""" + shutil.rmtree(self.temp_dir, ignore_errors=True) + + def test_writes_to_multiple_files(self): + """Test that handler writes to all files""" + handler1 = RotatingFileHandler(str(self.log_file1)) + handler2 = RotatingFileHandler(str(self.log_file2)) + + multi = MultiFileHandler([handler1, handler2]) + multi.write("Test message\n") + multi.close() + + # Both files should have the message + content1 = self.log_file1.read_text() + content2 = self.log_file2.read_text() + + self.assertIn("Test message", content1) + self.assertIn("Test message", content2) + + def test_continues_on_handler_failure(self): + """Test that failure in one handler doesn't stop others""" + handler1 = RotatingFileHandler(str(self.log_file1)) + + # Create a handler that will fail on write, not initialization + # We'll create a valid handler, write to it (opens file), then close it to cause write failures + import os + + temp_log = Path(self.temp_dir) / "temp.log" + handler2 = RotatingFileHandler(str(temp_log)) + handler2.write("init\n") # This opens the file + handler2._file.close() # Close the file to cause write failures + + handler3 = RotatingFileHandler(str(self.log_file2)) + + multi = MultiFileHandler([handler1, handler2, handler3]) + multi.write("Test message\n") + multi.close() + + # Files 1 and 3 should still work + self.assertTrue(self.log_file1.exists()) + self.assertTrue(self.log_file2.exists()) + + content1 = self.log_file1.read_text() + content2 = self.log_file2.read_text() + + self.assertIn("Test message", content1) + self.assertIn("Test message", content2) + + def test_context_manager(self): + """Test multi-handler works as context manager""" + handler1 = RotatingFileHandler(str(self.log_file1)) + handler2 = RotatingFileHandler(str(self.log_file2)) + + with MultiFileHandler([handler1, handler2]) as multi: + multi.write("Test message\n") + + # Both files should be closed and written + self.assertTrue(self.log_file1.exists()) + self.assertTrue(self.log_file2.exists()) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_logging/test_integration.py b/tests/test_logging/test_integration.py new file mode 100644 index 0000000..6b3bccb --- /dev/null +++ b/tests/test_logging/test_integration.py @@ -0,0 +1,351 @@ +""" +Integration tests for logging infrastructure + +Tests end-to-end scenarios combining multiple components: +- Logger + File Handler +- Logger + Config +- Complete workflow tests +""" + +import unittest +import tempfile +import shutil +import json +import os +from pathlib import Path +from io import StringIO + +from trcli.logging import get_logger +from trcli.logging.structured_logger import LoggerFactory, LogLevel +from trcli.logging.file_handler import RotatingFileHandler +from trcli.logging.config import LoggingConfig + + +class TestEndToEndLogging(unittest.TestCase): + """Test complete end-to-end logging scenarios""" + + def setUp(self): + """Set up test fixtures""" + self.temp_dir = tempfile.mkdtemp() + self.log_file = Path(self.temp_dir) / "test.log" + LoggerFactory.reset() + + def tearDown(self): + """Clean up""" + shutil.rmtree(self.temp_dir, ignore_errors=True) + LoggerFactory.reset() + + def test_json_logging_to_file(self): + """Test JSON logging to file""" + # Setup file handler + file_handler = RotatingFileHandler(str(self.log_file)) + LoggerFactory.configure(level="INFO", format_style="json", stream=file_handler) + + # Log some messages + logger = get_logger("test.integration") + 
logger.info("Message 1", field1="value1") + logger.info("Message 2", field2="value2") + + file_handler.close() + + # Read and parse JSON logs + content = self.log_file.read_text() + lines = content.strip().split("\n") + + self.assertEqual(len(lines), 2) + + log1 = json.loads(lines[0]) + log2 = json.loads(lines[1]) + + self.assertEqual(log1["message"], "Message 1") + self.assertEqual(log1["field1"], "value1") + self.assertEqual(log2["message"], "Message 2") + self.assertEqual(log2["field2"], "value2") + + def test_text_logging_to_file(self): + """Test text logging to file""" + file_handler = RotatingFileHandler(str(self.log_file)) + LoggerFactory.configure(level="INFO", format_style="text", stream=file_handler) + + logger = get_logger("test.integration") + logger.info("Test message", status="ok") + + file_handler.close() + + content = self.log_file.read_text() + self.assertIn("[INFO]", content) + self.assertIn("Test message", content) + self.assertIn("status=ok", content) + + def test_correlation_id_workflow(self): + """Test complete workflow with correlation IDs""" + output = StringIO() + LoggerFactory.configure(level="INFO", format_style="json", stream=output) + + logger = get_logger("test.workflow") + + # Simulate request workflow + correlation_id = "abc-123-def" + ctx_logger = logger.with_context(correlation_id=correlation_id) + + ctx_logger.info("Request received", endpoint="/api/upload") + ctx_logger.info("Processing started", items=100) + ctx_logger.info("Processing completed", status="success") + + output.seek(0) + lines = output.getvalue().strip().split("\n") + + # All logs should have correlation_id + for line in lines: + log_entry = json.loads(line) + self.assertEqual(log_entry["correlation_id"], correlation_id) + + def test_credential_sanitization_workflow(self): + """Test credential sanitization in realistic scenario""" + output = StringIO() + LoggerFactory.configure(format_style="json", stream=output) + + logger = get_logger("test.security") + + # Simulate authentication flow + logger.info( + "Connecting to API", + host="api.example.com", + username="admin", + password="secretPassword123", # Should be sanitized + api_key="sk_live_abc123def456", # Should be sanitized + ) + + logger.info("API request", endpoint="/api/data", token="bearer_xyz789") # Should be sanitized + + output.seek(0) + content = output.getvalue() + + # Ensure credentials are not exposed + self.assertNotIn("secretPassword123", content) + self.assertNotIn("sk_live_abc123def456", content) + self.assertNotIn("bearer_xyz789", content) + + # But ensure masking indicators are present + self.assertIn("***", content) + + def test_multi_logger_scenario(self): + """Test multiple loggers with different configurations""" + output = StringIO() + LoggerFactory.configure(level="INFO", format_style="json", stream=output) + + api_logger = get_logger("test.api") + db_logger = get_logger("test.database") + auth_logger = get_logger("test.auth") + + api_logger.info("API request", endpoint="/api/users") + db_logger.info("Database query", table="users", operation="SELECT") + auth_logger.info("User login", user="admin") + + output.seek(0) + lines = output.getvalue().strip().split("\n") + + self.assertEqual(len(lines), 3) + + loggers_used = [json.loads(line)["logger"] for line in lines] + self.assertIn("test.api", loggers_used) + self.assertIn("test.database", loggers_used) + self.assertIn("test.auth", loggers_used) + + def test_log_rotation_workflow(self): + """Test log rotation in realistic scenario""" + file_handler = 
RotatingFileHandler(str(self.log_file), max_bytes=500, backup_count=3) # Small for testing + + LoggerFactory.configure(format_style="json", stream=file_handler) + logger = get_logger("test.rotation") + + # Write enough logs to trigger rotation + for i in range(50): + logger.info(f"Log entry {i}", entry_num=i, data="x" * 20) + + file_handler.close() + + # Check that rotation occurred + main_log = self.log_file + backup1 = Path(f"{self.log_file}.1") + + self.assertTrue(main_log.exists()) + self.assertTrue(backup1.exists(), "Rotation should have created backup files") + + def test_error_handling_workflow(self): + """Test error logging with exception info""" + output = StringIO() + LoggerFactory.configure(format_style="json", stream=output) + + logger = get_logger("test.errors") + + logger.info("Starting operation", operation_id=123) + + try: + # Simulate error + result = 1 / 0 + except ZeroDivisionError: + logger.error("Operation failed", exc_info=True, operation_id=123) + + logger.info("Cleanup completed", operation_id=123) + + output.seek(0) + lines = output.getvalue().strip().split("\n") + + # Find the error log + error_log = None + for line in lines: + log_entry = json.loads(line) + if log_entry["level"] == "ERROR": + error_log = log_entry + break + + self.assertIsNotNone(error_log) + self.assertIn("exception", error_log) + self.assertEqual(error_log["exception"]["type"], "ZeroDivisionError") + + +class TestConfigIntegration(unittest.TestCase): + """Test integration with configuration system""" + + def setUp(self): + """Set up test fixtures""" + self.temp_dir = tempfile.mkdtemp() + self.original_env = os.environ.copy() + LoggerFactory.reset() + + def tearDown(self): + """Clean up""" + shutil.rmtree(self.temp_dir, ignore_errors=True) + os.environ.clear() + os.environ.update(self.original_env) + LoggerFactory.reset() + + def test_config_file_integration(self): + """Test loading configuration and using it""" + config_file = Path(self.temp_dir) / "config.yml" + log_file = Path(self.temp_dir) / "app.log" + + config_file.write_text( + f""" +logging: + level: DEBUG + format: json + output: file + file_path: {log_file} + max_bytes: 10485760 + backup_count: 5 +""" + ) + + # Load and apply configuration + LoggingConfig.setup_logging(str(config_file)) + + # Use logger + logger = get_logger("test.config") + logger.debug("Debug message") + logger.info("Info message") + + # Close any open file handlers + for handler_logger in LoggerFactory._loggers.values(): + if hasattr(handler_logger.output_stream, "close"): + handler_logger.output_stream.close() + + # Check that logs were written + self.assertTrue(log_file.exists()) + + content = log_file.read_text() + self.assertIn("Debug message", content) + self.assertIn("Info message", content) + + def test_env_var_integration(self): + """Test environment variable configuration""" + log_file = Path(self.temp_dir) / "env_test.log" + + os.environ["TRCLI_LOG_LEVEL"] = "WARNING" + os.environ["TRCLI_LOG_FORMAT"] = "json" + os.environ["TRCLI_LOG_OUTPUT"] = "file" + os.environ["TRCLI_LOG_FILE"] = str(log_file) + + LoggingConfig.setup_logging() + + logger = get_logger("test.env") + logger.debug("Debug - should not appear") + logger.info("Info - should not appear") + logger.warning("Warning - should appear") + logger.error("Error - should appear") + + # Close file handler + for handler_logger in LoggerFactory._loggers.values(): + if hasattr(handler_logger.output_stream, "close"): + handler_logger.output_stream.close() + + content = log_file.read_text() + + # Only WARNING 
and above should be logged + self.assertNotIn("Debug", content) + self.assertNotIn("Info", content) + self.assertIn("Warning", content) + self.assertIn("Error", content) + + +class TestRealWorldScenarios(unittest.TestCase): + """Test real-world usage scenarios""" + + def setUp(self): + """Set up test fixtures""" + self.output = StringIO() + LoggerFactory.reset() + LoggerFactory.configure(format_style="json", stream=self.output) + + def tearDown(self): + """Clean up""" + LoggerFactory.reset() + + def test_ci_cd_pipeline_scenario(self): + """Test typical CI/CD usage""" + logger = get_logger("trcli.ci") + + # Simulate test result upload + logger.info("Test results processing started", run_id=12345, project="MyProject", total_tests=150) + + logger.info("Uploading to TestRail", endpoint="https://example.testrail.com", run_id=12345) + + logger.info("Upload completed", run_id=12345, uploaded=150, failed=0, duration_seconds=12.5, status="success") + + self.output.seek(0) + logs = [json.loads(line) for line in self.output.getvalue().strip().split("\n")] + + # Verify logs are parseable and contain expected data + self.assertEqual(len(logs), 3) + self.assertTrue(all(log["level"] == "INFO" for log in logs)) + self.assertEqual(logs[-1]["status"], "success") + + def test_concurrent_uploads_scenario(self): + """Test handling concurrent operations with correlation IDs""" + logger = get_logger("trcli.concurrent") + + # Simulate two concurrent uploads + upload1_logger = logger.with_context(correlation_id="upload-1", run_id=100) + upload2_logger = logger.with_context(correlation_id="upload-2", run_id=200) + + upload1_logger.info("Upload started") + upload2_logger.info("Upload started") + upload1_logger.info("Upload progress", percent=50) + upload2_logger.info("Upload progress", percent=30) + upload1_logger.info("Upload completed") + upload2_logger.info("Upload completed") + + self.output.seek(0) + logs = [json.loads(line) for line in self.output.getvalue().strip().split("\n")] + + # Verify each upload can be traced + upload1_logs = [log for log in logs if log.get("correlation_id") == "upload-1"] + upload2_logs = [log for log in logs if log.get("correlation_id") == "upload-2"] + + self.assertEqual(len(upload1_logs), 3) + self.assertEqual(len(upload2_logs), 3) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_logging/test_structured_logger.py b/tests/test_logging/test_structured_logger.py new file mode 100644 index 0000000..2f8b525 --- /dev/null +++ b/tests/test_logging/test_structured_logger.py @@ -0,0 +1,369 @@ +""" +Unit tests for structured_logger.py + +Tests the core logging functionality including: +- Log levels +- Structured fields +- Credential sanitization +- Context propagation +- Output formats (JSON and text) +""" + +import unittest +import json +import sys +from io import StringIO +from trcli.logging.structured_logger import StructuredLogger, LoggerFactory, LogLevel + + +class TestLogLevel(unittest.TestCase): + """Test LogLevel enum""" + + def test_log_levels(self): + """Test that log levels have correct values""" + self.assertEqual(LogLevel.DEBUG, 10) + self.assertEqual(LogLevel.INFO, 20) + self.assertEqual(LogLevel.WARNING, 30) + self.assertEqual(LogLevel.ERROR, 40) + self.assertEqual(LogLevel.CRITICAL, 50) + + def test_log_level_ordering(self): + """Test that log levels are properly ordered""" + self.assertLess(LogLevel.DEBUG, LogLevel.INFO) + self.assertLess(LogLevel.INFO, LogLevel.WARNING) + self.assertLess(LogLevel.WARNING, LogLevel.ERROR) + 
self.assertLess(LogLevel.ERROR, LogLevel.CRITICAL) + + +class TestStructuredLogger(unittest.TestCase): + """Test StructuredLogger class""" + + def setUp(self): + """Set up test fixtures""" + self.output = StringIO() + self.logger = StructuredLogger( + "test.logger", level=LogLevel.DEBUG, output_stream=self.output, format_style="json" + ) + + def tearDown(self): + """Clean up""" + self.output.close() + + def test_logger_initialization(self): + """Test logger is initialized correctly""" + self.assertEqual(self.logger.name, "test.logger") + self.assertEqual(self.logger.level, LogLevel.DEBUG) + self.assertEqual(self.logger.format_style, "json") + + def test_log_level_filtering(self): + """Test that log level filtering works""" + # Set level to INFO + self.logger.level = LogLevel.INFO + + # DEBUG should not log + self.logger.debug("Debug message") + self.assertEqual(self.output.getvalue(), "") + + # INFO should log + self.logger.info("Info message") + self.assertIn("Info message", self.output.getvalue()) + + def test_json_format(self): + """Test JSON output format""" + self.logger.info("Test message", field1="value1", field2=123) + + output = self.output.getvalue() + log_entry = json.loads(output.strip()) + + self.assertEqual(log_entry["level"], "INFO") + self.assertEqual(log_entry["logger"], "test.logger") + self.assertEqual(log_entry["message"], "Test message") + self.assertEqual(log_entry["field1"], "value1") + self.assertEqual(log_entry["field2"], 123) + self.assertIn("timestamp", log_entry) + + def test_text_format(self): + """Test text output format""" + text_logger = StructuredLogger( + "test.logger", level=LogLevel.INFO, output_stream=self.output, format_style="text" + ) + + text_logger.info("Test message", field1="value1") + + output = self.output.getvalue() + self.assertIn("[INFO]", output) + self.assertIn("test.logger", output) + self.assertIn("Test message", output) + self.assertIn("field1=value1", output) + + def test_credential_sanitization(self): + """Test that sensitive fields are sanitized""" + self.logger.info( + "Auth configured", + password="secret123", + api_key="sk_live_abc123def456", + token="bearer_xyz789", + username="admin", # Not sensitive + ) + + output = self.output.getvalue() + log_entry = json.loads(output.strip()) + + # Sensitive fields should be masked + self.assertNotEqual(log_entry["password"], "secret123") + self.assertIn("***", log_entry["password"]) + + self.assertNotEqual(log_entry["api_key"], "sk_live_abc123def456") + self.assertIn("***", log_entry["api_key"]) + + self.assertNotEqual(log_entry["token"], "bearer_xyz789") + self.assertIn("***", log_entry["token"]) + + # Non-sensitive field should not be masked + self.assertEqual(log_entry["username"], "admin") + + def test_context_propagation(self): + """Test that context fields are included in logs""" + ctx_logger = self.logger.with_context(correlation_id="abc-123", request_id=456) + + ctx_logger.info("Test message") + + output = self.output.getvalue() + log_entry = json.loads(output.strip()) + + self.assertEqual(log_entry["correlation_id"], "abc-123") + self.assertEqual(log_entry["request_id"], 456) + + def test_context_inheritance(self): + """Test that context is inherited in new logger instance""" + ctx_logger = self.logger.with_context(user="admin") + ctx_logger2 = ctx_logger.with_context(action="upload") + + ctx_logger2.info("Test message") + + output = self.output.getvalue() + log_entry = json.loads(output.strip()) + + # Both context fields should be present + self.assertEqual(log_entry["user"], 
"admin") + self.assertEqual(log_entry["action"], "upload") + + def test_set_context(self): + """Test set_context method""" + self.logger.set_context(correlation_id="xyz-789") + self.logger.info("Message 1") + + self.logger.set_context(request_id=123) + self.logger.info("Message 2") + + output = self.output.getvalue() + lines = output.strip().split("\n") + + log1 = json.loads(lines[0]) + log2 = json.loads(lines[1]) + + # First log should have correlation_id + self.assertEqual(log1["correlation_id"], "xyz-789") + + # Second log should have both (set_context updates, doesn't replace) + self.assertEqual(log2["correlation_id"], "xyz-789") + self.assertEqual(log2["request_id"], 123) + + def test_clear_context(self): + """Test clear_context method""" + self.logger.set_context(correlation_id="abc-123") + self.logger.info("Message 1") + + self.logger.clear_context() + self.logger.info("Message 2") + + output = self.output.getvalue() + lines = output.strip().split("\n") + + log1 = json.loads(lines[0]) + log2 = json.loads(lines[1]) + + # First log should have context + self.assertIn("correlation_id", log1) + + # Second log should not have context + self.assertNotIn("correlation_id", log2) + + def test_exception_logging(self): + """Test exception info is included when exc_info=True""" + try: + raise ValueError("Test error") + except ValueError: + self.logger.error("Error occurred", exc_info=True) + + output = self.output.getvalue() + log_entry = json.loads(output.strip()) + + self.assertEqual(log_entry["level"], "ERROR") + self.assertIn("exception", log_entry) + self.assertEqual(log_entry["exception"]["type"], "ValueError") + self.assertEqual(log_entry["exception"]["message"], "Test error") + self.assertIn("traceback", log_entry["exception"]) + + def test_all_log_levels(self): + """Test all log level methods""" + self.logger.debug("Debug message") + self.logger.info("Info message") + self.logger.warning("Warning message") + self.logger.error("Error message") + self.logger.critical("Critical message") + + output = self.output.getvalue() + lines = output.strip().split("\n") + + self.assertEqual(len(lines), 5) + + levels = [json.loads(line)["level"] for line in lines] + self.assertEqual(levels, ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]) + + +class TestLoggerFactory(unittest.TestCase): + """Test LoggerFactory class""" + + def setUp(self): + """Reset factory before each test""" + LoggerFactory.reset() + + def tearDown(self): + """Reset factory after each test""" + LoggerFactory.reset() + + def test_factory_defaults(self): + """Test factory default configuration""" + logger = LoggerFactory.get_logger("test") + + self.assertEqual(logger.level, LogLevel.INFO) + self.assertEqual(logger.format_style, "json") + self.assertEqual(logger.output_stream, sys.stderr) + + def test_factory_configure(self): + """Test factory configuration""" + output = StringIO() + LoggerFactory.configure(level="DEBUG", format_style="text", stream=output) + + logger = LoggerFactory.get_logger("test") + + self.assertEqual(logger.level, LogLevel.DEBUG) + self.assertEqual(logger.format_style, "text") + self.assertEqual(logger.output_stream, output) + + def test_factory_invalid_level(self): + """Test that invalid log level raises error""" + with self.assertRaises(ValueError): + LoggerFactory.configure(level="INVALID") + + def test_factory_invalid_format(self): + """Test that invalid format raises error""" + with self.assertRaises(ValueError): + LoggerFactory.configure(format_style="xml") + + def test_factory_caches_loggers(self): + 
"""Test that factory caches logger instances""" + logger1 = LoggerFactory.get_logger("test") + logger2 = LoggerFactory.get_logger("test") + + # Should be the same instance + self.assertIs(logger1, logger2) + + def test_factory_different_names(self): + """Test that different names create different loggers""" + logger1 = LoggerFactory.get_logger("test1") + logger2 = LoggerFactory.get_logger("test2") + + # Should be different instances + self.assertIsNot(logger1, logger2) + self.assertEqual(logger1.name, "test1") + self.assertEqual(logger2.name, "test2") + + def test_factory_updates_existing_loggers(self): + """Test that configure updates existing loggers""" + logger = LoggerFactory.get_logger("test") + original_level = logger.level + + # Reconfigure + LoggerFactory.configure(level="DEBUG") + + # Existing logger should be updated + self.assertNotEqual(logger.level, original_level) + self.assertEqual(logger.level, LogLevel.DEBUG) + + def test_factory_reset(self): + """Test factory reset clears cache""" + LoggerFactory.get_logger("test1") + LoggerFactory.get_logger("test2") + + # Reset + LoggerFactory.reset() + + # Check that defaults are restored + self.assertEqual(LoggerFactory._default_level, LogLevel.INFO) + self.assertEqual(LoggerFactory._default_format, "json") + self.assertEqual(len(LoggerFactory._loggers), 0) + + +class TestCredentialSanitization(unittest.TestCase): + """Test credential sanitization in detail""" + + def setUp(self): + """Set up test fixtures""" + self.output = StringIO() + self.logger = StructuredLogger("test", output_stream=self.output, format_style="json") + + def tearDown(self): + """Clean up""" + self.output.close() + + def test_sanitize_password(self): + """Test password sanitization""" + self.logger.info("Test", password="secret123") + + log_entry = json.loads(self.output.getvalue().strip()) + self.assertNotEqual(log_entry["password"], "secret123") + self.assertIn("***", log_entry["password"]) + + def test_sanitize_api_key(self): + """Test API key sanitization""" + self.logger.info("Test", api_key="sk_live_123456") + + log_entry = json.loads(self.output.getvalue().strip()) + self.assertNotEqual(log_entry["api_key"], "sk_live_123456") + + def test_sanitize_token(self): + """Test token sanitization""" + self.logger.info("Test", token="bearer_xyz789") + + log_entry = json.loads(self.output.getvalue().strip()) + self.assertNotEqual(log_entry["token"], "bearer_xyz789") + + def test_sanitize_short_password(self): + """Test short password sanitization""" + self.logger.info("Test", password="123") + + log_entry = json.loads(self.output.getvalue().strip()) + self.assertEqual(log_entry["password"], "***") + + def test_no_sanitization_non_sensitive(self): + """Test that non-sensitive fields are not sanitized""" + self.logger.info("Test", username="admin", host="example.com", port=8080) + + log_entry = json.loads(self.output.getvalue().strip()) + self.assertEqual(log_entry["username"], "admin") + self.assertEqual(log_entry["host"], "example.com") + self.assertEqual(log_entry["port"], 8080) + + def test_sanitize_in_context(self): + """Test that context fields are also sanitized""" + ctx_logger = self.logger.with_context(password="secret") + ctx_logger.info("Test") + + log_entry = json.loads(self.output.getvalue().strip()) + self.assertNotEqual(log_entry["password"], "secret") + + +if __name__ == "__main__": + unittest.main() diff --git a/trcli/logging/__init__.py b/trcli/logging/__init__.py new file mode 100644 index 0000000..7224baa --- /dev/null +++ 
b/trcli/logging/__init__.py @@ -0,0 +1,56 @@ +""" +TRCLI Logging Module - Core Edition + +Zero-dependency, vendor-neutral logging infrastructure for TRCLI. +Simplified to include only essential features for CLI tools. + +Provides: +- Structured logging (NDJSON and text formats) +- File output with automatic rotation +- Flexible configuration (file, env vars, CLI flags) +- Credential sanitization +- Correlation ID support +- Zero external dependencies + +Usage: + from trcli.logging import get_logger + + logger = get_logger("trcli.module") + logger.info("Operation completed", duration=1.5, items=100) + +Configuration: + # Via environment variables + export TRCLI_LOG_LEVEL=DEBUG + export TRCLI_LOG_FORMAT=json + export TRCLI_LOG_FILE=/var/log/trcli/app.log + + # Via configuration file + from trcli.logging.config import LoggingConfig + LoggingConfig.setup_logging(config_path="trcli_config.yml") +""" + +from trcli.logging.structured_logger import LoggerFactory, StructuredLogger, LogLevel + +__all__ = [ + "LoggerFactory", + "StructuredLogger", + "LogLevel", + "get_logger", +] + + +def get_logger(name: str) -> StructuredLogger: + """ + Get a logger instance with default configuration. + + Args: + name: Logger name (usually module path like "trcli.api.client") + + Returns: + StructuredLogger instance + + Example: + logger = get_logger("trcli.api") + logger.info("Request completed", status_code=200, duration=1.5) + """ + return LoggerFactory.get_logger(name) diff --git a/trcli/logging/config.py b/trcli/logging/config.py new file mode 100644 index 0000000..df34271 --- /dev/null +++ b/trcli/logging/config.py @@ -0,0 +1,305 @@ +""" +Configuration System - Simple logging configuration for TRCLI + +Provides centralized configuration loading from multiple sources +with precedence handling and environment variable substitution. +""" + +import os +import sys +import re +from pathlib import Path +from typing import Dict, Any, Optional + + +class LoggingConfig: + """ + Centralized logging configuration for TRCLI. + + Reads from file, environment variables, or CLI flags with + proper precedence handling. + + Example configuration file (trcli_config.yml): + logging: + level: INFO + format: json # json or text + output: file # stderr, stdout, file + file_path: /var/log/trcli/app.log + max_bytes: 10485760 # 10MB + backup_count: 5 + """ + + DEFAULT_CONFIG = { + "level": "INFO", + "format": "json", # json or text + "output": "stderr", # stderr, stdout, file + "file_path": None, + "max_bytes": 10485760, # 10MB + "backup_count": 5, + } + + @classmethod + def load(cls, config_path: Optional[str] = None) -> Dict[str, Any]: + """ + Load configuration from multiple sources. + + Precedence: CLI > Environment > File > Default + + Args: + config_path: Path to configuration file + + Returns: + Configuration dictionary + + Example: + config = LoggingConfig.load("trcli_config.yml") + """ + config = cls.DEFAULT_CONFIG.copy() + + # 1. Load from file + if config_path and Path(config_path).exists(): + file_config = cls._load_from_file(config_path) + if file_config and "logging" in file_config: + config.update(file_config["logging"]) + + # 2. Override with environment variables + config = cls._apply_env_overrides(config) + + # 3. Substitute environment variables in values + config = cls._substitute_env_vars(config) + + return config + + @classmethod + def _load_from_file(cls, config_path: str) -> Optional[Dict[str, Any]]: + """ + Load configuration from YAML file. 
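+
+        The file is expected to contain a top-level "logging" section, as shown in
+        the class docstring; load() reads only that section and ignores any other
+        top-level keys.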
+ + Args: + config_path: Path to configuration file + + Returns: + Configuration dictionary or None if error + """ + try: + # Try to import yaml + import yaml + + with open(config_path) as f: + return yaml.safe_load(f) + except ImportError: + # YAML not available, try simple parsing + sys.stderr.write( + "Warning: PyYAML not installed, using simple config parser. " + "Install PyYAML for full configuration support.\n" + ) + return cls._load_simple_config(config_path) + except Exception as e: + sys.stderr.write(f"Error loading config file {config_path}: {e}\n") + return None + + @classmethod + def _load_simple_config(cls, config_path: str) -> Optional[Dict[str, Any]]: + """ + Load configuration using simple key=value parser. + + Fallback for when PyYAML is not available. + + Args: + config_path: Path to configuration file + + Returns: + Configuration dictionary or None if error + """ + try: + config = {"logging": {}} + with open(config_path) as f: + for line in f: + line = line.strip() + # Skip comments and empty lines + if not line or line.startswith("#"): + continue + # Parse key=value + if "=" in line: + key, value = line.split("=", 1) + key = key.strip() + value = value.strip().strip('"').strip("'") + # Convert known numeric values + if key in ["max_bytes", "backup_count"]: + try: + value = int(value) + except ValueError: + pass + config["logging"][key] = value + return config + except Exception as e: + sys.stderr.write(f"Error parsing config file {config_path}: {e}\n") + return None + + @classmethod + def _apply_env_overrides(cls, config: Dict[str, Any]) -> Dict[str, Any]: + """ + Apply environment variable overrides. + + Environment variables: + TRCLI_LOG_LEVEL: Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL) + TRCLI_LOG_FORMAT: Output format (json, text) + TRCLI_LOG_OUTPUT: Output destination (stderr, stdout, file) + TRCLI_LOG_FILE: Log file path + TRCLI_LOG_MAX_BYTES: Max log file size before rotation + TRCLI_LOG_BACKUP_COUNT: Number of backup files to keep + + Args: + config: Configuration dictionary + + Returns: + Updated configuration dictionary + """ + # Simple overrides + env_mappings = { + "TRCLI_LOG_LEVEL": "level", + "TRCLI_LOG_FORMAT": "format", + "TRCLI_LOG_OUTPUT": "output", + "TRCLI_LOG_FILE": "file_path", + } + + for env_var, config_key in env_mappings.items(): + if env_var in os.environ: + config[config_key] = os.environ[env_var] + + # Numeric overrides + if "TRCLI_LOG_MAX_BYTES" in os.environ: + try: + config["max_bytes"] = int(os.environ["TRCLI_LOG_MAX_BYTES"]) + except ValueError: + pass + + if "TRCLI_LOG_BACKUP_COUNT" in os.environ: + try: + config["backup_count"] = int(os.environ["TRCLI_LOG_BACKUP_COUNT"]) + except ValueError: + pass + + return config + + @classmethod + def _substitute_env_vars(cls, config: Any) -> Any: + """ + Recursively substitute environment variables in configuration. + + Supports ${VAR_NAME} syntax. + + Example: + file_path: /var/log/${ENVIRONMENT}/trcli.log + With ENVIRONMENT=production, becomes: + file_path: /var/log/production/trcli.log + + Args: + config: Configuration value (string, dict, list, etc.) 
+ + Returns: + Configuration with substituted values + """ + if isinstance(config, str): + # Substitute environment variables + def replace_env(match): + var_name = match.group(1) + return os.environ.get(var_name, match.group(0)) + + return re.sub(r"\$\{([^}]+)\}", replace_env, config) + + elif isinstance(config, dict): + return {k: cls._substitute_env_vars(v) for k, v in config.items()} + + elif isinstance(config, list): + return [cls._substitute_env_vars(item) for item in config] + + else: + return config + + @classmethod + def setup_logging(cls, config_path: Optional[str] = None, **overrides): + """ + Setup logging based on configuration. + + Args: + config_path: Path to configuration file + **overrides: Configuration overrides (e.g., level="DEBUG") + + Example: + LoggingConfig.setup_logging( + config_path="trcli_config.yml", + level="DEBUG", + format="text" + ) + """ + from trcli.logging.structured_logger import LoggerFactory + from trcli.logging.file_handler import RotatingFileHandler + + # Load configuration + config = cls.load(config_path) + config.update(overrides) + + # Determine output stream + output_type = config.get("output", "stderr") + + if output_type == "stdout": + stream = sys.stdout + elif output_type == "stderr": + stream = sys.stderr + elif output_type == "file": + file_path = config.get("file_path") + if not file_path: + sys.stderr.write("Warning: file output selected but no file_path specified, using stderr\n") + stream = sys.stderr + else: + stream = RotatingFileHandler( + file_path, max_bytes=config.get("max_bytes", 10485760), backup_count=config.get("backup_count", 5) + ) + else: + stream = sys.stderr + + # Configure logger factory + LoggerFactory.configure( + level=config.get("level", "INFO"), format_style=config.get("format", "json"), stream=stream + ) + + @classmethod + def validate(cls, config: Dict[str, Any]) -> tuple: + """ + Validate configuration. + + Args: + config: Configuration dictionary + + Returns: + Tuple of (is_valid, error_message) + + Example: + is_valid, error = LoggingConfig.validate(config) + if not is_valid: + print(f"Invalid configuration: {error}") + """ + # Validate log level + valid_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] + level = config.get("level", "INFO").upper() + if level not in valid_levels: + return False, f"Invalid log level '{level}'. Must be one of: {', '.join(valid_levels)}" + + # Validate format + valid_formats = ["json", "text"] + format_style = config.get("format", "json") + if format_style not in valid_formats: + return False, f"Invalid format '{format_style}'. Must be one of: {', '.join(valid_formats)}" + + # Validate output + valid_outputs = ["stderr", "stdout", "file"] + output = config.get("output", "stderr") + if output not in valid_outputs: + return False, f"Invalid output '{output}'. Must be one of: {', '.join(valid_outputs)}" + + # Validate file output config + if output == "file" and not config.get("file_path"): + return False, "file_path required when output is 'file'" + + return True, "" diff --git a/trcli/logging/file_handler.py b/trcli/logging/file_handler.py new file mode 100644 index 0000000..5aa0147 --- /dev/null +++ b/trcli/logging/file_handler.py @@ -0,0 +1,255 @@ +""" +File Handler - Zero-dependency rotating file handler for TRCLI + +Provides file output with automatic rotation based on file size, +without requiring any external dependencies. 
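+
+In this package, LoggingConfig.setup_logging (trcli/logging/config.py) constructs a
+RotatingFileHandler and passes it to LoggerFactory.configure as the output stream
+whenever file output is configured.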
+ +Features: +- Automatic log rotation when file reaches max size +- Configurable number of backup files +- Thread-safe write operations +- Automatic directory creation +- Zero external dependencies (Python stdlib only) + +Usage: + from trcli.logging.file_handler import RotatingFileHandler + + handler = RotatingFileHandler( + filepath="/var/log/trcli/app.log", + max_bytes=10485760, # 10MB + backup_count=5 + ) + + handler.write("Log message\n") + handler.close() +""" + +import os +from pathlib import Path +from threading import Lock +from typing import Optional + + +class RotatingFileHandler: + """ + Simple rotating file handler without external dependencies. + + Rotates log files when they reach a specified size, keeping a + configurable number of backup files. + + Example: + handler = RotatingFileHandler("/var/log/trcli/app.log", max_bytes=10485760) + handler.write('{"timestamp": "2024-01-20", "message": "Test"}\n') + handler.close() + """ + + def __init__( + self, filepath: str, max_bytes: int = 10485760, backup_count: int = 5, encoding: str = "utf-8" # 10MB + ): + """ + Initialize rotating file handler. + + Args: + filepath: Path to log file + max_bytes: Maximum file size before rotation (default: 10MB) + backup_count: Number of backup files to keep (default: 5) + encoding: File encoding (default: utf-8) + """ + self.filepath = Path(filepath) + self.max_bytes = max_bytes + self.backup_count = backup_count + self.encoding = encoding + self._file = None + self._lock = Lock() + self._ensure_directory() + + def _ensure_directory(self): + """Create log directory if it doesn't exist""" + self.filepath.parent.mkdir(parents=True, exist_ok=True) + + def write(self, content: str): + """ + Write content to file with automatic rotation. + + Args: + content: Content to write (should include newline if needed) + + Example: + handler.write("Log entry\n") + """ + with self._lock: + # Check if rotation needed before writing + if self._should_rotate(): + self._rotate() + + # Open file if not already open + if self._file is None or self._file.closed: + self._file = open(self.filepath, "a", encoding=self.encoding) + + # Write content + self._file.write(content) + self._file.flush() + os.fsync(self._file.fileno()) # Ensure data is written to disk + + def _should_rotate(self) -> bool: + """ + Check if file should be rotated based on size. + + Returns: + True if rotation needed, False otherwise + """ + # If file doesn't exist, no rotation needed + if not self.filepath.exists(): + return False + + # Check file size + try: + file_size = self.filepath.stat().st_size + return file_size >= self.max_bytes + except OSError: + # If we can't check size, assume no rotation needed + return False + + def _rotate(self): + """ + Rotate log files. + + Closes current file, renames existing backup files, and + moves current file to .1. + + Rotation pattern: + app.log -> app.log.1 + app.log.1 -> app.log.2 + app.log.2 -> app.log.3 + ... 
+ app.log.N -> deleted (if N >= backup_count) + """ + # Close current file + if self._file and not self._file.closed: + self._file.close() + self._file = None + + # Delete oldest backup if it exists + oldest_backup = Path(f"{self.filepath}.{self.backup_count}") + if oldest_backup.exists(): + try: + oldest_backup.unlink() + except OSError: + pass # Ignore errors deleting old backups + + # Rotate existing backup files + for i in range(self.backup_count - 1, 0, -1): + src = Path(f"{self.filepath}.{i}") + dst = Path(f"{self.filepath}.{i + 1}") + if src.exists(): + try: + src.replace(dst) + except OSError: + pass # Ignore errors during rotation + + # Move current file to .1 + if self.filepath.exists(): + try: + self.filepath.replace(Path(f"{self.filepath}.1")) + except OSError: + pass # Ignore errors moving current file + + def flush(self): + """ + Flush file buffer. + + Ensures all buffered data is written to disk. + """ + with self._lock: + if self._file and not self._file.closed: + self._file.flush() + os.fsync(self._file.fileno()) + + def close(self): + """ + Close file handle. + + Should be called when done writing to ensure data is flushed. + """ + with self._lock: + if self._file and not self._file.closed: + self._file.flush() + self._file.close() + self._file = None + + def __enter__(self): + """Context manager entry""" + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Context manager exit""" + self.close() + + def __del__(self): + """Destructor - ensure file is closed""" + try: + self.close() + except Exception: + pass # Ignore errors in destructor + + +class MultiFileHandler: + """ + Write to multiple files simultaneously. + + Useful for writing to multiple locations (e.g., local file + shared storage). + + Example: + handler = MultiFileHandler([ + RotatingFileHandler("/var/log/trcli/app.log"), + RotatingFileHandler("/mnt/shared/logs/app.log") + ]) + handler.write("Log entry\n") + """ + + def __init__(self, handlers: list): + """ + Initialize multi-file handler. + + Args: + handlers: List of file handlers + """ + self.handlers = handlers + + def write(self, content: str): + """ + Write content to all handlers. + + Args: + content: Content to write + """ + for handler in self.handlers: + try: + handler.write(content) + except Exception: + # Continue writing to other handlers even if one fails + pass + + def flush(self): + """Flush all handlers""" + for handler in self.handlers: + try: + handler.flush() + except Exception: + pass + + def close(self): + """Close all handlers""" + for handler in self.handlers: + try: + handler.close() + except Exception: + pass + + def __enter__(self): + """Context manager entry""" + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Context manager exit""" + self.close() diff --git a/trcli/logging/structured_logger.py b/trcli/logging/structured_logger.py new file mode 100644 index 0000000..439692c --- /dev/null +++ b/trcli/logging/structured_logger.py @@ -0,0 +1,409 @@ +""" +Structured Logger - Zero-dependency structured logging for TRCLI + +Provides structured logging with NDJSON (Newline-Delimited JSON) output format, +compatible with all major log aggregation platforms (ELK, Splunk, CloudWatch, etc.) 
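+
+Each record is a single JSON object on its own line, so the output can be consumed
+by ordinary line-oriented tools (for example grep or jq) as well as by log shippers.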
+ +Features: +- Standard log levels (DEBUG, INFO, WARNING, ERROR, CRITICAL) +- Structured fields (queryable, filterable, aggregatable) +- Correlation ID support for request tracing +- Context propagation (automatic field inheritance) +- Human-readable and JSON output formats +- Zero external dependencies (Python stdlib only) + +Usage: + from trcli.logging.structured_logger import LoggerFactory + + logger = LoggerFactory.get_logger("trcli.api") + + # Simple logging + logger.info("Operation completed", duration=1.5, count=100) + + # With correlation context + ctx_logger = logger.with_context(correlation_id="abc-123") + ctx_logger.info("Processing started") + ctx_logger.info("Processing finished") +""" + +import json +import sys +import traceback +from datetime import datetime, timezone +from typing import Dict, Any, Optional, TextIO +from enum import IntEnum + + +class LogLevel(IntEnum): + """Standard log levels compatible with Python logging and syslog""" + + DEBUG = 10 + INFO = 20 + WARNING = 30 + ERROR = 40 + CRITICAL = 50 + + +class StructuredLogger: + """ + Zero-dependency structured logger using standard Python libraries only. + Outputs NDJSON format compatible with all major observability platforms. + + Example: + logger = StructuredLogger("trcli.api", level=LogLevel.INFO) + logger.info("Request completed", status_code=200, duration_ms=150) + + # Output: + # {"timestamp":"2024-01-20T10:15:30.123456Z","level":"INFO","logger":"trcli.api","message":"Request completed","status_code":200,"duration_ms":150} + """ + + def __init__( + self, name: str, level: LogLevel = LogLevel.INFO, output_stream: TextIO = None, format_style: str = "json" + ): + """ + Initialize structured logger. + + Args: + name: Logger name (usually module path) + level: Minimum log level to output + output_stream: Output stream (default: sys.stderr) + format_style: Output format - "json" or "text" + """ + self.name = name + self.level = level + self.output_stream = output_stream or sys.stderr + self.format_style = format_style + self._context: Dict[str, Any] = {} + self._sensitive_keys = { + "password", + "passwd", + "pwd", + "secret", + "token", + "api_key", + "apikey", + "authorization", + "auth", + "credential", + "key", + } + + def _should_log(self, level: LogLevel) -> bool: + """Check if message should be logged based on level""" + return level.value >= self.level.value + + def _sanitize_value(self, key: str, value: Any) -> Any: + """ + Sanitize sensitive values to prevent credential leakage. + + Args: + key: Field name + value: Field value + + Returns: + Sanitized value (original or masked) + """ + # Check if key contains sensitive terms + key_lower = str(key).lower() + for sensitive_key in self._sensitive_keys: + if sensitive_key in key_lower: + # Mask sensitive data + if isinstance(value, str): + if len(value) <= 4: + return "***" + # Show first 2 and last 2 chars + return f"{value[:2]}***{value[-2:]}" + return "***REDACTED***" + + return value + + def _format_log(self, level: LogLevel, message: str, extra: Optional[Dict[str, Any]] = None) -> str: + """ + Format log entry according to configured style. + + Args: + level: Log level + message: Log message + extra: Additional structured fields + + Returns: + Formatted log string + """ + log_entry = { + "timestamp": datetime.now(timezone.utc).isoformat(), + "level": level.name, + "logger": self.name, + "message": message, + } + + # Add context (correlation IDs, etc.) 
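+        # Context is merged first; an `extra` field with the same key (added
+        # below) overwrites the context value for this entry only.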
+ if self._context: + for key, value in self._context.items(): + log_entry[key] = self._sanitize_value(key, value) + + # Add extra fields + if extra: + for key, value in extra.items(): + log_entry[key] = self._sanitize_value(key, value) + + if self.format_style == "json": + # NDJSON: one JSON object per line + return json.dumps(log_entry, default=str) + else: + # Human-readable format + timestamp = log_entry["timestamp"] + level_str = f"[{log_entry['level']}]".ljust(10) + logger_str = self.name.ljust(20) + msg = log_entry["message"] + + # Format extra fields + extra_parts = [] + for key, value in log_entry.items(): + if key not in ["timestamp", "level", "logger", "message"]: + extra_parts.append(f"{key}={value}") + + extra_str = "" + if extra_parts: + extra_str = " | " + " ".join(extra_parts) + + return f"{timestamp} {level_str} {logger_str} | {msg}{extra_str}" + + def _write(self, level: LogLevel, message: str, extra: Optional[Dict[str, Any]] = None, exc_info: bool = False): + """ + Write log entry to output stream. + + Args: + level: Log level + message: Log message + extra: Additional structured fields + exc_info: Include exception traceback + """ + if not self._should_log(level): + return + + # Add exception info if requested + if exc_info: + if extra is None: + extra = {} + exc_type, exc_value, exc_tb = sys.exc_info() + if exc_type is not None: + extra["exception"] = { + "type": exc_type.__name__, + "message": str(exc_value), + "traceback": "".join(traceback.format_tb(exc_tb)), + } + + log_line = self._format_log(level, message, extra) + + try: + self.output_stream.write(log_line + "\n") + self.output_stream.flush() + except Exception: + # Fallback to stderr if output stream fails + if self.output_stream != sys.stderr: + sys.stderr.write(f"Logging error: failed to write to output stream\n") + sys.stderr.write(log_line + "\n") + + def debug(self, message: str, **extra): + """ + Log debug message. + + Args: + message: Log message + **extra: Additional structured fields + + Example: + logger.debug("Processing item", item_id=123, status="pending") + """ + self._write(LogLevel.DEBUG, message, extra) + + def info(self, message: str, **extra): + """ + Log info message. + + Args: + message: Log message + **extra: Additional structured fields + + Example: + logger.info("Operation completed", duration=1.5, items=100) + """ + self._write(LogLevel.INFO, message, extra) + + def warning(self, message: str, **extra): + """ + Log warning message. + + Args: + message: Log message + **extra: Additional structured fields + + Example: + logger.warning("Slow operation detected", duration=30.5, threshold=10.0) + """ + self._write(LogLevel.WARNING, message, extra) + + def error(self, message: str, exc_info: bool = False, **extra): + """ + Log error message. + + Args: + message: Log message + exc_info: Include exception traceback + **extra: Additional structured fields + + Example: + logger.error("Upload failed", exc_info=True, run_id=12345) + """ + self._write(LogLevel.ERROR, message, extra, exc_info=exc_info) + + def critical(self, message: str, exc_info: bool = False, **extra): + """ + Log critical message. + + Args: + message: Log message + exc_info: Include exception traceback + **extra: Additional structured fields + + Example: + logger.critical("System failure", exc_info=True, component="api") + """ + self._write(LogLevel.CRITICAL, message, extra, exc_info=exc_info) + + def with_context(self, **context) -> "StructuredLogger": + """ + Return a logger with additional context fields. 
+ + Context fields are automatically added to all log entries from + the returned logger instance. + + Args: + **context: Context fields to add + + Returns: + New logger instance with context + + Example: + ctx_logger = logger.with_context(correlation_id="abc-123", user="admin") + ctx_logger.info("Request started") + ctx_logger.info("Request completed") + # Both logs will include correlation_id and user fields + """ + new_logger = StructuredLogger(self.name, self.level, self.output_stream, self.format_style) + new_logger._context = {**self._context, **context} + new_logger._sensitive_keys = self._sensitive_keys + return new_logger + + def set_context(self, **context): + """ + Set context for all subsequent logs from this logger. + + Args: + **context: Context fields to add + + Example: + logger.set_context(request_id="req-123") + logger.info("Processing") + # Log will include request_id + """ + self._context.update(context) + + def clear_context(self): + """ + Clear all context fields. + + Example: + logger.clear_context() + """ + self._context = {} + + def set_level(self, level: LogLevel): + """ + Set minimum log level. + + Args: + level: New log level + + Example: + logger.set_level(LogLevel.DEBUG) + """ + self.level = level + + +class LoggerFactory: + """ + Factory for creating loggers with consistent configuration. + + Provides centralized configuration for all loggers in the application. + """ + + _default_level = LogLevel.INFO + _default_format = "json" + _default_stream = sys.stderr + _loggers: Dict[str, StructuredLogger] = {} + + @classmethod + def configure(cls, level: str = "INFO", format_style: str = "json", stream: TextIO = None): + """ + Configure default logger settings. + + Args: + level: Log level name (DEBUG, INFO, WARNING, ERROR, CRITICAL) + format_style: Output format - "json" or "text" + stream: Output stream (default: sys.stderr) + + Example: + LoggerFactory.configure(level="DEBUG", format_style="text") + """ + level_upper = level.upper() + if level_upper in LogLevel.__members__: + cls._default_level = LogLevel[level_upper] + else: + raise ValueError(f"Invalid log level: {level}. Must be one of: DEBUG, INFO, WARNING, ERROR, CRITICAL") + + if format_style not in ["json", "text"]: + raise ValueError(f"Invalid format style: {format_style}. Must be 'json' or 'text'") + + cls._default_format = format_style + + if stream: + cls._default_stream = stream + + # Update existing loggers + for logger in cls._loggers.values(): + logger.level = cls._default_level + logger.format_style = cls._default_format + logger.output_stream = cls._default_stream + + @classmethod + def get_logger(cls, name: str) -> StructuredLogger: + """ + Get a logger instance with default configuration. + + Returns cached logger if already created for this name. + + Args: + name: Logger name (usually module path) + + Returns: + StructuredLogger instance + + Example: + logger = LoggerFactory.get_logger("trcli.api") + """ + if name not in cls._loggers: + cls._loggers[name] = StructuredLogger(name, cls._default_level, cls._default_stream, cls._default_format) + return cls._loggers[name] + + @classmethod + def reset(cls): + """ + Reset factory to defaults and clear all cached loggers. + + Useful for testing. 
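+
+        A typical pattern (see tests/test_logging) is to call reset() in both
+        setUp and tearDown so configuration does not leak between test cases.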
+ """ + cls._default_level = LogLevel.INFO + cls._default_format = "json" + cls._default_stream = sys.stderr + cls._loggers = {} From 34a2620a705f565cc51ce593a6b0c5da786f1739 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 4 Feb 2026 17:55:28 +0800 Subject: [PATCH 28/33] TRCLI-5: Included correct package in setup.py for logging --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 10c9653..f1946bb 100644 --- a/setup.py +++ b/setup.py @@ -12,6 +12,7 @@ "trcli.data_providers", "trcli.data_classes", "trcli.api", + "trcli.logging", ], include_package_data=True, install_requires=[ From f98133cbdff88bbc8c35eff4ee8f8d7f1b4548b6 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Fri, 6 Feb 2026 16:30:44 +0800 Subject: [PATCH 29/33] release/1.13.0 Updated changelog for 1.13.0 release --- CHANGELOG.MD | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.MD b/CHANGELOG.MD index 25c070d..516011d 100644 --- a/CHANGELOG.MD +++ b/CHANGELOG.MD @@ -10,11 +10,23 @@ This project adheres to [Semantic Versioning](https://semver.org/). Version numb _released 12-01-2025 -### Fixed - - Added new BDD/Gherkin parser command parse_bdd for behavioral driven development-related testing +### Added + - **New Command: `parse_cucumber`** - Parse Cucumber JSON reports and upload to TestRail + - **New Command: `import_gherkin`** - Import Gherkin .feature files as BDD test cases with `--update` flag to update existing cases + - **New Command: `export_gherkin`** - Export TestRail BDD test cases as Gherkin .feature files + - **Enhanced `parse_junit` command**: Added `--special-parser bdd` option to parse JUnit XML as BDD scenarios with custom case and result field support + - **Centralized Logging Module**: Structured logging framework with JSON/text formatting, file rotation, and configuration options for better production observability. + +### Fixed/Improved + - Improved caching to reduce redundant API calls during command execution, combined with existing N+1 query optimizations to improve performance for large report processing. + - Decomposed monolithic class (ApiRequestHandler) into specialized handler modules. 
+ +## [1.12.6] + +_released 01-05-2026 ### Added - - **BDD Support for parse_junit**: Added `--special-parser bdd` option to group multiple JUnit scenarios into a single TestRail BDD test case; supports case ID extraction, BDD case validation and result aggregation + - Allow parse_junit to update custom case fields in the same test run when using --update-existing-cases ## [1.12.5] From 48659c74f706deca527eb18575379963400e15c3 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Fri, 6 Feb 2026 16:54:05 +0800 Subject: [PATCH 30/33] Fix: Restore refactored api_request_handler with TRCLI-210 --- trcli/api/api_request_handler.py | 1826 ++++++------------------------ trcli/api/case_handler.py | 122 +- 2 files changed, 444 insertions(+), 1504 deletions(-) diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index d1cd4e3..e1337b8 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -1,10 +1,20 @@ -import html, json, os +import os import time from concurrent.futures import ThreadPoolExecutor, as_completed -from beartype.typing import List, Union, Tuple, Dict +from beartype.typing import List, Union, Tuple, Dict, Optional from trcli.api.api_client import APIClient, APIClientResult from trcli.api.api_response_verify import ApiResponseVerify +from trcli.api.api_cache import RequestCache +from trcli.api.label_manager import LabelManager +from trcli.api.reference_manager import ReferenceManager +from trcli.api.case_matcher import CaseMatcherFactory +from trcli.api.suite_handler import SuiteHandler +from trcli.api.section_handler import SectionHandler +from trcli.api.result_handler import ResultHandler +from trcli.api.run_handler import RunHandler +from trcli.api.bdd_handler import BddHandler +from trcli.api.case_handler import CaseHandler from trcli.cli import Environment from trcli.constants import ( ProjectErrors, @@ -45,6 +55,43 @@ def __init__( ) self.suites_data_from_provider = self.data_provider.suites_input self.response_verifier = ApiResponseVerify(verify) + # Initialize session-scoped cache for API responses + self._cache = RequestCache(max_size=512) + # Initialize specialized managers + self.label_manager = LabelManager(api_client, environment) + self.reference_manager = ReferenceManager(api_client, environment) + self.suite_handler = SuiteHandler( + api_client, environment, self.data_provider, get_all_suites_callback=self.__get_all_suites + ) + self.section_handler = SectionHandler( + api_client, environment, self.data_provider, get_all_sections_callback=self.__get_all_sections + ) + self.result_handler = ResultHandler( + api_client, + environment, + self.data_provider, + get_all_tests_in_run_callback=self.__get_all_tests_in_run, + handle_futures_callback=self.handle_futures, + ) + self.run_handler = RunHandler( + api_client, environment, self.data_provider, get_all_tests_in_run_callback=self.__get_all_tests_in_run + ) + self.bdd_handler = BddHandler(api_client, environment) + self.case_handler = CaseHandler( + api_client, + environment, + self.data_provider, + handle_futures_callback=self.handle_futures, + retrieve_results_callback=ApiRequestHandler.retrieve_results_after_cancelling, + ) + + # BDD case cache for feature name matching (shared by CucumberParser and JunitParser) + # Structure: {"{project_id}_{suite_id}": {normalized_name: [case_dict, case_dict, ...]}} + self._bdd_case_cache = {} + + # Cache for resolved BDD field names (resolved from TestRail API) + self._bdd_case_field_name = None # BDD Scenarios field 
(type_id=13) + self._bdd_result_field_name = None # BDD Scenario Results field (type_id=14) def check_automation_id_field(self, project_id: int) -> Union[str, None]: """ @@ -67,11 +114,13 @@ def check_automation_id_field(self, project_id: int) -> Union[str, None]: return FAULT_MAPPING["automation_id_unavailable"] if not automation_id_field["configs"]: self._active_automation_id_field = automation_id_field["system_name"] + self.case_handler._active_automation_id_field = automation_id_field["system_name"] return None for config in automation_id_field["configs"]: context = config["context"] if context["is_global"] or project_id in context["project_ids"]: self._active_automation_id_field = automation_id_field["system_name"] + self.case_handler._active_automation_id_field = automation_id_field["system_name"] return None return FAULT_MAPPING["automation_id_unavailable"] else: @@ -125,334 +174,50 @@ def get_project_data(self, project_name: str, project_id: int = None) -> Project ) def check_suite_id(self, project_id: int) -> Tuple[bool, str]: - """ - Check if suite from DataProvider exist using get_suites endpoint - :project_id: project id - :returns: True if exists in suites. False if not. - """ suite_id = self.suites_data_from_provider.suite_id - suites_data, error = self.__get_all_suites(project_id) - if not error: - available_suites = [suite for suite in suites_data if suite["id"] == suite_id] - return ( - (True, "") - if len(available_suites) > 0 - else (False, FAULT_MAPPING["missing_suite"].format(suite_id=suite_id)) - ) - else: - return None, suites_data.error_message + return self.suite_handler.check_suite_id(project_id, suite_id) def resolve_suite_id_using_name(self, project_id: int) -> Tuple[int, str]: - """Get suite ID matching suite name on data provider or returns -1 if unable to match any suite. - :arg project_id: project id - :returns: tuple with id of the suite and error message""" - suite_id = -1 suite_name = self.suites_data_from_provider.name - suites_data, error = self.__get_all_suites(project_id) - if not error: - for suite in suites_data: - if suite["name"] == suite_name: - suite_id = suite["id"] - self.data_provider.update_data([{"suite_id": suite["id"], "name": suite["name"]}]) - break - return ( - (suite_id, "") - if suite_id != -1 - else (-1, FAULT_MAPPING["missing_suite_by_name"].format(suite_name=suite_name)) - ) - else: - return -1, error + return self.suite_handler.resolve_suite_id_using_name(project_id, suite_name) def get_suite_ids(self, project_id: int) -> Tuple[List[int], str]: - """Get suite IDs for requested project_id. - : project_id: project id - : returns: tuple with list of suite ids and error string""" - available_suites = [] - returned_resources = [] - suites_data, error = self.__get_all_suites(project_id) - if not error: - for suite in suites_data: - available_suites.append(suite["id"]) - returned_resources.append( - { - "suite_id": suite["id"], - "name": suite["name"], - } - ) - if returned_resources: - self.data_provider.update_data(suite_data=returned_resources) - else: - print("Update skipped") - return ( - (available_suites, "") - if len(available_suites) > 0 - else ([], FAULT_MAPPING["no_suites_found"].format(project_id=project_id)) - ) - else: - return [], error + return self.suite_handler.get_suite_ids(project_id) def add_suites(self, project_id: int) -> Tuple[List[Dict], str]: - """ - Adds suites that doesn't have ID's in DataProvider. - Runs update_data in data_provider for successfully created resources. 
- :project_id: project_id - :returns: Tuple with list of dict created resources and error string. - """ - add_suite_data = self.data_provider.add_suites_data() - responses = [] - error_message = "" - for body in add_suite_data: - response = self.client.send_post(f"add_suite/{project_id}", body) - if not response.error_message: - responses.append(response) - if not self.response_verifier.verify_returned_data(body, response.response_text): - responses.append(response) - error_message = FAULT_MAPPING["data_verification_error"] - break - else: - error_message = response.error_message - break - - returned_resources = [ - { - "suite_id": response.response_text["id"], - "name": response.response_text["name"], - } - for response in responses - ] - ( - self.data_provider.update_data(suite_data=returned_resources) - if len(returned_resources) > 0 - else "Update skipped" - ) - return returned_resources, error_message + return self.suite_handler.add_suites(project_id, verify_callback=self.response_verifier.verify_returned_data) def check_missing_section_ids(self, project_id: int) -> Tuple[bool, str]: - """ - Check what section id's are missing in DataProvider. - :project_id: project_id - :returns: Tuple with list missing section ID and error string. - """ suite_id = self.suites_data_from_provider.suite_id - returned_sections, error_message = self.__get_all_sections(project_id, suite_id) - if not error_message: - missing_test_sections = False - sections_by_id = {section["id"]: section for section in returned_sections} - sections_by_name = {section["name"]: section for section in returned_sections} - section_data = [] - for section in self.suites_data_from_provider.testsections: - if self.environment.section_id: - if section.section_id in sections_by_id.keys(): - section_json = sections_by_id[section.section_id] - section_data.append( - { - "section_id": section_json["id"], - "suite_id": section_json["suite_id"], - "name": section_json["name"], - } - ) - else: - missing_test_sections = True - if section.name in sections_by_name.keys(): - section_json = sections_by_name[section.name] - section_data.append( - { - "section_id": section_json["id"], - "suite_id": section_json["suite_id"], - "name": section_json["name"], - } - ) - else: - missing_test_sections = True - self.data_provider.update_data(section_data=section_data) - return missing_test_sections, error_message - else: - return False, error_message + return self.section_handler.check_missing_section_ids(project_id, suite_id, self.suites_data_from_provider) def add_sections(self, project_id: int) -> Tuple[List[Dict], str]: - """ - Add sections that doesn't have ID in DataProvider. - Runs update_data in data_provider for successfully created resources. - :project_id: project_id - :returns: Tuple with list of dict created resources and error string. 
- """ - add_sections_data = self.data_provider.add_sections_data() - responses = [] - error_message = "" - for body in add_sections_data: - response = self.client.send_post(f"add_section/{project_id}", body) - if not response.error_message: - responses.append(response) - if not self.response_verifier.verify_returned_data(body, response.response_text): - responses.append(response) - error_message = FAULT_MAPPING["data_verification_error"] - break - else: - error_message = response.error_message - break - returned_resources = [ - { - "section_id": response.response_text["id"], - "suite_id": response.response_text["suite_id"], - "name": response.response_text["name"], - } - for response in responses - ] - ( - self.data_provider.update_data(section_data=returned_resources) - if len(returned_resources) > 0 - else "Update skipped" + return self.section_handler.add_sections( + project_id, verify_callback=self.response_verifier.verify_returned_data ) - return returned_resources, error_message def check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, str]: """ - Check what test cases id's are missing in DataProvider. + Check what test cases id's are missing in DataProvider using the configured matcher strategy. :project_id: project_id :returns: Tuple with list test case ID missing and error string. """ - missing_cases_number = 0 suite_id = self.suites_data_from_provider.suite_id - # Performance optimization: Only fetch all cases if using AUTO matcher - # NAME/PROPERTY matchers can validate case IDs individually - if self.environment.case_matcher == MatchersParser.AUTO: - returned_cases, error_message = self.__get_all_cases(project_id, suite_id) - if error_message: - return False, error_message - - if self.environment.case_matcher == MatchersParser.AUTO: - test_cases_by_aut_id = {} - for case in returned_cases: - aut_case_id = case.get(OLD_SYSTEM_NAME_AUTOMATION_ID) or case.get(UPDATED_SYSTEM_NAME_AUTOMATION_ID) - if aut_case_id: - aut_case_id = html.unescape(aut_case_id) - test_cases_by_aut_id[aut_case_id] = case - test_case_data = [] - for section in self.suites_data_from_provider.testsections: - for test_case in section.testcases: - aut_id = test_case.custom_automation_id - if aut_id in test_cases_by_aut_id.keys(): - case = test_cases_by_aut_id[aut_id] - test_case_data.append( - { - "case_id": case["id"], - "section_id": case["section_id"], - "title": case["title"], - OLD_SYSTEM_NAME_AUTOMATION_ID: aut_id, - } - ) - else: - missing_cases_number += 1 - self.data_provider.update_data(case_data=test_case_data) - if missing_cases_number: - self.environment.log(f"Found {missing_cases_number} test cases not matching any TestRail case.") - else: - # For NAME or PROPERTY matcher we validate case IDs - nonexistent_ids = [] - case_ids_to_validate = set() - - # Collect all unique case IDs that need validation - for section in self.suites_data_from_provider.testsections: - for test_case in section.testcases: - if not test_case.case_id: - missing_cases_number += 1 - else: - case_ids_to_validate.add(int(test_case.case_id)) - - total_tests_in_report = missing_cases_number + len(case_ids_to_validate) - - if missing_cases_number: - self.environment.log(f"Found {missing_cases_number} test cases without case ID in the report file.") - - # Smart validation strategy based on report size - # Threshold: 1000 cases (same as skip validation threshold for consistency) - if case_ids_to_validate: - # Skip validation for large reports with all IDs (most efficient) - if missing_cases_number == 0 and 
total_tests_in_report >= 1000: - # All tests have IDs and report is large: Skip validation (trust IDs) - self.environment.log( - f"Skipping validation of {len(case_ids_to_validate)} case IDs " - f"(all tests have IDs, trusting they exist). " - f"If you encounter errors, ensure all case IDs in your test report exist in TestRail." - ) - nonexistent_ids = [] - - # Fetch all for large reports with missing IDs - elif total_tests_in_report >= 1000: - # Large report (>=1000 cases) with some missing IDs: Fetch all cases and validate locally - # This is more efficient than individual validation for large batches - self.environment.log( - f"Large report detected ({total_tests_in_report} cases). " - f"Fetching all cases from TestRail for efficient validation..." - ) - returned_cases, error_message = self.__get_all_cases(project_id, suite_id) - if error_message: - return False, error_message - - # Build lookup dictionary from fetched cases - all_case_ids = {case["id"] for case in returned_cases} - - # Validate locally (O(1) lookup) - nonexistent_ids = [cid for cid in case_ids_to_validate if cid not in all_case_ids] - - if nonexistent_ids: - self.environment.elog( - f"Nonexistent case IDs found in the report file: {nonexistent_ids[:20]}" - f"{' ...' if len(nonexistent_ids) > 20 else ''}" - ) - return False, "Case IDs not in TestRail project or suite were detected in the report file." + # Create appropriate matcher based on configuration (Strategy pattern) + matcher = CaseMatcherFactory.create_matcher(self.environment.case_matcher, self.environment, self.data_provider) - # Individual validation for small reports - else: - # Small report (<1000 cases): Use individual validation - # This is more efficient for small batches - self.environment.log(f"Validating {len(case_ids_to_validate)} case IDs exist in TestRail...") - validated_ids = self.__validate_case_ids_exist(suite_id, list(case_ids_to_validate)) - nonexistent_ids = [cid for cid in case_ids_to_validate if cid not in validated_ids] - - if nonexistent_ids: - self.environment.elog(f"Nonexistent case IDs found in the report file: {nonexistent_ids}") - return False, "Case IDs not in TestRail project or suite were detected in the report file." - - return missing_cases_number > 0, "" + # Delegate to the matcher + return matcher.check_missing_cases( + project_id, + suite_id, + self.suites_data_from_provider, + get_all_cases_callback=self.__get_all_cases, + validate_case_ids_callback=self.__validate_case_ids_exist, + ) def add_cases(self) -> Tuple[List[dict], str]: - """ - Add cases that doesn't have ID in DataProvider. - Runs update_data in data_provider for successfully created resources. - :returns: Tuple with list of dict created resources and error string. - """ - add_case_data = self.data_provider.add_cases() - responses = [] - error_message = "" - with self.environment.get_progress_bar( - results_amount=len(add_case_data), prefix="Adding test cases" - ) as progress_bar: - with ThreadPoolExecutor(max_workers=MAX_WORKERS_ADD_CASE) as executor: - futures = { - executor.submit( - self._add_case_and_update_data, - body, - ): body - for body in add_case_data - } - responses, error_message = self.handle_futures( - futures=futures, action_string="add_case", progress_bar=progress_bar - ) - if error_message: - # When error_message is present we cannot be sure that responses contains all added items. 
- # Iterate through futures to get all responses from done tasks (not cancelled) - responses = ApiRequestHandler.retrieve_results_after_cancelling(futures) - returned_resources = [ - { - "case_id": response.response_text["id"], - "section_id": response.response_text["section_id"], - "title": response.response_text["title"], - } - for response in responses - ] - return returned_resources, error_message + return self.case_handler.add_cases() def add_run( self, @@ -468,39 +233,19 @@ def add_run( refs: str = None, case_ids: List[int] = None, ) -> Tuple[int, str]: - """ - Creates a new test run. - :project_id: project_id - :run_name: run name - :returns: Tuple with run id and error string. - """ - add_run_data = self.data_provider.add_run( + return self.run_handler.add_run( + project_id, run_name, - case_ids=case_ids, - start_date=start_date, - end_date=end_date, - milestone_id=milestone_id, - assigned_to_id=assigned_to_id, - include_all=include_all, - refs=refs, + milestone_id, + start_date, + end_date, + plan_id, + config_ids, + assigned_to_id, + include_all, + refs, + case_ids, ) - if not plan_id: - response = self.client.send_post(f"add_run/{project_id}", add_run_data) - run_id = response.response_text.get("id") - else: - if config_ids: - add_run_data["config_ids"] = config_ids - entry_data = { - "name": add_run_data["name"], - "suite_id": add_run_data["suite_id"], - "config_ids": config_ids, - "runs": [add_run_data], - } - else: - entry_data = add_run_data - response = self.client.send_post(f"add_plan_entry/{plan_id}", entry_data) - run_id = response.response_text["runs"][0]["id"] - return run_id, response.error_message def update_run( self, @@ -512,403 +257,24 @@ def update_run( refs: str = None, refs_action: str = "add", ) -> Tuple[dict, str]: - """ - Updates an existing run - :run_id: run id - :run_name: run name - :refs: references to manage - :refs_action: action to perform ('add', 'update', 'delete') - :returns: Tuple with run and error string. 
- """ - run_response = self.client.send_get(f"get_run/{run_id}") - if run_response.error_message: - return None, run_response.error_message - - existing_description = run_response.response_text.get("description", "") - existing_refs = run_response.response_text.get("refs", "") - - add_run_data = self.data_provider.add_run( - run_name, start_date=start_date, end_date=end_date, milestone_id=milestone_id - ) - add_run_data["description"] = existing_description # Retain the current description - - # Handle references based on action - if refs is not None: - updated_refs = self._manage_references(existing_refs, refs, refs_action) - add_run_data["refs"] = updated_refs - else: - add_run_data["refs"] = existing_refs # Keep existing refs if none provided - - existing_include_all = run_response.response_text.get("include_all", False) - add_run_data["include_all"] = existing_include_all - - if not existing_include_all: - # Only manage explicit case_ids when include_all=False - run_tests, error_message = self.__get_all_tests_in_run(run_id) - if error_message: - return None, f"Failed to get tests in run: {error_message}" - run_case_ids = [test["case_id"] for test in run_tests] - report_case_ids = add_run_data["case_ids"] - joint_case_ids = list(set(report_case_ids + run_case_ids)) - add_run_data["case_ids"] = joint_case_ids - else: - # include_all=True: TestRail includes all suite cases automatically - # Do NOT send case_ids array (TestRail ignores it anyway) - add_run_data.pop("case_ids", None) - - plan_id = run_response.response_text["plan_id"] - config_ids = run_response.response_text["config_ids"] - if not plan_id: - update_response = self.client.send_post(f"update_run/{run_id}", add_run_data) - elif plan_id and config_ids: - update_response = self.client.send_post(f"update_run_in_plan_entry/{run_id}", add_run_data) - else: - response = self.client.send_get(f"get_plan/{plan_id}") - entry_id = next( - ( - run["entry_id"] - for entry in response.response_text["entries"] - for run in entry["runs"] - if run["id"] == run_id - ), - None, - ) - update_response = self.client.send_post(f"update_plan_entry/{plan_id}/{entry_id}", add_run_data) - run_response = self.client.send_get(f"get_run/{run_id}") - return run_response.response_text, update_response.error_message + return self.run_handler.update_run(run_id, run_name, start_date, end_date, milestone_id, refs, refs_action) def _manage_references(self, existing_refs: str, new_refs: str, action: str) -> str: - """ - Manage references based on the specified action. 
- :existing_refs: current references in the run - :new_refs: new references to process - :action: 'add', 'update', or 'delete' - :returns: updated references string - """ - if not existing_refs: - existing_refs = "" - - if action == "update": - # Replace all references with new ones - return new_refs - elif action == "delete": - if not new_refs: - # Delete all references - return "" - else: - # Delete specific references - existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] - refs_to_delete = [ref.strip() for ref in new_refs.split(",") if ref.strip()] - updated_list = [ref for ref in existing_list if ref not in refs_to_delete] - return ",".join(updated_list) - else: # action == 'add' (default) - # Add new references to existing ones - if not existing_refs: - return new_refs - existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] - new_list = [ref.strip() for ref in new_refs.split(",") if ref.strip()] - # Avoid duplicates - combined_list = existing_list + [ref for ref in new_list if ref not in existing_list] - return ",".join(combined_list) + return self.run_handler._manage_references(existing_refs, new_refs, action) def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dict, List[str], List[str], str]: - """ - Append references to a test run, avoiding duplicates. - :param run_id: ID of the test run - :param references: List of references to append - :returns: Tuple with (run_data, added_refs, skipped_refs, error_message) - """ - # Get current run data - run_response = self.client.send_get(f"get_run/{run_id}") - if run_response.error_message: - return None, [], [], run_response.error_message - - existing_refs = run_response.response_text.get("refs", "") or "" - - # Parse existing and new references - existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] if existing_refs else [] - # Deduplicate input references - new_list = [] - seen = set() - for ref in references: - ref_clean = ref.strip() - if ref_clean and ref_clean not in seen: - new_list.append(ref_clean) - seen.add(ref_clean) - - # Determine which references are new vs duplicates - added_refs = [ref for ref in new_list if ref not in existing_list] - skipped_refs = [ref for ref in new_list if ref in existing_list] - - # If no new references to add, return current state - if not added_refs: - return run_response.response_text, added_refs, skipped_refs, None - - # Combine references - combined_list = existing_list + added_refs - combined_refs = ",".join(combined_list) - - if len(combined_refs) > 250: - return ( - None, - [], - [], - f"Combined references length ({len(combined_refs)} characters) exceeds 250 character limit", - ) - - update_data = {"refs": combined_refs} - - # Determine the correct API endpoint based on plan membership - plan_id = run_response.response_text.get("plan_id") - config_ids = run_response.response_text.get("config_ids") - - if not plan_id: - # Standalone run - update_response = self.client.send_post(f"update_run/{run_id}", update_data) - elif plan_id and config_ids: - # Run in plan with configurations - update_response = self.client.send_post(f"update_run_in_plan_entry/{run_id}", update_data) - else: - # Run in plan without configurations - need to use plan entry endpoint - plan_response = self.client.send_get(f"get_plan/{plan_id}") - if plan_response.error_message: - return None, [], [], f"Failed to get plan details: {plan_response.error_message}" - - # Find the entry_id for this run - entry_id = None - for 
entry in plan_response.response_text.get("entries", []): - for run in entry.get("runs", []): - if run["id"] == run_id: - entry_id = entry["id"] - break - if entry_id: - break - - if not entry_id: - return None, [], [], f"Could not find plan entry for run {run_id}" - - update_response = self.client.send_post(f"update_plan_entry/{plan_id}/{entry_id}", update_data) - - if update_response.error_message: - return None, [], [], update_response.error_message - - updated_run_response = self.client.send_get(f"get_run/{run_id}") - return updated_run_response.response_text, added_refs, skipped_refs, updated_run_response.error_message + return self.run_handler.append_run_references(run_id, references) def update_existing_case_references( self, case_id: int, junit_refs: str, case_fields: dict = None, strategy: str = "append" ) -> Tuple[bool, str, List[str], List[str], List[str]]: - """ - Update existing case references and custom fields with values from JUnit properties. - :param case_id: ID of the test case - :param junit_refs: References from JUnit testrail_case_field property - :param case_fields: Dictionary of custom case fields to update (e.g., {'custom_preconds': 'value'}) - :param strategy: 'append' or 'replace' (applies to refs field only) - :returns: Tuple with (success, error_message, added_refs, skipped_refs, updated_fields) - """ - updated_fields = [] - - # Handle case where there are no refs but there are case fields to update - if (not junit_refs or not junit_refs.strip()) and not case_fields: - return True, None, [], [], [] # Nothing to process - - if not junit_refs or not junit_refs.strip(): - # No refs to process, but we have case fields to update - new_refs = None - added_refs = [] - skipped_refs = [] - else: - # Parse and validate JUnit references, deduplicating input - junit_ref_list = [] - seen = set() - for ref in junit_refs.split(","): - ref_clean = ref.strip() - if ref_clean and ref_clean not in seen: - junit_ref_list.append(ref_clean) - seen.add(ref_clean) - - if not junit_ref_list: - # If we have case fields, continue; otherwise return error - if not case_fields: - return False, "No valid references found in JUnit property", [], [], [] - new_refs = None - added_refs = [] - skipped_refs = [] - else: - # Get current case data - case_response = self.client.send_get(f"get_case/{case_id}") - if case_response.error_message: - return False, case_response.error_message, [], [], [] - - existing_refs = case_response.response_text.get("refs", "") or "" - - if strategy == "replace": - # Replace strategy: use JUnit refs as-is - new_refs = ",".join(junit_ref_list) - added_refs = junit_ref_list - skipped_refs = [] - else: - # Append strategy: combine with existing refs, avoiding duplicates - existing_ref_list = ( - [ref.strip() for ref in existing_refs.split(",") if ref.strip()] if existing_refs else [] - ) + return self.case_handler.update_existing_case_references(case_id, junit_refs, case_fields, strategy) - # Determine which references are new vs duplicates - added_refs = [ref for ref in junit_ref_list if ref not in existing_ref_list] - skipped_refs = [ref for ref in junit_ref_list if ref in existing_ref_list] - - # If no new references to add and no case fields, return current state - if not added_refs and not case_fields: - return True, None, added_refs, skipped_refs, [] - - # Combine references - combined_list = existing_ref_list + added_refs - new_refs = ",".join(combined_list) - - # Validate 2000 character limit for test case references - if len(new_refs) > 2000: - return ( - False, 
- f"Combined references length ({len(new_refs)} characters) exceeds 2000 character limit", - [], - [], - [], - ) - - # Build update data with refs and custom case fields - update_data = {} - if new_refs is not None: - update_data["refs"] = new_refs - - # Add custom case fields to the update - if case_fields: - for field_name, field_value in case_fields.items(): - # Skip special internal fields that shouldn't be updated - if field_name not in ["case_id", "section_id", "result"]: - update_data[field_name] = field_value - updated_fields.append(field_name) - - # Only update if we have data to send - if not update_data: - return True, None, added_refs, skipped_refs, updated_fields - - # Update the case - update_response = self.client.send_post(f"update_case/{case_id}", update_data) - - if update_response.error_message: - return False, update_response.error_message, [], [], [] - - return True, None, added_refs, skipped_refs, updated_fields - - def upload_attachments(self, report_results: [Dict], results: List[Dict], run_id: int): - """Getting test result id and upload attachments for it.""" - tests_in_run, error = self.__get_all_tests_in_run(run_id) - if not error: - failed_uploads = [] - for report_result in report_results: - case_id = report_result["case_id"] - test_id = next((test["id"] for test in tests_in_run if test["case_id"] == case_id), None) - result_id = next((result["id"] for result in results if result["test_id"] == test_id), None) - for file_path in report_result.get("attachments"): - try: - with open(file_path, "rb") as file: - response = self.client.send_post( - f"add_attachment_to_result/{result_id}", files={"attachment": file} - ) - - # Check if upload was successful - if response.status_code != 200: - file_name = os.path.basename(file_path) - - # Handle 413 Request Entity Too Large specifically - if response.status_code == 413: - error_msg = FAULT_MAPPING["attachment_too_large"].format( - file_name=file_name, case_id=case_id - ) - self.environment.elog(error_msg) - failed_uploads.append(f"{file_name} (case {case_id})") - else: - # Handle other HTTP errors - error_msg = FAULT_MAPPING["attachment_upload_failed"].format( - file_path=file_name, - case_id=case_id, - error_message=response.error_message or f"HTTP {response.status_code}", - ) - self.environment.elog(error_msg) - failed_uploads.append(f"{file_name} (case {case_id})") - except FileNotFoundError: - self.environment.elog(f"Attachment file not found: {file_path} (case {case_id})") - failed_uploads.append(f"{file_path} (case {case_id})") - except Exception as ex: - file_name = os.path.basename(file_path) if os.path.exists(file_path) else file_path - self.environment.elog(f"Error uploading attachment '{file_name}' for case {case_id}: {ex}") - failed_uploads.append(f"{file_name} (case {case_id})") - - # Provide a summary if there were failed uploads - if failed_uploads: - self.environment.log(f"\nWarning: {len(failed_uploads)} attachment(s) failed to upload.") - else: - self.environment.elog(f"Unable to upload attachments due to API request error: {error}") + def upload_attachments(self, report_results: List[Dict], results: List[Dict], run_id: int): + return self.result_handler.upload_attachments(report_results, results, run_id) def add_results(self, run_id: int) -> Tuple[List, str, int]: - """ - Adds one or more new test results. - :run_id: run id - :returns: Tuple with dict created resources, error string, and results count. 
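The run-level and case-level reference helpers above all follow the same recipe: split the comma-separated string, strip and de-duplicate while preserving order, then enforce a length cap (250 characters for run references, 2000 for case references). A standalone sketch of that recipe, with an invented helper name:

```python
from typing import List, Tuple


def merge_references(existing: str, new: List[str], max_length: int) -> Tuple[str, List[str], List[str]]:
    """Merge new refs into an existing comma-separated string, skipping duplicates.

    Returns (combined_string, added, skipped); raises ValueError when the cap is exceeded.
    """
    existing_list = [ref.strip() for ref in existing.split(",") if ref.strip()] if existing else []
    seen = set(existing_list)
    added, skipped = [], []
    for ref in (r.strip() for r in new):
        if not ref:
            continue
        if ref in seen:
            skipped.append(ref)
        else:
            added.append(ref)
            seen.add(ref)
    combined = ",".join(existing_list + added)
    if len(combined) > max_length:
        raise ValueError(f"Combined references length ({len(combined)}) exceeds {max_length} characters")
    return combined, added, skipped


# e.g. run refs are capped at 250 characters, case refs at 2000:
combined, added, skipped = merge_references("JIRA-1,JIRA-2", ["JIRA-2", "JIRA-3"], max_length=250)
print(combined, added, skipped)   # JIRA-1,JIRA-2,JIRA-3 ['JIRA-3'] ['JIRA-2']
```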
- """ - responses = [] - error_message = "" - # Get pre-validated user IDs if available - user_ids = getattr(self.environment, "_validated_user_ids", []) - - add_results_data_chunks = self.data_provider.add_results_for_cases(self.environment.batch_size, user_ids) - # Get assigned count from data provider - assigned_count = getattr(self.data_provider, "_assigned_count", 0) - - results_amount = sum([len(results["results"]) for results in add_results_data_chunks]) - - with self.environment.get_progress_bar(results_amount=results_amount, prefix="Adding results") as progress_bar: - with ThreadPoolExecutor(max_workers=MAX_WORKERS_ADD_RESULTS) as executor: - futures = { - executor.submit(self.client.send_post, f"add_results_for_cases/{run_id}", body): body - for body in add_results_data_chunks - } - responses, error_message = self.handle_futures( - futures=futures, - action_string="add_results", - progress_bar=progress_bar, - ) - if error_message: - # When error_message is present we cannot be sure that responses contains all added items. - # Iterate through futures to get all responses from done tasks (not cancelled) - responses = ApiRequestHandler.retrieve_results_after_cancelling(futures) - responses = [response.response_text for response in responses] - results = [result for results_list in responses for result in results_list] - report_results_w_attachments = [] - for results_data_chunk in add_results_data_chunks: - for test_result in results_data_chunk["results"]: - if test_result["attachments"]: - report_results_w_attachments.append(test_result) - if report_results_w_attachments: - attachments_count = 0 - for result in report_results_w_attachments: - attachments_count += len(result["attachments"]) - self.environment.log( - f"Uploading {attachments_count} attachments " f"for {len(report_results_w_attachments)} test results." - ) - self.upload_attachments(report_results_w_attachments, results, run_id) - else: - self.environment.log(f"No attachments found to upload.") - - # Log assignment results if assignment was performed - if user_ids: - total_failed = getattr(self.data_provider, "_total_failed_count", assigned_count) - if assigned_count > 0: - self.environment.log(f"Assigning failed results: {assigned_count}/{total_failed}, Done.") - else: - self.environment.log(f"Assigning failed results: 0/0, Done.") - - return responses, error_message, progress_bar.n + return self.result_handler.add_results(run_id) def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, str]: responses = [] @@ -944,69 +310,27 @@ def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, st return responses, error_message def close_run(self, run_id: int) -> Tuple[dict, str]: - """ - Closes an existing test run and archives its tests & results. - :run_id: run id - :returns: Tuple with dict created resources and error string. - """ - body = {"run_id": run_id} - response = self.client.send_post(f"close_run/{run_id}", body) - return response.response_text, response.error_message + return self.run_handler.close_run(run_id) def delete_suite(self, suite_id: int) -> Tuple[dict, str]: - """ - Delete suite given suite id - :suite_id: suite id - :returns: Tuple with dict created resources and error string. 
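The removed `add_results` body above (now owned by the result handler) fans result chunks out over a thread pool and funnels the futures through `handle_futures`. Reduced to the bare submission pattern it looks roughly like this; `send_results_chunk` and the worker count are placeholders, not trcli helpers:

```python
from concurrent.futures import ThreadPoolExecutor, as_completed

MAX_WORKERS = 4  # placeholder; trcli uses its own MAX_WORKERS_ADD_RESULTS constant


def send_results_chunk(run_id: int, chunk: dict) -> dict:
    """Placeholder for POST add_results_for_cases/{run_id} with one chunk of results."""
    return {"run_id": run_id, "count": len(chunk["results"])}


def add_results_concurrently(run_id: int, chunks: list) -> list:
    responses = []
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        futures = {executor.submit(send_results_chunk, run_id, chunk): chunk for chunk in chunks}
        for future in as_completed(futures):
            # A real implementation also tracks errors and can cancel remaining futures,
            # which is what handle_futures / retrieve_results_after_cancelling do in trcli.
            responses.append(future.result())
    return responses


print(add_results_concurrently(42, [{"results": [{"case_id": 1, "status_id": 1}]}]))
```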
- """ - response = self.client.send_post(f"delete_suite/{suite_id}", payload={}) - return response.response_text, response.error_message + return self.suite_handler.delete_suite(suite_id) def delete_sections(self, added_sections: List[Dict]) -> Tuple[List, str]: - """ - Delete section given add_sections response - :suite_id: section id - :returns: Tuple with dict created resources and error string. - """ - responses = [] - error_message = "" - for section in added_sections: - response = self.client.send_post(f"delete_section/{section['section_id']}", payload={}) - if not response.error_message: - responses.append(response.response_text) - else: - error_message = response.error_message - break - return responses, error_message + return self.section_handler.delete_sections(added_sections) def delete_cases(self, suite_id: int, added_cases: List[Dict]) -> Tuple[Dict, str]: - """ - Delete cases given add_cases response - :suite_id: section id - :returns: Tuple with dict created resources and error string. - """ - body = {"case_ids": [case["case_id"] for case in added_cases]} - response = self.client.send_post(f"delete_cases/{suite_id}", payload=body) - return response.response_text, response.error_message + return self.case_handler.delete_cases(suite_id, added_cases) def delete_run(self, run_id) -> Tuple[dict, str]: - """ - Delete run given add_run response - :suite_id: section id - :returns: Tuple with dict created resources and error string. - """ - response = self.client.send_post(f"delete_run/{run_id}", payload={}) - return response.response_text, response.error_message + return self.run_handler.delete_run(run_id) @staticmethod def retrieve_results_after_cancelling(futures) -> list: - responses = [] - for future in as_completed(futures): - if not future.cancelled(): - response = future.result() - if not response.error_message: - responses.append(response) - return responses + """ + Retrieve results from futures after cancellation has been triggered. + Delegated to ResultHandler for backward compatibility. + """ + return ResultHandler.retrieve_results_after_cancelling(futures) def get_user_by_email(self, email: str) -> Tuple[Union[int, None], str]: """ @@ -1061,18 +385,7 @@ def get_user_by_email(self, email: str) -> Tuple[Union[int, None], str]: return None, f"API error (status {response.status_code}) when validating user: {email}" def _add_case_and_update_data(self, case: TestRailCase) -> APIClientResult: - case_body = case.to_dict() - active_field = getattr(self, "_active_automation_id_field", None) - if active_field == UPDATED_SYSTEM_NAME_AUTOMATION_ID and OLD_SYSTEM_NAME_AUTOMATION_ID in case_body: - case_body[UPDATED_SYSTEM_NAME_AUTOMATION_ID] = case_body.pop(OLD_SYSTEM_NAME_AUTOMATION_ID) - if self.environment.case_matcher != MatchersParser.AUTO and OLD_SYSTEM_NAME_AUTOMATION_ID in case_body: - case_body.pop(OLD_SYSTEM_NAME_AUTOMATION_ID) - response = self.client.send_post(f"add_case/{case_body.pop('section_id')}", case_body) - if response.status_code == 200: - case.case_id = response.response_text["id"] - case.result.case_id = response.response_text["id"] - case.section_id = response.response_text["section_id"] - return response + return self.case_handler._add_case_and_update_data(case) def __cancel_running_futures(self, futures, action_string): self.environment.log(f"\nAborting: {action_string}. 
Trying to cancel scheduled tasks.") @@ -1081,36 +394,66 @@ def __cancel_running_futures(self, futures, action_string): def __get_all_cases(self, project_id=None, suite_id=None) -> Tuple[List[dict], str]: """ - Get all cases from all pages + Get all cases from all pages (with caching) """ - if suite_id is None: - return self.__get_all_entities("cases", f"get_cases/{project_id}") - else: - return self.__get_all_entities("cases", f"get_cases/{project_id}&suite_id={suite_id}") + cache_key = f"get_cases/{project_id}" + params = (project_id, suite_id) + + def fetch(): + if suite_id is None: + return self.__get_all_entities("cases", f"get_cases/{project_id}", entities=[]) + else: + return self.__get_all_entities("cases", f"get_cases/{project_id}&suite_id={suite_id}", entities=[]) + + return self._cache.get_or_fetch(cache_key, fetch, params) def __get_all_sections(self, project_id=None, suite_id=None) -> Tuple[List[dict], str]: """ - Get all sections from all pages + Get all sections from all pages (with caching) """ - return self.__get_all_entities("sections", f"get_sections/{project_id}&suite_id={suite_id}") + cache_key = f"get_sections/{project_id}" + params = (project_id, suite_id) + + def fetch(): + return self.__get_all_entities("sections", f"get_sections/{project_id}&suite_id={suite_id}", entities=[]) + + return self._cache.get_or_fetch(cache_key, fetch, params) def __get_all_tests_in_run(self, run_id=None) -> Tuple[List[dict], str]: """ - Get all tests from all pages + Get all tests from all pages (with caching) """ - return self.__get_all_entities("tests", f"get_tests/{run_id}") + cache_key = f"get_tests/{run_id}" + params = (run_id,) + + def fetch(): + return self.__get_all_entities("tests", f"get_tests/{run_id}", entities=[]) + + return self._cache.get_or_fetch(cache_key, fetch, params) def __get_all_projects(self) -> Tuple[List[dict], str]: """ - Get all projects from all pages + Get all projects from all pages (with caching) """ - return self.__get_all_entities("projects", f"get_projects") + cache_key = "get_projects" + params = None + + def fetch(): + return self.__get_all_entities("projects", f"get_projects", entities=[]) + + return self._cache.get_or_fetch(cache_key, fetch, params) def __get_all_suites(self, project_id) -> Tuple[List[dict], str]: """ - Get all suites from all pages + Get all suites from all pages (with caching) """ - return self.__get_all_entities("suites", f"get_suites/{project_id}") + cache_key = f"get_suites/{project_id}" + params = (project_id,) + + def fetch(): + return self.__get_all_entities("suites", f"get_suites/{project_id}", entities=[]) + + return self._cache.get_or_fetch(cache_key, fetch, params) def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[Dict], str]: """ @@ -1196,7 +539,6 @@ def __get_all_entities_parallel(self, entity: str, link: str) -> Tuple[List[Dict next_link = response.response_text["_links"]["next"] # Extract offset/limit from the link to calculate total pages - import re from urllib.parse import urlparse, parse_qs # Parse the next link to get offset and limit @@ -1425,726 +767,292 @@ def check_case_exists(case_id): return valid_ids - # Label management methods + # Label management methods (delegated to LabelManager for backward compatibility) def add_label(self, project_id: int, title: str) -> Tuple[dict, str]: - """ - Add a new label to the project - :param project_id: ID of the project - :param title: Title of the label (max 20 characters) - :returns: Tuple with created label data and error string - """ - 
payload = {"title": title} - response = self.client.send_post(f"add_label/{project_id}", payload=payload) - return response.response_text, response.error_message + return self.label_manager.add_label(project_id, title) def update_label(self, label_id: int, project_id: int, title: str) -> Tuple[dict, str]: - """ - Update an existing label - :param label_id: ID of the label to update - :param project_id: ID of the project - :param title: New title for the label (max 20 characters) - :returns: Tuple with updated label data and error string - """ - payload = {"project_id": project_id, "title": title} - response = self.client.send_post(f"update_label/{label_id}", payload=payload) - return response.response_text, response.error_message + return self.label_manager.update_label(label_id, project_id, title) def get_label(self, label_id: int) -> Tuple[dict, str]: - """ - Get a specific label by ID - :param label_id: ID of the label to retrieve - :returns: Tuple with label data and error string - """ - response = self.client.send_get(f"get_label/{label_id}") - return response.response_text, response.error_message + return self.label_manager.get_label(label_id) def get_labels(self, project_id: int, offset: int = 0, limit: int = 250) -> Tuple[dict, str]: - """ - Get all labels for a project with pagination - :param project_id: ID of the project - :param offset: Offset for pagination - :param limit: Limit for pagination - :returns: Tuple with labels data (including pagination info) and error string - """ - params = [] - if offset > 0: - params.append(f"offset={offset}") - if limit != 250: - params.append(f"limit={limit}") - - url = f"get_labels/{project_id}" - if params: - url += "&" + "&".join(params) - - response = self.client.send_get(url) - return response.response_text, response.error_message + return self.label_manager.get_labels(project_id, offset, limit) def delete_label(self, label_id: int) -> Tuple[bool, str]: - """ - Delete a single label - :param label_id: ID of the label to delete - :returns: Tuple with success status and error string - """ - response = self.client.send_post(f"delete_label/{label_id}") - success = response.status_code == 200 - return success, response.error_message + return self.label_manager.delete_label(label_id) def delete_labels(self, label_ids: List[int]) -> Tuple[bool, str]: - """ - Delete multiple labels - :param label_ids: List of label IDs to delete - :returns: Tuple with success status and error string - """ - payload = {"label_ids": label_ids} - response = self.client.send_post("delete_labels", payload=payload) - success = response.status_code == 200 - return success, response.error_message + return self.label_manager.delete_labels(label_ids) def add_labels_to_cases( self, case_ids: List[int], title: str, project_id: int, suite_id: int = None ) -> Tuple[dict, str]: - """ - Add a label to multiple test cases - - :param case_ids: List of test case IDs - :param title: Label title (max 20 characters) - :param project_id: Project ID for validation - :param suite_id: Suite ID (optional) - :returns: Tuple with response data and error string - """ - # Initialize results structure - results = {"successful_cases": [], "failed_cases": [], "max_labels_reached": [], "case_not_found": []} - - # Check if project is multi-suite by getting all cases without suite_id - all_cases_no_suite, error_message = self.__get_all_cases(project_id, None) - if error_message: - return results, error_message - - # Check if project has multiple suites - suite_ids = set() - for case in 
all_cases_no_suite: - if "suite_id" in case and case["suite_id"]: - suite_ids.add(case["suite_id"]) - - # If project has multiple suites and no suite_id provided, require it - if len(suite_ids) > 1 and suite_id is None: - return results, "This project is multisuite, suite id is required" - - # Get all cases to validate that the provided case IDs exist - all_cases, error_message = self.__get_all_cases(project_id, suite_id) - if error_message: - return results, error_message - - # Create a set of existing case IDs for quick lookup - existing_case_ids = {case["id"] for case in all_cases} - - # Validate case IDs and separate valid from invalid ones - invalid_case_ids = [case_id for case_id in case_ids if case_id not in existing_case_ids] - valid_case_ids = [case_id for case_id in case_ids if case_id in existing_case_ids] - - # Record invalid case IDs - for case_id in invalid_case_ids: - results["case_not_found"].append(case_id) - - # If no valid case IDs, return early - if not valid_case_ids: - return results, "" - - # Check if label exists or create it - existing_labels, error_message = self.get_labels(project_id) - if error_message: - return results, error_message - - # Find existing label with the same title - label_id = None - for label in existing_labels.get("labels", []): - if label.get("title") == title: - label_id = label.get("id") - break - - # Create label if it doesn't exist - if label_id is None: - label_data, error_message = self.add_label(project_id, title) - if error_message: - return results, error_message - label_info = label_data.get("label", label_data) - label_id = label_info.get("id") - - # Collect case data and validate constraints - cases_to_update = [] - for case_id in valid_case_ids: - # Get current case to check existing labels - case_response = self.client.send_get(f"get_case/{case_id}") - if case_response.status_code != 200: - results["failed_cases"].append( - {"case_id": case_id, "error": f"Could not retrieve case {case_id}: {case_response.error_message}"} - ) - continue - - case_data = case_response.response_text - current_labels = case_data.get("labels", []) - - # Check if label already exists on this case - if any(label.get("id") == label_id for label in current_labels): - results["successful_cases"].append( - {"case_id": case_id, "message": f"Label '{title}' already exists on case {case_id}"} - ) - continue - - # Check maximum labels limit (10) - if len(current_labels) >= 10: - results["max_labels_reached"].append(case_id) - continue - - # Prepare case for update - existing_label_ids = [label.get("id") for label in current_labels if label.get("id")] - updated_label_ids = existing_label_ids + [label_id] - cases_to_update.append({"case_id": case_id, "labels": updated_label_ids}) - - # Update cases using appropriate endpoint - if len(cases_to_update) == 1: - # Single case: use update_case/{case_id} - case_info = cases_to_update[0] - case_update_data = {"labels": case_info["labels"]} - - update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - - if update_response.status_code == 200: - results["successful_cases"].append( - { - "case_id": case_info["case_id"], - "message": f"Successfully added label '{title}' to case {case_info['case_id']}", - } - ) - else: - results["failed_cases"].append( - {"case_id": case_info["case_id"], "error": update_response.error_message} - ) - elif len(cases_to_update) > 1: - # Multiple cases: use update_cases/{suite_id} - # Need to determine suite_id from the cases - case_suite_id = 
suite_id - if not case_suite_id: - # Get suite_id from the first case if not provided - first_case = all_cases[0] if all_cases else None - case_suite_id = first_case.get("suite_id") if first_case else None - - if not case_suite_id: - # Fall back to individual updates if no suite_id available - for case_info in cases_to_update: - case_update_data = {"labels": case_info["labels"]} - update_response = self.client.send_post( - f"update_case/{case_info['case_id']}", payload=case_update_data - ) - - if update_response.status_code == 200: - results["successful_cases"].append( - { - "case_id": case_info["case_id"], - "message": f"Successfully added label '{title}' to case {case_info['case_id']}", - } - ) - else: - results["failed_cases"].append( - {"case_id": case_info["case_id"], "error": update_response.error_message} - ) - else: - # Batch update using update_cases/{suite_id} - batch_update_data = { - "case_ids": [case_info["case_id"] for case_info in cases_to_update], - "labels": cases_to_update[0]["labels"], # Assuming same labels for all cases - } - - batch_response = self.client.send_post(f"update_cases/{case_suite_id}", payload=batch_update_data) - - if batch_response.status_code == 200: - for case_info in cases_to_update: - results["successful_cases"].append( - { - "case_id": case_info["case_id"], - "message": f"Successfully added label '{title}' to case {case_info['case_id']}", - } - ) - else: - # If batch update fails, fall back to individual updates - for case_info in cases_to_update: - case_update_data = {"labels": case_info["labels"]} - update_response = self.client.send_post( - f"update_case/{case_info['case_id']}", payload=case_update_data - ) - - if update_response.status_code == 200: - results["successful_cases"].append( - { - "case_id": case_info["case_id"], - "message": f"Successfully added label '{title}' to case {case_info['case_id']}", - } - ) - else: - results["failed_cases"].append( - {"case_id": case_info["case_id"], "error": update_response.error_message} - ) - - return results, "" + return self.label_manager.add_labels_to_cases( + case_ids, title, project_id, suite_id, get_all_cases_callback=self.__get_all_cases + ) def get_cases_by_label( self, project_id: int, suite_id: int = None, label_ids: List[int] = None, label_title: str = None ) -> Tuple[List[dict], str]: - """ - Get test cases filtered by label ID or title - - :param project_id: Project ID - :param suite_id: Suite ID (optional) - :param label_ids: List of label IDs to filter by - :param label_title: Label title to filter by - :returns: Tuple with list of matching cases and error string - """ - # Get all cases first - all_cases, error_message = self.__get_all_cases(project_id, suite_id) - if error_message: - return [], error_message - - # If filtering by title, first get the label ID - target_label_ids = label_ids or [] - if label_title and not target_label_ids: - labels_data, error_message = self.get_labels(project_id) - if error_message: - return [], error_message - - for label in labels_data.get("labels", []): - if label.get("title") == label_title: - target_label_ids.append(label.get("id")) - - if not target_label_ids: - return [], "" # No label found is a valid case with 0 results - - # Filter cases that have any of the target labels - matching_cases = [] - for case in all_cases: - case_labels = case.get("labels", []) - case_label_ids = [label.get("id") for label in case_labels] - - # Check if any of the target label IDs are present in this case - if any(label_id in case_label_ids for label_id in 
target_label_ids): - matching_cases.append(case) - - return matching_cases, "" + return self.label_manager.get_cases_by_label( + project_id, suite_id, label_ids, label_title, get_all_cases_callback=self.__get_all_cases + ) def add_labels_to_tests( self, test_ids: List[int], titles: Union[str, List[str]], project_id: int ) -> Tuple[dict, str]: - """ - Add labels to multiple tests + return self.label_manager.add_labels_to_tests(test_ids, titles, project_id) - :param test_ids: List of test IDs - :param titles: Label title(s) - can be a single string or list of strings (max 20 characters each) - :param project_id: Project ID for validation - :returns: Tuple with response data and error string - """ - # Initialize results structure - results = {"successful_tests": [], "failed_tests": [], "max_labels_reached": [], "test_not_found": []} + def get_tests_by_label( + self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None + ) -> Tuple[List[dict], str]: + return self.label_manager.get_tests_by_label(project_id, label_ids, label_title, run_ids) - # Normalize titles to a list - if isinstance(titles, str): - title_list = [titles] - else: - title_list = titles - - # At this point, title_list should already be validated by the CLI - # Just ensure we have clean titles - title_list = [title.strip() for title in title_list if title.strip()] - - if not title_list: - return {}, "No valid labels provided" - - # Validate test IDs by getting run information for each test - valid_test_ids = [] - for test_id in test_ids: - # Get test information to validate it exists - test_response = self.client.send_get(f"get_test/{test_id}") - if test_response.status_code != 200: - results["test_not_found"].append(test_id) - continue - - test_data = test_response.response_text - # Validate that the test belongs to the correct project - run_id = test_data.get("run_id") - if run_id: - run_response = self.client.send_get(f"get_run/{run_id}") - if run_response.status_code == 200: - run_data = run_response.response_text - if run_data.get("project_id") == project_id: - valid_test_ids.append(test_id) - else: - results["test_not_found"].append(test_id) - else: - results["test_not_found"].append(test_id) - else: - results["test_not_found"].append(test_id) + def get_test_labels(self, test_ids: List[int]) -> Tuple[List[dict], str]: + return self.label_manager.get_test_labels(test_ids) - # If no valid test IDs, return early - if not valid_test_ids: - return results, "" + # Test case reference management methods (delegated to ReferenceManager for backward compatibility) + def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool, str]: + return self.reference_manager.add_case_references(case_id, references) - # Check if labels exist or create them - existing_labels, error_message = self.get_labels(project_id) - if error_message: - return results, error_message - - # Process each title to get/create label IDs - label_ids = [] - label_id_to_title = {} # Map label IDs to their titles - for title in title_list: - # Find existing label with the same title - label_id = None - for label in existing_labels.get("labels", []): - if label.get("title") == title: - label_id = label.get("id") - break + def update_case_references(self, case_id: int, references: List[str]) -> Tuple[bool, str]: + return self.reference_manager.update_case_references(case_id, references) - # Create label if it doesn't exist - if label_id is None: - label_data, error_message = self.add_label(project_id, 
title) - if error_message: - return results, error_message - label_info = label_data.get("label", label_data) - label_id = label_info.get("id") - - if label_id: - label_ids.append(label_id) - label_id_to_title[label_id] = title - - # Collect test data and validate constraints - tests_to_update = [] - for test_id in valid_test_ids: - # Get current test to check existing labels - test_response = self.client.send_get(f"get_test/{test_id}") - if test_response.status_code != 200: - results["failed_tests"].append( - {"test_id": test_id, "error": f"Could not retrieve test {test_id}: {test_response.error_message}"} - ) - continue + def delete_case_references(self, case_id: int, specific_references: List[str] = None) -> Tuple[bool, str]: + return self.reference_manager.delete_case_references(case_id, specific_references) - test_data = test_response.response_text - current_labels = test_data.get("labels", []) - current_label_ids = [label.get("id") for label in current_labels if label.get("id")] + def update_case_automation_id(self, case_id: int, automation_id: str) -> Tuple[bool, str]: + return self.case_handler.update_case_automation_id(case_id, automation_id) - new_label_ids = [] - already_exists_titles = [] + def add_bdd(self, section_id: int, feature_content: str) -> Tuple[List[int], str]: + return self.bdd_handler.add_bdd(section_id, feature_content) - for label_id in label_ids: - if label_id not in current_label_ids: - new_label_ids.append(label_id) - else: - if label_id in label_id_to_title: - already_exists_titles.append(label_id_to_title[label_id]) - - if not new_label_ids: - results["successful_tests"].append( - { - "test_id": test_id, - "message": f"All labels already exist on test {test_id}: {', '.join(already_exists_titles)}", - } - ) - continue - - # Check maximum labels limit (10) - if len(current_label_ids) + len(new_label_ids) > 10: - results["max_labels_reached"].append(test_id) - continue - - # Prepare test for update - updated_label_ids = current_label_ids + new_label_ids - - new_label_titles = [] - for label_id in new_label_ids: - if label_id in label_id_to_title: - new_label_titles.append(label_id_to_title[label_id]) - - tests_to_update.append( - { - "test_id": test_id, - "labels": updated_label_ids, - "new_labels": new_label_ids, - "new_label_titles": new_label_titles, - } - ) + def update_bdd(self, case_id: int, feature_content: str) -> Tuple[List[int], str]: + """ + Update existing BDD test case with .feature file content - # Update tests using appropriate endpoint - if len(tests_to_update) == 1: - # Single test: use update_test/{test_id} - test_info = tests_to_update[0] - test_update_data = {"labels": test_info["labels"]} + Updates TestRail BDD test case from Gherkin .feature content. + The Gherkin content is sent in the request body as plain text. 
- update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) + Args: + case_id: TestRail test case ID to update + feature_content: Raw .feature file content (Gherkin syntax) - if update_response.status_code == 200: - new_label_titles = test_info.get("new_label_titles", []) - new_label_count = len(new_label_titles) + Returns: + Tuple of (case_ids, error_message) + - case_ids: List containing the updated test case ID + - error_message: Empty string on success, error details on failure - if new_label_count == 1: - message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" - elif new_label_count > 1: - message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" - else: - message = f"No new labels added to test {test_info['test_id']}" + API Endpoint: POST /api/v2/update_bdd/{case_id} + Request Body: Raw Gherkin text (multipart/form-data) + Response: Standard TestRail test case JSON with BDD custom fields + """ + # Send Gherkin content as file upload (multipart/form-data) + # TestRail expects the .feature file as an attachment + self.environment.vlog(f"Updating .feature file via update_bdd/{case_id}") + files = {"attachment": ("feature.feature", feature_content, "text/plain")} + response = self.client.send_post(f"update_bdd/{case_id}", payload=None, files=files) - results["successful_tests"].append({"test_id": test_info["test_id"], "message": message}) + if response.status_code == 200: + # Response is a test case object with 'id' field + if isinstance(response.response_text, dict): + case_id = response.response_text.get("id") + if case_id: + return [case_id], "" + else: + return [], "Response missing 'id' field" else: - results["failed_tests"].append( - {"test_id": test_info["test_id"], "error": update_response.error_message} - ) + return [], "Unexpected response format" else: - # Multiple tests: use individual updates to ensure each test gets its specific labels - for test_info in tests_to_update: - test_update_data = {"labels": test_info["labels"]} - update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) - - if update_response.status_code == 200: - new_label_titles = test_info.get("new_label_titles", []) - new_label_count = len(new_label_titles) - - if new_label_count == 1: - message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" - elif new_label_count > 1: - message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" - else: - message = f"No new labels added to test {test_info['test_id']}" - - results["successful_tests"].append({"test_id": test_info["test_id"], "message": message}) - else: - results["failed_tests"].append( - {"test_id": test_info["test_id"], "error": update_response.error_message} - ) + error_msg = response.error_message or f"Failed to update feature file (HTTP {response.status_code})" + return [], error_msg - return results, "" + def get_bdd(self, case_id: int) -> Tuple[str, str]: + return self.bdd_handler.get_bdd(case_id) + + def get_bdd_template_id(self, project_id: int) -> Tuple[int, str]: + return self.bdd_handler.get_bdd_template_id(project_id) + + def find_bdd_case_by_name( + self, feature_name: str, project_id: int, suite_id: int + ) -> Tuple[Optional[int], Optional[str], List[int]]: + """ + Find a BDD test case by feature name (normalized matching). 
+ + This method is shared by CucumberParser and JunitParser for feature name matching. + + Args: + feature_name: The feature name to search for + project_id: TestRail project ID + suite_id: TestRail suite ID + + Returns: + Tuple of (case_id, error_message, duplicate_case_ids): + - case_id: The matched case ID, or -1 if not found, or None if error/duplicates + - error_message: Error message if operation failed, None otherwise + - duplicate_case_ids: List of case IDs if duplicates found, empty list otherwise + """ + # Build cache if not already cached for this project/suite + cache_key = f"{project_id}_{suite_id}" + if cache_key not in self._bdd_case_cache: + error = self._build_bdd_case_cache(project_id, suite_id) + if error: + return None, error, [] + + # Normalize the feature name for matching + normalized_name = self._normalize_feature_name(feature_name) + + # Look up in cache + cache = self._bdd_case_cache.get(cache_key, {}) + matching_cases = cache.get(normalized_name, []) + + if len(matching_cases) == 0: + # Not found + self.environment.vlog(f"Feature '{feature_name}' not found in TestRail") + return -1, None, [] + elif len(matching_cases) == 1: + # Single match - success + case_id = matching_cases[0].get("id") + self.environment.vlog(f"Feature '{feature_name}' matched to case ID: C{case_id}") + return case_id, None, [] + else: + # Multiple matches - duplicate error + duplicate_ids = [case.get("id") for case in matching_cases] + self.environment.vlog(f"Feature '{feature_name}' has {len(matching_cases)} duplicates: {duplicate_ids}") + return None, None, duplicate_ids - def get_tests_by_label( - self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None - ) -> Tuple[List[dict], str]: + def _build_bdd_case_cache(self, project_id: int, suite_id: int) -> Optional[str]: """ - Get tests filtered by label ID or title from specific runs + Build cache of BDD test cases for a project/suite. 
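`find_bdd_case_by_name` above encodes three outcomes in its return value: a case ID for a unique match, -1 when the feature is unknown, and None together with the duplicate IDs when several BDD cases normalize to the same title. A hedged sketch of how a parser could branch on that; `resolve_feature` and its error handling are illustrative only:

```python
def resolve_feature(api_handler, feature_name: str, project_id: int, suite_id: int):
    case_id, error, duplicates = api_handler.find_bdd_case_by_name(feature_name, project_id, suite_id)

    if error:
        raise RuntimeError(f"Lookup failed for '{feature_name}': {error}")
    if duplicates:
        # Multiple BDD cases normalize to the same title; ambiguous, so refuse to guess.
        raise RuntimeError(f"Feature '{feature_name}' matches several cases: {duplicates}")
    if case_id == -1:
        return None  # not in TestRail yet; the caller may create it via add_bdd/add_case_bdd
    return case_id
```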
- :param project_id: Project ID - :param label_ids: List of label IDs to filter by - :param label_title: Label title to filter by - :param run_ids: List of run IDs to filter tests from (optional, defaults to all runs) - :returns: Tuple with list of matching tests and error string - """ - # If filtering by title, first get the label ID - target_label_ids = label_ids or [] - if label_title and not target_label_ids: - labels_data, error_message = self.get_labels(project_id) - if error_message: - return [], error_message - - for label in labels_data.get("labels", []): - if label.get("title") == label_title: - target_label_ids.append(label.get("id")) - - if not target_label_ids: - return [], "" # No label found is a valid case with 0 results - - # Get runs for the project (either all runs or specific run IDs) - if run_ids: - # Use specific run IDs - validate they exist by getting run details - runs = [] - for run_id in run_ids: - run_response = self.client.send_get(f"get_run/{run_id}") - if run_response.status_code == 200: - runs.append(run_response.response_text) - else: - return [], f"Run ID {run_id} not found or inaccessible" - else: - # Get all runs for the project - runs_response = self.client.send_get(f"get_runs/{project_id}") - if runs_response.status_code != 200: - return [], runs_response.error_message + Args: + project_id: TestRail project ID + suite_id: TestRail suite ID - runs_data = runs_response.response_text - runs = runs_data.get("runs", []) if isinstance(runs_data, dict) else runs_data + Returns: + Error message if failed, None if successful + """ + cache_key = f"{project_id}_{suite_id}" - # Collect all tests from all runs - matching_tests = [] - for run in runs: - run_id = run.get("id") - if not run_id: - continue + self.environment.vlog(f"Building BDD case cache for project {project_id}, suite {suite_id}...") - # Get tests for this run - tests_response = self.client.send_get(f"get_tests/{run_id}") - if tests_response.status_code != 200: - continue # Skip this run if we can't get tests + # Fetch all cases for this suite + all_cases, error = self.__get_all_cases(project_id, suite_id) - tests_data = tests_response.response_text - tests = tests_data.get("tests", []) if isinstance(tests_data, dict) else tests_data + if error: + return f"Error fetching cases for cache: {error}" - # Filter tests that have any of the target labels - for test in tests: - test_labels = test.get("labels", []) - test_label_ids = [label.get("id") for label in test_labels] + # Resolve BDD case field name dynamically + bdd_field_name = self.get_bdd_case_field_name() - # Check if any of the target label IDs are present in this test - if any(label_id in test_label_ids for label_id in target_label_ids): - matching_tests.append(test) + # Filter to BDD cases only (have BDD scenarios field with content) + bdd_cases = [case for case in all_cases if case.get(bdd_field_name)] - return matching_tests, "" + self.environment.vlog( + f"Found {len(bdd_cases)} BDD cases out of {len(all_cases)} total cases (using field: {bdd_field_name})" + ) - def get_test_labels(self, test_ids: List[int]) -> Tuple[List[dict], str]: - """ - Get labels for specific tests + # Build normalized name -> [case, case, ...] 
mapping + cache = {} + for case in bdd_cases: + title = case.get("title", "") + normalized = self._normalize_feature_name(title) - :param test_ids: List of test IDs to get labels for - :returns: Tuple with list of test label information and error string - """ - results = [] - - for test_id in test_ids: - # Get test information - test_response = self.client.send_get(f"get_test/{test_id}") - if test_response.status_code != 200: - results.append({"test_id": test_id, "error": f"Test {test_id} not found or inaccessible", "labels": []}) - continue - - test_data = test_response.response_text - test_labels = test_data.get("labels", []) - - results.append( - { - "test_id": test_id, - "title": test_data.get("title", "Unknown"), - "status_id": test_data.get("status_id"), - "labels": test_labels, - "error": None, - } - ) + if normalized not in cache: + cache[normalized] = [] + cache[normalized].append(case) - return results, "" + self._bdd_case_cache[cache_key] = cache + self.environment.vlog(f"Cached {len(cache)} unique feature name(s)") - # Test case reference management methods - def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool, str]: - """ - Add references to a test case - :param case_id: ID of the test case - :param references: List of references to add - :returns: Tuple with success status and error string - """ - # First get the current test case to retrieve existing references - case_response = self.client.send_get(f"get_case/{case_id}") - if case_response.status_code != 200: - return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}" - - case_data = case_response.response_text - existing_refs = case_data.get("refs", "") or "" - - # Parse existing references - existing_ref_list = [] - if existing_refs: - existing_ref_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] - - # Deduplicate input references while preserving order - deduplicated_input = [] - seen = set() - for ref in references: - ref_clean = ref.strip() - if ref_clean and ref_clean not in seen: - deduplicated_input.append(ref_clean) - seen.add(ref_clean) - - # Add new references (avoid duplicates with existing) - all_refs = existing_ref_list.copy() - for ref in deduplicated_input: - if ref not in all_refs: - all_refs.append(ref) - - # Join all references - new_refs_string = ",".join(all_refs) - - # Validate total character limit - if len(new_refs_string) > 2000: - return False, f"Total references length ({len(new_refs_string)} characters) exceeds 2000 character limit" - - # Update the test case with new references - update_data = {"refs": new_refs_string} - update_response = self.client.send_post(f"update_case/{case_id}", update_data) - - if update_response.status_code == 200: - return True, "" - else: - return False, update_response.error_message + return None - def update_case_references(self, case_id: int, references: List[str]) -> Tuple[bool, str]: - """ - Update references on a test case by replacing existing ones - :param case_id: ID of the test case - :param references: List of references to replace existing ones - :returns: Tuple with success status and error string + @staticmethod + def _normalize_feature_name(name: str) -> str: """ - # Deduplicate input references while preserving order - deduplicated_refs = [] - seen = set() - for ref in references: - ref_clean = ref.strip() - if ref_clean and ref_clean not in seen: - deduplicated_refs.append(ref_clean) - seen.add(ref_clean) - - # Join references - new_refs_string = 
",".join(deduplicated_refs) - - # Validate total character limit - if len(new_refs_string) > 2000: - return False, f"Total references length ({len(new_refs_string)} characters) exceeds 2000 character limit" - - # Update the test case with new references - update_data = {"refs": new_refs_string} - update_response = self.client.send_post(f"update_case/{case_id}", update_data) - - if update_response.status_code == 200: - return True, "" - else: - return False, update_response.error_message + Normalize a feature name for case-insensitive, whitespace-insensitive matching. - def delete_case_references(self, case_id: int, specific_references: List[str] = None) -> Tuple[bool, str]: - """ - Delete all or specific references from a test case - :param case_id: ID of the test case - :param specific_references: List of specific references to delete (None to delete all) - :returns: Tuple with success status and error string - """ - if specific_references is None: - # Delete all references by setting refs to empty string - update_data = {"refs": ""} - else: - # First get the current test case to retrieve existing references - case_response = self.client.send_get(f"get_case/{case_id}") - if case_response.status_code != 200: - return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}" + Converts to lowercase, strips whitespace, and removes special characters. + Hyphens, underscores, and special chars are converted to spaces for word boundaries. - case_data = case_response.response_text - existing_refs = case_data.get("refs", "") or "" + Args: + name: The feature name to normalize - if not existing_refs: - # No references to delete - return True, "" + Returns: + Normalized name (lowercase, special chars removed, collapsed whitespace, stripped) + """ + import re - # Parse existing references - existing_ref_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + # Convert to lowercase and strip + normalized = name.lower().strip() + # Replace hyphens, underscores, and special chars with spaces + normalized = re.sub(r"[^a-z0-9\s]", " ", normalized) + # Collapse multiple spaces to single space + normalized = re.sub(r"\s+", " ", normalized) + # Final strip + return normalized.strip() - # Deduplicate input references for efficient processing - refs_to_delete = set(ref.strip() for ref in specific_references if ref.strip()) + def get_bdd_case_field_name(self) -> str: + """Resolve BDD Scenarios case field name from TestRail API - # Remove specific references - remaining_refs = [ref for ref in existing_ref_list if ref not in refs_to_delete] + Dynamically resolves the actual field name for BDD Scenarios (type_id=13). + This supports custom field names when users rename the default field in TestRail. 
- # Join remaining references - new_refs_string = ",".join(remaining_refs) - update_data = {"refs": new_refs_string} + Returns: + Resolved system_name of BDD Scenarios field, or default name if resolution fails + """ + # Return cached value if already resolved + if self._bdd_case_field_name is not None: + return self._bdd_case_field_name - # Update the test case - update_response = self.client.send_post(f"update_case/{case_id}", update_data) + try: + response = self.client.send_get("get_case_fields") + if not response.error_message and response.response_text: + for field in response.response_text: + if field.get("type_id") == 13: # BDD Scenarios type + self._bdd_case_field_name = field.get("system_name") + self.environment.vlog(f"Resolved BDD case field name: {self._bdd_case_field_name}") + return self._bdd_case_field_name + except Exception as e: + self.environment.vlog(f"Error resolving BDD case field name: {e}") + + # Fallback to default name + self._bdd_case_field_name = "custom_testrail_bdd_scenario" + self.environment.vlog(f"Using default BDD case field name: {self._bdd_case_field_name}") + return self._bdd_case_field_name + + def get_bdd_result_field_name(self) -> str: + """Resolve BDD Scenario Results result field name from TestRail API + + Dynamically resolves the actual field name for BDD Scenario Results (type_id=14). + This supports custom field names when users rename the default field in TestRail. + + Returns: + Resolved system_name of BDD Scenario Results field, or default name if resolution fails + """ + # Return cached value if already resolved + if self._bdd_result_field_name is not None: + return self._bdd_result_field_name - if update_response.status_code == 200: - return True, "" - else: - return False, update_response.error_message + try: + response = self.client.send_get("get_result_fields") + if not response.error_message and response.response_text: + for field in response.response_text: + if field.get("type_id") == 14: # BDD Scenario Results type + self._bdd_result_field_name = field.get("system_name") + self.environment.vlog(f"Resolved BDD result field name: {self._bdd_result_field_name}") + return self._bdd_result_field_name + except Exception as e: + self.environment.vlog(f"Error resolving BDD result field name: {e}") + + # Fallback to default name + self._bdd_result_field_name = "custom_testrail_bdd_scenario_results" + self.environment.vlog(f"Using default BDD result field name: {self._bdd_result_field_name}") + return self._bdd_result_field_name + + def add_case_bdd( + self, section_id: int, title: str, bdd_content: str, template_id: int, tags: List[str] = None + ) -> Tuple[int, str]: + return self.bdd_handler.add_case_bdd(section_id, title, bdd_content, template_id, tags) diff --git a/trcli/api/case_handler.py b/trcli/api/case_handler.py index 4a6bf06..b8aeaee 100644 --- a/trcli/api/case_handler.py +++ b/trcli/api/case_handler.py @@ -117,66 +117,98 @@ def _add_case_and_update_data(self, case: TestRailCase) -> APIClientResult: return response def update_existing_case_references( - self, case_id: int, junit_refs: str, strategy: str = "append" - ) -> Tuple[bool, str, List[str], List[str]]: + self, case_id: int, junit_refs: str, case_fields: dict = None, strategy: str = "append" + ) -> Tuple[bool, str, List[str], List[str], List[str]]: """ - Update existing case references with values from JUnit properties. + Update existing case references and custom fields with values from JUnit properties. 
:param case_id: ID of the test case :param junit_refs: References from JUnit testrail_case_field property - :param strategy: 'append' or 'replace' - :returns: Tuple with (success, error_message, added_refs, skipped_refs) + :param case_fields: Dictionary of custom case fields to update (e.g., {'custom_preconds': 'value'}) + :param strategy: 'append' or 'replace' (applies to refs field only) + :returns: Tuple with (success, error_message, added_refs, skipped_refs, updated_fields) """ - if not junit_refs or not junit_refs.strip(): - return True, None, [], [] # No references to process - - # Parse and deduplicate JUnit references using utility function - junit_ref_list = deduplicate_references(parse_references(junit_refs)) - - if not junit_ref_list: - return False, "No valid references found in JUnit property", [], [] + updated_fields = [] - # Get current case data - case_response = self.client.send_get(f"get_case/{case_id}") - if case_response.error_message: - return False, case_response.error_message, [], [] + # Handle case where there are no refs but there are case fields to update + if (not junit_refs or not junit_refs.strip()) and not case_fields: + return True, None, [], [], [] # Nothing to process - existing_refs = case_response.response_text.get("refs", "") or "" - - if strategy == "replace": - # Replace strategy: use JUnit refs as-is - new_refs = join_references(junit_ref_list) - added_refs = junit_ref_list + if not junit_refs or not junit_refs.strip(): + # No refs to process, but we have case fields to update + new_refs = None + added_refs = [] skipped_refs = [] else: - # Append strategy: combine with existing refs, avoiding duplicates - existing_ref_list = parse_references(existing_refs) - - # Determine which references are new vs duplicates - added_refs = [ref for ref in junit_ref_list if ref not in existing_ref_list] - skipped_refs = [ref for ref in junit_ref_list if ref in existing_ref_list] - - # If no new references to add, return current state - if not added_refs: - return True, None, added_refs, skipped_refs - - # Combine references - combined_list = existing_ref_list + added_refs - new_refs = join_references(combined_list) - - # Validate 2000 character limit for test case references - is_valid, error_msg = validate_references_length(new_refs, self.MAX_CASE_REFERENCES_LENGTH) - if not is_valid: - return False, error_msg, [], [] + # Parse and deduplicate JUnit references using utility function + junit_ref_list = deduplicate_references(parse_references(junit_refs)) + + if not junit_ref_list: + # If we have case fields, continue; otherwise return error + if not case_fields: + return False, "No valid references found in JUnit property", [], [], [] + new_refs = None + added_refs = [] + skipped_refs = [] + else: + # Get current case data + case_response = self.client.send_get(f"get_case/{case_id}") + if case_response.error_message: + return False, case_response.error_message, [], [], [] + + existing_refs = case_response.response_text.get("refs", "") or "" + + if strategy == "replace": + # Replace strategy: use JUnit refs as-is + new_refs = join_references(junit_ref_list) + added_refs = junit_ref_list + skipped_refs = [] + else: + # Append strategy: combine with existing refs, avoiding duplicates + existing_ref_list = parse_references(existing_refs) + + # Determine which references are new vs duplicates + added_refs = [ref for ref in junit_ref_list if ref not in existing_ref_list] + skipped_refs = [ref for ref in junit_ref_list if ref in existing_ref_list] + + # If no new references 
to add and no case fields, return current state + if not added_refs and not case_fields: + return True, None, added_refs, skipped_refs, [] + + # Combine references + combined_list = existing_ref_list + added_refs + new_refs = join_references(combined_list) + + # Validate 2000 character limit for test case references + if new_refs: + is_valid, error_msg = validate_references_length(new_refs, self.MAX_CASE_REFERENCES_LENGTH) + if not is_valid: + return False, error_msg, [], [], [] + + # Build update data with refs and custom case fields + update_data = {} + if new_refs is not None: + update_data["refs"] = new_refs + + # Add custom case fields to the update + if case_fields: + for field_name, field_value in case_fields.items(): + # Skip special internal fields that shouldn't be updated + if field_name not in ["case_id", "section_id", "result"]: + update_data[field_name] = field_value + updated_fields.append(field_name) + + # Only update if we have data to send + if not update_data: + return True, None, added_refs, skipped_refs, updated_fields # Update the case - update_data = {"refs": new_refs} update_response = self.client.send_post(f"update_case/{case_id}", update_data) if update_response.error_message: - return False, update_response.error_message, [], [] + return False, update_response.error_message, [], [], [] - return True, None, added_refs, skipped_refs + return True, None, added_refs, skipped_refs, updated_fields def delete_cases(self, suite_id: int, added_cases: List[Dict]) -> Tuple[Dict, str]: """ From 8f930aee781c79b607478331bf7f4d4f3dfd3861 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Fri, 6 Feb 2026 17:03:03 +0800 Subject: [PATCH 31/33] Fix: fixed unicode utf-8 encoding issue --- tests/test_logging/test_file_handler.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/test_logging/test_file_handler.py b/tests/test_logging/test_file_handler.py index 35c7560..173170a 100644 --- a/tests/test_logging/test_file_handler.py +++ b/tests/test_logging/test_file_handler.py @@ -58,7 +58,7 @@ def test_writes_to_file(self): handler.close() # File should contain messages - content = self.log_file.read_text() + content = self.log_file.read_text(encoding="utf-8") self.assertIn("Test message 1", content) self.assertIn("Test message 2", content) @@ -103,7 +103,7 @@ def test_flush(self): handler.flush() # File should be written immediately - content = self.log_file.read_text() + content = self.log_file.read_text(encoding="utf-8") self.assertIn("Test message", content) handler.close() @@ -115,7 +115,7 @@ def test_context_manager(self): # File should be closed and content written self.assertTrue(self.log_file.exists()) - content = self.log_file.read_text() + content = self.log_file.read_text(encoding="utf-8") self.assertIn("Test message", content) def test_multiple_writes_same_file(self): @@ -128,7 +128,7 @@ def test_multiple_writes_same_file(self): handler.close() - content = self.log_file.read_text() + content = self.log_file.read_text(encoding="utf-8") for msg in messages: self.assertIn(msg.strip(), content) @@ -142,7 +142,7 @@ def test_unicode_content(self): handler.close() - content = self.log_file.read_text() + content = self.log_file.read_text(encoding="utf-8") self.assertIn("🎉", content) self.assertIn("你好世界", content) self.assertIn("مرحبا بالعالم", content) @@ -174,12 +174,12 @@ def test_rotation_preserves_content(self): # Collect all content from all files all_content = "" if self.log_file.exists(): - all_content += 
self.log_file.read_text() + all_content += self.log_file.read_text(encoding="utf-8") for i in range(1, 6): backup = Path(f"{self.log_file}.{i}") if backup.exists(): - all_content += backup.read_text() + all_content += backup.read_text(encoding="utf-8") # All messages should be somewhere for msg in messages: @@ -209,8 +209,8 @@ def test_writes_to_multiple_files(self): multi.close() # Both files should have the message - content1 = self.log_file1.read_text() - content2 = self.log_file2.read_text() + content1 = self.log_file1.read_text(encoding="utf-8") + content2 = self.log_file2.read_text(encoding="utf-8") self.assertIn("Test message", content1) self.assertIn("Test message", content2) @@ -238,8 +238,8 @@ def test_continues_on_handler_failure(self): self.assertTrue(self.log_file1.exists()) self.assertTrue(self.log_file2.exists()) - content1 = self.log_file1.read_text() - content2 = self.log_file2.read_text() + content1 = self.log_file1.read_text(encoding="utf-8") + content2 = self.log_file2.read_text(encoding="utf-8") self.assertIn("Test message", content1) self.assertIn("Test message", content2) From d28b3600d653e5566b70303491cd53601ab31baf Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Fri, 6 Feb 2026 17:09:13 +0800 Subject: [PATCH 32/33] Updated README file for 1.13.0 release --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 29a2be1..95b1da0 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ trcli ``` You should get something like this: ``` -TestRail CLI v1.12.6 +TestRail CLI v1.13.0 Copyright 2025 Gurock Software GmbH - www.gurock.com Supported and loaded modules: - parse_junit: JUnit XML Files (& Similar) @@ -51,7 +51,7 @@ CLI general reference -------- ```shell $ trcli --help -TestRail CLI v1.12.6 +TestRail CLI v1.13.0 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli [OPTIONS] COMMAND [ARGS]... @@ -1486,7 +1486,7 @@ Options: ### Reference ```shell $ trcli add_run --help -TestRail CLI v1.12.6 +TestRail CLI v1.13.0 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli add_run [OPTIONS] @@ -1610,7 +1610,7 @@ providing you with a solid base of test cases, which you can further expand on T ### Reference ```shell $ trcli parse_openapi --help -TestRail CLI v1.12.6 +TestRail CLI v1.13.0 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli parse_openapi [OPTIONS] From 6107eab2ca8393d6a2e17bf0555bc0f52604f8bd Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Fri, 6 Feb 2026 17:44:30 +0800 Subject: [PATCH 33/33] Added minor change for parse_cucumber caching --- trcli/commands/cmd_parse_cucumber.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/trcli/commands/cmd_parse_cucumber.py b/trcli/commands/cmd_parse_cucumber.py index feea27f..3634bcd 100644 --- a/trcli/commands/cmd_parse_cucumber.py +++ b/trcli/commands/cmd_parse_cucumber.py @@ -222,6 +222,11 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): environment.vlog("Clearing BDD cache to include newly created cases...") api_handler._bdd_case_cache.clear() + # Also clear the RequestCache for get_cases so fresh data is fetched + # The RequestCache caches get_cases API responses, so newly created cases + # won't be visible until we invalidate this cache + api_handler._cache.invalidate_pattern(f"get_cases/{resolved_project_id}") + # Re-parse with the newly created case IDs environment.vlog("Re-parsing to match newly created cases...") parser_for_results = CucumberParser(environment)
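
The comments in the final commit note that the `RequestCache` holds `get_cases` API responses, so cases created during BDD parsing stay invisible to the re-parse until the cached entries are dropped. Below is a minimal, hypothetical sketch of such a pattern-invalidatable cache, included only to illustrate why the invalidation step is needed: the real trcli `RequestCache` internals, TTL handling, and key format are assumptions here, and only the `invalidate_pattern(f"get_cases/{resolved_project_id}")` call and its rationale are taken from the diff above.

```python
import fnmatch
import time


class RequestCache:
    """Minimal sketch of a GET-response cache that supports pattern invalidation."""

    def __init__(self, ttl_seconds: float = 300.0):
        self._store = {}  # endpoint string -> (timestamp, cached response)
        self._ttl = ttl_seconds

    def get(self, endpoint: str):
        """Return the cached response for an endpoint, or None if missing/expired."""
        entry = self._store.get(endpoint)
        if entry is None:
            return None
        timestamp, response = entry
        if time.time() - timestamp > self._ttl:
            del self._store[endpoint]
            return None
        return response

    def set(self, endpoint: str, response) -> None:
        """Store a response for later reuse."""
        self._store[endpoint] = (time.time(), response)

    def invalidate_pattern(self, pattern: str) -> None:
        """Drop every cached entry whose key starts with or glob-matches the pattern."""
        stale = [key for key in self._store
                 if key.startswith(pattern) or fnmatch.fnmatch(key, pattern)]
        for key in stale:
            del self._store[key]


# Usage sketch: after parse_cucumber creates new cases, drop the cached get_cases
# responses so the re-parse sees the fresh case IDs instead of a stale snapshot.
cache = RequestCache()
cache.set("get_cases/42&suite_id=7", {"cases": []})   # stale snapshot without new cases
cache.invalidate_pattern("get_cases/42")               # mirrors the call added in the commit
assert cache.get("get_cases/42&suite_id=7") is None    # next lookup must refetch from the API
```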