From b6cab5964d80cd8c01a65eb522c7a13455f17c8a Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 18 Mar 2026 17:48:37 +0800 Subject: [PATCH 1/6] TRCLI-162: Updated parse_junit with special parser multisuite --- trcli/api/run_handler.py | 33 ++++++++++++++++++++++++ trcli/commands/cmd_parse_junit.py | 43 ++++++++++++++++++++++++------- trcli/constants.py | 6 +++++ trcli/readers/junit_xml.py | 8 ++++++ 4 files changed, 80 insertions(+), 10 deletions(-) diff --git a/trcli/api/run_handler.py b/trcli/api/run_handler.py index d30e972..011e84b 100644 --- a/trcli/api/run_handler.py +++ b/trcli/api/run_handler.py @@ -318,3 +318,36 @@ def delete_run(self, run_id: int) -> Tuple[dict, str]: """ response = self.client.send_post(f"delete_run/{run_id}", payload={}) return response.response_text, response.error_message + + def add_plan( + self, + project_id: int, + plan_name: str, + entries: List[Dict], + description: str = None, + milestone_id: int = None, + ) -> Tuple[dict, str]: + """ + Creates a new test plan with multiple test runs (one per suite). + + :param project_id: project id + :param plan_name: name for the test plan + :param entries: list of entry dictionaries, each containing suite_id and case_ids + Example: [{"suite_id": 1, "include_all": False, "case_ids": [1, 2, 3]}] + :param description: optional description for the plan + :param milestone_id: optional milestone id to associate with the plan + :returns: Tuple with plan response dict and error string. 
+ """ + plan_data = { + "name": plan_name, + "entries": entries, + } + + if description: + plan_data["description"] = description + + if milestone_id: + plan_data["milestone_id"] = milestone_id + + response = self.client.send_post(f"add_plan/{project_id}", plan_data) + return response.response_text, response.error_message diff --git a/trcli/commands/cmd_parse_junit.py b/trcli/commands/cmd_parse_junit.py index 2c95beb..093b91a 100644 --- a/trcli/commands/cmd_parse_junit.py +++ b/trcli/commands/cmd_parse_junit.py @@ -18,8 +18,8 @@ "--special-parser", metavar="", default="junit", - type=click.Choice(["junit", "saucectl", "bdd"], case_sensitive=False), - help="Optional special parser option for specialized JUnit reports. Use 'bdd' for BDD framework JUnit output.", + type=click.Choice(["junit", "saucectl", "bdd", "multisuite"], case_sensitive=False), + help="Optional special parser option for specialized JUnit reports. Use 'bdd' for BDD framework JUnit output, 'multisuite' for cross-suite test plans.", ) @click.option( "-a", @@ -62,22 +62,45 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): environment.elog(validation_error) exit(1) + # Validate multisuite requirements + if environment.special_parser == "multisuite": + if environment.case_matcher not in ["name", "property"]: + environment.elog(FAULT_MAPPING["multisuite_requires_name_or_property"]) + exit(1) + + # Reject --run-id with multisuite (use --plan-id instead) + if environment.run_id: + environment.elog(FAULT_MAPPING["multisuite_run_id_not_supported"]) + exit(1) + settings.ALLOW_ELAPSED_MS = environment.allow_ms print_config(environment) try: parsed_suites = JunitParser(environment).parse_file() run_id = None case_update_results = {} - for suite in parsed_suites: - result_uploader = ResultsUploader(environment=environment, suite=suite) - result_uploader.upload_results() - if run_id is None and hasattr(result_uploader, "last_run_id"): - run_id = result_uploader.last_run_id + # 
Multisuite mode: use MultisuiteUploader for cross-suite plans + if environment.special_parser == "multisuite": + from trcli.api.multisuite_uploader import MultisuiteUploader + + multisuite_uploader = MultisuiteUploader(environment=environment, suite=parsed_suites[0]) + multisuite_uploader.upload_results() + + # Use plan_id for reference handling + run_id = multisuite_uploader.last_plan_id + else: + # Normal mode: process each suite separately + for suite in parsed_suites: + result_uploader = ResultsUploader(environment=environment, suite=suite) + result_uploader.upload_results() + + if run_id is None and hasattr(result_uploader, "last_run_id"): + run_id = result_uploader.last_run_id - # Collect case update results - if hasattr(result_uploader, "case_update_results"): - case_update_results = result_uploader.case_update_results + # Collect case update results + if hasattr(result_uploader, "case_update_results"): + case_update_results = result_uploader.case_update_results if environment.test_run_ref and run_id: _handle_test_run_references(environment, run_id) diff --git a/trcli/constants.py b/trcli/constants.py index 85548f2..c32fa68 100644 --- a/trcli/constants.py +++ b/trcli/constants.py @@ -71,6 +71,12 @@ attachment_upload_failed="Failed to upload attachment '{file_path}' for case {case_id}: {error_message}", attachment_too_large="Failed to upload attachment '{file_name}' for case {case_id}: " "File size exceeds TestRail's maximum limit of 250 MB. Please reduce the file size or exclude this attachment.", + multisuite_requires_name_or_property="ERROR: --special-parser multisuite requires --case-matcher to be 'name' or 'property'.", + multisuite_run_id_not_supported="ERROR: --run-id cannot be used with --special-parser multisuite. Use --plan-id to add runs to an existing plan.", + multisuite_missing_case_ids="ERROR: All test cases must have case IDs when using multisuite parser. 
Found {count} case(s) without IDs.", + multisuite_cross_project_cases="WARNING: Skipped {count} test case(s) belonging to different project(s). Case IDs: {case_ids}", + multisuite_plan_creation_failed="ERROR: Failed to create test plan: {error_message}", + multisuite_fetch_case_failed="ERROR: Failed to fetch case information for case ID {case_id}: {error_message}", ) COMMAND_FAULT_MAPPING = dict( diff --git a/trcli/readers/junit_xml.py b/trcli/readers/junit_xml.py index 3cc6801..65cd9cc 100644 --- a/trcli/readers/junit_xml.py +++ b/trcli/readers/junit_xml.py @@ -401,6 +401,14 @@ def _is_bdd_mode(self) -> bool: """ return self._special == "bdd" + def _is_multisuite_mode(self) -> bool: + """Check if multisuite mode is enabled + + Returns: + True if special parser is 'multisuite', False otherwise + """ + return self._special == "multisuite" + def _extract_feature_case_id_from_property(self, testsuite) -> Union[int, None]: """Extract case ID from testsuite-level properties From 086dd074484e5592bda752c4016c88f3409625c0 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 18 Mar 2026 17:50:27 +0800 Subject: [PATCH 2/6] TRCLI-162: Updated readme and added unit tests for multisuite special parser --- CHANGELOG.MD | 1 + README.md | 34 ++++++++ tests/test_data/XML/example-multisuite-sp.xml | 46 +++++++++++ tests/test_data/XML/multisuite_report.xml | 77 +++++++++++++++++++ 4 files changed, 158 insertions(+) create mode 100644 tests/test_data/XML/example-multisuite-sp.xml create mode 100644 tests/test_data/XML/multisuite_report.xml diff --git a/CHANGELOG.MD b/CHANGELOG.MD index 0ff8843..1ec6105 100644 --- a/CHANGELOG.MD +++ b/CHANGELOG.MD @@ -13,6 +13,7 @@ _released 03-17-2026 ### Added - Extended glob support for robot parser - Added `--clear-run-assigned-to-id` flag to `add_run` command for clearing test run assignees during updates + - Added `multisuite` special-parser for creating cross-suite test plans from JUnit reports (automatically groups test cases by suite and 
creates one test run per suite within a single test plan; supports `--plan-id` for adding to existing plans) ### Fixed - Cannot add empty runs via add_run command due to empty test case validation. diff --git a/README.md b/README.md index 52d0e22..04fa37a 100644 --- a/README.md +++ b/README.md @@ -864,6 +864,40 @@ the `--special-parser saucectl` command line option. Please refer to the [SauceLabs and saucectl reports](https://support.gurock.com/hc/en-us/articles/12719558686484) documentation for further information. +#### Cross-suite test plans with multisuite parser + +If your test automation spans multiple TestRail suites, the multisuite parser allows you to create a single test plan with one run per suite from a single JUnit XML report. The CLI automatically detects which suite each test case belongs to, groups them accordingly, and creates or updates a test plan with the appropriate structure. + +**Requirements:** +- All test cases must have case IDs (C123 format in test names or `test_id` properties) +- Must use `--case-matcher name` or `--case-matcher property` (not `auto`) +- All cases must belong to the same project (cross-project cases are skipped with warnings) + +**Basic usage (create new plan):** +```bash +trcli parse_junit \ + --special-parser multisuite \ + --title "Cross-Suite Test Plan" \ + --file results.xml \ + --case-matcher property +``` + +**Add to existing plan:** +```bash +trcli parse_junit \ + --special-parser multisuite \ + --plan-id 1234 \ + --file results.xml \ + --case-matcher property +``` + +The parser automatically: +- Fetches suite information for each case ID concurrently (fast performance) +- Groups cases by their suite +- Creates a test plan with one run per suite +- Uploads results to the correct run within the plan +- Includes suite names and test counts in the plan description + #### Creating new test runs When a test run MUST created before using one of the parse commands, use the `add_run` command. 
For example, if diff --git a/tests/test_data/XML/example-multisuite-sp.xml b/tests/test_data/XML/example-multisuite-sp.xml new file mode 100644 index 0000000..05e0603 --- /dev/null +++ b/tests/test_data/XML/example-multisuite-sp.xml @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tests/test_data/XML/multisuite_report.xml b/tests/test_data/XML/multisuite_report.xml new file mode 100644 index 0000000..f58f51d --- /dev/null +++ b/tests/test_data/XML/multisuite_report.xml @@ -0,0 +1,77 @@ + + + + + + + + + + + + + + + + + + + Expected: "Invalid password" message to be displayed + Actual: No error message shown + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Expected: Cart count to decrease by 1 + Actual: Cart count unchanged + + + + + + + + + + + + + + + + + + + + + + + + + + + From 99870151e407db189e97edd6cc98da107e943ba8 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 18 Mar 2026 17:51:19 +0800 Subject: [PATCH 3/6] TRCLI-162: Added multisuite uploader api class for handling multisuite API operations --- trcli/api/multisuite_uploader.py | 391 +++++++++++++++++++++++++++++++ 1 file changed, 391 insertions(+) create mode 100644 trcli/api/multisuite_uploader.py diff --git a/trcli/api/multisuite_uploader.py b/trcli/api/multisuite_uploader.py new file mode 100644 index 0000000..2767df6 --- /dev/null +++ b/trcli/api/multisuite_uploader.py @@ -0,0 +1,391 @@ +""" +MultisuiteUploader - Handles cross-suite test plan creation and result uploads + +This module provides functionality to upload JUnit test results across multiple +TestRail suites in a single test plan. It fetches suite information for each case, +groups cases by suite, creates a test plan with one run per suite, and uploads +results to the appropriate runs. 
+""" + +import time +from concurrent.futures import ThreadPoolExecutor, as_completed +from collections import defaultdict +from beartype.typing import Dict, List, Tuple, Set + +from trcli.api.project_based_client import ProjectBasedClient +from trcli.cli import Environment +from trcli.constants import FAULT_MAPPING +from trcli.data_classes.dataclass_testrail import TestRailSuite, TestRailCase + + +class MultisuiteUploader(ProjectBasedClient): + """ + Handles uploading test results across multiple TestRail suites. + + Creates a test plan with one run per suite and uploads results accordingly. + Requires that all test cases have case IDs (strict validation). + """ + + def __init__(self, environment: Environment, suite: TestRailSuite): + """ + Initialize the MultisuiteUploader + + :param environment: Environment configuration + :param suite: TestRailSuite containing all test cases from the report + """ + super().__init__(environment, suite) + self.last_plan_id = None + self.last_run_ids = {} # {suite_id: run_id} + + def upload_results(self): + """ + Main orchestration method for multisuite upload. + + Flow: + 1. Validate all cases have IDs + 2. Fetch suite_id for each case + 3. Validate single project + 4. Group cases by suite_id + 5. Create or update test plan + 6. Upload results per run + 7. 
Upload attachments + """ + start = time.time() + + self.environment.log("Multisuite mode: Preparing cross-suite test plan...") + + # Step 1: Resolve project + self.resolve_project() + + # Step 2: Collect all case IDs and validate they exist + all_case_ids = self._collect_all_case_ids() + if not all_case_ids: + self.environment.elog("No test cases with case IDs found in the report.") + exit(1) + + self.environment.log(f"Found {len(all_case_ids)} unique case ID(s) in report.") + + # Step 3: Fetch suite_id for each case (concurrent for performance) + self.environment.log("Fetching suite information for all cases...") + case_suite_mapping = self._fetch_suite_ids_for_cases(all_case_ids) + + if not case_suite_mapping: + self.environment.elog("Failed to fetch suite information for any cases.") + exit(1) + + # Step 4: Validate single project and filter cross-project cases + valid_case_suite_mapping, skipped_count = self._validate_single_project(case_suite_mapping) + + if skipped_count > 0: + self.environment.log(f"Filtered out {skipped_count} cross-project case(s).") + + if not valid_case_suite_mapping: + self.environment.elog("No valid cases remaining after project validation.") + exit(1) + + # Step 5: Group cases by suite + suite_groups = self._group_cases_by_suite(valid_case_suite_mapping) + self.environment.log(f"Grouped cases into {len(suite_groups)} suite(s).") + + # Step 6: Create or update test plan + if self.environment.plan_id: + # Existing plan mode - add runs to existing plan + run_mapping, error = self._update_existing_plan(self.environment.plan_id, suite_groups) + plan_id = self.environment.plan_id + else: + # New plan mode - create new plan with runs + plan_id, run_mapping, error = self._create_test_plan(suite_groups) + + if error: + self.environment.elog(FAULT_MAPPING["multisuite_plan_creation_failed"].format(error_message=error)) + exit(1) + + self.last_plan_id = plan_id + self.last_run_ids = run_mapping + + self.environment.log(f"Test plan created/updated 
(ID: {plan_id}) with {len(run_mapping)} run(s).") + + # Step 7: Upload results per run + total_results = self._upload_results_per_run(suite_groups, run_mapping) + + stop = time.time() + self.environment.log( + f"Uploaded {total_results} result(s) across {len(run_mapping)} run(s) in {stop - start:.1f} secs." + ) + + def _collect_all_case_ids(self) -> Set[int]: + """ + Collect all unique case IDs from the test suite. + Validates that ALL cases have case IDs (strict mode). + + :returns: Set of case IDs + :raises: SystemExit if any case lacks a case ID + """ + case_ids = set() + missing_id_count = 0 + + for section in self.api_request_handler.suites_data_from_provider.testsections: + for test_case in section.testcases: + if test_case.case_id is None or test_case.case_id == 0: + missing_id_count += 1 + else: + case_ids.add(test_case.case_id) + + if missing_id_count > 0: + self.environment.elog(FAULT_MAPPING["multisuite_missing_case_ids"].format(count=missing_id_count)) + exit(1) + + return case_ids + + def _fetch_suite_ids_for_cases(self, case_ids: Set[int]) -> Dict[int, int]: + """ + Fetch suite_id for each case ID using concurrent requests. 
+ + :param case_ids: Set of case IDs to fetch + :returns: Dictionary mapping {case_id: suite_id} + """ + case_suite_mapping = {} + failed_cases = [] + + def fetch_case(case_id: int) -> Tuple[int, int, str]: + """Fetch a single case and return (case_id, suite_id, error)""" + response = self.api_request_handler.client.send_get(f"get_case/{case_id}") + if response.error_message: + return case_id, None, response.error_message + + suite_id = response.response_text.get("suite_id") + if suite_id is None: + return case_id, None, "No suite_id in response" + + return case_id, suite_id, None + + # Use ThreadPoolExecutor for concurrent fetching (max 10 threads) + with ThreadPoolExecutor(max_workers=10) as executor: + futures = {executor.submit(fetch_case, cid): cid for cid in case_ids} + + for future in as_completed(futures): + case_id, suite_id, error = future.result() + + if error: + failed_cases.append(case_id) + self.environment.vlog( + FAULT_MAPPING["multisuite_fetch_case_failed"].format(case_id=case_id, error_message=error) + ) + else: + case_suite_mapping[case_id] = suite_id + + if failed_cases: + self.environment.log(f"Warning: Failed to fetch {len(failed_cases)} case(s). They will be skipped.") + + return case_suite_mapping + + def _validate_single_project(self, case_suite_mapping: Dict[int, int]) -> Tuple[Dict[int, int], int]: + """ + Validate that all suites belong to the target project. + Filters out cases from other projects. 
+ + :param case_suite_mapping: Dictionary {case_id: suite_id} + :returns: Tuple (filtered_mapping, skipped_count) + """ + valid_mapping = {} + skipped_cases = [] + suite_project_cache = {} # {suite_id: project_id} + + target_project_id = self.project.project_id + + for case_id, suite_id in case_suite_mapping.items(): + # Check cache first + if suite_id in suite_project_cache: + project_id = suite_project_cache[suite_id] + else: + # Fetch suite info + response = self.api_request_handler.client.send_get(f"get_suite/{suite_id}") + if response.error_message: + self.environment.vlog(f"Warning: Failed to fetch suite {suite_id} info: {response.error_message}") + skipped_cases.append(case_id) + continue + + project_id = response.response_text.get("project_id") + suite_project_cache[suite_id] = project_id + + if project_id == target_project_id: + valid_mapping[case_id] = suite_id + else: + skipped_cases.append(case_id) + + if skipped_cases: + self.environment.log( + FAULT_MAPPING["multisuite_cross_project_cases"].format( + count=len(skipped_cases), + case_ids=", ".join([f"C{cid}" for cid in skipped_cases[:10]]) + + ("..." if len(skipped_cases) > 10 else ""), + ) + ) + + return valid_mapping, len(skipped_cases) + + def _group_cases_by_suite(self, case_suite_mapping: Dict[int, int]) -> Dict[int, List[TestRailCase]]: + """ + Group TestRailCase objects by their suite_id. 
+ + :param case_suite_mapping: Dictionary {case_id: suite_id} + :returns: Dictionary {suite_id: [TestRailCase objects]} + """ + suite_groups = defaultdict(list) + + for section in self.api_request_handler.suites_data_from_provider.testsections: + for test_case in section.testcases: + case_id = test_case.case_id + if case_id in case_suite_mapping: + suite_id = case_suite_mapping[case_id] + suite_groups[suite_id].append(test_case) + + return dict(suite_groups) + + def _create_test_plan(self, suite_groups: Dict[int, List[TestRailCase]]) -> Tuple[int, Dict[int, int], str]: + """ + Create a new test plan with one run per suite. + + :param suite_groups: Dictionary {suite_id: [TestRailCase objects]} + :returns: Tuple (plan_id, {suite_id: run_id}, error_message) + """ + # Build plan description with suite names and test counts + description_parts = [] + suite_names = {} # Cache suite names + + for suite_id in suite_groups.keys(): + response = self.api_request_handler.client.send_get(f"get_suite/{suite_id}") + if not response.error_message: + suite_name = response.response_text.get("name", f"Suite {suite_id}") + suite_names[suite_id] = suite_name + test_count = len(suite_groups[suite_id]) + description_parts.append(f"{suite_name} ({test_count} test(s))") + + description = ", ".join(description_parts) + + # Build entries for add_plan + entries = [] + for suite_id, cases in suite_groups.items(): + case_ids = [case.case_id for case in cases] + entry = { + "suite_id": suite_id, + "include_all": False, + "case_ids": case_ids, + } + entries.append(entry) + + # Create the plan + plan_response, error = self.api_request_handler.run_handler.add_plan( + project_id=self.project.project_id, + plan_name=self.environment.title, + entries=entries, + description=description, + milestone_id=getattr(self.environment, "milestone_id", None), + ) + + if error: + return None, {}, error + + # Extract run IDs from plan response + plan_id = plan_response.get("id") + run_mapping = {} + + for entry in 
plan_response.get("entries", []): + suite_id = entry.get("suite_id") + runs = entry.get("runs", []) + if runs: + run_id = runs[0].get("id") + run_mapping[suite_id] = run_id + + return plan_id, run_mapping, None + + def _update_existing_plan( + self, plan_id: int, suite_groups: Dict[int, List[TestRailCase]] + ) -> Tuple[Dict[int, int], str]: + """ + Add results to an existing test plan. + Matches existing runs or creates new plan entries for missing suites. + + :param plan_id: Existing plan ID + :param suite_groups: Dictionary {suite_id: [TestRailCase objects]} + :returns: Tuple ({suite_id: run_id}, error_message) + """ + # Fetch existing plan structure + response = self.api_request_handler.client.send_get(f"get_plan/{plan_id}") + if response.error_message: + return {}, f"Failed to fetch plan {plan_id}: {response.error_message}" + + plan_data = response.response_text + run_mapping = {} + + # Match existing entries + for entry in plan_data.get("entries", []): + suite_id = entry.get("suite_id") + if suite_id in suite_groups: + runs = entry.get("runs", []) + if runs: + run_mapping[suite_id] = runs[0].get("id") + + # Create new entries for missing suites + for suite_id in suite_groups.keys(): + if suite_id not in run_mapping: + # Add new plan entry + case_ids = [case.case_id for case in suite_groups[suite_id]] + entry_data = { + "suite_id": suite_id, + "include_all": False, + "case_ids": case_ids, + } + + add_entry_response = self.api_request_handler.client.send_post(f"add_plan_entry/{plan_id}", entry_data) + if add_entry_response.error_message: + self.environment.elog( + f"Warning: Failed to add entry for suite {suite_id}: {add_entry_response.error_message}" + ) + continue + + runs = add_entry_response.response_text.get("runs", []) + if runs: + run_mapping[suite_id] = runs[0].get("id") + + return run_mapping, None + + def _upload_results_per_run(self, suite_groups: Dict[int, List[TestRailCase]], run_mapping: Dict[int, int]) -> int: + """ + Upload results to each run. 
+ + :param suite_groups: Dictionary {suite_id: [TestRailCase objects]} + :param run_mapping: Dictionary {suite_id: run_id} + :returns: Total number of results uploaded + """ + total_results = 0 + + for suite_id, cases in suite_groups.items(): + run_id = run_mapping.get(suite_id) + if not run_id: + self.environment.log(f"Warning: No run ID found for suite {suite_id}, skipping.") + continue + + # Prepare data provider with cases for this suite + self.api_request_handler.data_provider.suites_input.testsections[0].testcases = cases + + # Upload results for this run + results, error, results_count = self.api_request_handler.result_handler.add_results(run_id) + + if error: + self.environment.elog(f"Error uploading results to run {run_id}: {error}") + else: + total_results += results_count + self.environment.log(f"Uploaded {results_count} result(s) to run {run_id} (suite {suite_id}).") + + # Upload attachments if any + report_results = [ + {"case_id": case.case_id, "attachments": case.result.attachments} + for case in cases + if case.result.attachments + ] + + if report_results: + self.api_request_handler.result_handler.upload_attachments(report_results, results, run_id) + + return total_results From 43388893a99783083b9a1ff75eccff59d6f89e6b Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Fri, 6 Mar 2026 18:40:21 +0800 Subject: [PATCH 4/6] TRCLI-237: Extended glob support for robot parser, also added tests and updated readme guide --- README.md | 20 +++++- tests/test_glob_integration.py | 31 ++++++++++ tests/test_robot_parser.py | 108 +++++++++++++++++++++++++++++++++ trcli/readers/robot_xml.py | 62 ++++++++++++++++++- 4 files changed, 217 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 04fa37a..f50d350 100644 --- a/README.md +++ b/README.md @@ -196,7 +196,7 @@ For further detail, please refer to the ### Using Glob Patterns for Multiple Files -TRCLI supports glob patterns to process multiple report files in a single command. 
This feature is available for **JUnit XML** and **Cucumber JSON** parsers. +TRCLI supports glob patterns to process multiple report files in a single command. This feature is available for **JUnit XML**, **Robot Framework**, and **Cucumber JSON** parsers. #### Important: Shell Quoting Requirement @@ -239,6 +239,7 @@ When a glob pattern matches **multiple files**, TRCLI automatically: 3. **Merges test results** into a single combined report 4. **Writes merged file** to current directory: - JUnit: `Merged-JUnit-report.xml` + - Robot Framework: `Merged-Robot-report.xml` - Cucumber: `merged_cucumber.json` 5. **Processes the merged file** as a single test run upload @@ -263,6 +264,23 @@ trcli parse_junit \ --case-matcher auto ``` +**Robot Framework - Multiple output files:** +```bash +# Merge multiple Robot Framework test runs +trcli -y \ + -h https://example.testrail.com \ + --project "My Project" \ + parse_robot \ + -f "reports/robot-*.xml" \ + --title "Merged Robot Tests" + +# Recursive search for all Robot outputs +trcli parse_robot \ + -f "test-results/**/output.xml" \ + --title "All Robot Results" \ + --case-matcher property +``` + **Cucumber JSON - Multiple test runs:** ```bash # Merge multiple Cucumber JSON reports diff --git a/tests/test_glob_integration.py b/tests/test_glob_integration.py index 81aa056..68c9e30 100644 --- a/tests/test_glob_integration.py +++ b/tests/test_glob_integration.py @@ -146,6 +146,37 @@ def test_glob_junit_multiple_results_scenario_2(self): if merged_file.exists(): merged_file.unlink() + @pytest.mark.parse_robot + def test_glob_robot_duplicate_automation_ids(self): + """Test Robot Framework glob pattern with duplicate automation_ids.""" + env = Environment() + env.case_matcher = MatchersParser.AUTO + env.file = Path(__file__).parent / "test_data/XML/testglob_robot/*.xml" + + # Check if test files exist + if not list(Path(__file__).parent.glob("test_data/XML/testglob_robot/*.xml")): + pytest.skip("Robot test data not available") + + 
parser = RobotParser(env) + parsed_suites = parser.parse_file() + suite = parsed_suites[0] + + # Similar verification as JUnit tests + data_provider = ApiDataProvider(suite) + cases_to_add = data_provider.add_cases() + + # Verify deduplication occurred if there were duplicates + total_cases = sum(len(section.testcases) for section in suite.testsections) + automation_ids = [c.custom_automation_id for c in cases_to_add if c.custom_automation_id] + + # Cases to add should have unique automation_ids + assert len(automation_ids) == len(set(automation_ids)), "Cases to add should have unique automation_ids" + + # Clean up merged file + merged_file = Path.cwd() / "Merged-Robot-report.xml" + if merged_file.exists(): + merged_file.unlink() + @pytest.mark.parse_cucumber def test_cucumber_glob_filepath_not_pattern(self): """Test Scenario 3: Cucumber glob pattern uses correct filepath (not pattern string). diff --git a/tests/test_robot_parser.py b/tests/test_robot_parser.py index ca73dc8..02a7c27 100644 --- a/tests/test_robot_parser.py +++ b/tests/test_robot_parser.py @@ -76,3 +76,111 @@ def test_robot_xml_parser_file_not_found(self): env = Environment() env.file = Path(__file__).parent / "not_found.xml" RobotParser(env) + + @pytest.mark.parse_robot + def test_robot_xml_parser_glob_pattern_single_file(self): + """Test glob pattern that matches single file""" + env = Environment() + env.case_matcher = MatchersParser.AUTO + # Use glob pattern that matches only one file + env.file = Path(__file__).parent / "test_data/XML/robotframework_simple_RF50.xml" + + # This should work just like a regular file path + file_reader = RobotParser(env) + result = file_reader.parse_file() + + assert len(result) == 1 + assert isinstance(result[0], TestRailSuite) + # Verify it has test sections and cases + assert len(result[0].testsections) > 0 + + @pytest.mark.parse_robot + def test_robot_xml_parser_glob_pattern_multiple_files(self): + """Test glob pattern that matches multiple files and merges 
them""" + env = Environment() + env.case_matcher = MatchersParser.AUTO + # Use glob pattern that matches multiple Robot XML files + env.file = Path(__file__).parent / "test_data/XML/testglob_robot/*.xml" + + file_reader = RobotParser(env) + result = file_reader.parse_file() + + # Should return a merged result + assert len(result) == 1 + assert isinstance(result[0], TestRailSuite) + + # Verify merged file was created + merged_file = Path.cwd() / "Merged-Robot-report.xml" + assert merged_file.exists(), "Merged Robot report should be created" + + # Verify the merged result contains test cases from both files + total_cases = sum(len(section.testcases) for section in result[0].testsections) + assert total_cases > 0, "Merged result should contain test cases" + + # Clean up merged file + if merged_file.exists(): + merged_file.unlink() + + @pytest.mark.parse_robot + def test_robot_xml_parser_glob_pattern_no_matches(self): + """Test glob pattern that matches no files""" + with pytest.raises(FileNotFoundError): + env = Environment() + env.case_matcher = MatchersParser.AUTO + # Use glob pattern that matches no files + env.file = Path(__file__).parent / "test_data/XML/nonexistent_*.xml" + RobotParser(env) + + @pytest.mark.parse_robot + def test_robot_check_file_glob_returns_path(self): + """Test that check_file method returns valid Path for glob pattern""" + # Test single file match + single_file_glob = Path(__file__).parent / "test_data/XML/robotframework_simple_RF50.xml" + result = RobotParser.check_file(single_file_glob) + assert isinstance(result, Path) + assert result.exists() + + # Test multiple file match (returns merged file path) + multi_file_glob = Path(__file__).parent / "test_data/XML/testglob_robot/*.xml" + result = RobotParser.check_file(multi_file_glob) + assert isinstance(result, Path) + assert result.name == "Merged-Robot-report.xml" + assert result.exists() + + # Clean up + if result.exists() and result.name == "Merged-Robot-report.xml": + result.unlink() + + 
@pytest.mark.parse_robot + def test_robot_xml_parser_glob_merges_duplicate_sections(self): + """Test that glob pattern merging handles duplicate section names correctly. + + When multiple Robot XML files have the same suite structure, sections with + the same name should be merged into one section with all test cases combined. + This prevents the "Section duplicates detected" error. + """ + env = Environment() + env.case_matcher = MatchersParser.AUTO + env.file = Path(__file__).parent / "test_data/XML/testglob_robot/*.xml" + + file_reader = RobotParser(env) + result = file_reader.parse_file() + + assert len(result) == 1 + suite = result[0] + + # Verify no duplicate section names + section_names = [section.name for section in suite.testsections] + unique_section_names = set(section_names) + + assert len(section_names) == len(unique_section_names), f"Duplicate section names detected: {section_names}" + + # Verify sections have combined test cases from both files + # Both robot-1.xml and robot-2.xml have same structure, so sections should have tests from both + total_cases = sum(len(section.testcases) for section in suite.testsections) + assert total_cases > 4, "Sections should contain test cases from both merged files" + + # Clean up merged file + merged_file = Path.cwd() / "Merged-Robot-report.xml" + if merged_file.exists(): + merged_file.unlink() diff --git a/trcli/readers/robot_xml.py b/trcli/readers/robot_xml.py index 0ef44e2..72e5088 100644 --- a/trcli/readers/robot_xml.py +++ b/trcli/readers/robot_xml.py @@ -1,6 +1,8 @@ from datetime import datetime, timedelta -from beartype.typing import List +from beartype.typing import List, Union +from pathlib import Path from xml.etree import ElementTree +import glob from trcli.backports import removeprefix from trcli.cli import Environment @@ -21,6 +23,54 @@ def __init__(self, environment: Environment): super().__init__(environment) self.case_matcher = environment.case_matcher + @staticmethod + def check_file(filepath: 
Union[str, Path]) -> Path: + """Check and process file path, supporting glob patterns. + + If the filepath contains glob patterns (*, ?, []), expand them: + - Single file match: Return that file path + - Multiple file matches: Merge the files and return merged file path + - No matches: Raise FileNotFoundError + """ + filepath = Path(filepath) + + # Check if this is a glob pattern (contains wildcards) + filepath_str = str(filepath) + if any(char in filepath_str for char in ["*", "?", "["]): + # Expand glob pattern + files = glob.glob(filepath_str, recursive=True) + + if not files: + raise FileNotFoundError(f"File not found: {filepath}") + elif len(files) == 1: + # Single file match - return it directly + return Path().cwd().joinpath(files[0]) + else: + # Multiple files - merge them + merged_root = ElementTree.Element("robot", generator="Robot 7.0 (merged)") + + for file_path in files: + tree = ElementTree.parse(file_path) + root = tree.getroot() + + # Merge all elements from each file + for suite in root.findall("suite"): + merged_root.append(suite) + + # Write merged XML to a file + merged_tree = ElementTree.ElementTree(merged_root) + merged_file_path = Path.cwd() / "Merged-Robot-report.xml" + + # Use UTF-8 encoding explicitly + merged_tree.write(merged_file_path, encoding="utf-8", xml_declaration=True) + + return merged_file_path + else: + # Not a glob pattern - use parent class behavior + if not filepath.is_file(): + raise FileNotFoundError(f"File not found: {filepath}") + return filepath + def parse_file(self) -> List[TestRailSuite]: self.env.log(f"Parsing Robot Framework report.") tree = ElementTree.parse(self.filepath) @@ -46,8 +96,14 @@ def _find_suites(self, suite_element, sections_list: List, namespace=""): namespace += f".{name}" if namespace else name tests = suite_element.findall("test") if tests: - section = TestRailSection(namespace) - sections_list.append(section) + # Check if section with this namespace already exists (for merged files with duplicate 
suites) + section = next((s for s in sections_list if s.name == namespace), None) + if section is None: + # Create new section if it doesn't exist + section = TestRailSection(namespace) + sections_list.append(section) + # else: reuse existing section and add tests to it + for test in tests: case_id = None case_name = test.get("name") From 8b22078504c2ad851c697a2a04d23623c52265a5 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 19 Mar 2026 16:46:56 +0800 Subject: [PATCH 5/6] TRCLI-162: Accept changes from last release to changelog --- CHANGELOG.MD | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.MD b/CHANGELOG.MD index 1ec6105..71ddcfa 100644 --- a/CHANGELOG.MD +++ b/CHANGELOG.MD @@ -8,12 +8,11 @@ This project adheres to [Semantic Versioning](https://semver.org/). Version numb ## [1.13.4] -_released 03-17-2026 +_released 03-18-2026 ### Added - - Extended glob support for robot parser - - Added `--clear-run-assigned-to-id` flag to `add_run` command for clearing test run assignees during updates - - Added `multisuite` special-parser for creating cross-suite test plans from JUnit reports (automatically groups test cases by suite and creates one test run per suite within a single test plan; supports `--plan-id` for adding to existing plans) + - Extended glob support for robot parser for efficient multiple test result file processing + - Added `--run-assigned-to-id` and `--clear-run-assigned-to-id` options to `add_run` command for clearing and setting test run assignees during run creation and updates ### Fixed - Cannot add empty runs via add_run command due to empty test case validation. 
From 20a1417ce38f7db54e211ad56b454b9b9bf1543b Mon Sep 17 00:00:00 2001
From: acuanico-tr-galt
Date: Thu, 19 Mar 2026 16:10:02 +0800
Subject: [PATCH 6/6] Updated version for v1.14.0 release

---
 CHANGELOG.MD      | 11 +++++++++++
 README.md         |  8 ++++----
 trcli/__init__.py |  2 +-
 3 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/CHANGELOG.MD b/CHANGELOG.MD
index 71ddcfa..435b113 100644
--- a/CHANGELOG.MD
+++ b/CHANGELOG.MD
@@ -6,6 +6,17 @@ This project adheres to [Semantic Versioning](https://semver.org/). Version numb
 - **MINOR**: New features that are backward-compatible.
 - **PATCH**: Bug fixes or minor changes that do not affect backward compatibility.
 
+
+## [1.14.0]
+
+_released 04-01-2026
+
+### Added
+ - Added `multisuite` special-parser for creating cross-suite test plans from JUnit reports (automatically groups test cases by suite and creates one test run per suite within a single test plan; supports `--plan-id` for adding to existing plans)
+
+### Fixed
+ - Robot parser glob merging now combines sections that share the same name instead of failing with a section-duplicates error
+
 ## [1.13.4]
 
 _released 03-18-2026
diff --git a/README.md b/README.md
index f50d350..5cdab6b 100644
--- a/README.md
+++ b/README.md
@@ -33,7 +33,7 @@ trcli
 ```
 You should get something like this:
 ```
-TestRail CLI v1.13.4
+TestRail CLI v1.14.0
 Copyright 2025 Gurock Software GmbH - www.gurock.com
 Supported and loaded modules:
     - parse_junit: JUnit XML Files (& Similar)
@@ -51,7 +51,7 @@ CLI general reference
 --------
 ```shell
 $ trcli --help
-TestRail CLI v1.13.4
+TestRail CLI v1.14.0
 Copyright 2025 Gurock Software GmbH - www.gurock.com
 Usage: trcli [OPTIONS] COMMAND [ARGS]...
 
@@ -1675,7 +1675,7 @@ Options: ### Reference ```shell $ trcli add_run --help -TestRail CLI v1.13.4 +TestRail CLI v1.14.0 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli add_run [OPTIONS] @@ -1834,7 +1834,7 @@ providing you with a solid base of test cases, which you can further expand on T ### Reference ```shell $ trcli parse_openapi --help -TestRail CLI v1.13.4 +TestRail CLI v1.14.0 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli parse_openapi [OPTIONS] diff --git a/trcli/__init__.py b/trcli/__init__.py index f7d8b10..b9f68ed 100644 --- a/trcli/__init__.py +++ b/trcli/__init__.py @@ -1 +1 @@ -__version__ = "1.13.4" +__version__ = "1.14.0"