From 305e950bf5df70223e70ba086b9a46935e54dcfc Mon Sep 17 00:00:00 2001
From: Embedded DevOps
Date: Mon, 6 Jan 2025 20:57:17 -0800
Subject: [PATCH] Revert "Enhanced coverage processing (#2) (#254)"

This reverts commit fa343e254fab6488f4115644882f3926bb4ebdf6.
---
 cover_agent/UnitTestValidator.py  |   1 +
 cover_agent/coverage/processor.py | 255 ++++++++++++++++--------------
 cover_agent/version.txt           |   2 +-
 tests/coverage/test_processor.py  | 164 ++++---------------
 tests/test_UnitTestValidator.py   |  19 +--
 5 files changed, 182 insertions(+), 259 deletions(-)

diff --git a/cover_agent/UnitTestValidator.py b/cover_agent/UnitTestValidator.py
index 1bf089e2f..0a92220da 100644
--- a/cover_agent/UnitTestValidator.py
+++ b/cover_agent/UnitTestValidator.py
@@ -681,6 +681,7 @@ def post_process_coverage_report(self, time_of_test_command: int):
             report_path=self.code_coverage_report_path,
             src_file_path=self.source_file_path,
             is_global_coverage_enabled=self.use_report_coverage_feature_flag,
+            file_pattern=None,
             diff_coverage_report_path=self.diff_cover_report_path,
         )
         self.logger.info(
diff --git a/cover_agent/coverage/processor.py b/cover_agent/coverage/processor.py
index d9b022f4a..bd6d46a24 100644
--- a/cover_agent/coverage/processor.py
+++ b/cover_agent/coverage/processor.py
@@ -26,7 +26,6 @@ class CoverageData:
         missed (int) : The number of lines that are not covered by tests.
         coverage (float) : The coverage percentage of the file or class.
     """
-    is_target_file: bool
     covered_lines: List[int]
     covered: int
     missed_lines: List[int]
@@ -52,19 +51,6 @@ class CoverageReport:
     total_coverage: float
     file_coverage: Dict[str, CoverageData]
 
-    def filter_to_target_coverage(self) -> "CoverageReport":
-        """
-        Returns a new CoverageReport object with only the target file's coverage data.
-        """
-        target_coverage = {
-            file: coverage
-            for file, coverage in self.file_coverage.items()
-            if coverage.is_target_file
-        }
-        total_lines = sum(len(cov.covered_lines) + len(cov.missed_lines) for cov in target_coverage.values())
-        total_coverage = (sum(len(cov.covered_lines) for cov in target_coverage.values()) / total_lines) if total_lines > 0 else 0.0
-        return CoverageReport(total_coverage, target_coverage)
-
 class CoverageProcessor(ABC):
     """
     Abstract base class for processing coverage reports.
@@ -116,12 +102,14 @@ def process_coverage_report(self, time_of_test_command: int) -> CoverageReport:
         Processes the coverage report and returns the coverage data.
         """
         self._is_coverage_valid(time_of_test_command=time_of_test_command)
-        coverage_data = self.parse_coverage_report()
-        total_covered = sum(cov.covered for cov in coverage_data.values())
-        total_missed = sum(cov.missed for cov in coverage_data.values())
-        total_lines = total_covered + total_missed
-        total_coverage = (total_covered / total_lines) if total_lines > 0 else 0.0
-        return CoverageReport(total_coverage, coverage_data)
+        coverage = self.parse_coverage_report()
+        report = CoverageReport(0.0, coverage)
+        if coverage:
+            total_covered = sum(cov.covered for cov in coverage.values())
+            total_missed = sum(cov.missed for cov in coverage.values())
+            total_lines = total_covered + total_missed
+            report.total_coverage = (float(total_covered) / float(total_lines)) if total_lines > 0 else 0.0
+        return report
 
     def _is_coverage_valid(
         self, time_of_test_command: int
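(For orientation: the restored process_coverage_report sums per-file counts before dividing. A minimal worked sketch, assuming the module path shown in the diff header, cover_agent.coverage.processor, and the same figures the tests further down use; CoverageData fields are positional — covered_lines, covered, missed_lines, missed, coverage:)

    from cover_agent.coverage.processor import CoverageData

    coverage = {
        "file1.py": CoverageData([], 80, [], 20, 0.8),
        "file2.py": CoverageData([], 60, [], 40, 0.6),
    }
    total_covered = sum(c.covered for c in coverage.values())   # 140
    total_missed = sum(c.missed for c in coverage.values())     # 60
    total_coverage = float(total_covered) / (total_covered + total_missed)  # 0.7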
""" return int(round(os.path.getmtime(self.file_path) * 1000)) < time_of_test_command - + class CoberturaProcessor(CoverageProcessor): """ A class to process Cobertura code coverage reports. @@ -172,28 +160,14 @@ def parse_coverage_report(self) -> Dict[str, CoverageData]: tree = ET.parse(self.file_path) root = tree.getroot() coverage = {} - for package in root.findall(".//package"): - # Package name could be '.' if the class is in the default package - # Eg: - # In such cases, lets use default as the package name. - package_name = package.get("name", ".") - if package_name == ".": - package_name = "default" - for cls in package.findall(".//class"): - # In languages where Class is not a first class citizen, - # the class name is set to the file name as you can see - # in the below example from the Cobertura XML report. - # Usually this could be your util files. So we are good - # to consier name as the key for the CoverageData. - # Eg: - class_name = cls.get("name", "") - fully_qualified_name = f"{package_name}.{class_name}".strip('.') - coverage[fully_qualified_name] = self._parse_class_coverage(cls) + for cls in root.findall(".//class"): + cls_filename = cls.get("filename") + if cls_filename: + coverage[cls_filename] = self._parse_coverage_data_for_class(cls) return coverage - def _parse_class_coverage(self, cls) -> CoverageData: - lines_covered = [] - lines_missed = [] + def _parse_coverage_data_for_class(self, cls) -> CoverageData: + lines_covered, lines_missed = [], [] for line in cls.findall(".//line"): line_number = int(line.get("number")) hits = int(line.get("hits")) @@ -202,12 +176,9 @@ def _parse_class_coverage(self, cls) -> CoverageData: else: lines_missed.append(line_number) total_lines = len(lines_covered) + len(lines_missed) - coverage = (len(lines_covered) / total_lines) if total_lines > 0 else 0.0 - is_target = False - if self.src_file_path.endswith(cls.get("filename")): - is_target = True - return CoverageData(is_target, lines_covered, len(lines_covered), lines_missed, len(lines_missed), coverage) - + coverage_percentage = (float(len(lines_covered)) / total_lines) if total_lines > 0 else 0.0 + return CoverageData(lines_covered, len(lines_covered), lines_missed, len(lines_missed), coverage_percentage) + class LcovProcessor(CoverageProcessor): """ A class to process LCOV code coverage reports. @@ -235,10 +206,7 @@ def parse_coverage_report(self) -> Dict[str, CoverageData]: break total_lines = len(lines_covered) + len(lines_missed) coverage_percentage = (float(len(lines_covered)) / total_lines) if total_lines > 0 else 0.0 - is_target = False - if filename == self.src_file_path: - is_target = True - coverage[filename] = CoverageData(is_target, lines_covered, len(lines_covered), lines_missed, len(lines_missed), coverage_percentage) + coverage[filename] = CoverageData(lines_covered, len(lines_covered), lines_missed, len(lines_missed), coverage_percentage) except (FileNotFoundError, IOError) as e: self.logger.error(f"Error reading file {self.file_path}: {e}") raise @@ -254,61 +222,85 @@ class JacocoProcessor(CoverageProcessor): reports in both XML and CSV formats. 
""" def parse_coverage_report(self) -> Dict[str, CoverageData]: - extension = os.path.splitext(self.file_path)[1].lower() - if extension == ".xml": - return self._parse_xml() - elif extension == ".csv": - return self._parse_csv() + coverage = {} + package_name, class_name = self._extract_package_and_class_java() + file_extension = self._get_file_extension(self.file_path) + if file_extension == 'xml': + missed, covered = self._parse_jacoco_xml(class_name=class_name) + elif file_extension == 'csv': + missed, covered = self._parse_jacoco_csv(package_name=package_name, class_name=class_name) else: - raise ValueError(f"Unsupported JaCoCo report format: {extension}") + raise ValueError(f"Unsupported JaCoCo code coverage report format: {file_extension}") + total_lines = missed + covered + coverage_percentage = (float(covered) / total_lines) if total_lines > 0 else 0.0 + coverage[class_name] = CoverageData(covered_lines=[], covered=covered, missed_lines=[], missed=missed, coverage=coverage_percentage) + return coverage + + def _get_file_extension(self, filename: str) -> str | None: + """Get the file extension from a given filename.""" + return os.path.splitext(filename)[1].lstrip(".") - def _parse_xml(self) -> Dict[str, CoverageData]: + def _extract_package_and_class_java(self): + package_pattern = re.compile(r"^\s*package\s+([\w\.]+)\s*;.*$") + class_pattern = re.compile(r"^\s*public\s+class\s+(\w+).*") + + package_name = "" + class_name = "" + try: + with open(self.src_file_path, "r") as file: + for line in file: + if not package_name: # Only match package if not already found + package_match = package_pattern.match(line) + if package_match: + package_name = package_match.group(1) + + if not class_name: # Only match class if not already found + class_match = class_pattern.match(line) + if class_match: + class_name = class_match.group(1) + + if package_name and class_name: # Exit loop if both are found + break + except (FileNotFoundError, IOError) as e: + self.logger.error(f"Error reading file {self.src_file_path}: {e}") + raise + + return package_name, class_name + + def _parse_jacoco_xml( + self, class_name: str + ) -> tuple[int, int]: """Parses a JaCoCo XML code coverage report to extract covered and missed line numbers for a specific file.""" tree = ET.parse(self.file_path) root = tree.getroot() - coverage = {} - for package in root.findall(".//package"): - package_name = package.get("name", "") - for cls in package.findall(".//class"): - class_name = cls.get("sourcefilename", "") - fully_qualified_name = f"{package_name}.{class_name}".replace("/", ".") - missed = 0 - covered = 0 - for counter in cls.findall('counter'): - if counter.attrib.get('type') == 'LINE': - missed += int(counter.attrib.get('missed', 0)) - covered += int(counter.attrib.get('covered', 0)) - break - total_lines = covered + missed - coverage_percentage = (covered / total_lines) if total_lines > 0 else 0.0 - is_target = False - src_path = cls.get("name", "") - if f"{src_path}/{class_name}" == self.src_file_path: - is_target = True - # TODO: Add support for identifying which lines are covered and missed - coverage[fully_qualified_name] = CoverageData(is_target, [], covered, [], missed, coverage_percentage) - return coverage + sourcefile = root.find(f".//sourcefile[@name='{class_name}.java']") - def _parse_csv(self) -> Dict[str, CoverageData]: - coverage = {} - with open(self.file_path, "r") as csvfile: - reader = csv.DictReader(csvfile) + if sourcefile is None: + return 0, 0 + + missed, covered = 0, 0 + for counter in 
@@ -370,18 +362,45 @@ def parse_coverage_report(self) -> Dict[str, CoverageData]:
                 violation_lines = []
                 coverage_percentage = 0.0
 
-        # Consider every file as target file during diff coverage
-        coverage[self.file_path] = CoverageData(is_target_file=True, covered_lines=covered_lines, covered=len(covered_lines), missed_lines=violation_lines,missed=len(violation_lines), coverage=coverage_percentage)
+        coverage[self.file_path] = CoverageData(covered_lines=covered_lines, covered=len(covered_lines), missed_lines=violation_lines,missed=len(violation_lines), coverage=coverage_percentage)
         return coverage
 
+class CoverageReportFilter:
+    """
+    A class to filter coverage reports based on
+    file patterns. This class abstracts the logic
+    for filtering coverage reports based on file
+    patterns.
+    """
+    def filter_report(self, report: CoverageReport, file_pattern: str) -> CoverageReport:
+        """
+        Filters the coverage report based on the specified file pattern.
+
+        Args:
+            report (CoverageReport): The coverage report to filter.
+            file_pattern (str): The file pattern to filter by.
+
+        Returns:
+            CoverageReport: The filtered coverage report.
+        """
+        filtered_coverage = {
+            file: coverage
+            for file, coverage in report.file_coverage.items()
+            if file_pattern in file
+        }
+        total_lines = sum(len(cov.covered_lines) + len(cov.missed_lines) for cov in filtered_coverage.values())
+        total_coverage = (sum(len(cov.covered_lines) for cov in filtered_coverage.values()) / total_lines) if total_lines > 0 else 0.0
+        return CoverageReport(total_coverage = total_coverage, file_coverage=filtered_coverage)
+
 class CoverageProcessorFactory:
     """Factory for creating coverage processors based on tool type."""
+
     @staticmethod
     def create_processor(
         tool_type: str,
         report_path: str,
         src_file_path: str,
-        diff_report_path: Optional[str] = None
+        diff_coverage_report_path: Optional[str] = None
     ) -> CoverageProcessor:
         """
         Creates appropriate coverage processor instance.
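(CoverageReportFilter is what process_coverage applies when a file_pattern is given; the test further down drives it the same way. A minimal usage sketch — import path assumed from the diff header, CoverageData values illustrative:)

    from cover_agent.coverage.processor import (
        CoverageData, CoverageReport, CoverageReportFilter,
    )

    report = CoverageReport(
        total_coverage=0.5,
        file_coverage={
            "src/file1.java": CoverageData([1, 2, 3], 3, [4, 5], 2, 0.6),
            "tests/test_file.java": CoverageData([1], 1, [2, 3, 4, 5], 4, 0.2),
        },
    )
    filtered = CoverageReportFilter().filter_report(report, "test_file")
    # Only the matching entry remains; total_coverage is recomputed: 1 / 5 = 0.2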
@@ -397,21 +416,17 @@ def create_processor(
         Raises:
             ValueError: If invalid tool type specified
         """
-        processor_map = {
-            "cobertura": CoberturaProcessor,
-            "lcov": LcovProcessor,
-            "jacoco": JacocoProcessor,
-            "diff_cover_json": DiffCoverageProcessor,
+        processors = {
+            'cobertura': CoberturaProcessor,
+            'jacoco': JacocoProcessor,
+            'lcov': LcovProcessor,
+            'diff_cover_json': DiffCoverageProcessor
         }
-        if tool_type.lower() not in processor_map:
-            raise ValueError(f"Unsupported tool type: {tool_type}")
-
-        if tool_type.lower() == "diff_cover_json":
-            if not diff_report_path:
-                raise ValueError("Diff report path must be provided for diff processor.")
-            return DiffCoverageProcessor(report_path, src_file_path, diff_report_path)
-
-        return processor_map[tool_type.lower()](report_path, src_file_path)
+        if tool_type.lower() not in processors:
+            raise ValueError(f"Invalid coverage type specified: {tool_type}")
+        if tool_type.lower() == 'diff_cover_json':
+            return DiffCoverageProcessor(diff_coverage_report_path, report_path, src_file_path)
+        return processors[tool_type.lower()](report_path, src_file_path)
 
 def process_coverage(
     tool_type: str,
@@ -419,6 +434,7 @@ def process_coverage(
     report_path: str,
     src_file_path: str,
     is_global_coverage_enabled: bool = True,
+    file_pattern: Optional[str] = None,
     diff_coverage_report_path: Optional[str] = None
 ) -> CoverageReport:
     # Create appropriate processor
@@ -430,5 +446,8 @@ def process_coverage(
     if is_global_coverage_enabled:
         return report
 
-    # If global coverage is disabled, filter to target coverage
-    return report.filter_to_target_coverage()
\ No newline at end of file
+    # Apply filtering if needed
+    if file_pattern:
+        filter = CoverageReportFilter()
+        report = filter.filter_report(report, file_pattern)
+    return report
\ No newline at end of file
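(Taken together, the restored pieces compose roughly as follows — a sketch of the flow process_coverage wires up; the report/source paths and the timestamp are placeholders, and the filtering step only applies when global coverage is disabled and a file_pattern is supplied:)

    import time
    from cover_agent.coverage.processor import (
        CoverageProcessorFactory, CoverageReportFilter,
    )

    processor = CoverageProcessorFactory.create_processor(
        "cobertura", "coverage.xml", "app.py"  # tool type, report path, source path (placeholders)
    )
    report = processor.process_coverage_report(
        time_of_test_command=int(time.time() * 1000)
    )
    report = CoverageReportFilter().filter_report(report, "app")  # optional narrowing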
diff --git a/cover_agent/version.txt b/cover_agent/version.txt
index 645604fb1..f21e5a1f2 100644
--- a/cover_agent/version.txt
+++ b/cover_agent/version.txt
@@ -1 +1 @@
-0.2.14
\ No newline at end of file
+0.2.13
\ No newline at end of file
diff --git a/tests/coverage/test_processor.py b/tests/coverage/test_processor.py
index 48995191f..7b967f6d5 100644
--- a/tests/coverage/test_processor.py
+++ b/tests/coverage/test_processor.py
@@ -9,6 +9,7 @@
     LcovProcessor,
     CoverageData,
     CoverageReport,
+    CoverageReportFilter,
     DiffCoverageProcessor
 )
 from unittest.mock import patch, MagicMock
@@ -54,16 +55,17 @@ def test_process_coverage_report_file_not_found(self, mock_exists):
 
 class TestCoverageReportFilter:
     def test_filter_report_with_file_pattern(self):
         coverage_data = {
-            'file1.java': CoverageData(True, [1, 2, 3], 3, [4, 5], 2, 0.6),
-            'file2.java': CoverageData(False, [1, 2], 2, [3, 4, 5], 3, 0.4),
-            'test_file.java': CoverageData(False, [1], 1, [2, 3, 4, 5], 4, 0.2)
+            'file1.java': CoverageData([1, 2, 3], 3, [4, 5], 2, 0.6),
+            'file2.java': CoverageData([1, 2], 2, [3, 4, 5], 3, 0.4),
+            'test_file.java': CoverageData([1], 1, [2, 3, 4, 5], 4, 0.2)
         }
         report = CoverageReport(total_coverage=0.5, file_coverage=coverage_data)
-        filtered_report = report.filter_to_target_coverage()
+        filter = CoverageReportFilter()
+        filtered_report = filter.filter_report(report, 'test_file')
 
         assert len(filtered_report.file_coverage) == 1
-        assert 'file1.java' in filtered_report.file_coverage
-        assert filtered_report.total_coverage == 0.6
+        assert 'test_file.java' in filtered_report.file_coverage
+        assert filtered_report.total_coverage == 0.2
 
 @pytest.fixture
 def mock_xml_tree(monkeypatch):
     def mock_parse(file_path):
         # Mock XML structure for the test
         xml_str = """
-
+
-
+
@@ -105,7 +107,7 @@ def test_create_processor_lcov(self):
         assert isinstance(processor, LcovProcessor), "Expected LcovProcessor instance"
 
     def test_create_processor_unsupported_type(self):
-        with pytest.raises(ValueError, match="Unsupported tool type: unsupported_type"):
+        with pytest.raises(ValueError, match="Invalid coverage type specified: unsupported_type"):
             CoverageProcessorFactory.create_processor("unsupported_type", "fake_path", "app.py")
 
 class TestCoverageProcessor:
@@ -138,8 +140,8 @@ def test_process_valid_coverage_data(self, mocker):
         # Arrange
         time_of_test = 123456
         coverage_data = {
-            "file1.py": CoverageData(is_target_file=False, covered_lines=[], covered=80, missed_lines=[], missed=20, coverage=0.8),
-            "file2.py": CoverageData(is_target_file=False, covered_lines=[], covered=60, missed_lines=[], missed=40, coverage=0.6)
+            "file1.py": CoverageData(covered_lines=[], covered=80, missed_lines=[], missed=20, coverage=0.8),
+            "file2.py": CoverageData(covered_lines=[], covered=60, missed_lines=[], missed=40, coverage=0.6)
         }
         processor = CoverageProcessorFactory.create_processor("cobertura", "fake_path", "app.py")
@@ -159,8 +161,8 @@ def test_process_zero_lines_coverage(self, mocker):
         # Arrange
         time_of_test = 123456
         coverage_data = {
-            "file1.py": CoverageData(is_target_file=False, covered_lines=[], covered=0, missed_lines=[], missed=0, coverage=0.0),
-            "file2.py": CoverageData(is_target_file=False, covered_lines=[], covered=0, missed_lines=[], missed=0, coverage=0.0)
+            "file1.py": CoverageData(covered_lines=[], covered=0, missed_lines=[], missed=0, coverage=0.0),
+            "file2.py": CoverageData(covered_lines=[], covered=0, missed_lines=[], missed=0, coverage=0.0)
         }
         processor = CoverageProcessorFactory.create_processor("cobertura", "fake_path", "app.py")
@@ -186,51 +188,12 @@ def test_parse_coverage_report_cobertura(self, mock_xml_tree, processor):
         Tests the parse_coverage_report method for correct line number and coverage calculation with Cobertura reports.
""" coverage = processor.parse_coverage_report() - print(coverage) assert len(coverage) == 1, "Expected coverage data for one file" - assert coverage["default.app.py"].covered_lines == [1], "Should list line 1 as covered" - assert coverage["default.app.py"].covered == 1, "Should have 1 line as covered" - assert coverage["default.app.py"].missed_lines == [2], "Should list line 2 as missed" - assert coverage["default.app.py"].missed == 1, "Should have 1 line as missed" - assert coverage["default.app.py"].coverage == 0.5, "Coverage should be 50 percent" - assert coverage["default.app.py"].is_target_file == True, "Should be a target file" - - def test_parse_non_target_coverage(self, mocker): - # Arrange - xml_content = ''' - - - - - - - - - - - - - - - ''' - mock_file = mocker.mock_open(read_data='class Other:') - mocker.patch('builtins.open', mock_file) - mocker.patch('xml.etree.ElementTree.parse', return_value=ET.ElementTree(ET.fromstring(xml_content))) - processor = CoberturaProcessor('coverage.xml', 'app.py') - - # Act - coverage_data = processor.parse_coverage_report() - print(coverage_data) - - # Assert - assert len(coverage_data) == 1 - assert 'default.other.py' in coverage_data - assert coverage_data['default.other.py'].missed == 1 - assert coverage_data['default.other.py'].missed_lines == [2] - assert coverage_data['default.other.py'].covered == 1 - assert coverage_data['default.other.py'].covered_lines == [1] - assert coverage_data['default.other.py'].coverage == 0.5 - assert coverage_data['default.other.py'].is_target_file == False + assert coverage["app.py"].covered_lines == [1], "Should list line 1 as covered" + assert coverage["app.py"].covered == 1, "Should have 1 line as covered" + assert coverage["app.py"].missed_lines == [2], "Should list line 2 as missed" + assert coverage["app.py"].missed == 1, "Should have 1 line as missed" + assert coverage["app.py"].coverage == 0.5, "Coverage should be 50 percent" class TestLcovProcessor: # Parse LCOV file with single source file containing covered and uncovered lines @@ -258,7 +221,6 @@ def test_parse_lcov_file_with_covered_and_uncovered_lines(self, tmp_path): assert coverage_data.covered == 2 assert coverage_data.missed == 1 assert coverage_data.coverage == 2/3 - assert coverage_data.is_target_file == True # Handle malformed LCOV file with missing end_of_record def test_parse_malformed_lcov_missing_end_record(self, tmp_path): @@ -285,45 +247,16 @@ def test_parse_malformed_lcov_missing_end_record(self, tmp_path): assert coverage_data.missed == 1 assert coverage_data.coverage == 2/3 - # Parse LCOV file with multiple source file containing covered and uncovered lines - def test_parse_lcov_file_with_multiple_covered_and_uncovered_lines(self, tmp_path): - # Arrange - lcov_content = """SF:src/file1.py - DA:1,1 - DA:2,0 - DA:3,1 - end_of_record - SF:src/file2.py - DA:1,0 - DA:2,1 - DA:3,1 - end_of_record""" - lcov_file = tmp_path / "coverage.lcov" - lcov_file.write_text(lcov_content) - - processor = LcovProcessor(str(lcov_file), "src/file1.py") - - # Act - result = processor.parse_coverage_report() - print(result) - # Assert - assert len(result) == 2 - assert "src/file1.py" in result - coverage_data = result["src/file1.py"] - assert coverage_data.is_target_file == True - other_coverage_data = result["src/file2.py"] - assert other_coverage_data.is_target_file == False - class TestJacocoProcessor: # Successfully parse XML JaCoCo report and extract coverage data def test_parse_xml_coverage_report_success(self, mocker): # Arrange xml_content = 
         xml_content = '''
-
-
+
+
-
+
         '''
 
         mock_file = mocker.mock_open(read_data='package com.example;\npublic class MyClass {')
         mocker.patch('builtins.open', mock_file)
         mocker.patch('xml.etree.ElementTree.parse', return_value=ET.ElementTree(ET.fromstring(xml_content)))
 
-        processor = JacocoProcessor('coverage.xml', 'com/example/MyClass.java')
+        processor = JacocoProcessor('coverage.xml', 'MyClass.java')
 
         # Act
         coverage_data = processor.parse_coverage_report()
 
         # Assert
         assert len(coverage_data) == 1
-        assert 'com.example.MyClass.java' in coverage_data
-        assert coverage_data['com.example.MyClass.java'].missed == 5
-        assert coverage_data['com.example.MyClass.java'].covered == 15
-        assert coverage_data['com.example.MyClass.java'].coverage == 0.75
-        assert coverage_data['com.example.MyClass.java'].is_target_file == True
-
-    # Successfully parse XML JaCoCo report with multiple files and extract coverage data
-    def test_parse_xml_coverage_report_multi_files(self, mocker):
-        # Arrange
-        xml_content = '''
-
-
-
-
-
-
-
-
-        '''
-
-        mock_file = mocker.mock_open(read_data='package com.example;\npublic class MyClass {')
-        mocker.patch('builtins.open', mock_file)
-        mocker.patch('xml.etree.ElementTree.parse', return_value=ET.ElementTree(ET.fromstring(xml_content)))
-
-        processor = JacocoProcessor('coverage.xml', 'com/example/MyClass.java')
-
-        # Act
-        coverage_data = processor.parse_coverage_report()
-
-        # Assert
-        assert len(coverage_data) == 2
-        assert 'com.example.MyClass.java' in coverage_data
-        assert coverage_data['com.example.MyClass.java'].is_target_file == True
-        assert 'com.example.Other.java' in coverage_data
-        assert coverage_data['com.example.Other.java'].missed == 2
-        assert coverage_data['com.example.Other.java'].covered == 20
-        assert coverage_data['com.example.Other.java'].coverage == 0.9090909090909091
-        assert coverage_data['com.example.Other.java'].is_target_file == False
+        assert 'MyClass' in coverage_data
+        assert coverage_data['MyClass'].missed == 5
+        assert coverage_data['MyClass'].covered == 15
+        assert coverage_data['MyClass'].coverage == 0.75
 
     # Handle empty or malformed XML/CSV coverage reports
     def test_parse_empty_xml_coverage_report(self, mocker):
@@ -397,7 +295,11 @@ def test_parse_empty_xml_coverage_report(self, mocker):
         coverage_data = processor.parse_coverage_report()
 
         # Assert
-        assert len(coverage_data) == 0
+        assert len(coverage_data) == 1
+        assert 'MyClass' in coverage_data
+        assert coverage_data['MyClass'].missed == 0
+        assert coverage_data['MyClass'].covered == 0
+        assert coverage_data['MyClass'].coverage == 0.0
 
 class TestDiffCoverageProcessor:
     # Successfully parse JSON diff coverage report and extract coverage data for matching file path
diff --git a/tests/test_UnitTestValidator.py b/tests/test_UnitTestValidator.py
index 0a271f104..3e34023f4 100644
--- a/tests/test_UnitTestValidator.py
+++ b/tests/test_UnitTestValidator.py
@@ -36,7 +36,7 @@ def test_run_coverage_with_report_coverage_flag(self):
             use_report_coverage_feature_flag=True
         )
         with patch.object(Runner, 'run_command', return_value=("", "", 0, datetime.datetime.now())):
-            with patch.object(CoverageProcessor, 'process_coverage_report', return_value=CoverageReport(total_coverage=1.0, file_coverage={'test.py': CoverageData(False, [], 0, [], 0, 1.0)})):
+            with patch.object(CoverageProcessor, 'process_coverage_report', return_value=CoverageReport(total_coverage=1.0, file_coverage={'test.py': CoverageData([], 0, [], 0, 1.0)})):
                 generator.run_coverage()
                 # Dividing by zero so we're expecting a logged error and a return of 0
                 assert generator.current_coverage_report.total_coverage == 1.0
@@ -80,7 +80,7 @@ def test_validate_test_pass_no_coverage_increase_with_prompt(self):
         )
 
         # Setup initial state
-        generator.current_coverage_report = CoverageReport(total_coverage=0.5, file_coverage={'test.py': CoverageData(False, [], 0, [], 0, 0.0)})
+        generator.current_coverage_report = CoverageReport(total_coverage=0.5, file_coverage={'test.py': CoverageData([], 0, [], 0, 0.0)})
         generator.test_headers_indentation = 4
         generator.relevant_line_number_to_insert_tests_after = 100
         generator.relevant_line_number_to_insert_imports_after = 10
@@ -97,7 +97,7 @@ def test_validate_test_pass_no_coverage_increase_with_prompt(self):
 
         with patch("builtins.open", mock_file), \
              patch.object(Runner, 'run_command', return_value=("", "", 0, datetime.datetime.now())), \
-             patch.object(CoverageProcessor, 'process_coverage_report', return_value=CoverageReport(total_coverage=0.4, file_coverage={'test.py': CoverageData(False, [], 0, [], 0, 0.0)})):
+             patch.object(CoverageProcessor, 'process_coverage_report', return_value=CoverageReport(total_coverage=0.4, file_coverage={'test.py': CoverageData([], 0, [], 0, 0.0)})):
 
             result = generator.validate_test(test_to_validate)
@@ -148,10 +148,11 @@ def test_post_process_coverage_report_with_report_coverage_flag(self):
             llm_model="gpt-3",
             use_report_coverage_feature_flag=True
         )
-        with patch.object(CoverageProcessor, 'process_coverage_report', return_value=CoverageReport(total_coverage=1.0, file_coverage={'test.py': CoverageData(False, [1], 1, [1], 1, 1.0)})):
+        # patch.object(CoverageProcessor, 'process_coverage_report', return_value=CoverageReport(total_coverage=0.4, file_coverage={'test.py': CoverageData([], 0, [], 0, 0.0)})):
+        with patch.object(CoverageProcessor, 'process_coverage_report', return_value=CoverageReport(total_coverage=1.0, file_coverage={'test.py': CoverageData([1], 1, [1], 1, 1.0)})):
             coverage_report = generator.post_process_coverage_report(datetime.datetime.now())
             assert coverage_report.total_coverage == 1.0
-            assert coverage_report.file_coverage == {'test.py': CoverageData(False, [1], 1, [1], 1, 1.0)}
+            assert coverage_report.file_coverage == {'test.py': CoverageData([1], 1, [1], 1, 1.0)}
 
     def test_post_process_coverage_report_with_diff_coverage(self):
         with tempfile.NamedTemporaryFile(suffix=".py", delete=False) as temp_source_file:
@@ -164,9 +165,9 @@ def test_post_process_coverage_report_with_diff_coverage(self):
                 diff_coverage=True
             )
             with patch.object(generator, 'generate_diff_coverage_report'), \
-                 patch.object(CoverageProcessor, 'process_coverage_report', return_value=CoverageReport(total_coverage=0.5, file_coverage={'test.py': CoverageData(True, [1], 1, [2], 1, 0.5)})):
+                 patch.object(CoverageProcessor, 'process_coverage_report', return_value=CoverageReport(total_coverage=0.8, file_coverage={'test.py': CoverageData([1], 1, [1], 1, 1.0)})):
                 coverage_report = generator.post_process_coverage_report(datetime.datetime.now())
-                assert coverage_report.total_coverage == 0.5
+                assert coverage_report.total_coverage == 0.8
 
     def test_post_process_coverage_report_without_flags(self):
         with tempfile.NamedTemporaryFile(suffix=".py", delete=False) as temp_source_file:
             generator = UnitTestValidator(
                 source_file_path=temp_source_file.name,
@@ -177,9 +178,9 @@ def test_post_process_coverage_report_without_flags(self):
                 test_command="pytest",
                 llm_model="gpt-3"
             )
-            with patch.object(CoverageProcessor, 'process_coverage_report', return_value=CoverageReport(total_coverage=0.7, file_coverage={'test.py': CoverageData(True, [1], 1, [2], 1, 0.5)})):
+            with patch.object(CoverageProcessor, 'process_coverage_report', return_value=CoverageReport(total_coverage=0.7, file_coverage={'test.py': CoverageData([1], 1, [1], 1, 1.0)})):
                 coverage_report = generator.post_process_coverage_report(datetime.datetime.now())
-                assert coverage_report.total_coverage == 0.5
+                assert coverage_report.total_coverage == 0.7
 
     def test_generate_diff_coverage_report(self):
         with tempfile.NamedTemporaryFile(suffix=".py", delete=False) as temp_source_file: