diff --git a/tools/compare_cts_reports/compare_cts_reports.py b/tools/compare_cts_reports/compare_cts_reports.py index 41efc67e2..5c9d26de4 100755 --- a/tools/compare_cts_reports/compare_cts_reports.py +++ b/tools/compare_cts_reports/compare_cts_reports.py @@ -34,12 +34,17 @@ Usage example: import argparse import csv import os +import re import tempfile import aggregate_cts_reports import parse_cts_report +TESTED_ITEMS = 'tested_items' +PASS_RATE = 'pass_rate' + + def one_way_compare(reports, diff_csv): """Compare two reports in One-way Mode. @@ -61,15 +66,18 @@ def one_way_compare(reports, diff_csv): for keys in report_a.gen_keys_list(): module_name, abi, class_name, test_name = keys - result_in_a = report_a.get_test_status(module_name, abi, - class_name, test_name) + result_in_a = report_a.get_test_status( + module_name, abi, class_name, test_name + ) if parse_cts_report.CtsReport.is_fail(result_in_a): - result_in_b = report_b.get_test_status(module_name, abi, - class_name, test_name) + result_in_b = report_b.get_test_status( + module_name, abi, class_name, test_name + ) - diff_writer.writerow([module_name, abi, class_name, test_name, - result_in_a, result_in_b]) + diff_writer.writerow( + [module_name, abi, class_name, test_name, result_in_a, result_in_b] + ) def two_way_compare(reports, diff_csv): @@ -115,6 +123,98 @@ def two_way_compare(reports, diff_csv): diff_writer.writerow(row) +def gen_summary_row(reports, module_with_abi, item): + """Generate one row of diff.csv. + + According to module_with_abi and item, find the value of each report and + return as a list. 
+ + Args: + reports: list of CtsReport object + module_with_abi: combined module_name and abi + item: the attribute to find in report + + Returns: + row: list to write into output file + """ + + row = [] + + abi_with_bracket = re.findall(r'\[[^\[\]]+\]$', module_with_abi)[0] + + module_name = module_with_abi.removesuffix(abi_with_bracket) + abi = abi_with_bracket[1:-1] + + for report in reports: + module_summary = ( + report.module_summaries[module_name] + if module_name in report.module_summaries + else {} + ) + + summary = module_summary[abi] if abi in module_summary else None + + if not summary: + row.append(0.0 if item == PASS_RATE else 0) + elif item == TESTED_ITEMS: + row.append(summary.tested_items) + elif item == PASS_RATE: + row.append(summary.pass_rate) + elif item in parse_cts_report.CtsReport.STATUS_ORDER: + row.append(summary.counter[item]) + else: + raise ValueError(f"Invalid value '{item}' for argument 'item'") + + return row + + +def n_way_compare(reports, diff_csv): + """Compare multiple reports in N-way Mode. + + Given multiple sets of reports, aggregate them into reports. Then, summarize + the results in these reports. Write the summary into diff_csv, where the + module with the lowest pass rate among all reports will be displayed first. 
+ + Args: + reports: list of reports + diff_csv: path to csv which stores comparison results + """ + + modules_min_rate = {} + report_titles = [] + + for i, report in enumerate(reports): + device_name = report.info['build_device'] + report_titles.append(f'{i}_{device_name}') + + for module_name, abis in report.module_summaries.items(): + for abi, summary in abis.items(): + module_with_abi = f'{module_name}[{abi}]' + + pass_rate = summary.pass_rate + + if module_with_abi not in modules_min_rate: + modules_min_rate[module_with_abi] = pass_rate if i == 0 else 0.0 + elif pass_rate < modules_min_rate[module_with_abi]: + modules_min_rate[module_with_abi] = pass_rate + + module_names = modules_min_rate.keys() + module_order = sorted( + module_names, key=lambda module_name: modules_min_rate[module_name] + ) + + items = parse_cts_report.CtsReport.STATUS_ORDER + [TESTED_ITEMS, PASS_RATE] + + with open(diff_csv, 'w') as diff_csvfile: + diff_writer = csv.writer(diff_csvfile) + diff_writer.writerow(['module_with_abi', 'item'] + report_titles) + + for module_with_abi in module_order: + for item in items: + row = gen_summary_row(reports, module_with_abi, item) + diff_writer.writerow([module_with_abi, item] + row) + + def main(): parser = argparse.ArgumentParser() @@ -122,8 +222,9 @@ def main(): help=('Path to cts reports. Each flag -r is followed by' 'a group of files to be aggregated as one report.'), action='append') - parser.add_argument('--mode', '-m', required=True, choices=['1', '2'], - help='Comparison mode. 1: One-way mode. 2: Two-way mode.') + parser.add_argument('--mode', '-m', required=True, choices=['1', '2', 'n'], + help=('Comparison mode. 1: One-way mode. ' + '2: Two-way mode. 
n: N-way mode.')) parser.add_argument('--output-dir', '-d', required=True, help='Directory to store output files.') parser.add_argument('--csv', default='diff.csv', help='Path to csv output.') @@ -135,7 +236,7 @@ def main(): report_files = args.reports mode = args.mode if (mode in ['1', '2']) and (len(report_files) != 2): - msg = ('Two sets of reports are required for one-way and two-way mode.') + msg = 'Two sets of reports are required for one-way and two-way mode.' raise UserWarning(msg) output_dir = args.output_dir @@ -150,8 +251,9 @@ def main(): if args.output_files: device_name = report.info['build_device'] - sub_dir_name = tempfile.mkdtemp(prefix=f'{i}_{device_name}_', - dir=output_dir) + sub_dir_name = tempfile.mkdtemp( + prefix=f'{i}_{device_name}_', dir=output_dir + ) report.output_files(sub_dir_name) ctsreports.append(report) @@ -160,9 +262,10 @@ def main(): one_way_compare(ctsreports, diff_csv) elif args.mode == '2': two_way_compare(ctsreports, diff_csv) + elif args.mode == 'n': + n_way_compare(ctsreports, diff_csv) else: - # TODO(b/292453652): Implement N-way comparison. 
- print('Error: Arg --mode must be 1 or 2.') + raise ValueError(f'Unexpected argument for --mode: {args.mode}') if __name__ == '__main__': diff --git a/tools/compare_cts_reports/parse_cts_report.py b/tools/compare_cts_reports/parse_cts_report.py index 8b5efe3d5..88862713f 100755 --- a/tools/compare_cts_reports/parse_cts_report.py +++ b/tools/compare_cts_reports/parse_cts_report.py @@ -39,8 +39,16 @@ NO_DATA = 'null' class CtsReport: """Class to record the test result of a cts report.""" - STATUS_ORDER = ['pass', 'IGNORED', 'ASSUMPTION_FAILURE', 'fail', - 'TEST_ERROR', 'TEST_STATUS_UNSPECIFIED'] + STATUS_ORDER = [ + 'pass', + 'IGNORED', + 'ASSUMPTION_FAILURE', + 'fail', + 'TEST_ERROR', + 'TEST_STATUS_UNSPECIFIED', + ] + + FAIL_INDEX = STATUS_ORDER.index('fail') def __init__(self, info): self.info = info @@ -52,7 +60,7 @@ class CtsReport: if status == NO_DATA: return False else: - return (CtsReport.STATUS_ORDER.index(status) >= 3) + return CtsReport.STATUS_ORDER.index(status) >= CtsReport.FAIL_INDEX def gen_keys_list(self): """Generate a 2D-list of keys.""" @@ -66,6 +74,7 @@ class CtsReport: for class_name, tests in test_classes.items(): for test_name in tests.keys(): keys_list.append([module_name, abi, class_name, test_name]) + return keys_list def is_compatible(self, info): @@ -91,8 +100,9 @@ class CtsReport: return tests[test_name] - def set_test_status(self, module_name, abi, - class_name, test_name, test_status): + def set_test_status( + self, module_name, abi, class_name, test_name, test_status + ): """Set test status to the CtsReport object.""" previous = self.get_test_status(module_name, abi, class_name, test_name) @@ -133,8 +143,7 @@ class CtsReport: for test in testcase.iter('Test'): test_name = test.attrib['name'] result = test.attrib['result'] - self.set_test_status(module_name, abi, - class_name, test_name, result) + self.set_test_status(module_name, abi, class_name, test_name, result) def write_to_csv(self, result_csvfile, summary_csvfile): """Write the 
information of the report to the csv files. @@ -148,21 +157,26 @@ class CtsReport: summary_writer.writerow(['module_name', 'abi'] + CtsReport.STATUS_ORDER) result_writer = csv.writer(result_csvfile) - result_writer.writerow(['module_name', 'abi', - 'class_name', 'test_name', 'result']) + result_writer.writerow( + ['module_name', 'abi', 'class_name', 'test_name', 'result'] + ) modules = self.result_tree for module_name, abis in modules.items(): for abi, test_classes in abis.items(): module_summary = self.module_summaries[module_name][abi] + summary = module_summary.summary_list() - summary_writer.writerow([module_name, abi] + summary) + + row = [module_name, abi] + summary + summary_writer.writerow(row) for class_name, tests in test_classes.items(): for test_name, result in tests.items(): - result_writer.writerow([module_name, abi, - class_name, test_name, result]) + result_writer.writerow( + [module_name, abi, class_name, test_name, result] + ) def output_files(self, output_dir): """Produce output files into the directory.""" @@ -197,35 +211,57 @@ class CtsReport: def __init__(self): self.counter = dict.fromkeys(CtsReport.STATUS_ORDER, 0) + @property + def tested_items(self): + """All tested items.""" + items = 0 + for status in CtsReport.STATUS_ORDER: + items += self.counter[status] + return items + + @property + def pass_rate(self): + """Pass rate of the module.""" + if self.tested_items == 0: + return 0.0 + else: + pass_category = 0 + for status in CtsReport.STATUS_ORDER: + if not CtsReport.is_fail(status): + pass_category += self.counter[status] + return pass_category / self.tested_items + def print_summary(self): for key in CtsReport.STATUS_ORDER: print(f'{key}: {self.counter[key]}') print() def summary_list(self): - return [self.counter[type] for type in CtsReport.STATUS_ORDER] + return [self.counter[key] for key in CtsReport.STATUS_ORDER] -ATTRS_TO_SHOW = ['Result::Build.build_model', - 'Result::Build.build_id', - 'Result::Build.build_fingerprint', - 
'Result::Build.build_device', - 'Result::Build.build_version_sdk', - 'Result::Build.build_version_security_patch', - 'Result::Build.build_board', - 'Result::Build.build_type', - 'Result::Build.build_version_release', - 'Result.suite_name', - 'Result.suite_version', - 'Result.suite_plan', - 'Result.suite_build_number',] +ATTRS_TO_SHOW = [ + 'Result::Build.build_model', + 'Result::Build.build_id', + 'Result::Build.build_fingerprint', + 'Result::Build.build_device', + 'Result::Build.build_version_sdk', + 'Result::Build.build_version_security_patch', + 'Result::Build.build_board', + 'Result::Build.build_type', + 'Result::Build.build_version_release', + 'Result.suite_name', + 'Result.suite_version', + 'Result.suite_plan', + 'Result.suite_build_number', +] def parse_attrib_path(attrib_path): """Parse the path into xml tag and attribute name.""" first_dot = attrib_path.index('.') tags = attrib_path[:first_dot].split('::') - attr_name = attrib_path[first_dot+1:] + attr_name = attrib_path[first_dot + 1 :] return tags, attr_name @@ -258,7 +294,7 @@ def print_test_info(info): max_key_len = max([len(k) for k in info]) max_value_len = max([len(info[k]) for k in info]) - table_len = (max_key_len + 2 + max_value_len) + table_len = max_key_len + 2 + max_value_len print('=' * table_len) @@ -288,7 +324,8 @@ def parse_report_file(report_file): xml_path = ( extract_xml_from_zip(report_file, temp_dir) if zipfile.is_zipfile(report_file) - else report_file) + else report_file + ) test_info = get_test_info_xml(xml_path) print_test_info(test_info) @@ -302,11 +339,20 @@ def parse_report_file(report_file): def main(): parser = argparse.ArgumentParser() - parser.add_argument('--report-file', required=True, - help=('Path to a cts report, where a cts report could ' - 'be a zip archive or a xml file.')) - parser.add_argument('-d', '--output-dir', required=True, - help=('Path to the directory to store output files.')) + parser.add_argument( + '--report-file', + required=True, + help=( + 'Path to 
a cts report, where a cts report could ' + 'be a zip archive or a xml file.' + ), + ) + parser.add_argument( + '-d', + '--output-dir', + required=True, + help='Path to the directory to store output files.', + ) args = parser.parse_args() @@ -320,5 +366,6 @@ def main(): report.output_files(output_dir) + if __name__ == '__main__': main()