poky/scripts/lib/resulttool/report.py
Richard Purdie 47eb3d00e9 resulttool: Improvements to allow integration to the autobuilder
This is a combined patch of the various tweaks and improvements I
made to resulttool:

* Avoid subprocess.run() as it's a Python 3.6 feature and we
  have autobuilder workers with 3.5

* Avoid python keywords as variable names

* Simplify dict accesses using .get()

* Rename resultsutils -> resultutils to match the resultstool ->
  resulttool rename

* Formalised the handling of "file_name" as "TESTSERIES", which the code
  will now add into the json configuration data if it's not present, based
  on the directory name (see the sketch below)

* When we don't have failed test cases, print something saying so
  instead of an empty table

* Tweak the table headers in the report to be more readable (reference
  "Test Series" instead of file_id and ID instead of results_id)

* Improve/simplify the max string length handling

* Merge the counts and percentage data into one table in the report
  since printing two reports of the same data confuses the user

* Removed the confusing header in the regression report

* Show matches, then regressions, then unmatched runs in the regression
  report, and remove chatty, unneeded output

* Try harder to "pair" up matching configurations to reduce noise in
  the regression report

* Abstracted the "mapping" table concept used for pairing in the
  regression code into general code in resultutils

* Created multiple mappings for results analysis, results storage and
  'flattening' results data in a merge

* Simplify the merge command to take a source and a destination,
  letting the destination be a directory or a file, removing the need for
  an output directory parameter

* Add the 'IMAGE_PKGTYPE' and 'DISTRO' config options to the regression
  mappings

* Have the store command place the testresults files in a layout from
  the mapping, making commits into the git repo for results storage more
  useful for simple comparison purposes

* Set the oe-git-archive tag format appropriately for oeqa results
  storage (and simplify the commit messages closer to their defaults)

* Fix oe-git-archive to use the commit/branch data from the results file

* Cleaned up the command option help to match other changes

* Follow the model of git branch/tag processing used by oe-build-perf-report
  and use that to read the data using git show, avoiding any branch change
  (see the sketch below)

* Add ptest summary to the report command

* Update the tests to match the above changes

(From OE-Core rev: ff2c029b568f70aa9960dde04ddd207829812ea0)

Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
2019-02-21 12:34:00 +00:00
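
The TESTSERIES handling mentioned in the changelog amounts to deriving a
series name from the directory a results file was found in when the json
carries no explicit value. A minimal sketch of that idea (the helper name and
exact placement are illustrative, not the actual code in resultutils):

    # Hypothetical helper: fall back to the containing directory name when a
    # results file has no TESTSERIES in its configuration section.
    import os

    def ensure_testseries(path, testresult):
        configuration = testresult.setdefault('configuration', {})
        if 'TESTSERIES' not in configuration:
            # e.g. .../testresults/qemux86-64/testresults.json -> "qemux86-64"
            configuration['TESTSERIES'] = os.path.basename(os.path.dirname(path))
        return testresult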
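
Similarly, reading results from a tag via git show means the data can be
pulled straight from the object store without checking anything out. A rough
sketch of the approach, assuming a testresults.json stored at the tag (the
function and file names are illustrative, not the actual helper):

    # Hypothetical reader: 'git show <tag>:<path>' prints the file as committed
    # at that tag, so the current branch and working tree are left untouched.
    import json
    import subprocess

    def load_results_from_tag(repo_dir, tag, filename='testresults.json'):
        blob = subprocess.check_output(['git', '-C', repo_dir, 'show',
                                        '%s:%s' % (tag, filename)])
        return json.loads(blob.decode('utf-8'))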

# test result tool - report text based test results
#
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
import os
import glob
import json
import resulttool.resultutils as resultutils
from oeqa.utils.git import GitRepo
import oeqa.utils.gitarchive as gitarchive

class ResultsTextReport(object):
    def __init__(self):
        self.ptests = {}
        self.result_types = {'passed': ['PASSED', 'passed'],
                             'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
                             'skipped': ['SKIPPED', 'skipped']}

    def handle_ptest_result(self, k, status, result):
        if k == 'ptestresult.sections':
            return
        _, suite, test = k.split(".", 2)
        # Handle 'glib-2.0'
        if suite not in result['ptestresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ptestresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass
        if suite not in self.ptests:
            self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ptests[suite][tk] += 1
        if suite in result['ptestresult.sections']:
            if 'duration' in result['ptestresult.sections'][suite]:
                self.ptests[suite]['duration'] = result['ptestresult.sections'][suite]['duration']
            if 'timeout' in result['ptestresult.sections'][suite]:
                self.ptests[suite]['duration'] += " T"

    def get_aggregated_test_result(self, logger, testresult):
        test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
        result = testresult.get('result', [])
        for k in result:
            test_status = result[k].get('status', [])
            for tk in self.result_types:
                if test_status in self.result_types[tk]:
                    test_count_report[tk] += 1
            if test_status in self.result_types['failed']:
                test_count_report['failed_testcases'].append(k)
            if k.startswith("ptestresult."):
                self.handle_ptest_result(k, test_status, result)
        return test_count_report

    def print_test_report(self, template_file_name, test_count_reports):
        from jinja2 import Environment, FileSystemLoader
        script_path = os.path.dirname(os.path.realpath(__file__))
        file_loader = FileSystemLoader(script_path + '/template')
        env = Environment(loader=file_loader, trim_blocks=True)
        template = env.get_template(template_file_name)
        havefailed = False
        haveptest = bool(self.ptests)
        reportvalues = []
        cols = ['passed', 'failed', 'skipped']
        maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 }
        for line in test_count_reports:
            total_tested = line['passed'] + line['failed'] + line['skipped']
            vals = {}
            vals['result_id'] = line['result_id']
            vals['testseries'] = line['testseries']
            vals['sort'] = line['testseries'] + "_" + line['result_id']
            vals['failed_testcases'] = line['failed_testcases']
            for k in cols:
                vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
            for k in maxlen:
                if k in vals and len(vals[k]) > maxlen[k]:
                    maxlen[k] = len(vals[k])
            reportvalues.append(vals)
            if line['failed_testcases']:
                havefailed = True
        for ptest in self.ptests:
            if len(ptest) > maxlen['ptest']:
                maxlen['ptest'] = len(ptest)
        output = template.render(reportvalues=reportvalues,
                                 havefailed=havefailed,
                                 haveptest=haveptest,
                                 ptests=self.ptests,
                                 maxlen=maxlen)
        print(output)

    def view_test_report(self, logger, source_dir, tag):
        test_count_reports = []
        if tag:
            repo = GitRepo(source_dir)
            testresults = resultutils.git_get_result(repo, [tag])
        else:
            testresults = resultutils.load_resultsdata(source_dir)
        for testsuite in testresults:
            for resultid in testresults[testsuite]:
                result = testresults[testsuite][resultid]
                test_count_report = self.get_aggregated_test_result(logger, result)
                test_count_report['testseries'] = result['configuration']['TESTSERIES']
                test_count_report['result_id'] = resultid
                test_count_reports.append(test_count_report)
        self.print_test_report('test_report_full_text.txt', test_count_reports)

def report(args, logger):
    report = ResultsTextReport()
    report.view_test_report(logger, args.source_dir, args.tag)
    return 0

def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser_build = subparsers.add_parser('report', help='summarise test results',
                                         description='print a text-based summary of the test results',
                                         group='analysis')
    parser_build.set_defaults(func=report)
    parser_build.add_argument('source_dir',
                              help='source file/directory that contain the test result files to summarise')
    parser_build.add_argument('-t', '--tag', default='',
                              help='source_dir is a git repository, report on the tag specified from that repository')
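
Given the parser registered above, the report is generated either directly
from a directory of results files (resulttool report path/to/testresults) or,
with -t/--tag, from a tag in a results repository (resulttool report -t <tag>
path/to/results-repo); the paths and tag name here are placeholders.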