poky/meta/lib/oeqa/runtime/cases/ptest.py
Richard Purdie fcf55f58ae oeqa/runtime/ptest: Inject results+logs into stored json results file
This allows the ptest results from ptest-runner, run in an image, to be
transferred over to the resulting json results output.

Each test is given a pass/skip/fail status so individual results can be
monitored. The raw log output from ptest-runner is also dumped into the
results json file, which makes after-the-fact debugging much easier.
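
For illustration, the injected structure looks roughly like this ('glibc'
and the test names are made up; the real keys are "ptestresult." plus the
section and the mangled test name, as in the code below):

    extraresults = {
        'ptestresult.rawlogs': {'log': '<full ptest-runner output>'},
        'ptestresult.glibc.tst-cancel1': {'status': 'PASSED'},
        'ptestresult.glibc.tst-signal1': {'status': 'FAILED'},
    }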

Currently the log output is not split up per test, but doing so would make
a good future enhancement.

I attempted to implement this with python subTests, but that failed: the
output was too confusing, subTests don't support any kind of log output
handling, subTest successes aren't logged, and it was making things far
more complex than they needed to be.
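
For reference, the rejected approach would have looked something like this
(a minimal sketch, not the actual code that was tried; 'results' stands in
for the parsed ptest output):

    def test_ptestrunner(self):
        for section, tests in results.items():
            for name, status in tests:
                with self.subTest(section=section, test=name):
                    self.assertNotEqual(status, 'fail')

Passing subTests produce no output at all, and there is nowhere to attach
the per-test log output, which is what made this approach unworkable here.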

We mark ptest-runner as "EXPECTEDFAILURE" since it's unlikely every ptest
will currently pass and we don't want that to fail the whole image test run.
It's assumed there will be later analysis of the json output to determine
regressions. We do have to change the test runner code so that
'unexpectedsuccess' is not treated as a failure.
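
The runner change itself is outside this file; conceptually it amounts to
overriding wasSuccessful(), which since Python 3.4 returns False when
unexpected successes are present (a sketch, not the actual OEQA code):

    class NonStrictResult(unittest.TextTestResult):
        def wasSuccessful(self):
            # Unlike the default, ignore self.unexpectedSuccesses
            return len(self.failures) == len(self.errors) == 0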

Also, the test names are manipulated to remove spaces and brackets, with
"_" used as the replacement and runs of whitespace collapsed to a single "_".

(From OE-Core rev: a13e088942e2a3c3521e98954a394e61a15234e8)

Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
2018-11-07 23:08:55 +00:00


import os
import datetime
import pprint
import unittest

from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.oeid import OETestID
from oeqa.core.decorator.data import skipIfNotFeature
from oeqa.utils.logparser import Lparser, Result

class PtestRunnerTest(OERuntimeTestCase):

    # a ptest log parser
    def parse_ptest(self, logfile):
        parser = Lparser(test_0_pass_regex="^PASS:(.+)",
                         test_0_fail_regex="^FAIL:(.+)",
                         test_0_skip_regex="^SKIP:(.+)",
                         section_0_begin_regex="^BEGIN: .*/(.+)/ptest",
                         section_0_end_regex="^END: .*/(.+)/ptest")
        parser.init()
        result = Result()

        # Track which ptest section we are currently inside; None until
        # the first BEGIN line is seen.
        current_section = None

        with open(logfile, errors='replace') as f:
            for line in f:
                parsed = parser.parse_line(line)
                if not parsed:
                    continue
                line_type, category, status, name = parsed

                if line_type == 'section' and status == 'begin':
                    current_section = name
                    continue

                if line_type == 'section' and status == 'end':
                    current_section = None
                    continue

                # Individual test results are recorded against the section
                # (i.e. the package) they were reported under.
                if line_type == 'test' and status in ('pass', 'fail', 'skip'):
                    result.store(current_section, name, status)

        result.sort_tests()
        return result
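
    # For reference, the log lines from ptest-runner that this parser
    # matches look like the following (hypothetical 'glibc' section and
    # test names):
    #
    #   BEGIN: /usr/lib/glibc/ptest
    #   PASS: tst-cancel1
    #   FAIL: tst-signal1
    #   SKIP: tst-xfail
    #   END: /usr/lib/glibc/ptest
    #
    # roughly yielding:
    #   result_dict == {'glibc': [('tst-cancel1', 'pass'),
    #                             ('tst-signal1', 'fail'),
    #                             ('tst-xfail', 'skip')]}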
    @OETestID(1600)
    @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @unittest.expectedFailure
    def test_ptestrunner(self):
        status, output = self.target.run('which ptest-runner', 0)
        if status != 0:
            self.skipTest("No -ptest packages are installed in the image")

        test_log_dir = self.td.get('TEST_LOG_DIR', '')
        # TEST_LOG_DIR may be empty when testimage is added after
        # testdata.json is generated.
        if not test_log_dir:
            test_log_dir = os.path.join(self.td.get('WORKDIR', ''), 'testimage')
        # Don't use self.td.get('DATETIME'): it comes from testdata.json, is
        # not up to date, and may cause "File exists" errors on a re-run.
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        ptest_log_dir_link = os.path.join(test_log_dir, 'ptest_log')
        ptest_log_dir = '%s.%s' % (ptest_log_dir_link, timestamp)
        ptest_runner_log = os.path.join(ptest_log_dir, 'ptest-runner.log')

        status, output = self.target.run('ptest-runner', 0)
        os.makedirs(ptest_log_dir)
        with open(ptest_runner_log, 'w') as f:
            f.write(output)

        # status != 0 is OK since some ptest tests may fail;
        # 127 means ptest-runner itself could not be executed.
        self.assertTrue(status != 127, msg="Cannot execute ptest-runner!")

        if not hasattr(self.tc, "extraresults"):
            self.tc.extraresults = {}
        extras = self.tc.extraresults
        extras['ptestresult.rawlogs'] = {'log': output}

        # Parse and save results
        parse_result = self.parse_ptest(ptest_runner_log)
        parse_result.log_as_files(ptest_log_dir, test_status=['pass', 'fail', 'skip'])
        if os.path.exists(ptest_log_dir_link):
            # Remove the old link to create a new one
            os.remove(ptest_log_dir_link)
        os.symlink(os.path.basename(ptest_log_dir), ptest_log_dir_link)

        # Mangle the test names: "(" and ")" become "_", and runs of
        # whitespace collapse to a single "_".
        trans = str.maketrans("()", "__")
        resmap = {'pass': 'PASSED', 'skip': 'SKIPPED', 'fail': 'FAILED'}
        for section in parse_result.result_dict:
            for test, result in parse_result.result_dict[section]:
                testname = "ptestresult." + section + "." + "_".join(test.translate(trans).split())
                extras[testname] = {'status': resmap[result]}

        failed_tests = {}
        for section in parse_result.result_dict:
            failed_testcases = ["_".join(test.translate(trans).split())
                                for test, result in parse_result.result_dict[section]
                                if result == 'fail']
            if failed_testcases:
                failed_tests[section] = failed_testcases

        if failed_tests:
            self.fail("Failed ptests:\n%s" % pprint.pformat(failed_tests))