This is a combined patch of the various tweaks and improvements I made to resulttool:

* Avoid subprocess.run() as it's a python 3.6 feature and we have autobuilder workers with 3.5.
* Avoid python keywords as variable names.
* Simplify dict accesses using .get().
* Rename resultsutils -> resultutils to match the resultstool -> resulttool rename.
* Formalised the handling of "file_name" to "TESTSERIES", which the code will now add into the json configuration data if it's not present, based on the directory name.
* When we don't have failed test cases, print something saying so instead of an empty table.
* Tweak the table headers in the report to be more readable (reference "Test Series" instead of file_id and ID instead of results_id).
* Improve/simplify the max string length handling.
* Merge the counts and percentage data into one table in the report since printing two reports of the same data confuses the user.
* Removed the confusing header in the regression report.
* Show matches, then regressions, then unmatched runs in the regression report; also remove chatty unneeded output.
* Try harder to "pair" up matching configurations to reduce noise in the regression report.
* Abstracted the "mapping" table concept used for pairing in the regression code to general code in resultutils.
* Created multiple mappings for results analysis, results storage and 'flattening' results data in a merge.
* Simplify the merge command to take a source and a destination, letting the destination be a directory or a file, removing the need for an output directory parameter.
* Add the 'IMAGE_PKGTYPE' and 'DISTRO' config options to the regression mappings.
* Have the store command place the testresults files in a layout from the mapping, making commits into the git repo for results storage more useful for simple comparison purposes.
* Set the oe-git-archive tag format appropriately for oeqa results storage (and simplify the commit messages closer to their defaults).
* Fix oe-git-archive to use the commit/branch data from the results file.
* Cleaned up the command option help to match other changes.
* Follow the model of git branch/tag processing used by oe-build-perf-report and use that to read the data using git show, avoiding a branch change.
* Add ptest summary to the report command.
* Update the tests to match the above changes.

(From OE-Core rev: ff2c029b568f70aa9960dde04ddd207829812ea0)

Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
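The script below implements the manualexecution subcommand that register_commands() at the end of the file wires into resulttool. As a usage sketch (the JSON file name is a placeholder, not a path guaranteed by this page), a run looks like:

    resulttool manualexecution meta/lib/oeqa/manual/<testmodule>.json

The tool then prompts interactively for configuration values and a per-test-case PASSED/FAILED/BLOCKED/SKIPPED verdict, and writes the collected results under the build directory.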
# test case management tool - manual execution from testopia test cases
#
# Copyright (c) 2018, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
# more details.
#
import argparse
import json
import os
import sys
import datetime
import re
from oeqa.core.runner import OETestResultJSONHelper

def load_json_file(file):
    with open(file, "r") as f:
        return json.load(f)

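# The manual test case JSON consumed below is expected to be a list shaped
# roughly like this (a sketch inferred from the key accesses in this file;
# the values are purely illustrative):
#
# [
#     {
#         "test": {
#             "@alias": "<module>.<suite>.<case>",
#             "execution": {
#                 "1": {"action": "...", "expected_results": "..."},
#                 "2": {"action": "...", "expected_results": "..."}
#             }
#         }
#     }
# ]
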
class ManualTestRunner(object):
    def __init__(self):
        self.jdata = ''
        self.test_module = ''
        self.test_suite = ''
        self.test_cases = ''
        self.configuration = ''
        self.starttime = ''
        self.result_id = ''
        self.write_dir = ''

    def _get_testcases(self, file):
        self.jdata = load_json_file(file)
        self.test_cases = []
        # test case aliases are expected in the form <module>.<suite>.<case>
        self.test_module = self.jdata[0]['test']['@alias'].split('.', 2)[0]
        self.test_suite = self.jdata[0]['test']['@alias'].split('.', 2)[1]
        for i in self.jdata:
            self.test_cases.append(i['test']['@alias'].split('.', 2)[2])

    def _get_input(self, config):
        while True:
            output = input('{} = '.format(config))
            if re.match('^[a-zA-Z0-9_]+$', output):
                break
            print('Only alphanumeric characters and underscores are allowed. Please try again')
        return output

    def _create_config(self):
        self.configuration = {}
        while True:
            try:
                conf_total = int(input('\nPlease provide how many configurations you want to save\n'))
                break
            except ValueError:
                print('Invalid input. Please provide the input as a number, not a character.')
        for i in range(conf_total):
            print('---------------------------------------------')
            print('This is configuration #%s. Please provide the configuration name and its value' % (i + 1))
            print('---------------------------------------------')
            name_conf = self._get_input('Configuration Name')
            value_conf = self._get_input('Configuration Value')
            print('---------------------------------------------\n')
            self.configuration[name_conf.upper()] = value_conf
        current_datetime = datetime.datetime.now()
        self.starttime = current_datetime.strftime('%Y%m%d%H%M%S')
        self.configuration['STARTTIME'] = self.starttime
        self.configuration['TEST_TYPE'] = self.test_module

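    # After the prompts above, self.configuration ends up looking something
    # like the following (configuration names come from user input and are
    # upper-cased; the 'MACHINE' entry and timestamp are purely illustrative):
    #
    #   {'MACHINE': 'qemux86', 'STARTTIME': '20190225183000', 'TEST_TYPE': '<module>'}
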
    def _create_result_id(self):
        self.result_id = 'manual_' + self.test_module + '_' + self.starttime

    def _execute_test_steps(self, test_id):
        test_result = {}
        testcase_id = self.test_module + '.' + self.test_suite + '.' + self.test_cases[test_id]
        total_steps = len(self.jdata[test_id]['test']['execution'].keys())
        print('------------------------------------------------------------------------')
        print('Executing test case: ' + self.test_cases[test_id])
        print('------------------------------------------------------------------------')
        print('You have a total of ' + str(total_steps) + ' test steps to be executed.')
        print('------------------------------------------------------------------------\n')
        # Walk the tester through each manual step and its expected output.
        for step in sorted((self.jdata[test_id]['test']['execution']).keys()):
            print('Step %s: ' % step + self.jdata[test_id]['test']['execution']['%s' % step]['action'])
            print('Expected output: ' + self.jdata[test_id]['test']['execution']['%s' % step]['expected_results'])
            input('\nPlease press ENTER when you are done to proceed to the next step.\n')
        # Ask for the overall verdict for this test case, retrying on invalid input.
        result_types = {'p': 'PASSED',
                        'f': 'FAILED',
                        'b': 'BLOCKED',
                        's': 'SKIPPED'}
        while True:
            done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n')
            done = done.lower()
            if done in result_types:
                res = result_types[done]
                if res == 'FAILED':
                    log_input = input('\nPlease enter the error and the description of the log: (Ex:log:211 Error Bitbake)\n')
                    test_result.update({testcase_id: {'status': '%s' % res, 'log': '%s' % log_input}})
                else:
                    test_result.update({testcase_id: {'status': '%s' % res}})
                break
            print('Invalid input!')
        return test_result

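    # _execute_test_steps() returns a single-entry dict keyed by the full test
    # case id, e.g. {'<module>.<suite>.<case>': {'status': 'FAILED', 'log': '...'}}
    # (the 'log' key is only present for failed cases); run_test() below merges
    # these per-case dicts into one result set.
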
    def _create_write_dir(self):
        basepath = os.environ['BUILDDIR']
        self.write_dir = basepath + '/tmp/log/manual/'

    def run_test(self, file):
        self._get_testcases(file)
        self._create_config()
        self._create_result_id()
        self._create_write_dir()
        test_results = {}
        print('\nTotal number of test cases in this test suite: ' + '%s\n' % len(self.jdata))
        for i in range(len(self.jdata)):
            test_result = self._execute_test_steps(i)
            test_results.update(test_result)
        return self.configuration, self.result_id, self.write_dir, test_results

def manualexecution(args, logger):
    testrunner = ManualTestRunner()
    get_configuration, get_result_id, get_write_dir, get_test_results = testrunner.run_test(args.file)
    resultjsonhelper = OETestResultJSONHelper()
    resultjsonhelper.dump_testresult_file(get_write_dir, get_configuration, get_result_id,
                                          get_test_results)
    return 0

def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser_build = subparsers.add_parser('manualexecution', help='helper script for populating results during manual test execution.',
                                         description='helper script for populating results during manual test execution. You can find manual test case JSON files in meta/lib/oeqa/manual/',
                                         group='manualexecution')
    parser_build.set_defaults(func=manualexecution)
    parser_build.add_argument('file', help='specify the path to the manual test case JSON file. Note: Please use "" to encapsulate the file path.')