oeqa/runtime/cases/dldt: Enable inference engine and model optimizer tests

Add sanity tests for inference engine:
   - test inference engine C/C++ shared library
   - test inference engine Python API
   - test inference engine CPU, GPU and MYRIAD plugins

Add sanity tests for model optimizer:
   - test model optimizer can generate IR

Licenses:
   - classification_sample.py
     license: Apache 2.0
     source: <install_root>/deployment_tools/inference_engine/samples/*
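
Configuration:
   The tests read their settings from the bitbake test data. Both values
   below are examples of the expected form (taken from the skip messages
   in the test cases), not real settings:
      DLDT_PIP_PROXY = "proxy.server:port"
      DLDT_MO_EXE_DIR = "/path/to/directory/containing/mo.py"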

Signed-off-by: Yeoh Ee Peng <ee.peng.yeoh@intel.com>
Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
Yeoh Ee Peng 2019-12-04 11:17:42 +08:00 committed by Anuj Mittal
parent f39ad91524
commit 1e514838df
8 changed files with 384 additions and 0 deletions

lib/oeqa/runtime/cases/dldt_inference_engine.py
@@ -0,0 +1,101 @@
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.runtime.decorator.package import OEHasPackage
from oeqa.core.decorator.depends import OETestDepends
from oeqa.runtime.miutils.targets.oeqatarget import OEQATarget
from oeqa.runtime.miutils.tests.squeezenet_model_download_test import SqueezenetModelDownloadTest
from oeqa.runtime.miutils.tests.dldt_model_optimizer_test import DldtModelOptimizerTest
from oeqa.runtime.miutils.tests.dldt_inference_engine_test import DldtInferenceEngineTest
from oeqa.runtime.miutils.dldtutils import get_testdata_config

class DldtInferenceEngine(OERuntimeTestCase):

    @classmethod
    def setUpClass(cls):
        cls.sqn_download = SqueezenetModelDownloadTest(OEQATarget(cls.tc.target), '/tmp/ie/md')
        cls.sqn_download.setup()
        cls.dldt_mo = DldtModelOptimizerTest(OEQATarget(cls.tc.target), '/tmp/ie/ir')
        cls.dldt_mo.setup()
        cls.dldt_ie = DldtInferenceEngineTest(OEQATarget(cls.tc.target), '/tmp/ie/inputs')
        cls.dldt_ie.setup()
        cls.ir_files_dir = cls.dldt_mo.work_dir

    @classmethod
    def tearDownClass(cls):
        cls.dldt_ie.tear_down()
        cls.dldt_mo.tear_down()
        cls.sqn_download.tear_down()

    @OEHasPackage(['dldt-model-optimizer'])
    @OEHasPackage(['wget'])
    def test_dldt_ie_can_create_ir_and_download_input(self):
        proxy_port = get_testdata_config(self.tc.td, 'DLDT_PIP_PROXY')
        if not proxy_port:
            self.skipTest('Need to configure bitbake configuration (DLDT_PIP_PROXY="proxy.server:port").')

        (status, output) = self.sqn_download.test_can_download_squeezenet_model(proxy_port)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
        (status, output) = self.sqn_download.test_can_download_squeezenet_prototxt(proxy_port)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

        mo_exe_dir = get_testdata_config(self.tc.td, 'DLDT_MO_EXE_DIR')
        if not mo_exe_dir:
            self.skipTest('Need to configure bitbake configuration (DLDT_MO_EXE_DIR="directory_to_mo.py").')
        mo_files_dir = self.sqn_download.work_dir
        (status, output) = self.dldt_mo.test_dldt_mo_can_create_ir(mo_exe_dir, mo_files_dir)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

        (status, output) = self.dldt_ie.test_can_download_input_file(proxy_port)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
    @OEHasPackage(['dldt-inference-engine'])
    @OEHasPackage(['dldt-inference-engine-samples'])
    def test_dldt_ie_classification_with_cpu(self):
        (status, output) = self.dldt_ie.test_dldt_ie_classification_with_device('CPU', self.ir_files_dir)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
    @OEHasPackage(['dldt-inference-engine'])
    @OEHasPackage(['dldt-inference-engine-samples'])
    @OEHasPackage(['intel-compute-runtime'])
    @OEHasPackage(['opencl-icd-loader'])
    def test_dldt_ie_classification_with_gpu(self):
        (status, output) = self.dldt_ie.test_dldt_ie_classification_with_device('GPU', self.ir_files_dir)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
    @OEHasPackage(['dldt-inference-engine'])
    @OEHasPackage(['dldt-inference-engine-samples'])
    @OEHasPackage(['dldt-inference-engine-vpu-firmware'])
    def test_dldt_ie_classification_with_myriad(self):
        (status, output) = self.dldt_ie.test_dldt_ie_classification_with_device('MYRIAD', self.ir_files_dir)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
    @OEHasPackage(['dldt-inference-engine'])
    @OEHasPackage(['dldt-inference-engine-python3'])
    @OEHasPackage(['python3-opencv'])
    @OEHasPackage(['python3-numpy'])
    def test_dldt_ie_classification_python_api_with_cpu(self):
        (status, output) = self.dldt_ie.test_dldt_ie_classification_python_api_with_device('CPU', self.ir_files_dir, 'libcpu_extension.so')
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
    @OEHasPackage(['dldt-inference-engine'])
    @OEHasPackage(['dldt-inference-engine-python3'])
    @OEHasPackage(['intel-compute-runtime'])
    @OEHasPackage(['opencl-icd-loader'])
    @OEHasPackage(['python3-opencv'])
    @OEHasPackage(['python3-numpy'])
    def test_dldt_ie_classification_python_api_with_gpu(self):
        (status, output) = self.dldt_ie.test_dldt_ie_classification_python_api_with_device('GPU', self.ir_files_dir)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

    @OETestDepends(['dldt_inference_engine.DldtInferenceEngine.test_dldt_ie_can_create_ir_and_download_input'])
    @OEHasPackage(['dldt-inference-engine'])
    @OEHasPackage(['dldt-inference-engine-python3'])
    @OEHasPackage(['dldt-inference-engine-vpu-firmware'])
    @OEHasPackage(['python3-opencv'])
    @OEHasPackage(['python3-numpy'])
    def test_dldt_ie_classification_python_api_with_myriad(self):
        (status, output) = self.dldt_ie.test_dldt_ie_classification_python_api_with_device('MYRIAD', self.ir_files_dir)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

lib/oeqa/runtime/cases/dldt_model_optimizer.py
@@ -0,0 +1,38 @@
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.runtime.decorator.package import OEHasPackage
from oeqa.runtime.miutils.targets.oeqatarget import OEQATarget
from oeqa.runtime.miutils.tests.squeezenet_model_download_test import SqueezenetModelDownloadTest
from oeqa.runtime.miutils.tests.dldt_model_optimizer_test import DldtModelOptimizerTest
from oeqa.runtime.miutils.dldtutils import get_testdata_config

class DldtModelOptimizer(OERuntimeTestCase):

    @classmethod
    def setUpClass(cls):
        cls.sqn_download = SqueezenetModelDownloadTest(OEQATarget(cls.tc.target), '/tmp/mo/md')
        cls.sqn_download.setup()
        cls.dldt_mo = DldtModelOptimizerTest(OEQATarget(cls.tc.target), '/tmp/mo/ir')
        cls.dldt_mo.setup()

    @classmethod
    def tearDownClass(cls):
        cls.dldt_mo.tear_down()
        cls.sqn_download.tear_down()

    @OEHasPackage(['dldt-model-optimizer'])
    @OEHasPackage(['wget'])
    def test_dldt_mo_can_create_ir(self):
        proxy_port = get_testdata_config(self.tc.td, 'DLDT_PIP_PROXY')
        if not proxy_port:
            self.skipTest('Need to configure bitbake configuration (DLDT_PIP_PROXY="proxy.server:port").')

        (status, output) = self.sqn_download.test_can_download_squeezenet_model(proxy_port)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))
        (status, output) = self.sqn_download.test_can_download_squeezenet_prototxt(proxy_port)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

        mo_exe_dir = get_testdata_config(self.tc.td, 'DLDT_MO_EXE_DIR')
        if not mo_exe_dir:
            self.skipTest('Need to configure bitbake configuration (DLDT_MO_EXE_DIR="directory_to_mo.py").')
        mo_files_dir = self.sqn_download.work_dir
        (status, output) = self.dldt_mo.test_dldt_mo_can_create_ir(mo_exe_dir, mo_files_dir)
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

lib/oeqa/runtime/files/dldt-inference-engine/classification_sample.py
@@ -0,0 +1,135 @@
#!/usr/bin/env python3
"""
Copyright (C) 2018-2019 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser, SUPPRESS
import cv2
import numpy as np
import logging as log
from time import time
from openvino.inference_engine import IENetwork, IECore


def build_argparser():
    parser = ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
    args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.", required=True,
                      type=str)
    args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to an image files",
                      required=True,
                      type=str, nargs="+")
    args.add_argument("-l", "--cpu_extension",
                      help="Optional. Required for CPU custom layers. "
                           "MKLDNN (CPU)-targeted custom layers. Absolute path to a shared library with the"
                           " kernels implementations.", type=str, default=None)
    args.add_argument("-d", "--device",
                      help="Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL, MYRIAD or HETERO: is "
                           "acceptable. The sample will look for a suitable plugin for device specified. Default "
                           "value is CPU",
                      default="CPU", type=str)
    args.add_argument("--labels", help="Optional. Path to a labels mapping file", default=None, type=str)
    args.add_argument("-nt", "--number_top", help="Optional. Number of top results", default=10, type=int)

    return parser


def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    log.info("Creating Inference Engine")
    ie = IECore()
    if args.cpu_extension and 'CPU' in args.device:
        ie.add_extension(args.cpu_extension, "CPU")
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    if "CPU" in args.device:
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(args.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)

    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    net.batch_size = len(args.input)

    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape
    images = np.ndarray(shape=(n, c, h, w))
    for i in range(n):
        image = cv2.imread(args.input[i])
        if image.shape[:-1] != (h, w):
            log.warning("Image {} is resized from {} to {}".format(args.input[i], image.shape[:-1], (h, w)))
            image = cv2.resize(image, (w, h))
        image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        images[i] = image
    log.info("Batch size is {}".format(n))

    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = ie.load_network(network=net, device_name=args.device)

    # Start sync inference
    log.info("Starting inference in synchronous mode")
    res = exec_net.infer(inputs={input_blob: images})

    # Processing output blob
    log.info("Processing output blob")
    res = res[out_blob]
    log.info("Top {} results: ".format(args.number_top))
    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
    else:
        labels_map = None
    classid_str = "classid"
    probability_str = "probability"
    for i, probs in enumerate(res):
        probs = np.squeeze(probs)
        top_ind = np.argsort(probs)[-args.number_top:][::-1]
        print("Image {}\n".format(args.input[i]))
        print(classid_str, probability_str)
        print("{} {}".format('-' * len(classid_str), '-' * len(probability_str)))
        for id in top_ind:
            det_label = labels_map[id] if labels_map else "{}".format(id)
            label_length = len(det_label)
            space_num_before = (len(classid_str) - label_length) // 2
            space_num_after = len(classid_str) - (space_num_before + label_length) + 2
            space_num_before_prob = (len(probability_str) - len(str(probs[id]))) // 2
            print("{}{}{}{}{:.7f}".format(' ' * space_num_before, det_label,
                                          ' ' * space_num_after, ' ' * space_num_before_prob,
                                          probs[id]))
        print("\n")

    log.info("This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n")


if __name__ == '__main__':
    sys.exit(main() or 0)
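
(For reference: given the argument parser above, a typical invocation of this
sample looks roughly like the line below. The file names are the ones used by
the runtime tests in this commit; the .bin weights file is found implicitly
next to the .xml.)

    python3 classification_sample.py -d CPU -i chicky_512.png -m squeezenet_v1.1.xml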

lib/oeqa/runtime/miutils/dldtutils.py
@@ -0,0 +1,3 @@
def get_testdata_config(testdata, config):
    return testdata.get(config)
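
(A minimal sketch of this helper's behaviour, assuming testdata is the plain
dictionary of bitbake test data exposed as self.tc.td: dict.get returns None
for a missing key, which is what triggers the skipTest branches in the cases
above.)

    td = {'DLDT_PIP_PROXY': 'proxy.server:port'}   # hypothetical test data
    assert get_testdata_config(td, 'DLDT_PIP_PROXY') == 'proxy.server:port'
    assert get_testdata_config(td, 'DLDT_MO_EXE_DIR') is None  # unset, so tests skip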

lib/oeqa/runtime/miutils/targets/oeqatarget.py
@@ -0,0 +1,11 @@
class OEQATarget(object):

    def __init__(self, target):
        self.target = target

    def run(self, cmd):
        return self.target.run(cmd)

    def copy_to(self, source, destination_dir):
        self.target.copyTo(source, destination_dir)
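
(OEQATarget adapts the OEQA target object's run()/copyTo() methods to the
run()/copy_to() interface the miutils helpers use, so any object with those
two methods can stand in. A hedged sketch of a local stand-in for exercising
the helpers off-target; LocalTarget is hypothetical and not part of this
commit:)

    import subprocess

    class LocalTarget(object):
        def run(self, cmd):
            # Same (status, output) contract as the OEQA target's run()
            proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
            return (proc.returncode, proc.stdout + proc.stderr)

        def copyTo(self, source, destination_dir):
            # Plain local copy in place of a deploy to the device under test
            subprocess.run(['cp', source, destination_dir], check=True)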

lib/oeqa/runtime/miutils/tests/dldt_inference_engine_test.py
@@ -0,0 +1,48 @@
import os
script_path = os.path.dirname(os.path.realpath(__file__))
files_path = os.path.join(script_path, '../../files/')

class DldtInferenceEngineTest(object):
    ie_input_files = {'ie_python_sample': 'classification_sample.py',
                      'input': 'chicky_512.png',
                      'input_download': 'https://raw.githubusercontent.com/opencv/opencv/master/samples/data/chicky_512.png',
                      'model': 'squeezenet_v1.1.xml'}

    def __init__(self, target, work_dir):
        self.target = target
        self.work_dir = work_dir

    def setup(self):
        self.target.run('mkdir -p %s' % self.work_dir)
        self.target.copy_to(os.path.join(files_path, 'dldt-inference-engine', self.ie_input_files['ie_python_sample']),
                            self.work_dir)

    def tear_down(self):
        self.target.run('rm -rf %s' % self.work_dir)

    def test_can_download_input_file(self, proxy_port):
        return self.target.run('cd %s; wget %s -e https_proxy=%s' %
                               (self.work_dir,
                                self.ie_input_files['input_download'],
                                proxy_port))

    def test_dldt_ie_classification_with_device(self, device, ir_files_dir):
        return self.target.run('classification_sample_async -d %s -i %s -m %s' %
                               (device,
                                os.path.join(self.work_dir, self.ie_input_files['input']),
                                os.path.join(ir_files_dir, self.ie_input_files['model'])))

    def test_dldt_ie_classification_python_api_with_device(self, device, ir_files_dir, extension=''):
        if extension:
            return self.target.run('python3 %s -d %s -i %s -m %s -l %s' %
                                   (os.path.join(self.work_dir, self.ie_input_files['ie_python_sample']),
                                    device,
                                    os.path.join(self.work_dir, self.ie_input_files['input']),
                                    os.path.join(ir_files_dir, self.ie_input_files['model']),
                                    extension))
        else:
            return self.target.run('python3 %s -d %s -i %s -m %s' %
                                   (os.path.join(self.work_dir, self.ie_input_files['ie_python_sample']),
                                    device,
                                    os.path.join(self.work_dir, self.ie_input_files['input']),
                                    os.path.join(ir_files_dir, self.ie_input_files['model'])))
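
(Note the two code paths above: the plugin tests run the prebuilt
classification_sample_async binary shipped by dldt-inference-engine-samples,
while the Python API tests run the classification_sample.py copied over in
setup(). With the work directories used by the DldtInferenceEngine case, the
CPU Python variant expands to roughly:)

    python3 /tmp/ie/inputs/classification_sample.py -d CPU -i /tmp/ie/inputs/chicky_512.png -m /tmp/ie/ir/squeezenet_v1.1.xml -l libcpu_extension.so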

lib/oeqa/runtime/miutils/tests/dldt_model_optimizer_test.py
@@ -0,0 +1,23 @@
import os

class DldtModelOptimizerTest(object):
    mo_input_files = {'model': 'squeezenet_v1.1.caffemodel',
                      'prototxt': 'deploy.prototxt'}
    mo_exe = 'mo.py'

    def __init__(self, target, work_dir):
        self.target = target
        self.work_dir = work_dir

    def setup(self):
        self.target.run('mkdir -p %s' % self.work_dir)

    def tear_down(self):
        self.target.run('rm -rf %s' % self.work_dir)

    def test_dldt_mo_can_create_ir(self, mo_exe_dir, mo_files_dir):
        return self.target.run('python3 %s --input_model %s --input_proto %s --output_dir %s --data_type FP16' %
                               (os.path.join(mo_exe_dir, self.mo_exe),
                                os.path.join(mo_files_dir, self.mo_input_files['model']),
                                os.path.join(mo_files_dir, self.mo_input_files['prototxt']),
                                self.work_dir))
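
(With the inputs downloaded to /tmp/mo/md by the DldtModelOptimizer case
above, the helper expands to roughly the command below; mo.py is expected to
emit the IR pair squeezenet_v1.1.xml/.bin into --output_dir, which the
inference engine cases then consume as ir_files_dir. The mo_exe_dir prefix
comes from DLDT_MO_EXE_DIR:)

    python3 <mo_exe_dir>/mo.py --input_model /tmp/mo/md/squeezenet_v1.1.caffemodel --input_proto /tmp/mo/md/deploy.prototxt --output_dir /tmp/mo/ir --data_type FP16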

lib/oeqa/runtime/miutils/tests/squeezenet_model_download_test.py
@@ -0,0 +1,25 @@
class SqueezenetModelDownloadTest(object):
    download_files = {'squeezenet1.1.prototxt': 'https://raw.githubusercontent.com/DeepScale/SqueezeNet/a47b6f13d30985279789d08053d37013d67d131b/SqueezeNet_v1.1/deploy.prototxt',
                      'squeezenet1.1.caffemodel': 'https://github.com/DeepScale/SqueezeNet/raw/a47b6f13d30985279789d08053d37013d67d131b/SqueezeNet_v1.1/squeezenet_v1.1.caffemodel'}

    def __init__(self, target, work_dir):
        self.target = target
        self.work_dir = work_dir

    def setup(self):
        self.target.run('mkdir -p %s' % self.work_dir)

    def tear_down(self):
        self.target.run('rm -rf %s' % self.work_dir)

    def test_can_download_squeezenet_model(self, proxy_port):
        return self.target.run('cd %s; wget %s -e https_proxy=%s' %
                               (self.work_dir,
                                self.download_files['squeezenet1.1.caffemodel'],
                                proxy_port))

    def test_can_download_squeezenet_prototxt(self, proxy_port):
        return self.target.run('cd %s; wget %s -e https_proxy=%s' %
                               (self.work_dir,
                                self.download_files['squeezenet1.1.prototxt'],
                                proxy_port))