The default network configuration requires the tun/tap module; while usable, it sometimes conflicts with tap devices created by VPN clients and requires root permissions. Working around this is possible, but not always practical when no network is needed. Add a nonetwork option which can be specified when network connectivity is not required and SDL/serial is enough to communicate with the image. (From OE-Core rev: d4073dedbb234ff3c6bbebafc836fedf90d96569) Signed-off-by: Pavel Zhukov <pazhukov@suse.de> Signed-off-by: Luca Ceresoli <luca.ceresoli@bootlin.com> Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
1743 lines | 72 KiB | Python | Executable File
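For example (an illustrative invocation, assuming a qemux86-64 build with core-image-minimal deployed), the new option boots the image without any tap/tun or slirp setup:

    runqemu qemux86-64 core-image-minimal nographic nonetwork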
#!/usr/bin/env python3

# Handle running OE images standalone with QEMU
#
# Copyright (C) 2006-2011 Linux Foundation
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: GPL-2.0-only
#

import os
import sys
import logging
import subprocess
import re
import fcntl
import shutil
import glob
import configparser
import signal
import time
class RunQemuError(Exception):
    """Custom exception to raise on known errors."""
    pass

class OEPathError(RunQemuError):
    """Custom Exception to give better guidance on missing binaries"""
    def __init__(self, message):
        super().__init__("In order for this script to dynamically infer paths\n \
kernels or filesystem images, you either need bitbake in your PATH\n \
or to source oe-init-build-env before running this script.\n\n \
Dynamic path inference can be avoided by passing a *.qemuboot.conf to\n \
runqemu, i.e. `runqemu /path/to/my-image-name.qemuboot.conf`\n\n %s" % message)
def create_logger():
    logger = logging.getLogger('runqemu')
    logger.setLevel(logging.INFO)

    # create console handler and set level to debug
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)

    # create formatter
    formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')

    # add formatter to ch
    ch.setFormatter(formatter)

    # add ch to logger
    logger.addHandler(ch)

    return logger

logger = create_logger()
def print_usage():
    print("""
Usage: you can run this script with any valid combination
of the following environment variables (in any order):
  KERNEL - the kernel image file to use
  BIOS - the bios image file to use
  ROOTFS - the rootfs image file or nfsroot directory to use
  DEVICE_TREE - the device tree blob to use
  MACHINE - the machine name (optional, autodetected from KERNEL filename if unspecified)
  Simplified QEMU command-line options can be passed with:
    nographic - disable video console
    nonetwork - disable network connectivity
    novga - Disable VGA emulation completely
    sdl - choose the SDL UI frontend
    gtk - choose the Gtk UI frontend
    gl - enable virgl-based GL acceleration (also needs gtk or sdl options)
    gl-es - enable virgl-based GL acceleration, using OpenGL ES (also needs gtk or sdl options)
    egl-headless - enable headless EGL output; use vnc (via publicvnc option) or spice to see it
    (hint: if /dev/dri/renderD* is absent due to lack of suitable GPU, 'modprobe vgem' will create
    one suitable for mesa llvmpipe software renderer)
    serial - enable a serial console on /dev/ttyS0
    serialstdio - enable a serial console on the console (regardless of graphics mode)
    slirp - enable user networking, no root privilege is required
    snapshot - don't write changes back to images
    kvm - enable KVM when running x86/x86_64 (VT-capable CPU required)
    kvm-vhost - enable KVM with vhost when running x86/x86_64 (VT-capable CPU required)
    publicvnc - enable a VNC server open to all hosts
    audio - enable audio
    guestagent - enable guest agent communication
    [*/]ovmf* - OVMF firmware file or base name for booting with UEFI
  tcpserial=<port> - specify tcp serial port number
  qemuparams=<xyz> - specify custom parameters to QEMU
  bootparams=<xyz> - specify custom kernel parameters during boot
  help, -h, --help: print this text
  -d, --debug: Enable debug output
  -q, --quiet: Hide most output except error messages

Examples:
  runqemu
  runqemu qemuarm
  runqemu tmp/deploy/images/qemuarm
  runqemu tmp/deploy/images/qemux86/<qemuboot.conf>
  runqemu qemux86-64 core-image-sato ext4
  runqemu qemux86-64 wic-image-minimal wic
  runqemu path/to/bzImage-qemux86.bin path/to/nfsrootdir/ serial
  runqemu qemux86 iso/hddimg/wic.vmdk/wic.vhd/wic.vhdx/wic.qcow2/wic.vdi/ramfs/cpio.gz...
  runqemu qemux86 qemuparams="-m 256"
  runqemu qemux86 bootparams="psplash=false"
  runqemu path/to/<image>-<machine>.wic
  runqemu path/to/<image>-<machine>.wic.vmdk
  runqemu path/to/<image>-<machine>.wic.vhdx
  runqemu path/to/<image>-<machine>.wic.vhd
""")
def check_tun():
    """Check /dev/net/tun"""
    dev_tun = '/dev/net/tun'
    if not os.path.exists(dev_tun):
        raise RunQemuError("TUN control device %s is unavailable; you may need to enable TUN (e.g. sudo modprobe tun)" % dev_tun)

    if not os.access(dev_tun, os.W_OK):
        raise RunQemuError("TUN control device %s is not writable, please fix (e.g. sudo chmod 666 %s)" % (dev_tun, dev_tun))

def get_first_file(globs):
    """Return first file found in wildcard globs"""
    for g in globs:
        all_files = glob.glob(g)
        if all_files:
            for f in all_files:
                if not os.path.isdir(f):
                    return f
    return ''
class BaseConfig(object):
    def __init__(self):
        # The self.d saved vars from self.set(), part of them are from qemuboot.conf
        self.d = {'QB_KERNEL_ROOT': '/dev/vda'}

        # Supported env vars, add it here if a var can be got from env,
        # and don't use os.getenv in the code.
        self.env_vars = ('MACHINE',
                         'ROOTFS',
                         'KERNEL',
                         'BIOS',
                         'DEVICE_TREE',
                         'DEPLOY_DIR_IMAGE',
                         'OE_TMPDIR',
                         'OECORE_NATIVE_SYSROOT',
                         'MULTICONFIG',
                         'SERIAL_CONSOLES',
                         )

        self.qemu_opt = ''
        self.qemu_opt_script = ''
        self.qemuparams = ''
        self.nfs_server = ''
        self.rootfs = ''
        # File name(s) of a OVMF firmware file or variable store,
        # to be added with -drive if=pflash.
        # Found in the same places as the rootfs, with or without one of
        # these suffixes: qcow2, bin.
        self.ovmf_bios = []
        # When enrolling default Secure Boot keys, the hypervisor
        # must provide the Platform Key and the first Key Exchange Key
        # certificate in the Type 11 SMBIOS table.
        self.ovmf_secboot_pkkek1 = ''
        self.qemuboot = ''
        self.qbconfload = False
        self.kernel = ''
        self.bios = ''
        self.kernel_cmdline = ''
        self.kernel_cmdline_script = ''
        self.bootparams = ''
        self.dtb = ''
        self.fstype = ''
        self.kvm_enabled = False
        self.vhost_enabled = False
        self.slirp_enabled = False
        self.net_bridge = None
        self.nfs_instance = 0
        self.nfs_running = False
        self.serialconsole = False
        self.serialstdio = False
        self.nographic = False
        self.nonetwork = False
        self.sdl = False
        self.gtk = False
        self.gl = False
        self.gl_es = False
        self.egl_headless = False
        self.publicvnc = False
        self.novga = False
        self.cleantap = False
        self.saved_stty = ''
        self.audio_enabled = False
        self.tcpserial_portnum = ''
        self.taplock = ''
        self.taplock_descriptor = None
        self.portlocks = {}
        self.bitbake_e = ''
        self.snapshot = False
        self.wictypes = ('wic', 'wic.vmdk', 'wic.qcow2', 'wic.vdi', "wic.vhd", "wic.vhdx")
        self.fstypes = ('ext2', 'ext3', 'ext4', 'jffs2', 'nfs', 'btrfs',
                        'cpio.gz', 'cpio', 'ramfs', 'tar.bz2', 'tar.gz')
        self.vmtypes = ('hddimg', 'iso')
        self.fsinfo = {}
        self.network_device = "-device e1000,netdev=net0,mac=@MAC@"
        self.cmdline_ip_slirp = "ip=dhcp"
        self.cmdline_ip_tap = "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0:off:8.8.8.8"
        # Use different mac section for tap and slirp to avoid
        # conflicts, e.g., when one is running with tap, the other is
        # running with slirp.
        # The last section is dynamic, which is for avoiding conflicts,
        # when multiple qemus are running, e.g., when multiple tap or
        # slirp qemus are running.
        self.mac_tap = "52:54:00:12:34:"
        self.mac_slirp = "52:54:00:12:35:"
        # pid of the actual qemu process
        self.qemu_environ = os.environ.copy()
        self.qemuprocess = None
        # avoid cleanup twice
        self.cleaned = False
        # Files to cleanup after run
        self.cleanup_files = []
        self.guest_agent = False
        self.guest_agent_sockpath = '/tmp/qga.sock'
    def acquire_taplock(self, error=True):
        logger.debug("Acquiring lockfile %s..." % self.taplock)
        try:
            self.taplock_descriptor = open(self.taplock, 'w')
            fcntl.flock(self.taplock_descriptor, fcntl.LOCK_EX|fcntl.LOCK_NB)
        except Exception as e:
            msg = "Acquiring lockfile %s failed: %s" % (self.taplock, e)
            if error:
                logger.error(msg)
            else:
                logger.info(msg)
            if self.taplock_descriptor:
                self.taplock_descriptor.close()
                self.taplock_descriptor = None
            return False
        return True

    def release_taplock(self):
        if self.taplock_descriptor:
            logger.debug("Releasing lockfile for tap device '%s'" % self.tap)
            # We pass the fd to the qemu process and if we unlock here, it would unlock for
            # that too. Therefore don't unlock, just close
            # fcntl.flock(self.taplock_descriptor, fcntl.LOCK_UN)
            self.taplock_descriptor.close()
            # Removing the file is a potential race, don't do that either
            # os.remove(self.taplock)
            self.taplock_descriptor = None
    def check_free_port(self, host, port, lockdir):
        """ Check whether the port is free or not """
        import socket
        from contextlib import closing

        lockfile = os.path.join(lockdir, str(port) + '.lock')
        if self.acquire_portlock(lockfile):
            with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
                if sock.connect_ex((host, port)) == 0:
                    # Port is open, so not free
                    self.release_portlock(lockfile)
                    return False
                else:
                    # Port is not open, so free
                    return True
        else:
            return False

    def acquire_portlock(self, lockfile):
        logger.debug("Acquiring lockfile %s..." % lockfile)
        try:
            portlock_descriptor = open(lockfile, 'w')
            self.portlocks.update({lockfile: portlock_descriptor})
            fcntl.flock(self.portlocks[lockfile], fcntl.LOCK_EX|fcntl.LOCK_NB)
        except Exception as e:
            msg = "Acquiring lockfile %s failed: %s" % (lockfile, e)
            logger.info(msg)
            if lockfile in self.portlocks.keys() and self.portlocks[lockfile]:
                self.portlocks[lockfile].close()
                del self.portlocks[lockfile]
            return False
        return True

    def release_portlock(self, lockfile=None):
        if lockfile != None:
            logger.debug("Releasing lockfile '%s'" % lockfile)
            # We pass the fd to the qemu process and if we unlock here, it would unlock for
            # that too. Therefore don't unlock, just close
            # fcntl.flock(self.portlocks[lockfile], fcntl.LOCK_UN)
            self.portlocks[lockfile].close()
            # Removing the file is a potential race, don't do that either
            # os.remove(lockfile)
            del self.portlocks[lockfile]
        elif len(self.portlocks):
            for lockfile, descriptor in self.portlocks.items():
                logger.debug("Releasing lockfile '%s'" % lockfile)
                # We pass the fd to the qemu process and if we unlock here, it would unlock for
                # that too. Therefore don't unlock, just close
                # fcntl.flock(descriptor, fcntl.LOCK_UN)
                descriptor.close()
                # Removing the file is a potential race, don't do that either
                # os.remove(lockfile)
            self.portlocks = {}
    def get(self, key):
        if key in self.d:
            return self.d.get(key)
        elif os.getenv(key):
            return os.getenv(key)
        else:
            return ''

    def set(self, key, value):
        self.d[key] = value

    def is_deploy_dir_image(self, p):
        if os.path.isdir(p):
            if not re.search('.qemuboot.conf$', '\n'.join(os.listdir(p)), re.M):
                logger.debug("Can't find required *.qemuboot.conf in %s" % p)
                return False
            if not any(map(lambda name: '-image-' in name, os.listdir(p))):
                logger.debug("Can't find *-image-* in %s" % p)
                return False
            return True
        else:
            return False

    def check_arg_fstype(self, fst):
        """Check and set FSTYPE"""
        if fst not in self.fstypes + self.vmtypes + self.wictypes:
            logger.warning("Maybe unsupported FSTYPE: %s" % fst)
        if not self.fstype or self.fstype == fst:
            if fst == 'ramfs':
                fst = 'cpio.gz'
            if fst in ('tar.bz2', 'tar.gz'):
                fst = 'nfs'
            self.fstype = fst
        else:
            raise RunQemuError("Conflicting: FSTYPE %s and %s" % (self.fstype, fst))
    def set_machine_deploy_dir(self, machine, deploy_dir_image):
        """Set MACHINE and DEPLOY_DIR_IMAGE"""
        logger.debug('MACHINE: %s' % machine)
        self.set("MACHINE", machine)
        logger.debug('DEPLOY_DIR_IMAGE: %s' % deploy_dir_image)
        self.set("DEPLOY_DIR_IMAGE", deploy_dir_image)

    def check_arg_nfs(self, p):
        if os.path.isdir(p):
            self.rootfs = p
        else:
            m = re.match('(.*):(.*)', p)
            self.nfs_server = m.group(1)
            self.rootfs = m.group(2)
        self.check_arg_fstype('nfs')
    def check_arg_path(self, p):
        """
        - Check whether it is <image>.qemuboot.conf or contains <image>.qemuboot.conf
        - Check whether it is a kernel file
        - Check whether it is an image file
        - Check whether it is an NFS dir
        - Check whether it is an OVMF flash file
        """
        if p.endswith('.qemuboot.conf'):
            self.qemuboot = p
            self.qbconfload = True
        elif re.search('\.bin$', p) or re.search('bzImage', p) or \
             re.search('zImage', p) or re.search('vmlinux', p) or \
             re.search('fitImage', p) or re.search('uImage', p):
            self.kernel = p
        elif os.path.exists(p) and (not os.path.isdir(p)) and '-image-' in os.path.basename(p):
            self.rootfs = p
            # Check filename against self.fstypes can handle <file>.cpio.gz,
            # otherwise, its type would be "gz", which is incorrect.
            fst = ""
            for t in self.fstypes:
                if p.endswith(t):
                    fst = t
                    break
            if not fst:
                m = re.search('.*\.(.*)$', self.rootfs)
                if m:
                    fst = m.group(1)
            if fst:
                self.check_arg_fstype(fst)
                qb = re.sub('\.' + fst + "$", '.qemuboot.conf', self.rootfs)
                if os.path.exists(qb):
                    self.qemuboot = qb
                    self.qbconfload = True
                else:
                    logger.warning("%s doesn't exist, will try to remove '.rootfs' from filename" % qb)
                    # Try to remove .rootfs (IMAGE_NAME_SUFFIX) as well
                    qb = re.sub('\.rootfs.qemuboot.conf$', '.qemuboot.conf', qb)
                    if os.path.exists(qb):
                        self.qemuboot = qb
                        self.qbconfload = True
                    else:
                        logger.warning("%s doesn't exist" % qb)
            else:
                raise RunQemuError("Can't find FSTYPE from: %s" % p)

        elif os.path.isdir(p) or re.search(':', p) and re.search('/', p):
            if self.is_deploy_dir_image(p):
                logger.debug('DEPLOY_DIR_IMAGE: %s' % p)
                self.set("DEPLOY_DIR_IMAGE", p)
            else:
                logger.debug("Assuming %s is an nfs rootfs" % p)
                self.check_arg_nfs(p)
        elif os.path.basename(p).startswith('ovmf'):
            self.ovmf_bios.append(p)
        else:
            raise RunQemuError("Unknown path arg %s" % p)
    def check_arg_machine(self, arg):
        """Check whether it is a machine"""
        if self.get('MACHINE') == arg:
            return
        elif self.get('MACHINE') and self.get('MACHINE') != arg:
            raise RunQemuError("Maybe conflicted MACHINE: %s vs %s" % (self.get('MACHINE'), arg))
        elif re.search('/', arg):
            raise RunQemuError("Unknown arg: %s" % arg)

        logger.debug('Assuming MACHINE = %s' % arg)

        # if we're running under testimage, or similarly as a child
        # of an existing bitbake invocation, we can't invoke bitbake
        # to validate the MACHINE setting and must assume it's correct...
        # FIXME: testimage.bbclass exports these two variables into env,
        # are there other scenarios in which we need to support being
        # invoked by bitbake?
        deploy = self.get('DEPLOY_DIR_IMAGE')
        image_link_name = self.get('IMAGE_LINK_NAME')
        bbchild = deploy and self.get('OE_TMPDIR')
        if bbchild:
            self.set_machine_deploy_dir(arg, deploy)
            return
        # also check whether we're running under a sourced toolchain
        # environment file
        if self.get('OECORE_NATIVE_SYSROOT'):
            self.set("MACHINE", arg)
            return

        self.bitbake_e = self.run_bitbake_env(arg)
        # bitbake -e doesn't report invalid MACHINE as an error, so
        # let's check DEPLOY_DIR_IMAGE to make sure that it is a valid
        # MACHINE.
        s = re.search('^DEPLOY_DIR_IMAGE="(.*)"', self.bitbake_e, re.M)
        if s:
            deploy_dir_image = s.group(1)
        else:
            raise RunQemuError("bitbake -e %s" % self.bitbake_e)
        if self.is_deploy_dir_image(deploy_dir_image):
            self.set_machine_deploy_dir(arg, deploy_dir_image)
        else:
            logger.error("%s is not a valid DEPLOY_DIR_IMAGE directory" % deploy_dir_image)
            self.set("MACHINE", arg)
        if not image_link_name:
            s = re.search('^IMAGE_LINK_NAME="(.*)"', self.bitbake_e, re.M)
            if s:
                image_link_name = s.group(1)
                self.set("IMAGE_LINK_NAME", image_link_name)
                logger.debug('Using IMAGE_LINK_NAME = "%s"' % image_link_name)
    def set_dri_path(self):
        drivers_path = os.path.join(self.bindir_native, '../lib/dri')
        if not os.path.exists(drivers_path) or not os.listdir(drivers_path):
            raise RunQemuError("""
qemu has been built without opengl support and accelerated graphics support is not available.
To enable it, add:
DISTRO_FEATURES_NATIVE:append = " opengl"
DISTRO_FEATURES_NATIVESDK:append = " opengl"
to your build configuration.
""")
        self.qemu_environ['LIBGL_DRIVERS_PATH'] = drivers_path
    def check_args(self):
        for debug in ("-d", "--debug"):
            if debug in sys.argv:
                logger.setLevel(logging.DEBUG)
                sys.argv.remove(debug)

        for quiet in ("-q", "--quiet"):
            if quiet in sys.argv:
                logger.setLevel(logging.ERROR)
                sys.argv.remove(quiet)

        if 'gl' not in sys.argv[1:] and 'gl-es' not in sys.argv[1:]:
            self.qemu_environ['SDL_RENDER_DRIVER'] = 'software'
            self.qemu_environ['SDL_FRAMEBUFFER_ACCELERATION'] = 'false'

        unknown_arg = ""
        for arg in sys.argv[1:]:
            if arg in self.fstypes + self.vmtypes + self.wictypes:
                self.check_arg_fstype(arg)
            elif arg == 'nographic':
                self.nographic = True
            elif arg == "nonetwork":
                self.nonetwork = True
            elif arg == 'sdl':
                self.sdl = True
            elif arg == 'gtk':
                self.gtk = True
            elif arg == 'gl':
                self.gl = True
            elif arg == 'gl-es':
                self.gl_es = True
            elif arg == 'egl-headless':
                self.egl_headless = True
            elif arg == 'novga':
                self.novga = True
            elif arg == 'serial':
                self.serialconsole = True
            elif arg == "serialstdio":
                self.serialstdio = True
            elif arg == 'audio':
                logger.info("Enabling audio in qemu")
                logger.info("Please install sound drivers in linux host")
                self.audio_enabled = True
            elif arg == 'kvm':
                self.kvm_enabled = True
            elif arg == 'kvm-vhost':
                self.vhost_enabled = True
            elif arg == 'slirp':
                self.slirp_enabled = True
            elif arg.startswith('bridge='):
                self.net_bridge = '%s' % arg[len('bridge='):]
            elif arg == 'snapshot':
                self.snapshot = True
            elif arg == 'publicvnc':
                self.publicvnc = True
                self.qemu_opt_script += ' -vnc :0'
            elif arg == 'guestagent':
                self.guest_agent = True
            elif arg.startswith('guestagent-sockpath='):
                self.guest_agent_sockpath = '%s' % arg[len('guestagent-sockpath='):]
            elif arg.startswith('tcpserial='):
                self.tcpserial_portnum = '%s' % arg[len('tcpserial='):]
            elif arg.startswith('qemuparams='):
                self.qemuparams = ' %s' % arg[len('qemuparams='):]
            elif arg.startswith('bootparams='):
                self.bootparams = arg[len('bootparams='):]
            elif os.path.exists(arg) or (re.search(':', arg) and re.search('/', arg)):
                self.check_arg_path(os.path.abspath(arg))
            elif re.search(r'-image-|-image$', arg):
                # Lazy rootfs
                self.rootfs = arg
            elif arg.startswith('ovmf'):
                self.ovmf_bios.append(arg)
            else:
                # At last, assume it is the MACHINE
                if (not unknown_arg) or unknown_arg == arg:
                    unknown_arg = arg
                else:
                    raise RunQemuError("Can't handle two unknown args: %s %s\n"
                                       "Try 'runqemu help' on how to use it" % \
                                       (unknown_arg, arg))
        # Check to make sure it is a valid machine
        if unknown_arg and self.get('MACHINE') != unknown_arg:
            if self.get('DEPLOY_DIR_IMAGE'):
                machine = os.path.basename(self.get('DEPLOY_DIR_IMAGE'))
                if unknown_arg == machine:
                    self.set("MACHINE", machine)

            self.check_arg_machine(unknown_arg)

        if not (self.get('DEPLOY_DIR_IMAGE') or self.qbconfload):
            self.load_bitbake_env(target=self.rootfs)
            s = re.search('^DEPLOY_DIR_IMAGE="(.*)"', self.bitbake_e, re.M)
            if s:
                self.set("DEPLOY_DIR_IMAGE", s.group(1))

        if not self.get('IMAGE_LINK_NAME') and self.rootfs:
            s = re.search('^IMAGE_LINK_NAME="(.*)"', self.bitbake_e, re.M)
            if s:
                image_link_name = s.group(1)
                self.set("IMAGE_LINK_NAME", image_link_name)
                logger.debug('Using IMAGE_LINK_NAME = "%s"' % image_link_name)
    def check_kvm(self):
        """Check kvm and kvm-host"""
        if not (self.kvm_enabled or self.vhost_enabled):
            self.qemu_opt_script += ' %s %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU'), self.get('QB_SMP'))
            return

        if not self.get('QB_CPU_KVM'):
            raise RunQemuError("QB_CPU_KVM is NULL, this board doesn't support kvm")

        self.qemu_opt_script += ' %s %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU_KVM'), self.get('QB_SMP'))
        yocto_kvm_wiki = "https://wiki.yoctoproject.org/wiki/How_to_enable_KVM_for_Poky_qemu"
        yocto_paravirt_kvm_wiki = "https://wiki.yoctoproject.org/wiki/Running_an_x86_Yocto_Linux_image_under_QEMU_KVM"
        dev_kvm = '/dev/kvm'
        dev_vhost = '/dev/vhost-net'
        if self.qemu_system.endswith(('i386', 'x86_64')):
            with open('/proc/cpuinfo', 'r') as f:
                kvm_cap = re.search('vmx|svm', "".join(f.readlines()))
            if not kvm_cap:
                logger.error("You are trying to enable KVM on a cpu without VT support.")
                logger.error("Remove kvm from the command-line, or refer:")
                raise RunQemuError(yocto_kvm_wiki)

        if not os.path.exists(dev_kvm):
            logger.error("Missing KVM device. Have you inserted kvm modules?")
            logger.error("For further help see:")
            raise RunQemuError(yocto_kvm_wiki)

        if os.access(dev_kvm, os.W_OK|os.R_OK):
            self.qemu_opt_script += ' -enable-kvm'
        else:
            logger.error("You have no read or write permission on /dev/kvm.")
            logger.error("Please change the ownership of this file as described at:")
            raise RunQemuError(yocto_kvm_wiki)

        if self.vhost_enabled:
            if not os.path.exists(dev_vhost):
                logger.error("Missing virtio net device. Have you inserted vhost-net module?")
                logger.error("For further help see:")
                raise RunQemuError(yocto_paravirt_kvm_wiki)

            if not os.access(dev_vhost, os.W_OK|os.R_OK):
                logger.error("You have no read or write permission on /dev/vhost-net.")
                logger.error("Please change the ownership of this file as described at:")
                raise RunQemuError(yocto_paravirt_kvm_wiki)
    def check_fstype(self):
        """Check and setup FSTYPE"""
        if not self.fstype:
            fstype = self.get('QB_DEFAULT_FSTYPE')
            if fstype:
                self.fstype = fstype
            else:
                raise RunQemuError("FSTYPE is NULL!")

        # parse QB_FSINFO into dict, e.g. { 'wic': ['no-kernel-in-fs', 'a-flag'], 'ext4': ['another-flag']}
        wic_fs = False
        qb_fsinfo = self.get('QB_FSINFO')
        if qb_fsinfo:
            qb_fsinfo = qb_fsinfo.split()
            for fsinfo in qb_fsinfo:
                try:
                    fstype, fsflag = fsinfo.split(':')

                    if fstype == 'wic':
                        if fsflag == 'no-kernel-in-fs':
                            wic_fs = True
                        elif fsflag == 'kernel-in-fs':
                            wic_fs = False
                        else:
                            logger.warn('Unknown flag "%s:%s" in QB_FSINFO', fstype, fsflag)
                            continue
                    else:
                        logger.warn('QB_FSINFO is not supported for image type "%s"', fstype)
                        continue

                    if fstype in self.fsinfo:
                        self.fsinfo[fstype].append(fsflag)
                    else:
                        self.fsinfo[fstype] = [fsflag]
                except Exception:
                    logger.error('Invalid parameter "%s" in QB_FSINFO', fsinfo)

        # treat wic images as vmimages (with kernel) or as fsimages (rootfs only)
        if wic_fs:
            self.fstypes = self.fstypes + self.wictypes
        else:
            self.vmtypes = self.vmtypes + self.wictypes
    def check_rootfs(self):
        """Check and set rootfs"""

        if self.fstype == "none":
            return

        if self.get('ROOTFS'):
            if not self.rootfs:
                self.rootfs = self.get('ROOTFS')
            elif self.get('ROOTFS') != self.rootfs:
                raise RunQemuError("Maybe conflicted ROOTFS: %s vs %s" % (self.get('ROOTFS'), self.rootfs))

        if self.fstype == 'nfs':
            return

        if self.rootfs and not os.path.exists(self.rootfs):
            # Lazy rootfs
            self.rootfs = "%s/%s.%s" % (self.get('DEPLOY_DIR_IMAGE'),
                                        self.get('IMAGE_LINK_NAME'),
                                        self.fstype)
        elif not self.rootfs:
            glob_name = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_NAME'), self.fstype)
            glob_link = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME'), self.fstype)
            globs = (glob_name, glob_link)
            self.rootfs = get_first_file(globs)
            if not self.rootfs:
                raise RunQemuError("Failed to find rootfs: %s or %s" % globs)

        if not os.path.exists(self.rootfs):
            raise RunQemuError("Can't find rootfs: %s" % self.rootfs)
    def setup_pkkek1(self):
        """
        Extract from PEM certificate the Platform Key and first Key
        Exchange Key certificate string. The hypervisor needs to provide
        it in the Type 11 SMBIOS table
        """
        pemcert = '%s/%s' % (self.get('DEPLOY_DIR_IMAGE'), 'OvmfPkKek1.pem')
        try:
            with open(pemcert, 'r') as pemfile:
                key = pemfile.read().replace('\n', ''). \
                      replace('-----BEGIN CERTIFICATE-----', ''). \
                      replace('-----END CERTIFICATE-----', '')
                self.ovmf_secboot_pkkek1 = key

        except FileNotFoundError:
            raise RunQemuError("Can't open PEM certificate %s " % pemcert)

    def check_ovmf(self):
        """Check and set full path for OVMF firmware and variable file(s)."""

        for index, ovmf in enumerate(self.ovmf_bios):
            if os.path.exists(ovmf):
                continue
            for suffix in ('qcow2', 'bin'):
                path = '%s/%s.%s' % (self.get('DEPLOY_DIR_IMAGE'), ovmf, suffix)
                if os.path.exists(path):
                    self.ovmf_bios[index] = path
                    if ovmf.endswith('secboot'):
                        self.setup_pkkek1()
                    break
            else:
                raise RunQemuError("Can't find OVMF firmware: %s" % ovmf)
    def check_kernel(self):
        """Check and set kernel"""
        # The vm image doesn't need a kernel
        if self.fstype in self.vmtypes:
            return

        # See if the user supplied a KERNEL option
        if self.get('KERNEL'):
            self.kernel = self.get('KERNEL')

        # QB_DEFAULT_KERNEL is always a full file path
        kernel_name = os.path.basename(self.get('QB_DEFAULT_KERNEL'))

        # The user didn't want a kernel to be loaded
        if kernel_name == "none" and not self.kernel:
            return

        deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
        if not self.kernel:
            kernel_match_name = "%s/%s" % (deploy_dir_image, kernel_name)
            kernel_match_link = "%s/%s" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE'))
            kernel_startswith = "%s/%s*" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE'))
            globs = (kernel_match_name, kernel_match_link, kernel_startswith)
            self.kernel = get_first_file(globs)
            if not self.kernel:
                raise RunQemuError('KERNEL not found: %s, %s or %s' % globs)

        if not os.path.exists(self.kernel):
            raise RunQemuError("KERNEL %s not found" % self.kernel)

    def check_dtb(self):
        """Check and set dtb"""
        # Did the user specify a device tree?
        if self.get('DEVICE_TREE'):
            self.dtb = self.get('DEVICE_TREE')
            if not os.path.exists(self.dtb):
                raise RunQemuError('Specified DTB not found: %s' % self.dtb)
            return

        dtb = self.get('QB_DTB')
        if dtb:
            deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
            glob_match = "%s/%s" % (deploy_dir_image, dtb)
            glob_startswith = "%s/%s*" % (deploy_dir_image, dtb)
            glob_wild = "%s/*.dtb" % deploy_dir_image
            globs = (glob_match, glob_startswith, glob_wild)
            self.dtb = get_first_file(globs)
            if not os.path.exists(self.dtb):
                raise RunQemuError('DTB not found: %s, %s or %s' % globs)
    def check_bios(self):
        """Check and set bios"""

        # See if the user supplied a BIOS option
        if self.get('BIOS'):
            self.bios = self.get('BIOS')

        # QB_DEFAULT_BIOS is always a full file path
        bios_name = os.path.basename(self.get('QB_DEFAULT_BIOS'))

        # The user didn't want a bios to be loaded
        if (bios_name == "" or bios_name == "none") and not self.bios:
            return

        if not self.bios:
            deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
            self.bios = "%s/%s" % (deploy_dir_image, bios_name)

        if not self.bios:
            raise RunQemuError('BIOS not found: %s' % bios_match_name)

        if not os.path.exists(self.bios):
            raise RunQemuError("BIOS %s not found" % self.bios)
    def check_mem(self):
        """
        Both qemu and kernel needs memory settings, so check QB_MEM and set it
        for both.
        """
        s = re.search('-m +([0-9]+)', self.qemuparams)
        if s:
            self.set('QB_MEM', '-m %s' % s.group(1))
        elif not self.get('QB_MEM'):
            logger.info('QB_MEM is not set, use 256M by default')
            self.set('QB_MEM', '-m 256')

        # Check and remove M or m suffix
        qb_mem = self.get('QB_MEM')
        if qb_mem.endswith('M') or qb_mem.endswith('m'):
            qb_mem = qb_mem[:-1]

        # Add -m prefix if not present
        if not qb_mem.startswith('-m'):
            qb_mem = '-m %s' % qb_mem

        self.set('QB_MEM', qb_mem)

        mach = self.get('MACHINE')
        if not mach.startswith(('qemumips', 'qemux86', 'qemuloongarch64')):
            self.kernel_cmdline_script += ' mem=%s' % self.get('QB_MEM').replace('-m','').strip() + 'M'

        self.qemu_opt_script += ' %s' % self.get('QB_MEM')
    def check_tcpserial(self):
        if self.tcpserial_portnum:
            ports = self.tcpserial_portnum.split(':')
            port = ports[0]
            if self.get('QB_TCPSERIAL_OPT'):
                self.qemu_opt_script += ' ' + self.get('QB_TCPSERIAL_OPT').replace('@PORT@', port)
            else:
                self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % port

            if len(ports) > 1:
                for port in ports[1:]:
                    self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % port
    def check_and_set(self):
        """Check configs sanity and set when needed"""
        self.validate_paths()
        if not self.slirp_enabled and not self.net_bridge:
            check_tun()
        # Check audio
        if self.audio_enabled:
            if not self.get('QB_AUDIO_DRV'):
                raise RunQemuError("QB_AUDIO_DRV is NULL, this board doesn't support audio")
            if not self.get('QB_AUDIO_OPT'):
                logger.warning('QB_AUDIO_OPT is NULL, you may need to define it to make audio work')
            else:
                self.qemu_opt_script += ' %s' % self.get('QB_AUDIO_OPT')
            os.putenv('QEMU_AUDIO_DRV', self.get('QB_AUDIO_DRV'))
        else:
            os.putenv('QEMU_AUDIO_DRV', 'none')

        self.check_qemu_system()
        self.check_kvm()
        self.check_fstype()
        self.check_rootfs()
        self.check_ovmf()
        self.check_kernel()
        self.check_dtb()
        self.check_bios()
        self.check_mem()
        self.check_tcpserial()
    def read_qemuboot(self):
        if not self.qemuboot:
            if self.get('DEPLOY_DIR_IMAGE'):
                deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
            else:
                logger.warning("Can't find qemuboot conf file, DEPLOY_DIR_IMAGE is NULL!")
                return

            if self.rootfs and not os.path.exists(self.rootfs):
                # Lazy rootfs
                machine = self.get('MACHINE')
                if not machine:
                    machine = os.path.basename(deploy_dir_image)
                if not self.get('IMAGE_LINK_NAME'):
                    raise RunQemuError("IMAGE_LINK_NAME wasn't set to find corresponding .qemuboot.conf file")
                self.qemuboot = "%s/%s.qemuboot.conf" % (deploy_dir_image,
                                                         self.get('IMAGE_LINK_NAME'))
            else:
                cmd = 'ls -t %s/*.qemuboot.conf' % deploy_dir_image
                logger.debug('Running %s...' % cmd)
                try:
                    qbs = subprocess.check_output(cmd, shell=True).decode('utf-8')
                except subprocess.CalledProcessError as err:
                    raise RunQemuError(err)
                if qbs:
                    for qb in qbs.split():
                        # Don't use initramfs when other choices unless fstype is ramfs
                        if '-initramfs-' in os.path.basename(qb) and self.fstype != 'cpio.gz':
                            continue
                        self.qemuboot = qb
                        break
                    if not self.qemuboot:
                        # Use the first one when no choice
                        self.qemuboot = qbs.split()[0]
                    self.qbconfload = True

        if not self.qemuboot:
            # If we haven't found a .qemuboot.conf at this point it probably
            # doesn't exist, continue without
            return

        if not os.path.exists(self.qemuboot):
            raise RunQemuError("Failed to find %s (wrong image name or BSP does not support running under qemu?)." % self.qemuboot)

        logger.debug('CONFFILE: %s' % self.qemuboot)

        cf = configparser.ConfigParser()
        cf.read(self.qemuboot)
        for k, v in cf.items('config_bsp'):
            k_upper = k.upper()
            if v.startswith("../"):
                v = os.path.abspath(os.path.dirname(self.qemuboot) + "/" + v)
            elif v == ".":
                v = os.path.dirname(self.qemuboot)
            self.set(k_upper, v)
    def validate_paths(self):
        """Ensure all relevant path variables are set"""
        # When we're started with a *.qemuboot.conf arg assume that image
        # artefacts are relative to that file, rather than in whatever
        # directory DEPLOY_DIR_IMAGE in the conf file points to.
        if self.qbconfload:
            imgdir = os.path.realpath(os.path.dirname(self.qemuboot))
            if imgdir != os.path.realpath(self.get('DEPLOY_DIR_IMAGE')):
                logger.info('Setting DEPLOY_DIR_IMAGE to folder containing %s (%s)' % (self.qemuboot, imgdir))
                self.set('DEPLOY_DIR_IMAGE', imgdir)

        # If the STAGING_*_NATIVE directories from the config file don't exist
        # and we're in a sourced OE build directory try to extract the paths
        # from `bitbake -e`
        havenative = os.path.exists(self.get('STAGING_DIR_NATIVE')) and \
                     os.path.exists(self.get('STAGING_BINDIR_NATIVE'))

        if not havenative:
            if not self.bitbake_e:
                self.load_bitbake_env()

            if self.bitbake_e:
                native_vars = ['STAGING_DIR_NATIVE']
                for nv in native_vars:
                    s = re.search('^%s="(.*)"' % nv, self.bitbake_e, re.M)
                    if s and s.group(1) != self.get(nv):
                        logger.info('Overriding conf file setting of %s to %s from Bitbake environment' % (nv, s.group(1)))
                        self.set(nv, s.group(1))
            else:
                # when we're invoked from a running bitbake instance we won't
                # be able to call `bitbake -e`, then try:
                # - get OE_TMPDIR from environment and guess paths based on it
                # - get OECORE_NATIVE_SYSROOT from environment (for sdk)
                tmpdir = self.get('OE_TMPDIR')
                oecore_native_sysroot = self.get('OECORE_NATIVE_SYSROOT')
                if tmpdir:
                    logger.info('Setting STAGING_DIR_NATIVE and STAGING_BINDIR_NATIVE relative to OE_TMPDIR (%s)' % tmpdir)
                    hostos, _, _, _, machine = os.uname()
                    buildsys = '%s-%s' % (machine, hostos.lower())
                    staging_dir_native = '%s/sysroots/%s' % (tmpdir, buildsys)
                    self.set('STAGING_DIR_NATIVE', staging_dir_native)
                elif oecore_native_sysroot:
                    logger.info('Setting STAGING_DIR_NATIVE to OECORE_NATIVE_SYSROOT (%s)' % oecore_native_sysroot)
                    self.set('STAGING_DIR_NATIVE', oecore_native_sysroot)
                if self.get('STAGING_DIR_NATIVE'):
                    # we have to assume that STAGING_BINDIR_NATIVE is at usr/bin
                    staging_bindir_native = '%s/usr/bin' % self.get('STAGING_DIR_NATIVE')
                    logger.info('Setting STAGING_BINDIR_NATIVE to %s' % staging_bindir_native)
                    self.set('STAGING_BINDIR_NATIVE', '%s/usr/bin' % self.get('STAGING_DIR_NATIVE'))
    def print_config(self):
        logoutput = ['Continuing with the following parameters:']
        if not self.fstype in self.vmtypes:
            logoutput.append('KERNEL: [%s]' % self.kernel)
            if self.bios:
                logoutput.append('BIOS: [%s]' % self.bios)
            if self.dtb:
                logoutput.append('DTB: [%s]' % self.dtb)
        logoutput.append('MACHINE: [%s]' % self.get('MACHINE'))
        try:
            fstype_flags = ' (' + ', '.join(self.fsinfo[self.fstype]) + ')'
        except KeyError:
            fstype_flags = ''
        logoutput.append('FSTYPE: [%s%s]' % (self.fstype, fstype_flags))
        if self.fstype == 'nfs':
            logoutput.append('NFS_DIR: [%s]' % self.rootfs)
        else:
            logoutput.append('ROOTFS: [%s]' % self.rootfs)
        if self.ovmf_bios:
            logoutput.append('OVMF: %s' % self.ovmf_bios)
        if (self.ovmf_secboot_pkkek1):
            logoutput.append('SECBOOT PKKEK1: [%s...]' % self.ovmf_secboot_pkkek1[0:100])
        logoutput.append('CONFFILE: [%s]' % self.qemuboot)
        logoutput.append('')
        logger.info('\n'.join(logoutput))
    def setup_nfs(self):
        if not self.nfs_server:
            if self.slirp_enabled:
                self.nfs_server = '10.0.2.2'
            else:
                self.nfs_server = '192.168.7.@GATEWAY@'

        # Figure out a new nfs_instance to allow multiple qemus running.
        ps = subprocess.check_output(("ps", "auxww")).decode('utf-8')
        pattern = '/bin/unfsd .* -i .*\.pid -e .*/exports([0-9]+) '
        all_instances = re.findall(pattern, ps, re.M)
        if all_instances:
            all_instances.sort(key=int)
            self.nfs_instance = int(all_instances.pop()) + 1

        nfsd_port = 3049 + 2 * self.nfs_instance
        mountd_port = 3048 + 2 * self.nfs_instance

        # Export vars for runqemu-export-rootfs
        export_dict = {
            'NFS_INSTANCE': self.nfs_instance,
            'NFSD_PORT': nfsd_port,
            'MOUNTD_PORT': mountd_port,
        }
        for k, v in export_dict.items():
            # Use '%s' since they are integers
            os.putenv(k, '%s' % v)

        qb_nfsrootfs_extra_opt = self.get("QB_NFSROOTFS_EXTRA_OPT")
        if qb_nfsrootfs_extra_opt and not qb_nfsrootfs_extra_opt.startswith(","):
            qb_nfsrootfs_extra_opt = "," + qb_nfsrootfs_extra_opt

        self.unfs_opts="nfsvers=3,port=%s,tcp,mountport=%s%s" % (nfsd_port, mountd_port, qb_nfsrootfs_extra_opt)

        # Extract .tar.bz2 or .tar.bz if no nfs dir
        if not (self.rootfs and os.path.isdir(self.rootfs)):
            src_prefix = '%s/%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME'))
            dest = "%s-nfsroot" % src_prefix
            if os.path.exists('%s.pseudo_state' % dest):
                logger.info('Use %s as NFS_DIR' % dest)
                self.rootfs = dest
            else:
                src = ""
                src1 = '%s.tar.bz2' % src_prefix
                src2 = '%s.tar.gz' % src_prefix
                if os.path.exists(src1):
                    src = src1
                elif os.path.exists(src2):
                    src = src2
                if not src:
                    raise RunQemuError("No NFS_DIR is set, and can't find %s or %s to extract" % (src1, src2))
                logger.info('NFS_DIR not found, extracting %s to %s' % (src, dest))
                cmd = ('runqemu-extract-sdk', src, dest)
                logger.info('Running %s...' % str(cmd))
                if subprocess.call(cmd) != 0:
                    raise RunQemuError('Failed to run %s' % str(cmd))
                self.rootfs = dest
                self.cleanup_files.append(self.rootfs)
                self.cleanup_files.append('%s.pseudo_state' % self.rootfs)

        # Start the userspace NFS server
        cmd = ('runqemu-export-rootfs', 'start', self.rootfs)
        logger.info('Running %s...' % str(cmd))
        if subprocess.call(cmd) != 0:
            raise RunQemuError('Failed to run %s' % str(cmd))

        self.nfs_running = True
    def setup_cmd(self):
        cmd = self.get('QB_SETUP_CMD')
        if cmd != '':
            logger.info('Running setup command %s' % str(cmd))
            if subprocess.call(cmd, shell=True) != 0:
                raise RunQemuError('Failed to run %s' % str(cmd))

    def setup_net_bridge(self):
        self.set('NETWORK_CMD', '-netdev bridge,br=%s,id=net0,helper=%s -device virtio-net-pci,netdev=net0 ' % (
            self.net_bridge, os.path.join(self.bindir_native, 'qemu-oe-bridge-helper')))
    def setup_slirp(self):
        """Setup user networking"""

        if self.fstype == 'nfs':
            self.setup_nfs()
        netconf = " " + self.cmdline_ip_slirp
        logger.info("Network configuration:%s", netconf)
        self.kernel_cmdline_script += netconf
        # Port mapping
        hostfwd = ",hostfwd=tcp:127.0.0.1:2222-:22,hostfwd=tcp:127.0.0.1:2323-:23"
        qb_slirp_opt_default = "-netdev user,id=net0%s,tftp=%s" % (hostfwd, self.get('DEPLOY_DIR_IMAGE'))
        qb_slirp_opt = self.get('QB_SLIRP_OPT') or qb_slirp_opt_default
        # Figure out the port
        ports = re.findall('hostfwd=[^-]*:([0-9]+)-[^,-]*', qb_slirp_opt)
        ports = [int(i) for i in ports]
        mac = 2

        lockdir = "/tmp/qemu-port-locks"
        if not os.path.exists(lockdir):
            # There might be a race issue when multiple runqemu processes are
            # running at the same time.
            try:
                os.mkdir(lockdir)
                os.chmod(lockdir, 0o777)
            except FileExistsError:
                pass

        # Find a free port to avoid conflicts
        for p in ports[:]:
            p_new = p
            while not self.check_free_port('localhost', p_new, lockdir):
                p_new += 1
                mac += 1
                while p_new in ports:
                    p_new += 1
                    mac += 1
            if p != p_new:
                ports.append(p_new)
                qb_slirp_opt = re.sub(':%s-' % p, ':%s-' % p_new, qb_slirp_opt)
                logger.info("Port forward changed: %s -> %s" % (p, p_new))
        mac = "%s%02x" % (self.mac_slirp, mac)
        self.set('NETWORK_CMD', '%s %s' % (self.network_device.replace('@MAC@', mac), qb_slirp_opt))
        # Print out port forward
        hostfwd = re.findall('(hostfwd=[^,]*)', qb_slirp_opt)
        if hostfwd:
            logger.info('Port forward: %s' % ' '.join(hostfwd))
    def setup_tap(self):
        """Setup tap"""

        # This file is created when runqemu-gen-tapdevs creates a bank of tap
        # devices, indicating that the user should not bring up new ones using
        # sudo.
        nosudo_flag = '/etc/runqemu-nosudo'
        self.qemuifup = shutil.which('runqemu-ifup')
        self.qemuifdown = shutil.which('runqemu-ifdown')
        ip = shutil.which('ip')
        lockdir = "/tmp/qemu-tap-locks"

        if not (self.qemuifup and self.qemuifdown and ip):
            logger.error("runqemu-ifup: %s" % self.qemuifup)
            logger.error("runqemu-ifdown: %s" % self.qemuifdown)
            logger.error("ip: %s" % ip)
            raise OEPathError("runqemu-ifup, runqemu-ifdown or ip not found")

        if not os.path.exists(lockdir):
            # There might be a race issue when multiple runqemu processes are
            # running at the same time.
            try:
                os.mkdir(lockdir)
                os.chmod(lockdir, 0o777)
            except FileExistsError:
                pass

        cmd = (ip, 'link')
        logger.debug('Running %s...' % str(cmd))
        ip_link = subprocess.check_output(cmd).decode('utf-8')
        # Matches line like: 6: tap0: <foo>
        possibles = re.findall('^[0-9]+: +(tap[0-9]+): <.*', ip_link, re.M)
        tap = ""
        for p in possibles:
            lockfile = os.path.join(lockdir, p)
            if os.path.exists('%s.skip' % lockfile):
                logger.info('Found %s.skip, skipping %s' % (lockfile, p))
                continue
            self.taplock = lockfile + '.lock'
            if self.acquire_taplock(error=False):
                tap = p
                logger.info("Using preconfigured tap device %s" % tap)
                logger.info("If this is not intended, touch %s.skip to make runqemu skip %s." %(lockfile, tap))
                break

        if not tap:
            if os.path.exists(nosudo_flag):
                logger.error("Error: There are no available tap devices to use for networking,")
                logger.error("and I see %s exists, so I am not going to try creating" % nosudo_flag)
                raise RunQemuError("a new one with sudo.")

            gid = os.getgid()
            uid = os.getuid()
            logger.info("Setting up tap interface under sudo")
            cmd = ('sudo', self.qemuifup, str(uid), str(gid), self.bindir_native)
            try:
                tap = subprocess.check_output(cmd).decode('utf-8').strip()
            except subprocess.CalledProcessError as e:
                logger.error('Setting up tap device failed:\n%s\nRun runqemu-gen-tapdevs to manually create one.' % str(e))
                sys.exit(1)
            lockfile = os.path.join(lockdir, tap)
            self.taplock = lockfile + '.lock'
            self.acquire_taplock()
            self.cleantap = True
            logger.debug('Created tap: %s' % tap)

        if not tap:
            logger.error("Failed to setup tap device. Run runqemu-gen-tapdevs to manually create.")
            sys.exit(1)
        self.tap = tap
        tapnum = int(tap[3:])
        gateway = tapnum * 2 + 1
        client = gateway + 1
        if self.fstype == 'nfs':
            self.setup_nfs()
        netconf = " " + self.cmdline_ip_tap
        netconf = netconf.replace('@CLIENT@', str(client))
        netconf = netconf.replace('@GATEWAY@', str(gateway))
        self.nfs_server = self.nfs_server.replace('@GATEWAY@', str(gateway))
        logger.info("Network configuration:%s", netconf)
        self.kernel_cmdline_script += netconf
        mac = "%s%02x" % (self.mac_tap, client)
        qb_tap_opt = self.get('QB_TAP_OPT')
        if qb_tap_opt:
            qemu_tap_opt = qb_tap_opt.replace('@TAP@', tap)
        else:
            qemu_tap_opt = "-netdev tap,id=net0,ifname=%s,script=no,downscript=no" % (self.tap)

        if self.vhost_enabled:
            qemu_tap_opt += ',vhost=on'

        self.set('NETWORK_CMD', '%s %s' % (self.network_device.replace('@MAC@', mac), qemu_tap_opt))
    def setup_network(self):
        if self.nonetwork or self.get('QB_NET') == 'none':
            self.set('NETWORK_CMD', '-nic none')
            return
        if sys.stdin.isatty():
            self.saved_stty = subprocess.check_output(("stty", "-g")).decode('utf-8').strip()
        self.network_device = self.get('QB_NETWORK_DEVICE') or self.network_device
        if self.net_bridge:
            self.setup_net_bridge()
        elif self.slirp_enabled:
            self.cmdline_ip_slirp = self.get('QB_CMDLINE_IP_SLIRP') or self.cmdline_ip_slirp
            self.setup_slirp()
        else:
            self.cmdline_ip_tap = self.get('QB_CMDLINE_IP_TAP') or self.cmdline_ip_tap
            self.setup_tap()
    def setup_rootfs(self):
        if self.get('QB_ROOTFS') == 'none':
            return
        if 'wic.' in self.fstype:
            self.fstype = self.fstype[4:]
        rootfs_format = self.fstype if self.fstype in ('vmdk', 'vhd', 'vhdx', 'qcow2', 'vdi') else 'raw'

        tmpfsdir = os.environ.get("RUNQEMU_TMPFS_DIR", None)
        if self.snapshot and tmpfsdir:
            newrootfs = os.path.join(tmpfsdir, os.path.basename(self.rootfs)) + "." + str(os.getpid())
            logger.info("Copying rootfs to %s" % newrootfs)
            copy_start = time.time()
            shutil.copyfile(self.rootfs, newrootfs)
            logger.info("Copy done in %s seconds" % (time.time() - copy_start))
            self.rootfs = newrootfs
            # Don't need a second copy now!
            self.snapshot = False
            self.cleanup_files.append(newrootfs)

        qb_rootfs_opt = self.get('QB_ROOTFS_OPT')
        if qb_rootfs_opt:
            self.rootfs_options = qb_rootfs_opt.replace('@ROOTFS@', self.rootfs)
        else:
            self.rootfs_options = '-drive file=%s,if=virtio,format=%s' % (self.rootfs, rootfs_format)

        qb_rootfs_extra_opt = self.get("QB_ROOTFS_EXTRA_OPT")
        if qb_rootfs_extra_opt and not qb_rootfs_extra_opt.startswith(","):
            qb_rootfs_extra_opt = "," + qb_rootfs_extra_opt

        if self.fstype in ('cpio.gz', 'cpio'):
            self.kernel_cmdline = 'root=/dev/ram0 rw debugshell'
            self.rootfs_options = '-initrd %s' % self.rootfs
        else:
            vm_drive = ''
            if self.fstype in self.vmtypes:
                if self.fstype == 'iso':
                    vm_drive = '-drive file=%s,if=virtio,media=cdrom' % self.rootfs
                elif self.get('QB_DRIVE_TYPE'):
                    drive_type = self.get('QB_DRIVE_TYPE')
                    if drive_type.startswith("/dev/sd"):
                        logger.info('Using scsi drive')
                        vm_drive = '-drive if=none,id=hd,file=%s,format=%s -device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd%s' \
                                   % (self.rootfs, rootfs_format, qb_rootfs_extra_opt)
                    elif drive_type.startswith("/dev/hd"):
                        logger.info('Using ide drive')
                        vm_drive = "-drive file=%s,format=%s" % (self.rootfs, rootfs_format)
                    elif drive_type.startswith("/dev/vdb"):
                        logger.info('Using block virtio drive');
                        vm_drive = '-drive id=disk0,file=%s,if=none,format=%s -device virtio-blk-device,drive=disk0%s' \
                                   % (self.rootfs, rootfs_format,qb_rootfs_extra_opt)
                    else:
                        # virtio might have been selected explicitly (just use it), or
                        # is used as fallback (then warn about that).
                        if not drive_type.startswith("/dev/vd"):
                            logger.warning("Unknown QB_DRIVE_TYPE: %s" % drive_type)
                            logger.warning("Failed to figure out drive type, consider defining or fixing QB_DRIVE_TYPE")
                            logger.warning('Trying to use virtio block drive')
                        vm_drive = '-drive if=virtio,file=%s,format=%s' % (self.rootfs, rootfs_format)

                # All branches above set vm_drive.
                self.rootfs_options = vm_drive
            if not self.fstype in self.vmtypes:
                self.rootfs_options += ' -no-reboot'

            # By default, ' rw' is appended to QB_KERNEL_ROOT unless either ro or rw is explicitly passed.
            qb_kernel_root = self.get('QB_KERNEL_ROOT')
            qb_kernel_root_l = qb_kernel_root.split()
            if not ('ro' in qb_kernel_root_l or 'rw' in qb_kernel_root_l):
                qb_kernel_root += ' rw'
            self.kernel_cmdline = 'root=%s' % qb_kernel_root

        if self.fstype == 'nfs':
            self.rootfs_options = ''
            k_root = '/dev/nfs nfsroot=%s:%s,%s' % (self.nfs_server, os.path.abspath(self.rootfs), self.unfs_opts)
            self.kernel_cmdline = 'root=%s rw' % k_root

        if self.fstype == 'none':
            self.rootfs_options = ''

        self.set('ROOTFS_OPTIONS', self.rootfs_options)
    def guess_qb_system(self):
        """attempt to determine the appropriate qemu-system binary"""
        mach = self.get('MACHINE')
        if not mach:
            search = '.*(qemux86-64|qemux86|qemuarm64|qemuarm|qemuloongarch64|qemumips64|qemumips64el|qemumipsel|qemumips|qemuppc).*'
            if self.rootfs:
                match = re.match(search, self.rootfs)
                if match:
                    mach = match.group(1)
            elif self.kernel:
                match = re.match(search, self.kernel)
                if match:
                    mach = match.group(1)

        if not mach:
            return None

        if mach == 'qemuarm':
            qbsys = 'arm'
        elif mach == 'qemuarm64':
            qbsys = 'aarch64'
        elif mach == 'qemux86':
            qbsys = 'i386'
        elif mach == 'qemux86-64':
            qbsys = 'x86_64'
        elif mach == 'qemuppc':
            qbsys = 'ppc'
        elif mach == 'qemuloongarch64':
            qbsys = 'loongarch64'
        elif mach == 'qemumips':
            qbsys = 'mips'
        elif mach == 'qemumips64':
            qbsys = 'mips64'
        elif mach == 'qemumipsel':
            qbsys = 'mipsel'
        elif mach == 'qemumips64el':
            qbsys = 'mips64el'
        elif mach == 'qemuriscv64':
            qbsys = 'riscv64'
        elif mach == 'qemuriscv32':
            qbsys = 'riscv32'
        else:
            logger.error("Unable to determine QEMU PC System emulator for %s machine." % mach)
            logger.error("As %s is not among valid QEMU machines such as," % mach)
            logger.error("qemux86-64, qemux86, qemuarm64, qemuarm, qemumips64, qemumips64el, qemumipsel, qemumips, qemuppc")
            raise RunQemuError("Set qb_system_name with suitable QEMU PC System emulator in .*qemuboot.conf.")

        return 'qemu-system-%s' % qbsys
    def check_qemu_system(self):
        qemu_system = self.get('QB_SYSTEM_NAME')
        if not qemu_system:
            qemu_system = self.guess_qb_system()
        if not qemu_system:
            raise RunQemuError("Failed to boot, QB_SYSTEM_NAME is NULL!")
        self.qemu_system = qemu_system

    def check_render_nodes(self):
        render_hint = """If /dev/dri/renderD* is absent due to lack of suitable GPU, 'modprobe vgem' will create one suitable for mesa llvmpipe software renderer."""
        try:
            content = os.listdir("/dev/dri")
            if len([i for i in content if i.startswith('render')]) == 0:
                raise RunQemuError("No render nodes found in /dev/dri: %s. %s" %(content, render_hint))
        except FileNotFoundError:
            raise RunQemuError("/dev/dri directory does not exist; no render nodes available on this machine. %s" %(render_hint))
    def setup_guest_agent(self):
        if self.guest_agent == True:
            self.qemu_opt += ' -chardev socket,path=' + self.guest_agent_sockpath + ',server,nowait,id=qga0 '
            self.qemu_opt += ' -device virtio-serial '
            self.qemu_opt += ' -device virtserialport,chardev=qga0,name=org.qemu.guest_agent.0 '
    def setup_vga(self):
        if self.nographic == True:
            if self.sdl == True:
                raise RunQemuError('Option nographic makes no sense alongside the sdl option.')
            if self.gtk == True:
                raise RunQemuError('Option nographic makes no sense alongside the gtk option.')
            self.qemu_opt += ' -nographic'

        if self.novga == True:
            self.qemu_opt += ' -vga none'
            return

        if (self.gl_es == True or self.gl == True) and (self.sdl == False and self.gtk == False):
            raise RunQemuError('Option gl/gl-es needs gtk or sdl option.')

        # If we have no display option, we autodetect based upon what qemu supports. We
        # need our font setup and show-cursor below so we need to see what qemu --help says
        # is supported so we can pass our correct config in.
        if not self.nographic and not self.sdl and not self.gtk and not self.publicvnc and not self.egl_headless == True:
            output = subprocess.check_output([self.qemu_bin, "--help"], universal_newlines=True, env=self.qemu_environ)
            if "-display gtk" in output:
                self.gtk = True
            elif "-display sdl" in output:
                self.sdl = True
            else:
                self.qemu_opt += ' -display none'

        if self.sdl == True or self.gtk == True or self.egl_headless == True:

            if self.qemu_system.endswith(('i386', 'x86_64')):
                if self.gl or self.gl_es or self.egl_headless:
                    self.qemu_opt += ' -device virtio-vga-gl '
                else:
                    self.qemu_opt += ' -device virtio-vga '

            self.qemu_opt += ' -display '
            if self.egl_headless == True:
                self.check_render_nodes()
                self.set_dri_path()
                self.qemu_opt += 'egl-headless,'
            else:
                if self.sdl == True:
                    self.qemu_opt += 'sdl,'
                elif self.gtk == True:
                    self.qemu_environ['FONTCONFIG_PATH'] = '/etc/fonts'
                    self.qemu_opt += 'gtk,'

                if self.gl == True:
                    self.set_dri_path()
                    self.qemu_opt += 'gl=on,'
                elif self.gl_es == True:
                    self.set_dri_path()
                    self.qemu_opt += 'gl=es,'
            self.qemu_opt += 'show-cursor=on'

        self.qemu_opt += ' %s' %self.get('QB_GRAPHICS')
    def setup_serial(self):
        # Setup correct kernel command line for serial
        if self.get('SERIAL_CONSOLES') and (self.serialstdio == True or self.serialconsole == True or self.nographic == True or self.tcpserial_portnum):
            for entry in self.get('SERIAL_CONSOLES').split(' '):
                self.kernel_cmdline_script += ' console=%s' %entry.split(';')[1]

        if self.serialstdio == True or self.nographic == True:
            self.qemu_opt += " -serial mon:stdio"
        else:
            self.qemu_opt += " -serial mon:vc"
        if self.serialconsole:
            if sys.stdin.isatty():
                subprocess.check_call(("stty", "intr", "^]"))
                logger.info("Interrupt character is '^]'")

            self.qemu_opt += " %s" % self.get("QB_SERIAL_OPT")

        # We always want ttyS0 and ttyS1 in qemu machines (see SERIAL_CONSOLES).
        # If no serial or serialtcp options were specified, only ttyS0 is created
        # and sysvinit shows an error trying to enable ttyS1:
        #     INIT: Id "S1" respawning too fast: disabled for 5 minutes
        serial_num = len(re.findall("-serial", self.qemu_opt))
        if serial_num < 2:
            self.qemu_opt += " -serial null"
    def find_qemu(self):
        qemu_bin = os.path.join(self.bindir_native, self.qemu_system)

        # It is possible to have qemu-native in ASSUME_PROVIDED, and it won't
        # find QEMU in sysroot, it needs to use host's qemu.
        if not os.path.exists(qemu_bin):
            logger.info("QEMU binary not found in %s, trying host's QEMU" % qemu_bin)
            for path in (os.environ['PATH'] or '').split(':'):
                qemu_bin_tmp = os.path.join(path, self.qemu_system)
                logger.info("Trying: %s" % qemu_bin_tmp)
                if os.path.exists(qemu_bin_tmp):
                    qemu_bin = qemu_bin_tmp
                    if not os.path.isabs(qemu_bin):
                        qemu_bin = os.path.abspath(qemu_bin)
                    logger.info("Using host's QEMU: %s" % qemu_bin)
                    break

        if not os.access(qemu_bin, os.X_OK):
            raise OEPathError("No QEMU binary '%s' could be found" % qemu_bin)
        self.qemu_bin = qemu_bin
    def setup_final(self):

        self.find_qemu()

        self.qemu_opt = "%s %s %s %s %s" % (self.qemu_bin, self.get('NETWORK_CMD'), self.get('QB_RNG'), self.get('ROOTFS_OPTIONS'), self.get('QB_OPT_APPEND').replace('@DEPLOY_DIR_IMAGE@', self.get('DEPLOY_DIR_IMAGE')))

        for ovmf in self.ovmf_bios:
            format = ovmf.rsplit('.', 1)[-1]
            if format == "bin":
                format = "raw"
            self.qemu_opt += ' -drive if=pflash,format=%s,file=%s' % (format, ovmf)

        self.qemu_opt += ' ' + self.qemu_opt_script

        if self.ovmf_secboot_pkkek1:
            # Provide the Platform Key and first Key Exchange Key certificate as an
            # OEM string in the SMBIOS Type 11 table. Prepend the certificate string
            # with the "application prefix" of the EnrollDefaultKeys.efi application.
            self.qemu_opt += ' -smbios type=11,value=4e32566d-8e9e-4f52-81d3-5bb9715f9727:' \
                             + self.ovmf_secboot_pkkek1

        # Append qemuparams to override previous settings
        if self.qemuparams:
            self.qemu_opt += ' ' + self.qemuparams

        if self.snapshot:
            self.qemu_opt += " -snapshot"

        self.setup_guest_agent()
        self.setup_serial()
        self.setup_vga()

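    # Build the -kernel/-append/-dtb/-bios options, start QEMU and wait for it,
    # passing any tap/port lock file descriptors through to the child process.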
    def start_qemu(self):
        import shlex
        if self.kernel:
            kernel_opts = "-kernel %s" % (self.kernel)
            if self.get('QB_KERNEL_CMDLINE') == "none":
                if self.bootparams:
                    kernel_opts += " -append '%s'" % (self.bootparams)
            else:
                kernel_opts += " -append '%s %s %s %s'" % (self.kernel_cmdline,
                        self.kernel_cmdline_script, self.get('QB_KERNEL_CMDLINE_APPEND'),
                        self.bootparams)
            if self.dtb:
                kernel_opts += " -dtb %s" % self.dtb
        else:
            kernel_opts = ""

        if self.bios:
            self.qemu_opt += " -bios %s" % self.bios

        cmd = "%s %s" % (self.qemu_opt, kernel_opts)
        cmds = shlex.split(cmd)
        logger.info('Running %s\n' % cmd)
        with open('/proc/uptime', 'r') as f:
            uptime_seconds = f.readline().split()[0]
        logger.info('Host uptime: %s\n' % uptime_seconds)
        pass_fds = []
        if self.taplock_descriptor:
            pass_fds = [self.taplock_descriptor.fileno()]
        if len(self.portlocks):
            for descriptor in self.portlocks.values():
                pass_fds.append(descriptor.fileno())
        process = subprocess.Popen(cmds, stderr=subprocess.PIPE, pass_fds=pass_fds, env=self.qemu_environ)
        self.qemuprocess = process
        retcode = process.wait()
        if retcode:
            if retcode == -signal.SIGTERM:
                logger.info("Qemu terminated by SIGTERM")
            else:
                logger.error("Failed to run qemu: %s", process.stderr.read().decode())

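    # Run the optional QB_CLEANUP_CMD hook from the qemuboot configuration.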
    def cleanup_cmd(self):
        cmd = self.get('QB_CLEANUP_CMD')
        if cmd != '':
            logger.info('Running cleanup command %s' % str(cmd))
            if subprocess.call(cmd, shell=True) != 0:
                raise RunQemuError('Failed to run %s' % str(cmd))

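    # Tear everything down: stop QEMU, release the tap device and lock files,
    # stop the userspace NFS server, restore the terminal state and remove any
    # temporary files registered for cleanup.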
    def cleanup(self):
        if self.cleaned:
            return

        # Avoid dealing with SIGTERM while the cleanup function is running.
        signal.signal(signal.SIGTERM, signal.SIG_IGN)

        logger.info("Cleaning up")

        if self.qemuprocess:
            try:
                # Give it some time to shut down; ignore return values and output.
                self.qemuprocess.send_signal(signal.SIGTERM)
                self.qemuprocess.communicate(timeout=5)
            except subprocess.TimeoutExpired:
                self.qemuprocess.kill()

        with open('/proc/uptime', 'r') as f:
            uptime_seconds = f.readline().split()[0]
        logger.info('Host uptime: %s\n' % uptime_seconds)
        if self.cleantap:
            cmd = ('sudo', self.qemuifdown, self.tap, self.bindir_native)
            logger.debug('Running %s' % str(cmd))
            subprocess.check_call(cmd)
        self.release_taplock()
        self.release_portlock()

        if self.nfs_running:
            logger.info("Shutting down the userspace NFS server...")
            cmd = ("runqemu-export-rootfs", "stop", self.rootfs)
            logger.debug('Running %s' % str(cmd))
            subprocess.check_call(cmd)

        if self.saved_stty:
            subprocess.check_call(("stty", self.saved_stty))

        if self.cleanup_files:
            for ent in self.cleanup_files:
                logger.info('Removing %s' % ent)
                if os.path.isfile(ent):
                    os.remove(ent)
                else:
                    shutil.rmtree(ent)

        # Deliberately ignore the return code of 'tput smam'.
        subprocess.call(["tput", "smam"])

        self.cleaned = True

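    # Capture 'bitbake -e' output for the given machine and target, retrying with
    # virtual/kernel if the first target could not be parsed as an image.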
    def run_bitbake_env(self, mach=None, target=''):
        bitbake = shutil.which('bitbake')
        if not bitbake:
            return

        if not mach:
            mach = self.get('MACHINE')

        multiconfig = self.get('MULTICONFIG')
        if multiconfig:
            multiconfig = "mc:%s" % multiconfig

        if mach:
            cmd = 'MACHINE=%s bitbake -e %s %s' % (mach, multiconfig, target)
        else:
            cmd = 'bitbake -e %s %s' % (multiconfig, target)

        logger.info('Running %s...' % cmd)
        try:
            return subprocess.check_output(cmd, shell=True).decode('utf-8')
        except subprocess.CalledProcessError as err:
            logger.warning("Couldn't run '%s' to gather environment information, maybe the target wasn't an image name, will retry with virtual/kernel as a target:\n%s" % (cmd, err.output.decode('utf-8')))
            # need something with IMAGE_NAME_SUFFIX/IMAGE_LINK_NAME defined (kernel also inherits image-artifact-names.bbclass)
            target = 'virtual/kernel'
            if mach:
                cmd = 'MACHINE=%s bitbake -e %s %s' % (mach, multiconfig, target)
            else:
                cmd = 'bitbake -e %s %s' % (multiconfig, target)
            try:
                return subprocess.check_output(cmd, shell=True).decode('utf-8')
            except subprocess.CalledProcessError as err:
                logger.warning("Couldn't run '%s' to gather environment information, giving up with 'bitbake -e':\n%s" % (cmd, err.output.decode('utf-8')))
                return ''

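    # Cache the 'bitbake -e' output so it is only gathered once per run.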
    def load_bitbake_env(self, mach=None, target=None):
        if self.bitbake_e:
            return

        self.bitbake_e = self.run_bitbake_env(mach=mach, target=target)

    def validate_combos(self):
        if (self.fstype in self.vmtypes) and self.kernel:
            raise RunQemuError("%s doesn't need kernel %s!" % (self.fstype, self.kernel))

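    # Resolve STAGING_BINDIR_NATIVE, falling back to querying
    # 'bitbake -e qemu-helper-native' when no cached value exists on disk.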
    @property
    def bindir_native(self):
        result = self.get('STAGING_BINDIR_NATIVE')
        if result and os.path.exists(result):
            return result

        cmd = ['bitbake', '-e']
        multiconfig = self.get('MULTICONFIG')
        if multiconfig:
            cmd.append('mc:%s:qemu-helper-native' % multiconfig)
        else:
            cmd.append('qemu-helper-native')

        logger.info('Running %s...' % str(cmd))
        out = subprocess.check_output(cmd).decode('utf-8')

        match = re.search('^STAGING_BINDIR_NATIVE="(.*)"', out, re.M)
        if match:
            result = match.group(1)
            if os.path.exists(result):
                self.set('STAGING_BINDIR_NATIVE', result)
                return result
            raise RunQemuError("Native sysroot directory %s doesn't exist" % result)
        else:
            raise RunQemuError("Can't find STAGING_BINDIR_NATIVE in '%s' output" % str(cmd))


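# Entry point: parse arguments and the environment, set up networking, the rootfs
# and the final command line, then run QEMU; cleanup handlers run even on failure.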
def main():
    if "help" in sys.argv or '-h' in sys.argv or '--help' in sys.argv:
        print_usage()
        return 0
    try:
        config = BaseConfig()

        renice = os.path.expanduser("~/bin/runqemu-renice")
        if os.path.exists(renice):
            logger.info('Using %s to renice' % renice)
            subprocess.check_call([renice, str(os.getpid())])

        def sigterm_handler(signum, frame):
            logger.info("Received signal: %s" % (signum))
            config.cleanup()
        signal.signal(signal.SIGTERM, sigterm_handler)

        config.check_args()
        config.read_qemuboot()
        config.check_and_set()
        # Check whether the combo is valid or not
        config.validate_combos()
        config.print_config()
        config.setup_network()
        config.setup_rootfs()
        config.setup_final()
        config.setup_cmd()
        config.start_qemu()
    except RunQemuError as err:
        logger.error(err)
        return 1
    except Exception as err:
        import traceback
        traceback.print_exc()
        return 1
    finally:
        config.cleanup_cmd()
        config.cleanup()

if __name__ == "__main__":
    sys.exit(main())