"""
|
|
BitBake 'Event' implementation
|
|
|
|
Classes and functions for manipulating 'events' in the
|
|
BitBake build tools.
|
|
"""
|
|
|
|
# Copyright (C) 2003, 2004 Chris Larson
|
|
#
|
|
# SPDX-License-Identifier: GPL-2.0-only
|
|
#
|
|
|
|

import ast
import atexit
import collections
import logging
import pickle
import sys
import threading
import traceback

import bb.utils

# This is the pid for which we should generate the event. This is set when
# the runqueue forks off.
worker_pid = 0
worker_fire = None

logger = logging.getLogger('BitBake.Event')

class Event(object):
    """Base class for events"""

    def __init__(self):
        self.pid = worker_pid


class HeartbeatEvent(Event):
    """Triggered at regular time intervals of 10 seconds. Other events can fire much more often
       (runQueueTaskStarted when there are many short tasks) or not at all for long periods
       of time (again runQueueTaskStarted, when there is just one long-running task), so this
       event is more suitable for doing some task-independent work occasionally."""
    def __init__(self, time):
        Event.__init__(self)
        self.time = time
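
# Illustrative sketch (not part of upstream): a metadata event handler could use
# HeartbeatEvent for periodic, task-independent work. The handler name and its
# body are hypothetical; only register() and HeartbeatEvent are real.
#
#   def example_heartbeat_handler(e, d):
#       # e.time is the timestamp supplied when the heartbeat was fired
#       logger.debug("heartbeat at %s", e.time)
#
#   register("example_heartbeat_handler", example_heartbeat_handler,
#            mask=["bb.event.HeartbeatEvent"])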

Registered        = 10
AlreadyRegistered = 14

def get_class_handlers():
    return _handlers

def set_class_handlers(h):
    global _handlers
    _handlers = h

def clean_class_handlers():
    return collections.OrderedDict()

# Internal
_handlers = clean_class_handlers()
_ui_handlers = {}
_ui_logfilters = {}
_ui_handler_seq = 0
_event_handler_map = {}
_catchall_handlers = {}
_eventfilter = None
_uiready = False
_thread_lock = threading.Lock()
_heartbeat_enabled = False
_should_exit = threading.Event()

def enable_threadlock():
    # Always needed now
    return

def disable_threadlock():
    # Always needed now
    return

def enable_heartbeat():
    global _heartbeat_enabled
    _heartbeat_enabled = True

def disable_heartbeat():
    global _heartbeat_enabled
    _heartbeat_enabled = False

#
# In long running code, this function should be called periodically
# to check if we should exit due to an interruption (e.g. Ctrl+C from the UI)
#
def check_for_interrupts(d):
    global _should_exit
    if _should_exit.is_set():
        bb.warn("Exiting due to interrupt.")
        raise bb.BBHandledException()
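
# Illustrative sketch (not part of upstream): long-running metadata code might
# poll check_for_interrupts() inside its main loop, for example:
#
#   for item in work_items:              # 'work_items' is hypothetical
#       bb.event.check_for_interrupts(d)
#       process(item)                    # 'process' is hypothetical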

def execute_handler(name, handler, event, d):
    event.data = d
    try:
        ret = handler(event, d)
    except (bb.parse.SkipRecipe, bb.BBHandledException):
        raise
    except Exception:
        etype, value, tb = sys.exc_info()
        logger.error("Execution of event handler '%s' failed" % name,
                     exc_info=(etype, value, tb.tb_next))
        raise
    except SystemExit as exc:
        if exc.code != 0:
            logger.error("Execution of event handler '%s' failed" % name)
        raise
    finally:
        del event.data


def fire_class_handlers(event, d):
    if isinstance(event, logging.LogRecord):
        return

    eid = str(event.__class__)[8:-2]
    evt_hmap = _event_handler_map.get(eid, {})
    for name, handler in list(_handlers.items()):
        if name in _catchall_handlers or name in evt_hmap:
            if _eventfilter:
                if not _eventfilter(name, handler, event, d):
                    continue
            if d is not None and not name in (d.getVar("__BBHANDLERS_MC") or set()):
                continue
            execute_handler(name, handler, event, d)
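
# Note (editorial, not part of upstream): the 'eid' computed above strips the
# "<class '" prefix and "'>" suffix from str(event.__class__), so for example
# str(bb.event.BuildStarted)[8:-2] yields "bb.event.BuildStarted", which is the
# form used as a key in _event_handler_map and in handler masks.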

ui_queue = []
@atexit.register
def print_ui_queue():
    global ui_queue
    """If we're exiting before a UI has been spawned, display any queued
    LogRecords to the console."""
    logger = logging.getLogger("BitBake")
    if not _uiready:
        from bb.msg import BBLogFormatter
        # Flush any existing buffered content
        try:
            sys.stdout.flush()
        except:
            pass
        try:
            sys.stderr.flush()
        except:
            pass
        stdout = logging.StreamHandler(sys.stdout)
        stderr = logging.StreamHandler(sys.stderr)
        formatter = BBLogFormatter("%(levelname)s: %(message)s")
        stdout.setFormatter(formatter)
        stderr.setFormatter(formatter)

        # First check to see if we have any proper messages
        msgprint = False
        msgerrs = False

        # Should we print to stderr?
        for event in ui_queue[:]:
            if isinstance(event, logging.LogRecord) and event.levelno >= logging.WARNING:
                msgerrs = True
                break

        if msgerrs:
            logger.addHandler(stderr)
        else:
            logger.addHandler(stdout)

        for event in ui_queue[:]:
            if isinstance(event, logging.LogRecord):
                if event.levelno > logging.DEBUG:
                    logger.handle(event)
                    msgprint = True

        # Nope, so just print all of the messages we have (including debug messages)
        if not msgprint:
            for event in ui_queue[:]:
                if isinstance(event, logging.LogRecord):
                    logger.handle(event)
        if msgerrs:
            logger.removeHandler(stderr)
        else:
            logger.removeHandler(stdout)
        ui_queue = []

def fire_ui_handlers(event, d):
    global _thread_lock

    if not _uiready:
        # No UI handlers registered yet, queue up the messages
        ui_queue.append(event)
        return

    with bb.utils.lock_timeout_nocheck(_thread_lock) as lock:
        if not lock:
            # If we can't get the lock, we may be recursively called, queue and return
            ui_queue.append(event)
            return

        errors = []
        for h in _ui_handlers:
            #print "Sending event %s" % event
            try:
                if not _ui_logfilters[h].filter(event):
                    continue
                # We use pickle here since it better handles object instances
                # which xmlrpc's marshaller does not. Events *must* be serializable
                # by pickle.
                if hasattr(_ui_handlers[h].event, "sendpickle"):
                    _ui_handlers[h].event.sendpickle((pickle.dumps(event)))
                else:
                    _ui_handlers[h].event.send(event)
            except:
                errors.append(h)
        for h in errors:
            del _ui_handlers[h]

    while ui_queue:
        fire_ui_handlers(ui_queue.pop(), d)

def fire(event, d):
    """Fire off an Event"""

    # We can fire class handlers in the worker process context and this is
    # desired so they get the task based datastore.
    # UI handlers need to be fired in the server context so we defer this. They
    # don't have a datastore so the datastore context isn't a problem.

    fire_class_handlers(event, d)
    if worker_fire:
        worker_fire(event, d)
    else:
        # If messages have been queued up, clear the queue
        global _uiready, ui_queue
        if _uiready and ui_queue:
            for queue_event in ui_queue:
                fire_ui_handlers(queue_event, d)
            ui_queue = []
        fire_ui_handlers(event, d)

def fire_from_worker(event, d):
    fire_ui_handlers(event, d)

noop = lambda _: None
def register(name, handler, mask=None, filename=None, lineno=None, data=None):
    """Register an Event handler"""

    if data is not None and data.getVar("BB_CURRENT_MC"):
        mc = data.getVar("BB_CURRENT_MC")
        name = '%s%s' % (mc.replace('-', '_'), name)

    # already registered
    if name in _handlers:
        if data is not None:
            bbhands_mc = (data.getVar("__BBHANDLERS_MC") or set())
            bbhands_mc.add(name)
            data.setVar("__BBHANDLERS_MC", bbhands_mc)
        return AlreadyRegistered

    if handler is not None:
        # handle string containing python code
        if isinstance(handler, str):
            tmp = "def %s(e, d):\n%s" % (name, handler)
            # Inject empty lines to make code match lineno in filename
            if lineno is not None:
                tmp = "\n" * (lineno-1) + tmp
            try:
                code = bb.methodpool.compile_cache(tmp)
                if not code:
                    if filename is None:
                        filename = "%s(e, d)" % name
                    code = compile(tmp, filename, "exec", ast.PyCF_ONLY_AST)
                    code = compile(code, filename, "exec")
                    bb.methodpool.compile_cache_add(tmp, code)
            except SyntaxError:
                logger.error("Unable to register event handler '%s':\n%s", name,
                             ''.join(traceback.format_exc(limit=0)))
                _handlers[name] = noop
                return
            env = {}
            bb.utils.better_exec(code, env)
            func = bb.utils.better_eval(name, env)
            _handlers[name] = func
        else:
            _handlers[name] = handler

        if not mask or '*' in mask:
            _catchall_handlers[name] = True
        else:
            for m in mask:
                if _event_handler_map.get(m, None) is None:
                    _event_handler_map[m] = {}
                _event_handler_map[m][name] = True

        if data is not None:
            bbhands_mc = (data.getVar("__BBHANDLERS_MC") or set())
            bbhands_mc.add(name)
            data.setVar("__BBHANDLERS_MC", bbhands_mc)

        return Registered
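
# Illustrative sketch (not part of upstream): registering a plain Python callable
# for a specific event class. The handler name and body are hypothetical; the
# mask entries use the "module.ClassName" form matched by fire_class_handlers().
#
#   def example_build_handler(e, d):
#       logger.info("build of %s started", getName(e))
#
#   register("example_build_handler", example_build_handler,
#            mask=["bb.event.BuildStarted"])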

def remove(name, handler, data=None):
    """Remove an Event handler"""
    if data is not None:
        if data.getVar("BB_CURRENT_MC"):
            mc = data.getVar("BB_CURRENT_MC")
            name = '%s%s' % (mc.replace('-', '_'), name)

    _handlers.pop(name)
    if name in _catchall_handlers:
        _catchall_handlers.pop(name)
    for event in _event_handler_map.keys():
        if name in _event_handler_map[event]:
            _event_handler_map[event].pop(name)

    if data is not None:
        bbhands_mc = (data.getVar("__BBHANDLERS_MC") or set())
        if name in bbhands_mc:
            bbhands_mc.remove(name)
        data.setVar("__BBHANDLERS_MC", bbhands_mc)

def get_handlers():
    return _handlers

def set_handlers(handlers):
    global _handlers
    _handlers = handlers

def set_eventfilter(func):
    global _eventfilter
    _eventfilter = func

def register_UIHhandler(handler, mainui=False):
    with bb.utils.lock_timeout(_thread_lock):
        bb.event._ui_handler_seq = bb.event._ui_handler_seq + 1
        _ui_handlers[_ui_handler_seq] = handler
        level, debug_domains = bb.msg.constructLogOptions()
        _ui_logfilters[_ui_handler_seq] = UIEventFilter(level, debug_domains)
        if mainui:
            global _uiready
            _uiready = _ui_handler_seq
        return _ui_handler_seq

def unregister_UIHhandler(handlerNum, mainui=False):
    if mainui:
        global _uiready
        _uiready = False
    with bb.utils.lock_timeout(_thread_lock):
        if handlerNum in _ui_handlers:
            del _ui_handlers[handlerNum]
    return

def get_uihandler():
    if _uiready is False:
        return None
    return _uiready

# Class to allow filtering of events and specific filtering of LogRecords *before* we put them over the IPC
class UIEventFilter(object):
    def __init__(self, level, debug_domains):
        self.update(None, level, debug_domains)

    def update(self, eventmask, level, debug_domains):
        self.eventmask = eventmask
        self.stdlevel = level
        self.debug_domains = debug_domains

    def filter(self, event):
        if isinstance(event, logging.LogRecord):
            if event.levelno >= self.stdlevel:
                return True
            if event.name in self.debug_domains and event.levelno >= self.debug_domains[event.name]:
                return True
            return False
        eid = str(event.__class__)[8:-2]
        if self.eventmask and eid not in self.eventmask:
            return False
        return True

def set_UIHmask(handlerNum, level, debug_domains, mask):
    if not handlerNum in _ui_handlers:
        return False
    if '*' in mask:
        _ui_logfilters[handlerNum].update(None, level, debug_domains)
    else:
        _ui_logfilters[handlerNum].update(mask, level, debug_domains)
    return True

def getName(e):
    """Returns the name of a class or class instance"""
    if getattr(e, "__name__", None) is None:
        return e.__class__.__name__
    else:
        return e.__name__

class OperationStarted(Event):
    """An operation has begun"""
    def __init__(self, msg = "Operation Started"):
        Event.__init__(self)
        self.msg = msg

class OperationCompleted(Event):
    """An operation has completed"""
    def __init__(self, total, msg = "Operation Completed"):
        Event.__init__(self)
        self.total = total
        self.msg = msg

class OperationProgress(Event):
    """An operation is in progress"""
    def __init__(self, current, total, msg = "Operation in Progress"):
        Event.__init__(self)
        self.current = current
        self.total = total
        self.msg = msg + ": %s/%s" % (current, total)

class ConfigParsed(Event):
    """Configuration Parsing Complete"""

class MultiConfigParsed(Event):
    """Multi-Config Parsing Complete"""
    def __init__(self, mcdata):
        self.mcdata = mcdata
        Event.__init__(self)

class RecipeEvent(Event):
    def __init__(self, fn):
        self.fn = fn
        Event.__init__(self)

class RecipePreDeferredInherits(RecipeEvent):
    """
    Called before deferred inherits are processed so code can, for example,
    snoop on class extensions.
    Limitations: it won't see inherits of inherited classes and the data is unexpanded.
    """
    def __init__(self, fn, inherits):
        self.fn = fn
        self.inherits = inherits
        Event.__init__(self)
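
# Illustrative sketch (not part of upstream): metadata could watch this event to
# spot class extensions before finalisation. The handler name is hypothetical;
# e.inherits is the unexpanded list of deferred inherits for the recipe e.fn.
#
#   def example_deferred_inherits_handler(e, d):
#       logger.debug("deferred inherits for %s: %s", e.fn, e.inherits)
#
#   register("example_deferred_inherits_handler", example_deferred_inherits_handler,
#            mask=["bb.event.RecipePreDeferredInherits"])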

class RecipePreFinalise(RecipeEvent):
    """ Recipe Parsing Complete but not yet finalised"""

class RecipePostKeyExpansion(RecipeEvent):
    """ Recipe key expansion complete but not yet finalised"""


class RecipeTaskPreProcess(RecipeEvent):
    """
    Recipe Tasks about to be finalised
    The list of tasks should be final at this point and handlers
    are only able to change interdependencies
    """
    def __init__(self, fn, tasklist):
        self.fn = fn
        self.tasklist = tasklist
        Event.__init__(self)

class RecipeParsed(RecipeEvent):
    """ Recipe Parsing Complete """

class BuildBase(Event):
    """Base class for bitbake build events"""

    def __init__(self, n, p, failures = 0):
        self._name = n
        self._pkgs = p
        Event.__init__(self)
        self._failures = failures

    def getPkgs(self):
        return self._pkgs

    def setPkgs(self, pkgs):
        self._pkgs = pkgs

    def getName(self):
        return self._name

    def setName(self, name):
        self._name = name

    def getFailures(self):
        """
        Return the number of failed packages
        """
        return self._failures

    pkgs = property(getPkgs, setPkgs, None, "pkgs property")
    name = property(getName, setName, None, "name property")

class BuildInit(BuildBase):
    """buildFile or buildTargets was invoked"""
    def __init__(self, p=[]):
        name = None
        BuildBase.__init__(self, name, p)

class BuildStarted(BuildBase, OperationStarted):
    """Event when builds start"""
    def __init__(self, n, p, failures = 0):
        OperationStarted.__init__(self, "Building Started")
        BuildBase.__init__(self, n, p, failures)

class BuildCompleted(BuildBase, OperationCompleted):
    """Event when builds have completed"""
    def __init__(self, total, n, p, failures=0, interrupted=0):
        if not failures:
            OperationCompleted.__init__(self, total, "Building Succeeded")
        else:
            OperationCompleted.__init__(self, total, "Building Failed")
        self._interrupted = interrupted
        BuildBase.__init__(self, n, p, failures)

class DiskFull(Event):
    """Disk full case; build halted"""
    def __init__(self, dev, type, freespace, mountpoint):
        Event.__init__(self)
        self._dev = dev
        self._type = type
        self._free = freespace
        self._mountpoint = mountpoint

class DiskUsageSample:
    def __init__(self, available_bytes, free_bytes, total_bytes):
        # Number of bytes available to non-root processes.
        self.available_bytes = available_bytes
        # Number of bytes available to root processes.
        self.free_bytes = free_bytes
        # Total capacity of the volume.
        self.total_bytes = total_bytes

class MonitorDiskEvent(Event):
    """If BB_DISKMON_DIRS is set, then this event gets triggered each time disk space is checked.
       Provides information about devices that are getting monitored."""
    def __init__(self, disk_usage):
        Event.__init__(self)
        # hash of device root path -> DiskUsageSample
        self.disk_usage = disk_usage
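
# Illustrative sketch (not part of upstream): a handler could inspect the
# per-mountpoint samples carried by MonitorDiskEvent. The handler name is
# hypothetical; disk_usage maps a device root path to a DiskUsageSample.
#
#   def example_diskmon_handler(e, d):
#       for path, sample in e.disk_usage.items():
#           logger.debug("%s: %d of %d bytes available", path,
#                        sample.available_bytes, sample.total_bytes)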

class NoProvider(Event):
    """No Provider for an Event"""

    def __init__(self, item, runtime=False, dependees=None, reasons=None, close_matches=None):
        Event.__init__(self)
        self._item = item
        self._runtime = runtime
        self._dependees = dependees
        self._reasons = reasons
        self._close_matches = close_matches

    def getItem(self):
        return self._item

    def isRuntime(self):
        return self._runtime

    def __str__(self):
        msg = ''
        if self._runtime:
            r = "R"
        else:
            r = ""

        extra = ''
        if not self._reasons:
            if self._close_matches:
                extra = ". Close matches:\n  %s" % '\n  '.join(sorted(set(self._close_matches)))

        if self._dependees:
            msg = "Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)%s" % (r, self._item, ", ".join(self._dependees), r, extra)
        else:
            msg = "Nothing %sPROVIDES '%s'%s" % (r, self._item, extra)
        if self._reasons:
            for reason in self._reasons:
                msg += '\n' + reason
        return msg


class MultipleProviders(Event):
    """Multiple Providers"""

    def __init__(self, item, candidates, runtime = False):
        Event.__init__(self)
        self._item = item
        self._candidates = candidates
        self._is_runtime = runtime

    def isRuntime(self):
        """
        Is this a runtime issue?
        """
        return self._is_runtime

    def getItem(self):
        """
        The name of the item to be built
        """
        return self._item

    def getCandidates(self):
        """
        Get the possible Candidates for a PROVIDER.
        """
        return self._candidates

    def __str__(self):
        msg = "Multiple providers are available for %s%s (%s)" % (self._is_runtime and "runtime " or "",
                                                                  self._item,
                                                                  ", ".join(self._candidates))
        rtime = ""
        if self._is_runtime:
            rtime = "R"
        msg += "\nConsider defining a PREFERRED_%sPROVIDER entry to match %s" % (rtime, self._item)
        return msg

class ParseStarted(OperationStarted):
    """Recipe parsing for the runqueue has begun"""
    def __init__(self, total):
        OperationStarted.__init__(self, "Recipe parsing Started")
        self.total = total

class ParseCompleted(OperationCompleted):
    """Recipe parsing for the runqueue has completed"""
    def __init__(self, cached, parsed, skipped, masked, virtuals, errors, total):
        OperationCompleted.__init__(self, total, "Recipe parsing Completed")
        self.cached = cached
        self.parsed = parsed
        self.skipped = skipped
        self.virtuals = virtuals
        self.masked = masked
        self.errors = errors
        self.sofar = cached + parsed

class ParseProgress(OperationProgress):
    """Recipe parsing progress"""
    def __init__(self, current, total):
        OperationProgress.__init__(self, current, total, "Recipe parsing")


class CacheLoadStarted(OperationStarted):
    """Loading of the dependency cache has begun"""
    def __init__(self, total):
        OperationStarted.__init__(self, "Loading cache Started")
        self.total = total

class CacheLoadProgress(OperationProgress):
    """Cache loading progress"""
    def __init__(self, current, total):
        OperationProgress.__init__(self, current, total, "Loading cache")

class CacheLoadCompleted(OperationCompleted):
    """Cache loading is complete"""
    def __init__(self, total, num_entries):
        OperationCompleted.__init__(self, total, "Loading cache Completed")
        self.num_entries = num_entries

class TreeDataPreparationStarted(OperationStarted):
    """Tree data preparation started"""
    def __init__(self):
        OperationStarted.__init__(self, "Preparing tree data Started")

class TreeDataPreparationProgress(OperationProgress):
    """Tree data preparation is in progress"""
    def __init__(self, current, total):
        OperationProgress.__init__(self, current, total, "Preparing tree data")

class TreeDataPreparationCompleted(OperationCompleted):
    """Tree data preparation completed"""
    def __init__(self, total):
        OperationCompleted.__init__(self, total, "Preparing tree data Completed")

class DepTreeGenerated(Event):
    """
    Event when a dependency tree has been generated
    """

    def __init__(self, depgraph):
        Event.__init__(self)
        self._depgraph = depgraph

class TargetsTreeGenerated(Event):
    """
    Event when a set of buildable targets has been generated
    """
    def __init__(self, model):
        Event.__init__(self)
        self._model = model

class ReachableStamps(Event):
    """
    An event listing all stamps reachable after parsing
    which the metadata may use to clean up stale data
    """

    def __init__(self, stamps):
        Event.__init__(self)
        self.stamps = stamps

class StaleSetSceneTasks(Event):
    """
    An event listing setscene tasks which are 'stale' and will
    be rerun. The metadata may use this to clean up stale data.
    tasks is a mapping of tasks and matching stale stamps.
    """

    def __init__(self, tasks):
        Event.__init__(self)
        self.tasks = tasks

class FilesMatchingFound(Event):
    """
    Event when a list of files matching the supplied pattern has
    been generated
    """
    def __init__(self, pattern, matches):
        Event.__init__(self)
        self._pattern = pattern
        self._matches = matches

class ConfigFilesFound(Event):
    """
    Event when a list of appropriate config files has been generated
    """
    def __init__(self, variable, values):
        Event.__init__(self)
        self._variable = variable
        self._values = values

class ConfigFilePathFound(Event):
    """
    Event when a path for a config file has been found
    """
    def __init__(self, path):
        Event.__init__(self)
        self._path = path

class MsgBase(Event):
    """Base class for messages"""

    def __init__(self, msg):
        self._message = msg
        Event.__init__(self)

class MsgDebug(MsgBase):
    """Debug Message"""

class MsgNote(MsgBase):
    """Note Message"""

class MsgWarn(MsgBase):
    """Warning Message"""

class MsgError(MsgBase):
    """Error Message"""

class MsgFatal(MsgBase):
    """Fatal Message"""

class MsgPlain(MsgBase):
    """General output"""

class LogExecTTY(Event):
    """Send event containing program to spawn on tty of the logger"""
    def __init__(self, msg, prog, sleep_delay, retries):
        Event.__init__(self)
        self.msg = msg
        self.prog = prog
        self.sleep_delay = sleep_delay
        self.retries = retries

class LogHandler(logging.Handler):
    """Dispatch logging messages as bitbake events"""

    def emit(self, record):
        if record.exc_info:
            record.bb_exc_formatted = traceback.format_exception(*record.exc_info)
            record.exc_info = None
        fire(record, None)

    def filter(self, record):
        record.taskpid = worker_pid
        return True

class MetadataEvent(Event):
    """
    Generic event used by OE-Core classes
    to report information during asynchronous execution
    """
    def __init__(self, eventtype, eventdata):
        Event.__init__(self)
        self.type = eventtype
        self._localdata = eventdata
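
# Illustrative sketch (not part of upstream): metadata typically wraps arbitrary
# payloads in a MetadataEvent. The event type string and payload below are
# hypothetical; only the MetadataEvent constructor and fire() are real.
#
#   fire(MetadataEvent("ExampleImageInfo", {"image": "example-image"}), d)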

class ProcessStarted(Event):
    """
    Generic process started event (usually part of the initial startup)
    where further progress events will be delivered
    """
    def __init__(self, processname, total):
        Event.__init__(self)
        self.processname = processname
        self.total = total

class ProcessProgress(Event):
    """
    Generic process progress event (usually part of the initial startup)
    """
    def __init__(self, processname, progress):
        Event.__init__(self)
        self.processname = processname
        self.progress = progress

class ProcessFinished(Event):
    """
    Generic process finished event (usually part of the initial startup)
    """
    def __init__(self, processname):
        Event.__init__(self)
        self.processname = processname

class SanityCheck(Event):
    """
    Event to run sanity checks, either raise errors or generate events as return status.
    """
    def __init__(self, generateevents = True):
        Event.__init__(self)
        self.generateevents = generateevents

class SanityCheckPassed(Event):
    """
    Event to indicate sanity check has passed
    """

class SanityCheckFailed(Event):
    """
    Event to indicate sanity check has failed
    """
    def __init__(self, msg, network_error=False):
        Event.__init__(self)
        self._msg = msg
        self._network_error = network_error

class NetworkTest(Event):
    """
    Event to run network connectivity tests, either raise errors or generate events as return status.
    """
    def __init__(self, generateevents = True):
        Event.__init__(self)
        self.generateevents = generateevents

class NetworkTestPassed(Event):
    """
    Event to indicate network test has passed
    """

class NetworkTestFailed(Event):
    """
    Event to indicate network test has failed
    """

class FindSigInfoResult(Event):
    """
    Event to return results from findSigInfo command
    """
    def __init__(self, result):
        Event.__init__(self)
        self.result = result

class GetTaskSignatureResult(Event):
    """
    Event to return results from GetTaskSignatures command
    """
    def __init__(self, sig):
        Event.__init__(self)
        self.sig = sig

class ParseError(Event):
    """
    Event to indicate parse failed
    """
    def __init__(self, msg):
        super().__init__()
        self._msg = msg