author     Dave Cobbley <david.j.cobbley@linux.intel.com>  2018-08-14 10:05:37 -0700
committer  Brad Bishop <bradleyb@fuzziesquirrel.com>  2018-08-22 21:26:31 -0400
commit     eb8dc40360f0cfef56fb6947cc817a547d6d9bc6 (patch)
tree       de291a73dc37168da6370e2cf16c347d1eba9df8 /poky/bitbake/lib/bb
parent     9c3cf826d853102535ead04cebc2d6023eff3032 (diff)
[Subtree] Removing import-layers directory
As part of the move to subtrees, need to bring all the import layers
content to the top level.

Change-Id: I4a163d10898cbc6e11c27f776f60e1a470049d8f
Signed-off-by: Dave Cobbley <david.j.cobbley@linux.intel.com>
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
Diffstat (limited to 'poky/bitbake/lib/bb')
-rw-r--r--  poky/bitbake/lib/bb/COW.py | 319
-rw-r--r--  poky/bitbake/lib/bb/__init__.py | 144
-rw-r--r--  poky/bitbake/lib/bb/build.py | 913
-rw-r--r--  poky/bitbake/lib/bb/cache.py | 891
-rw-r--r--  poky/bitbake/lib/bb/cache_extra.py | 75
-rw-r--r--  poky/bitbake/lib/bb/checksum.py | 134
-rw-r--r--  poky/bitbake/lib/bb/codeparser.py | 476
-rw-r--r--  poky/bitbake/lib/bb/command.py | 765
-rw-r--r--  poky/bitbake/lib/bb/compat.py | 6
-rw-r--r--  poky/bitbake/lib/bb/cooker.py | 2161
-rw-r--r--  poky/bitbake/lib/bb/cookerdata.py | 434
-rw-r--r--  poky/bitbake/lib/bb/daemonize.py | 82
-rw-r--r--  poky/bitbake/lib/bb/data.py | 403
-rw-r--r--  poky/bitbake/lib/bb/data_smart.py | 1037
-rw-r--r--  poky/bitbake/lib/bb/event.py | 831
-rw-r--r--  poky/bitbake/lib/bb/exceptions.py | 91
-rw-r--r--  poky/bitbake/lib/bb/fetch2/__init__.py | 1864
-rw-r--r--  poky/bitbake/lib/bb/fetch2/bzr.py | 139
-rw-r--r--  poky/bitbake/lib/bb/fetch2/clearcase.py | 260
-rw-r--r--  poky/bitbake/lib/bb/fetch2/cvs.py | 172
-rw-r--r--  poky/bitbake/lib/bb/fetch2/git.py | 664
-rw-r--r--  poky/bitbake/lib/bb/fetch2/gitannex.py | 91
-rw-r--r--  poky/bitbake/lib/bb/fetch2/gitsm.py | 135
-rw-r--r--  poky/bitbake/lib/bb/fetch2/hg.py | 270
-rw-r--r--  poky/bitbake/lib/bb/fetch2/local.py | 119
-rw-r--r--  poky/bitbake/lib/bb/fetch2/npm.py | 309
-rw-r--r--  poky/bitbake/lib/bb/fetch2/osc.py | 132
-rw-r--r--  poky/bitbake/lib/bb/fetch2/perforce.py | 209
-rw-r--r--  poky/bitbake/lib/bb/fetch2/repo.py | 97
-rw-r--r--  poky/bitbake/lib/bb/fetch2/s3.py | 98
-rw-r--r--  poky/bitbake/lib/bb/fetch2/sftp.py | 125
-rw-r--r--  poky/bitbake/lib/bb/fetch2/ssh.py | 125
-rw-r--r--  poky/bitbake/lib/bb/fetch2/svn.py | 193
-rw-r--r--  poky/bitbake/lib/bb/fetch2/wget.py | 626
-rwxr-xr-x  poky/bitbake/lib/bb/main.py | 508
-rw-r--r--  poky/bitbake/lib/bb/methodpool.py | 40
-rw-r--r--  poky/bitbake/lib/bb/monitordisk.py | 268
-rw-r--r--  poky/bitbake/lib/bb/msg.py | 225
-rw-r--r--  poky/bitbake/lib/bb/namedtuple_with_abc.py | 255
-rw-r--r--  poky/bitbake/lib/bb/parse/__init__.py | 175
-rw-r--r--  poky/bitbake/lib/bb/parse/ast.py | 442
-rw-r--r--  poky/bitbake/lib/bb/parse/parse_py/BBHandler.py | 251
-rw-r--r--  poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py | 210
-rw-r--r--  poky/bitbake/lib/bb/parse/parse_py/__init__.py | 33
-rw-r--r--  poky/bitbake/lib/bb/persist_data.py | 214
-rw-r--r--  poky/bitbake/lib/bb/process.py | 179
-rw-r--r--  poky/bitbake/lib/bb/progress.py | 276
-rw-r--r--  poky/bitbake/lib/bb/providers.py | 430
-rw-r--r--  poky/bitbake/lib/bb/pysh/__init__.py | 0
-rw-r--r--  poky/bitbake/lib/bb/pysh/builtin.py | 710
-rw-r--r--  poky/bitbake/lib/bb/pysh/interp.py | 1367
-rw-r--r--  poky/bitbake/lib/bb/pysh/lsprof.py | 116
-rw-r--r--  poky/bitbake/lib/bb/pysh/pysh.py | 167
-rw-r--r--  poky/bitbake/lib/bb/pysh/pyshlex.py | 888
-rw-r--r--  poky/bitbake/lib/bb/pysh/pyshyacc.py | 779
-rw-r--r--  poky/bitbake/lib/bb/pysh/sherrors.py | 41
-rw-r--r--  poky/bitbake/lib/bb/pysh/subprocess_fix.py | 77
-rw-r--r--  poky/bitbake/lib/bb/remotedata.py | 116
-rw-r--r--  poky/bitbake/lib/bb/runqueue.py | 2682
-rw-r--r--  poky/bitbake/lib/bb/server/__init__.py | 21
-rw-r--r--  poky/bitbake/lib/bb/server/process.py | 624
-rw-r--r--  poky/bitbake/lib/bb/server/xmlrpcclient.py | 154
-rw-r--r--  poky/bitbake/lib/bb/server/xmlrpcserver.py | 158
-rw-r--r--  poky/bitbake/lib/bb/siggen.py | 729
-rw-r--r--  poky/bitbake/lib/bb/taskdata.py | 578
-rw-r--r--  poky/bitbake/lib/bb/tests/__init__.py | 0
-rw-r--r--  poky/bitbake/lib/bb/tests/codeparser.py | 428
-rw-r--r--  poky/bitbake/lib/bb/tests/cow.py | 136
-rw-r--r--  poky/bitbake/lib/bb/tests/data.py | 607
-rw-r--r--  poky/bitbake/lib/bb/tests/event.py | 986
-rw-r--r--  poky/bitbake/lib/bb/tests/fetch.py | 1573
-rw-r--r--  poky/bitbake/lib/bb/tests/parse.py | 185
-rw-r--r--  poky/bitbake/lib/bb/tests/utils.py | 603
-rw-r--r--  poky/bitbake/lib/bb/tinfoil.py | 900
-rw-r--r--  poky/bitbake/lib/bb/ui/__init__.py | 17
-rw-r--r--  poky/bitbake/lib/bb/ui/buildinfohelper.py | 2002
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/images/images_display.png | bin 0 -> 6898 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/images/images_hover.png | bin 0 -> 7051 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/indicators/add-hover.png | bin 0 -> 1212 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/indicators/add.png | bin 0 -> 1176 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/indicators/alert.png | bin 0 -> 3954 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/indicators/confirmation.png | bin 0 -> 5789 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/indicators/denied.png | bin 0 -> 3955 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/indicators/error.png | bin 0 -> 6482 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/indicators/info.png | bin 0 -> 3311 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/indicators/issues.png | bin 0 -> 4549 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/indicators/refresh.png | bin 0 -> 5250 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/indicators/remove-hover.png | bin 0 -> 2809 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/indicators/remove.png | bin 0 -> 1971 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/indicators/tick.png | bin 0 -> 4563 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/info/info_display.png | bin 0 -> 4117 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/info/info_hover.png | bin 0 -> 4167 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/layers/layers_display.png | bin 0 -> 4840 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/layers/layers_hover.png | bin 0 -> 5257 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/packages/packages_display.png | bin 0 -> 7011 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/packages/packages_hover.png | bin 0 -> 7121 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/recipe/recipe_display.png | bin 0 -> 4723 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/recipe/recipe_hover.png | bin 0 -> 4866 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/settings/settings_display.png | bin 0 -> 6076 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/settings/settings_hover.png | bin 0 -> 6269 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/templates/templates_display.png | bin 0 -> 5651 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/icons/templates/templates_hover.png | bin 0 -> 5791 bytes
-rw-r--r--  poky/bitbake/lib/bb/ui/knotty.py | 728
-rw-r--r--  poky/bitbake/lib/bb/ui/ncurses.py | 373
-rw-r--r--  poky/bitbake/lib/bb/ui/taskexp.py | 328
-rw-r--r--  poky/bitbake/lib/bb/ui/toasterui.py | 487
-rw-r--r--  poky/bitbake/lib/bb/ui/uievent.py | 161
-rw-r--r--  poky/bitbake/lib/bb/ui/uihelper.py | 70
-rw-r--r--  poky/bitbake/lib/bb/utils.py | 1539
109 files changed, 38226 insertions, 0 deletions
diff --git a/poky/bitbake/lib/bb/COW.py b/poky/bitbake/lib/bb/COW.py
new file mode 100644
index 000000000..bec620809
--- /dev/null
+++ b/poky/bitbake/lib/bb/COW.py
@@ -0,0 +1,319 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# This is a copy-on-write dictionary and set implementation which abuses classes to try and be nice and fast.
+#
+# Copyright (C) 2006 Tim Ansell
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please note:
+# Be careful when using mutable types (i.e. Dicts and Lists) - operations involving these are SLOW.
+# Assign a file to __warn__ to get warnings about slow operations.
+#
+
+
+import copy
+import types
+ImmutableTypes = (
+ bool,
+ complex,
+ float,
+ int,
+ tuple,
+ frozenset,
+ str
+)
+
+MUTABLE = "__mutable__"
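+# Keys holding mutable values are stored with this suffix appended so
+# that lookups know a copy-on-read may be required (see __getmutable__).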
+
+class COWMeta(type):
+ pass
+
+class COWDictMeta(COWMeta):
+ __warn__ = False
+ __hasmutable__ = False
+ __marker__ = tuple()
+
+ def __str__(cls):
+ # FIXME: I have magic numbers!
+ return "<COWDict Level: %i Current Keys: %i>" % (cls.__count__, len(cls.__dict__) - 3)
+ __repr__ = __str__
+
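+ # "Copying" a COW object just creates a subclass: attribute lookups on
+ # the copy fall through to the parent class, so nothing is duplicated
+ # until a value is written, or a mutable value is read without readonly.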
+ def cow(cls):
+ class C(cls):
+ __count__ = cls.__count__ + 1
+ return C
+ copy = cow
+ __call__ = cow
+
+ def __setitem__(cls, key, value):
+ if value is not None and not isinstance(value, ImmutableTypes):
+ if not isinstance(value, COWMeta):
+ cls.__hasmutable__ = True
+ key += MUTABLE
+ setattr(cls, key, value)
+
+ def __getmutable__(cls, key, readonly=False):
+ nkey = key + MUTABLE
+ try:
+ return cls.__dict__[nkey]
+ except KeyError:
+ pass
+
+ value = getattr(cls, nkey)
+ if readonly:
+ return value
+
+ if cls.__warn__ is not False and not isinstance(value, COWMeta):
+ print("Warning: Doing a copy because %s is a mutable type." % key, file=cls.__warn__)
+ try:
+ value = value.copy()
+ except AttributeError as e:
+ value = copy.copy(value)
+ setattr(cls, nkey, value)
+ return value
+
+ __getmarker__ = []
+ def __getreadonly__(cls, key, default=__getmarker__):
+ """\
+ Get a value (even if mutable) which you promise not to change.
+ """
+ return cls.__getitem__(key, default, True)
+
+ def __getitem__(cls, key, default=__getmarker__, readonly=False):
+ try:
+ try:
+ value = getattr(cls, key)
+ except AttributeError:
+ value = cls.__getmutable__(key, readonly)
+
+ # This is for values which have been deleted
+ if value is cls.__marker__:
+ raise AttributeError("key %s does not exist." % key)
+
+ return value
+ except AttributeError as e:
+ if default is not cls.__getmarker__:
+ return default
+
+ raise KeyError(str(e))
+
+ def __delitem__(cls, key):
+ cls.__setitem__(key, cls.__marker__)
+
+ def __revertitem__(cls, key):
+ if key not in cls.__dict__:
+ key += MUTABLE
+ delattr(cls, key)
+
+ def __contains__(cls, key):
+ return cls.has_key(key)
+
+ def has_key(cls, key):
+ value = cls.__getreadonly__(key, cls.__marker__)
+ if value is cls.__marker__:
+ return False
+ return True
+
+ def iter(cls, type, readonly=False):
+ for key in dir(cls):
+ if key.startswith("__"):
+ continue
+
+ if key.endswith(MUTABLE):
+ key = key[:-len(MUTABLE)]
+
+ if type == "keys":
+ yield key
+
+ try:
+ if readonly:
+ value = cls.__getreadonly__(key)
+ else:
+ value = cls[key]
+ except KeyError:
+ continue
+
+ if type == "values":
+ yield value
+ if type == "items":
+ yield (key, value)
+ return  # a bare return ends the generator; raising StopIteration here breaks under PEP 479
+
+ def iterkeys(cls):
+ return cls.iter("keys")
+ def itervalues(cls, readonly=False):
+ if cls.__warn__ is not False and cls.__hasmutable__ and readonly is False:
+ print("Warning: If you aren't going to change any of the values, call with True.", file=cls.__warn__)
+ return cls.iter("values", readonly)
+ def iteritems(cls, readonly=False):
+ if cls.__warn__ is not False and cls.__hasmutable__ and readonly is False:
+ print("Warning: If you aren't going to change any of the values, call with True.", file=cls.__warn__)
+ return cls.iter("items", readonly)
+
+class COWSetMeta(COWDictMeta):
+ def __str__(cls):
+ # FIXME: I have magic numbers!
+ return "<COWSet Level: %i Current Keys: %i>" % (cls.__count__, len(cls.__dict__) -3)
+ __repr__ = __str__
+
+ def cow(cls):
+ class C(cls):
+ __count__ = cls.__count__ + 1
+ return C
+
+ def add(cls, value):
+ COWDictMeta.__setitem__(cls, repr(hash(value)), value)
+
+ def remove(cls, value):
+ COWDictMeta.__delitem__(cls, repr(hash(value)))
+
+ def __in__(cls, value):
+ return repr(hash(value)) in cls  # test membership on this set; 'in COWDictMeta' was a bug (raises TypeError)
+
+ def iterkeys(cls):
+ raise TypeError("sets don't have keys")
+
+ def iteritems(cls):
+ raise TypeError("sets don't have 'items'")
+
+# These are the actual classes you use!
+class COWDictBase(object, metaclass = COWDictMeta):
+ __count__ = 0
+
+class COWSetBase(object, metaclass = COWSetMeta):
+ __count__ = 0
+
+if __name__ == "__main__":
+ import sys
+ COWDictBase.__warn__ = sys.stderr
+ a = COWDictBase()
+ print("a", a)
+
+ a['a'] = 'a'
+ a['b'] = 'b'
+ a['dict'] = {}
+
+ b = a.copy()
+ print("b", b)
+ b['c'] = 'b'
+
+ print()
+
+ print("a", a)
+ for x in a.iteritems():
+ print(x)
+ print("--")
+ print("b", b)
+ for x in b.iteritems():
+ print(x)
+ print()
+
+ b['dict']['a'] = 'b'
+ b['a'] = 'c'
+
+ print("a", a)
+ for x in a.iteritems():
+ print(x)
+ print("--")
+ print("b", b)
+ for x in b.iteritems():
+ print(x)
+ print()
+
+ try:
+ b['dict2']
+ except KeyError as e:
+ print("Okay!")
+
+ a['set'] = COWSetBase()
+ a['set'].add("o1")
+ a['set'].add("o1")
+ a['set'].add("o2")
+
+ print("a", a)
+ for x in a['set'].itervalues():
+ print(x)
+ print("--")
+ print("b", b)
+ for x in b['set'].itervalues():
+ print(x)
+ print()
+
+ b['set'].add('o3')
+
+ print("a", a)
+ for x in a['set'].itervalues():
+ print(x)
+ print("--")
+ print("b", b)
+ for x in b['set'].itervalues():
+ print(x)
+ print()
+
+ a['set2'] = set()
+ a['set2'].add("o1")
+ a['set2'].add("o1")
+ a['set2'].add("o2")
+
+ print("a", a)
+ for x in a.iteritems():
+ print(x)
+ print("--")
+ print("b", b)
+ for x in b.iteritems(readonly=True):
+ print(x)
+ print()
+
+ del b['b']
+ try:
+ print(b['b'])
+ except KeyError:
+ print("Yay! deleted key raises error")
+
+ if 'b' in b:
+ print("Boo!")
+ else:
+ print("Yay - has_key with delete works!")
+
+ print("a", a)
+ for x in a.iteritems():
+ print(x)
+ print("--")
+ print("b", b)
+ for x in b.iteritems(readonly=True):
+ print(x)
+ print()
+
+ b.__revertitem__('b')
+
+ print("a", a)
+ for x in a.iteritems():
+ print(x)
+ print("--")
+ print("b", b)
+ for x in b.iteritems(readonly=True):
+ print(x)
+ print()
+
+ b.__revertitem__('dict')
+ print("a", a)
+ for x in a.iteritems():
+ print(x)
+ print("--")
+ print("b", b)
+ for x in b.iteritems(readonly=True):
+ print(x)
+ print()
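For orientation, here is a minimal sketch of the copy-on-write behaviour the metaclasses above implement (illustrative only; it assumes bitbake's lib/ directory is on sys.path):

    from bb.COW import COWDictBase

    base = COWDictBase.copy()        # "copying" just creates a subclass
    base['greeting'] = 'hello'       # stored as a class attribute

    child = base.copy()              # O(1): no data is duplicated
    child['greeting'] = 'goodbye'    # shadows the parent's value

    assert base['greeting'] == 'hello'
    assert child['greeting'] == 'goodbye'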
diff --git a/poky/bitbake/lib/bb/__init__.py b/poky/bitbake/lib/bb/__init__.py
new file mode 100644
index 000000000..d24adb8ea
--- /dev/null
+++ b/poky/bitbake/lib/bb/__init__.py
@@ -0,0 +1,144 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# BitBake Build System Python Library
+#
+# Copyright (C) 2003 Holger Schurig
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# Based on Gentoo's portage.py.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+__version__ = "1.38.0"
+
+import sys
+if sys.version_info < (3, 4, 0):
+ raise RuntimeError("Sorry, python 3.4.0 or later is required for this version of bitbake")
+
+
+class BBHandledException(Exception):
+ """
+ The big dilemma for generic bitbake code is what information to give the user
+ when an exception occurs. Any exception inheriting this base exception class
+ has already provided information to the user via some 'fired' message type such as
+ an explicitly fired event using bb.fire, or a bb.error message. If bitbake
+ encounters an exception derived from this class, no backtrace or other information
+ will be given to the user; it is assumed the earlier event provided the relevant information.
+ """
+ pass
+
+import os
+import logging
+
+
+class NullHandler(logging.Handler):
+ def emit(self, record):
+ pass
+
+Logger = logging.getLoggerClass()
+class BBLogger(Logger):
+ def __init__(self, name):
+ if name.split(".")[0] == "BitBake":
+ self.debug = self.bbdebug
+ Logger.__init__(self, name)
+
+ def bbdebug(self, level, msg, *args, **kwargs):
+ return self.log(logging.DEBUG - level + 1, msg, *args, **kwargs)
+
+ def plain(self, msg, *args, **kwargs):
+ return self.log(logging.INFO + 1, msg, *args, **kwargs)
+
+ def verbose(self, msg, *args, **kwargs):
+ return self.log(logging.INFO - 1, msg, *args, **kwargs)
+
+logging.raiseExceptions = False
+logging.setLoggerClass(BBLogger)
+
+logger = logging.getLogger("BitBake")
+logger.addHandler(NullHandler())
+logger.setLevel(logging.DEBUG - 2)
+
+mainlogger = logging.getLogger("BitBake.Main")
+
+# This has to be imported after the setLoggerClass, as the import of bb.msg
+# can result in construction of the various loggers.
+import bb.msg
+
+from bb import fetch2 as fetch
+sys.modules['bb.fetch'] = sys.modules['bb.fetch2']
+
+# Messaging convenience functions
+def plain(*args):
+ mainlogger.plain(''.join(args))
+
+def debug(lvl, *args):
+ if isinstance(lvl, str):
+ mainlogger.warning("Passed invalid debug level '%s' to bb.debug", lvl)
+ args = (lvl,) + args
+ lvl = 1
+ mainlogger.debug(lvl, ''.join(args))
+
+def note(*args):
+ mainlogger.info(''.join(args))
+
+def warn(*args):
+ mainlogger.warning(''.join(args))
+
+def error(*args, **kwargs):
+ mainlogger.error(''.join(args), extra=kwargs)
+
+def fatal(*args, **kwargs):
+ mainlogger.critical(''.join(args), extra=kwargs)
+ raise BBHandledException()
+
+def deprecated(func, name=None, advice=""):
+ """This is a decorator which can be used to mark functions
+ as deprecated. It will result in a warning being emitted
+ when the function is used."""
+ import warnings
+
+ if advice:
+ advice = ": %s" % advice
+ if name is None:
+ name = func.__name__
+
+ def newFunc(*args, **kwargs):
+ warnings.warn("Call to deprecated function %s%s." % (name,
+ advice),
+ category=DeprecationWarning,
+ stacklevel=2)
+ return func(*args, **kwargs)
+ newFunc.__name__ = func.__name__
+ newFunc.__doc__ = func.__doc__
+ newFunc.__dict__.update(func.__dict__)
+ return newFunc
+
+# For compatibility
+def deprecate_import(current, modulename, fromlist, renames = None):
+ """Import objects from one module into another, wrapping them with a DeprecationWarning"""
+ import sys
+
+ module = __import__(modulename, fromlist = fromlist)
+ for position, objname in enumerate(fromlist):
+ obj = getattr(module, objname)
+ newobj = deprecated(obj, "{0}.{1}".format(current, objname),
+ "Please use {0}.{1} instead".format(modulename, objname))
+ if renames:
+ newname = renames[position]
+ else:
+ newname = objname
+
+ setattr(sys.modules[current], newname, newobj)
+
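A minimal sketch of how the convenience functions above map onto standard logging levels (illustrative only; it assumes bitbake's lib/ directory is on sys.path). Note that BBLogger.bbdebug() inverts its level argument, so bb.debug(n, ...) logs at logging.DEBUG - n + 1:

    import bb

    bb.plain("always shown")    # INFO + 1 on the "BitBake.Main" logger
    bb.note("parsed a recipe")  # INFO
    bb.debug(1, "cache hit")    # DEBUG (level 2 would log at DEBUG - 1)
    bb.warn("deprecated name")  # WARNING
    # bb.fatal("stop") logs at CRITICAL and raises BBHandledException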
diff --git a/poky/bitbake/lib/bb/build.py b/poky/bitbake/lib/bb/build.py
new file mode 100644
index 000000000..4631abdde
--- /dev/null
+++ b/poky/bitbake/lib/bb/build.py
@@ -0,0 +1,913 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# BitBake 'Build' implementation
+#
+# Core code for function execution and task handling in the
+# BitBake build tools.
+#
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# Based on Gentoo's portage.py.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import os
+import sys
+import logging
+import shlex
+import glob
+import time
+import stat
+import bb
+import bb.msg
+import bb.process
+import bb.progress
+from bb import data, event, utils
+
+bblogger = logging.getLogger('BitBake')
+logger = logging.getLogger('BitBake.Build')
+
+NULL = open(os.devnull, 'r+')
+
+__mtime_cache = {}
+
+def cached_mtime_noerror(f):
+ if f not in __mtime_cache:
+ try:
+ __mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
+ except OSError:
+ return 0
+ return __mtime_cache[f]
+
+def reset_cache():
+ global __mtime_cache
+ __mtime_cache = {}
+
+# When we execute a Python function, we'd like certain things
+# in all namespaces, hence we add them to __builtins__.
+# If we do not do this and use the exec globals, they will
+# not be available to subfunctions.
+if hasattr(__builtins__, '__setitem__'):
+ builtins = __builtins__
+else:
+ builtins = __builtins__.__dict__
+
+builtins['bb'] = bb
+builtins['os'] = os
+
+class FuncFailed(Exception):
+ def __init__(self, name = None, logfile = None):
+ self.logfile = logfile
+ self.name = name
+ if name:
+ self.msg = 'Function failed: %s' % name
+ else:
+ self.msg = "Function failed"
+
+ def __str__(self):
+ if self.logfile and os.path.exists(self.logfile):
+ msg = ("%s (log file is located at %s)" %
+ (self.msg, self.logfile))
+ else:
+ msg = self.msg
+ return msg
+
+class TaskBase(event.Event):
+ """Base class for task events"""
+
+ def __init__(self, t, logfile, d):
+ self._task = t
+ self._package = d.getVar("PF")
+ self._mc = d.getVar("BB_CURRENT_MC")
+ self.taskfile = d.getVar("FILE")
+ self.taskname = self._task
+ self.logfile = logfile
+ self.time = time.time()
+ event.Event.__init__(self)
+ self._message = "recipe %s: task %s: %s" % (d.getVar("PF"), t, self.getDisplayName())
+
+ def getTask(self):
+ return self._task
+
+ def setTask(self, task):
+ self._task = task
+
+ def getDisplayName(self):
+ return bb.event.getName(self)[4:]
+
+ task = property(getTask, setTask, None, "task property")
+
+class TaskStarted(TaskBase):
+ """Task execution started"""
+ def __init__(self, t, logfile, taskflags, d):
+ super(TaskStarted, self).__init__(t, logfile, d)
+ self.taskflags = taskflags
+
+class TaskSucceeded(TaskBase):
+ """Task execution completed"""
+
+class TaskFailed(TaskBase):
+ """Task execution failed"""
+
+ def __init__(self, task, logfile, metadata, errprinted = False):
+ self.errprinted = errprinted
+ super(TaskFailed, self).__init__(task, logfile, metadata)
+
+class TaskFailedSilent(TaskBase):
+ """Task execution failed (silently)"""
+ def getDisplayName(self):
+ # Don't need to tell the user it was silent
+ return "Failed"
+
+class TaskInvalid(TaskBase):
+
+ def __init__(self, task, metadata):
+ super(TaskInvalid, self).__init__(task, None, metadata)
+ self._message = "No such task '%s'" % task
+
+class TaskProgress(event.Event):
+ """
+ Task made some progress that could be reported to the user, usually in
+ the form of a progress bar or similar.
+ NOTE: this class does not inherit from TaskBase since it doesn't need
+ to - it's fired within the task context itself, so we don't have any of
+ the context information that you do in the case of the other events.
+ The event PID can be used to determine which task it came from.
+ The progress value is normally 0-100, but can also be negative
+ indicating that progress has been made but we aren't able to determine
+ how much.
+ The rate is optional, this is simply an extra string to display to the
+ user if specified.
+ """
+ def __init__(self, progress, rate=None):
+ self.progress = progress
+ self.rate = rate
+ event.Event.__init__(self)
+
+
+class LogTee(object):
+ def __init__(self, logger, outfile):
+ self.outfile = outfile
+ self.logger = logger
+ self.name = self.outfile.name
+
+ def write(self, string):
+ self.logger.plain(string)
+ self.outfile.write(string)
+
+ def __enter__(self):
+ self.outfile.__enter__()
+ return self
+
+ def __exit__(self, *excinfo):
+ self.outfile.__exit__(*excinfo)
+
+ def __repr__(self):
+ return '<LogTee {0}>'.format(self.name)
+ def flush(self):
+ self.outfile.flush()
+
+#
+# pythonexception allows the python exceptions generated to be raised
+# as the real exceptions (not FuncFailed) and without a backtrace at the
+# origin of the failure.
+#
+def exec_func(func, d, dirs = None, pythonexception=False):
+ """Execute a BB 'function'"""
+
+ try:
+ oldcwd = os.getcwd()
+ except:
+ oldcwd = None
+
+ flags = d.getVarFlags(func)
+ cleandirs = flags.get('cleandirs') if flags else None
+ if cleandirs:
+ for cdir in d.expand(cleandirs).split():
+ bb.utils.remove(cdir, True)
+ bb.utils.mkdirhier(cdir)
+
+ if flags and dirs is None:
+ dirs = flags.get('dirs')
+ if dirs:
+ dirs = d.expand(dirs).split()
+
+ if dirs:
+ for adir in dirs:
+ bb.utils.mkdirhier(adir)
+ adir = dirs[-1]
+ else:
+ adir = None
+
+ body = d.getVar(func, False)
+ if not body:
+ if body is None:
+ logger.warning("Function %s doesn't exist", func)
+ return
+
+ ispython = flags.get('python')
+
+ lockflag = flags.get('lockfiles')
+ if lockflag:
+ lockfiles = [f for f in d.expand(lockflag).split()]
+ else:
+ lockfiles = None
+
+ tempdir = d.getVar('T')
+
+ # The 'or func' fallback allows functions to be executed outside of
+ # the normal task set, such as buildhistory
+ task = d.getVar('BB_RUNTASK') or func
+ if task == func:
+ taskfunc = task
+ else:
+ taskfunc = "%s.%s" % (task, func)
+
+ runfmt = d.getVar('BB_RUNFMT') or "run.{func}.{pid}"
+ runfn = runfmt.format(taskfunc=taskfunc, task=task, func=func, pid=os.getpid())
+ runfile = os.path.join(tempdir, runfn)
+ bb.utils.mkdirhier(os.path.dirname(runfile))
+
+ # Set up the courtesy link to the runfn, only for tasks.
+ # We create the link just before the run script is created;
+ # if we created it after and the run script failed, the link
+ # would never be created as an exception would be raised first.
+ if task == func:
+ runlink = os.path.join(tempdir, 'run.{0}'.format(task))
+ if runlink:
+ bb.utils.remove(runlink)
+
+ try:
+ os.symlink(runfn, runlink)
+ except OSError:
+ pass
+
+ with bb.utils.fileslocked(lockfiles):
+ if ispython:
+ exec_func_python(func, d, runfile, cwd=adir, pythonexception=pythonexception)
+ else:
+ exec_func_shell(func, d, runfile, cwd=adir)
+
+ try:
+ curcwd = os.getcwd()
+ except:
+ curcwd = None
+
+ if oldcwd and curcwd != oldcwd:
+ try:
+ bb.warn("Task %s changed cwd to %s" % (func, curcwd))
+ os.chdir(oldcwd)
+ except:
+ pass
+
+_functionfmt = """
+{function}(d)
+"""
+logformatter = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
+def exec_func_python(func, d, runfile, cwd=None, pythonexception=False):
+ """Execute a python BB 'function'"""
+
+ code = _functionfmt.format(function=func)
+ bb.utils.mkdirhier(os.path.dirname(runfile))
+ with open(runfile, 'w') as script:
+ bb.data.emit_func_python(func, script, d)
+
+ if cwd:
+ try:
+ olddir = os.getcwd()
+ except OSError as e:
+ bb.warn("%s: Cannot get cwd: %s" % (func, e))
+ olddir = None
+ os.chdir(cwd)
+
+ bb.debug(2, "Executing python function %s" % func)
+
+ try:
+ text = "def %s(d):\n%s" % (func, d.getVar(func, False))
+ fn = d.getVarFlag(func, "filename", False)
+ lineno = int(d.getVarFlag(func, "lineno", False))
+ bb.methodpool.insert_method(func, text, fn, lineno - 1)
+
+ comp = utils.better_compile(code, func, "exec_python_func() autogenerated")
+ utils.better_exec(comp, {"d": d}, code, "exec_python_func() autogenerated", pythonexception=pythonexception)
+ except (bb.parse.SkipRecipe, bb.build.FuncFailed):
+ raise
+ except:
+ if pythonexception:
+ raise
+ raise FuncFailed(func, None)
+ finally:
+ bb.debug(2, "Python function %s finished" % func)
+
+ if cwd and olddir:
+ try:
+ os.chdir(olddir)
+ except OSError as e:
+ bb.warn("%s: Cannot restore cwd %s: %s" % (func, olddir, e))
+
+def shell_trap_code():
+ return '''#!/bin/sh\n
+# Emit a useful diagnostic if something fails:
+bb_exit_handler() {
+ ret=$?
+ case $ret in
+ 0) ;;
+ *) case $BASH_VERSION in
+ "") echo "WARNING: exit code $ret from a shell command.";;
+ *) echo "WARNING: ${BASH_SOURCE[0]}:${BASH_LINENO[0]} exit $ret from '$BASH_COMMAND'";;
+ esac
+ exit $ret
+ esac
+}
+trap 'bb_exit_handler' 0
+set -e
+'''
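+# The trap installed above makes the emitted script print a diagnostic
+# for any command that exits non-zero, including file/line detail when
+# the script happens to run under bash.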
+
+def exec_func_shell(func, d, runfile, cwd=None):
+ """Execute a shell function from the metadata
+
+ Note on directory behavior. The 'dirs' varflag should contain a list
+ of the directories you need created prior to execution. The last
+ item in the list is where we will chdir/cd to.
+ """
+
+ # Don't let the emitted shell script override PWD
+ d.delVarFlag('PWD', 'export')
+
+ with open(runfile, 'w') as script:
+ script.write(shell_trap_code())
+
+ bb.data.emit_func(func, script, d)
+
+ if bb.msg.loggerVerboseLogs:
+ script.write("set -x\n")
+ if cwd:
+ script.write("cd '%s'\n" % cwd)
+ script.write("%s\n" % func)
+ script.write('''
+# cleanup
+ret=$?
+trap '' 0
+exit $ret
+''')
+
+ os.chmod(runfile, 0o775)
+
+ cmd = runfile
+ if d.getVarFlag(func, 'fakeroot', False):
+ fakerootcmd = d.getVar('FAKEROOT')
+ if fakerootcmd:
+ cmd = [fakerootcmd, runfile]
+
+ if bb.msg.loggerDefaultVerbose:
+ logfile = LogTee(logger, sys.stdout)
+ else:
+ logfile = sys.stdout
+
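+ # The 'progress' varflag selects how a task's output is scraped for
+ # progress reporting: 'percent', 'percent:<regex>' or 'outof:<regex>'.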
+ progress = d.getVarFlag(func, 'progress')
+ if progress:
+ if progress == 'percent':
+ # Use default regex
+ logfile = bb.progress.BasicProgressHandler(d, outfile=logfile)
+ elif progress.startswith('percent:'):
+ # Use specified regex
+ logfile = bb.progress.BasicProgressHandler(d, regex=progress.split(':', 1)[1], outfile=logfile)
+ elif progress.startswith('outof:'):
+ # Use specified regex
+ logfile = bb.progress.OutOfProgressHandler(d, regex=progress.split(':', 1)[1], outfile=logfile)
+ else:
+ bb.warn('%s: invalid task progress varflag value "%s", ignoring' % (func, progress))
+
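+ # Shell tasks can write NUL-terminated "<command> <value>" messages
+ # (e.g. "bbwarn some text") into a FIFO; readfifo() below parses them
+ # and routes each one to the matching bb logging function.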
+ fifobuffer = bytearray()
+ def readfifo(data):
+ nonlocal fifobuffer
+ fifobuffer.extend(data)
+ while fifobuffer:
+ message, token, nextmsg = fifobuffer.partition(b"\00")
+ if token:
+ splitval = message.split(b' ', 1)
+ cmd = splitval[0].decode("utf-8")
+ if len(splitval) > 1:
+ value = splitval[1].decode("utf-8")
+ else:
+ value = ''
+ if cmd == 'bbplain':
+ bb.plain(value)
+ elif cmd == 'bbnote':
+ bb.note(value)
+ elif cmd == 'bbwarn':
+ bb.warn(value)
+ elif cmd == 'bberror':
+ bb.error(value)
+ elif cmd == 'bbfatal':
+ # The caller will call exit themselves, so bb.error() is
+ # what we want here rather than bb.fatal()
+ bb.error(value)
+ elif cmd == 'bbfatal_log':
+ bb.error(value, forcelog=True)
+ elif cmd == 'bbdebug':
+ splitval = value.split(' ', 1)
+ level = int(splitval[0])
+ value = splitval[1]
+ bb.debug(level, value)
+ else:
+ bb.warn("Unrecognised command '%s' on FIFO" % cmd)
+ fifobuffer = nextmsg
+ else:
+ break
+
+ tempdir = d.getVar('T')
+ fifopath = os.path.join(tempdir, 'fifo.%s' % os.getpid())
+ if os.path.exists(fifopath):
+ os.unlink(fifopath)
+ os.mkfifo(fifopath)
+ with open(fifopath, 'r+b', buffering=0) as fifo:
+ try:
+ bb.debug(2, "Executing shell function %s" % func)
+
+ try:
+ with open(os.devnull, 'r+') as stdin:
+ bb.process.run(cmd, shell=False, stdin=stdin, log=logfile, extrafiles=[(fifo,readfifo)])
+ except bb.process.CmdError:
+ logfn = d.getVar('BB_LOGFILE')
+ raise FuncFailed(func, logfn)
+ finally:
+ os.unlink(fifopath)
+
+ bb.debug(2, "Shell function %s finished" % func)
+
+def _task_data(fn, task, d):
+ localdata = bb.data.createCopy(d)
+ localdata.setVar('BB_FILENAME', fn)
+ localdata.setVar('BB_CURRENTTASK', task[3:])
+ localdata.setVar('OVERRIDES', 'task-%s:%s' %
+ (task[3:].replace('_', '-'), d.getVar('OVERRIDES', False)))
+ localdata.finalize()
+ bb.data.expandKeys(localdata)
+ return localdata
+
+def _exec_task(fn, task, d, quieterr):
+ """Execute a BB 'task'
+
+ Execution of a task involves a bit more setup than executing a function,
+ running it with its own local metadata, and with some useful variables set.
+ """
+ if not d.getVarFlag(task, 'task', False):
+ event.fire(TaskInvalid(task, d), d)
+ logger.error("No such task: %s" % task)
+ return 1
+
+ logger.debug(1, "Executing task %s", task)
+
+ localdata = _task_data(fn, task, d)
+ tempdir = localdata.getVar('T')
+ if not tempdir:
+ bb.fatal("T variable not set, unable to build")
+
+ # Change nice level if we're asked to
+ nice = localdata.getVar("BB_TASK_NICE_LEVEL")
+ if nice:
+ curnice = os.nice(0)
+ nice = int(nice) - curnice
+ newnice = os.nice(nice)
+ logger.debug(1, "Renice to %s " % newnice)
+ ionice = localdata.getVar("BB_TASK_IONICE_LEVEL")
+ if ionice:
+ try:
+ cls, prio = ionice.split(".", 1)
+ bb.utils.ioprio_set(os.getpid(), int(cls), int(prio))
+ except:
+ bb.warn("Invalid ionice level %s" % ionice)
+
+ bb.utils.mkdirhier(tempdir)
+
+ # Determine the logfile to generate
+ logfmt = localdata.getVar('BB_LOGFMT') or 'log.{task}.{pid}'
+ logbase = logfmt.format(task=task, pid=os.getpid())
+
+ # Document the order of the tasks...
+ logorder = os.path.join(tempdir, 'log.task_order')
+ try:
+ with open(logorder, 'a') as logorderfile:
+ logorderfile.write('{0} ({1}): {2}\n'.format(task, os.getpid(), logbase))
+ except OSError:
+ logger.exception("Opening log file '%s'", logorder)
+ pass
+
+ # Setup the courtesy link to the logfn
+ loglink = os.path.join(tempdir, 'log.{0}'.format(task))
+ logfn = os.path.join(tempdir, logbase)
+ if loglink:
+ bb.utils.remove(loglink)
+
+ try:
+ os.symlink(logbase, loglink)
+ except OSError:
+ pass
+
+ prefuncs = localdata.getVarFlag(task, 'prefuncs', expand=True)
+ postfuncs = localdata.getVarFlag(task, 'postfuncs', expand=True)
+
+ class ErrorCheckHandler(logging.Handler):
+ def __init__(self):
+ self.triggered = False
+ logging.Handler.__init__(self, logging.ERROR)
+ def emit(self, record):
+ if getattr(record, 'forcelog', False):
+ self.triggered = False
+ else:
+ self.triggered = True
+
+ # Handle logfiles
+ si = open('/dev/null', 'r')
+ try:
+ bb.utils.mkdirhier(os.path.dirname(logfn))
+ logfile = open(logfn, 'w')
+ except OSError:
+ logger.exception("Opening log file '%s'", logfn)
+ pass
+
+ # Dup the existing fds so we don't lose them
+ osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()]
+ oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()]
+ ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]
+
+ # Replace those fds with our own
+ os.dup2(si.fileno(), osi[1])
+ os.dup2(logfile.fileno(), oso[1])
+ os.dup2(logfile.fileno(), ose[1])
+
+ # Ensure Python logging goes to the logfile
+ handler = logging.StreamHandler(logfile)
+ handler.setFormatter(logformatter)
+ # Always enable full debug output into task logfiles
+ handler.setLevel(logging.DEBUG - 2)
+ bblogger.addHandler(handler)
+
+ errchk = ErrorCheckHandler()
+ bblogger.addHandler(errchk)
+
+ localdata.setVar('BB_LOGFILE', logfn)
+ localdata.setVar('BB_RUNTASK', task)
+ localdata.setVar('BB_TASK_LOGGER', bblogger)
+
+ flags = localdata.getVarFlags(task)
+
+ try:
+ try:
+ event.fire(TaskStarted(task, logfn, flags, localdata), localdata)
+ except (bb.BBHandledException, SystemExit):
+ return 1
+ except FuncFailed as exc:
+ logger.error(str(exc))
+ return 1
+
+ try:
+ for func in (prefuncs or '').split():
+ exec_func(func, localdata)
+ exec_func(task, localdata)
+ for func in (postfuncs or '').split():
+ exec_func(func, localdata)
+ except FuncFailed as exc:
+ if quieterr:
+ event.fire(TaskFailedSilent(task, logfn, localdata), localdata)
+ else:
+ errprinted = errchk.triggered
+ logger.error(str(exc))
+ event.fire(TaskFailed(task, logfn, localdata, errprinted), localdata)
+ return 1
+ except bb.BBHandledException:
+ event.fire(TaskFailed(task, logfn, localdata, True), localdata)
+ return 1
+ finally:
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ bblogger.removeHandler(handler)
+
+ # Restore the backup fds
+ os.dup2(osi[0], osi[1])
+ os.dup2(oso[0], oso[1])
+ os.dup2(ose[0], ose[1])
+
+ # Close the backup fds
+ os.close(osi[0])
+ os.close(oso[0])
+ os.close(ose[0])
+ si.close()
+
+ logfile.close()
+ if os.path.exists(logfn) and os.path.getsize(logfn) == 0:
+ logger.debug(2, "Zero size logfn %s, removing", logfn)
+ bb.utils.remove(logfn)
+ bb.utils.remove(loglink)
+ event.fire(TaskSucceeded(task, logfn, localdata), localdata)
+
+ if not localdata.getVarFlag(task, 'nostamp', False) and not localdata.getVarFlag(task, 'selfstamp', False):
+ make_stamp(task, localdata)
+
+ return 0
+
+def exec_task(fn, task, d, profile = False):
+ try:
+ quieterr = False
+ if d.getVarFlag(task, "quieterrors", False) is not None:
+ quieterr = True
+
+ if profile:
+ profname = "profile-%s.log" % (d.getVar("PN") + "-" + task)
+ try:
+ import cProfile as profile
+ except:
+ import profile
+ prof = profile.Profile()
+ ret = profile.Profile.runcall(prof, _exec_task, fn, task, d, quieterr)
+ prof.dump_stats(profname)
+ bb.utils.process_profilelog(profname)
+
+ return ret
+ else:
+ return _exec_task(fn, task, d, quieterr)
+
+ except Exception:
+ from traceback import format_exc
+ if not quieterr:
+ logger.error("Build of %s failed" % (task))
+ logger.error(format_exc())
+ failedevent = TaskFailed(task, None, d, True)
+ event.fire(failedevent, d)
+ return 1
+
+def stamp_internal(taskname, d, file_name, baseonly=False, noextra=False):
+ """
+ Internal stamp helper function
+ Makes sure the stamp directory exists
+ Returns the stamp path+filename
+
+ In the bitbake core, d can be a CacheData and file_name will be set.
+ When called in task context, d will be a data store, file_name will not be set
+ """
+ taskflagname = taskname
+ if taskname.endswith("_setscene") and taskname != "do_setscene":
+ taskflagname = taskname.replace("_setscene", "")
+
+ if file_name:
+ stamp = d.stamp[file_name]
+ extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
+ else:
+ stamp = d.getVar('STAMP')
+ file_name = d.getVar('BB_FILENAME')
+ extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info') or ""
+
+ if baseonly:
+ return stamp
+ if noextra:
+ extrainfo = ""
+
+ if not stamp:
+ return
+
+ stamp = bb.parse.siggen.stampfile(stamp, file_name, taskname, extrainfo)
+
+ stampdir = os.path.dirname(stamp)
+ if cached_mtime_noerror(stampdir) == 0:
+ bb.utils.mkdirhier(stampdir)
+
+ return stamp
+
+def stamp_cleanmask_internal(taskname, d, file_name):
+ """
+ Internal stamp helper function to generate stamp cleaning mask
+ Returns the stamp path+filename
+
+ In the bitbake core, d can be a CacheData and file_name will be set.
+ When called in task context, d will be a data store, file_name will not be set
+ """
+ taskflagname = taskname
+ if taskname.endswith("_setscene") and taskname != "do_setscene":
+ taskflagname = taskname.replace("_setscene", "")
+
+ if file_name:
+ stamp = d.stampclean[file_name]
+ extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
+ else:
+ stamp = d.getVar('STAMPCLEAN')
+ file_name = d.getVar('BB_FILENAME')
+ extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info') or ""
+
+ if not stamp:
+ return []
+
+ cleanmask = bb.parse.siggen.stampcleanmask(stamp, file_name, taskname, extrainfo)
+
+ return [cleanmask, cleanmask.replace(taskflagname, taskflagname + "_setscene")]
+
+def make_stamp(task, d, file_name = None):
+ """
+ Creates/updates a stamp for a given task
+ (d can be a data dict or dataCache)
+ """
+ cleanmask = stamp_cleanmask_internal(task, d, file_name)
+ for mask in cleanmask:
+ for name in glob.glob(mask):
+ # Preserve sigdata files in the stamps directory
+ if "sigdata" in name or "sigbasedata" in name:
+ continue
+ # Preserve taint files in the stamps directory
+ if name.endswith('.taint'):
+ continue
+ os.unlink(name)
+
+ stamp = stamp_internal(task, d, file_name)
+ # Remove the file and recreate to force timestamp
+ # change on broken NFS filesystems
+ if stamp:
+ bb.utils.remove(stamp)
+ open(stamp, "w").close()
+
+ # If we're in task context, write out a signature file for each task
+ # as it completes
+ if not task.endswith("_setscene") and task != "do_setscene" and not file_name:
+ stampbase = stamp_internal(task, d, None, True)
+ file_name = d.getVar('BB_FILENAME')
+ bb.parse.siggen.dump_sigtask(file_name, task, stampbase, True)
+
+def del_stamp(task, d, file_name = None):
+ """
+ Removes a stamp for a given task
+ (d can be a data dict or dataCache)
+ """
+ stamp = stamp_internal(task, d, file_name)
+ bb.utils.remove(stamp)
+
+def write_taint(task, d, file_name = None):
+ """
+ Creates a "taint" file which will force the specified task and its
+ dependents to be re-run the next time by influencing the value of its
+ taskhash.
+ (d can be a data dict or dataCache)
+ """
+ import uuid
+ if file_name:
+ taintfn = d.stamp[file_name] + '.' + task + '.taint'
+ else:
+ taintfn = d.getVar('STAMP') + '.' + task + '.taint'
+ bb.utils.mkdirhier(os.path.dirname(taintfn))
+ # The specific content of the taint file is not really important,
+ # we just need it to be random, so a random UUID is used
+ with open(taintfn, 'w') as taintf:
+ taintf.write(str(uuid.uuid4()))
+
+def stampfile(taskname, d, file_name = None, noextra=False):
+ """
+ Return the stamp for a given task
+ (d can be a data dict or dataCache)
+ """
+ return stamp_internal(taskname, d, file_name, noextra=noextra)
+
+def add_tasks(tasklist, d):
+ task_deps = d.getVar('_task_deps', False)
+ if not task_deps:
+ task_deps = {}
+ if not 'tasks' in task_deps:
+ task_deps['tasks'] = []
+ if not 'parents' in task_deps:
+ task_deps['parents'] = {}
+
+ for task in tasklist:
+ task = d.expand(task)
+
+ d.setVarFlag(task, 'task', 1)
+
+ if not task in task_deps['tasks']:
+ task_deps['tasks'].append(task)
+
+ flags = d.getVarFlags(task)
+ def getTask(name):
+ if not name in task_deps:
+ task_deps[name] = {}
+ if name in flags:
+ deptask = d.expand(flags[name])
+ task_deps[name][task] = deptask
+ getTask('depends')
+ getTask('rdepends')
+ getTask('deptask')
+ getTask('rdeptask')
+ getTask('recrdeptask')
+ getTask('recideptask')
+ getTask('nostamp')
+ getTask('fakeroot')
+ getTask('noexec')
+ getTask('umask')
+ task_deps['parents'][task] = []
+ if 'deps' in flags:
+ for dep in flags['deps']:
+ dep = d.expand(dep)
+ task_deps['parents'][task].append(dep)
+
+ # don't assume holding a reference
+ d.setVar('_task_deps', task_deps)
+
+def addtask(task, before, after, d):
+ if task[:3] != "do_":
+ task = "do_" + task
+
+ d.setVarFlag(task, "task", 1)
+ bbtasks = d.getVar('__BBTASKS', False) or []
+ if task not in bbtasks:
+ bbtasks.append(task)
+ d.setVar('__BBTASKS', bbtasks)
+
+ existing = d.getVarFlag(task, "deps", False) or []
+ if after is not None:
+ # set up deps for function
+ for entry in after.split():
+ if entry not in existing:
+ existing.append(entry)
+ d.setVarFlag(task, "deps", existing)
+ if before is not None:
+ # set up things that depend on this func
+ for entry in before.split():
+ existing = d.getVarFlag(entry, "deps", False) or []
+ if task not in existing:
+ d.setVarFlag(entry, "deps", [task] + existing)
+
+def deltask(task, d):
+ if task[:3] != "do_":
+ task = "do_" + task
+
+ bbtasks = d.getVar('__BBTASKS', False) or []
+ if task in bbtasks:
+ bbtasks.remove(task)
+ d.delVarFlag(task, 'task')
+ d.setVar('__BBTASKS', bbtasks)
+
+ d.delVarFlag(task, 'deps')
+ for bbtask in d.getVar('__BBTASKS', False) or []:
+ deps = d.getVarFlag(bbtask, 'deps', False) or []
+ if task in deps:
+ deps.remove(task)
+ d.setVarFlag(bbtask, 'deps', deps)
+
+def preceedtask(task, with_recrdeptasks, d):
+ """
+ Returns a set of tasks in the current recipe which were specified as
+ a precondition by the task itself ("after") or which listed themselves
+ as a precondition ("before"). Preceding tasks specified via the
+ "recrdeptask" flag are included in the result only if requested. Beware
+ that this may lead to the task itself being listed.
+ """
+ preceed = set()
+
+ # Ignore tasks which don't exist
+ tasks = d.getVar('__BBTASKS', False)
+ if task not in tasks:
+ return preceed
+
+ preceed.update(d.getVarFlag(task, 'deps') or [])
+ if with_recrdeptasks:
+ recrdeptask = d.getVarFlag(task, 'recrdeptask')
+ if recrdeptask:
+ preceed.update(recrdeptask.split())
+ return preceed
+
+def tasksbetween(task_start, task_end, d):
+ """
+ Return the list of tasks between two tasks in the current recipe,
+ where task_start is the task to start at and task_end is the task to end at
+ (and task_end has a dependency chain back to task_start).
+ """
+ outtasks = []
+ tasks = list(filter(lambda k: d.getVarFlag(k, "task"), d.keys()))
+ def follow_chain(task, endtask, chain=None):
+ if not chain:
+ chain = []
+ chain.append(task)
+ for othertask in tasks:
+ if othertask == task:
+ continue
+ if task == endtask:
+ for ctask in chain:
+ if ctask not in outtasks:
+ outtasks.append(ctask)
+ else:
+ deps = d.getVarFlag(othertask, 'deps', False)
+ if task in deps:
+ follow_chain(othertask, endtask, chain)
+ chain.pop()
+ follow_chain(task_start, task_end)
+ return outtasks
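As a rough usage sketch of the task bookkeeping above (hypothetical standalone use; normally the recipe parser calls addtask() while handling 'addtask' directives):

    import bb.data
    import bb.build

    d = bb.data.init()
    bb.build.addtask('fetch', None, None, d)
    bb.build.addtask('unpack', None, 'do_fetch', d)  # runs after do_fetch

    # do_unpack now carries do_fetch in its 'deps' varflag
    assert 'do_fetch' in bb.build.preceedtask('do_unpack', False, d)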
diff --git a/poky/bitbake/lib/bb/cache.py b/poky/bitbake/lib/bb/cache.py
new file mode 100644
index 000000000..168a77ac0
--- /dev/null
+++ b/poky/bitbake/lib/bb/cache.py
@@ -0,0 +1,891 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# BitBake Cache implementation
+#
+# Caching of bitbake variables before task execution
+
+# Copyright (C) 2006 Richard Purdie
+# Copyright (C) 2012 Intel Corporation
+
+# but small sections based on code from bin/bitbake:
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer
+# Copyright (C) 2005 Holger Hans Peter Freyther
+# Copyright (C) 2005 ROAD GmbH
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import sys
+import logging
+import pickle
+from collections import defaultdict
+import bb.utils
+
+logger = logging.getLogger("BitBake.Cache")
+
+__cache_version__ = "151"
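+# Bumped whenever the on-disk cache format changes; load_cachefile()
+# below discards and rebuilds the cache on a version mismatch.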
+
+def getCacheFile(path, filename, data_hash):
+ return os.path.join(path, filename + "." + data_hash)
+
+# RecipeInfoCommon defines common methods for retrieving data
+# from recipe metadata for the caches. CoreRecipeInfo, as well as any
+# extra RecipeInfo classes, needs to inherit from this class.
+class RecipeInfoCommon(object):
+
+ @classmethod
+ def listvar(cls, var, metadata):
+ return cls.getvar(var, metadata).split()
+
+ @classmethod
+ def intvar(cls, var, metadata):
+ return int(cls.getvar(var, metadata) or 0)
+
+ @classmethod
+ def depvar(cls, var, metadata):
+ return bb.utils.explode_deps(cls.getvar(var, metadata))
+
+ @classmethod
+ def pkgvar(cls, var, packages, metadata):
+ return dict((pkg, cls.depvar("%s_%s" % (var, pkg), metadata))
+ for pkg in packages)
+
+ @classmethod
+ def taskvar(cls, var, tasks, metadata):
+ return dict((task, cls.getvar("%s_task-%s" % (var, task), metadata))
+ for task in tasks)
+
+ @classmethod
+ def flaglist(cls, flag, varlist, metadata, squash=False):
+ out_dict = dict((var, metadata.getVarFlag(var, flag))
+ for var in varlist)
+ if squash:
+ return dict((k,v) for (k,v) in out_dict.items() if v)
+ else:
+ return out_dict
+
+ @classmethod
+ def getvar(cls, var, metadata, expand = True):
+ return metadata.getVar(var, expand) or ''
+
+
+class CoreRecipeInfo(RecipeInfoCommon):
+ __slots__ = ()
+
+ cachefile = "bb_cache.dat"
+
+ def __init__(self, filename, metadata):
+ self.file_depends = metadata.getVar('__depends', False)
+ self.timestamp = bb.parse.cached_mtime(filename)
+ self.variants = self.listvar('__VARIANTS', metadata) + ['']
+ self.appends = self.listvar('__BBAPPEND', metadata)
+ self.nocache = self.getvar('BB_DONT_CACHE', metadata)
+
+ self.skipreason = self.getvar('__SKIPPED', metadata)
+ if self.skipreason:
+ self.pn = self.getvar('PN', metadata) or bb.parse.BBHandler.vars_from_file(filename,metadata)[0]
+ self.skipped = True
+ self.provides = self.depvar('PROVIDES', metadata)
+ self.rprovides = self.depvar('RPROVIDES', metadata)
+ return
+
+ self.tasks = metadata.getVar('__BBTASKS', False)
+
+ self.pn = self.getvar('PN', metadata)
+ self.packages = self.listvar('PACKAGES', metadata)
+ if not self.packages:
+ self.packages.append(self.pn)
+
+ self.basetaskhashes = self.taskvar('BB_BASEHASH', self.tasks, metadata)
+ self.hashfilename = self.getvar('BB_HASHFILENAME', metadata)
+
+ self.task_deps = metadata.getVar('_task_deps', False) or {'tasks': [], 'parents': {}}
+
+ self.skipped = False
+ self.pe = self.getvar('PE', metadata)
+ self.pv = self.getvar('PV', metadata)
+ self.pr = self.getvar('PR', metadata)
+ self.defaultpref = self.intvar('DEFAULT_PREFERENCE', metadata)
+ self.not_world = self.getvar('EXCLUDE_FROM_WORLD', metadata)
+ self.stamp = self.getvar('STAMP', metadata)
+ self.stampclean = self.getvar('STAMPCLEAN', metadata)
+ self.stamp_extrainfo = self.flaglist('stamp-extra-info', self.tasks, metadata)
+ self.file_checksums = self.flaglist('file-checksums', self.tasks, metadata, True)
+ self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata)
+ self.depends = self.depvar('DEPENDS', metadata)
+ self.provides = self.depvar('PROVIDES', metadata)
+ self.rdepends = self.depvar('RDEPENDS', metadata)
+ self.rprovides = self.depvar('RPROVIDES', metadata)
+ self.rrecommends = self.depvar('RRECOMMENDS', metadata)
+ self.rprovides_pkg = self.pkgvar('RPROVIDES', self.packages, metadata)
+ self.rdepends_pkg = self.pkgvar('RDEPENDS', self.packages, metadata)
+ self.rrecommends_pkg = self.pkgvar('RRECOMMENDS', self.packages, metadata)
+ self.inherits = self.getvar('__inherit_cache', metadata, expand=False)
+ self.fakerootenv = self.getvar('FAKEROOTENV', metadata)
+ self.fakerootdirs = self.getvar('FAKEROOTDIRS', metadata)
+ self.fakerootnoenv = self.getvar('FAKEROOTNOENV', metadata)
+ self.extradepsfunc = self.getvar('calculate_extra_depends', metadata)
+
+ @classmethod
+ def init_cacheData(cls, cachedata):
+ # CacheData in Core RecipeInfo Class
+ cachedata.task_deps = {}
+ cachedata.pkg_fn = {}
+ cachedata.pkg_pn = defaultdict(list)
+ cachedata.pkg_pepvpr = {}
+ cachedata.pkg_dp = {}
+
+ cachedata.stamp = {}
+ cachedata.stampclean = {}
+ cachedata.stamp_extrainfo = {}
+ cachedata.file_checksums = {}
+ cachedata.fn_provides = {}
+ cachedata.pn_provides = defaultdict(list)
+ cachedata.all_depends = []
+
+ cachedata.deps = defaultdict(list)
+ cachedata.packages = defaultdict(list)
+ cachedata.providers = defaultdict(list)
+ cachedata.rproviders = defaultdict(list)
+ cachedata.packages_dynamic = defaultdict(list)
+
+ cachedata.rundeps = defaultdict(lambda: defaultdict(list))
+ cachedata.runrecs = defaultdict(lambda: defaultdict(list))
+ cachedata.possible_world = []
+ cachedata.universe_target = []
+ cachedata.hashfn = {}
+
+ cachedata.basetaskhash = {}
+ cachedata.inherits = {}
+ cachedata.fakerootenv = {}
+ cachedata.fakerootnoenv = {}
+ cachedata.fakerootdirs = {}
+ cachedata.extradepsfunc = {}
+
+ def add_cacheData(self, cachedata, fn):
+ cachedata.task_deps[fn] = self.task_deps
+ cachedata.pkg_fn[fn] = self.pn
+ cachedata.pkg_pn[self.pn].append(fn)
+ cachedata.pkg_pepvpr[fn] = (self.pe, self.pv, self.pr)
+ cachedata.pkg_dp[fn] = self.defaultpref
+ cachedata.stamp[fn] = self.stamp
+ cachedata.stampclean[fn] = self.stampclean
+ cachedata.stamp_extrainfo[fn] = self.stamp_extrainfo
+ cachedata.file_checksums[fn] = self.file_checksums
+
+ provides = [self.pn]
+ for provide in self.provides:
+ if provide not in provides:
+ provides.append(provide)
+ cachedata.fn_provides[fn] = provides
+
+ for provide in provides:
+ cachedata.providers[provide].append(fn)
+ if provide not in cachedata.pn_provides[self.pn]:
+ cachedata.pn_provides[self.pn].append(provide)
+
+ for dep in self.depends:
+ if dep not in cachedata.deps[fn]:
+ cachedata.deps[fn].append(dep)
+ if dep not in cachedata.all_depends:
+ cachedata.all_depends.append(dep)
+
+ rprovides = self.rprovides
+ for package in self.packages:
+ cachedata.packages[package].append(fn)
+ rprovides += self.rprovides_pkg[package]
+
+ for rprovide in rprovides:
+ if fn not in cachedata.rproviders[rprovide]:
+ cachedata.rproviders[rprovide].append(fn)
+
+ for package in self.packages_dynamic:
+ cachedata.packages_dynamic[package].append(fn)
+
+ # Build hash of runtime depends and recommends
+ for package in self.packages:
+ cachedata.rundeps[fn][package] = list(self.rdepends) + self.rdepends_pkg[package]
+ cachedata.runrecs[fn][package] = list(self.rrecommends) + self.rrecommends_pkg[package]
+
+ # Collect files we may need for possible world-dep
+ # calculations
+ if self.not_world:
+ logger.debug(1, "EXCLUDE FROM WORLD: %s", fn)
+ else:
+ cachedata.possible_world.append(fn)
+
+ # create a collection of all targets for sanity checking
+ # tasks, such as upstream versions, license, and tools for
+ # task and image creation.
+ cachedata.universe_target.append(self.pn)
+
+ cachedata.hashfn[fn] = self.hashfilename
+ for task, taskhash in self.basetaskhashes.items():
+ identifier = '%s.%s' % (fn, task)
+ cachedata.basetaskhash[identifier] = taskhash
+
+ cachedata.inherits[fn] = self.inherits
+ cachedata.fakerootenv[fn] = self.fakerootenv
+ cachedata.fakerootnoenv[fn] = self.fakerootnoenv
+ cachedata.fakerootdirs[fn] = self.fakerootdirs
+ cachedata.extradepsfunc[fn] = self.extradepsfunc
+
+def virtualfn2realfn(virtualfn):
+ """
+ Convert a virtual file name to a real one + the associated subclass keyword
+ """
+ mc = ""
+ if virtualfn.startswith('multiconfig:'):
+ elems = virtualfn.split(':')
+ mc = elems[1]
+ virtualfn = ":".join(elems[2:])
+
+ fn = virtualfn
+ cls = ""
+ if virtualfn.startswith('virtual:'):
+ elems = virtualfn.split(':')
+ cls = ":".join(elems[1:-1])
+ fn = elems[-1]
+
+ return (fn, cls, mc)
+
+def realfn2virtual(realfn, cls, mc):
+ """
+ Convert a real filename + the associated subclass keyword to a virtual filename
+ """
+ if cls:
+ realfn = "virtual:" + cls + ":" + realfn
+ if mc:
+ realfn = "multiconfig:" + mc + ":" + realfn
+ return realfn
+
+def variant2virtual(realfn, variant):
+ """
+ Convert a real filename + the associated subclass keyword to a virtual filename
+ """
+ if variant == "":
+ return realfn
+ if variant.startswith("multiconfig:"):
+ elems = variant.split(":")
+ if elems[2]:
+ return "multiconfig:" + elems[1] + ":virtual:" + ":".join(elems[2:]) + ":" + realfn
+ return "multiconfig:" + elems[1] + ":" + realfn
+ return "virtual:" + variant + ":" + realfn
+
+def parse_recipe(bb_data, bbfile, appends, mc=''):
+ """
+ Parse a recipe
+ """
+
+ chdir_back = False
+
+ bb_data.setVar("__BBMULTICONFIG", mc)
+
+ # expand tmpdir to include this topdir
+ bb_data.setVar('TMPDIR', bb_data.getVar('TMPDIR') or "")
+ bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
+ oldpath = os.path.abspath(os.getcwd())
+ bb.parse.cached_mtime_noerror(bbfile_loc)
+
+ # The ConfHandler first checks whether there is a TOPDIR and, if
+ # not, calls getcwd().
+ # Previously, we chdir()ed to bbfile_loc, called the handler
+ # and finally chdir()ed back, a couple of thousand times. We now
+ # just fill in TOPDIR to point to bbfile_loc if there is no TOPDIR yet.
+ if not bb_data.getVar('TOPDIR', False):
+ chdir_back = True
+ bb_data.setVar('TOPDIR', bbfile_loc)
+ try:
+ if appends:
+ bb_data.setVar('__BBAPPEND', " ".join(appends))
+ bb_data = bb.parse.handle(bbfile, bb_data)
+ if chdir_back:
+ os.chdir(oldpath)
+ return bb_data
+ except:
+ if chdir_back:
+ os.chdir(oldpath)
+ raise
+
+
+
+class NoCache(object):
+
+ def __init__(self, databuilder):
+ self.databuilder = databuilder
+ self.data = databuilder.data
+
+ def loadDataFull(self, virtualfn, appends):
+ """
+ Return a complete set of data for fn.
+ To do this, we need to parse the file.
+ """
+ logger.debug(1, "Parsing %s (full)" % virtualfn)
+ (fn, virtual, mc) = virtualfn2realfn(virtualfn)
+ bb_data = self.load_bbfile(virtualfn, appends, virtonly=True)
+ return bb_data[virtual]
+
+ def load_bbfile(self, bbfile, appends, virtonly = False):
+ """
+ Load and parse one .bb build file
+ Return the data and whether parsing resulted in the file being skipped
+ """
+
+ if virtonly:
+ (bbfile, virtual, mc) = virtualfn2realfn(bbfile)
+ bb_data = self.databuilder.mcdata[mc].createCopy()
+ bb_data.setVar("__ONLYFINALISE", virtual or "default")
+ datastores = parse_recipe(bb_data, bbfile, appends, mc)
+ return datastores
+
+ bb_data = self.data.createCopy()
+ datastores = parse_recipe(bb_data, bbfile, appends)
+
+ for mc in self.databuilder.mcdata:
+ if not mc:
+ continue
+ bb_data = self.databuilder.mcdata[mc].createCopy()
+ newstores = parse_recipe(bb_data, bbfile, appends, mc)
+ for ns in newstores:
+ datastores["multiconfig:%s:%s" % (mc, ns)] = newstores[ns]
+
+ return datastores
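+
+    # Usage sketch (hypothetical path; assumes a configured CookerDataBuilder):
+    #   nocache = NoCache(databuilder)
+    #   datastores = nocache.load_bbfile("/path/to/foo.bb", appends=[])
+    #   # -> keys: "" for the base recipe, one per variant (e.g. "native" for
+    #   #    BBCLASSEXTEND = "native"), plus "multiconfig:<mc>:..." entries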
+
+class Cache(NoCache):
+ """
+ BitBake Cache implementation
+ """
+
+ def __init__(self, databuilder, data_hash, caches_array):
+ super().__init__(databuilder)
+ data = databuilder.data
+
+ # Pass caches_array information into Cache Constructor
+ # It will be used later for deciding whether we
+ # need extra cache file dump/load support
+ self.caches_array = caches_array
+ self.cachedir = data.getVar("CACHE")
+ self.clean = set()
+ self.checked = set()
+ self.depends_cache = {}
+ self.data_fn = None
+ self.cacheclean = True
+ self.data_hash = data_hash
+
+ if self.cachedir in [None, '']:
+ self.has_cache = False
+ logger.info("Not using a cache. "
+ "Set CACHE = <directory> to enable.")
+ return
+
+ self.has_cache = True
+ self.cachefile = getCacheFile(self.cachedir, "bb_cache.dat", self.data_hash)
+
+ logger.debug(1, "Cache dir: %s", self.cachedir)
+ bb.utils.mkdirhier(self.cachedir)
+
+ cache_ok = True
+ if self.caches_array:
+ for cache_class in self.caches_array:
+ cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
+ cache_ok = cache_ok and os.path.exists(cachefile)
+ cache_class.init_cacheData(self)
+ if cache_ok:
+ self.load_cachefile()
+ elif os.path.isfile(self.cachefile):
+ logger.info("Out of date cache found, rebuilding...")
+ else:
+ logger.debug(1, "Cache file %s not found, building..." % self.cachefile)
+
+ def load_cachefile(self):
+ cachesize = 0
+ previous_progress = 0
+ previous_percent = 0
+
+ # Calculate the correct cachesize of all those cache files
+ for cache_class in self.caches_array:
+ cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
+ with open(cachefile, "rb") as cachefile:
+ cachesize += os.fstat(cachefile.fileno()).st_size
+
+ bb.event.fire(bb.event.CacheLoadStarted(cachesize), self.data)
+
+ for cache_class in self.caches_array:
+ cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
+            logger.debug(1, 'Loading cache file: %s', cachefile)
+ with open(cachefile, "rb") as cachefile:
+ pickled = pickle.Unpickler(cachefile)
+ # Check cache version information
+ try:
+ cache_ver = pickled.load()
+ bitbake_ver = pickled.load()
+ except Exception:
+ logger.info('Invalid cache, rebuilding...')
+ return
+
+ if cache_ver != __cache_version__:
+ logger.info('Cache version mismatch, rebuilding...')
+ return
+ elif bitbake_ver != bb.__version__:
+ logger.info('Bitbake version mismatch, rebuilding...')
+ return
+
+ # Load the rest of the cache file
+ current_progress = 0
+ while cachefile:
+ try:
+ key = pickled.load()
+ value = pickled.load()
+ except Exception:
+ break
+ if not isinstance(key, str):
+ bb.warn("%s from extras cache is not a string?" % key)
+ break
+ if not isinstance(value, RecipeInfoCommon):
+ bb.warn("%s from extras cache is not a RecipeInfoCommon class?" % value)
+ break
+
+ if key in self.depends_cache:
+ self.depends_cache[key].append(value)
+ else:
+ self.depends_cache[key] = [value]
+ # only fire events on even percentage boundaries
+ current_progress = cachefile.tell() + previous_progress
+ if current_progress > cachesize:
+ # we might have calculated incorrect total size because a file
+ # might've been written out just after we checked its size
+ cachesize = current_progress
+ current_percent = 100 * current_progress / cachesize
+ if current_percent > previous_percent:
+ previous_percent = current_percent
+ bb.event.fire(bb.event.CacheLoadProgress(current_progress, cachesize),
+ self.data)
+
+ previous_progress += current_progress
+
+        # Note: the number of entries in depends_cache corresponds to the number
+        # of parsed files; a file with several cache variants still counts as one item
+ bb.event.fire(bb.event.CacheLoadCompleted(cachesize,
+ len(self.depends_cache)),
+ self.data)
+
+ def parse(self, filename, appends):
+ """Parse the specified filename, returning the recipe information"""
+ logger.debug(1, "Parsing %s", filename)
+ infos = []
+ datastores = self.load_bbfile(filename, appends)
+ depends = []
+ variants = []
+ # Process the "real" fn last so we can store variants list
+ for variant, data in sorted(datastores.items(),
+ key=lambda i: i[0],
+ reverse=True):
+ virtualfn = variant2virtual(filename, variant)
+ variants.append(variant)
+ depends = depends + (data.getVar("__depends", False) or [])
+ if depends and not variant:
+ data.setVar("__depends", depends)
+ if virtualfn == filename:
+ data.setVar("__VARIANTS", " ".join(variants))
+ info_array = []
+ for cache_class in self.caches_array:
+ info = cache_class(filename, data)
+ info_array.append(info)
+ infos.append((virtualfn, info_array))
+
+ return infos
+
+ def load(self, filename, appends):
+ """Obtain the recipe information for the specified filename,
+ using cached values if available, otherwise parsing.
+
+ Note that if it does parse to obtain the info, it will not
+ automatically add the information to the cache or to your
+ CacheData. Use the add or add_info method to do so after
+ running this, or use loadData instead."""
+ cached = self.cacheValid(filename, appends)
+ if cached:
+ infos = []
+ # info_array item is a list of [CoreRecipeInfo, XXXRecipeInfo]
+ info_array = self.depends_cache[filename]
+ for variant in info_array[0].variants:
+ virtualfn = variant2virtual(filename, variant)
+ infos.append((virtualfn, self.depends_cache[virtualfn]))
+        else:
+            infos = self.parse(filename, appends)
+
+ return cached, infos
+
+ def loadData(self, fn, appends, cacheData):
+ """Load the recipe info for the specified filename,
+ parsing and adding to the cache if necessary, and adding
+ the recipe information to the supplied CacheData instance."""
+ skipped, virtuals = 0, 0
+
+ cached, infos = self.load(fn, appends)
+ for virtualfn, info_array in infos:
+ if info_array[0].skipped:
+ logger.debug(1, "Skipping %s: %s", virtualfn, info_array[0].skipreason)
+ skipped += 1
+ else:
+ self.add_info(virtualfn, info_array, cacheData, not cached)
+ virtuals += 1
+
+ return cached, skipped, virtuals
+
+ def cacheValid(self, fn, appends):
+ """
+ Is the cache valid for fn?
+ Fast version, no timestamps checked.
+ """
+ if fn not in self.checked:
+ self.cacheValidUpdate(fn, appends)
+
+ # Is cache enabled?
+ if not self.has_cache:
+ return False
+ if fn in self.clean:
+ return True
+ return False
+
+ def cacheValidUpdate(self, fn, appends):
+ """
+ Is the cache valid for fn?
+ Make thorough (slower) checks including timestamps.
+ """
+ # Is cache enabled?
+ if not self.has_cache:
+ return False
+
+ self.checked.add(fn)
+
+ # File isn't in depends_cache
+        if fn not in self.depends_cache:
+ logger.debug(2, "Cache: %s is not cached", fn)
+ return False
+
+ mtime = bb.parse.cached_mtime_noerror(fn)
+
+ # Check file still exists
+ if mtime == 0:
+ logger.debug(2, "Cache: %s no longer exists", fn)
+ self.remove(fn)
+ return False
+
+ info_array = self.depends_cache[fn]
+ # Check the file's timestamp
+ if mtime != info_array[0].timestamp:
+ logger.debug(2, "Cache: %s changed", fn)
+ self.remove(fn)
+ return False
+
+ # Check dependencies are still valid
+ depends = info_array[0].file_depends
+ if depends:
+ for f, old_mtime in depends:
+ fmtime = bb.parse.cached_mtime_noerror(f)
+ # Check if file still exists
+ if old_mtime != 0 and fmtime == 0:
+ logger.debug(2, "Cache: %s's dependency %s was removed",
+ fn, f)
+ self.remove(fn)
+ return False
+
+ if (fmtime != old_mtime):
+ logger.debug(2, "Cache: %s's dependency %s changed",
+ fn, f)
+ self.remove(fn)
+ return False
+
+ if hasattr(info_array[0], 'file_checksums'):
+ for _, fl in info_array[0].file_checksums.items():
+ fl = fl.strip()
+ while fl:
+ # A .split() would be simpler but means spaces or colons in filenames would break
+ a = fl.find(":True")
+ b = fl.find(":False")
+                    if ((a < 0) and (b >= 0)) or ((b >= 0) and (b < a)):
+ f = fl[:b+6]
+ fl = fl[b+7:]
+                    elif ((b < 0) and (a >= 0)) or ((a >= 0) and (a < b)):
+ f = fl[:a+5]
+ fl = fl[a+6:]
+ else:
+ break
+ fl = fl.strip()
+ if "*" in f:
+ continue
+ f, exist = f.split(":")
+ if (exist == "True" and not os.path.exists(f)) or (exist == "False" and os.path.exists(f)):
+ logger.debug(2, "Cache: %s's file checksum list file %s changed",
+ fn, f)
+ self.remove(fn)
+ return False
+
+ if appends != info_array[0].appends:
+ logger.debug(2, "Cache: appends for %s changed", fn)
+ logger.debug(2, "%s to %s" % (str(appends), str(info_array[0].appends)))
+ self.remove(fn)
+ return False
+
+ invalid = False
+ for cls in info_array[0].variants:
+ virtualfn = variant2virtual(fn, cls)
+ self.clean.add(virtualfn)
+ if virtualfn not in self.depends_cache:
+ logger.debug(2, "Cache: %s is not cached", virtualfn)
+ invalid = True
+ elif len(self.depends_cache[virtualfn]) != len(self.caches_array):
+ logger.debug(2, "Cache: Extra caches missing for %s?" % virtualfn)
+ invalid = True
+
+ # If any one of the variants is not present, mark as invalid for all
+ if invalid:
+ for cls in info_array[0].variants:
+ virtualfn = variant2virtual(fn, cls)
+ if virtualfn in self.clean:
+ logger.debug(2, "Cache: Removing %s from cache", virtualfn)
+ self.clean.remove(virtualfn)
+ if fn in self.clean:
+ logger.debug(2, "Cache: Marking %s as not clean", fn)
+ self.clean.remove(fn)
+ return False
+
+ self.clean.add(fn)
+ return True
+
+ def remove(self, fn):
+ """
+ Remove a fn from the cache
+ Called from the parser in error cases
+ """
+ if fn in self.depends_cache:
+ logger.debug(1, "Removing %s from cache", fn)
+ del self.depends_cache[fn]
+ if fn in self.clean:
+ logger.debug(1, "Marking %s as unclean", fn)
+ self.clean.remove(fn)
+
+ def sync(self):
+ """
+ Save the cache
+ Called from the parser when complete (or exiting)
+ """
+
+ if not self.has_cache:
+ return
+
+ if self.cacheclean:
+ logger.debug(2, "Cache is clean, not saving.")
+ return
+
+ for cache_class in self.caches_array:
+ cache_class_name = cache_class.__name__
+ cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
+ with open(cachefile, "wb") as f:
+ p = pickle.Pickler(f, pickle.HIGHEST_PROTOCOL)
+ p.dump(__cache_version__)
+ p.dump(bb.__version__)
+
+ for key, info_array in self.depends_cache.items():
+ for info in info_array:
+ if isinstance(info, RecipeInfoCommon) and info.__class__.__name__ == cache_class_name:
+ p.dump(key)
+ p.dump(info)
+
+ del self.depends_cache
+
+ @staticmethod
+ def mtime(cachefile):
+ return bb.parse.cached_mtime_noerror(cachefile)
+
+ def add_info(self, filename, info_array, cacheData, parsed=None, watcher=None):
+ if isinstance(info_array[0], CoreRecipeInfo) and (not info_array[0].skipped):
+ cacheData.add_from_recipeinfo(filename, info_array)
+
+ if watcher:
+ watcher(info_array[0].file_depends)
+
+ if not self.has_cache:
+ return
+
+ if (info_array[0].skipped or 'SRCREVINACTION' not in info_array[0].pv) and not info_array[0].nocache:
+ if parsed:
+ self.cacheclean = False
+ self.depends_cache[filename] = info_array
+
+ def add(self, file_name, data, cacheData, parsed=None):
+ """
+ Save data we need into the cache
+ """
+
+ realfn = virtualfn2realfn(file_name)[0]
+
+ info_array = []
+ for cache_class in self.caches_array:
+ info_array.append(cache_class(realfn, data))
+ self.add_info(file_name, info_array, cacheData, parsed)
+
+
+def init(cooker):
+ """
+ The Objective: Cache the minimum amount of data possible yet get to the
+ stage of building packages (i.e. tryBuild) without reparsing any .bb files.
+
+ To do this, we intercept getVar calls and only cache the variables we see
+ being accessed. We rely on the cache getVar calls being made for all
+ variables bitbake might need to use to reach this stage. For each cached
+ file we need to track:
+
+ * Its mtime
+ * The mtimes of all its dependencies
+ * Whether it caused a parse.SkipRecipe exception
+
+ Files causing parsing errors are evicted from the cache.
+
+ """
+    return Cache(cooker.databuilder, cooker.data_hash, cooker.caches_array)
+
+
+class CacheData(object):
+ """
+ The data structures we compile from the cached data
+ """
+
+ def __init__(self, caches_array):
+ self.caches_array = caches_array
+ for cache_class in self.caches_array:
+ if not issubclass(cache_class, RecipeInfoCommon):
+ bb.error("Extra cache data class %s should subclass RecipeInfoCommon class" % cache_class)
+ cache_class.init_cacheData(self)
+
+ # Direct cache variables
+ self.task_queues = {}
+ self.preferred = {}
+ self.tasks = {}
+ # Indirect Cache variables (set elsewhere)
+ self.ignored_dependencies = []
+ self.world_target = set()
+ self.bbfile_priority = {}
+
+ def add_from_recipeinfo(self, fn, info_array):
+ for info in info_array:
+ info.add_cacheData(self, fn)
+
+class MultiProcessCache(object):
+ """
+ BitBake multi-process cache implementation
+
+ Used by the codeparser & file checksum caches
+ """
+
+ def __init__(self):
+ self.cachefile = None
+ self.cachedata = self.create_cachedata()
+ self.cachedata_extras = self.create_cachedata()
+
+ def init_cache(self, d, cache_file_name=None):
+ cachedir = (d.getVar("PERSISTENT_DIR") or
+ d.getVar("CACHE"))
+ if cachedir in [None, '']:
+ return
+ bb.utils.mkdirhier(cachedir)
+ self.cachefile = os.path.join(cachedir,
+ cache_file_name or self.__class__.cache_file_name)
+ logger.debug(1, "Using cache in '%s'", self.cachefile)
+
+ glf = bb.utils.lockfile(self.cachefile + ".lock")
+
+ try:
+ with open(self.cachefile, "rb") as f:
+ p = pickle.Unpickler(f)
+ data, version = p.load()
+ except:
+ bb.utils.unlockfile(glf)
+ return
+
+ bb.utils.unlockfile(glf)
+
+ if version != self.__class__.CACHE_VERSION:
+ return
+
+ self.cachedata = data
+
+ def create_cachedata(self):
+ data = [{}]
+ return data
+
+ def save_extras(self):
+ if not self.cachefile:
+ return
+
+ glf = bb.utils.lockfile(self.cachefile + ".lock", shared=True)
+
+ i = os.getpid()
+ lf = None
+ while not lf:
+ lf = bb.utils.lockfile(self.cachefile + ".lock." + str(i), retry=False)
+ if not lf or os.path.exists(self.cachefile + "-" + str(i)):
+ if lf:
+ bb.utils.unlockfile(lf)
+ lf = None
+ i = i + 1
+ continue
+
+ with open(self.cachefile + "-" + str(i), "wb") as f:
+ p = pickle.Pickler(f, -1)
+ p.dump([self.cachedata_extras, self.__class__.CACHE_VERSION])
+
+ bb.utils.unlockfile(lf)
+ bb.utils.unlockfile(glf)
+
+ def merge_data(self, source, dest):
+ for j in range(0,len(dest)):
+ for h in source[j]:
+ if h not in dest[j]:
+ dest[j][h] = source[j][h]
+
+ def save_merge(self):
+ if not self.cachefile:
+ return
+
+ glf = bb.utils.lockfile(self.cachefile + ".lock")
+
+ data = self.cachedata
+
+ for f in [y for y in os.listdir(os.path.dirname(self.cachefile)) if y.startswith(os.path.basename(self.cachefile) + '-')]:
+ f = os.path.join(os.path.dirname(self.cachefile), f)
+ try:
+ with open(f, "rb") as fd:
+ p = pickle.Unpickler(fd)
+ extradata, version = p.load()
+ except (IOError, EOFError):
+ os.unlink(f)
+ continue
+
+ if version != self.__class__.CACHE_VERSION:
+ os.unlink(f)
+ continue
+
+ self.merge_data(extradata, data)
+ os.unlink(f)
+
+ with open(self.cachefile, "wb") as f:
+ p = pickle.Pickler(f, -1)
+ p.dump([data, self.__class__.CACHE_VERSION])
+
+ bb.utils.unlockfile(glf)
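+
+# Typical MultiProcessCache lifecycle sketch (subclass name taken from
+# bb/checksum.py in this tree; the flow is a summary, not new API):
+#   cache = bb.checksum.FileChecksumCache()
+#   cache.init_cache(d)   # load the shared cache file under its lock
+#   ...                   # each worker fills cachedata_extras, then calls
+#                         # save_extras() to write "<cachefile>-<pid>"
+#   cache.save_merge()    # one process merges the per-pid files back in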
diff --git a/poky/bitbake/lib/bb/cache_extra.py b/poky/bitbake/lib/bb/cache_extra.py
new file mode 100644
index 000000000..83f4959d6
--- /dev/null
+++ b/poky/bitbake/lib/bb/cache_extra.py
@@ -0,0 +1,75 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# All extra RecipeInfo classes are defined in this file. Currently
+# only Hob (the Image Creator) requests some extra fields, so
+# HobRecipeInfo is defined here; it is named HobRecipeInfo because it
+# was introduced by 'hob'. Users can also introduce other RecipeInfo
+# classes or simply reuse those already defined. Any such extra
+# RecipeInfo class is dynamically loaded and used for loading/saving
+# the extra cache fields.
+
+# Copyright (C) 2011, Intel Corporation. All rights reserved.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from bb.cache import RecipeInfoCommon
+
+class HobRecipeInfo(RecipeInfoCommon):
+ __slots__ = ()
+
+ classname = "HobRecipeInfo"
+ # please override this member with the correct data cache file
+ # such as (bb_cache.dat, bb_extracache_hob.dat)
+ cachefile = "bb_extracache_" + classname +".dat"
+
+ # override this member with the list of extra cache fields
+ # that this class will provide
+ cachefields = ['summary', 'license', 'section',
+ 'description', 'homepage', 'bugtracker',
+ 'prevision', 'files_info']
+
+ def __init__(self, filename, metadata):
+
+ self.summary = self.getvar('SUMMARY', metadata)
+ self.license = self.getvar('LICENSE', metadata)
+ self.section = self.getvar('SECTION', metadata)
+ self.description = self.getvar('DESCRIPTION', metadata)
+ self.homepage = self.getvar('HOMEPAGE', metadata)
+ self.bugtracker = self.getvar('BUGTRACKER', metadata)
+ self.prevision = self.getvar('PR', metadata)
+ self.files_info = self.getvar('FILES_INFO', metadata)
+
+ @classmethod
+ def init_cacheData(cls, cachedata):
+ # CacheData in Hob RecipeInfo Class
+ cachedata.summary = {}
+ cachedata.license = {}
+ cachedata.section = {}
+ cachedata.description = {}
+ cachedata.homepage = {}
+ cachedata.bugtracker = {}
+ cachedata.prevision = {}
+ cachedata.files_info = {}
+
+ def add_cacheData(self, cachedata, fn):
+ cachedata.summary[fn] = self.summary
+ cachedata.license[fn] = self.license
+ cachedata.section[fn] = self.section
+ cachedata.description[fn] = self.description
+ cachedata.homepage[fn] = self.homepage
+ cachedata.bugtracker[fn] = self.bugtracker
+ cachedata.prevision[fn] = self.prevision
+ cachedata.files_info[fn] = self.files_info
diff --git a/poky/bitbake/lib/bb/checksum.py b/poky/bitbake/lib/bb/checksum.py
new file mode 100644
index 000000000..84289208f
--- /dev/null
+++ b/poky/bitbake/lib/bb/checksum.py
@@ -0,0 +1,134 @@
+# Local file checksum cache implementation
+#
+# Copyright (C) 2012 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import glob
+import operator
+import os
+import stat
+import pickle
+import bb.utils
+import logging
+from bb.cache import MultiProcessCache
+
+logger = logging.getLogger("BitBake.Cache")
+
+# mtime cache (non-persistent)
+# based upon the assumption that files do not change during bitbake run
+class FileMtimeCache(object):
+ cache = {}
+
+ def cached_mtime(self, f):
+ if f not in self.cache:
+ self.cache[f] = os.stat(f)[stat.ST_MTIME]
+ return self.cache[f]
+
+ def cached_mtime_noerror(self, f):
+ if f not in self.cache:
+ try:
+ self.cache[f] = os.stat(f)[stat.ST_MTIME]
+ except OSError:
+ return 0
+ return self.cache[f]
+
+ def update_mtime(self, f):
+ self.cache[f] = os.stat(f)[stat.ST_MTIME]
+ return self.cache[f]
+
+ def clear(self):
+ self.cache.clear()
+
+# Checksum + mtime cache (persistent)
+class FileChecksumCache(MultiProcessCache):
+ cache_file_name = "local_file_checksum_cache.dat"
+ CACHE_VERSION = 1
+
+ def __init__(self):
+ self.mtime_cache = FileMtimeCache()
+ MultiProcessCache.__init__(self)
+
+ def get_checksum(self, f):
+ entry = self.cachedata[0].get(f)
+ cmtime = self.mtime_cache.cached_mtime(f)
+ if entry:
+ (mtime, hashval) = entry
+ if cmtime == mtime:
+ return hashval
+ else:
+ bb.debug(2, "file %s changed mtime, recompute checksum" % f)
+
+ hashval = bb.utils.md5_file(f)
+ self.cachedata_extras[0][f] = (cmtime, hashval)
+ return hashval
+
+ def merge_data(self, source, dest):
+ for h in source[0]:
+            if h in dest[0]:
+ (smtime, _) = source[0][h]
+ (dmtime, _) = dest[0][h]
+ if smtime > dmtime:
+ dest[0][h] = source[0][h]
+ else:
+ dest[0][h] = source[0][h]
+
+ def get_checksums(self, filelist, pn):
+ """Get checksums for a list of files"""
+
+ def checksum_file(f):
+ try:
+ checksum = self.get_checksum(f)
+ except OSError as e:
+ bb.warn("Unable to get checksum for %s SRC_URI entry %s: %s" % (pn, os.path.basename(f), e))
+ return None
+ return checksum
+
+ def checksum_dir(pth):
+ # Handle directories recursively
+ dirchecksums = []
+ for root, dirs, files in os.walk(pth):
+ for name in files:
+ fullpth = os.path.join(root, name)
+ checksum = checksum_file(fullpth)
+ if checksum:
+ dirchecksums.append((fullpth, checksum))
+ return dirchecksums
+
+ checksums = []
+ for pth in filelist.split():
+ exist = pth.split(":")[1]
+ if exist == "False":
+ continue
+ pth = pth.split(":")[0]
+ if '*' in pth:
+ # Handle globs
+ for f in glob.glob(pth):
+ if os.path.isdir(f):
+ if not os.path.islink(f):
+ checksums.extend(checksum_dir(f))
+ else:
+ checksum = checksum_file(f)
+ if checksum:
+ checksums.append((f, checksum))
+ elif os.path.isdir(pth):
+ if not os.path.islink(pth):
+ checksums.extend(checksum_dir(pth))
+ else:
+ checksum = checksum_file(pth)
+ if checksum:
+ checksums.append((pth, checksum))
+
+ checksums.sort(key=operator.itemgetter(1))
+ return checksums
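+
+    # Input format sketch (inferred from the parsing above): "filelist" is a
+    # space-separated list of "<path>:<exists>" entries, e.g.
+    #   cache.get_checksums("/srv/a.patch:True /srv/gone.patch:False", "example-pn")
+    # returns [(path, md5hex), ...] for entries that exist, sorted by checksum.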
diff --git a/poky/bitbake/lib/bb/codeparser.py b/poky/bitbake/lib/bb/codeparser.py
new file mode 100644
index 000000000..530f44e57
--- /dev/null
+++ b/poky/bitbake/lib/bb/codeparser.py
@@ -0,0 +1,476 @@
+"""
+BitBake code parser
+
+Parses actual code (i.e. python and shell) for functions and in-line
+expressions. Used mainly to determine dependencies on other functions
+and variables within the BitBake metadata. Also provides a cache for
+this information in order to speed up processing.
+
+(Not to be confused with the code that parses the metadata itself,
+see lib/bb/parse/ for that).
+
+NOTE: if you change how the parsers gather information you will almost
+certainly need to increment CodeParserCache.CACHE_VERSION below so that
+any existing codeparser cache gets invalidated. Additionally you'll need
+to increment __cache_version__ in cache.py in order to ensure that old
+recipe caches don't trigger "Taskhash mismatch" errors.
+
+"""
+
+import ast
+import sys
+import codegen
+import logging
+import pickle
+import bb.pysh as pysh
+import os.path
+import bb.utils, bb.data
+import hashlib
+from itertools import chain
+from bb.pysh import pyshyacc, pyshlex, sherrors
+from bb.cache import MultiProcessCache
+
+logger = logging.getLogger('BitBake.CodeParser')
+
+def bbhash(s):
+ return hashlib.md5(s.encode("utf-8")).hexdigest()
+
+def check_indent(codestr):
+ """If the code is indented, add a top level piece of code to 'remove' the indentation"""
+
+ i = 0
+ while codestr[i] in ["\n", "\t", " "]:
+ i = i + 1
+
+ if i == 0:
+ return codestr
+
+ if codestr[i-1] == "\t" or codestr[i-1] == " ":
+ if codestr[0] == "\n":
+ # Since we're adding a line, we need to remove one line of any empty padding
+ # to ensure line numbers are correct
+ codestr = codestr[1:]
+ return "if 1:\n" + codestr
+
+ return codestr
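+
+# Illustrative sketch (not part of the original code): indented code gets
+# wrapped in a top-level "if 1:" so that compile() accepts it unchanged.
+def _example_check_indent():
+    assert check_indent("    x = 1\n") == "if 1:\n    x = 1\n"
+    assert check_indent("x = 1\n") == "x = 1\n"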
+
+
+# Basically pickle, in python 2.7.3 at least, does badly with data duplication
+# upon pickling and unpickling. Combine this with duplicate objects and things
+# are a mess.
+#
+# When the sets are originally created, python calls intern() on the set keys
+# which significantly improves memory usage. Sadly the pickle/unpickle process
+# doesn't call intern() on the keys and results in the same strings being duplicated
+# in memory. This also means pickle will save the same string multiple times in
+# the cache file.
+#
+# By having shell and python cacheline objects with setstate/getstate, we force
+# the object creation through our own routine where we can call intern (via internSet).
+#
+# We also use hashable frozensets and ensure we use references to these so that
+# duplicates can be removed, both in memory and in the resulting pickled data.
+#
+# By playing these games, the size of the cache file shrinks dramatically
+# meaning faster load times and the reloaded cache files also consume much less
+# memory. Smaller cache files, faster load times and lower memory usage is good.
+#
+# A custom getstate/setstate using tuples is actually worth 15% cachesize by
+# avoiding duplication of the attribute names!
+
+class SetCache(object):
+ def __init__(self):
+ self.setcache = {}
+
+ def internSet(self, items):
+
+ new = []
+ for i in items:
+ new.append(sys.intern(i))
+ s = frozenset(new)
+ h = hash(s)
+ if h in self.setcache:
+ return self.setcache[h]
+ self.setcache[h] = s
+ return s
+
+codecache = SetCache()
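+
+# Illustrative sketch (not part of the original code): two logically equal
+# inputs come back as the *same* frozenset object, so duplicates are stored
+# only once in memory and in the pickled cache:
+def _example_internset():
+    s1 = codecache.internSet(["bash", "sed"])
+    s2 = codecache.internSet(["sed", "bash"])
+    assert s1 is s2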
+
+class pythonCacheLine(object):
+ def __init__(self, refs, execs, contains):
+ self.refs = codecache.internSet(refs)
+ self.execs = codecache.internSet(execs)
+ self.contains = {}
+ for c in contains:
+ self.contains[c] = codecache.internSet(contains[c])
+
+ def __getstate__(self):
+ return (self.refs, self.execs, self.contains)
+
+ def __setstate__(self, state):
+ (refs, execs, contains) = state
+ self.__init__(refs, execs, contains)
+ def __hash__(self):
+ l = (hash(self.refs), hash(self.execs))
+ for c in sorted(self.contains.keys()):
+ l = l + (c, hash(self.contains[c]))
+ return hash(l)
+ def __repr__(self):
+ return " ".join([str(self.refs), str(self.execs), str(self.contains)])
+
+
+class shellCacheLine(object):
+ def __init__(self, execs):
+ self.execs = codecache.internSet(execs)
+
+ def __getstate__(self):
+ return (self.execs)
+
+ def __setstate__(self, state):
+ (execs) = state
+ self.__init__(execs)
+ def __hash__(self):
+ return hash(self.execs)
+ def __repr__(self):
+ return str(self.execs)
+
+class CodeParserCache(MultiProcessCache):
+ cache_file_name = "bb_codeparser.dat"
+ # NOTE: you must increment this if you change how the parsers gather information,
+ # so that an existing cache gets invalidated. Additionally you'll need
+ # to increment __cache_version__ in cache.py in order to ensure that old
+ # recipe caches don't trigger "Taskhash mismatch" errors.
+ CACHE_VERSION = 9
+
+ def __init__(self):
+ MultiProcessCache.__init__(self)
+ self.pythoncache = self.cachedata[0]
+ self.shellcache = self.cachedata[1]
+ self.pythoncacheextras = self.cachedata_extras[0]
+ self.shellcacheextras = self.cachedata_extras[1]
+
+ # To avoid duplication in the codeparser cache, keep
+ # a lookup of hashes of objects we already have
+ self.pythoncachelines = {}
+ self.shellcachelines = {}
+
+ def newPythonCacheLine(self, refs, execs, contains):
+ cacheline = pythonCacheLine(refs, execs, contains)
+ h = hash(cacheline)
+ if h in self.pythoncachelines:
+ return self.pythoncachelines[h]
+ self.pythoncachelines[h] = cacheline
+ return cacheline
+
+ def newShellCacheLine(self, execs):
+ cacheline = shellCacheLine(execs)
+ h = hash(cacheline)
+ if h in self.shellcachelines:
+ return self.shellcachelines[h]
+ self.shellcachelines[h] = cacheline
+ return cacheline
+
+ def init_cache(self, d):
+ # Check if we already have the caches
+ if self.pythoncache:
+ return
+
+ MultiProcessCache.init_cache(self, d)
+
+ # cachedata gets re-assigned in the parent
+ self.pythoncache = self.cachedata[0]
+ self.shellcache = self.cachedata[1]
+
+ def create_cachedata(self):
+ data = [{}, {}]
+ return data
+
+codeparsercache = CodeParserCache()
+
+def parser_cache_init(d):
+ codeparsercache.init_cache(d)
+
+def parser_cache_save():
+ codeparsercache.save_extras()
+
+def parser_cache_savemerge():
+ codeparsercache.save_merge()
+
+Logger = logging.getLoggerClass()
+class BufferedLogger(Logger):
+ def __init__(self, name, level=0, target=None):
+ Logger.__init__(self, name)
+ self.setLevel(level)
+ self.buffer = []
+ self.target = target
+
+ def handle(self, record):
+ self.buffer.append(record)
+
+ def flush(self):
+ for record in self.buffer:
+ if self.target.isEnabledFor(record.levelno):
+ self.target.handle(record)
+ self.buffer = []
+
+class PythonParser():
+ getvars = (".getVar", ".appendVar", ".prependVar")
+ getvarflags = (".getVarFlag", ".appendVarFlag", ".prependVarFlag")
+ containsfuncs = ("bb.utils.contains", "base_contains")
+ containsanyfuncs = ("bb.utils.contains_any", "bb.utils.filter")
+ execfuncs = ("bb.build.exec_func", "bb.build.exec_task")
+
+ def warn(self, func, arg):
+ """Warn about calls of bitbake APIs which pass a non-literal
+ argument for the variable name, as we're not able to track such
+ a reference.
+ """
+
+ try:
+ funcstr = codegen.to_source(func)
+ argstr = codegen.to_source(arg)
+ except TypeError:
+ self.log.debug(2, 'Failed to convert function and argument to source form')
+ else:
+ self.log.debug(1, self.unhandled_message % (funcstr, argstr))
+
+ def visit_Call(self, node):
+ name = self.called_node_name(node.func)
+ if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
+ if isinstance(node.args[0], ast.Str):
+ varname = node.args[0].s
+ if name in self.containsfuncs and isinstance(node.args[1], ast.Str):
+ if varname not in self.contains:
+ self.contains[varname] = set()
+ self.contains[varname].add(node.args[1].s)
+ elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Str):
+ if varname not in self.contains:
+ self.contains[varname] = set()
+ self.contains[varname].update(node.args[1].s.split())
+ elif name.endswith(self.getvarflags):
+ if isinstance(node.args[1], ast.Str):
+ self.references.add('%s[%s]' % (varname, node.args[1].s))
+ else:
+ self.warn(node.func, node.args[1])
+ else:
+ self.references.add(varname)
+ else:
+ self.warn(node.func, node.args[0])
+ elif name and name.endswith(".expand"):
+ if isinstance(node.args[0], ast.Str):
+ value = node.args[0].s
+ d = bb.data.init()
+ parser = d.expandWithRefs(value, self.name)
+ self.references |= parser.references
+ self.execs |= parser.execs
+ for varname in parser.contains:
+ if varname not in self.contains:
+ self.contains[varname] = set()
+ self.contains[varname] |= parser.contains[varname]
+ elif name in self.execfuncs:
+ if isinstance(node.args[0], ast.Str):
+ self.var_execs.add(node.args[0].s)
+ else:
+ self.warn(node.func, node.args[0])
+ elif name and isinstance(node.func, (ast.Name, ast.Attribute)):
+ self.execs.add(name)
+
+ def called_node_name(self, node):
+ """Given a called node, return its original string form"""
+ components = []
+ while node:
+ if isinstance(node, ast.Attribute):
+ components.append(node.attr)
+ node = node.value
+ elif isinstance(node, ast.Name):
+ components.append(node.id)
+ return '.'.join(reversed(components))
+ else:
+ break
+
+ def __init__(self, name, log):
+ self.name = name
+ self.var_execs = set()
+ self.contains = {}
+ self.execs = set()
+ self.references = set()
+ self.log = BufferedLogger('BitBake.Data.PythonParser', logging.DEBUG, log)
+
+ self.unhandled_message = "in call of %s, argument '%s' is not a string literal"
+ self.unhandled_message = "while parsing %s, %s" % (name, self.unhandled_message)
+
+ def parse_python(self, node, lineno=0, filename="<string>"):
+ if not node or not node.strip():
+ return
+
+ h = bbhash(str(node))
+
+ if h in codeparsercache.pythoncache:
+ self.references = set(codeparsercache.pythoncache[h].refs)
+ self.execs = set(codeparsercache.pythoncache[h].execs)
+ self.contains = {}
+ for i in codeparsercache.pythoncache[h].contains:
+ self.contains[i] = set(codeparsercache.pythoncache[h].contains[i])
+ return
+
+ if h in codeparsercache.pythoncacheextras:
+ self.references = set(codeparsercache.pythoncacheextras[h].refs)
+ self.execs = set(codeparsercache.pythoncacheextras[h].execs)
+ self.contains = {}
+ for i in codeparsercache.pythoncacheextras[h].contains:
+ self.contains[i] = set(codeparsercache.pythoncacheextras[h].contains[i])
+ return
+
+ # We can't add to the linenumbers for compile, we can pad to the correct number of blank lines though
+ node = "\n" * int(lineno) + node
+ code = compile(check_indent(str(node)), filename, "exec",
+ ast.PyCF_ONLY_AST)
+
+ for n in ast.walk(code):
+ if n.__class__.__name__ == "Call":
+ self.visit_Call(n)
+
+ self.execs.update(self.var_execs)
+
+ codeparsercache.pythoncacheextras[h] = codeparsercache.newPythonCacheLine(self.references, self.execs, self.contains)
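+
+# Illustrative sketch (not part of the original code): extracting variable
+# references and executed functions from a fragment of recipe Python;
+# "do_example" is a made-up name.
+def _example_python_parser():
+    parser = PythonParser("do_example", logger)
+    parser.parse_python('d.getVar("FOO")\nbb.build.exec_func("do_other", d)')
+    assert "FOO" in parser.references
+    assert "do_other" in parser.execs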
+
+class ShellParser():
+ def __init__(self, name, log):
+ self.funcdefs = set()
+ self.allexecs = set()
+ self.execs = set()
+ self.log = BufferedLogger('BitBake.Data.%s' % name, logging.DEBUG, log)
+ self.unhandled_template = "unable to handle non-literal command '%s'"
+ self.unhandled_template = "while parsing %s, %s" % (name, self.unhandled_template)
+
+ def parse_shell(self, value):
+ """Parse the supplied shell code in a string, returning the external
+ commands it executes.
+ """
+
+ h = bbhash(str(value))
+
+ if h in codeparsercache.shellcache:
+ self.execs = set(codeparsercache.shellcache[h].execs)
+ return self.execs
+
+ if h in codeparsercache.shellcacheextras:
+ self.execs = set(codeparsercache.shellcacheextras[h].execs)
+ return self.execs
+
+ self._parse_shell(value)
+ self.execs = set(cmd for cmd in self.allexecs if cmd not in self.funcdefs)
+
+ codeparsercache.shellcacheextras[h] = codeparsercache.newShellCacheLine(self.execs)
+
+ return self.execs
+
+ def _parse_shell(self, value):
+ try:
+ tokens, _ = pyshyacc.parse(value, eof=True, debug=False)
+ except pyshlex.NeedMore:
+ raise sherrors.ShellSyntaxError("Unexpected EOF")
+
+ self.process_tokens(tokens)
+
+ def process_tokens(self, tokens):
+ """Process a supplied portion of the syntax tree as returned by
+ pyshyacc.parse.
+ """
+
+ def function_definition(value):
+ self.funcdefs.add(value.name)
+ return [value.body], None
+
+ def case_clause(value):
+ # Element 0 of each item in the case is the list of patterns, and
+ # Element 1 of each item in the case is the list of commands to be
+ # executed when that pattern matches.
+ words = chain(*[item[0] for item in value.items])
+ cmds = chain(*[item[1] for item in value.items])
+ return cmds, words
+
+ def if_clause(value):
+ main = chain(value.cond, value.if_cmds)
+ rest = value.else_cmds
+ if isinstance(rest, tuple) and rest[0] == "elif":
+ return chain(main, if_clause(rest[1]))
+ else:
+ return chain(main, rest)
+
+ def simple_command(value):
+ return None, chain(value.words, (assign[1] for assign in value.assigns))
+
+ token_handlers = {
+ "and_or": lambda x: ((x.left, x.right), None),
+ "async": lambda x: ([x], None),
+ "brace_group": lambda x: (x.cmds, None),
+ "for_clause": lambda x: (x.cmds, x.items),
+ "function_definition": function_definition,
+ "if_clause": lambda x: (if_clause(x), None),
+ "pipeline": lambda x: (x.commands, None),
+ "redirect_list": lambda x: ([x.cmd], None),
+ "subshell": lambda x: (x.cmds, None),
+ "while_clause": lambda x: (chain(x.condition, x.cmds), None),
+ "until_clause": lambda x: (chain(x.condition, x.cmds), None),
+ "simple_command": simple_command,
+ "case_clause": case_clause,
+ }
+
+ def process_token_list(tokens):
+ for token in tokens:
+ if isinstance(token, list):
+ process_token_list(token)
+ continue
+ name, value = token
+ try:
+ more_tokens, words = token_handlers[name](value)
+ except KeyError:
+ raise NotImplementedError("Unsupported token type " + name)
+
+ if more_tokens:
+ self.process_tokens(more_tokens)
+
+ if words:
+ self.process_words(words)
+
+ process_token_list(tokens)
+
+ def process_words(self, words):
+ """Process a set of 'words' in pyshyacc parlance, which includes
+ extraction of executed commands from $() blocks, as well as grabbing
+ the command name argument.
+ """
+
+ words = list(words)
+ for word in list(words):
+ wtree = pyshlex.make_wordtree(word[1])
+ for part in wtree:
+ if not isinstance(part, list):
+ continue
+
+ if part[0] in ('`', '$('):
+ command = pyshlex.wordtree_as_string(part[1:-1])
+ self._parse_shell(command)
+
+ if word[0] in ("cmd_name", "cmd_word"):
+ if word in words:
+ words.remove(word)
+
+ usetoken = False
+ for word in words:
+ if word[0] in ("cmd_name", "cmd_word") or \
+ (usetoken and word[0] == "TOKEN"):
+ if "=" in word[1]:
+ usetoken = True
+ continue
+
+ cmd = word[1]
+ if cmd.startswith("$"):
+ self.log.debug(1, self.unhandled_template % cmd)
+ elif cmd == "eval":
+ command = " ".join(word for _, word in words[1:])
+ self._parse_shell(command)
+ else:
+ self.allexecs.add(cmd)
+ break
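+
+# Illustrative sketch (not part of the original code): extracting the external
+# commands a shell fragment executes; "do_example" is a made-up name.
+def _example_shell_parser():
+    parser = ShellParser("do_example", logger)
+    execs = parser.parse_shell("install -d /tmp/dest && sed -i s/a/b/ input.txt")
+    assert execs == {"install", "sed"}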
diff --git a/poky/bitbake/lib/bb/command.py b/poky/bitbake/lib/bb/command.py
new file mode 100644
index 000000000..6c966e3db
--- /dev/null
+++ b/poky/bitbake/lib/bb/command.py
@@ -0,0 +1,765 @@
+"""
+BitBake 'Command' module
+
+Provide an interface to interact with the bitbake server through 'commands'
+"""
+
+# Copyright (C) 2006-2007 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+The bitbake server takes 'commands' from its UI/commandline.
+Commands are either synchronous or asynchronous.
+Async commands return data to the client in the form of events.
+Sync commands must only return data through the function return value
+and must not trigger events, directly or indirectly.
+Commands are queued in a CommandQueue
+"""
+
+from collections import OrderedDict, defaultdict
+
+import bb.event
+import bb.cooker
+import bb.remotedata
+
+class DataStoreConnectionHandle(object):
+ def __init__(self, dsindex=0):
+ self.dsindex = dsindex
+
+class CommandCompleted(bb.event.Event):
+ pass
+
+class CommandExit(bb.event.Event):
+ def __init__(self, exitcode):
+ bb.event.Event.__init__(self)
+ self.exitcode = int(exitcode)
+
+class CommandFailed(CommandExit):
+ def __init__(self, message):
+ self.error = message
+ CommandExit.__init__(self, 1)
+ def __str__(self):
+ return "Command execution failed: %s" % self.error
+
+class CommandError(Exception):
+ pass
+
+class Command:
+ """
+ A queue of asynchronous commands for bitbake
+ """
+ def __init__(self, cooker):
+ self.cooker = cooker
+ self.cmds_sync = CommandsSync()
+ self.cmds_async = CommandsAsync()
+ self.remotedatastores = bb.remotedata.RemoteDatastores(cooker)
+
+ # FIXME Add lock for this
+ self.currentAsyncCommand = None
+
+ def runCommand(self, commandline, ro_only = False):
+ command = commandline.pop(0)
+ if hasattr(CommandsSync, command):
+ # Can run synchronous commands straight away
+ command_method = getattr(self.cmds_sync, command)
+ if ro_only:
+                if not getattr(command_method, 'readonly', False):
+                    return None, "Unable to execute non-readonly commands in readonly mode"
+ try:
+ self.cooker.process_inotify_updates()
+ if getattr(command_method, 'needconfig', True):
+ self.cooker.updateCacheSync()
+ result = command_method(self, commandline)
+ except CommandError as exc:
+ return None, exc.args[0]
+ except (Exception, SystemExit):
+ import traceback
+ return None, traceback.format_exc()
+ else:
+ return result, None
+ if self.currentAsyncCommand is not None:
+ return None, "Busy (%s in progress)" % self.currentAsyncCommand[0]
+ if command not in CommandsAsync.__dict__:
+ return None, "No such command"
+ self.currentAsyncCommand = (command, commandline)
+ self.cooker.configuration.server_register_idlecallback(self.cooker.runCommands, self.cooker)
+ return True, None
+
+ def runAsyncCommand(self):
+ try:
+ self.cooker.process_inotify_updates()
+ if self.cooker.state in (bb.cooker.state.error, bb.cooker.state.shutdown, bb.cooker.state.forceshutdown):
+ # updateCache will trigger a shutdown of the parser
+ # and then raise BBHandledException triggering an exit
+ self.cooker.updateCache()
+ return False
+ if self.currentAsyncCommand is not None:
+ (command, options) = self.currentAsyncCommand
+ commandmethod = getattr(CommandsAsync, command)
+ needcache = getattr( commandmethod, "needcache" )
+ if needcache and self.cooker.state != bb.cooker.state.running:
+ self.cooker.updateCache()
+ return True
+ else:
+ commandmethod(self.cmds_async, self, options)
+ return False
+ else:
+ return False
+ except KeyboardInterrupt as exc:
+ self.finishAsyncCommand("Interrupted")
+ return False
+ except SystemExit as exc:
+ arg = exc.args[0]
+ if isinstance(arg, str):
+ self.finishAsyncCommand(arg)
+ else:
+ self.finishAsyncCommand("Exited with %s" % arg)
+ return False
+ except Exception as exc:
+ import traceback
+ if isinstance(exc, bb.BBHandledException):
+ self.finishAsyncCommand("")
+ else:
+ self.finishAsyncCommand(traceback.format_exc())
+ return False
+
+ def finishAsyncCommand(self, msg=None, code=None):
+ if msg or msg == "":
+ bb.event.fire(CommandFailed(msg), self.cooker.data)
+ elif code:
+ bb.event.fire(CommandExit(code), self.cooker.data)
+ else:
+ bb.event.fire(CommandCompleted(), self.cooker.data)
+ self.currentAsyncCommand = None
+ self.cooker.finishcommand()
+
+ def reset(self):
+ self.remotedatastores = bb.remotedata.RemoteDatastores(self.cooker)
+
+def split_mc_pn(pn):
+ if pn.startswith("multiconfig:"):
+ _, mc, pn = pn.split(":", 2)
+ return (mc, pn)
+ return ('', pn)
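+
+# e.g. (illustrative):
+#   split_mc_pn("multiconfig:mc1:busybox") -> ("mc1", "busybox")
+#   split_mc_pn("busybox")                 -> ("", "busybox")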
+
+class CommandsSync:
+ """
+ A class of synchronous commands
+ These should run quickly so as not to hurt interactive performance.
+ These must not influence any running synchronous command.
+ """
+
+ def stateShutdown(self, command, params):
+ """
+ Trigger cooker 'shutdown' mode
+ """
+ command.cooker.shutdown(False)
+
+ def stateForceShutdown(self, command, params):
+ """
+ Stop the cooker
+ """
+ command.cooker.shutdown(True)
+
+ def getAllKeysWithFlags(self, command, params):
+ """
+ Returns a dump of the global state. Call with
+ variable flags to be retrieved as params.
+ """
+ flaglist = params[0]
+ return command.cooker.getAllKeysWithFlags(flaglist)
+ getAllKeysWithFlags.readonly = True
+
+ def getVariable(self, command, params):
+ """
+ Read the value of a variable from data
+ """
+ varname = params[0]
+ expand = True
+ if len(params) > 1:
+ expand = (params[1] == "True")
+
+ return command.cooker.data.getVar(varname, expand)
+ getVariable.readonly = True
+
+ def setVariable(self, command, params):
+ """
+ Set the value of variable in data
+ """
+ varname = params[0]
+ value = str(params[1])
+ command.cooker.extraconfigdata[varname] = value
+ command.cooker.data.setVar(varname, value)
+
+ def getSetVariable(self, command, params):
+ """
+ Read the value of a variable from data and set it into the datastore
+ which effectively expands and locks the value.
+ """
+ varname = params[0]
+ result = self.getVariable(command, params)
+ command.cooker.data.setVar(varname, result)
+ return result
+
+ def setConfig(self, command, params):
+ """
+ Set the value of variable in configuration
+ """
+ varname = params[0]
+ value = str(params[1])
+ setattr(command.cooker.configuration, varname, value)
+
+ def enableDataTracking(self, command, params):
+ """
+ Enable history tracking for variables
+ """
+ command.cooker.enableDataTracking()
+
+ def disableDataTracking(self, command, params):
+ """
+ Disable history tracking for variables
+ """
+ command.cooker.disableDataTracking()
+
+ def setPrePostConfFiles(self, command, params):
+ prefiles = params[0].split()
+ postfiles = params[1].split()
+ command.cooker.configuration.prefile = prefiles
+ command.cooker.configuration.postfile = postfiles
+ setPrePostConfFiles.needconfig = False
+
+ def matchFile(self, command, params):
+ fMatch = params[0]
+ return command.cooker.matchFile(fMatch)
+ matchFile.needconfig = False
+
+ def getUIHandlerNum(self, command, params):
+ return bb.event.get_uihandler()
+ getUIHandlerNum.needconfig = False
+ getUIHandlerNum.readonly = True
+
+ def setEventMask(self, command, params):
+ handlerNum = params[0]
+ llevel = params[1]
+ debug_domains = params[2]
+ mask = params[3]
+ return bb.event.set_UIHmask(handlerNum, llevel, debug_domains, mask)
+ setEventMask.needconfig = False
+ setEventMask.readonly = True
+
+ def setFeatures(self, command, params):
+ """
+ Set the cooker features to include the passed list of features
+ """
+ features = params[0]
+ command.cooker.setFeatures(features)
+ setFeatures.needconfig = False
+ # although we change the internal state of the cooker, this is transparent since
+ # we always take and leave the cooker in state.initial
+ setFeatures.readonly = True
+
+ def updateConfig(self, command, params):
+ options = params[0]
+ environment = params[1]
+ cmdline = params[2]
+ command.cooker.updateConfigOpts(options, environment, cmdline)
+ updateConfig.needconfig = False
+
+ def parseConfiguration(self, command, params):
+ """Instruct bitbake to parse its configuration
+ NOTE: it is only necessary to call this if you aren't calling any normal action
+ (otherwise parsing is taken care of automatically)
+ """
+ command.cooker.parseConfiguration()
+ parseConfiguration.needconfig = False
+
+ def getLayerPriorities(self, command, params):
+ command.cooker.parseConfiguration()
+ ret = []
+ # regex objects cannot be marshalled by xmlrpc
+ for collection, pattern, regex, pri in command.cooker.bbfile_config_priorities:
+ ret.append((collection, pattern, regex.pattern, pri))
+ return ret
+ getLayerPriorities.readonly = True
+
+ def getRecipes(self, command, params):
+ try:
+ mc = params[0]
+ except IndexError:
+ mc = ''
+ return list(command.cooker.recipecaches[mc].pkg_pn.items())
+ getRecipes.readonly = True
+
+ def getRecipeDepends(self, command, params):
+ try:
+ mc = params[0]
+ except IndexError:
+ mc = ''
+ return list(command.cooker.recipecaches[mc].deps.items())
+ getRecipeDepends.readonly = True
+
+ def getRecipeVersions(self, command, params):
+ try:
+ mc = params[0]
+ except IndexError:
+ mc = ''
+ return command.cooker.recipecaches[mc].pkg_pepvpr
+ getRecipeVersions.readonly = True
+
+ def getRecipeProvides(self, command, params):
+ try:
+ mc = params[0]
+ except IndexError:
+ mc = ''
+ return command.cooker.recipecaches[mc].fn_provides
+ getRecipeProvides.readonly = True
+
+ def getRecipePackages(self, command, params):
+ try:
+ mc = params[0]
+ except IndexError:
+ mc = ''
+ return command.cooker.recipecaches[mc].packages
+ getRecipePackages.readonly = True
+
+ def getRecipePackagesDynamic(self, command, params):
+ try:
+ mc = params[0]
+ except IndexError:
+ mc = ''
+ return command.cooker.recipecaches[mc].packages_dynamic
+ getRecipePackagesDynamic.readonly = True
+
+ def getRProviders(self, command, params):
+ try:
+ mc = params[0]
+ except IndexError:
+ mc = ''
+ return command.cooker.recipecaches[mc].rproviders
+ getRProviders.readonly = True
+
+ def getRuntimeDepends(self, command, params):
+ ret = []
+ try:
+ mc = params[0]
+ except IndexError:
+ mc = ''
+ rundeps = command.cooker.recipecaches[mc].rundeps
+ for key, value in rundeps.items():
+ if isinstance(value, defaultdict):
+ value = dict(value)
+ ret.append((key, value))
+ return ret
+ getRuntimeDepends.readonly = True
+
+ def getRuntimeRecommends(self, command, params):
+ ret = []
+ try:
+ mc = params[0]
+ except IndexError:
+ mc = ''
+ runrecs = command.cooker.recipecaches[mc].runrecs
+ for key, value in runrecs.items():
+ if isinstance(value, defaultdict):
+ value = dict(value)
+ ret.append((key, value))
+ return ret
+ getRuntimeRecommends.readonly = True
+
+ def getRecipeInherits(self, command, params):
+ try:
+ mc = params[0]
+ except IndexError:
+ mc = ''
+ return command.cooker.recipecaches[mc].inherits
+ getRecipeInherits.readonly = True
+
+ def getBbFilePriority(self, command, params):
+ try:
+ mc = params[0]
+ except IndexError:
+ mc = ''
+ return command.cooker.recipecaches[mc].bbfile_priority
+ getBbFilePriority.readonly = True
+
+ def getDefaultPreference(self, command, params):
+ try:
+ mc = params[0]
+ except IndexError:
+ mc = ''
+ return command.cooker.recipecaches[mc].pkg_dp
+ getDefaultPreference.readonly = True
+
+ def getSkippedRecipes(self, command, params):
+ # Return list sorted by reverse priority order
+ import bb.cache
+ skipdict = OrderedDict(sorted(command.cooker.skiplist.items(),
+ key=lambda x: (-command.cooker.collection.calc_bbfile_priority(bb.cache.virtualfn2realfn(x[0])[0]), x[0])))
+ return list(skipdict.items())
+ getSkippedRecipes.readonly = True
+
+ def getOverlayedRecipes(self, command, params):
+ return list(command.cooker.collection.overlayed.items())
+ getOverlayedRecipes.readonly = True
+
+ def getFileAppends(self, command, params):
+ fn = params[0]
+ return command.cooker.collection.get_file_appends(fn)
+ getFileAppends.readonly = True
+
+ def getAllAppends(self, command, params):
+ return command.cooker.collection.bbappends
+ getAllAppends.readonly = True
+
+ def findProviders(self, command, params):
+ return command.cooker.findProviders()
+ findProviders.readonly = True
+
+ def findBestProvider(self, command, params):
+ (mc, pn) = split_mc_pn(params[0])
+ return command.cooker.findBestProvider(pn, mc)
+ findBestProvider.readonly = True
+
+ def allProviders(self, command, params):
+ try:
+ mc = params[0]
+ except IndexError:
+ mc = ''
+ return list(bb.providers.allProviders(command.cooker.recipecaches[mc]).items())
+ allProviders.readonly = True
+
+ def getRuntimeProviders(self, command, params):
+ rprovide = params[0]
+ try:
+ mc = params[1]
+ except IndexError:
+ mc = ''
+ all_p = bb.providers.getRuntimeProviders(command.cooker.recipecaches[mc], rprovide)
+ if all_p:
+ best = bb.providers.filterProvidersRunTime(all_p, rprovide,
+ command.cooker.data,
+ command.cooker.recipecaches[mc])[0][0]
+ else:
+ best = None
+ return all_p, best
+ getRuntimeProviders.readonly = True
+
+ def dataStoreConnectorFindVar(self, command, params):
+ dsindex = params[0]
+ name = params[1]
+ datastore = command.remotedatastores[dsindex]
+ value, overridedata = datastore._findVar(name)
+
+ if value:
+ content = value.get('_content', None)
+ if isinstance(content, bb.data_smart.DataSmart):
+ # Value is a datastore (e.g. BB_ORIGENV) - need to handle this carefully
+ idx = command.remotedatastores.check_store(content, True)
+ return {'_content': DataStoreConnectionHandle(idx),
+ '_connector_origtype': 'DataStoreConnectionHandle',
+ '_connector_overrides': overridedata}
+ elif isinstance(content, set):
+ return {'_content': list(content),
+ '_connector_origtype': 'set',
+ '_connector_overrides': overridedata}
+ else:
+ value['_connector_overrides'] = overridedata
+ else:
+ value = {}
+ value['_connector_overrides'] = overridedata
+ return value
+ dataStoreConnectorFindVar.readonly = True
+
+ def dataStoreConnectorGetKeys(self, command, params):
+ dsindex = params[0]
+ datastore = command.remotedatastores[dsindex]
+ return list(datastore.keys())
+ dataStoreConnectorGetKeys.readonly = True
+
+ def dataStoreConnectorGetVarHistory(self, command, params):
+ dsindex = params[0]
+ name = params[1]
+ datastore = command.remotedatastores[dsindex]
+ return datastore.varhistory.variable(name)
+ dataStoreConnectorGetVarHistory.readonly = True
+
+ def dataStoreConnectorExpandPythonRef(self, command, params):
+ config_data_dict = params[0]
+ varname = params[1]
+ expr = params[2]
+
+ config_data = command.remotedatastores.receive_datastore(config_data_dict)
+
+ varparse = bb.data_smart.VariableParse(varname, config_data)
+ return varparse.python_sub(expr)
+
+ def dataStoreConnectorRelease(self, command, params):
+ dsindex = params[0]
+ if dsindex <= 0:
+ raise CommandError('dataStoreConnectorRelease: invalid index %d' % dsindex)
+ command.remotedatastores.release(dsindex)
+
+ def dataStoreConnectorSetVarFlag(self, command, params):
+ dsindex = params[0]
+ name = params[1]
+ flag = params[2]
+ value = params[3]
+ datastore = command.remotedatastores[dsindex]
+ datastore.setVarFlag(name, flag, value)
+
+ def dataStoreConnectorDelVar(self, command, params):
+ dsindex = params[0]
+ name = params[1]
+ datastore = command.remotedatastores[dsindex]
+ if len(params) > 2:
+ flag = params[2]
+ datastore.delVarFlag(name, flag)
+ else:
+ datastore.delVar(name)
+
+ def dataStoreConnectorRenameVar(self, command, params):
+ dsindex = params[0]
+ name = params[1]
+ newname = params[2]
+ datastore = command.remotedatastores[dsindex]
+ datastore.renameVar(name, newname)
+
+ def parseRecipeFile(self, command, params):
+ """
+ Parse the specified recipe file (with or without bbappends)
+ and return a datastore object representing the environment
+ for the recipe.
+ """
+ fn = params[0]
+ appends = params[1]
+ appendlist = params[2]
+ if len(params) > 3:
+ config_data_dict = params[3]
+ config_data = command.remotedatastores.receive_datastore(config_data_dict)
+ else:
+ config_data = None
+
+ if appends:
+ if appendlist is not None:
+ appendfiles = appendlist
+ else:
+ appendfiles = command.cooker.collection.get_file_appends(fn)
+ else:
+ appendfiles = []
+ # We are calling bb.cache locally here rather than on the server,
+ # but that's OK because it doesn't actually need anything from
+ # the server barring the global datastore (which we have a remote
+ # version of)
+ if config_data:
+ # We have to use a different function here if we're passing in a datastore
+ # NOTE: we took a copy above, so we don't do it here again
+ envdata = bb.cache.parse_recipe(config_data, fn, appendfiles)['']
+ else:
+ # Use the standard path
+ parser = bb.cache.NoCache(command.cooker.databuilder)
+ envdata = parser.loadDataFull(fn, appendfiles)
+ idx = command.remotedatastores.store(envdata)
+ return DataStoreConnectionHandle(idx)
+ parseRecipeFile.readonly = True
+
+class CommandsAsync:
+ """
+ A class of asynchronous commands
+ These functions communicate via generated events.
+ Any function that requires metadata parsing should be here.
+ """
+
+ def buildFile(self, command, params):
+ """
+ Build a single specified .bb file
+ """
+ bfile = params[0]
+ task = params[1]
+ if len(params) > 2:
+ internal = params[2]
+ else:
+ internal = False
+
+ if internal:
+ command.cooker.buildFileInternal(bfile, task, fireevents=False, quietlog=True)
+ else:
+ command.cooker.buildFile(bfile, task)
+ buildFile.needcache = False
+
+ def buildTargets(self, command, params):
+ """
+ Build a set of targets
+ """
+ pkgs_to_build = params[0]
+ task = params[1]
+
+ command.cooker.buildTargets(pkgs_to_build, task)
+ buildTargets.needcache = True
+
+ def generateDepTreeEvent(self, command, params):
+ """
+ Generate an event containing the dependency information
+ """
+ pkgs_to_build = params[0]
+ task = params[1]
+
+ command.cooker.generateDepTreeEvent(pkgs_to_build, task)
+ command.finishAsyncCommand()
+ generateDepTreeEvent.needcache = True
+
+ def generateDotGraph(self, command, params):
+ """
+ Dump dependency information to disk as .dot files
+ """
+ pkgs_to_build = params[0]
+ task = params[1]
+
+ command.cooker.generateDotGraphFiles(pkgs_to_build, task)
+ command.finishAsyncCommand()
+ generateDotGraph.needcache = True
+
+ def generateTargetsTree(self, command, params):
+ """
+ Generate a tree of buildable targets.
+ If klass is provided ensure all recipes that inherit the class are
+ included in the package list.
+ If pkg_list provided use that list (plus any extras brought in by
+ klass) rather than generating a tree for all packages.
+ """
+ klass = params[0]
+ pkg_list = params[1]
+
+ command.cooker.generateTargetsTree(klass, pkg_list)
+ command.finishAsyncCommand()
+ generateTargetsTree.needcache = True
+
+ def findConfigFiles(self, command, params):
+ """
+ Find config files which provide appropriate values
+ for the passed configuration variable. i.e. MACHINE
+ """
+ varname = params[0]
+
+ command.cooker.findConfigFiles(varname)
+ command.finishAsyncCommand()
+ findConfigFiles.needcache = False
+
+ def findFilesMatchingInDir(self, command, params):
+ """
+ Find implementation files matching the specified pattern
+        in the requested subdirectory of each BBPATH entry
+ """
+ pattern = params[0]
+ directory = params[1]
+
+ command.cooker.findFilesMatchingInDir(pattern, directory)
+ command.finishAsyncCommand()
+ findFilesMatchingInDir.needcache = False
+
+ def findConfigFilePath(self, command, params):
+ """
+ Find the path of the requested configuration file
+ """
+ configfile = params[0]
+
+ command.cooker.findConfigFilePath(configfile)
+ command.finishAsyncCommand()
+ findConfigFilePath.needcache = False
+
+ def showVersions(self, command, params):
+ """
+ Show the currently selected versions
+ """
+ command.cooker.showVersions()
+ command.finishAsyncCommand()
+ showVersions.needcache = True
+
+ def showEnvironmentTarget(self, command, params):
+ """
+ Print the environment of a target recipe
+ (needs the cache to work out which recipe to use)
+ """
+ pkg = params[0]
+
+ command.cooker.showEnvironment(None, pkg)
+ command.finishAsyncCommand()
+ showEnvironmentTarget.needcache = True
+
+ def showEnvironment(self, command, params):
+ """
+        Print the standard environment, or if a recipe is
+        specified, the environment for that recipe
+ """
+ bfile = params[0]
+
+ command.cooker.showEnvironment(bfile)
+ command.finishAsyncCommand()
+ showEnvironment.needcache = False
+
+ def parseFiles(self, command, params):
+ """
+ Parse the .bb files
+ """
+ command.cooker.updateCache()
+ command.finishAsyncCommand()
+ parseFiles.needcache = True
+
+ def compareRevisions(self, command, params):
+ """
+        Compare stored source revisions with the latest upstream revisions
+ """
+ if bb.fetch.fetcher_compare_revisions(command.cooker.data):
+ command.finishAsyncCommand(code=1)
+ else:
+ command.finishAsyncCommand()
+ compareRevisions.needcache = True
+
+ def triggerEvent(self, command, params):
+ """
+        Trigger the specified event
+ """
+ event = params[0]
+ bb.event.fire(eval(event), command.cooker.data)
+ command.currentAsyncCommand = None
+ triggerEvent.needcache = False
+
+ def resetCooker(self, command, params):
+ """
+ Reset the cooker to its initial state, thus forcing a reparse for
+ any async command that has the needcache property set to True
+ """
+ command.cooker.reset()
+ command.finishAsyncCommand()
+ resetCooker.needcache = False
+
+ def clientComplete(self, command, params):
+ """
+ Do the right thing when the controlling client exits
+ """
+ command.cooker.clientComplete()
+ command.finishAsyncCommand()
+ clientComplete.needcache = False
+
+ def findSigInfo(self, command, params):
+ """
+ Find signature info files via the signature generator
+ """
+ pn = params[0]
+ taskname = params[1]
+ sigs = params[2]
+ res = bb.siggen.find_siginfo(pn, taskname, sigs, command.cooker.data)
+ bb.event.fire(bb.event.FindSigInfoResult(res), command.cooker.data)
+ command.finishAsyncCommand()
+ findSigInfo.needcache = False
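
The sync/async command classes above rely on plain function attributes (readonly on CommandsSync methods, needcache on CommandsAsync methods) that the dispatcher inspects before running a command. A minimal standalone sketch of that pattern, using hypothetical names rather than BitBake's actual Command machinery:

    # Hypothetical names; only the attribute-inspection pattern is real.
    class Commands:
        def show_state(self, params):
            return "state"
        show_state.readonly = True    # safe against a read-only datastore

        def rebuild(self, params):
            return "rebuilt %s" % params[0]
        rebuild.needcache = True      # dispatcher must parse metadata first

    def dispatch(commands, name, params, cache_ready):
        func = getattr(commands, name)
        # Function attributes are readable through the bound method.
        if getattr(func, "needcache", False) and not cache_ready:
            raise RuntimeError("cache not ready for %s" % name)
        return func(params)

    print(dispatch(Commands(), "show_state", [], cache_ready=False))
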
diff --git a/poky/bitbake/lib/bb/compat.py b/poky/bitbake/lib/bb/compat.py
new file mode 100644
index 000000000..de1923d28
--- /dev/null
+++ b/poky/bitbake/lib/bb/compat.py
@@ -0,0 +1,6 @@
+"""Code pulled from future python versions, here for compatibility"""
+
+from collections import MutableMapping, KeysView, ValuesView, ItemsView, OrderedDict
+from functools import total_ordering
+
+
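
For context on compat.py: importing these ABCs directly from collections was later deprecated (the names moved to collections.abc in Python 3.3 and the old aliases were removed in Python 3.10). A hedged sketch of a version-tolerant equivalent, not the module's actual contents:

    # Sketch only: prefer collections.abc where available.
    try:
        from collections.abc import MutableMapping, KeysView, ValuesView, ItemsView
    except ImportError:
        from collections import MutableMapping, KeysView, ValuesView, ItemsView
    from collections import OrderedDict
    from functools import total_ordering
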
diff --git a/poky/bitbake/lib/bb/cooker.py b/poky/bitbake/lib/bb/cooker.py
new file mode 100644
index 000000000..1fda40dd4
--- /dev/null
+++ b/poky/bitbake/lib/bb/cooker.py
@@ -0,0 +1,2161 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer
+# Copyright (C) 2005 Holger Hans Peter Freyther
+# Copyright (C) 2005 ROAD GmbH
+# Copyright (C) 2006 - 2007 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+import sys, os, glob, os.path, re, time
+import atexit
+import itertools
+import logging
+import multiprocessing
+import sre_constants
+import threading
+from io import StringIO, UnsupportedOperation
+from contextlib import closing
+from functools import wraps
+from collections import defaultdict, namedtuple
+import bb, bb.exceptions, bb.command
+from bb import utils, data, parse, event, cache, providers, taskdata, runqueue, build
+import queue
+import signal
+import subprocess
+import errno
+import prserv.serv
+import pyinotify
+import json
+import pickle
+import codecs
+
+logger = logging.getLogger("BitBake")
+collectlog = logging.getLogger("BitBake.Collection")
+buildlog = logging.getLogger("BitBake.Build")
+parselog = logging.getLogger("BitBake.Parsing")
+providerlog = logging.getLogger("BitBake.Provider")
+
+class NoSpecificMatch(bb.BBHandledException):
+ """
+ Exception raised when no or multiple file matches are found
+ """
+
+class NothingToBuild(Exception):
+ """
+ Exception raised when there is nothing to build
+ """
+
+class CollectionError(bb.BBHandledException):
+ """
+ Exception raised when layer configuration is incorrect
+ """
+
+class state:
+ initial, parsing, running, shutdown, forceshutdown, stopped, error = list(range(7))
+
+ @classmethod
+ def get_name(cls, code):
+ for name in dir(cls):
+ value = getattr(cls, name)
+ if type(value) == type(cls.initial) and value == code:
+ return name
+ raise ValueError("Invalid status code: %s" % code)
+
+
+class SkippedPackage:
+ def __init__(self, info = None, reason = None):
+ self.pn = None
+ self.skipreason = None
+ self.provides = None
+ self.rprovides = None
+
+ if info:
+ self.pn = info.pn
+ self.skipreason = info.skipreason
+ self.provides = info.provides
+ self.rprovides = info.rprovides
+ elif reason:
+ self.skipreason = reason
+
+
+class CookerFeatures(object):
+ _feature_list = [HOB_EXTRA_CACHES, BASEDATASTORE_TRACKING, SEND_SANITYEVENTS] = list(range(3))
+
+ def __init__(self):
+ self._features=set()
+
+ def setFeature(self, f):
+ # validate we got a request for a feature we support
+ if f not in CookerFeatures._feature_list:
+ return
+ self._features.add(f)
+
+ def __contains__(self, f):
+ return f in self._features
+
+ def __iter__(self):
+ return self._features.__iter__()
+
+ def __next__(self):
+ return next(self._features)
+
+
+class EventWriter:
+ def __init__(self, cooker, eventfile):
+ self.file_inited = None
+ self.cooker = cooker
+ self.eventfile = eventfile
+ self.event_queue = []
+
+ def write_event(self, event):
+ with open(self.eventfile, "a") as f:
+ try:
+ str_event = codecs.encode(pickle.dumps(event), 'base64').decode('utf-8')
+ f.write("%s\n" % json.dumps({"class": event.__module__ + "." + event.__class__.__name__,
+ "vars": str_event}))
+ except Exception as err:
+ import traceback
+ print(err, traceback.format_exc())
+
+ def send(self, event):
+ if self.file_inited:
+ # we have the file, just write the event
+ self.write_event(event)
+ else:
+ # init on bb.event.BuildStarted
+ name = "%s.%s" % (event.__module__, event.__class__.__name__)
+ if name in ("bb.event.BuildStarted", "bb.cooker.CookerExit"):
+ with open(self.eventfile, "w") as f:
+ f.write("%s\n" % json.dumps({ "allvariables" : self.cooker.getAllKeysWithFlags(["doc", "func"])}))
+
+ self.file_inited = True
+
+ # write pending events
+ for evt in self.event_queue:
+ self.write_event(evt)
+
+ # also write the current event
+ self.write_event(event)
+ else:
+ # queue all events until the file is inited
+ self.event_queue.append(event)
+
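
EventWriter stores one JSON object per line: a header carrying all variables on BuildStarted, then one entry per event whose "vars" field is a base64-encoded pickle. A minimal sketch of reading such a log back (assuming the layout produced by write_event above; unpickling requires the bb event classes to be importable):

    import codecs
    import json
    import pickle

    def read_eventlog(path):
        """Yield (class_name, event) pairs from an EventWriter-style log."""
        with open(path) as f:
            for line in f:
                entry = json.loads(line)
                if "allvariables" in entry:
                    continue  # header line written on BuildStarted
                payload = codecs.decode(entry["vars"].encode("utf-8"), "base64")
                yield entry["class"], pickle.loads(payload)
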
+#============================================================================#
+# BBCooker
+#============================================================================#
+class BBCooker:
+ """
+ Manages one bitbake build run
+ """
+
+ def __init__(self, configuration, featureSet=None):
+ self.recipecaches = None
+ self.skiplist = {}
+ self.featureset = CookerFeatures()
+ if featureSet:
+ for f in featureSet:
+ self.featureset.setFeature(f)
+
+ self.configuration = configuration
+
+ self.configwatcher = pyinotify.WatchManager()
+ self.configwatcher.bbseen = []
+ self.configwatcher.bbwatchedfiles = []
+ self.confignotifier = pyinotify.Notifier(self.configwatcher, self.config_notifications)
+ self.watchmask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_CREATE | pyinotify.IN_DELETE | \
+ pyinotify.IN_DELETE_SELF | pyinotify.IN_MODIFY | pyinotify.IN_MOVE_SELF | \
+ pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO
+ self.watcher = pyinotify.WatchManager()
+ self.watcher.bbseen = []
+ self.watcher.bbwatchedfiles = []
+ self.notifier = pyinotify.Notifier(self.watcher, self.notifications)
+
+ # If being called by something like tinfoil, we need to clean cached data
+ # which may now be invalid
+ bb.parse.clear_cache()
+ bb.parse.BBHandler.cached_statements = {}
+
+ self.ui_cmdline = None
+
+ self.initConfigurationData()
+
+ # we log all events to a file if so directed
+ if self.configuration.writeeventlog:
+ # register the log file writer as UI Handler
+ writer = EventWriter(self, self.configuration.writeeventlog)
+ EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event'])
+ bb.event.register_UIHhandler(EventLogWriteHandler(writer))
+
+ self.inotify_modified_files = []
+
+ def _process_inotify_updates(server, cooker, abort):
+ cooker.process_inotify_updates()
+ return 1.0
+
+ self.configuration.server_register_idlecallback(_process_inotify_updates, self)
+
+ # TOSTOP must not be set or our children will hang when they output
+ try:
+ fd = sys.stdout.fileno()
+ if os.isatty(fd):
+ import termios
+ tcattr = termios.tcgetattr(fd)
+ if tcattr[3] & termios.TOSTOP:
+ buildlog.info("The terminal had the TOSTOP bit set, clearing...")
+ tcattr[3] = tcattr[3] & ~termios.TOSTOP
+ termios.tcsetattr(fd, termios.TCSANOW, tcattr)
+ except UnsupportedOperation:
+ pass
+
+ self.command = bb.command.Command(self)
+ self.state = state.initial
+
+ self.parser = None
+
+ signal.signal(signal.SIGTERM, self.sigterm_exception)
+ # Let SIGHUP exit as SIGTERM
+ signal.signal(signal.SIGHUP, self.sigterm_exception)
+
+ def process_inotify_updates(self):
+ for n in [self.confignotifier, self.notifier]:
+ if n.check_events(timeout=0):
+                # read notified events and enqueue them
+ n.read_events()
+ n.process_events()
+
+ def config_notifications(self, event):
+ if event.maskname == "IN_Q_OVERFLOW":
+ bb.warn("inotify event queue overflowed, invalidating caches.")
+ self.parsecache_valid = False
+ self.baseconfig_valid = False
+ bb.parse.clear_cache()
+ return
+ if not event.pathname in self.configwatcher.bbwatchedfiles:
+ return
+ if not event.pathname in self.inotify_modified_files:
+ self.inotify_modified_files.append(event.pathname)
+ self.baseconfig_valid = False
+
+ def notifications(self, event):
+ if event.maskname == "IN_Q_OVERFLOW":
+ bb.warn("inotify event queue overflowed, invalidating caches.")
+ self.parsecache_valid = False
+ bb.parse.clear_cache()
+ return
+ if event.pathname.endswith("bitbake-cookerdaemon.log") \
+ or event.pathname.endswith("bitbake.lock"):
+ return
+ if not event.pathname in self.inotify_modified_files:
+ self.inotify_modified_files.append(event.pathname)
+ self.parsecache_valid = False
+
+ def add_filewatch(self, deps, watcher=None, dirs=False):
+ if not watcher:
+ watcher = self.watcher
+ for i in deps:
+ watcher.bbwatchedfiles.append(i[0])
+ if dirs:
+ f = i[0]
+ else:
+ f = os.path.dirname(i[0])
+ if f in watcher.bbseen:
+ continue
+ watcher.bbseen.append(f)
+ watchtarget = None
+ while True:
+                    # We try to add watches for files that don't exist but which, if they
+                    # did, would influence the parser. The parent directory of these files
+                    # may not exist either, in which case we need to watch the nearest
+                    # parent that does exist for changes.
+ try:
+ watcher.add_watch(f, self.watchmask, quiet=False)
+ if watchtarget:
+ watcher.bbwatchedfiles.append(watchtarget)
+ break
+ except pyinotify.WatchManagerError as e:
+ if 'ENOENT' in str(e):
+ watchtarget = f
+ f = os.path.dirname(f)
+ if f in watcher.bbseen:
+ break
+ watcher.bbseen.append(f)
+ continue
+ if 'ENOSPC' in str(e):
+                        providerlog.error("No space left on device, or exceeded fs.inotify.max_user_watches?")
+ providerlog.error("To check max_user_watches: sysctl -n fs.inotify.max_user_watches.")
+ providerlog.error("To modify max_user_watches: sysctl -n -w fs.inotify.max_user_watches=<value>.")
+ providerlog.error("Root privilege is required to modify max_user_watches.")
+ raise
+
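
The ENOENT branch above walks up the directory tree until it finds a parent that can actually be watched. The same idea in isolation, as a sketch with a hypothetical helper name and no pyinotify dependency:

    import os

    def nearest_existing_parent(path):
        """Walk up from 'path' to the closest directory that exists,
        mirroring the ENOENT fallback in add_filewatch."""
        d = os.path.dirname(path)
        while d and not os.path.isdir(d):
            d = os.path.dirname(d)
        return d or "/"

    # Watching this directory catches the creation of any missing
    # ancestor of the file we actually care about.
    print(nearest_existing_parent("/etc/definitely/missing/local.conf"))
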
+ def sigterm_exception(self, signum, stackframe):
+ if signum == signal.SIGTERM:
+ bb.warn("Cooker received SIGTERM, shutting down...")
+ elif signum == signal.SIGHUP:
+ bb.warn("Cooker received SIGHUP, shutting down...")
+ self.state = state.forceshutdown
+
+ def setFeatures(self, features):
+ # we only accept a new feature set if we're in state initial, so we can reset without problems
+ if not self.state in [state.initial, state.shutdown, state.forceshutdown, state.stopped, state.error]:
+ raise Exception("Illegal state for feature set change")
+ original_featureset = list(self.featureset)
+ for feature in features:
+ self.featureset.setFeature(feature)
+ bb.debug(1, "Features set %s (was %s)" % (original_featureset, list(self.featureset)))
+ if (original_featureset != list(self.featureset)) and self.state != state.error:
+ self.reset()
+
+ def initConfigurationData(self):
+
+ self.state = state.initial
+ self.caches_array = []
+
+ # Need to preserve BB_CONSOLELOG over resets
+ consolelog = None
+ if hasattr(self, "data"):
+ consolelog = self.data.getVar("BB_CONSOLELOG")
+
+ if CookerFeatures.BASEDATASTORE_TRACKING in self.featureset:
+ self.enableDataTracking()
+
+ all_extra_cache_names = []
+ # We hardcode all known cache types in a single place, here.
+ if CookerFeatures.HOB_EXTRA_CACHES in self.featureset:
+ all_extra_cache_names.append("bb.cache_extra:HobRecipeInfo")
+
+ caches_name_array = ['bb.cache:CoreRecipeInfo'] + all_extra_cache_names
+
+ # At least CoreRecipeInfo will be loaded, so caches_array will never be empty!
+ # This is the entry point, no further check needed!
+ for var in caches_name_array:
+ try:
+ module_name, cache_name = var.split(':')
+ module = __import__(module_name, fromlist=(cache_name,))
+ self.caches_array.append(getattr(module, cache_name))
+ except ImportError as exc:
+ logger.critical("Unable to import extra RecipeInfo '%s' from '%s': %s" % (cache_name, module_name, exc))
+ sys.exit("FATAL: Failed to import extra cache class '%s'." % cache_name)
+
+ self.databuilder = bb.cookerdata.CookerDataBuilder(self.configuration, False)
+ self.databuilder.parseBaseConfiguration()
+ self.data = self.databuilder.data
+ self.data_hash = self.databuilder.data_hash
+ self.extraconfigdata = {}
+
+ if consolelog:
+ self.data.setVar("BB_CONSOLELOG", consolelog)
+
+ self.data.setVar('BB_CMDLINE', self.ui_cmdline)
+
+ #
+ # Copy of the data store which has been expanded.
+ # Used for firing events and accessing variables where expansion needs to be accounted for
+ #
+ bb.parse.init_parser(self.data)
+
+ if CookerFeatures.BASEDATASTORE_TRACKING in self.featureset:
+ self.disableDataTracking()
+
+ self.data.renameVar("__depends", "__base_depends")
+ self.add_filewatch(self.data.getVar("__base_depends", False), self.configwatcher)
+
+ self.baseconfig_valid = True
+ self.parsecache_valid = False
+
+ def handlePRServ(self):
+ # Setup a PR Server based on the new configuration
+ try:
+ self.prhost = prserv.serv.auto_start(self.data)
+ except prserv.serv.PRServiceConfigError as e:
+            bb.fatal("Unable to start PR Server, exiting")
+
+ def enableDataTracking(self):
+ self.configuration.tracking = True
+ if hasattr(self, "data"):
+ self.data.enableTracking()
+
+ def disableDataTracking(self):
+ self.configuration.tracking = False
+ if hasattr(self, "data"):
+ self.data.disableTracking()
+
+ def parseConfiguration(self):
+ # Set log file verbosity
+ verboselogs = bb.utils.to_boolean(self.data.getVar("BB_VERBOSE_LOGS", False))
+ if verboselogs:
+ bb.msg.loggerVerboseLogs = True
+
+ # Change nice level if we're asked to
+ nice = self.data.getVar("BB_NICE_LEVEL")
+ if nice:
+ curnice = os.nice(0)
+ nice = int(nice) - curnice
+ buildlog.verbose("Renice to %s " % os.nice(nice))
+
+ if self.recipecaches:
+ del self.recipecaches
+ self.multiconfigs = self.databuilder.mcdata.keys()
+ self.recipecaches = {}
+ for mc in self.multiconfigs:
+ self.recipecaches[mc] = bb.cache.CacheData(self.caches_array)
+
+ self.handleCollections(self.data.getVar("BBFILE_COLLECTIONS"))
+
+ self.parsecache_valid = False
+
+ def updateConfigOpts(self, options, environment, cmdline):
+ self.ui_cmdline = cmdline
+ clean = True
+ for o in options:
+ if o in ['prefile', 'postfile']:
+ # Only these options may require a reparse
+ try:
+ if getattr(self.configuration, o) == options[o]:
+ # Value is the same, no need to mark dirty
+ continue
+ except AttributeError:
+ pass
+                logger.debug(1, "Marking as dirty due to '%s' option change to '%s'" % (o, options[o]))
+ clean = False
+ setattr(self.configuration, o, options[o])
+ for k in bb.utils.approved_variables():
+ if k in environment and k not in self.configuration.env:
+ logger.debug(1, "Updating new environment variable %s to %s" % (k, environment[k]))
+ self.configuration.env[k] = environment[k]
+ clean = False
+ if k in self.configuration.env and k not in environment:
+ logger.debug(1, "Updating environment variable %s (deleted)" % (k))
+ del self.configuration.env[k]
+ clean = False
+ if k not in self.configuration.env and k not in environment:
+ continue
+ if environment[k] != self.configuration.env[k]:
+ logger.debug(1, "Updating environment variable %s from %s to %s" % (k, self.configuration.env[k], environment[k]))
+ self.configuration.env[k] = environment[k]
+ clean = False
+ if not clean:
+ logger.debug(1, "Base environment change, triggering reparse")
+ self.reset()
+
+ def runCommands(self, server, data, abort):
+ """
+ Run any queued asynchronous command
+ This is done by the idle handler so it runs in true context rather than
+ tied to any UI.
+ """
+
+ return self.command.runAsyncCommand()
+
+ def showVersions(self):
+
+ (latest_versions, preferred_versions) = self.findProviders()
+
+ logger.plain("%-35s %25s %25s", "Recipe Name", "Latest Version", "Preferred Version")
+ logger.plain("%-35s %25s %25s\n", "===========", "==============", "=================")
+
+ for p in sorted(self.recipecaches[''].pkg_pn):
+ pref = preferred_versions[p]
+ latest = latest_versions[p]
+
+ prefstr = pref[0][0] + ":" + pref[0][1] + '-' + pref[0][2]
+ lateststr = latest[0][0] + ":" + latest[0][1] + "-" + latest[0][2]
+
+ if pref == latest:
+ prefstr = ""
+
+ logger.plain("%-35s %25s %25s", p, lateststr, prefstr)
+
+ def showEnvironment(self, buildfile=None, pkgs_to_build=None):
+ """
+ Show the outer or per-recipe environment
+ """
+ fn = None
+ envdata = None
+ if not pkgs_to_build:
+ pkgs_to_build = []
+
+ orig_tracking = self.configuration.tracking
+ if not orig_tracking:
+ self.enableDataTracking()
+ self.reset()
+
+
+ if buildfile:
+ # Parse the configuration here. We need to do it explicitly here since
+ # this showEnvironment() code path doesn't use the cache
+ self.parseConfiguration()
+
+ fn, cls, mc = bb.cache.virtualfn2realfn(buildfile)
+ fn = self.matchFile(fn)
+ fn = bb.cache.realfn2virtual(fn, cls, mc)
+ elif len(pkgs_to_build) == 1:
+ ignore = self.data.getVar("ASSUME_PROVIDED") or ""
+ if pkgs_to_build[0] in set(ignore.split()):
+ bb.fatal("%s is in ASSUME_PROVIDED" % pkgs_to_build[0])
+
+ taskdata, runlist = self.buildTaskData(pkgs_to_build, None, self.configuration.abort, allowincomplete=True)
+
+ mc = runlist[0][0]
+ fn = runlist[0][3]
+ else:
+ envdata = self.data
+ data.expandKeys(envdata)
+ parse.ast.runAnonFuncs(envdata)
+
+ if fn:
+ try:
+ bb_cache = bb.cache.Cache(self.databuilder, self.data_hash, self.caches_array)
+ envdata = bb_cache.loadDataFull(fn, self.collection.get_file_appends(fn))
+ except Exception as e:
+ parselog.exception("Unable to read %s", fn)
+ raise
+
+ # Display history
+ with closing(StringIO()) as env:
+ self.data.inchistory.emit(env)
+ logger.plain(env.getvalue())
+
+ # emit variables and shell functions
+ with closing(StringIO()) as env:
+ data.emit_env(env, envdata, True)
+ logger.plain(env.getvalue())
+
+        # emit the metadata which isn't valid shell
+ for e in sorted(envdata.keys()):
+ if envdata.getVarFlag(e, 'func', False) and envdata.getVarFlag(e, 'python', False):
+ logger.plain("\npython %s () {\n%s}\n", e, envdata.getVar(e, False))
+
+ if not orig_tracking:
+ self.disableDataTracking()
+ self.reset()
+
+ def buildTaskData(self, pkgs_to_build, task, abort, allowincomplete=False):
+ """
+ Prepare a runqueue and taskdata object for iteration over pkgs_to_build
+ """
+ bb.event.fire(bb.event.TreeDataPreparationStarted(), self.data)
+
+ # A task of None means use the default task
+ if task is None:
+ task = self.configuration.cmd
+ if not task.startswith("do_"):
+ task = "do_%s" % task
+
+ targetlist = self.checkPackages(pkgs_to_build, task)
+ fulltargetlist = []
+ defaulttask_implicit = ''
+ defaulttask_explicit = False
+ wildcard = False
+
+        # Wildcard expansion:
+        # Replace a string such as "multiconfig:*:bash"
+        # with "multiconfig:A:bash multiconfig:B:bash bash"
+ for k in targetlist:
+ if k.startswith("multiconfig:"):
+ if wildcard:
+ bb.fatal('multiconfig conflict')
+ if k.split(":")[1] == "*":
+ wildcard = True
+ for mc in self.multiconfigs:
+ if mc:
+ fulltargetlist.append(k.replace('*', mc))
+ # implicit default task
+ else:
+ defaulttask_implicit = k.split(":")[2]
+ else:
+ fulltargetlist.append(k)
+ else:
+ defaulttask_explicit = True
+ fulltargetlist.append(k)
+
+ if not defaulttask_explicit and defaulttask_implicit != '':
+ fulltargetlist.append(defaulttask_implicit)
+
+ bb.debug(1,"Target list: %s" % (str(fulltargetlist)))
+ taskdata = {}
+ localdata = {}
+
+ for mc in self.multiconfigs:
+ taskdata[mc] = bb.taskdata.TaskData(abort, skiplist=self.skiplist, allowincomplete=allowincomplete)
+ localdata[mc] = data.createCopy(self.databuilder.mcdata[mc])
+ bb.data.expandKeys(localdata[mc])
+
+ current = 0
+ runlist = []
+ for k in fulltargetlist:
+ mc = ""
+ if k.startswith("multiconfig:"):
+ mc = k.split(":")[1]
+ k = ":".join(k.split(":")[2:])
+ ktask = task
+ if ":do_" in k:
+ k2 = k.split(":do_")
+ k = k2[0]
+ ktask = k2[1]
+ taskdata[mc].add_provider(localdata[mc], self.recipecaches[mc], k)
+ current += 1
+ if not ktask.startswith("do_"):
+ ktask = "do_%s" % ktask
+ if k not in taskdata[mc].build_targets or not taskdata[mc].build_targets[k]:
+ # e.g. in ASSUME_PROVIDED
+ continue
+ fn = taskdata[mc].build_targets[k][0]
+ runlist.append([mc, k, ktask, fn])
+ bb.event.fire(bb.event.TreeDataPreparationProgress(current, len(fulltargetlist)), self.data)
+
+ for mc in self.multiconfigs:
+ taskdata[mc].add_unresolved(localdata[mc], self.recipecaches[mc])
+
+ bb.event.fire(bb.event.TreeDataPreparationCompleted(len(fulltargetlist)), self.data)
+ return taskdata, runlist
+
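
To make the wildcard expansion in buildTaskData concrete, here is a simplified standalone sketch (the real code above defers appending the implicit default-configuration target until after the loop):

    # "" stands for the default configuration, which yields the bare target.
    def expand_wildcard(target, multiconfigs):
        if not target.startswith("multiconfig:*:"):
            return [target]
        rest = target[len("multiconfig:*:"):]
        expanded = ["multiconfig:%s:%s" % (mc, rest) for mc in multiconfigs if mc]
        expanded.append(rest)  # implicit default-configuration target
        return expanded

    print(expand_wildcard("multiconfig:*:bash", ["", "A", "B"]))
    # ['multiconfig:A:bash', 'multiconfig:B:bash', 'bash']
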
+ def prepareTreeData(self, pkgs_to_build, task):
+ """
+ Prepare a runqueue and taskdata object for iteration over pkgs_to_build
+ """
+
+ # We set abort to False here to prevent unbuildable targets raising
+ # an exception when we're just generating data
+ taskdata, runlist = self.buildTaskData(pkgs_to_build, task, False, allowincomplete=True)
+
+ return runlist, taskdata
+
+ ######## WARNING : this function requires cache_extra to be enabled ########
+
+ def generateTaskDepTreeData(self, pkgs_to_build, task):
+ """
+ Create a dependency graph of pkgs_to_build including reverse dependency
+ information.
+ """
+ if not task.startswith("do_"):
+ task = "do_%s" % task
+
+ runlist, taskdata = self.prepareTreeData(pkgs_to_build, task)
+ rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
+ rq.rqdata.prepare()
+ return self.buildDependTree(rq, taskdata)
+
+ @staticmethod
+ def add_mc_prefix(mc, pn):
+ if mc:
+ return "multiconfig:%s:%s" % (mc, pn)
+ return pn
+
+ def buildDependTree(self, rq, taskdata):
+ seen_fns = []
+ depend_tree = {}
+ depend_tree["depends"] = {}
+ depend_tree["tdepends"] = {}
+ depend_tree["pn"] = {}
+ depend_tree["rdepends-pn"] = {}
+ depend_tree["packages"] = {}
+ depend_tree["rdepends-pkg"] = {}
+ depend_tree["rrecs-pkg"] = {}
+ depend_tree['providermap'] = {}
+ depend_tree["layer-priorities"] = self.bbfile_config_priorities
+
+ for mc in taskdata:
+ for name, fn in list(taskdata[mc].get_providermap().items()):
+ pn = self.recipecaches[mc].pkg_fn[fn]
+ pn = self.add_mc_prefix(mc, pn)
+ if name != pn:
+ version = "%s:%s-%s" % self.recipecaches[mc].pkg_pepvpr[fn]
+ depend_tree['providermap'][name] = (pn, version)
+
+ for tid in rq.rqdata.runtaskentries:
+ (mc, fn, taskname, taskfn) = bb.runqueue.split_tid_mcfn(tid)
+ pn = self.recipecaches[mc].pkg_fn[taskfn]
+ pn = self.add_mc_prefix(mc, pn)
+ version = "%s:%s-%s" % self.recipecaches[mc].pkg_pepvpr[taskfn]
+ if pn not in depend_tree["pn"]:
+ depend_tree["pn"][pn] = {}
+ depend_tree["pn"][pn]["filename"] = taskfn
+ depend_tree["pn"][pn]["version"] = version
+ depend_tree["pn"][pn]["inherits"] = self.recipecaches[mc].inherits.get(taskfn, None)
+
+ # if we have extra caches, list all attributes they bring in
+ extra_info = []
+ for cache_class in self.caches_array:
+ if type(cache_class) is type and issubclass(cache_class, bb.cache.RecipeInfoCommon) and hasattr(cache_class, 'cachefields'):
+ cachefields = getattr(cache_class, 'cachefields', [])
+ extra_info = extra_info + cachefields
+
+ # for all attributes stored, add them to the dependency tree
+ for ei in extra_info:
+ depend_tree["pn"][pn][ei] = vars(self.recipecaches[mc])[ei][taskfn]
+
+
+ dotname = "%s.%s" % (pn, bb.runqueue.taskname_from_tid(tid))
+ if not dotname in depend_tree["tdepends"]:
+ depend_tree["tdepends"][dotname] = []
+ for dep in rq.rqdata.runtaskentries[tid].depends:
+ (depmc, depfn, deptaskname, deptaskfn) = bb.runqueue.split_tid_mcfn(dep)
+ deppn = self.recipecaches[mc].pkg_fn[deptaskfn]
+ depend_tree["tdepends"][dotname].append("%s.%s" % (deppn, bb.runqueue.taskname_from_tid(dep)))
+ if taskfn not in seen_fns:
+ seen_fns.append(taskfn)
+ packages = []
+
+ depend_tree["depends"][pn] = []
+ for dep in taskdata[mc].depids[taskfn]:
+ depend_tree["depends"][pn].append(dep)
+
+ depend_tree["rdepends-pn"][pn] = []
+ for rdep in taskdata[mc].rdepids[taskfn]:
+ depend_tree["rdepends-pn"][pn].append(rdep)
+
+ rdepends = self.recipecaches[mc].rundeps[taskfn]
+ for package in rdepends:
+ depend_tree["rdepends-pkg"][package] = []
+ for rdepend in rdepends[package]:
+ depend_tree["rdepends-pkg"][package].append(rdepend)
+ packages.append(package)
+
+ rrecs = self.recipecaches[mc].runrecs[taskfn]
+ for package in rrecs:
+ depend_tree["rrecs-pkg"][package] = []
+ for rdepend in rrecs[package]:
+ depend_tree["rrecs-pkg"][package].append(rdepend)
+ if not package in packages:
+ packages.append(package)
+
+ for package in packages:
+ if package not in depend_tree["packages"]:
+ depend_tree["packages"][package] = {}
+ depend_tree["packages"][package]["pn"] = pn
+ depend_tree["packages"][package]["filename"] = taskfn
+ depend_tree["packages"][package]["version"] = version
+
+ return depend_tree
+
+ ######## WARNING : this function requires cache_extra to be enabled ########
+ def generatePkgDepTreeData(self, pkgs_to_build, task):
+ """
+ Create a dependency tree of pkgs_to_build, returning the data.
+ """
+ if not task.startswith("do_"):
+ task = "do_%s" % task
+
+ _, taskdata = self.prepareTreeData(pkgs_to_build, task)
+
+ seen_fns = []
+ depend_tree = {}
+ depend_tree["depends"] = {}
+ depend_tree["pn"] = {}
+ depend_tree["rdepends-pn"] = {}
+ depend_tree["rdepends-pkg"] = {}
+ depend_tree["rrecs-pkg"] = {}
+
+ # if we have extra caches, list all attributes they bring in
+ extra_info = []
+ for cache_class in self.caches_array:
+ if type(cache_class) is type and issubclass(cache_class, bb.cache.RecipeInfoCommon) and hasattr(cache_class, 'cachefields'):
+ cachefields = getattr(cache_class, 'cachefields', [])
+ extra_info = extra_info + cachefields
+
+ tids = []
+ for mc in taskdata:
+ for tid in taskdata[mc].taskentries:
+ tids.append(tid)
+
+ for tid in tids:
+ (mc, fn, taskname, taskfn) = bb.runqueue.split_tid_mcfn(tid)
+
+ pn = self.recipecaches[mc].pkg_fn[taskfn]
+ pn = self.add_mc_prefix(mc, pn)
+
+ if pn not in depend_tree["pn"]:
+ depend_tree["pn"][pn] = {}
+ depend_tree["pn"][pn]["filename"] = taskfn
+ version = "%s:%s-%s" % self.recipecaches[mc].pkg_pepvpr[taskfn]
+ depend_tree["pn"][pn]["version"] = version
+ rdepends = self.recipecaches[mc].rundeps[taskfn]
+ rrecs = self.recipecaches[mc].runrecs[taskfn]
+ depend_tree["pn"][pn]["inherits"] = self.recipecaches[mc].inherits.get(taskfn, None)
+
+ # for all extra attributes stored, add them to the dependency tree
+ for ei in extra_info:
+ depend_tree["pn"][pn][ei] = vars(self.recipecaches[mc])[ei][taskfn]
+
+ if taskfn not in seen_fns:
+ seen_fns.append(taskfn)
+
+ depend_tree["depends"][pn] = []
+ for dep in taskdata[mc].depids[taskfn]:
+ pn_provider = ""
+ if dep in taskdata[mc].build_targets and taskdata[mc].build_targets[dep]:
+ fn_provider = taskdata[mc].build_targets[dep][0]
+ pn_provider = self.recipecaches[mc].pkg_fn[fn_provider]
+ else:
+ pn_provider = dep
+ pn_provider = self.add_mc_prefix(mc, pn_provider)
+ depend_tree["depends"][pn].append(pn_provider)
+
+ depend_tree["rdepends-pn"][pn] = []
+ for rdep in taskdata[mc].rdepids[taskfn]:
+ pn_rprovider = ""
+ if rdep in taskdata[mc].run_targets and taskdata[mc].run_targets[rdep]:
+ fn_rprovider = taskdata[mc].run_targets[rdep][0]
+ pn_rprovider = self.recipecaches[mc].pkg_fn[fn_rprovider]
+ else:
+ pn_rprovider = rdep
+ pn_rprovider = self.add_mc_prefix(mc, pn_rprovider)
+ depend_tree["rdepends-pn"][pn].append(pn_rprovider)
+
+ depend_tree["rdepends-pkg"].update(rdepends)
+ depend_tree["rrecs-pkg"].update(rrecs)
+
+ return depend_tree
+
+ def generateDepTreeEvent(self, pkgs_to_build, task):
+ """
+ Create a task dependency graph of pkgs_to_build.
+ Generate an event with the result
+ """
+ depgraph = self.generateTaskDepTreeData(pkgs_to_build, task)
+ bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.data)
+
+ def generateDotGraphFiles(self, pkgs_to_build, task):
+ """
+ Create a task dependency graph of pkgs_to_build.
+ Save the result to a set of .dot files.
+ """
+
+ depgraph = self.generateTaskDepTreeData(pkgs_to_build, task)
+
+ with open('pn-buildlist', 'w') as f:
+ for pn in depgraph["pn"]:
+ f.write(pn + "\n")
+ logger.info("PN build list saved to 'pn-buildlist'")
+
+ # Remove old format output files to ensure no confusion with stale data
+ try:
+ os.unlink('pn-depends.dot')
+ except FileNotFoundError:
+ pass
+ try:
+ os.unlink('package-depends.dot')
+ except FileNotFoundError:
+ pass
+
+ with open('task-depends.dot', 'w') as f:
+ f.write("digraph depends {\n")
+ for task in sorted(depgraph["tdepends"]):
+ (pn, taskname) = task.rsplit(".", 1)
+ fn = depgraph["pn"][pn]["filename"]
+ version = depgraph["pn"][pn]["version"]
+ f.write('"%s.%s" [label="%s %s\\n%s\\n%s"]\n' % (pn, taskname, pn, taskname, version, fn))
+ for dep in sorted(depgraph["tdepends"][task]):
+ f.write('"%s" -> "%s"\n' % (task, dep))
+ f.write("}\n")
+ logger.info("Task dependencies saved to 'task-depends.dot'")
+
+ with open('recipe-depends.dot', 'w') as f:
+ f.write("digraph depends {\n")
+ pndeps = {}
+ for task in sorted(depgraph["tdepends"]):
+ (pn, taskname) = task.rsplit(".", 1)
+ if pn not in pndeps:
+ pndeps[pn] = set()
+ for dep in sorted(depgraph["tdepends"][task]):
+ (deppn, deptaskname) = dep.rsplit(".", 1)
+ pndeps[pn].add(deppn)
+ for pn in sorted(pndeps):
+ fn = depgraph["pn"][pn]["filename"]
+ version = depgraph["pn"][pn]["version"]
+ f.write('"%s" [label="%s\\n%s\\n%s"]\n' % (pn, pn, version, fn))
+ for dep in sorted(pndeps[pn]):
+ if dep == pn:
+ continue
+ f.write('"%s" -> "%s"\n' % (pn, dep))
+ f.write("}\n")
+ logger.info("Flattened recipe dependencies saved to 'recipe-depends.dot'")
+
+ def show_appends_with_no_recipes(self):
+ # Determine which bbappends haven't been applied
+
+ # First get list of recipes, including skipped
+ recipefns = list(self.recipecaches[''].pkg_fn.keys())
+ recipefns.extend(self.skiplist.keys())
+
+ # Work out list of bbappends that have been applied
+ applied_appends = []
+ for fn in recipefns:
+ applied_appends.extend(self.collection.get_file_appends(fn))
+
+ appends_without_recipes = []
+ for _, appendfn in self.collection.bbappends:
+ if not appendfn in applied_appends:
+ appends_without_recipes.append(appendfn)
+
+ if appends_without_recipes:
+ msg = 'No recipes available for:\n %s' % '\n '.join(appends_without_recipes)
+ warn_only = self.data.getVar("BB_DANGLINGAPPENDS_WARNONLY", \
+ False) or "no"
+ if warn_only.lower() in ("1", "yes", "true"):
+ bb.warn(msg)
+ else:
+ bb.fatal(msg)
+
+ def handlePrefProviders(self):
+
+ for mc in self.multiconfigs:
+ localdata = data.createCopy(self.databuilder.mcdata[mc])
+ bb.data.expandKeys(localdata)
+
+ # Handle PREFERRED_PROVIDERS
+ for p in (localdata.getVar('PREFERRED_PROVIDERS') or "").split():
+ try:
+ (providee, provider) = p.split(':')
+ except:
+ providerlog.critical("Malformed option in PREFERRED_PROVIDERS variable: %s" % p)
+ continue
+ if providee in self.recipecaches[mc].preferred and self.recipecaches[mc].preferred[providee] != provider:
+ providerlog.error("conflicting preferences for %s: both %s and %s specified", providee, provider, self.recipecaches[mc].preferred[providee])
+ self.recipecaches[mc].preferred[providee] = provider
+
+ def findConfigFilePath(self, configfile):
+ """
+ Find the location on disk of configfile and if it exists and was parsed by BitBake
+ emit the ConfigFilePathFound event with the path to the file.
+ """
+ path = bb.cookerdata.findConfigFile(configfile, self.data)
+ if not path:
+ return
+
+ # Generate a list of parsed configuration files by searching the files
+ # listed in the __depends and __base_depends variables with a .conf suffix.
+ conffiles = []
+ dep_files = self.data.getVar('__base_depends', False) or []
+ dep_files = dep_files + (self.data.getVar('__depends', False) or [])
+
+ for f in dep_files:
+ if f[0].endswith(".conf"):
+ conffiles.append(f[0])
+
+ _, conf, conffile = path.rpartition("conf/")
+ match = os.path.join(conf, conffile)
+        # Try to find matches for conf/conffilename.conf as we don't always
+ # have the full path to the file.
+ for cfg in conffiles:
+ if cfg.endswith(match):
+ bb.event.fire(bb.event.ConfigFilePathFound(path),
+ self.data)
+ break
+
+ def findFilesMatchingInDir(self, filepattern, directory):
+ """
+        Searches for files containing the substring 'filepattern' which are children of
+        'directory' in each BBPATH, e.g. to find all rootfs package classes available
+        to BitBake one could call findFilesMatchingInDir(self, 'rootfs_', 'classes'),
+        or to find all machine configuration files one could call
+        findFilesMatchingInDir(self, '.conf', 'conf/machine')
+ """
+
+ matches = []
+ bbpaths = self.data.getVar('BBPATH').split(':')
+ for path in bbpaths:
+ dirpath = os.path.join(path, directory)
+ if os.path.exists(dirpath):
+ for root, dirs, files in os.walk(dirpath):
+ for f in files:
+ if filepattern in f:
+ matches.append(f)
+
+ if matches:
+ bb.event.fire(bb.event.FilesMatchingFound(filepattern, matches), self.data)
+
+ def findProviders(self, mc=''):
+ return bb.providers.findProviders(self.data, self.recipecaches[mc], self.recipecaches[mc].pkg_pn)
+
+ def findBestProvider(self, pn, mc=''):
+ if pn in self.recipecaches[mc].providers:
+ filenames = self.recipecaches[mc].providers[pn]
+ eligible, foundUnique = bb.providers.filterProviders(filenames, pn, self.data, self.recipecaches[mc])
+ filename = eligible[0]
+ return None, None, None, filename
+ elif pn in self.recipecaches[mc].pkg_pn:
+ return bb.providers.findBestProvider(pn, self.data, self.recipecaches[mc], self.recipecaches[mc].pkg_pn)
+ else:
+ return None, None, None, None
+
+ def findConfigFiles(self, varname):
+ """
+        Find config files which are appropriate values for varname,
+        e.g. MACHINE, DISTRO
+ """
+ possible = []
+ var = varname.lower()
+
+ data = self.data
+ # iterate configs
+ bbpaths = data.getVar('BBPATH').split(':')
+ for path in bbpaths:
+ confpath = os.path.join(path, "conf", var)
+ if os.path.exists(confpath):
+ for root, dirs, files in os.walk(confpath):
+ # get all child files, these are appropriate values
+ for f in files:
+ val, sep, end = f.rpartition('.')
+ if end == 'conf':
+ possible.append(val)
+
+ if possible:
+ bb.event.fire(bb.event.ConfigFilesFound(var, possible), self.data)
+
+ def findInheritsClass(self, klass):
+ """
+ Find all recipes which inherit the specified class
+ """
+ pkg_list = []
+
+ for pfn in self.recipecaches[''].pkg_fn:
+ inherits = self.recipecaches[''].inherits.get(pfn, None)
+ if inherits and klass in inherits:
+ pkg_list.append(self.recipecaches[''].pkg_fn[pfn])
+
+ return pkg_list
+
+ def generateTargetsTree(self, klass=None, pkgs=None):
+ """
+ Generate a dependency tree of buildable targets
+ Generate an event with the result
+ """
+ # if the caller hasn't specified a pkgs list default to universe
+ if not pkgs:
+ pkgs = ['universe']
+ # if inherited_class passed ensure all recipes which inherit the
+ # specified class are included in pkgs
+ if klass:
+ extra_pkgs = self.findInheritsClass(klass)
+ pkgs = pkgs + extra_pkgs
+
+ # generate a dependency tree for all our packages
+ tree = self.generatePkgDepTreeData(pkgs, 'build')
+ bb.event.fire(bb.event.TargetsTreeGenerated(tree), self.data)
+
+ def interactiveMode( self ):
+ """Drop off into a shell"""
+ try:
+ from bb import shell
+ except ImportError:
+ parselog.exception("Interactive mode not available")
+ sys.exit(1)
+ else:
+ shell.start( self )
+
+
+ def handleCollections(self, collections):
+ """Handle collections"""
+ errors = False
+ self.bbfile_config_priorities = []
+ if collections:
+ collection_priorities = {}
+ collection_depends = {}
+ collection_list = collections.split()
+ min_prio = 0
+ for c in collection_list:
+ bb.debug(1,'Processing %s in collection list' % (c))
+
+ # Get collection priority if defined explicitly
+ priority = self.data.getVar("BBFILE_PRIORITY_%s" % c)
+ if priority:
+ try:
+ prio = int(priority)
+                    except ValueError:
+                        parselog.error("invalid value for BBFILE_PRIORITY_%s: \"%s\"", c, priority)
+                        errors = True
+                        # Fall back so 'prio' is always defined below; the
+                        # accumulated errors are raised once parsing finishes.
+                        prio = 0
+ if min_prio == 0 or prio < min_prio:
+ min_prio = prio
+ collection_priorities[c] = prio
+ else:
+ collection_priorities[c] = None
+
+ # Check dependencies and store information for priority calculation
+ deps = self.data.getVar("LAYERDEPENDS_%s" % c)
+ if deps:
+ try:
+ depDict = bb.utils.explode_dep_versions2(deps)
+ except bb.utils.VersionStringException as vse:
+ bb.fatal('Error parsing LAYERDEPENDS_%s: %s' % (c, str(vse)))
+ for dep, oplist in list(depDict.items()):
+ if dep in collection_list:
+ for opstr in oplist:
+ layerver = self.data.getVar("LAYERVERSION_%s" % dep)
+ (op, depver) = opstr.split()
+ if layerver:
+ try:
+ res = bb.utils.vercmp_string_op(layerver, depver, op)
+ except bb.utils.VersionStringException as vse:
+ bb.fatal('Error parsing LAYERDEPENDS_%s: %s' % (c, str(vse)))
+ if not res:
+ parselog.error("Layer '%s' depends on version %s of layer '%s', but version %s is currently enabled in your configuration. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, dep, layerver)
+ errors = True
+ else:
+ parselog.error("Layer '%s' depends on version %s of layer '%s', which exists in your configuration but does not specify a version. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, dep)
+ errors = True
+ else:
+ parselog.error("Layer '%s' depends on layer '%s', but this layer is not enabled in your configuration", c, dep)
+ errors = True
+ collection_depends[c] = list(depDict.keys())
+ else:
+ collection_depends[c] = []
+
+ # Check recommends and store information for priority calculation
+ recs = self.data.getVar("LAYERRECOMMENDS_%s" % c)
+ if recs:
+ try:
+ recDict = bb.utils.explode_dep_versions2(recs)
+ except bb.utils.VersionStringException as vse:
+ bb.fatal('Error parsing LAYERRECOMMENDS_%s: %s' % (c, str(vse)))
+ for rec, oplist in list(recDict.items()):
+ if rec in collection_list:
+ if oplist:
+ opstr = oplist[0]
+ layerver = self.data.getVar("LAYERVERSION_%s" % rec)
+ if layerver:
+ (op, recver) = opstr.split()
+ try:
+ res = bb.utils.vercmp_string_op(layerver, recver, op)
+ except bb.utils.VersionStringException as vse:
+ bb.fatal('Error parsing LAYERRECOMMENDS_%s: %s' % (c, str(vse)))
+ if not res:
+ parselog.debug(3,"Layer '%s' recommends version %s of layer '%s', but version %s is currently enabled in your configuration. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec, layerver)
+ continue
+ else:
+ parselog.debug(3,"Layer '%s' recommends version %s of layer '%s', which exists in your configuration but does not specify a version. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec)
+ continue
+ parselog.debug(3,"Layer '%s' recommends layer '%s', so we are adding it", c, rec)
+ collection_depends[c].append(rec)
+ else:
+ parselog.debug(3,"Layer '%s' recommends layer '%s', but this layer is not enabled in your configuration", c, rec)
+
+ # Recursively work out collection priorities based on dependencies
+ def calc_layer_priority(collection):
+ if not collection_priorities[collection]:
+ max_depprio = min_prio
+ for dep in collection_depends[collection]:
+ calc_layer_priority(dep)
+ depprio = collection_priorities[dep]
+ if depprio > max_depprio:
+ max_depprio = depprio
+ max_depprio += 1
+ parselog.debug(1, "Calculated priority of layer %s as %d", collection, max_depprio)
+ collection_priorities[collection] = max_depprio
+
+ # Calculate all layer priorities using calc_layer_priority and store in bbfile_config_priorities
+ for c in collection_list:
+ calc_layer_priority(c)
+ regex = self.data.getVar("BBFILE_PATTERN_%s" % c)
+                if regex is None:
+ parselog.error("BBFILE_PATTERN_%s not defined" % c)
+ errors = True
+ continue
+ elif regex == "":
+ parselog.debug(1, "BBFILE_PATTERN_%s is empty" % c)
+ errors = False
+ continue
+ else:
+ try:
+ cre = re.compile(regex)
+ except re.error:
+ parselog.error("BBFILE_PATTERN_%s \"%s\" is not a valid regular expression", c, regex)
+ errors = True
+ continue
+ self.bbfile_config_priorities.append((c, regex, cre, collection_priorities[c]))
+ if errors:
+ # We've already printed the actual error(s)
+ raise CollectionError("Errors during parsing layer configuration")
+
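
The calc_layer_priority recursion above gives explicitly prioritised layers their configured value and every other layer one more than the highest priority among its dependencies. A standalone sketch of the same calculation (simplified data shapes; no version checks):

    def calc_priorities(explicit, depends, min_prio=0):
        prios = dict(explicit)  # None means "no explicit priority"
        def calc(layer):
            if prios.get(layer) is None:
                best = min_prio
                for dep in depends.get(layer, []):
                    calc(dep)
                    best = max(best, prios[dep])
                prios[layer] = best + 1
        for layer in depends:
            calc(layer)
        return prios

    print(calc_priorities({"core": 5, "meta-a": None, "meta-b": None},
                          {"core": [], "meta-a": ["core"], "meta-b": ["meta-a"]}))
    # {'core': 5, 'meta-a': 6, 'meta-b': 7}
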
+ def buildSetVars(self):
+ """
+ Setup any variables needed before starting a build
+ """
+ t = time.gmtime()
+ for mc in self.databuilder.mcdata:
+ ds = self.databuilder.mcdata[mc]
+ if not ds.getVar("BUILDNAME", False):
+ ds.setVar("BUILDNAME", "${DATE}${TIME}")
+ ds.setVar("BUILDSTART", time.strftime('%m/%d/%Y %H:%M:%S', t))
+ ds.setVar("DATE", time.strftime('%Y%m%d', t))
+ ds.setVar("TIME", time.strftime('%H%M%S', t))
+
+ def reset_mtime_caches(self):
+ """
+        Reset mtime caches - this is particularly important when memory resident, as
+        cached entries may well have changed since the last invocation (e.g. a
+        file associated with a recipe might have been modified by the user).
+ """
+ build.reset_cache()
+ bb.fetch._checksum_cache.mtime_cache.clear()
+ siggen_cache = getattr(bb.parse.siggen, 'checksum_cache', None)
+ if siggen_cache:
+ bb.parse.siggen.checksum_cache.mtime_cache.clear()
+
+ def matchFiles(self, bf):
+ """
+        Find the .bb files which match the expression in 'bf'.
+ """
+ if bf.startswith("/") or bf.startswith("../"):
+ bf = os.path.abspath(bf)
+
+ self.collection = CookerCollectFiles(self.bbfile_config_priorities)
+ filelist, masked, searchdirs = self.collection.collect_bbfiles(self.data, self.data)
+ try:
+ os.stat(bf)
+ bf = os.path.abspath(bf)
+ return [bf]
+ except OSError:
+ regexp = re.compile(bf)
+ matches = []
+ for f in filelist:
+ if regexp.search(f) and os.path.isfile(f):
+ matches.append(f)
+ return matches
+
+ def matchFile(self, buildfile):
+ """
+ Find the .bb file which matches the expression in 'buildfile'.
+        Raise an error if multiple files match
+ """
+ matches = self.matchFiles(buildfile)
+ if len(matches) != 1:
+            if matches:
+                msg = "Unable to match '%s' to a specific recipe file - %s matches found:" % (buildfile, len(matches))
+                for f in matches:
+                    msg += "\n    %s" % f
+                parselog.error(msg)
+ else:
+ parselog.error("Unable to find any recipe file matching '%s'" % buildfile)
+ raise NoSpecificMatch
+ return matches[0]
+
+ def buildFile(self, buildfile, task):
+ """
+ Build the file matching regexp buildfile
+ """
+ bb.event.fire(bb.event.BuildInit(), self.data)
+
+ # Too many people use -b because they think it's how you normally
+ # specify a target to be built, so show a warning
+ bb.warn("Buildfile specified, dependencies will not be handled. If this is not what you want, do not use -b / --buildfile.")
+
+ self.buildFileInternal(buildfile, task)
+
+ def buildFileInternal(self, buildfile, task, fireevents=True, quietlog=False):
+ """
+ Build the file matching regexp buildfile
+ """
+
+ # Parse the configuration here. We need to do it explicitly here since
+ # buildFile() doesn't use the cache
+ self.parseConfiguration()
+
+ # If we are told to do the None task then query the default task
+        if task is None:
+ task = self.configuration.cmd
+ if not task.startswith("do_"):
+ task = "do_%s" % task
+
+ fn, cls, mc = bb.cache.virtualfn2realfn(buildfile)
+ fn = self.matchFile(fn)
+
+ self.buildSetVars()
+ self.reset_mtime_caches()
+
+ bb_cache = bb.cache.Cache(self.databuilder, self.data_hash, self.caches_array)
+
+ infos = bb_cache.parse(fn, self.collection.get_file_appends(fn))
+ infos = dict(infos)
+
+ fn = bb.cache.realfn2virtual(fn, cls, mc)
+ try:
+ info_array = infos[fn]
+ except KeyError:
+ bb.fatal("%s does not exist" % fn)
+
+ if info_array[0].skipped:
+ bb.fatal("%s was skipped: %s" % (fn, info_array[0].skipreason))
+
+ self.recipecaches[mc].add_from_recipeinfo(fn, info_array)
+
+ # Tweak some variables
+ item = info_array[0].pn
+ self.recipecaches[mc].ignored_dependencies = set()
+ self.recipecaches[mc].bbfile_priority[fn] = 1
+ self.configuration.limited_deps = True
+
+ # Remove external dependencies
+ self.recipecaches[mc].task_deps[fn]['depends'] = {}
+ self.recipecaches[mc].deps[fn] = []
+ self.recipecaches[mc].rundeps[fn] = defaultdict(list)
+ self.recipecaches[mc].runrecs[fn] = defaultdict(list)
+
+ # Invalidate task for target if force mode active
+ if self.configuration.force:
+ logger.verbose("Invalidate task %s, %s", task, fn)
+ bb.parse.siggen.invalidate_task(task, self.recipecaches[mc], fn)
+
+ # Setup taskdata structure
+ taskdata = {}
+ taskdata[mc] = bb.taskdata.TaskData(self.configuration.abort)
+ taskdata[mc].add_provider(self.databuilder.mcdata[mc], self.recipecaches[mc], item)
+
+ if quietlog:
+ rqloglevel = bb.runqueue.logger.getEffectiveLevel()
+ bb.runqueue.logger.setLevel(logging.WARNING)
+
+ buildname = self.databuilder.mcdata[mc].getVar("BUILDNAME")
+ if fireevents:
+ bb.event.fire(bb.event.BuildStarted(buildname, [item]), self.databuilder.mcdata[mc])
+
+ # Execute the runqueue
+ runlist = [[mc, item, task, fn]]
+
+ rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
+
+ def buildFileIdle(server, rq, abort):
+
+ msg = None
+ interrupted = 0
+ if abort or self.state == state.forceshutdown:
+ rq.finish_runqueue(True)
+ msg = "Forced shutdown"
+ interrupted = 2
+ elif self.state == state.shutdown:
+ rq.finish_runqueue(False)
+ msg = "Stopped build"
+ interrupted = 1
+ failures = 0
+ try:
+ retval = rq.execute_runqueue()
+ except runqueue.TaskFailure as exc:
+ failures += len(exc.args)
+ retval = False
+ except SystemExit as exc:
+ self.command.finishAsyncCommand(str(exc))
+ if quietlog:
+ bb.runqueue.logger.setLevel(rqloglevel)
+ return False
+
+ if not retval:
+ if fireevents:
+ bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, item, failures, interrupted), self.databuilder.mcdata[mc])
+ self.command.finishAsyncCommand(msg)
+ # We trashed self.recipecaches above
+ self.parsecache_valid = False
+ self.configuration.limited_deps = False
+ bb.parse.siggen.reset(self.data)
+ if quietlog:
+ bb.runqueue.logger.setLevel(rqloglevel)
+ return False
+ if retval is True:
+ return True
+ return retval
+
+ self.configuration.server_register_idlecallback(buildFileIdle, rq)
+
+ def buildTargets(self, targets, task):
+ """
+ Attempt to build the targets specified
+ """
+
+ def buildTargetsIdle(server, rq, abort):
+ msg = None
+ interrupted = 0
+ if abort or self.state == state.forceshutdown:
+ rq.finish_runqueue(True)
+ msg = "Forced shutdown"
+ interrupted = 2
+ elif self.state == state.shutdown:
+ rq.finish_runqueue(False)
+ msg = "Stopped build"
+ interrupted = 1
+ failures = 0
+ try:
+ retval = rq.execute_runqueue()
+ except runqueue.TaskFailure as exc:
+ failures += len(exc.args)
+ retval = False
+ except SystemExit as exc:
+ self.command.finishAsyncCommand(str(exc))
+ return False
+
+ if not retval:
+ try:
+ for mc in self.multiconfigs:
+ bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, targets, failures, interrupted), self.databuilder.mcdata[mc])
+ finally:
+ self.command.finishAsyncCommand(msg)
+ return False
+ if retval is True:
+ return True
+ return retval
+
+ self.reset_mtime_caches()
+ self.buildSetVars()
+
+ # If we are told to do the None task then query the default task
+        if task is None:
+ task = self.configuration.cmd
+
+ if not task.startswith("do_"):
+ task = "do_%s" % task
+
+ packages = [target if ':' in target else '%s:%s' % (target, task) for target in targets]
+
+ bb.event.fire(bb.event.BuildInit(packages), self.data)
+
+ taskdata, runlist = self.buildTaskData(targets, task, self.configuration.abort)
+
+ buildname = self.data.getVar("BUILDNAME", False)
+
+        # make targets always take the form <target>:do_<task>
+ ntargets = []
+ for target in runlist:
+ if target[0]:
+ ntargets.append("multiconfig:%s:%s:%s" % (target[0], target[1], target[2]))
+ ntargets.append("%s:%s" % (target[1], target[2]))
+
+ for mc in self.multiconfigs:
+ bb.event.fire(bb.event.BuildStarted(buildname, ntargets), self.databuilder.mcdata[mc])
+
+ rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
+ if 'universe' in targets:
+ rq.rqdata.warn_multi_bb = True
+
+ self.configuration.server_register_idlecallback(buildTargetsIdle, rq)
+
+
+ def getAllKeysWithFlags(self, flaglist):
+ dump = {}
+ for k in self.data.keys():
+ try:
+ expand = True
+ flags = self.data.getVarFlags(k)
+ if flags and "func" in flags and "python" in flags:
+ expand = False
+ v = self.data.getVar(k, expand)
+ if not k.startswith("__") and not isinstance(v, bb.data_smart.DataSmart):
+ dump[k] = {
+ 'v' : str(v) ,
+ 'history' : self.data.varhistory.variable(k),
+ }
+ for d in flaglist:
+ if flags and d in flags:
+ dump[k][d] = flags[d]
+ else:
+ dump[k][d] = None
+ except Exception as e:
+ print(e)
+ return dump
+
+
+ def updateCacheSync(self):
+ if self.state == state.running:
+ return
+
+ # reload files for which we got notifications
+ for p in self.inotify_modified_files:
+ bb.parse.update_cache(p)
+ if p in bb.parse.BBHandler.cached_statements:
+ del bb.parse.BBHandler.cached_statements[p]
+ self.inotify_modified_files = []
+
+ if not self.baseconfig_valid:
+ logger.debug(1, "Reloading base configuration data")
+ self.initConfigurationData()
+ self.handlePRServ()
+
+ # This is called for all async commands when self.state != running
+ def updateCache(self):
+ if self.state == state.running:
+ return
+
+ if self.state in (state.shutdown, state.forceshutdown, state.error):
+ if hasattr(self.parser, 'shutdown'):
+ self.parser.shutdown(clean=False, force = True)
+ raise bb.BBHandledException()
+
+ if self.state != state.parsing:
+ self.updateCacheSync()
+
+ if self.state != state.parsing and not self.parsecache_valid:
+ bb.parse.siggen.reset(self.data)
+            self.parseConfiguration()
+ if CookerFeatures.SEND_SANITYEVENTS in self.featureset:
+ for mc in self.multiconfigs:
+ bb.event.fire(bb.event.SanityCheck(False), self.databuilder.mcdata[mc])
+
+ for mc in self.multiconfigs:
+ ignore = self.databuilder.mcdata[mc].getVar("ASSUME_PROVIDED") or ""
+ self.recipecaches[mc].ignored_dependencies = set(ignore.split())
+
+ for dep in self.configuration.extra_assume_provided:
+ self.recipecaches[mc].ignored_dependencies.add(dep)
+
+ self.collection = CookerCollectFiles(self.bbfile_config_priorities)
+ (filelist, masked, searchdirs) = self.collection.collect_bbfiles(self.data, self.data)
+
+ # Add inotify watches for directories searched for bb/bbappend files
+ for dirent in searchdirs:
+ self.add_filewatch([[dirent]], dirs=True)
+
+ self.parser = CookerParser(self, filelist, masked)
+ self.parsecache_valid = True
+
+ self.state = state.parsing
+
+ if not self.parser.parse_next():
+ collectlog.debug(1, "parsing complete")
+ if self.parser.error:
+ raise bb.BBHandledException()
+ self.show_appends_with_no_recipes()
+ self.handlePrefProviders()
+ for mc in self.multiconfigs:
+ self.recipecaches[mc].bbfile_priority = self.collection.collection_priorities(self.recipecaches[mc].pkg_fn, self.data)
+ self.state = state.running
+
+ # Send an event listing all stamps reachable after parsing
+ # which the metadata may use to clean up stale data
+ for mc in self.multiconfigs:
+ event = bb.event.ReachableStamps(self.recipecaches[mc].stamp)
+ bb.event.fire(event, self.databuilder.mcdata[mc])
+ return None
+
+ return True
+
+ def checkPackages(self, pkgs_to_build, task=None):
+
+ # Return a copy, don't modify the original
+ pkgs_to_build = pkgs_to_build[:]
+
+ if len(pkgs_to_build) == 0:
+ raise NothingToBuild
+
+ ignore = (self.data.getVar("ASSUME_PROVIDED") or "").split()
+ for pkg in pkgs_to_build:
+ if pkg in ignore:
+ parselog.warning("Explicit target \"%s\" is in ASSUME_PROVIDED, ignoring" % pkg)
+
+ if 'world' in pkgs_to_build:
+ pkgs_to_build.remove('world')
+ for mc in self.multiconfigs:
+ bb.providers.buildWorldTargetList(self.recipecaches[mc], task)
+ for t in self.recipecaches[mc].world_target:
+ if mc:
+ t = "multiconfig:" + mc + ":" + t
+ pkgs_to_build.append(t)
+
+ if 'universe' in pkgs_to_build:
+ parselog.warning("The \"universe\" target is only intended for testing and may produce errors.")
+ parselog.debug(1, "collating packages for \"universe\"")
+ pkgs_to_build.remove('universe')
+ for mc in self.multiconfigs:
+ for t in self.recipecaches[mc].universe_target:
+ if task:
+ foundtask = False
+ for provider_fn in self.recipecaches[mc].providers[t]:
+ if task in self.recipecaches[mc].task_deps[provider_fn]['tasks']:
+ foundtask = True
+ break
+ if not foundtask:
+ bb.debug(1, "Skipping %s for universe tasks as task %s doesn't exist" % (t, task))
+ continue
+ if mc:
+ t = "multiconfig:" + mc + ":" + t
+ pkgs_to_build.append(t)
+
+ return pkgs_to_build
+
+ def pre_serve(self):
+ # We now are in our own process so we can call this here.
+ # PRServ exits if its parent process exits
+ self.handlePRServ()
+ return
+
+ def post_serve(self):
+ prserv.serv.auto_shutdown()
+ bb.event.fire(CookerExit(), self.data)
+
+
+ def shutdown(self, force = False):
+ if force:
+ self.state = state.forceshutdown
+ else:
+ self.state = state.shutdown
+
+ if self.parser:
+ self.parser.shutdown(clean=not force, force=force)
+
+ def finishcommand(self):
+ self.state = state.initial
+
+ def reset(self):
+ self.initConfigurationData()
+
+ def clientComplete(self):
+ """Called when the client is done using the server"""
+ self.finishcommand()
+ self.extraconfigdata = {}
+ self.command.reset()
+ self.databuilder.reset()
+ self.data = self.databuilder.data
+
+
+class CookerExit(bb.event.Event):
+ """
+ Notify clients of the Cooker shutdown
+ """
+
+ def __init__(self):
+ bb.event.Event.__init__(self)
+
+
+class CookerCollectFiles(object):
+ def __init__(self, priorities):
+ self.bbappends = []
+ self.bbfile_config_priorities = priorities
+
+ def calc_bbfile_priority( self, filename, matched = None ):
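+        # Returns the priority of the first BBFILE_PATTERN regex that matches
+        # filename (recording the regex in 'matched' when given), or 0 if no
+        # collection pattern matches.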
+        for _, _, regex, pri in self.bbfile_config_priorities:
+            if regex.match(filename):
+                if matched is not None:
+                    if regex not in matched:
+                        matched.add(regex)
+                return pri
+        return 0
+
+ def get_bbfiles(self):
+ """Get list of default .bb files by reading out the current directory"""
+ path = os.getcwd()
+ contents = os.listdir(path)
+ bbfiles = []
+ for f in contents:
+ if f.endswith(".bb"):
+ bbfiles.append(os.path.abspath(os.path.join(path, f)))
+ return bbfiles
+
+ def find_bbfiles(self, path):
+ """Find all the .bb and .bbappend files in a directory"""
+ found = []
+ for dir, dirs, files in os.walk(path):
+ for ignored in ('SCCS', 'CVS', '.svn'):
+ if ignored in dirs:
+ dirs.remove(ignored)
+            found += [os.path.join(dir, f) for f in files if f.endswith(('.bb', '.bbappend'))]
+
+ return found
+
+ def collect_bbfiles(self, config, eventdata):
+ """Collect all available .bb build files"""
+ masked = 0
+
+ collectlog.debug(1, "collecting .bb files")
+
+        files = (config.getVar("BBFILES") or "").split()
+        config.setVar("BBFILES", " ".join(files))
+
+        # Sort files by priority
+        files.sort( key=lambda fileitem: self.calc_bbfile_priority(fileitem) )
+
+        if not files:
+            files = self.get_bbfiles()
+
+        if not files:
+ collectlog.error("no recipe files to build, check your BBPATH and BBFILES?")
+ bb.event.fire(CookerExit(), eventdata)
+
+ # We need to track where we look so that we can add inotify watches. There
+ # is no nice way to do this, this is horrid. We intercept the os.listdir()
+ # (or os.scandir() for python 3.6+) calls while we run glob().
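+        # A minimal standalone sketch of this interception pattern (illustrative
+        # only, not part of cooker):
+        #
+        #   seen = []
+        #   orig = os.listdir
+        #   def spy(d):
+        #       seen.append(d)
+        #       return orig(d)
+        #   os.listdir = spy
+        #   try:
+        #       glob.glob('conf/*.conf')  # directories the glob touches land in 'seen'
+        #   finally:
+        #       os.listdir = orig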
+ origlistdir = os.listdir
+ if hasattr(os, 'scandir'):
+ origscandir = os.scandir
+ searchdirs = []
+
+ def ourlistdir(d):
+ searchdirs.append(d)
+ return origlistdir(d)
+
+ def ourscandir(d):
+ searchdirs.append(d)
+ return origscandir(d)
+
+ os.listdir = ourlistdir
+ if hasattr(os, 'scandir'):
+ os.scandir = ourscandir
+ try:
+ # Can't use set here as order is important
+ newfiles = []
+ for f in files:
+ if os.path.isdir(f):
+ dirfiles = self.find_bbfiles(f)
+ for g in dirfiles:
+ if g not in newfiles:
+ newfiles.append(g)
+ else:
+ globbed = glob.glob(f)
+ if not globbed and os.path.exists(f):
+ globbed = [f]
+ # glob gives files in order on disk. Sort to be deterministic.
+ for g in sorted(globbed):
+ if g not in newfiles:
+ newfiles.append(g)
+ finally:
+ os.listdir = origlistdir
+ if hasattr(os, 'scandir'):
+ os.scandir = origscandir
+
+ bbmask = config.getVar('BBMASK')
+
+ if bbmask:
+ # First validate the individual regular expressions and ignore any
+ # that do not compile
+ bbmasks = []
+ for mask in bbmask.split():
+ # When constructing an older style single regex, it's possible for BBMASK
+ # to end up beginning with '|', which matches and masks _everything_.
+ if mask.startswith("|"):
+ collectlog.warn("BBMASK contains regular expression beginning with '|', fixing: %s" % mask)
+ mask = mask[1:]
+ try:
+ re.compile(mask)
+ bbmasks.append(mask)
+ except sre_constants.error:
+ collectlog.critical("BBMASK contains an invalid regular expression, ignoring: %s" % mask)
+
+ # Then validate the combined regular expressions. This should never
+ # fail, but better safe than sorry...
+ bbmask = "|".join(bbmasks)
+ try:
+ bbmask_compiled = re.compile(bbmask)
+ except sre_constants.error:
+ collectlog.critical("BBMASK is not a valid regular expression, ignoring: %s" % bbmask)
+ bbmask = None
+
+ bbfiles = []
+ bbappend = []
+ for f in newfiles:
+ if bbmask and bbmask_compiled.search(f):
+ collectlog.debug(1, "skipping masked file %s", f)
+ masked += 1
+ continue
+ if f.endswith('.bb'):
+ bbfiles.append(f)
+ elif f.endswith('.bbappend'):
+ bbappend.append(f)
+ else:
+ collectlog.debug(1, "skipping %s: unknown file extension", f)
+
+ # Build a list of .bbappend files for each .bb file
+ for f in bbappend:
+ base = os.path.basename(f).replace('.bbappend', '.bb')
+ self.bbappends.append((base, f))
+
+ # Find overlayed recipes
+ # bbfiles will be in priority order which makes this easy
+ bbfile_seen = dict()
+ self.overlayed = defaultdict(list)
+ for f in reversed(bbfiles):
+ base = os.path.basename(f)
+ if base not in bbfile_seen:
+ bbfile_seen[base] = f
+ else:
+ topfile = bbfile_seen[base]
+ self.overlayed[topfile].append(f)
+
+ return (bbfiles, masked, searchdirs)
+
+ def get_file_appends(self, fn):
+ """
+ Returns a list of .bbappend files to apply to fn
+ """
+ filelist = []
+ f = os.path.basename(fn)
+ for b in self.bbappends:
+ (bbappend, filename) = b
+ if (bbappend == f) or ('%' in bbappend and bbappend.startswith(f[:bbappend.index('%')])):
+ filelist.append(filename)
+ return filelist
+
+ def collection_priorities(self, pkgfns, d):
+
+ priorities = {}
+
+ # Calculate priorities for each file
+ matched = set()
+ for p in pkgfns:
+ realfn, cls, mc = bb.cache.virtualfn2realfn(p)
+ priorities[p] = self.calc_bbfile_priority(realfn, matched)
+
+ unmatched = set()
+ for _, _, regex, pri in self.bbfile_config_priorities:
+            if regex not in matched:
+ unmatched.add(regex)
+
+ # Don't show the warning if the BBFILE_PATTERN did match .bbappend files
+ def find_bbappend_match(regex):
+ for b in self.bbappends:
+ (bbfile, append) = b
+ if regex.match(append):
+                    # If the bbappend is already matched by a regex in the matched set, return False
+ for matched_regex in matched:
+ if matched_regex.match(append):
+ return False
+ return True
+ return False
+
+ for unmatch in unmatched.copy():
+ if find_bbappend_match(unmatch):
+ unmatched.remove(unmatch)
+
+ for collection, pattern, regex, _ in self.bbfile_config_priorities:
+ if regex in unmatched:
+ if d.getVar('BBFILE_PATTERN_IGNORE_EMPTY_%s' % collection) != '1':
+ collectlog.warning("No bb files matched BBFILE_PATTERN_%s '%s'" % (collection, pattern))
+
+ return priorities
+
+class ParsingFailure(Exception):
+ def __init__(self, realexception, recipe):
+ self.realexception = realexception
+ self.recipe = recipe
+ Exception.__init__(self, realexception, recipe)
+
+class Feeder(multiprocessing.Process):
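+    """Feeds parse jobs to the Parser workers through a bounded queue,
+    polling the quit queue so a 'cancel' request unblocks it promptly."""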
+ def __init__(self, jobs, to_parsers, quit):
+ self.quit = quit
+ self.jobs = jobs
+ self.to_parsers = to_parsers
+ multiprocessing.Process.__init__(self)
+
+ def run(self):
+ while True:
+ try:
+ quit = self.quit.get_nowait()
+ except queue.Empty:
+ pass
+ else:
+ if quit == 'cancel':
+ self.to_parsers.cancel_join_thread()
+ break
+
+ try:
+ job = self.jobs.pop()
+ except IndexError:
+ break
+
+ try:
+ self.to_parsers.put(job, timeout=0.5)
+ except queue.Full:
+ self.jobs.insert(0, job)
+ continue
+
+class Parser(multiprocessing.Process):
+ def __init__(self, jobs, results, quit, init, profile):
+ self.jobs = jobs
+ self.results = results
+ self.quit = quit
+ self.init = init
+ multiprocessing.Process.__init__(self)
+ self.context = bb.utils.get_context().copy()
+ self.handlers = bb.event.get_class_handlers().copy()
+ self.profile = profile
+
+ def run(self):
+
+ if not self.profile:
+ self.realrun()
+ return
+
+        try:
+            import cProfile as profile
+        except ImportError:
+            import profile
+ prof = profile.Profile()
+ try:
+ profile.Profile.runcall(prof, self.realrun)
+ finally:
+ logfile = "profile-parse-%s.log" % multiprocessing.current_process().name
+ prof.dump_stats(logfile)
+
+ def realrun(self):
+ if self.init:
+ self.init()
+
+ pending = []
+ while True:
+ try:
+ self.quit.get_nowait()
+ except queue.Empty:
+ pass
+ else:
+ self.results.cancel_join_thread()
+ break
+
+ if pending:
+ result = pending.pop()
+ else:
+ try:
+ job = self.jobs.get(timeout=0.25)
+ except queue.Empty:
+ continue
+
+ if job is None:
+ break
+ result = self.parse(*job)
+
+ try:
+ self.results.put(result, timeout=0.25)
+ except queue.Full:
+ pending.append(result)
+
+ def parse(self, filename, appends):
+ try:
+ # Record the filename we're parsing into any events generated
+ def parse_filter(self, record):
+ record.taskpid = bb.event.worker_pid
+ record.fn = filename
+ return True
+
+ # Reset our environment and handlers to the original settings
+ bb.utils.set_context(self.context.copy())
+ bb.event.set_class_handlers(self.handlers.copy())
+ bb.event.LogHandler.filter = parse_filter
+
+ return True, self.bb_cache.parse(filename, appends)
+ except Exception as exc:
+ tb = sys.exc_info()[2]
+ exc.recipe = filename
+ exc.traceback = list(bb.exceptions.extract_traceback(tb, context=3))
+ return True, exc
+        # Need to turn BaseExceptions into Exceptions here so we gracefully shutdown
+        # and a worker thread doesn't just exit on its own in response to, for
+        # example, a SystemExit event.
+ except BaseException as exc:
+ return True, ParsingFailure(exc, filename)
+
+class CookerParser(object):
+ def __init__(self, cooker, filelist, masked):
+ self.filelist = filelist
+ self.cooker = cooker
+ self.cfgdata = cooker.data
+ self.cfghash = cooker.data_hash
+ self.cfgbuilder = cooker.databuilder
+
+ # Accounting statistics
+ self.parsed = 0
+ self.cached = 0
+ self.error = 0
+ self.masked = masked
+
+ self.skipped = 0
+ self.virtuals = 0
+ self.total = len(filelist)
+
+ self.current = 0
+ self.process_names = []
+
+ self.bb_cache = bb.cache.Cache(self.cfgbuilder, self.cfghash, cooker.caches_array)
+ self.fromcache = []
+ self.willparse = []
+ for filename in self.filelist:
+ appends = self.cooker.collection.get_file_appends(filename)
+ if not self.bb_cache.cacheValid(filename, appends):
+ self.willparse.append((filename, appends))
+ else:
+ self.fromcache.append((filename, appends))
+ self.toparse = self.total - len(self.fromcache)
+ self.progress_chunk = int(max(self.toparse / 100, 1))
+
+ self.num_processes = min(int(self.cfgdata.getVar("BB_NUMBER_PARSE_THREADS") or
+ multiprocessing.cpu_count()), len(self.willparse))
+
+ self.start()
+ self.haveshutdown = False
+
+ def start(self):
+ self.results = self.load_cached()
+ self.processes = []
+ if self.toparse:
+ bb.event.fire(bb.event.ParseStarted(self.toparse), self.cfgdata)
+ def init():
+ Parser.bb_cache = self.bb_cache
+ bb.utils.set_process_name(multiprocessing.current_process().name)
+ multiprocessing.util.Finalize(None, bb.codeparser.parser_cache_save, exitpriority=1)
+ multiprocessing.util.Finalize(None, bb.fetch.fetcher_parse_save, exitpriority=1)
+
+ self.feeder_quit = multiprocessing.Queue(maxsize=1)
+ self.parser_quit = multiprocessing.Queue(maxsize=self.num_processes)
+ self.jobs = multiprocessing.Queue(maxsize=self.num_processes)
+ self.result_queue = multiprocessing.Queue()
+ self.feeder = Feeder(self.willparse, self.jobs, self.feeder_quit)
+ self.feeder.start()
+ for i in range(0, self.num_processes):
+ parser = Parser(self.jobs, self.result_queue, self.parser_quit, init, self.cooker.configuration.profile)
+ parser.start()
+ self.process_names.append(parser.name)
+ self.processes.append(parser)
+
+ self.results = itertools.chain(self.results, self.parse_generator())
+
+ def shutdown(self, clean=True, force=False):
+ if not self.toparse:
+ return
+ if self.haveshutdown:
+ return
+ self.haveshutdown = True
+
+ if clean:
+ event = bb.event.ParseCompleted(self.cached, self.parsed,
+ self.skipped, self.masked,
+ self.virtuals, self.error,
+ self.total)
+
+ bb.event.fire(event, self.cfgdata)
+ self.feeder_quit.put(None)
+ for process in self.processes:
+ self.parser_quit.put(None)
+ else:
+ self.feeder_quit.put('cancel')
+
+ self.parser_quit.cancel_join_thread()
+ for process in self.processes:
+ self.parser_quit.put(None)
+
+ self.jobs.cancel_join_thread()
+
+ for process in self.processes:
+ if force:
+                process.join(0.1)
+ process.terminate()
+ else:
+ process.join()
+ self.feeder.join()
+
+ sync = threading.Thread(target=self.bb_cache.sync)
+ sync.start()
+ multiprocessing.util.Finalize(None, sync.join, exitpriority=-100)
+ bb.codeparser.parser_cache_savemerge()
+ bb.fetch.fetcher_parse_done()
+ if self.cooker.configuration.profile:
+ profiles = []
+ for i in self.process_names:
+ logfile = "profile-parse-%s.log" % i
+ if os.path.exists(logfile):
+ profiles.append(logfile)
+
+ pout = "profile-parse.log.processed"
+ bb.utils.process_profilelog(profiles, pout = pout)
+ print("Processed parsing statistics saved to %s" % (pout))
+
+ def load_cached(self):
+ for filename, appends in self.fromcache:
+ cached, infos = self.bb_cache.load(filename, appends)
+ yield not cached, infos
+
+ def parse_generator(self):
+ while True:
+ if self.parsed >= self.toparse:
+ break
+
+ try:
+ result = self.result_queue.get(timeout=0.25)
+ except queue.Empty:
+ pass
+ else:
+ value = result[1]
+ if isinstance(value, BaseException):
+ raise value
+ else:
+ yield result
+
+ def parse_next(self):
+ result = []
+ parsed = None
+ try:
+ parsed, result = next(self.results)
+ except StopIteration:
+ self.shutdown()
+ return False
+ except bb.BBHandledException as exc:
+ self.error += 1
+ logger.error('Failed to parse recipe: %s' % exc.recipe)
+ self.shutdown(clean=False)
+ return False
+ except ParsingFailure as exc:
+ self.error += 1
+ logger.error('Unable to parse %s: %s' %
+ (exc.recipe, bb.exceptions.to_string(exc.realexception)))
+ self.shutdown(clean=False)
+ return False
+ except bb.parse.ParseError as exc:
+ self.error += 1
+ logger.error(str(exc))
+ self.shutdown(clean=False)
+ return False
+ except bb.data_smart.ExpansionError as exc:
+ self.error += 1
+ bbdir = os.path.dirname(__file__) + os.sep
+ etype, value, _ = sys.exc_info()
+ tb = list(itertools.dropwhile(lambda e: e.filename.startswith(bbdir), exc.traceback))
+ logger.error('ExpansionError during parsing %s', value.recipe,
+ exc_info=(etype, value, tb))
+ self.shutdown(clean=False)
+ return False
+ except Exception as exc:
+ self.error += 1
+ etype, value, tb = sys.exc_info()
+ if hasattr(value, "recipe"):
+ logger.error('Unable to parse %s' % value.recipe,
+ exc_info=(etype, value, exc.traceback))
+ else:
+ # Most likely, an exception occurred during raising an exception
+ import traceback
+ logger.error('Exception during parse: %s' % traceback.format_exc())
+ self.shutdown(clean=False)
+ return False
+
+ self.current += 1
+ self.virtuals += len(result)
+ if parsed:
+ self.parsed += 1
+ if self.parsed % self.progress_chunk == 0:
+ bb.event.fire(bb.event.ParseProgress(self.parsed, self.toparse),
+ self.cfgdata)
+ else:
+ self.cached += 1
+
+ for virtualfn, info_array in result:
+ if info_array[0].skipped:
+ self.skipped += 1
+ self.cooker.skiplist[virtualfn] = SkippedPackage(info_array[0])
+ (fn, cls, mc) = bb.cache.virtualfn2realfn(virtualfn)
+ self.bb_cache.add_info(virtualfn, info_array, self.cooker.recipecaches[mc],
+ parsed=parsed, watcher = self.cooker.add_filewatch)
+ return True
+
+ def reparse(self, filename):
+ infos = self.bb_cache.parse(filename, self.cooker.collection.get_file_appends(filename))
+ for vfn, info_array in infos:
+ (fn, cls, mc) = bb.cache.virtualfn2realfn(vfn)
+ self.cooker.recipecaches[mc].add_from_recipeinfo(vfn, info_array)
diff --git a/poky/bitbake/lib/bb/cookerdata.py b/poky/bitbake/lib/bb/cookerdata.py
new file mode 100644
index 000000000..5df66e617
--- /dev/null
+++ b/poky/bitbake/lib/bb/cookerdata.py
@@ -0,0 +1,434 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer
+# Copyright (C) 2005 Holger Hans Peter Freyther
+# Copyright (C) 2005 ROAD GmbH
+# Copyright (C) 2006 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import logging
+import os
+import re
+import sys
+from functools import wraps
+import bb
+from bb import data
+import bb.parse
+
+logger = logging.getLogger("BitBake")
+parselog = logging.getLogger("BitBake.Parsing")
+
+class ConfigParameters(object):
+ def __init__(self, argv=sys.argv):
+ self.options, targets = self.parseCommandLine(argv)
+ self.environment = self.parseEnvironment()
+
+ self.options.pkgs_to_build = targets or []
+
+ for key, val in self.options.__dict__.items():
+ setattr(self, key, val)
+
+ def parseCommandLine(self, argv=sys.argv):
+ raise Exception("Caller must implement commandline option parsing")
+
+ def parseEnvironment(self):
+ return os.environ.copy()
+
+ def updateFromServer(self, server):
+ if not self.options.cmd:
+ defaulttask, error = server.runCommand(["getVariable", "BB_DEFAULT_TASK"])
+ if error:
+ raise Exception("Unable to get the value of BB_DEFAULT_TASK from the server: %s" % error)
+ self.options.cmd = defaulttask or "build"
+ _, error = server.runCommand(["setConfig", "cmd", self.options.cmd])
+ if error:
+ raise Exception("Unable to set configuration option 'cmd' on the server: %s" % error)
+
+ if not self.options.pkgs_to_build:
+ bbpkgs, error = server.runCommand(["getVariable", "BBTARGETS"])
+ if error:
+ raise Exception("Unable to get the value of BBTARGETS from the server: %s" % error)
+ if bbpkgs:
+ self.options.pkgs_to_build.extend(bbpkgs.split())
+
+ def updateToServer(self, server, environment):
+ options = {}
+ for o in ["abort", "force", "invalidate_stamp",
+ "verbose", "debug", "dry_run", "dump_signatures",
+ "debug_domains", "extra_assume_provided", "profile",
+ "prefile", "postfile", "server_timeout"]:
+ options[o] = getattr(self.options, o)
+
+ ret, error = server.runCommand(["updateConfig", options, environment, sys.argv])
+ if error:
+ raise Exception("Unable to update the server configuration with local parameters: %s" % error)
+
+ def parseActions(self):
+ # Parse any commandline into actions
+ action = {'action':None, 'msg':None}
+ if self.options.show_environment:
+ if 'world' in self.options.pkgs_to_build:
+ action['msg'] = "'world' is not a valid target for --environment."
+ elif 'universe' in self.options.pkgs_to_build:
+ action['msg'] = "'universe' is not a valid target for --environment."
+ elif len(self.options.pkgs_to_build) > 1:
+ action['msg'] = "Only one target can be used with the --environment option."
+ elif self.options.buildfile and len(self.options.pkgs_to_build) > 0:
+ action['msg'] = "No target should be used with the --environment and --buildfile options."
+ elif len(self.options.pkgs_to_build) > 0:
+ action['action'] = ["showEnvironmentTarget", self.options.pkgs_to_build]
+ else:
+ action['action'] = ["showEnvironment", self.options.buildfile]
+ elif self.options.buildfile is not None:
+ action['action'] = ["buildFile", self.options.buildfile, self.options.cmd]
+ elif self.options.revisions_changed:
+ action['action'] = ["compareRevisions"]
+ elif self.options.show_versions:
+ action['action'] = ["showVersions"]
+ elif self.options.parse_only:
+ action['action'] = ["parseFiles"]
+ elif self.options.dot_graph:
+ if self.options.pkgs_to_build:
+ action['action'] = ["generateDotGraph", self.options.pkgs_to_build, self.options.cmd]
+ else:
+ action['msg'] = "Please specify a package name for dependency graph generation."
+ else:
+ if self.options.pkgs_to_build:
+ action['action'] = ["buildTargets", self.options.pkgs_to_build, self.options.cmd]
+ else:
+ #action['msg'] = "Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information."
+ action = None
+ self.options.initialaction = action
+ return action
+
+class CookerConfiguration(object):
+ """
+ Manages build options and configurations for one run
+ """
+
+ def __init__(self):
+ self.debug_domains = []
+ self.extra_assume_provided = []
+ self.prefile = []
+ self.postfile = []
+ self.debug = 0
+ self.cmd = None
+ self.abort = True
+ self.force = False
+ self.profile = False
+ self.nosetscene = False
+ self.setsceneonly = False
+ self.invalidate_stamp = False
+ self.dump_signatures = []
+ self.dry_run = False
+ self.tracking = False
+ self.xmlrpcinterface = []
+ self.server_timeout = None
+ self.writeeventlog = False
+ self.server_only = False
+ self.limited_deps = False
+ self.runall = []
+ self.runonly = []
+
+ self.env = {}
+
+ def setConfigParameters(self, parameters):
+ for key in self.__dict__.keys():
+ if key in parameters.options.__dict__:
+ setattr(self, key, parameters.options.__dict__[key])
+ self.env = parameters.environment.copy()
+
+ def setServerRegIdleCallback(self, srcb):
+ self.server_register_idlecallback = srcb
+
+ def __getstate__(self):
+ state = {}
+ for key in self.__dict__.keys():
+ if key == "server_register_idlecallback":
+ state[key] = None
+ else:
+ state[key] = getattr(self, key)
+ return state
+
+ def __setstate__(self,state):
+ for k in state:
+ setattr(self, k, state[k])
+
+
+def catch_parse_error(func):
+ """Exception handling bits for our parsing"""
+ @wraps(func)
+ def wrapped(fn, *args):
+ try:
+ return func(fn, *args)
+ except IOError as exc:
+ import traceback
+ parselog.critical(traceback.format_exc())
+ parselog.critical("Unable to parse %s: %s" % (fn, exc))
+ sys.exit(1)
+ except bb.data_smart.ExpansionError as exc:
+ import traceback
+
+ bbdir = os.path.dirname(__file__) + os.sep
+ exc_class, exc, tb = sys.exc_info()
+ for tb in iter(lambda: tb.tb_next, None):
+ # Skip frames in bitbake itself, we only want the metadata
+ fn, _, _, _ = traceback.extract_tb(tb, 1)[0]
+ if not fn.startswith(bbdir):
+ break
+ parselog.critical("Unable to parse %s" % fn, exc_info=(exc_class, exc, tb))
+ sys.exit(1)
+ except bb.parse.ParseError as exc:
+ parselog.critical(str(exc))
+ sys.exit(1)
+ return wrapped
+
+@catch_parse_error
+def parse_config_file(fn, data, include=True):
+ return bb.parse.handle(fn, data, include)
+
+@catch_parse_error
+def _inherit(bbclass, data):
+ bb.parse.BBHandler.inherit(bbclass, "configuration INHERITs", 0, data)
+ return data
+
+def findConfigFile(configfile, data):
+ search = []
+ bbpath = data.getVar("BBPATH")
+ if bbpath:
+ for i in bbpath.split(":"):
+ search.append(os.path.join(i, "conf", configfile))
+ path = os.getcwd()
+ while path != "/":
+ search.append(os.path.join(path, "conf", configfile))
+ path, _ = os.path.split(path)
+
+ for i in search:
+ if os.path.exists(i):
+ return i
+
+ return None
+
+#
+# We search for a conf/bblayers.conf beneath an entry in BBPATH, or from the
+# current working directory up to /. If that fails, we search for a
+# conf/bitbake.conf in BBPATH.
+#
+
+def findTopdir():
+ d = bb.data.init()
+ bbpath = None
+ if 'BBPATH' in os.environ:
+ bbpath = os.environ['BBPATH']
+ d.setVar('BBPATH', bbpath)
+
+ layerconf = findConfigFile("bblayers.conf", d)
+ if layerconf:
+ return os.path.dirname(os.path.dirname(layerconf))
+ if bbpath:
+ bitbakeconf = bb.utils.which(bbpath, "conf/bitbake.conf")
+ if bitbakeconf:
+ return os.path.dirname(os.path.dirname(bitbakeconf))
+ return None
+
+class CookerDataBuilder(object):
+
+ def __init__(self, cookercfg, worker = False):
+
+ self.prefiles = cookercfg.prefile
+ self.postfiles = cookercfg.postfile
+ self.tracking = cookercfg.tracking
+
+ bb.utils.set_context(bb.utils.clean_context())
+ bb.event.set_class_handlers(bb.event.clean_class_handlers())
+ self.basedata = bb.data.init()
+ if self.tracking:
+ self.basedata.enableTracking()
+
+ # Keep a datastore of the initial environment variables and their
+ # values from when BitBake was launched to enable child processes
+ # to use environment variables which have been cleaned from the
+        # BitBake process's environment
+ self.savedenv = bb.data.init()
+ for k in cookercfg.env:
+ self.savedenv.setVar(k, cookercfg.env[k])
+
+ filtered_keys = bb.utils.approved_variables()
+ bb.data.inheritFromOS(self.basedata, self.savedenv, filtered_keys)
+ self.basedata.setVar("BB_ORIGENV", self.savedenv)
+
+ if worker:
+ self.basedata.setVar("BB_WORKERCONTEXT", "1")
+
+ self.data = self.basedata
+ self.mcdata = {}
+
+ def parseBaseConfiguration(self):
+ try:
+ bb.parse.init_parser(self.basedata)
+ self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles)
+
+ if self.data.getVar("BB_WORKERCONTEXT", False) is None:
+ bb.fetch.fetcher_init(self.data)
+ bb.codeparser.parser_cache_init(self.data)
+
+ bb.event.fire(bb.event.ConfigParsed(), self.data)
+
+ reparse_cnt = 0
+ while self.data.getVar("BB_INVALIDCONF", False) is True:
+ if reparse_cnt > 20:
+ logger.error("Configuration has been re-parsed over 20 times, "
+ "breaking out of the loop...")
+ raise Exception("Too deep config re-parse loop. Check locations where "
+ "BB_INVALIDCONF is being set (ConfigParsed event handlers)")
+ self.data.setVar("BB_INVALIDCONF", False)
+ self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles)
+ reparse_cnt += 1
+ bb.event.fire(bb.event.ConfigParsed(), self.data)
+
+ bb.parse.init_parser(self.data)
+ self.data_hash = self.data.get_hash()
+ self.mcdata[''] = self.data
+
+ multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
+ for config in multiconfig:
+ mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
+ bb.event.fire(bb.event.ConfigParsed(), mcdata)
+ self.mcdata[config] = mcdata
+ if multiconfig:
+ bb.event.fire(bb.event.MultiConfigParsed(self.mcdata), self.data)
+
+ except (SyntaxError, bb.BBHandledException):
+ raise bb.BBHandledException
+ except bb.data_smart.ExpansionError as e:
+ logger.error(str(e))
+ raise bb.BBHandledException
+ except Exception:
+ logger.exception("Error parsing configuration files")
+ raise bb.BBHandledException
+
+ # Create a copy so we can reset at a later date when UIs disconnect
+ self.origdata = self.data
+ self.data = bb.data.createCopy(self.origdata)
+ self.mcdata[''] = self.data
+
+ def reset(self):
+ # We may not have run parseBaseConfiguration() yet
+ if not hasattr(self, 'origdata'):
+ return
+ self.data = bb.data.createCopy(self.origdata)
+ self.mcdata[''] = self.data
+
+ def _findLayerConf(self, data):
+ return findConfigFile("bblayers.conf", data)
+
+ def parseConfigurationFiles(self, prefiles, postfiles, mc = "default"):
+ data = bb.data.createCopy(self.basedata)
+ data.setVar("BB_CURRENT_MC", mc)
+
+ # Parse files for loading *before* bitbake.conf and any includes
+ for f in prefiles:
+ data = parse_config_file(f, data)
+
+ layerconf = self._findLayerConf(data)
+ if layerconf:
+ parselog.debug(2, "Found bblayers.conf (%s)", layerconf)
+ # By definition bblayers.conf is in conf/ of TOPDIR.
+ # We may have been called with cwd somewhere else so reset TOPDIR
+ data.setVar("TOPDIR", os.path.dirname(os.path.dirname(layerconf)))
+ data = parse_config_file(layerconf, data)
+
+ layers = (data.getVar('BBLAYERS') or "").split()
+
+ data = bb.data.createCopy(data)
+ approved = bb.utils.approved_variables()
+ for layer in layers:
+ if not os.path.isdir(layer):
+ parselog.critical("Layer directory '%s' does not exist! "
+ "Please check BBLAYERS in %s" % (layer, layerconf))
+ sys.exit(1)
+ parselog.debug(2, "Adding layer %s", layer)
+ if 'HOME' in approved and '~' in layer:
+ layer = os.path.expanduser(layer)
+ if layer.endswith('/'):
+ layer = layer.rstrip('/')
+ data.setVar('LAYERDIR', layer)
+ data.setVar('LAYERDIR_RE', re.escape(layer))
+ data = parse_config_file(os.path.join(layer, "conf", "layer.conf"), data)
+ data.expandVarref('LAYERDIR')
+ data.expandVarref('LAYERDIR_RE')
+
+ data.delVar('LAYERDIR_RE')
+ data.delVar('LAYERDIR')
+
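+        # BBFILES_DYNAMIC lets a layer add recipes conditionally on another
+        # collection being present; entries take the form
+        # <collection name>:<filename pattern>, e.g. (illustrative)
+        #   BBFILES_DYNAMIC += "openembedded-layer:${LAYERDIR}/dynamic/*.bbappend"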
+ bbfiles_dynamic = (data.getVar('BBFILES_DYNAMIC') or "").split()
+ collections = (data.getVar('BBFILE_COLLECTIONS') or "").split()
+ invalid = []
+ for entry in bbfiles_dynamic:
+ parts = entry.split(":", 1)
+ if len(parts) != 2:
+ invalid.append(entry)
+ continue
+ l, f = parts
+ if l in collections:
+ data.appendVar("BBFILES", " " + f)
+ if invalid:
+ bb.fatal("BBFILES_DYNAMIC entries must be of the form <collection name>:<filename pattern>, not:\n %s" % "\n ".join(invalid))
+
+ layerseries = set((data.getVar("LAYERSERIES_CORENAMES") or "").split())
+ for c in collections:
+ compat = set((data.getVar("LAYERSERIES_COMPAT_%s" % c) or "").split())
+ if compat and not (compat & layerseries):
+ bb.fatal("Layer %s is not compatible with the core layer which only supports these series: %s (layer is compatible with %s)"
+ % (c, " ".join(layerseries), " ".join(compat)))
+ elif not compat and not data.getVar("BB_WORKERCONTEXT"):
+ bb.warn("Layer %s should set LAYERSERIES_COMPAT_%s in its conf/layer.conf file to list the core layer names it is compatible with." % (c, c))
+
+ if not data.getVar("BBPATH"):
+ msg = "The BBPATH variable is not set"
+ if not layerconf:
+ msg += (" and bitbake did not find a conf/bblayers.conf file in"
+ " the expected location.\nMaybe you accidentally"
+ " invoked bitbake from the wrong directory?")
+ raise SystemExit(msg)
+
+ data = parse_config_file(os.path.join("conf", "bitbake.conf"), data)
+
+ # Parse files for loading *after* bitbake.conf and any includes
+ for p in postfiles:
+ data = parse_config_file(p, data)
+
+ # Handle any INHERITs and inherit the base class
+ bbclasses = ["base"] + (data.getVar('INHERIT') or "").split()
+ for bbclass in bbclasses:
+ data = _inherit(bbclass, data)
+
+        # Normally we only register event handlers at the end of parsing .bb files
+ # We register any handlers we've found so far here...
+ for var in data.getVar('__BBHANDLERS', False) or []:
+ handlerfn = data.getVarFlag(var, "filename", False)
+ if not handlerfn:
+ parselog.critical("Undefined event handler function '%s'" % var)
+ sys.exit(1)
+ handlerln = int(data.getVarFlag(var, "lineno", False))
+ bb.event.register(var, data.getVar(var, False), (data.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)
+
+        data.setVar('BBINCLUDED', bb.parse.get_file_depends(data))
+
+ return data
+
diff --git a/poky/bitbake/lib/bb/daemonize.py b/poky/bitbake/lib/bb/daemonize.py
new file mode 100644
index 000000000..8300d1d0f
--- /dev/null
+++ b/poky/bitbake/lib/bb/daemonize.py
@@ -0,0 +1,82 @@
+"""
+Python Daemonizing helper
+
+Originally based on code Copyright (C) 2005 Chad J. Schroeder but now heavily modified
+to allow a function to be daemonized and return for bitbake use by Richard Purdie
+"""
+
+import os
+import sys
+import io
+import traceback
+
+import bb.event  # needed for bb.event.print_ui_queue() in createDaemon
+
+def createDaemon(function, logfile):
+ """
+ Detach a process from the controlling terminal and run it in the
+ background as a daemon, returning control to the caller.
+ """
+
+ try:
+ # Fork a child process so the parent can exit. This returns control to
+ # the command-line or shell. It also guarantees that the child will not
+ # be a process group leader, since the child receives a new process ID
+ # and inherits the parent's process group ID. This step is required
+        # to ensure that the next call to os.setsid is successful.
+ pid = os.fork()
+ except OSError as e:
+ raise Exception("%s [%d]" % (e.strerror, e.errno))
+
+ if (pid == 0): # The first child.
+ # To become the session leader of this new session and the process group
+ # leader of the new process group, we call os.setsid(). The process is
+ # also guaranteed not to have a controlling terminal.
+ os.setsid()
+ try:
+ # Fork a second child and exit immediately to prevent zombies. This
+ # causes the second child process to be orphaned, making the init
+ # process responsible for its cleanup. And, since the first child is
+ # a session leader without a controlling terminal, it's possible for
+ # it to acquire one by opening a terminal in the future (System V-
+ # based systems). This second fork guarantees that the child is no
+ # longer a session leader, preventing the daemon from ever acquiring
+ # a controlling terminal.
+ pid = os.fork() # Fork a second child.
+ except OSError as e:
+ raise Exception("%s [%d]" % (e.strerror, e.errno))
+
+ if (pid != 0):
+ # Parent (the first child) of the second child.
+ # exit() or _exit()?
+ # _exit is like exit(), but it doesn't call any functions registered
+ # with atexit (and on_exit) or any registered signal handlers. It also
+ # closes any open file descriptors. Using exit() may cause all stdio
+ # streams to be flushed twice and any temporary files may be unexpectedly
+ # removed. It's therefore recommended that child branches of a fork()
+ # and the parent branch(es) of a daemon use _exit().
+ os._exit(0)
+ else:
+ os.waitpid(pid, 0)
+ return
+
+ # The second child.
+
+ # Replace standard fds with our own
+ si = open('/dev/null', 'r')
+ os.dup2(si.fileno(), sys.stdin.fileno())
+
+ try:
+ so = open(logfile, 'a+')
+ se = so
+ os.dup2(so.fileno(), sys.stdout.fileno())
+ os.dup2(se.fileno(), sys.stderr.fileno())
+ except io.UnsupportedOperation:
+ sys.stdout = open(logfile, 'a+')
+ sys.stderr = sys.stdout
+
+ try:
+ function()
+    except Exception:
+ traceback.print_exc()
+ finally:
+ bb.event.print_ui_queue()
+ os._exit(0)
diff --git a/poky/bitbake/lib/bb/data.py b/poky/bitbake/lib/bb/data.py
new file mode 100644
index 000000000..80a7879cb
--- /dev/null
+++ b/poky/bitbake/lib/bb/data.py
@@ -0,0 +1,403 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Data' implementations
+
+Functions for interacting with the data structure used by the
+BitBake build tools.
+
+The expandKeys and update_data are the most expensive
+operations. At night the cookie monster came by and
+suggested 'give me cookies on setting the variables and
+things will work out'. Taking this suggestion into account
+applying the skills from the not yet passed 'Entwurf und
+Analyse von Algorithmen' lecture and the cookie
+monster seems to be right. We will track setVar more carefully
+to have faster update_data and expandKeys operations.
+
+This is a trade-off between speed and memory again but
+the speed is more critical here.
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2005 Holger Hans Peter Freyther
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import sys, os, re
+if sys.argv[0][-5:] == "pydoc":
+ path = os.path.dirname(os.path.dirname(sys.argv[1]))
+else:
+ path = os.path.dirname(os.path.dirname(sys.argv[0]))
+sys.path.insert(0, path)
+from itertools import groupby
+
+from bb import data_smart
+from bb import codeparser
+import bb
+
+logger = data_smart.logger
+_dict_type = data_smart.DataSmart
+
+def init():
+ """Return a new object representing the Bitbake data"""
+ return _dict_type()
+
+def init_db(parent = None):
+ """Return a new object representing the Bitbake data,
+ optionally based on an existing object"""
+ if parent is not None:
+ return parent.createCopy()
+ else:
+ return _dict_type()
+
+def createCopy(source):
+ """Link the source set to the destination
+ If one does not find the value in the destination set,
+ search will go on to the source set to get the value.
+ Value from source are copy-on-write. i.e. any try to
+ modify one of them will end up putting the modified value
+ in the destination set.
+ """
+ return source.createCopy()
+
+def initVar(var, d):
+ """Non-destructive var init for data structure"""
+ d.initVar(var)
+
+def keys(d):
+ """Return a list of keys in d"""
+ return d.keys()
+
+
+__expand_var_regexp__ = re.compile(r"\${[^{}]+}")
+__expand_python_regexp__ = re.compile(r"\${@.+?}")
+
+def expand(s, d, varname = None):
+ """Variable expansion using the data store"""
+ return d.expand(s, varname)
+
+def expandKeys(alterdata, readdata = None):
+    if readdata is None:
+ readdata = alterdata
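+    # e.g. (illustrative) a key named FOO_${TARGET_ARCH} is renamed to its
+    # expanded form (FOO_x86_64 when TARGET_ARCH is "x86_64") so that later
+    # lookups by the expanded name succeed.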
+
+ todolist = {}
+ for key in alterdata:
+        if '${' not in key:
+ continue
+
+ ekey = expand(key, readdata)
+ if key == ekey:
+ continue
+ todolist[key] = ekey
+
+ # These two for loops are split for performance to maximise the
+ # usefulness of the expand cache
+ for key in sorted(todolist):
+ ekey = todolist[key]
+ newval = alterdata.getVar(ekey, False)
+ if newval is not None:
+ val = alterdata.getVar(key, False)
+ if val is not None:
+ bb.warn("Variable key %s (%s) replaces original key %s (%s)." % (key, val, ekey, newval))
+ alterdata.renameVar(key, ekey)
+
+def inheritFromOS(d, savedenv, permitted):
+ """Inherit variables from the initial environment."""
+ exportlist = bb.utils.preserved_envvars_exported()
+ for s in savedenv.keys():
+ if s in permitted:
+ try:
+ d.setVar(s, savedenv.getVar(s), op = 'from env')
+ if s in exportlist:
+ d.setVarFlag(s, "export", True, op = 'auto env export')
+ except TypeError:
+ pass
+
+def emit_var(var, o=sys.__stdout__, d = init(), all=False):
+ """Emit a variable to be sourced by a shell."""
+ func = d.getVarFlag(var, "func", False)
+ if d.getVarFlag(var, 'python', False) and func:
+ return False
+
+ export = d.getVarFlag(var, "export", False)
+ unexport = d.getVarFlag(var, "unexport", False)
+ if not all and not export and not unexport and not func:
+ return False
+
+ try:
+ if all:
+ oval = d.getVar(var, False)
+ val = d.getVar(var)
+ except (KeyboardInterrupt, bb.build.FuncFailed):
+ raise
+ except Exception as exc:
+ o.write('# expansion of %s threw %s: %s\n' % (var, exc.__class__.__name__, str(exc)))
+ return False
+
+ if all:
+ d.varhistory.emit(var, oval, val, o, d)
+
+ if (var.find("-") != -1 or var.find(".") != -1 or var.find('{') != -1 or var.find('}') != -1 or var.find('+') != -1) and not all:
+ return False
+
+ varExpanded = d.expand(var)
+
+ if unexport:
+ o.write('unset %s\n' % varExpanded)
+ return False
+
+ if val is None:
+ return False
+
+ val = str(val)
+
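+    # Bash (post-Shellshock) exports functions through environment variables
+    # named BASH_FUNC_name%%="() { ..."; strip the wrapper and re-emit them
+    # as real shell functions.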
+ if varExpanded.startswith("BASH_FUNC_"):
+ varExpanded = varExpanded[10:-2]
+ val = val[3:] # Strip off "() "
+ o.write("%s() %s\n" % (varExpanded, val))
+ o.write("export -f %s\n" % (varExpanded))
+ return True
+
+ if func:
+ # NOTE: should probably check for unbalanced {} within the var
+ val = val.rstrip('\n')
+ o.write("%s() {\n%s\n}\n" % (varExpanded, val))
+        return True
+
+ if export:
+ o.write('export ')
+
+ # if we're going to output this within doublequotes,
+ # to a shell, we need to escape the quotes in the var
+ alter = re.sub('"', '\\"', val)
+ alter = re.sub('\n', ' \\\n', alter)
+ alter = re.sub('\\$', '\\\\$', alter)
+ o.write('%s="%s"\n' % (varExpanded, alter))
+ return False
+
+def emit_env(o=sys.__stdout__, d = init(), all=False):
+ """Emits all items in the data store in a format such that it can be sourced by a shell."""
+
+ isfunc = lambda key: bool(d.getVarFlag(key, "func", False))
+ keys = sorted((key for key in d.keys() if not key.startswith("__")), key=isfunc)
+ grouped = groupby(keys, isfunc)
+ for isfunc, keys in grouped:
+ for key in sorted(keys):
+ emit_var(key, o, d, all and not isfunc) and o.write('\n')
+
+def exported_keys(d):
+ return (key for key in d.keys() if not key.startswith('__') and
+ d.getVarFlag(key, 'export', False) and
+ not d.getVarFlag(key, 'unexport', False))
+
+def exported_vars(d):
+ k = list(exported_keys(d))
+ for key in k:
+ try:
+ value = d.getVar(key)
+ except Exception as err:
+ bb.warn("%s: Unable to export ${%s}: %s" % (d.getVar("FILE"), key, err))
+ continue
+
+ if value is not None:
+ yield key, str(value)
+
+def emit_func(func, o=sys.__stdout__, d = init()):
+ """Emits all items in the data store in a format such that it can be sourced by a shell."""
+
+ keys = (key for key in d.keys() if not key.startswith("__") and not d.getVarFlag(key, "func", False))
+ for key in sorted(keys):
+ emit_var(key, o, d, False)
+
+ o.write('\n')
+ emit_var(func, o, d, False) and o.write('\n')
+ newdeps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func))
+ newdeps |= set((d.getVarFlag(func, "vardeps") or "").split())
+ seen = set()
+ while newdeps:
+ deps = newdeps
+ seen |= deps
+ newdeps = set()
+ for dep in deps:
+ if d.getVarFlag(dep, "func", False) and not d.getVarFlag(dep, "python", False):
+ emit_var(dep, o, d, False) and o.write('\n')
+ newdeps |= bb.codeparser.ShellParser(dep, logger).parse_shell(d.getVar(dep))
+ newdeps |= set((d.getVarFlag(dep, "vardeps") or "").split())
+ newdeps -= seen
+
+_functionfmt = """
+def {function}(d):
+{body}"""
+
+def emit_func_python(func, o=sys.__stdout__, d = init()):
+ """Emits all items in the data store in a format such that it can be sourced by a shell."""
+
+ def write_func(func, o, call = False):
+ body = d.getVar(func, False)
+ if not body.startswith("def"):
+ body = _functionfmt.format(function=func, body=body)
+
+ o.write(body.strip() + "\n\n")
+ if call:
+ o.write(func + "(d)" + "\n\n")
+
+ write_func(func, o, True)
+ pp = bb.codeparser.PythonParser(func, logger)
+ pp.parse_python(d.getVar(func, False))
+ newdeps = pp.execs
+ newdeps |= set((d.getVarFlag(func, "vardeps") or "").split())
+ seen = set()
+ while newdeps:
+ deps = newdeps
+ seen |= deps
+ newdeps = set()
+ for dep in deps:
+ if d.getVarFlag(dep, "func", False) and d.getVarFlag(dep, "python", False):
+ write_func(dep, o)
+ pp = bb.codeparser.PythonParser(dep, logger)
+ pp.parse_python(d.getVar(dep, False))
+ newdeps |= pp.execs
+ newdeps |= set((d.getVarFlag(dep, "vardeps") or "").split())
+ newdeps -= seen
+
+def update_data(d):
+ """Performs final steps upon the datastore, including application of overrides"""
+ d.finalize(parent = True)
+
+def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
+ deps = set()
+ try:
+ if key[-1] == ']':
+ vf = key[:-1].split('[')
+ value = d.getVarFlag(vf[0], vf[1], False)
+ parser = d.expandWithRefs(value, key)
+ deps |= parser.references
+ deps = deps | (keys & parser.execs)
+ return deps, value
+ varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", "vardepsexclude", "exports", "postfuncs", "prefuncs", "lineno", "filename"]) or {}
+ vardeps = varflags.get("vardeps")
+ value = d.getVarFlag(key, "_content", False)
+
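+        # Fold the outcome of bb.utils.contains()-style checks into the
+        # dependency value as "VAR{item} = Set/Unset" lines, so the recorded
+        # value reflects whether the check passed rather than the full
+        # contents of the variable.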
+ def handle_contains(value, contains, d):
+ newvalue = ""
+ for k in sorted(contains):
+ l = (d.getVar(k) or "").split()
+ for item in sorted(contains[k]):
+ for word in item.split():
+ if not word in l:
+ newvalue += "\n%s{%s} = Unset" % (k, item)
+ break
+ else:
+ newvalue += "\n%s{%s} = Set" % (k, item)
+ if not newvalue:
+ return value
+ if not value:
+ return newvalue
+ return value + newvalue
+
+ if "vardepvalue" in varflags:
+ value = varflags.get("vardepvalue")
+ elif varflags.get("func"):
+ if varflags.get("python"):
+ parser = bb.codeparser.PythonParser(key, logger)
+ if value and "\t" in value:
+ logger.warning("Variable %s contains tabs, please remove these (%s)" % (key, d.getVar("FILE")))
+ parser.parse_python(value, filename=varflags.get("filename"), lineno=varflags.get("lineno"))
+ deps = deps | parser.references
+ deps = deps | (keys & parser.execs)
+ value = handle_contains(value, parser.contains, d)
+ else:
+ parsedvar = d.expandWithRefs(value, key)
+ parser = bb.codeparser.ShellParser(key, logger)
+ parser.parse_shell(parsedvar.value)
+ deps = deps | shelldeps
+ deps = deps | parsedvar.references
+ deps = deps | (keys & parser.execs) | (keys & parsedvar.execs)
+ value = handle_contains(value, parsedvar.contains, d)
+ if vardeps is None:
+ parser.log.flush()
+ if "prefuncs" in varflags:
+ deps = deps | set(varflags["prefuncs"].split())
+ if "postfuncs" in varflags:
+ deps = deps | set(varflags["postfuncs"].split())
+ if "exports" in varflags:
+ deps = deps | set(varflags["exports"].split())
+ else:
+ parser = d.expandWithRefs(value, key)
+ deps |= parser.references
+ deps = deps | (keys & parser.execs)
+ value = handle_contains(value, parser.contains, d)
+
+ if "vardepvalueexclude" in varflags:
+ exclude = varflags.get("vardepvalueexclude")
+ for excl in exclude.split('|'):
+ if excl:
+ value = value.replace(excl, '')
+
+ # Add varflags, assuming an exclusion list is set
+ if varflagsexcl:
+ varfdeps = []
+ for f in varflags:
+ if f not in varflagsexcl:
+ varfdeps.append('%s[%s]' % (key, f))
+ if varfdeps:
+ deps |= set(varfdeps)
+
+ deps |= set((vardeps or "").split())
+ deps -= set(varflags.get("vardepsexclude", "").split())
+ except bb.parse.SkipRecipe:
+ raise
+ except Exception as e:
+ bb.warn("Exception during build_dependencies for %s" % key)
+ raise
+ return deps, value
+ #bb.note("Variable %s references %s and calls %s" % (key, str(deps), str(execs)))
+ #d.setVarFlag(key, "vardeps", deps)
+
+def generate_dependencies(d):
+
+ keys = set(key for key in d if not key.startswith("__"))
+ shelldeps = set(key for key in d.getVar("__exportlist", False) if d.getVarFlag(key, "export", False) and not d.getVarFlag(key, "unexport", False))
+ varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS')
+
+ deps = {}
+ values = {}
+
+ tasklist = d.getVar('__BBTASKS', False) or []
+ for task in tasklist:
+ deps[task], values[task] = build_dependencies(task, keys, shelldeps, varflagsexcl, d)
+ newdeps = deps[task]
+ seen = set()
+ while newdeps:
+ nextdeps = newdeps
+ seen |= nextdeps
+ newdeps = set()
+ for dep in nextdeps:
+ if dep not in deps:
+ deps[dep], values[dep] = build_dependencies(dep, keys, shelldeps, varflagsexcl, d)
+ newdeps |= deps[dep]
+ newdeps -= seen
+ #print "For %s: %s" % (task, str(deps[task]))
+ return tasklist, deps, values
+
+def inherits_class(klass, d):
+ val = d.getVar('__inherit_cache', False) or []
+ needle = os.path.join('classes', '%s.bbclass' % klass)
+ for v in val:
+ if v.endswith(needle):
+ return True
+ return False
diff --git a/poky/bitbake/lib/bb/data_smart.py b/poky/bitbake/lib/bb/data_smart.py
new file mode 100644
index 000000000..7b09af5cf
--- /dev/null
+++ b/poky/bitbake/lib/bb/data_smart.py
@@ -0,0 +1,1037 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake Smart Dictionary Implementation
+
+Functions for interacting with the data structure used by the
+BitBake build tools.
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2004, 2005 Seb Frankengul
+# Copyright (C) 2005, 2006 Holger Hans Peter Freyther
+# Copyright (C) 2005 Uli Luckas
+# Copyright (C) 2005 ROAD GmbH
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import copy, re, sys, traceback
+from collections import MutableMapping
+import logging
+import hashlib
+import bb, bb.codeparser
+from bb import utils
+from bb.COW import COWDictBase
+
+logger = logging.getLogger("BitBake.Data")
+
+__setvar_keyword__ = ["_append", "_prepend", "_remove"]
+__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>[^A-Z]*))?$')
+__expand_var_regexp__ = re.compile(r"\${[^{}@\n\t :]+}")
+__expand_python_regexp__ = re.compile(r"\${@.+?}")
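+# e.g. (illustrative) __setvar_regexp__ matches variable names written as
+# FOO_append or FOO_append_pn-bar, while __expand_python_regexp__ matches
+# inline python expansion such as "${@time.strftime('%Y%m%d')}".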
+
+def infer_caller_details(loginfo, parent = False, varval = True):
+ """Save the caller the trouble of specifying everything."""
+ # Save effort.
+ if 'ignore' in loginfo and loginfo['ignore']:
+ return
+ # If nothing was provided, mark this as possibly unneeded.
+ if not loginfo:
+ loginfo['ignore'] = True
+ return
+ # Infer caller's likely values for variable (var) and value (value),
+ # to reduce clutter in the rest of the code.
+ above = None
+ def set_above():
+ try:
+ raise Exception
+ except Exception:
+ tb = sys.exc_info()[2]
+ if parent:
+ return tb.tb_frame.f_back.f_back.f_back
+ else:
+ return tb.tb_frame.f_back.f_back
+
+ if varval and ('variable' not in loginfo or 'detail' not in loginfo):
+ if not above:
+ above = set_above()
+ lcls = above.f_locals.items()
+ for k, v in lcls:
+ if k == 'value' and 'detail' not in loginfo:
+ loginfo['detail'] = v
+ if k == 'var' and 'variable' not in loginfo:
+ loginfo['variable'] = v
+ # Infer file/line/function from traceback
+ # Don't use traceback.extract_stack() since it fills the line contents which
+ # we don't need and that hits stat syscalls
+ if 'file' not in loginfo:
+ if not above:
+ above = set_above()
+ f = above.f_back
+ line = f.f_lineno
+ file = f.f_code.co_filename
+ func = f.f_code.co_name
+ loginfo['file'] = file
+ loginfo['line'] = line
+        if 'func' not in loginfo:
+ loginfo['func'] = func
+
+class VariableParse:
+ def __init__(self, varname, d, val = None):
+ self.varname = varname
+ self.d = d
+ self.value = val
+
+ self.references = set()
+ self.execs = set()
+ self.contains = {}
+
+ def var_sub(self, match):
+ key = match.group()[2:-1]
+ if self.varname and key:
+ if self.varname == key:
+ raise Exception("variable %s references itself!" % self.varname)
+ if key in self.d.expand_cache:
+ varparse = self.d.expand_cache[key]
+ var = varparse.value
+ else:
+ var = self.d.getVarFlag(key, "_content")
+ self.references.add(key)
+ if var is not None:
+ return var
+ else:
+ return match.group()
+
+ def python_sub(self, match):
+ if isinstance(match, str):
+ code = match
+ else:
+ code = match.group()[3:-1]
+
+ if "_remote_data" in self.d:
+ connector = self.d["_remote_data"]
+ return connector.expandPythonRef(self.varname, code, self.d)
+
+ codeobj = compile(code.strip(), self.varname or "<expansion>", "eval")
+
+ parser = bb.codeparser.PythonParser(self.varname, logger)
+ parser.parse_python(code)
+ if self.varname:
+ vardeps = self.d.getVarFlag(self.varname, "vardeps")
+ if vardeps is None:
+ parser.log.flush()
+ else:
+ parser.log.flush()
+ self.references |= parser.references
+ self.execs |= parser.execs
+
+ for k in parser.contains:
+ if k not in self.contains:
+ self.contains[k] = parser.contains[k].copy()
+ else:
+ self.contains[k].update(parser.contains[k])
+ value = utils.better_eval(codeobj, DataContext(self.d), {'d' : self.d})
+ return str(value)
+
+
+class DataContext(dict):
+ def __init__(self, metadata, **kwargs):
+ self.metadata = metadata
+ dict.__init__(self, **kwargs)
+ self['d'] = metadata
+
+ def __missing__(self, key):
+ value = self.metadata.getVar(key)
+ if value is None or self.metadata.getVarFlag(key, 'func', False):
+ raise KeyError(key)
+ else:
+ return value
+
+class ExpansionError(Exception):
+ def __init__(self, varname, expression, exception):
+ self.expression = expression
+ self.variablename = varname
+ self.exception = exception
+ if varname:
+ if expression:
+ self.msg = "Failure expanding variable %s, expression was %s which triggered exception %s: %s" % (varname, expression, type(exception).__name__, exception)
+ else:
+ self.msg = "Failure expanding variable %s: %s: %s" % (varname, type(exception).__name__, exception)
+ else:
+ self.msg = "Failure expanding expression %s which triggered exception %s: %s" % (expression, type(exception).__name__, exception)
+ Exception.__init__(self, self.msg)
+ self.args = (varname, expression, exception)
+ def __str__(self):
+ return self.msg
+
+class IncludeHistory(object):
+ def __init__(self, parent = None, filename = '[TOP LEVEL]'):
+ self.parent = parent
+ self.filename = filename
+ self.children = []
+ self.current = self
+
+ def copy(self):
+ new = IncludeHistory(self.parent, self.filename)
+ for c in self.children:
+ new.children.append(c)
+ return new
+
+ def include(self, filename):
+ newfile = IncludeHistory(self.current, filename)
+ self.current.children.append(newfile)
+ self.current = newfile
+ return self
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, a, b, c):
+ if self.current.parent:
+ self.current = self.current.parent
+ else:
+ bb.warn("Include log: Tried to finish '%s' at top level." % filename)
+ return False
+
+ def emit(self, o, level = 0):
+ """Emit an include history file, and its children."""
+ if level:
+ spaces = " " * (level - 1)
+ o.write("# %s%s" % (spaces, self.filename))
+ if len(self.children) > 0:
+ o.write(" includes:")
+ else:
+ o.write("#\n# INCLUDE HISTORY:\n#")
+ level = level + 1
+ for child in self.children:
+ o.write("\n")
+ child.emit(o, level)
+
+class VariableHistory(object):
+ def __init__(self, dataroot):
+ self.dataroot = dataroot
+ self.variables = COWDictBase.copy()
+
+ def copy(self):
+ new = VariableHistory(self.dataroot)
+ new.variables = self.variables.copy()
+ return new
+
+ def __getstate__(self):
+ vardict = {}
+ for k, v in self.variables.iteritems():
+ vardict[k] = v
+ return {'dataroot': self.dataroot,
+ 'variables': vardict}
+
+ def __setstate__(self, state):
+ self.dataroot = state['dataroot']
+ self.variables = COWDictBase.copy()
+ for k, v in state['variables'].items():
+ self.variables[k] = v
+
+ def record(self, *kwonly, **loginfo):
+ if not self.dataroot._tracking:
+ return
+ if len(kwonly) > 0:
+ raise TypeError
+ infer_caller_details(loginfo, parent = True)
+ if 'ignore' in loginfo and loginfo['ignore']:
+ return
+ if 'op' not in loginfo or not loginfo['op']:
+ loginfo['op'] = 'set'
+ if 'detail' in loginfo:
+ loginfo['detail'] = str(loginfo['detail'])
+ if 'variable' not in loginfo or 'file' not in loginfo:
+ raise ValueError("record() missing variable or file.")
+ var = loginfo['variable']
+
+ if var not in self.variables:
+ self.variables[var] = []
+ if not isinstance(self.variables[var], list):
+ return
+ if 'nodups' in loginfo and loginfo in self.variables[var]:
+ return
+ self.variables[var].append(loginfo.copy())
+
+ def variable(self, var):
+ remote_connector = self.dataroot.getVar('_remote_data', False)
+ if remote_connector:
+ varhistory = remote_connector.getVarHistory(var)
+ else:
+ varhistory = []
+
+ if var in self.variables:
+ varhistory.extend(self.variables[var])
+ return varhistory
+
+ def emit(self, var, oval, val, o, d):
+ history = self.variable(var)
+
+ # Append override history
+ if var in d.overridedata:
+ for (r, override) in d.overridedata[var]:
+ for event in self.variable(r):
+ loginfo = event.copy()
+ if 'flag' in loginfo and not loginfo['flag'].startswith("_"):
+ continue
+ loginfo['variable'] = var
+ loginfo['op'] = 'override[%s]:%s' % (override, loginfo['op'])
+ history.append(loginfo)
+
+ commentVal = re.sub('\n', '\n#', str(oval))
+ if history:
+ if len(history) == 1:
+ o.write("#\n# $%s\n" % var)
+ else:
+ o.write("#\n# $%s [%d operations]\n" % (var, len(history)))
+ for event in history:
+ # o.write("# %s\n" % str(event))
+ if 'func' in event:
+ # If we have a function listed, this is internal
+ # code, not an operation in a config file, and the
+ # full path is distracting.
+ event['file'] = re.sub('.*/', '', event['file'])
+ display_func = ' [%s]' % event['func']
+ else:
+ display_func = ''
+ if 'flag' in event:
+ flag = '[%s] ' % (event['flag'])
+ else:
+ flag = ''
+ o.write("# %s %s:%s%s\n# %s\"%s\"\n" % (event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', event['detail'])))
+ if len(history) > 1:
+ o.write("# pre-expansion value:\n")
+ o.write('# "%s"\n' % (commentVal))
+ else:
+ o.write("#\n# $%s\n# [no history recorded]\n#\n" % var)
+ o.write('# "%s"\n' % (commentVal))
+
+ def get_variable_files(self, var):
+ """Get the files where operations are made on a variable"""
+ var_history = self.variable(var)
+ files = []
+ for event in var_history:
+ files.append(event['file'])
+ return files
+
+ def get_variable_lines(self, var, f):
+ """Get the line where a operation is made on a variable in file f"""
+ var_history = self.variable(var)
+ lines = []
+ for event in var_history:
+            if f == event['file']:
+ line = event['line']
+ lines.append(line)
+ return lines
+
+ def get_variable_items_files(self, var, d):
+ """
+ Use variable history to map items added to a list variable and
+ the files in which they were added.
+ """
+ history = self.variable(var)
+ finalitems = (d.getVar(var) or '').split()
+ filemap = {}
+ isset = False
+ for event in history:
+ if 'flag' in event:
+ continue
+ if event['op'] == '_remove':
+ continue
+ if isset and event['op'] == 'set?':
+ continue
+ isset = True
+ items = d.expand(event['detail']).split()
+ for item in items:
+ # This is a little crude but is belt-and-braces to avoid us
+ # having to handle every possible operation type specifically
+ if item in finalitems and not item in filemap:
+ filemap[item] = event['file']
+ return filemap
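+    # Illustrative usage sketch (hypothetical variable and file names): if
+    # d.getVar('IMAGE_INSTALL') is 'pkg-a pkg-b', with pkg-a added in
+    # local.conf and pkg-b in an image recipe, then
+    #   d.varhistory.get_variable_items_files('IMAGE_INSTALL', d)
+    # would return {'pkg-a': 'conf/local.conf', 'pkg-b': 'recipes/my-image.bb'}.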
+
+ def del_var_history(self, var, f=None, line=None):
+ """If file f and line are not given, the entire history of var is deleted"""
+ if var in self.variables:
+ if f and line:
+                self.variables[var] = [x for x in self.variables[var] if not (x['file'] == f and x['line'] == line)]
+ else:
+ self.variables[var] = []
+
+class DataSmart(MutableMapping):
+ def __init__(self):
+ self.dict = {}
+
+ self.inchistory = IncludeHistory()
+ self.varhistory = VariableHistory(self)
+ self._tracking = False
+
+ self.expand_cache = {}
+
+ # cookie monster tribute
+ # Need to be careful about writes to overridedata as
+ # its only a shallow copy, could influence other data store
+ # copies!
+ self.overridedata = {}
+ self.overrides = None
+ self.overridevars = set(["OVERRIDES", "FILE"])
+ self.inoverride = False
+
+ def enableTracking(self):
+ self._tracking = True
+
+ def disableTracking(self):
+ self._tracking = False
+
+ def expandWithRefs(self, s, varname):
+
+ if not isinstance(s, str): # sanity check
+ return VariableParse(varname, self, s)
+
+ if varname and varname in self.expand_cache:
+ return self.expand_cache[varname]
+
+ varparse = VariableParse(varname, self)
+
+ while s.find('${') != -1:
+ olds = s
+ try:
+ s = __expand_var_regexp__.sub(varparse.var_sub, s)
+ try:
+ s = __expand_python_regexp__.sub(varparse.python_sub, s)
+ except SyntaxError as e:
+ # Likely unmatched brackets, just don't expand the expression
+ if e.msg != "EOL while scanning string literal":
+ raise
+ if s == olds:
+ break
+ except ExpansionError:
+ raise
+ except bb.parse.SkipRecipe:
+ raise
+ except Exception as exc:
+ raise ExpansionError(varname, s, exc) from exc
+
+ varparse.value = s
+
+ if varname:
+ self.expand_cache[varname] = varparse
+
+ return varparse
+
+ def expand(self, s, varname = None):
+ return self.expandWithRefs(s, varname).value
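+    # Minimal expansion sketch (illustrative only): assuming a DataSmart
+    # instance d,
+    #   d.setVar('A', '1')
+    #   d.setVar('B', '${A} and ${@2 + 1}')
+    #   d.expand('${B}')   # -> '1 and 3'
+    # ${VAR} references expand recursively and ${@...} evaluates inline
+    # python, both handled by expandWithRefs() above.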
+
+ def finalize(self, parent = False):
+ return
+
+ def internal_finalize(self, parent = False):
+ """Performs final steps upon the datastore, including application of overrides"""
+ self.overrides = None
+
+ def need_overrides(self):
+ if self.overrides is not None:
+ return
+ if self.inoverride:
+ return
+ for count in range(5):
+ self.inoverride = True
+ # Can end up here recursively so setup dummy values
+ self.overrides = []
+ self.overridesset = set()
+ self.overrides = (self.getVar("OVERRIDES") or "").split(":") or []
+ self.overridesset = set(self.overrides)
+ self.inoverride = False
+ self.expand_cache = {}
+ newoverrides = (self.getVar("OVERRIDES") or "").split(":") or []
+ if newoverrides == self.overrides:
+ break
+ self.overrides = newoverrides
+ self.overridesset = set(self.overrides)
+ else:
+ bb.fatal("Overrides could not be expanded into a stable state after 5 iterations, overrides must be being referenced by other overridden variables in some recursive fashion. Please provide your configuration to bitbake-devel so we can laugh, er, I mean try and understand how to make it work.")
+
+ def initVar(self, var):
+ self.expand_cache = {}
+ if not var in self.dict:
+ self.dict[var] = {}
+
+ def _findVar(self, var):
+ dest = self.dict
+ while dest:
+ if var in dest:
+ return dest[var], self.overridedata.get(var, None)
+
+ if "_remote_data" in dest:
+ connector = dest["_remote_data"]["_content"]
+ return connector.getVar(var)
+
+ if "_data" not in dest:
+ break
+ dest = dest["_data"]
+ return None, self.overridedata.get(var, None)
+
+ def _makeShadowCopy(self, var):
+ if var in self.dict:
+ return
+
+ local_var, _ = self._findVar(var)
+
+ if local_var:
+ self.dict[var] = copy.copy(local_var)
+ else:
+ self.initVar(var)
+
+
+ def setVar(self, var, value, **loginfo):
+ #print("var=" + str(var) + " val=" + str(value))
+ parsing=False
+ if 'parsing' in loginfo:
+ parsing=True
+
+ if '_remote_data' in self.dict:
+ connector = self.dict["_remote_data"]["_content"]
+ res = connector.setVar(var, value)
+ if not res:
+ return
+
+ if 'op' not in loginfo:
+ loginfo['op'] = "set"
+ self.expand_cache = {}
+ match = __setvar_regexp__.match(var)
+ if match and match.group("keyword") in __setvar_keyword__:
+ base = match.group('base')
+ keyword = match.group("keyword")
+ override = match.group('add')
+ l = self.getVarFlag(base, keyword, False) or []
+ l.append([value, override])
+ self.setVarFlag(base, keyword, l, ignore=True)
+ # And cause that to be recorded:
+ loginfo['detail'] = value
+ loginfo['variable'] = base
+ if override:
+ loginfo['op'] = '%s[%s]' % (keyword, override)
+ else:
+ loginfo['op'] = keyword
+ self.varhistory.record(**loginfo)
+ # todo make sure keyword is not __doc__ or __module__
+ # pay the cookie monster
+
+ # more cookies for the cookie monster
+ if '_' in var:
+ self._setvar_update_overrides(base, **loginfo)
+
+ if base in self.overridevars:
+ self._setvar_update_overridevars(var, value)
+ return
+
+ if not var in self.dict:
+ self._makeShadowCopy(var)
+
+ if not parsing:
+ if "_append" in self.dict[var]:
+ del self.dict[var]["_append"]
+ if "_prepend" in self.dict[var]:
+ del self.dict[var]["_prepend"]
+ if "_remove" in self.dict[var]:
+ del self.dict[var]["_remove"]
+ if var in self.overridedata:
+ active = []
+ self.need_overrides()
+ for (r, o) in self.overridedata[var]:
+ if o in self.overridesset:
+ active.append(r)
+ elif "_" in o:
+ if set(o.split("_")).issubset(self.overridesset):
+ active.append(r)
+ for a in active:
+ self.delVar(a)
+ del self.overridedata[var]
+
+ # more cookies for the cookie monster
+ if '_' in var:
+ self._setvar_update_overrides(var, **loginfo)
+
+ # setting var
+ self.dict[var]["_content"] = value
+ self.varhistory.record(**loginfo)
+
+ if var in self.overridevars:
+ self._setvar_update_overridevars(var, value)
+
+ def _setvar_update_overridevars(self, var, value):
+ vardata = self.expandWithRefs(value, var)
+ new = vardata.references
+ new.update(vardata.contains.keys())
+ while not new.issubset(self.overridevars):
+ nextnew = set()
+ self.overridevars.update(new)
+ for i in new:
+ vardata = self.expandWithRefs(self.getVar(i), i)
+ nextnew.update(vardata.references)
+ nextnew.update(vardata.contains.keys())
+ new = nextnew
+ self.internal_finalize(True)
+
+ def _setvar_update_overrides(self, var, **loginfo):
+ # aka pay the cookie monster
+ override = var[var.rfind('_')+1:]
+ shortvar = var[:var.rfind('_')]
+ while override and override.islower():
+ if shortvar not in self.overridedata:
+ self.overridedata[shortvar] = []
+ if [var, override] not in self.overridedata[shortvar]:
+ # Force CoW by recreating the list first
+ self.overridedata[shortvar] = list(self.overridedata[shortvar])
+ self.overridedata[shortvar].append([var, override])
+ override = None
+ if "_" in shortvar:
+ override = var[shortvar.rfind('_')+1:]
+ shortvar = var[:shortvar.rfind('_')]
+ if len(shortvar) == 0:
+ override = None
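+    # Illustrative sketch (hypothetical variable name): setting FOO_qemux86
+    # records ['FOO_qemux86', 'qemux86'] under self.overridedata['FOO'], so a
+    # later getVar('FOO') can apply the value when the 'qemux86' override is
+    # active. Only all-lowercase suffixes are treated as override candidates.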
+
+ def getVar(self, var, expand=True, noweakdefault=False, parsing=False):
+ return self.getVarFlag(var, "_content", expand, noweakdefault, parsing)
+
+ def renameVar(self, key, newkey, **loginfo):
+ """
+ Rename the variable key to newkey
+ """
+ if '_remote_data' in self.dict:
+ connector = self.dict["_remote_data"]["_content"]
+ res = connector.renameVar(key, newkey)
+ if not res:
+ return
+
+ val = self.getVar(key, 0, parsing=True)
+ if val is not None:
+ loginfo['variable'] = newkey
+ loginfo['op'] = 'rename from %s' % key
+ loginfo['detail'] = val
+ self.varhistory.record(**loginfo)
+ self.setVar(newkey, val, ignore=True, parsing=True)
+
+ for i in (__setvar_keyword__):
+ src = self.getVarFlag(key, i, False)
+ if src is None:
+ continue
+
+ dest = self.getVarFlag(newkey, i, False) or []
+ dest.extend(src)
+ self.setVarFlag(newkey, i, dest, ignore=True)
+
+ if key in self.overridedata:
+ self.overridedata[newkey] = []
+ for (v, o) in self.overridedata[key]:
+ self.overridedata[newkey].append([v.replace(key, newkey), o])
+ self.renameVar(v, v.replace(key, newkey))
+
+ if '_' in newkey and val is None:
+ self._setvar_update_overrides(newkey, **loginfo)
+
+ loginfo['variable'] = key
+ loginfo['op'] = 'rename (to)'
+ loginfo['detail'] = newkey
+ self.varhistory.record(**loginfo)
+ self.delVar(key, ignore=True)
+
+ def appendVar(self, var, value, **loginfo):
+ loginfo['op'] = 'append'
+ self.varhistory.record(**loginfo)
+ self.setVar(var + "_append", value, ignore=True, parsing=True)
+
+ def prependVar(self, var, value, **loginfo):
+ loginfo['op'] = 'prepend'
+ self.varhistory.record(**loginfo)
+ self.setVar(var + "_prepend", value, ignore=True, parsing=True)
+
+ def delVar(self, var, **loginfo):
+ if '_remote_data' in self.dict:
+ connector = self.dict["_remote_data"]["_content"]
+ res = connector.delVar(var)
+ if not res:
+ return
+
+ loginfo['detail'] = ""
+ loginfo['op'] = 'del'
+ self.varhistory.record(**loginfo)
+ self.expand_cache = {}
+ self.dict[var] = {}
+ if var in self.overridedata:
+ del self.overridedata[var]
+ if '_' in var:
+ override = var[var.rfind('_')+1:]
+ shortvar = var[:var.rfind('_')]
+ while override and override.islower():
+ try:
+ if shortvar in self.overridedata:
+ # Force CoW by recreating the list first
+ self.overridedata[shortvar] = list(self.overridedata[shortvar])
+ self.overridedata[shortvar].remove([var, override])
+ except ValueError as e:
+ pass
+ override = None
+ if "_" in shortvar:
+ override = var[shortvar.rfind('_')+1:]
+ shortvar = var[:shortvar.rfind('_')]
+ if len(shortvar) == 0:
+ override = None
+
+ def setVarFlag(self, var, flag, value, **loginfo):
+ if '_remote_data' in self.dict:
+ connector = self.dict["_remote_data"]["_content"]
+ res = connector.setVarFlag(var, flag, value)
+ if not res:
+ return
+
+ self.expand_cache = {}
+ if 'op' not in loginfo:
+ loginfo['op'] = "set"
+ loginfo['flag'] = flag
+ self.varhistory.record(**loginfo)
+ if not var in self.dict:
+ self._makeShadowCopy(var)
+ self.dict[var][flag] = value
+
+ if flag == "_defaultval" and '_' in var:
+ self._setvar_update_overrides(var, **loginfo)
+ if flag == "_defaultval" and var in self.overridevars:
+ self._setvar_update_overridevars(var, value)
+
+ if flag == "unexport" or flag == "export":
+ if not "__exportlist" in self.dict:
+ self._makeShadowCopy("__exportlist")
+ if not "_content" in self.dict["__exportlist"]:
+ self.dict["__exportlist"]["_content"] = set()
+ self.dict["__exportlist"]["_content"].add(var)
+
+ def getVarFlag(self, var, flag, expand=True, noweakdefault=False, parsing=False):
+ local_var, overridedata = self._findVar(var)
+ value = None
+ if flag == "_content" and overridedata is not None and not parsing:
+ match = False
+ active = {}
+ self.need_overrides()
+ for (r, o) in overridedata:
+ # What about double overrides both with "_" in the name?
+ if o in self.overridesset:
+ active[o] = r
+ elif "_" in o:
+ if set(o.split("_")).issubset(self.overridesset):
+ active[o] = r
+
+ mod = True
+ while mod:
+ mod = False
+ for o in self.overrides:
+ for a in active.copy():
+ if a.endswith("_" + o):
+ t = active[a]
+ del active[a]
+ active[a.replace("_" + o, "")] = t
+ mod = True
+ elif a == o:
+ match = active[a]
+ del active[a]
+ if match:
+ value = self.getVar(match, False)
+
+ if local_var is not None and value is None:
+ if flag in local_var:
+ value = copy.copy(local_var[flag])
+ elif flag == "_content" and "_defaultval" in local_var and not noweakdefault:
+ value = copy.copy(local_var["_defaultval"])
+
+
+ if flag == "_content" and local_var is not None and "_append" in local_var and not parsing:
+ if not value:
+ value = ""
+ self.need_overrides()
+ for (r, o) in local_var["_append"]:
+ match = True
+ if o:
+ for o2 in o.split("_"):
+ if not o2 in self.overrides:
+ match = False
+ if match:
+ value = value + r
+
+ if flag == "_content" and local_var is not None and "_prepend" in local_var and not parsing:
+ if not value:
+ value = ""
+ self.need_overrides()
+ for (r, o) in local_var["_prepend"]:
+
+ match = True
+ if o:
+ for o2 in o.split("_"):
+ if not o2 in self.overrides:
+ match = False
+ if match:
+ value = r + value
+
+ if expand and value:
+ # Only getvar (flag == _content) hits the expand cache
+ cachename = None
+ if flag == "_content":
+ cachename = var
+ else:
+ cachename = var + "[" + flag + "]"
+ value = self.expand(value, cachename)
+
+ if value and flag == "_content" and local_var is not None and "_remove" in local_var:
+ removes = []
+ self.need_overrides()
+ for (r, o) in local_var["_remove"]:
+ match = True
+ if o:
+ for o2 in o.split("_"):
+ if not o2 in self.overrides:
+ match = False
+ if match:
+ removes.extend(self.expand(r).split())
+
+ if removes:
+ filtered = filter(lambda v: v not in removes,
+ value.split())
+ value = " ".join(filtered)
+ if expand and var in self.expand_cache:
+ # We need to ensure the expand cache has the correct value
+ # flag == "_content" here
+ self.expand_cache[var].value = value
+ return value
+
+ def delVarFlag(self, var, flag, **loginfo):
+ if '_remote_data' in self.dict:
+ connector = self.dict["_remote_data"]["_content"]
+ res = connector.delVarFlag(var, flag)
+ if not res:
+ return
+
+ self.expand_cache = {}
+ local_var, _ = self._findVar(var)
+ if not local_var:
+ return
+ if not var in self.dict:
+ self._makeShadowCopy(var)
+
+ if var in self.dict and flag in self.dict[var]:
+ loginfo['detail'] = ""
+ loginfo['op'] = 'delFlag'
+ loginfo['flag'] = flag
+ self.varhistory.record(**loginfo)
+
+ del self.dict[var][flag]
+
+ def appendVarFlag(self, var, flag, value, **loginfo):
+ loginfo['op'] = 'append'
+ loginfo['flag'] = flag
+ self.varhistory.record(**loginfo)
+ newvalue = (self.getVarFlag(var, flag, False) or "") + value
+ self.setVarFlag(var, flag, newvalue, ignore=True)
+
+ def prependVarFlag(self, var, flag, value, **loginfo):
+ loginfo['op'] = 'prepend'
+ loginfo['flag'] = flag
+ self.varhistory.record(**loginfo)
+ newvalue = value + (self.getVarFlag(var, flag, False) or "")
+ self.setVarFlag(var, flag, newvalue, ignore=True)
+
+ def setVarFlags(self, var, flags, **loginfo):
+ self.expand_cache = {}
+ infer_caller_details(loginfo)
+ if not var in self.dict:
+ self._makeShadowCopy(var)
+
+ for i in flags:
+ if i == "_content":
+ continue
+ loginfo['flag'] = i
+ loginfo['detail'] = flags[i]
+ self.varhistory.record(**loginfo)
+ self.dict[var][i] = flags[i]
+
+ def getVarFlags(self, var, expand = False, internalflags=False):
+ local_var, _ = self._findVar(var)
+ flags = {}
+
+ if local_var:
+ for i in local_var:
+ if i.startswith("_") and not internalflags:
+ continue
+ flags[i] = local_var[i]
+ if expand and i in expand:
+ flags[i] = self.expand(flags[i], var + "[" + i + "]")
+ if len(flags) == 0:
+ return None
+ return flags
+
+
+ def delVarFlags(self, var, **loginfo):
+ self.expand_cache = {}
+ if not var in self.dict:
+ self._makeShadowCopy(var)
+
+ if var in self.dict:
+ content = None
+
+ loginfo['op'] = 'delete flags'
+ self.varhistory.record(**loginfo)
+
+ # try to save the content
+ if "_content" in self.dict[var]:
+ content = self.dict[var]["_content"]
+ self.dict[var] = {}
+ self.dict[var]["_content"] = content
+ else:
+ del self.dict[var]
+
+ def createCopy(self):
+ """
+ Create a copy of self by setting _data to self
+ """
+ # we really want this to be a DataSmart...
+ data = DataSmart()
+ data.dict["_data"] = self.dict
+ data.varhistory = self.varhistory.copy()
+ data.varhistory.dataroot = data
+ data.inchistory = self.inchistory.copy()
+
+ data._tracking = self._tracking
+
+ data.overrides = None
+ data.overridevars = copy.copy(self.overridevars)
+ # Should really be a deepcopy but has heavy overhead.
+ # Instead, we're careful with writes.
+ data.overridedata = copy.copy(self.overridedata)
+
+ return data
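+    # Copy-on-write sketch (illustrative only): the child chains to its
+    # parent through dict['_data'] instead of duplicating it:
+    #   d2 = d.createCopy()
+    #   d2.setVar('A', 'child')   # shadows A in d2 only
+    #   d.getVar('A')             # parent value is unchanged
+    # _findVar() walks the '_data' chain, so reads fall through to the
+    # parent until a key is shadowed locally.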
+
+ def expandVarref(self, variable, parents=False):
+ """Find all references to variable in the data and expand it
+ in place, optionally descending to parent datastores."""
+
+ if parents:
+ keys = iter(self)
+ else:
+ keys = self.localkeys()
+
+ ref = '${%s}' % variable
+ value = self.getVar(variable, False)
+ for key in keys:
+ referrervalue = self.getVar(key, False)
+ if referrervalue and ref in referrervalue:
+ self.setVar(key, referrervalue.replace(ref, value))
+
+ def localkeys(self):
+ for key in self.dict:
+ if key not in ['_data', '_remote_data']:
+ yield key
+
+ def __iter__(self):
+ deleted = set()
+ overrides = set()
+ def keylist(d):
+ klist = set()
+ for key in d:
+ if key in ["_data", "_remote_data"]:
+ continue
+ if key in deleted:
+ continue
+ if key in overrides:
+ continue
+ if not d[key]:
+ deleted.add(key)
+ continue
+ klist.add(key)
+
+ if "_data" in d:
+ klist |= keylist(d["_data"])
+
+ if "_remote_data" in d:
+ connector = d["_remote_data"]["_content"]
+ for key in connector.getKeys():
+ if key in deleted:
+ continue
+ klist.add(key)
+
+ return klist
+
+ self.need_overrides()
+ for var in self.overridedata:
+ for (r, o) in self.overridedata[var]:
+ if o in self.overridesset:
+ overrides.add(var)
+ elif "_" in o:
+ if set(o.split("_")).issubset(self.overridesset):
+ overrides.add(var)
+
+ for k in keylist(self.dict):
+ yield k
+
+ for k in overrides:
+ yield k
+
+ def __len__(self):
+ return len(frozenset(iter(self)))
+
+ def __getitem__(self, item):
+ value = self.getVar(item, False)
+ if value is None:
+ raise KeyError(item)
+ else:
+ return value
+
+ def __setitem__(self, var, value):
+ self.setVar(var, value)
+
+ def __delitem__(self, var):
+ self.delVar(var)
+
+ def get_hash(self):
+ data = {}
+ d = self.createCopy()
+ bb.data.expandKeys(d)
+
+ config_whitelist = set((d.getVar("BB_HASHCONFIG_WHITELIST") or "").split())
+ keys = set(key for key in iter(d) if not key.startswith("__"))
+ for key in keys:
+ if key in config_whitelist:
+ continue
+
+ value = d.getVar(key, False) or ""
+ data.update({key:value})
+
+ varflags = d.getVarFlags(key, internalflags = True)
+ if not varflags:
+ continue
+ for f in varflags:
+ if f == "_content":
+ continue
+ data.update({'%s[%s]' % (key, f):varflags[f]})
+
+ for key in ["__BBTASKS", "__BBANONFUNCS", "__BBHANDLERS"]:
+ bb_list = d.getVar(key, False) or []
+ data.update({key:str(bb_list)})
+
+ if key == "__BBANONFUNCS":
+ for i in bb_list:
+ value = d.getVar(i, False) or ""
+ data.update({i:value})
+
+ data_str = str([(k, data[k]) for k in sorted(data.keys())])
+ return hashlib.md5(data_str.encode("utf-8")).hexdigest()
diff --git a/poky/bitbake/lib/bb/event.py b/poky/bitbake/lib/bb/event.py
new file mode 100644
index 000000000..5d0049626
--- /dev/null
+++ b/poky/bitbake/lib/bb/event.py
@@ -0,0 +1,831 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Event' implementation
+
+Classes and functions for manipulating 'events' in the
+BitBake build tools.
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os, sys
+import warnings
+import pickle
+import logging
+import atexit
+import traceback
+import ast
+import threading
+
+import bb.utils
+import bb.compat
+import bb.exceptions
+
+# This is the pid for which we should generate the event. This is set when
+# the runqueue forks off.
+worker_pid = 0
+worker_fire = None
+
+logger = logging.getLogger('BitBake.Event')
+
+class Event(object):
+ """Base class for events"""
+
+ def __init__(self):
+ self.pid = worker_pid
+
+
+class HeartbeatEvent(Event):
+ """Triggered at regular time intervals of 10 seconds. Other events can fire much more often
+ (runQueueTaskStarted when there are many short tasks) or not at all for long periods
+ of time (again runQueueTaskStarted, when there is just one long-running task), so this
+       event is more suitable for doing some task-independent work occasionally."""
+ def __init__(self, time):
+ Event.__init__(self)
+ self.time = time
+
+Registered = 10
+AlreadyRegistered = 14
+
+def get_class_handlers():
+ return _handlers
+
+def set_class_handlers(h):
+ global _handlers
+ _handlers = h
+
+def clean_class_handlers():
+ return bb.compat.OrderedDict()
+
+# Internal
+_handlers = clean_class_handlers()
+_ui_handlers = {}
+_ui_logfilters = {}
+_ui_handler_seq = 0
+_event_handler_map = {}
+_catchall_handlers = {}
+_eventfilter = None
+_uiready = False
+_thread_lock = threading.Lock()
+_thread_lock_enabled = False
+
+if hasattr(__builtins__, '__setitem__'):
+ builtins = __builtins__
+else:
+ builtins = __builtins__.__dict__
+
+def enable_threadlock():
+ global _thread_lock_enabled
+ _thread_lock_enabled = True
+
+def disable_threadlock():
+ global _thread_lock_enabled
+ _thread_lock_enabled = False
+
+def execute_handler(name, handler, event, d):
+ event.data = d
+ addedd = False
+ if 'd' not in builtins:
+ builtins['d'] = d
+ addedd = True
+ try:
+ ret = handler(event)
+ except (bb.parse.SkipRecipe, bb.BBHandledException):
+ raise
+ except Exception:
+ etype, value, tb = sys.exc_info()
+ logger.error("Execution of event handler '%s' failed" % name,
+ exc_info=(etype, value, tb.tb_next))
+ raise
+ except SystemExit as exc:
+ if exc.code != 0:
+ logger.error("Execution of event handler '%s' failed" % name)
+ raise
+ finally:
+ del event.data
+ if addedd:
+ del builtins['d']
+
+def fire_class_handlers(event, d):
+ if isinstance(event, logging.LogRecord):
+ return
+
+ eid = str(event.__class__)[8:-2]
+ evt_hmap = _event_handler_map.get(eid, {})
+ for name, handler in list(_handlers.items()):
+ if name in _catchall_handlers or name in evt_hmap:
+ if _eventfilter:
+ if not _eventfilter(name, handler, event, d):
+ continue
+ execute_handler(name, handler, event, d)
+
+ui_queue = []
+@atexit.register
+def print_ui_queue():
+ """If we're exiting before a UI has been spawned, display any queued
+ LogRecords to the console."""
+ logger = logging.getLogger("BitBake")
+ if not _uiready:
+ from bb.msg import BBLogFormatter
+ stdout = logging.StreamHandler(sys.stdout)
+ stderr = logging.StreamHandler(sys.stderr)
+ formatter = BBLogFormatter("%(levelname)s: %(message)s")
+ stdout.setFormatter(formatter)
+ stderr.setFormatter(formatter)
+
+ # First check to see if we have any proper messages
+ msgprint = False
+ msgerrs = False
+
+ # Should we print to stderr?
+ for event in ui_queue[:]:
+ if isinstance(event, logging.LogRecord) and event.levelno >= logging.WARNING:
+ msgerrs = True
+ break
+
+ if msgerrs:
+ logger.addHandler(stderr)
+ else:
+ logger.addHandler(stdout)
+
+ for event in ui_queue[:]:
+ if isinstance(event, logging.LogRecord):
+ if event.levelno > logging.DEBUG:
+ logger.handle(event)
+ msgprint = True
+
+ # Nope, so just print all of the messages we have (including debug messages)
+ if not msgprint:
+ for event in ui_queue[:]:
+ if isinstance(event, logging.LogRecord):
+ logger.handle(event)
+ if msgerrs:
+ logger.removeHandler(stderr)
+ else:
+ logger.removeHandler(stdout)
+
+def fire_ui_handlers(event, d):
+ global _thread_lock
+ global _thread_lock_enabled
+
+ if not _uiready:
+ # No UI handlers registered yet, queue up the messages
+ ui_queue.append(event)
+ return
+
+ if _thread_lock_enabled:
+ _thread_lock.acquire()
+
+ errors = []
+ for h in _ui_handlers:
+ #print "Sending event %s" % event
+ try:
+ if not _ui_logfilters[h].filter(event):
+ continue
+ # We use pickle here since it better handles object instances
+ # which xmlrpc's marshaller does not. Events *must* be serializable
+ # by pickle.
+ if hasattr(_ui_handlers[h].event, "sendpickle"):
+ _ui_handlers[h].event.sendpickle((pickle.dumps(event)))
+ else:
+ _ui_handlers[h].event.send(event)
+ except:
+ errors.append(h)
+ for h in errors:
+ del _ui_handlers[h]
+
+ if _thread_lock_enabled:
+ _thread_lock.release()
+
+def fire(event, d):
+ """Fire off an Event"""
+
+ # We can fire class handlers in the worker process context and this is
+ # desired so they get the task based datastore.
+ # UI handlers need to be fired in the server context so we defer this. They
+ # don't have a datastore so the datastore context isn't a problem.
+
+ fire_class_handlers(event, d)
+ if worker_fire:
+ worker_fire(event, d)
+ else:
+ # If messages have been queued up, clear the queue
+ global _uiready, ui_queue
+ if _uiready and ui_queue:
+ for queue_event in ui_queue:
+ fire_ui_handlers(queue_event, d)
+ ui_queue = []
+ fire_ui_handlers(event, d)
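+# Illustrative usage (a real pattern elsewhere in bitbake): events are fired
+# with the relevant datastore, e.g.
+#   bb.event.fire(bb.event.ConfigParsed(), d)
+# class handlers then see the datastore as event.data (and as 'd') while the
+# handler runs.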
+
+def fire_from_worker(event, d):
+ fire_ui_handlers(event, d)
+
+noop = lambda _: None
+def register(name, handler, mask=None, filename=None, lineno=None):
+ """Register an Event handler"""
+
+ # already registered
+ if name in _handlers:
+ return AlreadyRegistered
+
+ if handler is not None:
+ # handle string containing python code
+ if isinstance(handler, str):
+ tmp = "def %s(e):\n%s" % (name, handler)
+ try:
+ code = bb.methodpool.compile_cache(tmp)
+ if not code:
+ if filename is None:
+ filename = "%s(e)" % name
+ code = compile(tmp, filename, "exec", ast.PyCF_ONLY_AST)
+ if lineno is not None:
+ ast.increment_lineno(code, lineno-1)
+ code = compile(code, filename, "exec")
+ bb.methodpool.compile_cache_add(tmp, code)
+ except SyntaxError:
+ logger.error("Unable to register event handler '%s':\n%s", name,
+ ''.join(traceback.format_exc(limit=0)))
+ _handlers[name] = noop
+ return
+ env = {}
+ bb.utils.better_exec(code, env)
+ func = bb.utils.better_eval(name, env)
+ _handlers[name] = func
+ else:
+ _handlers[name] = handler
+
+ if not mask or '*' in mask:
+ _catchall_handlers[name] = True
+ else:
+ for m in mask:
+ if _event_handler_map.get(m, None) is None:
+ _event_handler_map[m] = {}
+ _event_handler_map[m][name] = True
+
+ return Registered
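+# Illustrative usage sketch (hypothetical handler name): a function handler
+# can be registered against an event mask and will then be invoked by
+# fire_class_handlers() above:
+#   def on_build_started(e):
+#       print("build started: %s" % e.msg)
+#   bb.event.register("on_build_started", on_build_started,
+#                     mask=["bb.event.BuildStarted"])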
+
+def remove(name, handler):
+ """Remove an Event handler"""
+ _handlers.pop(name)
+ if name in _catchall_handlers:
+ _catchall_handlers.pop(name)
+ for event in _event_handler_map.keys():
+ if name in _event_handler_map[event]:
+ _event_handler_map[event].pop(name)
+
+def get_handlers():
+ return _handlers
+
+def set_handlers(handlers):
+ global _handlers
+ _handlers = handlers
+
+def set_eventfilter(func):
+ global _eventfilter
+ _eventfilter = func
+
+def register_UIHhandler(handler, mainui=False):
+ bb.event._ui_handler_seq = bb.event._ui_handler_seq + 1
+ _ui_handlers[_ui_handler_seq] = handler
+ level, debug_domains = bb.msg.constructLogOptions()
+ _ui_logfilters[_ui_handler_seq] = UIEventFilter(level, debug_domains)
+ if mainui:
+ global _uiready
+ _uiready = _ui_handler_seq
+ return _ui_handler_seq
+
+def unregister_UIHhandler(handlerNum, mainui=False):
+ if mainui:
+ global _uiready
+ _uiready = False
+ if handlerNum in _ui_handlers:
+ del _ui_handlers[handlerNum]
+ return
+
+def get_uihandler():
+ if _uiready is False:
+ return None
+ return _uiready
+
+# Class to allow filtering of events and specific filtering of LogRecords *before* we put them over the IPC
+class UIEventFilter(object):
+ def __init__(self, level, debug_domains):
+ self.update(None, level, debug_domains)
+
+ def update(self, eventmask, level, debug_domains):
+ self.eventmask = eventmask
+ self.stdlevel = level
+ self.debug_domains = debug_domains
+
+ def filter(self, event):
+ if isinstance(event, logging.LogRecord):
+ if event.levelno >= self.stdlevel:
+ return True
+ if event.name in self.debug_domains and event.levelno >= self.debug_domains[event.name]:
+ return True
+ return False
+ eid = str(event.__class__)[8:-2]
+ if self.eventmask and eid not in self.eventmask:
+ return False
+ return True
+
+def set_UIHmask(handlerNum, level, debug_domains, mask):
+ if not handlerNum in _ui_handlers:
+ return False
+ if '*' in mask:
+ _ui_logfilters[handlerNum].update(None, level, debug_domains)
+ else:
+ _ui_logfilters[handlerNum].update(mask, level, debug_domains)
+ return True
+
+def getName(e):
+ """Returns the name of a class or class instance"""
+    if getattr(e, "__name__", None) is None:
+ return e.__class__.__name__
+ else:
+ return e.__name__
+
+class OperationStarted(Event):
+ """An operation has begun"""
+ def __init__(self, msg = "Operation Started"):
+ Event.__init__(self)
+ self.msg = msg
+
+class OperationCompleted(Event):
+ """An operation has completed"""
+ def __init__(self, total, msg = "Operation Completed"):
+ Event.__init__(self)
+ self.total = total
+ self.msg = msg
+
+class OperationProgress(Event):
+ """An operation is in progress"""
+ def __init__(self, current, total, msg = "Operation in Progress"):
+ Event.__init__(self)
+ self.current = current
+ self.total = total
+        self.msg = msg + ": %s/%s" % (current, total)
+
+class ConfigParsed(Event):
+ """Configuration Parsing Complete"""
+
+class MultiConfigParsed(Event):
+ """Multi-Config Parsing Complete"""
+ def __init__(self, mcdata):
+ self.mcdata = mcdata
+ Event.__init__(self)
+
+class RecipeEvent(Event):
+ def __init__(self, fn):
+ self.fn = fn
+ Event.__init__(self)
+
+class RecipePreFinalise(RecipeEvent):
+ """ Recipe Parsing Complete but not yet finialised"""
+
+class RecipeTaskPreProcess(RecipeEvent):
+ """
+ Recipe Tasks about to be finalised
+ The list of tasks should be final at this point and handlers
+ are only able to change interdependencies
+ """
+ def __init__(self, fn, tasklist):
+ self.fn = fn
+ self.tasklist = tasklist
+ Event.__init__(self)
+
+class RecipeParsed(RecipeEvent):
+ """ Recipe Parsing Complete """
+
+class StampUpdate(Event):
+ """Trigger for any adjustment of the stamp files to happen"""
+
+ def __init__(self, targets, stampfns):
+ self._targets = targets
+ self._stampfns = stampfns
+ Event.__init__(self)
+
+ def getStampPrefix(self):
+ return self._stampfns
+
+ def getTargets(self):
+ return self._targets
+
+ stampPrefix = property(getStampPrefix)
+ targets = property(getTargets)
+
+class BuildBase(Event):
+ """Base class for bitbake build events"""
+
+ def __init__(self, n, p, failures = 0):
+ self._name = n
+ self._pkgs = p
+ Event.__init__(self)
+ self._failures = failures
+
+ def getPkgs(self):
+ return self._pkgs
+
+ def setPkgs(self, pkgs):
+ self._pkgs = pkgs
+
+ def getName(self):
+ return self._name
+
+ def setName(self, name):
+ self._name = name
+
+ def getFailures(self):
+ """
+ Return the number of failed packages
+ """
+ return self._failures
+
+ pkgs = property(getPkgs, setPkgs, None, "pkgs property")
+ name = property(getName, setName, None, "name property")
+
+class BuildInit(BuildBase):
+ """buildFile or buildTargets was invoked"""
+ def __init__(self, p=[]):
+ name = None
+ BuildBase.__init__(self, name, p)
+
+class BuildStarted(BuildBase, OperationStarted):
+ """Event when builds start"""
+ def __init__(self, n, p, failures = 0):
+ OperationStarted.__init__(self, "Building Started")
+ BuildBase.__init__(self, n, p, failures)
+
+class BuildCompleted(BuildBase, OperationCompleted):
+ """Event when builds have completed"""
+ def __init__(self, total, n, p, failures=0, interrupted=0):
+ if not failures:
+ OperationCompleted.__init__(self, total, "Building Succeeded")
+ else:
+ OperationCompleted.__init__(self, total, "Building Failed")
+ self._interrupted = interrupted
+ BuildBase.__init__(self, n, p, failures)
+
+class DiskFull(Event):
+ """Disk full case build aborted"""
+ def __init__(self, dev, type, freespace, mountpoint):
+ Event.__init__(self)
+ self._dev = dev
+ self._type = type
+ self._free = freespace
+ self._mountpoint = mountpoint
+
+class DiskUsageSample:
+ def __init__(self, available_bytes, free_bytes, total_bytes):
+ # Number of bytes available to non-root processes.
+ self.available_bytes = available_bytes
+ # Number of bytes available to root processes.
+ self.free_bytes = free_bytes
+ # Total capacity of the volume.
+ self.total_bytes = total_bytes
+
+class MonitorDiskEvent(Event):
+ """If BB_DISKMON_DIRS is set, then this event gets triggered each time disk space is checked.
+ Provides information about devices that are getting monitored."""
+ def __init__(self, disk_usage):
+ Event.__init__(self)
+ # hash of device root path -> DiskUsageSample
+ self.disk_usage = disk_usage
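+    # Illustrative shape (hypothetical device): disk_usage maps each
+    # monitored device to a DiskUsageSample, e.g.
+    #   {'/dev/sda1': DiskUsageSample(available_bytes, free_bytes,
+    #                                 total_bytes)}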
+
+class NoProvider(Event):
+ """No Provider for an Event"""
+
+ def __init__(self, item, runtime=False, dependees=None, reasons=None, close_matches=None):
+ Event.__init__(self)
+ self._item = item
+ self._runtime = runtime
+ self._dependees = dependees
+ self._reasons = reasons
+ self._close_matches = close_matches
+
+ def getItem(self):
+ return self._item
+
+ def isRuntime(self):
+ return self._runtime
+
+ def __str__(self):
+ msg = ''
+ if self._runtime:
+ r = "R"
+ else:
+ r = ""
+
+ extra = ''
+ if not self._reasons:
+ if self._close_matches:
+ extra = ". Close matches:\n %s" % '\n '.join(self._close_matches)
+
+ if self._dependees:
+ msg = "Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)%s" % (r, self._item, ", ".join(self._dependees), r, extra)
+ else:
+ msg = "Nothing %sPROVIDES '%s'%s" % (r, self._item, extra)
+ if self._reasons:
+ for reason in self._reasons:
+ msg += '\n' + reason
+ return msg
+
+
+class MultipleProviders(Event):
+ """Multiple Providers"""
+
+ def __init__(self, item, candidates, runtime = False):
+ Event.__init__(self)
+ self._item = item
+ self._candidates = candidates
+ self._is_runtime = runtime
+
+ def isRuntime(self):
+ """
+ Is this a runtime issue?
+ """
+ return self._is_runtime
+
+ def getItem(self):
+ """
+        The name of the item to be built
+ """
+ return self._item
+
+ def getCandidates(self):
+ """
+ Get the possible Candidates for a PROVIDER.
+ """
+ return self._candidates
+
+ def __str__(self):
+ msg = "Multiple providers are available for %s%s (%s)" % (self._is_runtime and "runtime " or "",
+ self._item,
+ ", ".join(self._candidates))
+ rtime = ""
+ if self._is_runtime:
+ rtime = "R"
+ msg += "\nConsider defining a PREFERRED_%sPROVIDER entry to match %s" % (rtime, self._item)
+ return msg
+
+class ParseStarted(OperationStarted):
+ """Recipe parsing for the runqueue has begun"""
+ def __init__(self, total):
+ OperationStarted.__init__(self, "Recipe parsing Started")
+ self.total = total
+
+class ParseCompleted(OperationCompleted):
+ """Recipe parsing for the runqueue has completed"""
+ def __init__(self, cached, parsed, skipped, masked, virtuals, errors, total):
+ OperationCompleted.__init__(self, total, "Recipe parsing Completed")
+ self.cached = cached
+ self.parsed = parsed
+ self.skipped = skipped
+ self.virtuals = virtuals
+ self.masked = masked
+ self.errors = errors
+ self.sofar = cached + parsed
+
+class ParseProgress(OperationProgress):
+ """Recipe parsing progress"""
+ def __init__(self, current, total):
+ OperationProgress.__init__(self, current, total, "Recipe parsing")
+
+
+class CacheLoadStarted(OperationStarted):
+ """Loading of the dependency cache has begun"""
+ def __init__(self, total):
+ OperationStarted.__init__(self, "Loading cache Started")
+ self.total = total
+
+class CacheLoadProgress(OperationProgress):
+ """Cache loading progress"""
+ def __init__(self, current, total):
+ OperationProgress.__init__(self, current, total, "Loading cache")
+
+class CacheLoadCompleted(OperationCompleted):
+ """Cache loading is complete"""
+ def __init__(self, total, num_entries):
+ OperationCompleted.__init__(self, total, "Loading cache Completed")
+ self.num_entries = num_entries
+
+class TreeDataPreparationStarted(OperationStarted):
+ """Tree data preparation started"""
+ def __init__(self):
+ OperationStarted.__init__(self, "Preparing tree data Started")
+
+class TreeDataPreparationProgress(OperationProgress):
+ """Tree data preparation is in progress"""
+ def __init__(self, current, total):
+ OperationProgress.__init__(self, current, total, "Preparing tree data")
+
+class TreeDataPreparationCompleted(OperationCompleted):
+ """Tree data preparation completed"""
+ def __init__(self, total):
+ OperationCompleted.__init__(self, total, "Preparing tree data Completed")
+
+class DepTreeGenerated(Event):
+ """
+ Event when a dependency tree has been generated
+ """
+
+ def __init__(self, depgraph):
+ Event.__init__(self)
+ self._depgraph = depgraph
+
+class TargetsTreeGenerated(Event):
+ """
+ Event when a set of buildable targets has been generated
+ """
+ def __init__(self, model):
+ Event.__init__(self)
+ self._model = model
+
+class ReachableStamps(Event):
+ """
+ An event listing all stamps reachable after parsing
+ which the metadata may use to clean up stale data
+ """
+
+ def __init__(self, stamps):
+ Event.__init__(self)
+ self.stamps = stamps
+
+class FilesMatchingFound(Event):
+ """
+ Event when a list of files matching the supplied pattern has
+ been generated
+ """
+ def __init__(self, pattern, matches):
+ Event.__init__(self)
+ self._pattern = pattern
+ self._matches = matches
+
+class ConfigFilesFound(Event):
+ """
+ Event when a list of appropriate config files has been generated
+ """
+ def __init__(self, variable, values):
+ Event.__init__(self)
+ self._variable = variable
+ self._values = values
+
+class ConfigFilePathFound(Event):
+ """
+ Event when a path for a config file has been found
+ """
+ def __init__(self, path):
+ Event.__init__(self)
+ self._path = path
+
+class MsgBase(Event):
+ """Base class for messages"""
+
+ def __init__(self, msg):
+ self._message = msg
+ Event.__init__(self)
+
+class MsgDebug(MsgBase):
+ """Debug Message"""
+
+class MsgNote(MsgBase):
+ """Note Message"""
+
+class MsgWarn(MsgBase):
+ """Warning Message"""
+
+class MsgError(MsgBase):
+ """Error Message"""
+
+class MsgFatal(MsgBase):
+ """Fatal Message"""
+
+class MsgPlain(MsgBase):
+ """General output"""
+
+class LogExecTTY(Event):
+ """Send event containing program to spawn on tty of the logger"""
+ def __init__(self, msg, prog, sleep_delay, retries):
+ Event.__init__(self)
+ self.msg = msg
+ self.prog = prog
+ self.sleep_delay = sleep_delay
+ self.retries = retries
+
+class LogHandler(logging.Handler):
+ """Dispatch logging messages as bitbake events"""
+
+ def emit(self, record):
+ if record.exc_info:
+ etype, value, tb = record.exc_info
+ if hasattr(tb, 'tb_next'):
+ tb = list(bb.exceptions.extract_traceback(tb, context=3))
+ # Need to turn the value into something the logging system can pickle
+ record.bb_exc_info = (etype, value, tb)
+ record.bb_exc_formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
+ value = str(value)
+ record.exc_info = None
+ fire(record, None)
+
+ def filter(self, record):
+ record.taskpid = worker_pid
+ return True
+
+class MetadataEvent(Event):
+ """
+    Generic event targeted at OE-Core classes
+    to report information during asynchronous execution
+ """
+ def __init__(self, eventtype, eventdata):
+ Event.__init__(self)
+ self.type = eventtype
+ self._localdata = eventdata
+
+class ProcessStarted(Event):
+ """
+ Generic process started event (usually part of the initial startup)
+ where further progress events will be delivered
+ """
+ def __init__(self, processname, total):
+ Event.__init__(self)
+ self.processname = processname
+ self.total = total
+
+class ProcessProgress(Event):
+ """
+ Generic process progress event (usually part of the initial startup)
+ """
+ def __init__(self, processname, progress):
+ Event.__init__(self)
+ self.processname = processname
+ self.progress = progress
+
+class ProcessFinished(Event):
+ """
+ Generic process finished event (usually part of the initial startup)
+ """
+ def __init__(self, processname):
+ Event.__init__(self)
+ self.processname = processname
+
+class SanityCheck(Event):
+ """
+    Event to run sanity checks; handlers either raise errors or generate events to report status.
+ """
+ def __init__(self, generateevents = True):
+ Event.__init__(self)
+ self.generateevents = generateevents
+
+class SanityCheckPassed(Event):
+ """
+ Event to indicate sanity check has passed
+ """
+
+class SanityCheckFailed(Event):
+ """
+ Event to indicate sanity check has failed
+ """
+ def __init__(self, msg, network_error=False):
+ Event.__init__(self)
+ self._msg = msg
+ self._network_error = network_error
+
+class NetworkTest(Event):
+ """
+    Event to run network connectivity tests; handlers either raise errors or generate events to report status.
+ """
+ def __init__(self, generateevents = True):
+ Event.__init__(self)
+ self.generateevents = generateevents
+
+class NetworkTestPassed(Event):
+ """
+ Event to indicate network test has passed
+ """
+
+class NetworkTestFailed(Event):
+ """
+ Event to indicate network test has failed
+ """
+
+class FindSigInfoResult(Event):
+ """
+ Event to return results from findSigInfo command
+ """
+ def __init__(self, result):
+ Event.__init__(self)
+ self.result = result
diff --git a/poky/bitbake/lib/bb/exceptions.py b/poky/bitbake/lib/bb/exceptions.py
new file mode 100644
index 000000000..cd713439e
--- /dev/null
+++ b/poky/bitbake/lib/bb/exceptions.py
@@ -0,0 +1,91 @@
+
+import inspect
+import traceback
+import bb.namedtuple_with_abc
+from collections import namedtuple
+
+
+class TracebackEntry(namedtuple.abc):
+ """Pickleable representation of a traceback entry"""
+ _fields = 'filename lineno function args code_context index'
+ _header = ' File "{0.filename}", line {0.lineno}, in {0.function}{0.args}'
+
+ def format(self, formatter=None):
+ if not self.code_context:
+            return [self._header.format(self) + '\n']
+
+ formatted = [self._header.format(self) + ':\n']
+
+ for lineindex, line in enumerate(self.code_context):
+ if formatter:
+ line = formatter(line)
+
+ if lineindex == self.index:
+ formatted.append(' >%s' % line)
+ else:
+ formatted.append(' %s' % line)
+ return formatted
+
+ def __str__(self):
+ return ''.join(self.format())
+
+def _get_frame_args(frame):
+ """Get the formatted arguments and class (if available) for a frame"""
+ arginfo = inspect.getargvalues(frame)
+
+ try:
+ if not arginfo.args:
+ return '', None
+ # There have been reports from the field of python 2.6 which doesn't
+    # return a namedtuple here but simply a tuple, so fall back gracefully if
+ # args isn't present.
+ except AttributeError:
+ return '', None
+
+ firstarg = arginfo.args[0]
+ if firstarg == 'self':
+ self = arginfo.locals['self']
+ cls = self.__class__.__name__
+
+ arginfo.args.pop(0)
+ del arginfo.locals['self']
+ else:
+ cls = None
+
+ formatted = inspect.formatargvalues(*arginfo)
+ return formatted, cls
+
+def extract_traceback(tb, context=1):
+ frames = inspect.getinnerframes(tb, context)
+ for frame, filename, lineno, function, code_context, index in frames:
+ formatted_args, cls = _get_frame_args(frame)
+ if cls:
+ function = '%s.%s' % (cls, function)
+ yield TracebackEntry(filename, lineno, function, formatted_args,
+ code_context, index)
+
+def format_extracted(extracted, formatter=None, limit=None):
+ if limit:
+ extracted = extracted[-limit:]
+
+ formatted = []
+ for tracebackinfo in extracted:
+ formatted.extend(tracebackinfo.format(formatter))
+ return formatted
+
+
+def format_exception(etype, value, tb, context=1, limit=None, formatter=None):
+ formatted = ['Traceback (most recent call last):\n']
+
+ if hasattr(tb, 'tb_next'):
+ tb = extract_traceback(tb, context)
+
+ formatted.extend(format_extracted(tb, formatter, limit))
+ formatted.extend(traceback.format_exception_only(etype, value))
+ return formatted
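+# Illustrative usage sketch (not part of the original module; assumes
+# 'import sys'):
+#   try:
+#       raise ValueError("boom")
+#   except ValueError:
+#       etype, value, tb = sys.exc_info()
+#       print(''.join(format_exception(etype, value, tb, limit=5)))
+# This renders the pickleable TracebackEntry objects back into the familiar
+# "Traceback (most recent call last):" text.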
+
+def to_string(exc):
+ if isinstance(exc, SystemExit):
+ if not isinstance(exc.code, str):
+ return 'Exited with "%d"' % exc.code
+ return str(exc)
diff --git a/poky/bitbake/lib/bb/fetch2/__init__.py b/poky/bitbake/lib/bb/fetch2/__init__.py
new file mode 100644
index 000000000..72d6092de
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/__init__.py
@@ -0,0 +1,1864 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementations
+
+Classes for obtaining upstream sources for the
+BitBake build tools.
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2012 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import os, re
+import signal
+import logging
+import urllib.request, urllib.parse, urllib.error
+if 'git' not in urllib.parse.uses_netloc:
+ urllib.parse.uses_netloc.append('git')
+import operator
+import collections
+import subprocess
+import pickle
+import errno
+import bb.persist_data, bb.utils
+import bb.checksum
+import bb.process
+import bb.event
+
+__version__ = "2"
+_checksum_cache = bb.checksum.FileChecksumCache()
+
+logger = logging.getLogger("BitBake.Fetcher")
+
+class BBFetchException(Exception):
+ """Class all fetch exceptions inherit from"""
+ def __init__(self, message):
+ self.msg = message
+ Exception.__init__(self, message)
+
+ def __str__(self):
+ return self.msg
+
+class UntrustedUrl(BBFetchException):
+ """Exception raised when encountering a host not listed in BB_ALLOWED_NETWORKS"""
+ def __init__(self, url, message=''):
+ if message:
+ msg = message
+ else:
+ msg = "The URL: '%s' is not trusted and cannot be used" % url
+ self.url = url
+ BBFetchException.__init__(self, msg)
+ self.args = (url,)
+
+class MalformedUrl(BBFetchException):
+ """Exception raised when encountering an invalid url"""
+ def __init__(self, url, message=''):
+ if message:
+ msg = message
+ else:
+ msg = "The URL: '%s' is invalid and cannot be interpreted" % url
+ self.url = url
+ BBFetchException.__init__(self, msg)
+ self.args = (url,)
+
+class FetchError(BBFetchException):
+ """General fetcher exception when something happens incorrectly"""
+ def __init__(self, message, url = None):
+ if url:
+ msg = "Fetcher failure for URL: '%s'. %s" % (url, message)
+ else:
+ msg = "Fetcher failure: %s" % message
+ self.url = url
+ BBFetchException.__init__(self, msg)
+ self.args = (message, url)
+
+class ChecksumError(FetchError):
+ """Exception when mismatched checksum encountered"""
+ def __init__(self, message, url = None, checksum = None):
+ self.checksum = checksum
+ FetchError.__init__(self, message, url)
+
+class NoChecksumError(FetchError):
+ """Exception when no checksum is specified, but BB_STRICT_CHECKSUM is set"""
+
+class UnpackError(BBFetchException):
+ """General fetcher exception when something happens incorrectly when unpacking"""
+ def __init__(self, message, url):
+ msg = "Unpack failure for URL: '%s'. %s" % (url, message)
+ self.url = url
+ BBFetchException.__init__(self, msg)
+ self.args = (message, url)
+
+class NoMethodError(BBFetchException):
+ """Exception raised when there is no method to obtain a supplied url or set of urls"""
+ def __init__(self, url):
+ msg = "Could not find a fetcher which supports the URL: '%s'" % url
+ self.url = url
+ BBFetchException.__init__(self, msg)
+ self.args = (url,)
+
+class MissingParameterError(BBFetchException):
+ """Exception raised when a fetch method is missing a critical parameter in the url"""
+ def __init__(self, missing, url):
+ msg = "URL: '%s' is missing the required parameter '%s'" % (url, missing)
+ self.url = url
+ self.missing = missing
+ BBFetchException.__init__(self, msg)
+ self.args = (missing, url)
+
+class ParameterError(BBFetchException):
+ """Exception raised when a url cannot be proccessed due to invalid parameters."""
+ def __init__(self, message, url):
+ msg = "URL: '%s' has invalid parameters. %s" % (url, message)
+ self.url = url
+ BBFetchException.__init__(self, msg)
+ self.args = (message, url)
+
+class NetworkAccess(BBFetchException):
+ """Exception raised when network access is disabled but it is required."""
+ def __init__(self, url, cmd):
+ msg = "Network access disabled through BB_NO_NETWORK (or set indirectly due to use of BB_FETCH_PREMIRRORONLY) but access requested with command %s (for url %s)" % (cmd, url)
+ self.url = url
+ self.cmd = cmd
+ BBFetchException.__init__(self, msg)
+ self.args = (url, cmd)
+
+class NonLocalMethod(Exception):
+ def __init__(self):
+ Exception.__init__(self)
+
+class MissingChecksumEvent(bb.event.Event):
+ def __init__(self, url, md5sum, sha256sum):
+ self.url = url
+ self.checksums = {'md5sum': md5sum,
+ 'sha256sum': sha256sum}
+ bb.event.Event.__init__(self)
+
+
+class URI(object):
+ """
+ A class representing a generic URI, with methods for
+ accessing the URI components, and stringifies to the
+ URI.
+
+ It is constructed by calling it with a URI, or setting
+ the attributes manually:
+
+ uri = URI("http://example.com/")
+
+ uri = URI()
+ uri.scheme = 'http'
+ uri.hostname = 'example.com'
+ uri.path = '/'
+
+ It has the following attributes:
+
+ * scheme (read/write)
+ * userinfo (authentication information) (read/write)
+ * username (read/write)
+ * password (read/write)
+
+ Note, password is deprecated as of RFC 3986.
+
+ * hostname (read/write)
+ * port (read/write)
+ * hostport (read only)
+ "hostname:port", if both are set, otherwise just "hostname"
+ * path (read/write)
+ * path_quoted (read/write)
+ A URI quoted version of path
+ * params (dict) (read/write)
+ * query (dict) (read/write)
+ * relative (bool) (read only)
+ True if this is a "relative URI", (e.g. file:foo.diff)
+
+ It stringifies to the URI itself.
+
+ Some notes about relative URIs: while it's specified that
+ a URI beginning with <scheme>:// should either be directly
+ followed by a hostname or a /, the old URI handling of the
+    fetch2 library did not conform to this. Therefore, this URI
+    class has some kludges to make sure that URIs are parsed in
+    a way conforming to bitbake's current usage. This URI class
+ supports the following:
+
+ file:relative/path.diff (IETF compliant)
+ git:relative/path.git (IETF compliant)
+ git:///absolute/path.git (IETF compliant)
+ file:///absolute/path.diff (IETF compliant)
+
+ file://relative/path.diff (not IETF compliant)
+
+ But it does not support the following:
+
+ file://hostname/absolute/path.diff (would be IETF compliant)
+
+ Note that the last case only applies to a list of
+ "whitelisted" schemes (currently only file://), that requires
+ its URIs to not have a network location.
+ """
+
+ _relative_schemes = ['file', 'git']
+ _netloc_forbidden = ['file']
+
+ def __init__(self, uri=None):
+ self.scheme = ''
+ self.userinfo = ''
+ self.hostname = ''
+ self.port = None
+ self._path = ''
+ self.params = {}
+ self.query = {}
+ self.relative = False
+
+ if not uri:
+ return
+
+ # We hijack the URL parameters, since the way bitbake uses
+ # them are not quite RFC compliant.
+ uri, param_str = (uri.split(";", 1) + [None])[:2]
+
+ urlp = urllib.parse.urlparse(uri)
+ self.scheme = urlp.scheme
+
+ reparse = 0
+
+ # Coerce urlparse to make URI scheme use netloc
+ if not self.scheme in urllib.parse.uses_netloc:
+ urllib.parse.uses_params.append(self.scheme)
+ reparse = 1
+
+ # Make urlparse happy(/ier) by converting local resources
+ # to RFC compliant URL format. E.g.:
+ # file://foo.diff -> file:foo.diff
+ if urlp.scheme in self._netloc_forbidden:
+ uri = re.sub("(?<=:)//(?!/)", "", uri, 1)
+ reparse = 1
+
+ if reparse:
+ urlp = urllib.parse.urlparse(uri)
+
+ # Identify if the URI is relative or not
+ if urlp.scheme in self._relative_schemes and \
+ re.compile("^\w+:(?!//)").match(uri):
+ self.relative = True
+
+ if not self.relative:
+ self.hostname = urlp.hostname or ''
+ self.port = urlp.port
+
+ self.userinfo += urlp.username or ''
+
+ if urlp.password:
+ self.userinfo += ':%s' % urlp.password
+
+ self.path = urllib.parse.unquote(urlp.path)
+
+ if param_str:
+ self.params = self._param_str_split(param_str, ";")
+ if urlp.query:
+ self.query = self._param_str_split(urlp.query, "&")
+
+ def __str__(self):
+ userinfo = self.userinfo
+ if userinfo:
+ userinfo += '@'
+
+ return "%s:%s%s%s%s%s%s" % (
+ self.scheme,
+ '' if self.relative else '//',
+ userinfo,
+ self.hostport,
+ self.path_quoted,
+ self._query_str(),
+ self._param_str())
+
+ def _param_str(self):
+ return (
+ ''.join([';', self._param_str_join(self.params, ";")])
+ if self.params else '')
+
+ def _query_str(self):
+ return (
+ ''.join(['?', self._param_str_join(self.query, "&")])
+ if self.query else '')
+
+ def _param_str_split(self, string, elmdelim, kvdelim="="):
+ ret = collections.OrderedDict()
+ for k, v in [x.split(kvdelim, 1) for x in string.split(elmdelim)]:
+ ret[k] = v
+ return ret
+
+ def _param_str_join(self, dict_, elmdelim, kvdelim="="):
+ return elmdelim.join([kvdelim.join([k, v]) for k, v in dict_.items()])
+
+ @property
+ def hostport(self):
+ if not self.port:
+ return self.hostname
+ return "%s:%d" % (self.hostname, self.port)
+
+ @property
+ def path_quoted(self):
+ return urllib.parse.quote(self.path)
+
+ @path_quoted.setter
+ def path_quoted(self, path):
+ self.path = urllib.parse.unquote(path)
+
+ @property
+ def path(self):
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ self._path = path
+
+ if not path or re.compile("^/").match(path):
+ self.relative = False
+ else:
+ self.relative = True
+
+ @property
+ def username(self):
+ if self.userinfo:
+ return (self.userinfo.split(":", 1))[0]
+ return ''
+
+ @username.setter
+ def username(self, username):
+ password = self.password
+ self.userinfo = username
+ if password:
+ self.userinfo += ":%s" % password
+
+ @property
+ def password(self):
+ if self.userinfo and ":" in self.userinfo:
+ return (self.userinfo.split(":", 1))[1]
+ return ''
+
+ @password.setter
+ def password(self, password):
+ self.userinfo = "%s:%s" % (self.username, password)
+
+def decodeurl(url):
+ """Decodes an URL into the tokens (scheme, network location, path,
+ user, password, parameters).
+ """
+
+ m = re.compile('(?P<type>[^:]*)://((?P<user>[^/;]+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url)
+ if not m:
+ raise MalformedUrl(url)
+
+ type = m.group('type')
+ location = m.group('location')
+ if not location:
+ raise MalformedUrl(url)
+ user = m.group('user')
+ parm = m.group('parm')
+
+ locidx = location.find('/')
+ if locidx != -1 and type.lower() != 'file':
+ host = location[:locidx]
+ path = location[locidx:]
+ elif type.lower() == 'file':
+ host = ""
+ path = location
+ else:
+ host = location
+ path = ""
+ if user:
+ m = re.compile('(?P<user>[^:]+)(:?(?P<pswd>.*))').match(user)
+ if m:
+ user = m.group('user')
+ pswd = m.group('pswd')
+ else:
+ user = ''
+ pswd = ''
+
+ p = collections.OrderedDict()
+ if parm:
+ for s in parm.split(';'):
+ if s:
+ if not '=' in s:
+ raise MalformedUrl(url, "The URL: '%s' is invalid: parameter %s does not specify a value (missing '=')" % (url, s))
+ s1, s2 = s.split('=')
+ p[s1] = s2
+
+ return type, host, urllib.parse.unquote(path), user, pswd, p
+
+def encodeurl(decoded):
+ """Encodes a URL from tokens (scheme, network location, path,
+ user, password, parameters).
+ """
+
+ type, host, path, user, pswd, p = decoded
+
+ if not type:
+ raise MissingParameterError('type', "encoded from the data %s" % str(decoded))
+ url = '%s://' % type
+ if user and type != "file":
+ url += "%s" % user
+ if pswd:
+ url += ":%s" % pswd
+ url += "@"
+ if host and type != "file":
+ url += "%s" % host
+ if path:
+ # Standardise path to ensure comparisons work
+ while '//' in path:
+ path = path.replace("//", "/")
+ url += "%s" % urllib.parse.quote(path)
+ if p:
+ for parm in p:
+ url += ";%s=%s" % (parm, p[parm])
+
+ return url
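+# Round-trip sketch (hypothetical URL): decodeurl() and encodeurl() are
+# inverses for well-formed URLs, e.g.
+#   decodeurl("git://example.com/repo.git;protocol=https;branch=master")
+#   # -> ('git', 'example.com', '/repo.git', '', '',
+#   #     OrderedDict([('protocol', 'https'), ('branch', 'master')]))
+# and passing that tuple back through encodeurl() reproduces the URL.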
+
+def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
+ if not ud.url or not uri_find or not uri_replace:
+ logger.error("uri_replace: passed an undefined value, not replacing")
+ return None
+ uri_decoded = list(decodeurl(ud.url))
+ uri_find_decoded = list(decodeurl(uri_find))
+ uri_replace_decoded = list(decodeurl(uri_replace))
+ logger.debug(2, "For url %s comparing %s to %s" % (uri_decoded, uri_find_decoded, uri_replace_decoded))
+ result_decoded = ['', '', '', '', '', {}]
+ for loc, i in enumerate(uri_find_decoded):
+ result_decoded[loc] = uri_decoded[loc]
+ regexp = i
+ if loc == 0 and regexp and not regexp.endswith("$"):
+ # Leaving the type unanchored can mean "https" matching "file" can become "files"
+ # which is clearly undesirable.
+ regexp += "$"
+ if loc == 5:
+ # Handle URL parameters
+ if i:
+ # Any specified URL parameters must match
+ for k in uri_replace_decoded[loc]:
+ if uri_decoded[loc][k] != uri_replace_decoded[loc][k]:
+ return None
+ # Overwrite any specified replacement parameters
+ for k in uri_replace_decoded[loc]:
+ for l in replacements:
+ uri_replace_decoded[loc][k] = uri_replace_decoded[loc][k].replace(l, replacements[l])
+ result_decoded[loc][k] = uri_replace_decoded[loc][k]
+ elif (re.match(regexp, uri_decoded[loc])):
+ if not uri_replace_decoded[loc]:
+ result_decoded[loc] = ""
+ else:
+ for k in replacements:
+ uri_replace_decoded[loc] = uri_replace_decoded[loc].replace(k, replacements[k])
+ #bb.note("%s %s %s" % (regexp, uri_replace_decoded[loc], uri_decoded[loc]))
+ result_decoded[loc] = re.sub(regexp, uri_replace_decoded[loc], uri_decoded[loc], 1)
+ if loc == 2:
+ # Handle path manipulations
+ basename = None
+ if uri_decoded[0] != uri_replace_decoded[0] and mirrortarball:
+ # If the source and destination url types differ, must be a mirrortarball mapping
+ basename = os.path.basename(mirrortarball)
+ # Kill parameters, they make no sense for mirror tarballs
+ uri_decoded[5] = {}
+ elif ud.localpath and ud.method.supports_checksum(ud):
+ basename = os.path.basename(ud.localpath)
+ if basename and not result_decoded[loc].endswith(basename):
+ result_decoded[loc] = os.path.join(result_decoded[loc], basename)
+ else:
+ return None
+ result = encodeurl(result_decoded)
+ if result == ud.url:
+ return None
+ logger.debug(2, "For url %s returning %s" % (ud.url, result))
+ return result
+
+methods = []
+urldata_cache = {}
+saved_headrevs = {}
+
+def fetcher_init(d):
+ """
+ Called to initialize the fetchers once the configuration data is known.
+ Calls before this must not hit the cache.
+ """
+ # When to drop SCM head revisions controlled by user policy
+ srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear"
+ if srcrev_policy == "cache":
+ logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
+ elif srcrev_policy == "clear":
+ logger.debug(1, "Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
+ revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
+ try:
+ bb.fetch2.saved_headrevs = revs.items()
+        except Exception:
+ pass
+ revs.clear()
+ else:
+ raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
+
+ _checksum_cache.init_cache(d)
+
+ for m in methods:
+ if hasattr(m, "init"):
+ m.init(d)
+
+def fetcher_parse_save():
+ _checksum_cache.save_extras()
+
+def fetcher_parse_done():
+ _checksum_cache.save_merge()
+
+def fetcher_compare_revisions(d):
+    """
+    Compare the revisions in the persistent cache with the current values and
+    return true/false on whether they've changed.
+    """
+
+    data = dict(bb.persist_data.persist('BB_URI_HEADREVS', d).items())
+    data2 = dict(bb.fetch2.saved_headrevs)
+
+    for key in data:
+        if key not in data2 or data2[key] != data[key]:
+            logger.debug(1, "%s changed", key)
+            return True
+        else:
+            logger.debug(2, "%s did not change", key)
+    return False
+
+def mirror_from_string(data):
+ mirrors = (data or "").replace('\\n',' ').split()
+ # Split into pairs
+ if len(mirrors) % 2 != 0:
+ bb.warn('Invalid mirror data %s, should have paired members.' % data)
+ return list(zip(*[iter(mirrors)]*2))
+
+def verify_checksum(ud, d, precomputed={}):
+ """
+    Verify the MD5 and SHA256 checksums for the downloaded file
+
+ Raises a FetchError if one or both of the SRC_URI checksums do not match
+ the downloaded file, or if BB_STRICT_CHECKSUM is set and there are no
+ checksums specified.
+
+ Returns a dict of checksums that can be stored in a done stamp file and
+ passed in as precomputed parameter in a later call to avoid re-computing
+ the checksums from the file. This allows verifying the checksums of the
+ file against those in the recipe each time, rather than only after
+ downloading. See https://bugzilla.yoctoproject.org/show_bug.cgi?id=5571.
+ """
+
+ _MD5_KEY = "md5"
+ _SHA256_KEY = "sha256"
+
+ if ud.ignore_checksums or not ud.method.supports_checksum(ud):
+ return {}
+
+ if _MD5_KEY in precomputed:
+ md5data = precomputed[_MD5_KEY]
+ else:
+ md5data = bb.utils.md5_file(ud.localpath)
+
+ if _SHA256_KEY in precomputed:
+ sha256data = precomputed[_SHA256_KEY]
+ else:
+ sha256data = bb.utils.sha256_file(ud.localpath)
+
+ if ud.method.recommends_checksum(ud) and not ud.md5_expected and not ud.sha256_expected:
+ # If strict checking enabled and neither sum defined, raise error
+ strict = d.getVar("BB_STRICT_CHECKSUM") or "0"
+ if strict == "1":
+ logger.error('No checksum specified for %s, please add at least one to the recipe:\n'
+ 'SRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"' %
+ (ud.localpath, ud.md5_name, md5data,
+ ud.sha256_name, sha256data))
+ raise NoChecksumError('Missing SRC_URI checksum', ud.url)
+
+ bb.event.fire(MissingChecksumEvent(ud.url, md5data, sha256data), d)
+
+ if strict == "ignore":
+ return {
+ _MD5_KEY: md5data,
+ _SHA256_KEY: sha256data
+ }
+
+ # Log missing sums so user can more easily add them
+ logger.warning('Missing md5 SRC_URI checksum for %s, consider adding to the recipe:\n'
+ 'SRC_URI[%s] = "%s"',
+ ud.localpath, ud.md5_name, md5data)
+ logger.warning('Missing sha256 SRC_URI checksum for %s, consider adding to the recipe:\n'
+ 'SRC_URI[%s] = "%s"',
+ ud.localpath, ud.sha256_name, sha256data)
+
+ # We want to alert the user if a checksum is defined in the recipe but
+ # it does not match.
+ msg = ""
+ mismatch = False
+    if ud.md5_expected and ud.md5_expected != md5data:
+        msg = msg + "\nFile: '%s' has %s checksum %s when %s was expected" % (ud.localpath, 'md5', md5data, ud.md5_expected)
+        mismatch = True
+
+    if ud.sha256_expected and ud.sha256_expected != sha256data:
+        msg = msg + "\nFile: '%s' has %s checksum %s when %s was expected" % (ud.localpath, 'sha256', sha256data, ud.sha256_expected)
+        mismatch = True
+
+    if mismatch:
+        msg = msg + '\nIf this change is expected (e.g. you have upgraded to a new version without updating the checksums) then you can use these lines within the recipe:\nSRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"\nOtherwise you should retry the download and/or check with upstream to determine if the file has become corrupted or otherwise unexpectedly modified.\n' % (ud.md5_name, md5data, ud.sha256_name, sha256data)
+
+    if msg:
+        raise ChecksumError('Checksum mismatch!%s' % msg, ud.url, md5data)
+
+ return {
+ _MD5_KEY: md5data,
+ _SHA256_KEY: sha256data
+ }
+
+
+def verify_donestamp(ud, d, origud=None):
+ """
+ Check whether the done stamp file has the right checksums (if the fetch
+ method supports them). If it doesn't, delete the done stamp and force
+ a re-download.
+
+ Returns True, if the donestamp exists and is valid, False otherwise. When
+ returning False, any existing done stamps are removed.
+ """
+ if not ud.needdonestamp or (origud and not origud.needdonestamp):
+ return True
+
+ if not os.path.exists(ud.localpath):
+ # local path does not exist
+ if os.path.exists(ud.donestamp):
+ # done stamp exists, but the downloaded file does not; the done stamp
+ # must be incorrect, re-trigger the download
+ bb.utils.remove(ud.donestamp)
+ return False
+
+ if (not ud.method.supports_checksum(ud) or
+ (origud and not origud.method.supports_checksum(origud))):
+ # if done stamp exists and checksums not supported; assume the local
+ # file is current
+ return os.path.exists(ud.donestamp)
+
+ precomputed_checksums = {}
+ # Only re-use the precomputed checksums if the donestamp is newer than the
+ # file. Do not rely on the mtime of directories, though. If ud.localpath is
+ # a directory, there will probably not be any checksums anyway.
+ if os.path.exists(ud.donestamp) and (os.path.isdir(ud.localpath) or
+ os.path.getmtime(ud.localpath) < os.path.getmtime(ud.donestamp)):
+ try:
+ with open(ud.donestamp, "rb") as cachefile:
+ pickled = pickle.Unpickler(cachefile)
+ precomputed_checksums.update(pickled.load())
+ except Exception as e:
+            # Avoid the warnings on the upgrade path from empty done stamp
+            # files to those containing the checksums.
+ if not isinstance(e, EOFError):
+ # Ignore errors, they aren't fatal
+ logger.warning("Couldn't load checksums from donestamp %s: %s "
+ "(msg: %s)" % (ud.donestamp, type(e).__name__,
+ str(e)))
+
+ try:
+ checksums = verify_checksum(ud, d, precomputed_checksums)
+ # If the cache file did not have the checksums, compute and store them
+ # as an upgrade path from the previous done stamp file format.
+ if checksums != precomputed_checksums:
+ with open(ud.donestamp, "wb") as cachefile:
+ p = pickle.Pickler(cachefile, 2)
+ p.dump(checksums)
+ return True
+ except ChecksumError as e:
+ # Checksums failed to verify, trigger re-download and remove the
+ # incorrect stamp file.
+ logger.warning("Checksum mismatch for local file %s\n"
+ "Cleaning and trying again." % ud.localpath)
+ if os.path.exists(ud.localpath):
+ rename_bad_checksum(ud, e.checksum)
+ bb.utils.remove(ud.donestamp)
+ return False
+
+
+def update_stamp(ud, d):
+ """
+ donestamp is file stamp indicating the whole fetching is done
+ this function update the stamp after verifying the checksum
+ """
+ if not ud.needdonestamp:
+ return
+
+ if os.path.exists(ud.donestamp):
+ # Touch the done stamp file to show active use of the download
+ try:
+ os.utime(ud.donestamp, None)
+        except Exception:
+ # Errors aren't fatal here
+ pass
+ else:
+ try:
+ checksums = verify_checksum(ud, d)
+ # Store the checksums for later re-verification against the recipe
+ with open(ud.donestamp, "wb") as cachefile:
+ p = pickle.Pickler(cachefile, 2)
+ p.dump(checksums)
+ except ChecksumError as e:
+ # Checksums failed to verify, trigger re-download and remove the
+ # incorrect stamp file.
+ logger.warning("Checksum mismatch for local file %s\n"
+ "Cleaning and trying again." % ud.localpath)
+ if os.path.exists(ud.localpath):
+ rename_bad_checksum(ud, e.checksum)
+ bb.utils.remove(ud.donestamp)
+ raise
+
+def subprocess_setup():
+ # Python installs a SIGPIPE handler by default. This is usually not what
+ # non-Python subprocesses expect.
+ # SIGPIPE errors are known issues with gzip/bash
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+def get_autorev(d):
+    # Do not cache the source revision in the autorev case unless the cache policy allows it
+ if d.getVar('BB_SRCREV_POLICY') != "cache":
+ d.setVar('BB_DONT_CACHE', '1')
+ return "AUTOINC"
+
+def get_srcrev(d, method_name='sortable_revision'):
+ """
+ Return the revision string, usually for use in the version string (PV) of the current package
+ Most packages usually only have one SCM so we just pass on the call.
+ In the multi SCM case, we build a value based on SRCREV_FORMAT which must
+ have been set.
+
+ The idea here is that we put the string "AUTOINC+" into return value if the revisions are not
+ incremental, other code is then responsible for turning that into an increasing value (if needed)
+
+ A method_name can be supplied to retrieve an alternatively formatted revision from a fetcher, if
+ that fetcher provides a method with the given name and the same signature as sortable_revision.
+ """
+
+ scms = []
+ fetcher = Fetch(d.getVar('SRC_URI').split(), d)
+ urldata = fetcher.ud
+ for u in urldata:
+ if urldata[u].method.supports_srcrev():
+ scms.append(u)
+
+ if len(scms) == 0:
+ raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI")
+
+ if len(scms) == 1 and len(urldata[scms[0]].names) == 1:
+ autoinc, rev = getattr(urldata[scms[0]].method, method_name)(urldata[scms[0]], d, urldata[scms[0]].names[0])
+ if len(rev) > 10:
+ rev = rev[:10]
+ if autoinc:
+ return "AUTOINC+" + rev
+ return rev
+
+ #
+    # Multiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT
+ #
+ format = d.getVar('SRCREV_FORMAT')
+ if not format:
+ raise FetchError("The SRCREV_FORMAT variable must be set when multiple SCMs are used.")
+
+ name_to_rev = {}
+ seenautoinc = False
+ for scm in scms:
+ ud = urldata[scm]
+ for name in ud.names:
+ autoinc, rev = getattr(ud.method, method_name)(ud, d, name)
+ seenautoinc = seenautoinc or autoinc
+ if len(rev) > 10:
+ rev = rev[:10]
+ name_to_rev[name] = rev
+ # Replace names by revisions in the SRCREV_FORMAT string. The approach used
+ # here can handle names being prefixes of other names and names appearing
+ # as substrings in revisions (in which case the name should not be
+ # expanded). The '|' regular expression operator tries matches from left to
+ # right, so we need to sort the names with the longest ones first.
+ names_descending_len = sorted(name_to_rev, key=len, reverse=True)
+ name_to_rev_re = "|".join(re.escape(name) for name in names_descending_len)
+ format = re.sub(name_to_rev_re, lambda match: name_to_rev[match.group(0)], format)
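+    # e.g. with SRC_URI names "machine" and "meta", SRCREV_FORMAT = "machine_meta"
+    # expands to "<machine rev>_<meta rev>" (each revision truncated to 10 characters above)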
+
+ if seenautoinc:
+ format = "AUTOINC+" + format
+
+ return format
+
+def localpath(url, d):
+ fetcher = bb.fetch2.Fetch([url], d)
+ return fetcher.localpath(url)
+
+def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
+ """
+ Run cmd returning the command output
+ Raise an error if interrupted or cmd fails
+ Optionally echo command output to stdout
+ Optionally remove the files/directories listed in cleanup upon failure
+ """
+
+    # Need to export PATH as the binary could be in metadata paths
+    # rather than host-provided ones. Also include some other variables.
+    # FIXME: Should this really include all exported variables?
+ exportvars = ['HOME', 'PATH',
+ 'HTTP_PROXY', 'http_proxy',
+ 'HTTPS_PROXY', 'https_proxy',
+ 'FTP_PROXY', 'ftp_proxy',
+ 'FTPS_PROXY', 'ftps_proxy',
+ 'NO_PROXY', 'no_proxy',
+ 'ALL_PROXY', 'all_proxy',
+ 'GIT_PROXY_COMMAND',
+ 'GIT_SSL_CAINFO',
+ 'GIT_SMART_HTTP',
+ 'SSH_AUTH_SOCK', 'SSH_AGENT_PID',
+ 'SOCKS5_USER', 'SOCKS5_PASSWD',
+ 'DBUS_SESSION_BUS_ADDRESS',
+ 'P4CONFIG']
+
+ if not cleanup:
+ cleanup = []
+
+ # If PATH contains WORKDIR which contains PV which contains SRCPV we
+ # can end up in circular recursion here so give the option of breaking it
+ # in a data store copy.
+ try:
+ d.getVar("PV")
+ except bb.data_smart.ExpansionError:
+ d = bb.data.createCopy(d)
+ d.setVar("PV", "fetcheravoidrecurse")
+
+ origenv = d.getVar("BB_ORIGENV", False)
+ for var in exportvars:
+ val = d.getVar(var) or (origenv and origenv.getVar(var))
+ if val:
+ cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)
+
+ # Disable pseudo as it may affect ssh, potentially causing it to hang.
+ cmd = 'export PSEUDO_DISABLED=1; ' + cmd
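+    # The resulting command line has the shape:
+    #   export PSEUDO_DISABLED=1; export VAR="val"; ... ; <original cmd>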
+
+ logger.debug(1, "Running %s", cmd)
+
+ success = False
+ error_message = ""
+
+ try:
+ (output, errors) = bb.process.run(cmd, log=log, shell=True, stderr=subprocess.PIPE, cwd=workdir)
+ success = True
+ except bb.process.NotFoundError as e:
+ error_message = "Fetch command %s" % (e.command)
+ except bb.process.ExecutionError as e:
+ if e.stdout:
+ output = "output:\n%s\n%s" % (e.stdout, e.stderr)
+ elif e.stderr:
+ output = "output:\n%s" % e.stderr
+ else:
+ output = "no output"
+ error_message = "Fetch command %s failed with exit code %s, %s" % (e.command, e.exitcode, output)
+ except bb.process.CmdError as e:
+ error_message = "Fetch command %s could not be run:\n%s" % (e.command, e.msg)
+ if not success:
+ for f in cleanup:
+ try:
+ bb.utils.remove(f, True)
+ except OSError:
+ pass
+
+ raise FetchError(error_message)
+
+ return output
+
+def check_network_access(d, info, url):
+ """
+    Log remote network access, and raise an error if BB_NO_NETWORK is set or
+    the given URI is untrusted
+ """
+ if d.getVar("BB_NO_NETWORK") == "1":
+ raise NetworkAccess(url, info)
+ elif not trusted_network(d, url):
+ raise UntrustedUrl(url, info)
+ else:
+ logger.debug(1, "Fetcher accessed the network with the command %s" % info)
+
+def build_mirroruris(origud, mirrors, ld):
+ uris = []
+ uds = []
+
+ replacements = {}
+ replacements["TYPE"] = origud.type
+ replacements["HOST"] = origud.host
+ replacements["PATH"] = origud.path
+ replacements["BASENAME"] = origud.path.split("/")[-1]
+ replacements["MIRRORNAME"] = origud.host.replace(':','.') + origud.path.replace('/', '.').replace('*', '.')
+
+ def adduri(ud, uris, uds, mirrors, tarballs):
+ for line in mirrors:
+ try:
+ (find, replace) = line
+ except ValueError:
+ continue
+
+ for tarball in tarballs:
+ newuri = uri_replace(ud, find, replace, replacements, ld, tarball)
+ if not newuri or newuri in uris or newuri == origud.url:
+ continue
+
+ if not trusted_network(ld, newuri):
+ logger.debug(1, "Mirror %s not in the list of trusted networks, skipping" % (newuri))
+ continue
+
+ # Create a local copy of the mirrors minus the current line
+ # this will prevent us from recursively processing the same line
+ # as well as indirect recursion A -> B -> C -> A
+ localmirrors = list(mirrors)
+ localmirrors.remove(line)
+
+ try:
+ newud = FetchData(newuri, ld)
+ newud.setup_localpath(ld)
+ except bb.fetch2.BBFetchException as e:
+ logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
+ logger.debug(1, str(e))
+ try:
+ # setup_localpath of file:// urls may fail, we should still see
+ # if mirrors of the url exist
+ adduri(newud, uris, uds, localmirrors, tarballs)
+ except UnboundLocalError:
+ pass
+ continue
+ uris.append(newuri)
+ uds.append(newud)
+
+ adduri(newud, uris, uds, localmirrors, tarballs)
+
+ adduri(origud, uris, uds, mirrors, origud.mirrortarballs or [None])
+
+ return uris, uds
+
+def rename_bad_checksum(ud, suffix):
+ """
+    Rename the local file to carry the given suffix
+ """
+
+ if ud.localpath is None:
+ return
+
+ new_localpath = "%s_bad-checksum_%s" % (ud.localpath, suffix)
+ bb.warn("Renaming %s to %s" % (ud.localpath, new_localpath))
+ bb.utils.movefile(ud.localpath, new_localpath)
+
+
+def try_mirror_url(fetch, origud, ud, ld, check = False):
+    # Returning None or a value means we're finished
+    # False means try another url
+
+ if ud.lockfile and ud.lockfile != origud.lockfile:
+ lf = bb.utils.lockfile(ud.lockfile)
+
+ try:
+ if check:
+ found = ud.method.checkstatus(fetch, ud, ld)
+ if found:
+ return found
+ return False
+
+ if not verify_donestamp(ud, ld, origud) or ud.method.need_update(ud, ld):
+ ud.method.download(ud, ld)
+ if hasattr(ud.method,"build_mirror_data"):
+ ud.method.build_mirror_data(ud, ld)
+
+ if not ud.localpath or not os.path.exists(ud.localpath):
+ return False
+
+ if ud.localpath == origud.localpath:
+ return ud.localpath
+
+ # We may be obtaining a mirror tarball which needs further processing by the real fetcher
+ # If that tarball is a local file:// we need to provide a symlink to it
+ dldir = ld.getVar("DL_DIR")
+
+ if origud.mirrortarballs and os.path.basename(ud.localpath) in origud.mirrortarballs and os.path.basename(ud.localpath) != os.path.basename(origud.localpath):
+ # Create donestamp in old format to avoid triggering a re-download
+ if ud.donestamp:
+ bb.utils.mkdirhier(os.path.dirname(ud.donestamp))
+ open(ud.donestamp, 'w').close()
+ dest = os.path.join(dldir, os.path.basename(ud.localpath))
+ if not os.path.exists(dest):
+ # In case this is executing without any file locks held (as is
+ # the case for file:// URLs), two tasks may end up here at the
+ # same time, in which case we do not want the second task to
+ # fail when the link has already been created by the first task.
+ try:
+ os.symlink(ud.localpath, dest)
+ except FileExistsError:
+ pass
+ if not verify_donestamp(origud, ld) or origud.method.need_update(origud, ld):
+ origud.method.download(origud, ld)
+ if hasattr(origud.method, "build_mirror_data"):
+ origud.method.build_mirror_data(origud, ld)
+ return origud.localpath
+ # Otherwise the result is a local file:// and we symlink to it
+ if not os.path.exists(origud.localpath):
+ if os.path.islink(origud.localpath):
+ # Broken symbolic link
+ os.unlink(origud.localpath)
+
+ # As per above, in case two tasks end up here simultaneously.
+ try:
+ os.symlink(ud.localpath, origud.localpath)
+ except FileExistsError:
+ pass
+ update_stamp(origud, ld)
+ return ud.localpath
+
+ except bb.fetch2.NetworkAccess:
+ raise
+
+ except IOError as e:
+ if e.errno in [os.errno.ESTALE]:
+ logger.warning("Stale Error Observed %s." % ud.url)
+ return False
+ raise
+
+ except bb.fetch2.BBFetchException as e:
+ if isinstance(e, ChecksumError):
+ logger.warning("Mirror checksum failure for url %s (original url: %s)\nCleaning and trying again." % (ud.url, origud.url))
+ logger.warning(str(e))
+ if os.path.exists(ud.localpath):
+ rename_bad_checksum(ud, e.checksum)
+ elif isinstance(e, NoChecksumError):
+ raise
+ else:
+ logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (ud.url, origud.url))
+ logger.debug(1, str(e))
+ try:
+ ud.method.clean(ud, ld)
+ except UnboundLocalError:
+ pass
+ return False
+ finally:
+ if ud.lockfile and ud.lockfile != origud.lockfile:
+ bb.utils.unlockfile(lf)
+
+
+def try_mirrors(fetch, d, origud, mirrors, check = False):
+ """
+ Try to use a mirrored version of the sources.
+    This method will be called automatically before the fetchers run.
+
+    fetch is the Fetch instance in use
+    d is a bb.data instance
+    origud is the FetchData of the original uri we're trying to download
+    mirrors is the list of mirrors we're going to try
+ """
+ ld = d.createCopy()
+
+ uris, uds = build_mirroruris(origud, mirrors, ld)
+
+ for index, uri in enumerate(uris):
+ ret = try_mirror_url(fetch, origud, uds[index], ld, check)
+ if ret != False:
+ return ret
+ return None
+
+def trusted_network(d, url):
+ """
+    Return True if network access to the given url's host is allowed: either
+    networking is disabled, BB_ALLOWED_NETWORKS is unset, or the host matches
+    one of the allowed entries (set globally or for a specific recipe).
+ """
+ if d.getVar('BB_NO_NETWORK') == "1":
+ return True
+
+ pkgname = d.expand(d.getVar('PN', False))
+ trusted_hosts = d.getVarFlag('BB_ALLOWED_NETWORKS', pkgname, False)
+
+ if not trusted_hosts:
+ trusted_hosts = d.getVar('BB_ALLOWED_NETWORKS')
+
+ # Not enabled.
+ if not trusted_hosts:
+ return True
+
+ scheme, network, path, user, passwd, param = decodeurl(url)
+
+ if not network:
+ return True
+
+ network = network.split(':')[0]
+ network = network.lower()
+
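+    # "*." entries match the host itself and any subdomain, so e.g.
+    # BB_ALLOWED_NETWORKS = "*.example.com" permits both example.com
+    # and git.example.com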
+ for host in trusted_hosts.split(" "):
+ host = host.lower()
+ if host.startswith("*.") and ("." + network).endswith(host[1:]):
+ return True
+ if host == network:
+ return True
+
+ return False
+
+def srcrev_internal_helper(ud, d, name):
+ """
+    Return:
+    a) a source revision if specified
+    b) the latest revision if SRCREV="AUTOINC"
+    and raise FetchError if no valid revision can be determined
+ """
+
+ srcrev = None
+ pn = d.getVar("PN")
+ attempts = []
+ if name != '' and pn:
+ attempts.append("SRCREV_%s_pn-%s" % (name, pn))
+ if name != '':
+ attempts.append("SRCREV_%s" % name)
+ if pn:
+ attempts.append("SRCREV_pn-%s" % pn)
+ attempts.append("SRCREV")
+
+ for a in attempts:
+ srcrev = d.getVar(a)
+ if srcrev and srcrev != "INVALID":
+ break
+
+ if 'rev' in ud.parm and 'tag' in ud.parm:
+ raise FetchError("Please specify a ;rev= parameter or a ;tag= parameter in the url %s but not both." % (ud.url))
+
+ if 'rev' in ud.parm or 'tag' in ud.parm:
+ if 'rev' in ud.parm:
+ parmrev = ud.parm['rev']
+ else:
+ parmrev = ud.parm['tag']
+ if srcrev == "INVALID" or not srcrev:
+ return parmrev
+ if srcrev != parmrev:
+ raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please specify one valid value" % (srcrev, parmrev))
+ return parmrev
+
+ if srcrev == "INVALID" or not srcrev:
+ raise FetchError("Please set a valid SRCREV for url %s (possible key names are %s, or use a ;rev=X URL parameter)" % (str(attempts), ud.url), ud.url)
+ if srcrev == "AUTOINC":
+ srcrev = ud.method.latest_revision(ud, d, name)
+
+ return srcrev
+
+def get_checksum_file_list(d):
+ """ Get a list of files checksum in SRC_URI
+
+ Returns the resolved local paths of all local file entries in
+ SRC_URI as a space-separated string
+ """
+ fetch = Fetch([], d, cache = False, localonly = True)
+
+ dl_dir = d.getVar('DL_DIR')
+ filelist = []
+ for u in fetch.urls:
+ ud = fetch.ud[u]
+
+ if ud and isinstance(ud.method, local.Local):
+ paths = ud.method.localpaths(ud, d)
+ for f in paths:
+ pth = ud.decodedurl
+ if '*' in pth:
+ f = os.path.join(os.path.abspath(f), pth)
+ if f.startswith(dl_dir):
+ # The local fetcher's behaviour is to return a path under DL_DIR if it couldn't find the file anywhere else
+ if os.path.exists(f):
+ bb.warn("Getting checksum for %s SRC_URI entry %s: file not found except in DL_DIR" % (d.getVar('PN'), os.path.basename(f)))
+ else:
+ bb.warn("Unable to get checksum for %s SRC_URI entry %s: file could not be found" % (d.getVar('PN'), os.path.basename(f)))
+ filelist.append(f + ":" + str(os.path.exists(f)))
+
+ return " ".join(filelist)
+
+def get_file_checksums(filelist, pn):
+ """Get a list of the checksums for a list of local files
+
+ Returns the checksums for a list of local files, caching the results as
+ it proceeds
+
+ """
+ return _checksum_cache.get_checksums(filelist, pn)
+
+
+class FetchData(object):
+ """
+ A class which represents the fetcher state for a given URI.
+ """
+ def __init__(self, url, d, localonly = False):
+ # localpath is the location of a downloaded result. If not set, the file is local.
+ self.donestamp = None
+ self.needdonestamp = True
+ self.localfile = ""
+ self.localpath = None
+ self.lockfile = None
+ self.mirrortarballs = []
+ self.basename = None
+ self.basepath = None
+ (self.type, self.host, self.path, self.user, self.pswd, self.parm) = decodeurl(d.expand(url))
+ self.date = self.getSRCDate(d)
+ self.url = url
+ if not self.user and "user" in self.parm:
+ self.user = self.parm["user"]
+ if not self.pswd and "pswd" in self.parm:
+ self.pswd = self.parm["pswd"]
+ self.setup = False
+
+ if "name" in self.parm:
+ self.md5_name = "%s.md5sum" % self.parm["name"]
+ self.sha256_name = "%s.sha256sum" % self.parm["name"]
+ else:
+ self.md5_name = "md5sum"
+ self.sha256_name = "sha256sum"
+ if self.md5_name in self.parm:
+ self.md5_expected = self.parm[self.md5_name]
+ elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]:
+ self.md5_expected = None
+ else:
+ self.md5_expected = d.getVarFlag("SRC_URI", self.md5_name)
+ if self.sha256_name in self.parm:
+ self.sha256_expected = self.parm[self.sha256_name]
+ elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]:
+ self.sha256_expected = None
+ else:
+ self.sha256_expected = d.getVarFlag("SRC_URI", self.sha256_name)
+ self.ignore_checksums = False
+
+ self.names = self.parm.get("name",'default').split(',')
+
+ self.method = None
+ for m in methods:
+ if m.supports(self, d):
+ self.method = m
+ break
+
+ if not self.method:
+ raise NoMethodError(url)
+
+ if localonly and not isinstance(self.method, local.Local):
+ raise NonLocalMethod()
+
+ if self.parm.get("proto", None) and "protocol" not in self.parm:
+ logger.warning('Consider updating %s recipe to use "protocol" not "proto" in SRC_URI.', d.getVar('PN'))
+ self.parm["protocol"] = self.parm.get("proto", None)
+
+ if hasattr(self.method, "urldata_init"):
+ self.method.urldata_init(self, d)
+
+ if "localpath" in self.parm:
+ # if user sets localpath for file, use it instead.
+ self.localpath = self.parm["localpath"]
+ self.basename = os.path.basename(self.localpath)
+ elif self.localfile:
+ self.localpath = self.method.localpath(self, d)
+
+ dldir = d.getVar("DL_DIR")
+
+ if not self.needdonestamp:
+ return
+
+ # Note: .done and .lock files should always be in DL_DIR whereas localpath may not be.
+ if self.localpath and self.localpath.startswith(dldir):
+ basepath = self.localpath
+ elif self.localpath:
+ basepath = dldir + os.sep + os.path.basename(self.localpath)
+ elif self.basepath or self.basename:
+ basepath = dldir + os.sep + (self.basepath or self.basename)
+ else:
+ bb.fatal("Can't determine lock path for url %s" % url)
+
+ self.donestamp = basepath + '.done'
+ self.lockfile = basepath + '.lock'
+
+ def setup_revisions(self, d):
+ self.revisions = {}
+ for name in self.names:
+ self.revisions[name] = srcrev_internal_helper(self, d, name)
+
+        # Add compatibility code for the case where no name is specified
+ if len(self.names) == 1:
+ self.revision = self.revisions[self.names[0]]
+
+ def setup_localpath(self, d):
+ if not self.localpath:
+ self.localpath = self.method.localpath(self, d)
+
+ def getSRCDate(self, d):
+ """
+ Return the SRC Date for the component
+
+ d the bb.data module
+ """
+ if "srcdate" in self.parm:
+ return self.parm['srcdate']
+
+ pn = d.getVar("PN")
+
+ if pn:
+ return d.getVar("SRCDATE_%s" % pn) or d.getVar("SRCDATE") or d.getVar("DATE")
+
+ return d.getVar("SRCDATE") or d.getVar("DATE")
+
+class FetchMethod(object):
+ """Base class for 'fetch'ing data"""
+
+ def __init__(self, urls=None):
+        self.urls = urls or []
+
+ def supports(self, urldata, d):
+ """
+ Check to see if this fetch class supports a given url.
+ """
+ return 0
+
+ def localpath(self, urldata, d):
+ """
+ Return the local filename of a given url assuming a successful fetch.
+        Can also set up variables in urldata for use in download() (saving code
+        duplication and duplicate code execution)
+ """
+ return os.path.join(d.getVar("DL_DIR"), urldata.localfile)
+
+ def supports_checksum(self, urldata):
+ """
+ Is localpath something that can be represented by a checksum?
+ """
+
+        # We cannot compute checksums for directories
+        if os.path.isdir(urldata.localpath):
+            return False
+        if "*" in urldata.localpath:
+ return False
+
+ return True
+
+ def recommends_checksum(self, urldata):
+ """
+        Is this backend one for which checksumming is recommended (i.e. should
+        warnings be displayed if there is no checksum)?
+ """
+ return False
+
+ def _strip_leading_slashes(self, relpath):
+ """
+        Remove leading slashes, as os.path.join can't cope with them
+ """
+ while os.path.isabs(relpath):
+ relpath = relpath[1:]
+ return relpath
+
+ def setUrls(self, urls):
+ self.__urls = urls
+
+ def getUrls(self):
+ return self.__urls
+
+ urls = property(getUrls, setUrls, None, "Urls property")
+
+ def need_update(self, ud, d):
+ """
+ Force a fetch, even if localpath exists?
+ """
+ if os.path.exists(ud.localpath):
+ return False
+ return True
+
+ def supports_srcrev(self):
+ """
+ The fetcher supports auto source revisions (SRCREV)
+ """
+ return False
+
+ def download(self, urldata, d):
+ """
+ Fetch urls
+ Assumes localpath was called first
+ """
+        raise NoMethodError(urldata.url)
+
+ def unpack(self, urldata, rootdir, data):
+ iterate = False
+ file = urldata.localpath
+
+        # Localpath can't deal with 'dir/*' entries, so it converts them to '.',
+        # but that must be corrected back when copying local files
+ if urldata.basename == '*' and file.endswith('/.'):
+ file = '%s/%s' % (file.rstrip('/.'), urldata.path)
+
+ try:
+ unpack = bb.utils.to_boolean(urldata.parm.get('unpack'), True)
+ except ValueError as exc:
+ bb.fatal("Invalid value for 'unpack' parameter for %s: %s" %
+ (file, urldata.parm.get('unpack')))
+
+ base, ext = os.path.splitext(file)
+ if ext in ['.gz', '.bz2', '.Z', '.xz', '.lz']:
+ efile = os.path.join(rootdir, os.path.basename(base))
+ else:
+ efile = file
+ cmd = None
+
+ if unpack:
+ if file.endswith('.tar'):
+ cmd = 'tar x --no-same-owner -f %s' % file
+ elif file.endswith('.tgz') or file.endswith('.tar.gz') or file.endswith('.tar.Z'):
+ cmd = 'tar xz --no-same-owner -f %s' % file
+ elif file.endswith('.tbz') or file.endswith('.tbz2') or file.endswith('.tar.bz2'):
+ cmd = 'bzip2 -dc %s | tar x --no-same-owner -f -' % file
+ elif file.endswith('.gz') or file.endswith('.Z') or file.endswith('.z'):
+ cmd = 'gzip -dc %s > %s' % (file, efile)
+ elif file.endswith('.bz2'):
+ cmd = 'bzip2 -dc %s > %s' % (file, efile)
+ elif file.endswith('.txz') or file.endswith('.tar.xz'):
+ cmd = 'xz -dc %s | tar x --no-same-owner -f -' % file
+ elif file.endswith('.xz'):
+ cmd = 'xz -dc %s > %s' % (file, efile)
+ elif file.endswith('.tar.lz'):
+ cmd = 'lzip -dc %s | tar x --no-same-owner -f -' % file
+ elif file.endswith('.lz'):
+ cmd = 'lzip -dc %s > %s' % (file, efile)
+ elif file.endswith('.tar.7z'):
+ cmd = '7z x -so %s | tar x --no-same-owner -f -' % file
+ elif file.endswith('.7z'):
+ cmd = '7za x -y %s 1>/dev/null' % file
+ elif file.endswith('.zip') or file.endswith('.jar'):
+ try:
+ dos = bb.utils.to_boolean(urldata.parm.get('dos'), False)
+ except ValueError as exc:
+ bb.fatal("Invalid value for 'dos' parameter for %s: %s" %
+ (file, urldata.parm.get('dos')))
+ cmd = 'unzip -q -o'
+ if dos:
+ cmd = '%s -a' % cmd
+ cmd = "%s '%s'" % (cmd, file)
+ elif file.endswith('.rpm') or file.endswith('.srpm'):
+ if 'extract' in urldata.parm:
+ unpack_file = urldata.parm.get('extract')
+ cmd = 'rpm2cpio.sh %s | cpio -id %s' % (file, unpack_file)
+ iterate = True
+ iterate_file = unpack_file
+ else:
+ cmd = 'rpm2cpio.sh %s | cpio -id' % (file)
+ elif file.endswith('.deb') or file.endswith('.ipk'):
+ output = subprocess.check_output('ar -t %s' % file, preexec_fn=subprocess_setup, shell=True)
+ datafile = None
+ if output:
+ for line in output.decode().splitlines():
+ if line.startswith('data.tar.'):
+ datafile = line
+ break
+ else:
+ raise UnpackError("Unable to unpack deb/ipk package - does not contain data.tar.* file", urldata.url)
+ else:
+ raise UnpackError("Unable to unpack deb/ipk package - could not list contents", urldata.url)
+ cmd = 'ar x %s %s && tar --no-same-owner -xpf %s && rm %s' % (file, datafile, datafile, datafile)
+
+ # If 'subdir' param exists, create a dir and use it as destination for unpack cmd
+ if 'subdir' in urldata.parm:
+ subdir = urldata.parm.get('subdir')
+ if os.path.isabs(subdir):
+ if not os.path.realpath(subdir).startswith(os.path.realpath(rootdir)):
+ raise UnpackError("subdir argument isn't a subdirectory of unpack root %s" % rootdir, urldata.url)
+ unpackdir = subdir
+ else:
+ unpackdir = os.path.join(rootdir, subdir)
+ bb.utils.mkdirhier(unpackdir)
+ else:
+ unpackdir = rootdir
+
+ if not unpack or not cmd:
+ # If file == dest, then avoid any copies, as we already put the file into dest!
+ dest = os.path.join(unpackdir, os.path.basename(file))
+ if file != dest and not (os.path.exists(dest) and os.path.samefile(file, dest)):
+ destdir = '.'
+ # For file:// entries all intermediate dirs in path must be created at destination
+ if urldata.type == "file":
+                    # A trailing '/' causes copying to the wrong place
+                    urlpath = urldata.path.rstrip('/')
+                    # We want files placed relative to cwd, so strip any leading '/'
+                    urlpath = urlpath.lstrip('/')
+                    if "/" in urlpath:
+ destdir = urlpath.rsplit("/", 1)[0] + '/'
+ bb.utils.mkdirhier("%s/%s" % (unpackdir, destdir))
+ cmd = 'cp -fpPRH %s %s' % (file, destdir)
+
+ if not cmd:
+ return
+
+ path = data.getVar('PATH')
+ if path:
+ cmd = "PATH=\"%s\" %s" % (path, cmd)
+ bb.note("Unpacking %s to %s/" % (file, unpackdir))
+ ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True, cwd=unpackdir)
+
+ if ret != 0:
+ raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), urldata.url)
+
+        if iterate:
+ iterate_urldata = urldata
+ iterate_urldata.localpath = "%s/%s" % (rootdir, iterate_file)
+ self.unpack(urldata, rootdir, data)
+
+ return
+
+ def clean(self, urldata, d):
+ """
+ Clean any existing full or partial download
+ """
+ bb.utils.remove(urldata.localpath)
+
+ def try_premirror(self, urldata, d):
+ """
+ Should premirrors be used?
+ """
+ return True
+
+ def checkstatus(self, fetch, urldata, d):
+ """
+ Check the status of a URL
+ Assumes localpath was called first
+ """
+ logger.info("URL %s could not be checked for status since no method exists.", url)
+ return True
+
+ def latest_revision(self, ud, d, name):
+ """
+ Look in the cache for the latest revision, if not present ask the SCM.
+ """
+ if not hasattr(self, "_latest_revision"):
+            raise ParameterError("The fetcher for this URL does not support _latest_revision", ud.url)
+
+ revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
+ key = self.generate_revision_key(ud, d, name)
+ try:
+ return revs[key]
+ except KeyError:
+ revs[key] = rev = self._latest_revision(ud, d, name)
+ return rev
+
+ def sortable_revision(self, ud, d, name):
+ latest_rev = self._build_revision(ud, d, name)
+ return True, str(latest_rev)
+
+ def generate_revision_key(self, ud, d, name):
+ key = self._revision_key(ud, d, name)
+ return "%s-%s" % (key, d.getVar("PN") or "")
+
+ def latest_versionstring(self, ud, d):
+ """
+ Compute the latest release name like "x.y.x" in "x.y.x+gitHASH"
+ by searching through the tags output of ls-remote, comparing
+ versions and returning the highest match as a (version, revision) pair.
+ """
+ return ('', '')
+
+class Fetch(object):
+ def __init__(self, urls, d, cache = True, localonly = False, connection_cache = None):
+ if localonly and cache:
+            raise Exception("bb.fetch2.Fetch.__init__: cannot set cache and localonly at the same time")
+
+ if len(urls) == 0:
+ urls = d.getVar("SRC_URI").split()
+ self.urls = urls
+ self.d = d
+ self.ud = {}
+ self.connection_cache = connection_cache
+
+ fn = d.getVar('FILE')
+ mc = d.getVar('__BBMULTICONFIG') or ""
+ if cache and fn and mc + fn in urldata_cache:
+ self.ud = urldata_cache[mc + fn]
+
+ for url in urls:
+ if url not in self.ud:
+ try:
+ self.ud[url] = FetchData(url, d, localonly)
+                except NonLocalMethod:
+                    if localonly:
+                        self.ud[url] = None
+
+ if fn and cache:
+ urldata_cache[mc + fn] = self.ud
+
+ def localpath(self, url):
+ if url not in self.urls:
+ self.ud[url] = FetchData(url, self.d)
+
+ self.ud[url].setup_localpath(self.d)
+ return self.d.expand(self.ud[url].localpath)
+
+ def localpaths(self):
+ """
+ Return a list of the local filenames, assuming successful fetch
+ """
+ local = []
+
+ for u in self.urls:
+ ud = self.ud[u]
+ ud.setup_localpath(self.d)
+ local.append(ud.localpath)
+
+ return local
+
+ def download(self, urls=None):
+ """
+ Fetch all urls
+ """
+ if not urls:
+ urls = self.urls
+
+ network = self.d.getVar("BB_NO_NETWORK")
+ premirroronly = (self.d.getVar("BB_FETCH_PREMIRRORONLY") == "1")
+
+ for u in urls:
+ ud = self.ud[u]
+ ud.setup_localpath(self.d)
+ m = ud.method
+ localpath = ""
+
+ if ud.lockfile:
+ lf = bb.utils.lockfile(ud.lockfile)
+
+ try:
+ self.d.setVar("BB_NO_NETWORK", network)
+
+ if verify_donestamp(ud, self.d) and not m.need_update(ud, self.d):
+ localpath = ud.localpath
+ elif m.try_premirror(ud, self.d):
+ logger.debug(1, "Trying PREMIRRORS")
+ mirrors = mirror_from_string(self.d.getVar('PREMIRRORS'))
+ localpath = try_mirrors(self, self.d, ud, mirrors, False)
+ if localpath:
+ try:
+ # early checksum verification so that if the checksum of the premirror
+ # contents mismatch the fetcher can still try upstream and mirrors
+ update_stamp(ud, self.d)
+ except ChecksumError as e:
+ logger.warning("Checksum failure encountered with premirror download of %s - will attempt other sources." % u)
+ logger.debug(1, str(e))
+ localpath = ""
+
+ if premirroronly:
+ self.d.setVar("BB_NO_NETWORK", "1")
+
+ firsterr = None
+ verified_stamp = verify_donestamp(ud, self.d)
+ if not localpath and (not verified_stamp or m.need_update(ud, self.d)):
+ try:
+ if not trusted_network(self.d, ud.url):
+ raise UntrustedUrl(ud.url)
+ logger.debug(1, "Trying Upstream")
+ m.download(ud, self.d)
+ if hasattr(m, "build_mirror_data"):
+ m.build_mirror_data(ud, self.d)
+ localpath = ud.localpath
+ # early checksum verify, so that if checksum mismatched,
+ # fetcher still have chance to fetch from mirror
+ update_stamp(ud, self.d)
+
+ except bb.fetch2.NetworkAccess:
+ raise
+
+ except BBFetchException as e:
+ if isinstance(e, ChecksumError):
+ logger.warning("Checksum failure encountered with download of %s - will attempt other sources if available" % u)
+ logger.debug(1, str(e))
+ if os.path.exists(ud.localpath):
+ rename_bad_checksum(ud, e.checksum)
+ elif isinstance(e, NoChecksumError):
+ raise
+ else:
+ logger.warning('Failed to fetch URL %s, attempting MIRRORS if available' % u)
+ logger.debug(1, str(e))
+ firsterr = e
+ # Remove any incomplete fetch
+ if not verified_stamp:
+ m.clean(ud, self.d)
+ logger.debug(1, "Trying MIRRORS")
+ mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
+ localpath = try_mirrors(self, self.d, ud, mirrors)
+
+ if not localpath or ((not os.path.exists(localpath)) and localpath.find("*") == -1):
+ if firsterr:
+ logger.error(str(firsterr))
+ raise FetchError("Unable to fetch URL from any source.", u)
+
+ update_stamp(ud, self.d)
+
+ except IOError as e:
+ if e.errno in [os.errno.ESTALE]:
+ logger.error("Stale Error Observed %s." % u)
+ raise ChecksumError("Stale Error Detected")
+
+ except BBFetchException as e:
+ if isinstance(e, ChecksumError):
+ logger.error("Checksum failure fetching %s" % u)
+ raise
+
+ finally:
+ if ud.lockfile:
+ bb.utils.unlockfile(lf)
+
+ def checkstatus(self, urls=None):
+ """
+ Check all urls exist upstream
+ """
+
+ if not urls:
+ urls = self.urls
+
+ for u in urls:
+ ud = self.ud[u]
+ ud.setup_localpath(self.d)
+ m = ud.method
+ logger.debug(1, "Testing URL %s", u)
+ # First try checking uri, u, from PREMIRRORS
+ mirrors = mirror_from_string(self.d.getVar('PREMIRRORS'))
+ ret = try_mirrors(self, self.d, ud, mirrors, True)
+ if not ret:
+ # Next try checking from the original uri, u
+ ret = m.checkstatus(self, ud, self.d)
+ if not ret:
+ # Finally, try checking uri, u, from MIRRORS
+ mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
+ ret = try_mirrors(self, self.d, ud, mirrors, True)
+
+ if not ret:
+ raise FetchError("URL %s doesn't work" % u, u)
+
+ def unpack(self, root, urls=None):
+ """
+ Unpack urls to root
+ """
+
+ if not urls:
+ urls = self.urls
+
+ for u in urls:
+ ud = self.ud[u]
+ ud.setup_localpath(self.d)
+
+ if ud.lockfile:
+ lf = bb.utils.lockfile(ud.lockfile)
+
+ ud.method.unpack(ud, root, self.d)
+
+ if ud.lockfile:
+ bb.utils.unlockfile(lf)
+
+ def clean(self, urls=None):
+ """
+ Clean files that the fetcher gets or places
+ """
+
+ if not urls:
+ urls = self.urls
+
+ for url in urls:
+ if url not in self.ud:
+                self.ud[url] = FetchData(url, self.d)
+ ud = self.ud[url]
+ ud.setup_localpath(self.d)
+
+ if not ud.localfile and ud.localpath is None:
+ continue
+
+ if ud.lockfile:
+ lf = bb.utils.lockfile(ud.lockfile)
+
+ ud.method.clean(ud, self.d)
+ if ud.donestamp:
+ bb.utils.remove(ud.donestamp)
+
+ if ud.lockfile:
+ bb.utils.unlockfile(lf)
+
+class FetchConnectionCache(object):
+ """
+    A class which represents a container for socket connections.
+ """
+ def __init__(self):
+ self.cache = {}
+
+ def get_connection_name(self, host, port):
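+        # e.g. ("example.com", 443) -> "example.com:443"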
+ return host + ':' + str(port)
+
+ def add_connection(self, host, port, connection):
+ cn = self.get_connection_name(host, port)
+
+ if cn not in self.cache:
+ self.cache[cn] = connection
+
+ def get_connection(self, host, port):
+ connection = None
+
+ cn = self.get_connection_name(host, port)
+ if cn in self.cache:
+ connection = self.cache[cn]
+
+ return connection
+
+ def remove_connection(self, host, port):
+ cn = self.get_connection_name(host, port)
+ if cn in self.cache:
+ self.cache[cn].close()
+ del self.cache[cn]
+
+ def close_connections(self):
+ for cn in list(self.cache.keys()):
+ self.cache[cn].close()
+ del self.cache[cn]
+
+from . import cvs
+from . import git
+from . import gitsm
+from . import gitannex
+from . import local
+from . import svn
+from . import wget
+from . import ssh
+from . import sftp
+from . import s3
+from . import perforce
+from . import bzr
+from . import hg
+from . import osc
+from . import repo
+from . import clearcase
+from . import npm
+
+methods.append(local.Local())
+methods.append(wget.Wget())
+methods.append(svn.Svn())
+methods.append(git.Git())
+methods.append(gitsm.GitSM())
+methods.append(gitannex.GitANNEX())
+methods.append(cvs.Cvs())
+methods.append(ssh.SSH())
+methods.append(sftp.SFTP())
+methods.append(s3.S3())
+methods.append(perforce.Perforce())
+methods.append(bzr.Bzr())
+methods.append(hg.Hg())
+methods.append(osc.Osc())
+methods.append(repo.Repo())
+methods.append(clearcase.ClearCase())
+methods.append(npm.Npm())
diff --git a/poky/bitbake/lib/bb/fetch2/bzr.py b/poky/bitbake/lib/bb/fetch2/bzr.py
new file mode 100644
index 000000000..16123f8af
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/bzr.py
@@ -0,0 +1,139 @@
+"""
+BitBake 'Fetch' implementation for bzr.
+
+"""
+
+# Copyright (C) 2007 Ross Burton
+# Copyright (C) 2007 Richard Purdie
+#
+# Classes for obtaining upstream sources for the
+# BitBake build tools.
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import sys
+import logging
+import bb
+from bb.fetch2 import FetchMethod
+from bb.fetch2 import FetchError
+from bb.fetch2 import runfetchcmd
+from bb.fetch2 import logger
+
+class Bzr(FetchMethod):
+ def supports(self, ud, d):
+ return ud.type in ['bzr']
+
+ def urldata_init(self, ud, d):
+ """
+        Init bzr-specific variables within url data
+ """
+ # Create paths to bzr checkouts
+ relpath = self._strip_leading_slashes(ud.path)
+ ud.pkgdir = os.path.join(d.expand('${BZRDIR}'), ud.host, relpath)
+
+ ud.setup_revisions(d)
+
+ if not ud.revision:
+            ud.revision = self.latest_revision(ud, d, ud.names[0])
+
+ ud.localfile = d.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision))
+
+ def _buildbzrcommand(self, ud, d, command):
+ """
+        Build up a bzr command line based on ud
+        command is one of "fetch", "update", "revno"
+ """
+
+ basecmd = d.expand('${FETCHCMD_bzr}')
+
+ proto = ud.parm.get('protocol', 'http')
+
+ bzrroot = ud.host + ud.path
+
+ options = []
+
+ if command == "revno":
+ bzrcmd = "%s revno %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
+ else:
+ if ud.revision:
+ options.append("-r %s" % ud.revision)
+
+ if command == "fetch":
+ bzrcmd = "%s branch %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
+ elif command == "update":
+ bzrcmd = "%s pull %s --overwrite" % (basecmd, " ".join(options))
+ else:
+ raise FetchError("Invalid bzr command %s" % command, ud.url)
+
+ return bzrcmd
+
+ def download(self, ud, d):
+ """Fetch url"""
+
+ if os.access(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir), '.bzr'), os.R_OK):
+ bzrcmd = self._buildbzrcommand(ud, d, "update")
+ logger.debug(1, "BZR Update %s", ud.url)
+ bb.fetch2.check_network_access(d, bzrcmd, ud.url)
+ runfetchcmd(bzrcmd, d, workdir=os.path.join(ud.pkgdir, os.path.basename(ud.path)))
+ else:
+ bb.utils.remove(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir)), True)
+ bzrcmd = self._buildbzrcommand(ud, d, "fetch")
+ bb.fetch2.check_network_access(d, bzrcmd, ud.url)
+ logger.debug(1, "BZR Checkout %s", ud.url)
+ bb.utils.mkdirhier(ud.pkgdir)
+ logger.debug(1, "Running %s", bzrcmd)
+ runfetchcmd(bzrcmd, d, workdir=ud.pkgdir)
+
+ scmdata = ud.parm.get("scmdata", "")
+ if scmdata == "keep":
+ tar_flags = ""
+ else:
+ tar_flags = "--exclude='.bzr' --exclude='.bzrtags'"
+
+ # tar them up to a defined filename
+ runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(ud.pkgdir)),
+ d, cleanup=[ud.localpath], workdir=ud.pkgdir)
+
+ def supports_srcrev(self):
+ return True
+
+ def _revision_key(self, ud, d, name):
+ """
+ Return a unique key for the url
+ """
+ return "bzr:" + ud.pkgdir
+
+ def _latest_revision(self, ud, d, name):
+ """
+ Return the latest upstream revision number
+ """
+ logger.debug(2, "BZR fetcher hitting network for %s", ud.url)
+
+ bb.fetch2.check_network_access(d, self._buildbzrcommand(ud, d, "revno"), ud.url)
+
+ output = runfetchcmd(self._buildbzrcommand(ud, d, "revno"), d, True)
+
+ return output.strip()
+
+ def sortable_revision(self, ud, d, name):
+ """
+        Return a sortable revision number, which in bzr's case is simply the revision number
+ """
+
+ return False, self._build_revision(ud, d)
+
+ def _build_revision(self, ud, d):
+ return ud.revision
diff --git a/poky/bitbake/lib/bb/fetch2/clearcase.py b/poky/bitbake/lib/bb/fetch2/clearcase.py
new file mode 100644
index 000000000..36beab6a5
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/clearcase.py
@@ -0,0 +1,260 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' clearcase implementation
+
+The clearcase fetcher is used to retrieve files from a ClearCase repository.
+
+Usage in the recipe:
+
+ SRC_URI = "ccrc://cc.example.org/ccrc;vob=/example_vob;module=/example_module"
+ SRCREV = "EXAMPLE_CLEARCASE_TAG"
+ PV = "${@d.getVar("SRCREV", False).replace("/", "+")}"
+
+The fetcher uses the rcleartool or cleartool remote client, depending on which one is available.
+
+Supported SRC_URI options are:
+
+- vob
+  (required) The name of the clearcase VOB (with a leading "/")
+
+- module
+  The module in the selected VOB (with a leading "/")
+
+ The module and vob parameters are combined to create
+ the following load rule in the view config spec:
+ load <vob><module>
+
+- proto
+ http or https
+
+Related variables:
+
+ CCASE_CUSTOM_CONFIG_SPEC
+ Write a config spec to this variable in your recipe to use it instead
+ of the default config spec generated by this fetcher.
+ Please note that the SRCREV loses its functionality if you specify
+ this variable. SRCREV is still used to label the archive after a fetch,
+ but it doesn't define what's fetched.
+
+User credentials:
+ cleartool:
+ The login of cleartool is handled by the system. No special steps needed.
+
+ rcleartool:
+ In order to use rcleartool with authenticated users an `rcleartool login` is
+ necessary before using the fetcher.
+"""
+# Copyright (C) 2014 Siemens AG
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+import os
+import sys
+import shutil
+import bb
+from bb.fetch2 import FetchMethod
+from bb.fetch2 import FetchError
+from bb.fetch2 import MissingParameterError
+from bb.fetch2 import ParameterError
+from bb.fetch2 import runfetchcmd
+from bb.fetch2 import logger
+from distutils import spawn
+
+class ClearCase(FetchMethod):
+ """Class to fetch urls via 'clearcase'"""
+ def init(self, d):
+ pass
+
+ def supports(self, ud, d):
+ """
+ Check to see if a given url can be fetched with Clearcase.
+ """
+ return ud.type in ['ccrc']
+
+ def debug(self, msg):
+ logger.debug(1, "ClearCase: %s", msg)
+
+ def urldata_init(self, ud, d):
+ """
+        Init ClearCase-specific variables within url data
+ """
+ ud.proto = "https"
+ if 'protocol' in ud.parm:
+ ud.proto = ud.parm['protocol']
+        if ud.proto not in ('http', 'https'):
+            raise ParameterError("Invalid protocol type", ud.url)
+
+ ud.vob = ''
+ if 'vob' in ud.parm:
+ ud.vob = ud.parm['vob']
+ else:
+ msg = ud.url+": vob must be defined so the fetcher knows what to get."
+ raise MissingParameterError('vob', msg)
+
+ if 'module' in ud.parm:
+ ud.module = ud.parm['module']
+ else:
+ ud.module = ""
+
+ ud.basecmd = d.getVar("FETCHCMD_ccrc") or spawn.find_executable("cleartool") or spawn.find_executable("rcleartool")
+
+ if d.getVar("SRCREV") == "INVALID":
+ raise FetchError("Set a valid SRCREV for the clearcase fetcher in your recipe, e.g. SRCREV = \"/main/LATEST\" or any other label of your choice.")
+
+ ud.label = d.getVar("SRCREV", False)
+ ud.customspec = d.getVar("CCASE_CUSTOM_CONFIG_SPEC")
+
+ ud.server = "%s://%s%s" % (ud.proto, ud.host, ud.path)
+
+ ud.identifier = "clearcase-%s%s-%s" % ( ud.vob.replace("/", ""),
+ ud.module.replace("/", "."),
+ ud.label.replace("/", "."))
+
+        ud.viewname = "%s-view%s" % (ud.identifier, d.getVar("DATETIME"))
+ ud.csname = "%s-config-spec" % (ud.identifier)
+ ud.ccasedir = os.path.join(d.getVar("DL_DIR"), ud.type)
+ ud.viewdir = os.path.join(ud.ccasedir, ud.viewname)
+ ud.configspecfile = os.path.join(ud.ccasedir, ud.csname)
+ ud.localfile = "%s.tar.gz" % (ud.identifier)
+
+ self.debug("host = %s" % ud.host)
+ self.debug("path = %s" % ud.path)
+ self.debug("server = %s" % ud.server)
+ self.debug("proto = %s" % ud.proto)
+ self.debug("type = %s" % ud.type)
+ self.debug("vob = %s" % ud.vob)
+ self.debug("module = %s" % ud.module)
+ self.debug("basecmd = %s" % ud.basecmd)
+ self.debug("label = %s" % ud.label)
+ self.debug("ccasedir = %s" % ud.ccasedir)
+ self.debug("viewdir = %s" % ud.viewdir)
+ self.debug("viewname = %s" % ud.viewname)
+ self.debug("configspecfile = %s" % ud.configspecfile)
+ self.debug("localfile = %s" % ud.localfile)
+
+ ud.localfile = os.path.join(d.getVar("DL_DIR"), ud.localfile)
+
+ def _build_ccase_command(self, ud, command):
+ """
+ Build up a commandline based on ud
+ command is: mkview, setcs, rmview
+ """
+ options = []
+
+ if "rcleartool" in ud.basecmd:
+ options.append("-server %s" % ud.server)
+
+ basecmd = "%s %s" % (ud.basecmd, command)
+
+        if command == 'mkview':
+ if not "rcleartool" in ud.basecmd:
+ # Cleartool needs a -snapshot view
+ options.append("-snapshot")
+ options.append("-tag %s" % ud.viewname)
+ options.append(ud.viewdir)
+
+        elif command == 'rmview':
+ options.append("-force")
+ options.append("%s" % ud.viewdir)
+
+        elif command == 'setcs':
+ options.append("-overwrite")
+ options.append(ud.configspecfile)
+
+ else:
+ raise FetchError("Invalid ccase command %s" % command)
+
+ ccasecmd = "%s %s" % (basecmd, " ".join(options))
+ self.debug("ccasecmd = %s" % ccasecmd)
+ return ccasecmd
+
+ def _write_configspec(self, ud, d):
+ """
+ Create config spec file (ud.configspecfile) for ccase view
+ """
+ config_spec = ""
+        custom_config_spec = d.getVar("CCASE_CUSTOM_CONFIG_SPEC")
+ if custom_config_spec is not None:
+ for line in custom_config_spec.split("\\n"):
+ config_spec += line+"\n"
+ bb.warn("A custom config spec has been set, SRCREV is only relevant for the tarball name.")
+ else:
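+            # The generated default spec looks like, e.g. for label REL_1.0,
+            # vob /my_vob and module /my_mod:
+            #   element * CHECKEDOUT
+            #   element * REL_1.0
+            #   load /my_vob/my_mod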
+ config_spec += "element * CHECKEDOUT\n"
+ config_spec += "element * %s\n" % ud.label
+ config_spec += "load %s%s\n" % (ud.vob, ud.module)
+
+ logger.info("Using config spec: \n%s" % config_spec)
+
+ with open(ud.configspecfile, 'w') as f:
+ f.write(config_spec)
+
+ def _remove_view(self, ud, d):
+ if os.path.exists(ud.viewdir):
+            cmd = self._build_ccase_command(ud, 'rmview')
+ logger.info("cleaning up [VOB=%s label=%s view=%s]", ud.vob, ud.label, ud.viewname)
+ bb.fetch2.check_network_access(d, cmd, ud.url)
+ output = runfetchcmd(cmd, d, workdir=ud.ccasedir)
+ logger.info("rmview output: %s", output)
+
+ def need_update(self, ud, d):
+ if ("LATEST" in ud.label) or (ud.customspec and "LATEST" in ud.customspec):
+            ud.identifier += "-%s" % d.getVar("DATETIME")
+ return True
+ if os.path.exists(ud.localpath):
+ return False
+ return True
+
+ def supports_srcrev(self):
+ return True
+
+ def sortable_revision(self, ud, d, name):
+ return False, ud.identifier
+
+ def download(self, ud, d):
+ """Fetch url"""
+
+ # Make a fresh view
+ bb.utils.mkdirhier(ud.ccasedir)
+ self._write_configspec(ud, d)
+ cmd = self._build_ccase_command(ud, 'mkview')
+ logger.info("creating view [VOB=%s label=%s view=%s]", ud.vob, ud.label, ud.viewname)
+ bb.fetch2.check_network_access(d, cmd, ud.url)
+ try:
+ runfetchcmd(cmd, d)
+ except FetchError as e:
+ if "CRCLI2008E" in e.msg:
+ raise FetchError("%s\n%s\n" % (e.msg, "Call `rcleartool login` in your console to authenticate to the clearcase server before running bitbake."))
+ else:
+ raise e
+
+ # Set configspec: Setting the configspec effectively fetches the files as defined in the configspec
+        cmd = self._build_ccase_command(ud, 'setcs')
+ logger.info("fetching data [VOB=%s label=%s view=%s]", ud.vob, ud.label, ud.viewname)
+ bb.fetch2.check_network_access(d, cmd, ud.url)
+ output = runfetchcmd(cmd, d, workdir=ud.viewdir)
+ logger.info("%s", output)
+
+ # Copy the configspec to the viewdir so we have it in our source tarball later
+ shutil.copyfile(ud.configspecfile, os.path.join(ud.viewdir, ud.csname))
+
+ # Clean clearcase meta-data before tar
+
+ runfetchcmd('tar -czf "%s" .' % (ud.localpath), d, cleanup = [ud.localpath])
+
+ # Clean up so we can create a new view next time
+        self.clean(ud, d)
+
+ def clean(self, ud, d):
+ self._remove_view(ud, d)
+ bb.utils.remove(ud.configspecfile)
diff --git a/poky/bitbake/lib/bb/fetch2/cvs.py b/poky/bitbake/lib/bb/fetch2/cvs.py
new file mode 100644
index 000000000..490c95471
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/cvs.py
@@ -0,0 +1,172 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementations
+
+Classes for obtaining upstream sources for the
+BitBake build tools.
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+#
+
+import os
+import logging
+import bb
+from bb.fetch2 import FetchMethod, FetchError, MissingParameterError, logger
+from bb.fetch2 import runfetchcmd
+
+class Cvs(FetchMethod):
+ """
+ Class to fetch a module or modules from cvs repositories
+ """
+ def supports(self, ud, d):
+ """
+ Check to see if a given url can be fetched with cvs.
+ """
+ return ud.type in ['cvs']
+
+ def urldata_init(self, ud, d):
+        if "module" not in ud.parm:
+ raise MissingParameterError("module", ud.url)
+ ud.module = ud.parm["module"]
+
+ ud.tag = ud.parm.get('tag', "")
+
+ # Override the default date in certain cases
+ if 'date' in ud.parm:
+ ud.date = ud.parm['date']
+ elif ud.tag:
+ ud.date = ""
+
+ norecurse = ''
+ if 'norecurse' in ud.parm:
+ norecurse = '_norecurse'
+
+ fullpath = ''
+ if 'fullpath' in ud.parm:
+ fullpath = '_fullpath'
+
+ ud.localfile = d.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath))
+
+ def need_update(self, ud, d):
+        if ud.date == "now":
+ return True
+ if not os.path.exists(ud.localpath):
+ return True
+ return False
+
+ def download(self, ud, d):
+
+ method = ud.parm.get('method', 'pserver')
+ localdir = ud.parm.get('localdir', ud.module)
+ cvs_port = ud.parm.get('port', '')
+
+ cvs_rsh = None
+ if method == "ext":
+ if "rsh" in ud.parm:
+ cvs_rsh = ud.parm["rsh"]
+
+ if method == "dir":
+ cvsroot = ud.path
+ else:
+ cvsroot = ":" + method
+ cvsproxyhost = d.getVar('CVS_PROXY_HOST')
+ if cvsproxyhost:
+ cvsroot += ";proxy=" + cvsproxyhost
+ cvsproxyport = d.getVar('CVS_PROXY_PORT')
+ if cvsproxyport:
+ cvsroot += ";proxyport=" + cvsproxyport
+ cvsroot += ":" + ud.user
+ if ud.pswd:
+ cvsroot += ":" + ud.pswd
+ cvsroot += "@" + ud.host + ":" + cvs_port + ud.path
+
+ options = []
+ if 'norecurse' in ud.parm:
+ options.append("-l")
+ if ud.date:
+ # treat YYYYMMDDHHMM specially for CVS
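+            # e.g. a hypothetical date=200501021530 expands to
+            #   -D "20050102 15:30 UTC"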
+ if len(ud.date) == 12:
+ options.append("-D \"%s %s:%s UTC\"" % (ud.date[0:8], ud.date[8:10], ud.date[10:12]))
+ else:
+ options.append("-D \"%s UTC\"" % ud.date)
+ if ud.tag:
+ options.append("-r %s" % ud.tag)
+
+ cvsbasecmd = d.getVar("FETCHCMD_cvs")
+ cvscmd = cvsbasecmd + " '-d" + cvsroot + "' co " + " ".join(options) + " " + ud.module
+ cvsupdatecmd = cvsbasecmd + " '-d" + cvsroot + "' update -d -P " + " ".join(options)
+
+ if cvs_rsh:
+ cvscmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvscmd)
+ cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd)
+
+ # create module directory
+ logger.debug(2, "Fetch: checking for module directory")
+ pkg = d.getVar('PN')
+ pkgdir = os.path.join(d.getVar('CVSDIR'), pkg)
+ moddir = os.path.join(pkgdir, localdir)
+ workdir = None
+ if os.access(os.path.join(moddir, 'CVS'), os.R_OK):
+ logger.info("Update " + ud.url)
+ bb.fetch2.check_network_access(d, cvsupdatecmd, ud.url)
+ # update sources there
+ workdir = moddir
+ cmd = cvsupdatecmd
+ else:
+ logger.info("Fetch " + ud.url)
+ # check out sources there
+ bb.utils.mkdirhier(pkgdir)
+ workdir = pkgdir
+ logger.debug(1, "Running %s", cvscmd)
+ bb.fetch2.check_network_access(d, cvscmd, ud.url)
+ cmd = cvscmd
+
+ runfetchcmd(cmd, d, cleanup=[moddir], workdir=workdir)
+
+ if not os.access(moddir, os.R_OK):
+            raise FetchError("Directory %s was not readable despite successful fetch?!" % moddir, ud.url)
+
+ scmdata = ud.parm.get("scmdata", "")
+ if scmdata == "keep":
+ tar_flags = ""
+ else:
+ tar_flags = "--exclude='CVS'"
+
+ # tar them up to a defined filename
+ workdir = None
+ if 'fullpath' in ud.parm:
+ workdir = pkgdir
+ cmd = "tar %s -czf %s %s" % (tar_flags, ud.localpath, localdir)
+ else:
+ workdir = os.path.dirname(os.path.realpath(moddir))
+ cmd = "tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(moddir))
+
+ runfetchcmd(cmd, d, cleanup=[ud.localpath], workdir=workdir)
+
+ def clean(self, ud, d):
+ """ Clean CVS Files and tarballs """
+
+ pkg = d.getVar('PN')
+ pkgdir = os.path.join(d.getVar("CVSDIR"), pkg)
+
+ bb.utils.remove(pkgdir, True)
+ bb.utils.remove(ud.localpath)
+
diff --git a/poky/bitbake/lib/bb/fetch2/git.py b/poky/bitbake/lib/bb/fetch2/git.py
new file mode 100644
index 000000000..3de83bed1
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/git.py
@@ -0,0 +1,664 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' git implementation
+
+The git fetcher supports SRC_URIs of the format:
+SRC_URI = "git://some.host/somepath;OptionA=xxx;OptionB=xxx;..."
+
+Supported SRC_URI options are:
+
+- branch
+ The git branch to retrieve from. The default is "master"
+
+  This option also supports fetching multiple branches, separated by
+  commas. In that case, the name option must list the same number of
+  names as there are branches; each name is used to specify the SRCREV
+  for its branch,
+ e.g:
+ SRC_URI="git://some.host/somepath;branch=branchX,branchY;name=nameX,nameY"
+ SRCREV_nameX = "xxxxxxxxxxxxxxxxxxxx"
+ SRCREV_nameY = "YYYYYYYYYYYYYYYYYYYY"
+
+- tag
+ The git tag to retrieve. The default is "master"
+
+- protocol
+ The method to use to access the repository. Common options are "git",
+ "http", "https", "file", "ssh" and "rsync". The default is "git".
+
+- rebaseable
+   rebaseable indicates that the upstream git repo may rebase in the future,
+   so the current revision may disappear from the upstream repo. This option
+   tells the fetcher to preserve the local cache carefully for future use.
+   The default value is "0"; set rebaseable=1 for a rebaseable git repo.
+
+- nocheckout
+   Don't checkout source code when unpacking. Set this option for recipes
+   that have their own routine to check out code.
+   The default is "0"; set nocheckout=1 if needed.
+
+- bareclone
+   Create a bare clone of the source code and don't checkout the source code
+   when unpacking. Set this option for recipes that have their own routine to
+   check out code and their own tracking branch requirements.
+   The default is "0"; set bareclone=1 if needed.
+
+- nobranch
+   Don't check that the SHA is present on a branch. Set this option for
+   recipes that refer to a commit which is reachable from a tag rather than
+   a branch.
+   The default is "0"; set nobranch=1 if needed.
+
+- usehead
+   For local git:// urls, use the current branch HEAD as the revision for use
+   with AUTOREV. Implies nobranch.
+
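+Example (hypothetical host and path, shown for illustration only):
+
+SRC_URI = "git://git.example.com/myrepo;protocol=https;branch=stable"
+SRCREV = "0123456789abcdef0123456789abcdef01234567"
+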
+"""
+
+# Copyright (C) 2005 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import collections
+import errno
+import fnmatch
+import os
+import re
+import subprocess
+import tempfile
+import bb
+import bb.progress
+from bb.fetch2 import FetchMethod
+from bb.fetch2 import runfetchcmd
+from bb.fetch2 import logger
+
+
+class GitProgressHandler(bb.progress.LineFilterProgressHandler):
+ """Extract progress information from git output"""
+ def __init__(self, d):
+ self._buffer = ''
+ self._count = 0
+ super(GitProgressHandler, self).__init__(d)
+ # Send an initial progress event so the bar gets shown
+ self._fire_progress(-1)
+
+ def write(self, string):
+ self._buffer += string
+ stages = ['Counting objects', 'Compressing objects', 'Receiving objects', 'Resolving deltas']
+ stage_weights = [0.2, 0.05, 0.5, 0.25]
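+        # Illustrative weighting: a "Receiving objects:  50%" line (stage 2)
+        # maps to round(50 * 0.5 + (0.2 + 0.05) * 100) = 50% overall progress.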
+ stagenum = 0
+ for i, stage in reversed(list(enumerate(stages))):
+ if stage in self._buffer:
+ stagenum = i
+ self._buffer = ''
+ break
+ self._status = stages[stagenum]
+ percs = re.findall(r'(\d+)%', string)
+ if percs:
+ progress = int(round((int(percs[-1]) * stage_weights[stagenum]) + (sum(stage_weights[:stagenum]) * 100)))
+ rates = re.findall(r'([\d.]+ [a-zA-Z]*/s+)', string)
+ if rates:
+ rate = rates[-1]
+ else:
+ rate = None
+ self.update(progress, rate)
+ else:
+ if stagenum == 0:
+ percs = re.findall(r': (\d+)', string)
+ if percs:
+ count = int(percs[-1])
+ if count > self._count:
+ self._count = count
+ self._fire_progress(-count)
+ super(GitProgressHandler, self).write(string)
+
+
+class Git(FetchMethod):
+    """Class to fetch a module or modules from git repositories"""
+
+    bitbake_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..'))
+    make_shallow_path = os.path.join(bitbake_dir, 'bin', 'git-make-shallow')
+
+ def init(self, d):
+ pass
+
+ def supports(self, ud, d):
+ """
+ Check to see if a given url can be fetched with git.
+ """
+ return ud.type in ['git']
+
+ def supports_checksum(self, urldata):
+ return False
+
+ def urldata_init(self, ud, d):
+ """
+        init git specific variables within url data
+        so that git methods like latest_revision() can work
+ """
+ if 'protocol' in ud.parm:
+ ud.proto = ud.parm['protocol']
+ elif not ud.host:
+ ud.proto = 'file'
+ else:
+ ud.proto = "git"
+
+        if ud.proto not in ('git', 'file', 'ssh', 'http', 'https', 'rsync'):
+ raise bb.fetch2.ParameterError("Invalid protocol type", ud.url)
+
+ ud.nocheckout = ud.parm.get("nocheckout","0") == "1"
+
+ ud.rebaseable = ud.parm.get("rebaseable","0") == "1"
+
+ ud.nobranch = ud.parm.get("nobranch","0") == "1"
+
+ # usehead implies nobranch
+ ud.usehead = ud.parm.get("usehead","0") == "1"
+ if ud.usehead:
+ if ud.proto != "file":
+ raise bb.fetch2.ParameterError("The usehead option is only for use with local ('protocol=file') git repositories", ud.url)
+            ud.nobranch = True
+
+ # bareclone implies nocheckout
+ ud.bareclone = ud.parm.get("bareclone","0") == "1"
+ if ud.bareclone:
+            ud.nocheckout = True
+
+ ud.unresolvedrev = {}
+ branches = ud.parm.get("branch", "master").split(',')
+ if len(branches) != len(ud.names):
+ raise bb.fetch2.ParameterError("The number of name and branch parameters is not balanced", ud.url)
+
+ ud.cloneflags = "-s -n"
+ if ud.bareclone:
+ ud.cloneflags += " --mirror"
+
+ ud.shallow = d.getVar("BB_GIT_SHALLOW") == "1"
+ ud.shallow_extra_refs = (d.getVar("BB_GIT_SHALLOW_EXTRA_REFS") or "").split()
+
+ depth_default = d.getVar("BB_GIT_SHALLOW_DEPTH")
+ if depth_default is not None:
+ try:
+ depth_default = int(depth_default or 0)
+ except ValueError:
+ raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH: %s" % depth_default)
+ else:
+ if depth_default < 0:
+ raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH: %s" % depth_default)
+ else:
+ depth_default = 1
+ ud.shallow_depths = collections.defaultdict(lambda: depth_default)
+
+        revs_default = d.getVar("BB_GIT_SHALLOW_REVS")
+ ud.shallow_revs = []
+ ud.branches = {}
+ for pos, name in enumerate(ud.names):
+ branch = branches[pos]
+ ud.branches[name] = branch
+ ud.unresolvedrev[name] = branch
+
+ shallow_depth = d.getVar("BB_GIT_SHALLOW_DEPTH_%s" % name)
+ if shallow_depth is not None:
+ try:
+ shallow_depth = int(shallow_depth or 0)
+ except ValueError:
+ raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth))
+ else:
+ if shallow_depth < 0:
+ raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth))
+ ud.shallow_depths[name] = shallow_depth
+
+ revs = d.getVar("BB_GIT_SHALLOW_REVS_%s" % name)
+ if revs is not None:
+ ud.shallow_revs.extend(revs.split())
+ elif revs_default is not None:
+ ud.shallow_revs.extend(revs_default.split())
+
+ if (ud.shallow and
+ not ud.shallow_revs and
+ all(ud.shallow_depths[n] == 0 for n in ud.names)):
+ # Shallow disabled for this URL
+ ud.shallow = False
+
+ if ud.usehead:
+ ud.unresolvedrev['default'] = 'HEAD'
+
+ ud.basecmd = d.getVar("FETCHCMD_git") or "git -c core.fsyncobjectfiles=0"
+
+ write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0"
+ ud.write_tarballs = write_tarballs != "0" or ud.rebaseable
+ ud.write_shallow_tarballs = (d.getVar("BB_GENERATE_SHALLOW_TARBALLS") or write_tarballs) != "0"
+
+ ud.setup_revisions(d)
+
+ for name in ud.names:
+            # Ensure anything that doesn't look like a sha1 checksum/revision is translated into one
+ if not ud.revisions[name] or len(ud.revisions[name]) != 40 or (False in [c in "abcdef0123456789" for c in ud.revisions[name]]):
+ if ud.revisions[name]:
+ ud.unresolvedrev[name] = ud.revisions[name]
+ ud.revisions[name] = self.latest_revision(ud, d, name)
+
+ gitsrcname = '%s%s' % (ud.host.replace(':', '.'), ud.path.replace('/', '.').replace('*', '.'))
+ if gitsrcname.startswith('.'):
+ gitsrcname = gitsrcname[1:]
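+        # Illustrative: host "git.example.com" and path "/foo/bar" yield
+        # gitsrcname "git.example.com.foo.bar"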
+
+        # for a rebaseable git repo, it is necessary to keep a mirror tarball
+        # per revision, so that even if the revision disappears from the
+        # upstream repo in the future, the mirror will remain intact and still
+        # contain the revision
+ if ud.rebaseable:
+ for name in ud.names:
+ gitsrcname = gitsrcname + '_' + ud.revisions[name]
+
+ dl_dir = d.getVar("DL_DIR")
+ gitdir = d.getVar("GITDIR") or (dl_dir + "/git2/")
+ ud.clonedir = os.path.join(gitdir, gitsrcname)
+ ud.localfile = ud.clonedir
+
+ mirrortarball = 'git2_%s.tar.gz' % gitsrcname
+ ud.fullmirror = os.path.join(dl_dir, mirrortarball)
+ ud.mirrortarballs = [mirrortarball]
+ if ud.shallow:
+ tarballname = gitsrcname
+ if ud.bareclone:
+ tarballname = "%s_bare" % tarballname
+
+ if ud.shallow_revs:
+ tarballname = "%s_%s" % (tarballname, "_".join(sorted(ud.shallow_revs)))
+
+ for name, revision in sorted(ud.revisions.items()):
+ tarballname = "%s_%s" % (tarballname, ud.revisions[name][:7])
+ depth = ud.shallow_depths[name]
+ if depth:
+ tarballname = "%s-%s" % (tarballname, depth)
+
+ shallow_refs = []
+ if not ud.nobranch:
+ shallow_refs.extend(ud.branches.values())
+ if ud.shallow_extra_refs:
+ shallow_refs.extend(r.replace('refs/heads/', '').replace('*', 'ALL') for r in ud.shallow_extra_refs)
+ if shallow_refs:
+ tarballname = "%s_%s" % (tarballname, "_".join(sorted(shallow_refs)).replace('/', '.'))
+
+ fetcher = self.__class__.__name__.lower()
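+            # Illustrative name: gitshallow_git.example.com.myrepo_0123456-1_master.tar.gz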
+ ud.shallowtarball = '%sshallow_%s.tar.gz' % (fetcher, tarballname)
+ ud.fullshallow = os.path.join(dl_dir, ud.shallowtarball)
+ ud.mirrortarballs.insert(0, ud.shallowtarball)
+
+ def localpath(self, ud, d):
+ return ud.clonedir
+
+ def need_update(self, ud, d):
+ if not os.path.exists(ud.clonedir):
+ return True
+ for name in ud.names:
+ if not self._contains_ref(ud, d, name, ud.clonedir):
+ return True
+ if ud.shallow and ud.write_shallow_tarballs and not os.path.exists(ud.fullshallow):
+ return True
+ if ud.write_tarballs and not os.path.exists(ud.fullmirror):
+ return True
+ return False
+
+ def try_premirror(self, ud, d):
+ # If we don't do this, updating an existing checkout with only premirrors
+ # is not possible
+ if d.getVar("BB_FETCH_PREMIRRORONLY") is not None:
+ return True
+ if os.path.exists(ud.clonedir):
+ return False
+ return True
+
+ def download(self, ud, d):
+ """Fetch url"""
+
+ no_clone = not os.path.exists(ud.clonedir)
+ need_update = no_clone or self.need_update(ud, d)
+
+ # A current clone is preferred to either tarball, a shallow tarball is
+ # preferred to an out of date clone, and a missing clone will use
+ # either tarball.
+ if ud.shallow and os.path.exists(ud.fullshallow) and need_update:
+ ud.localpath = ud.fullshallow
+ return
+ elif os.path.exists(ud.fullmirror) and no_clone:
+ bb.utils.mkdirhier(ud.clonedir)
+ runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=ud.clonedir)
+
+ repourl = self._get_repo_url(ud)
+
+ # If the repo still doesn't exist, fallback to cloning it
+ if not os.path.exists(ud.clonedir):
+ # We do this since git will use a "-l" option automatically for local urls where possible
+ if repourl.startswith("file://"):
+ repourl = repourl[7:]
+ clone_cmd = "LANG=C %s clone --bare --mirror %s %s --progress" % (ud.basecmd, repourl, ud.clonedir)
+ if ud.proto.lower() != 'file':
+ bb.fetch2.check_network_access(d, clone_cmd, ud.url)
+ progresshandler = GitProgressHandler(d)
+ runfetchcmd(clone_cmd, d, log=progresshandler)
+
+ # Update the checkout if needed
+ needupdate = False
+ for name in ud.names:
+ if not self._contains_ref(ud, d, name, ud.clonedir):
+ needupdate = True
+ if needupdate:
+ try:
+ runfetchcmd("%s remote rm origin" % ud.basecmd, d, workdir=ud.clonedir)
+ except bb.fetch2.FetchError:
+ logger.debug(1, "No Origin")
+
+ runfetchcmd("%s remote add --mirror=fetch origin %s" % (ud.basecmd, repourl), d, workdir=ud.clonedir)
+ fetch_cmd = "LANG=C %s fetch -f --prune --progress %s refs/*:refs/*" % (ud.basecmd, repourl)
+ if ud.proto.lower() != 'file':
+ bb.fetch2.check_network_access(d, fetch_cmd, ud.url)
+ progresshandler = GitProgressHandler(d)
+ runfetchcmd(fetch_cmd, d, log=progresshandler, workdir=ud.clonedir)
+ runfetchcmd("%s prune-packed" % ud.basecmd, d, workdir=ud.clonedir)
+ runfetchcmd("%s pack-refs --all" % ud.basecmd, d, workdir=ud.clonedir)
+ runfetchcmd("%s pack-redundant --all | xargs -r rm" % ud.basecmd, d, workdir=ud.clonedir)
+ try:
+ os.unlink(ud.fullmirror)
+ except OSError as exc:
+ if exc.errno != errno.ENOENT:
+ raise
+ for name in ud.names:
+ if not self._contains_ref(ud, d, name, ud.clonedir):
+ raise bb.fetch2.FetchError("Unable to find revision %s in branch %s even from upstream" % (ud.revisions[name], ud.branches[name]))
+
+ def build_mirror_data(self, ud, d):
+ if ud.shallow and ud.write_shallow_tarballs:
+ if not os.path.exists(ud.fullshallow):
+ if os.path.islink(ud.fullshallow):
+ os.unlink(ud.fullshallow)
+ tempdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
+ shallowclone = os.path.join(tempdir, 'git')
+ try:
+ self.clone_shallow_local(ud, shallowclone, d)
+
+ logger.info("Creating tarball of git repository")
+ runfetchcmd("tar -czf %s ." % ud.fullshallow, d, workdir=shallowclone)
+ runfetchcmd("touch %s.done" % ud.fullshallow, d)
+ finally:
+ bb.utils.remove(tempdir, recurse=True)
+ elif ud.write_tarballs and not os.path.exists(ud.fullmirror):
+ if os.path.islink(ud.fullmirror):
+ os.unlink(ud.fullmirror)
+
+ logger.info("Creating tarball of git repository")
+ runfetchcmd("tar -czf %s ." % ud.fullmirror, d, workdir=ud.clonedir)
+ runfetchcmd("touch %s.done" % ud.fullmirror, d)
+
+ def clone_shallow_local(self, ud, dest, d):
+ """Clone the repo and make it shallow.
+
+ The upstream url of the new clone isn't set at this time, as it'll be
+ set correctly when unpacked."""
+ runfetchcmd("%s clone %s %s %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, dest), d)
+
+ to_parse, shallow_branches = [], []
+ for name in ud.names:
+ revision = ud.revisions[name]
+ depth = ud.shallow_depths[name]
+ if depth:
+ to_parse.append('%s~%d^{}' % (revision, depth - 1))
+
+ # For nobranch, we need a ref, otherwise the commits will be
+ # removed, and for non-nobranch, we truncate the branch to our
+ # srcrev, to avoid keeping unnecessary history beyond that.
+ branch = ud.branches[name]
+ if ud.nobranch:
+ ref = "refs/shallow/%s" % name
+ elif ud.bareclone:
+ ref = "refs/heads/%s" % branch
+ else:
+ ref = "refs/remotes/origin/%s" % branch
+
+ shallow_branches.append(ref)
+ runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest)
+
+ # Map srcrev+depths to revisions
+ parsed_depths = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join(to_parse)), d, workdir=dest)
+
+ # Resolve specified revisions
+ parsed_revs = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join('"%s^{}"' % r for r in ud.shallow_revs)), d, workdir=dest)
+ shallow_revisions = parsed_depths.splitlines() + parsed_revs.splitlines()
+
+ # Apply extra ref wildcards
+ all_refs = runfetchcmd('%s for-each-ref "--format=%%(refname)"' % ud.basecmd,
+ d, workdir=dest).splitlines()
+ for r in ud.shallow_extra_refs:
+ if not ud.bareclone:
+ r = r.replace('refs/heads/', 'refs/remotes/origin/')
+
+ if '*' in r:
+ matches = filter(lambda a: fnmatch.fnmatchcase(a, r), all_refs)
+ shallow_branches.extend(matches)
+ else:
+ shallow_branches.append(r)
+
+ # Make the repository shallow
+ shallow_cmd = [self.make_shallow_path, '-s']
+ for b in shallow_branches:
+ shallow_cmd.append('-r')
+ shallow_cmd.append(b)
+ shallow_cmd.extend(shallow_revisions)
+ runfetchcmd(subprocess.list2cmdline(shallow_cmd), d, workdir=dest)
+
+ def unpack(self, ud, destdir, d):
+ """ unpack the downloaded src to destdir"""
+
+ subdir = ud.parm.get("subpath", "")
+ if subdir != "":
+ readpathspec = ":%s" % subdir
+ def_destsuffix = "%s/" % os.path.basename(subdir.rstrip('/'))
+ else:
+ readpathspec = ""
+ def_destsuffix = "git/"
+
+ destsuffix = ud.parm.get("destsuffix", def_destsuffix)
+ destdir = ud.destdir = os.path.join(destdir, destsuffix)
+ if os.path.exists(destdir):
+ bb.utils.prunedir(destdir)
+
+ if ud.shallow and (not os.path.exists(ud.clonedir) or self.need_update(ud, d)):
+ bb.utils.mkdirhier(destdir)
+ runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=destdir)
+ else:
+ runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, destdir), d)
+
+ repourl = self._get_repo_url(ud)
+ runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, repourl), d, workdir=destdir)
+ if not ud.nocheckout:
+ if subdir != "":
+ runfetchcmd("%s read-tree %s%s" % (ud.basecmd, ud.revisions[ud.names[0]], readpathspec), d,
+ workdir=destdir)
+ runfetchcmd("%s checkout-index -q -f -a" % ud.basecmd, d, workdir=destdir)
+ elif not ud.nobranch:
+ branchname = ud.branches[ud.names[0]]
+ runfetchcmd("%s checkout -B %s %s" % (ud.basecmd, branchname, \
+ ud.revisions[ud.names[0]]), d, workdir=destdir)
+ runfetchcmd("%s branch %s --set-upstream-to origin/%s" % (ud.basecmd, branchname, \
+ branchname), d, workdir=destdir)
+ else:
+ runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revisions[ud.names[0]]), d, workdir=destdir)
+
+ return True
+
+ def clean(self, ud, d):
+ """ clean the git directory """
+
+ bb.utils.remove(ud.localpath, True)
+ bb.utils.remove(ud.fullmirror)
+ bb.utils.remove(ud.fullmirror + ".done")
+
+ def supports_srcrev(self):
+ return True
+
+ def _contains_ref(self, ud, d, name, wd):
+ cmd = ""
+ if ud.nobranch:
+ cmd = "%s log --pretty=oneline -n 1 %s -- 2> /dev/null | wc -l" % (
+ ud.basecmd, ud.revisions[name])
+ else:
+ cmd = "%s branch --contains %s --list %s 2> /dev/null | wc -l" % (
+ ud.basecmd, ud.revisions[name], ud.branches[name])
+ try:
+ output = runfetchcmd(cmd, d, quiet=True, workdir=wd)
+ except bb.fetch2.FetchError:
+ return False
+ if len(output.split()) > 1:
+            raise bb.fetch2.FetchError("The command '%s' gave output with more than one line unexpectedly, output: '%s'" % (cmd, output))
+ return output.split()[0] != "0"
+
+ def _get_repo_url(self, ud):
+ """
+ Return the repository URL
+ """
+ if ud.user:
+ username = ud.user + '@'
+ else:
+ username = ""
+ return "%s://%s%s%s" % (ud.proto, username, ud.host, ud.path)
+
+ def _revision_key(self, ud, d, name):
+ """
+ Return a unique key for the url
+ """
+ return "git:" + ud.host + ud.path.replace('/', '.') + ud.unresolvedrev[name]
+
+ def _lsremote(self, ud, d, search):
+ """
+ Run git ls-remote with the specified search string
+ """
+ # Prevent recursion e.g. in OE if SRCPV is in PV, PV is in WORKDIR,
+ # and WORKDIR is in PATH (as a result of RSS), our call to
+ # runfetchcmd() exports PATH so this function will get called again (!)
+        # In this scenario the return value of the function isn't actually
+        # important - WORKDIR isn't needed in PATH to call git ls-remote
+ # anyway.
+ if d.getVar('_BB_GIT_IN_LSREMOTE', False):
+ return ''
+ d.setVar('_BB_GIT_IN_LSREMOTE', '1')
+ try:
+ repourl = self._get_repo_url(ud)
+ cmd = "%s ls-remote %s %s" % \
+ (ud.basecmd, repourl, search)
+ if ud.proto.lower() != 'file':
+ bb.fetch2.check_network_access(d, cmd, repourl)
+ output = runfetchcmd(cmd, d, True)
+ if not output:
+ raise bb.fetch2.FetchError("The command %s gave empty output unexpectedly" % cmd, ud.url)
+ finally:
+ d.delVar('_BB_GIT_IN_LSREMOTE')
+ return output
+
+ def _latest_revision(self, ud, d, name):
+ """
+ Compute the HEAD revision for the url
+ """
+ output = self._lsremote(ud, d, "")
+        # Tags of the form ^{} may not work, need to fall back to the other form
+ if ud.unresolvedrev[name][:5] == "refs/" or ud.usehead:
+ head = ud.unresolvedrev[name]
+ tag = ud.unresolvedrev[name]
+ else:
+ head = "refs/heads/%s" % ud.unresolvedrev[name]
+ tag = "refs/tags/%s" % ud.unresolvedrev[name]
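+        # Illustrative: an unresolved rev "v1.0" is searched for as
+        # refs/heads/v1.0, then refs/tags/v1.0^{}, then refs/tags/v1.0.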
+ for s in [head, tag + "^{}", tag]:
+ for l in output.strip().split('\n'):
+ sha1, ref = l.split()
+ if s == ref:
+ return sha1
+        raise bb.fetch2.FetchError("Unable to resolve '%s' in the ls-remote output of upstream git repository %s" % \
+                (ud.unresolvedrev[name], ud.host+ud.path))
+
+ def latest_versionstring(self, ud, d):
+ """
+        Compute the latest release name like "x.y.z" in "x.y.z+gitHASH"
+ by searching through the tags output of ls-remote, comparing
+ versions and returning the highest match.
+ """
+ pupver = ('', '')
+
+        tagregex = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or r"(?P<pver>([0-9][\.|_]?)+)")
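+        # e.g. the default regex extracts "1_2_3" from a tag "v1_2_3"; the
+        # underscores are normalised to dots below (illustrative).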
+ try:
+ output = self._lsremote(ud, d, "refs/tags/*")
+ except (bb.fetch2.FetchError, bb.fetch2.NetworkAccess) as e:
+ bb.note("Could not list remote: %s" % str(e))
+ return pupver
+
+ verstring = ""
+ revision = ""
+ for line in output.split("\n"):
+ if not line:
+ break
+
+ tag_head = line.split("/")[-1]
+            # Ignore non-release tags
+ m = re.search("(alpha|beta|rc|final)+", tag_head)
+ if m:
+ continue
+
+ # search for version in the line
+ tag = tagregex.search(tag_head)
+            if tag is None:
+ continue
+
+ tag = tag.group('pver')
+ tag = tag.replace("_", ".")
+
+ if verstring and bb.utils.vercmp(("0", tag, ""), ("0", verstring, "")) < 0:
+ continue
+
+ verstring = tag
+ revision = line.split()[0]
+ pupver = (verstring, revision)
+
+ return pupver
+
+ def _build_revision(self, ud, d, name):
+ return ud.revisions[name]
+
+ def gitpkgv_revision(self, ud, d, name):
+ """
+ Return a sortable revision number by counting commits in the history
+        Based on gitpkgv.bbclass in meta-openembedded
+ """
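+        # Illustrative: 1234 commits up to rev "0123456..." yield "1234+0123456".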
+ rev = self._build_revision(ud, d, name)
+ localpath = ud.localpath
+ rev_file = os.path.join(localpath, "oe-gitpkgv_" + rev)
+ if not os.path.exists(localpath):
+ commits = None
+ else:
+            if not os.path.exists(rev_file) or not os.path.getsize(rev_file):
+                from pipes import quote
+                commits = bb.fetch2.runfetchcmd(
+                        "git rev-list %s -- | wc -l" % quote(rev),
+                        d, quiet=True).strip().lstrip('0')
+                if commits:
+                    with open(rev_file, "w") as f:
+                        f.write("%d\n" % int(commits))
+            else:
+                with open(rev_file, "r") as f:
+                    commits = f.readline(128).strip()
+ if commits:
+ return False, "%s+%s" % (commits, rev[:7])
+ else:
+ return True, str(rev)
+
+ def checkstatus(self, fetch, ud, d):
+ try:
+ self._lsremote(ud, d, "")
+ return True
+ except bb.fetch2.FetchError:
+ return False
diff --git a/poky/bitbake/lib/bb/fetch2/gitannex.py b/poky/bitbake/lib/bb/fetch2/gitannex.py
new file mode 100644
index 000000000..a9b69caab
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/gitannex.py
@@ -0,0 +1,91 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' git annex implementation
+"""
+
+# Copyright (C) 2014 Otavio Salvador
+# Copyright (C) 2014 O.S. Systems Software LTDA.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import bb
+from bb.fetch2.git import Git
+from bb.fetch2 import runfetchcmd
+from bb.fetch2 import logger
+
+class GitANNEX(Git):
+ def supports(self, ud, d):
+ """
+ Check to see if a given url can be fetched with git.
+ """
+ return ud.type in ['gitannex']
+
+ def urldata_init(self, ud, d):
+ super(GitANNEX, self).urldata_init(ud, d)
+ if ud.shallow:
+ ud.shallow_extra_refs += ['refs/heads/git-annex', 'refs/heads/synced/*']
+
+ def uses_annex(self, ud, d, wd):
+ for name in ud.names:
+ try:
+ runfetchcmd("%s rev-list git-annex" % (ud.basecmd), d, quiet=True, workdir=wd)
+ return True
+            except bb.fetch2.FetchError:
+ pass
+
+ return False
+
+ def update_annex(self, ud, d, wd):
+ try:
+ runfetchcmd("%s annex get --all" % (ud.basecmd), d, quiet=True, workdir=wd)
+        except bb.fetch2.FetchError:
+ return False
+ runfetchcmd("chmod u+w -R %s/annex" % (ud.clonedir), d, quiet=True, workdir=wd)
+
+ return True
+
+ def download(self, ud, d):
+ Git.download(self, ud, d)
+
+ if not ud.shallow or ud.localpath != ud.fullshallow:
+ if self.uses_annex(ud, d, ud.clonedir):
+ self.update_annex(ud, d, ud.clonedir)
+
+ def clone_shallow_local(self, ud, dest, d):
+ super(GitANNEX, self).clone_shallow_local(ud, dest, d)
+
+ try:
+ runfetchcmd("%s annex init" % ud.basecmd, d, workdir=dest)
+        except bb.fetch2.FetchError:
+ pass
+
+ if self.uses_annex(ud, d, dest):
+ runfetchcmd("%s annex get" % ud.basecmd, d, workdir=dest)
+ runfetchcmd("chmod u+w -R %s/.git/annex" % (dest), d, quiet=True, workdir=dest)
+
+ def unpack(self, ud, destdir, d):
+ Git.unpack(self, ud, destdir, d)
+
+ try:
+ runfetchcmd("%s annex init" % (ud.basecmd), d, workdir=ud.destdir)
+        except bb.fetch2.FetchError:
+ pass
+
+ annex = self.uses_annex(ud, d, ud.destdir)
+ if annex:
+ runfetchcmd("%s annex get" % (ud.basecmd), d, workdir=ud.destdir)
+ runfetchcmd("chmod u+w -R %s/.git/annex" % (ud.destdir), d, quiet=True, workdir=ud.destdir)
+
diff --git a/poky/bitbake/lib/bb/fetch2/gitsm.py b/poky/bitbake/lib/bb/fetch2/gitsm.py
new file mode 100644
index 000000000..0aff1008e
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/gitsm.py
@@ -0,0 +1,135 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' git submodules implementation
+
+Inherits from and extends the Git fetcher to retrieve submodules of a git repository
+after cloning.
+
+SRC_URI = "gitsm://<see Git fetcher for syntax>"
+
+See the Git fetcher, git://, for usage documentation.
+
+NOTE: Switching a SRC_URI from "git://" to "gitsm://" requires a clean of your recipe.
+
+"""
+
+# Copyright (C) 2013 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import bb
+from bb.fetch2.git import Git
+from bb.fetch2 import runfetchcmd
+from bb.fetch2 import logger
+
+class GitSM(Git):
+ def supports(self, ud, d):
+ """
+ Check to see if a given url can be fetched with git.
+ """
+ return ud.type in ['gitsm']
+
+ def uses_submodules(self, ud, d, wd):
+ for name in ud.names:
+ try:
+ runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True, workdir=wd)
+ return True
+            except bb.fetch2.FetchError:
+ pass
+ return False
+
+ def _set_relative_paths(self, repopath):
+ """
+ Fix submodule paths to be relative instead of absolute,
+ so that when we move the repo it doesn't break
+ (In Git 1.7.10+ this is done automatically)
+ """
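+        # Illustrative: for a module "ext/lib", an absolute "gitdir:" line is
+        # rewritten to "../../.git/modules/ext/lib", i.e. module.count('/') + 1
+        # levels up from the submodule directory.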
+ submodules = []
+ with open(os.path.join(repopath, '.gitmodules'), 'r') as f:
+ for line in f.readlines():
+ if line.startswith('[submodule'):
+ submodules.append(line.split('"')[1])
+
+ for module in submodules:
+ repo_conf = os.path.join(repopath, module, '.git')
+ if os.path.exists(repo_conf):
+ with open(repo_conf, 'r') as f:
+ lines = f.readlines()
+ newpath = ''
+ for i, line in enumerate(lines):
+ if line.startswith('gitdir:'):
+ oldpath = line.split(': ')[-1].rstrip()
+ if oldpath.startswith('/'):
+ newpath = '../' * (module.count('/') + 1) + '.git/modules/' + module
+ lines[i] = 'gitdir: %s\n' % newpath
+ break
+ if newpath:
+ with open(repo_conf, 'w') as f:
+ for line in lines:
+ f.write(line)
+
+ repo_conf2 = os.path.join(repopath, '.git', 'modules', module, 'config')
+ if os.path.exists(repo_conf2):
+ with open(repo_conf2, 'r') as f:
+ lines = f.readlines()
+ newpath = ''
+ for i, line in enumerate(lines):
+ if line.lstrip().startswith('worktree = '):
+ oldpath = line.split(' = ')[-1].rstrip()
+ if oldpath.startswith('/'):
+ newpath = '../' * (module.count('/') + 3) + module
+ lines[i] = '\tworktree = %s\n' % newpath
+ break
+ if newpath:
+ with open(repo_conf2, 'w') as f:
+ for line in lines:
+ f.write(line)
+
+ def update_submodules(self, ud, d):
+ # We have to convert bare -> full repo, do the submodule bit, then convert back
+ tmpclonedir = ud.clonedir + ".tmp"
+ gitdir = tmpclonedir + os.sep + ".git"
+ bb.utils.remove(tmpclonedir, True)
+ os.mkdir(tmpclonedir)
+ os.rename(ud.clonedir, gitdir)
+ runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*true/bare = false/'", d)
+ runfetchcmd(ud.basecmd + " reset --hard", d, workdir=tmpclonedir)
+ runfetchcmd(ud.basecmd + " checkout -f " + ud.revisions[ud.names[0]], d, workdir=tmpclonedir)
+ runfetchcmd(ud.basecmd + " submodule update --init --recursive", d, workdir=tmpclonedir)
+ self._set_relative_paths(tmpclonedir)
+ runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*false/bare = true/'", d, workdir=tmpclonedir)
+        os.rename(gitdir, ud.clonedir)
+ bb.utils.remove(tmpclonedir, True)
+
+ def download(self, ud, d):
+ Git.download(self, ud, d)
+
+ if not ud.shallow or ud.localpath != ud.fullshallow:
+ submodules = self.uses_submodules(ud, d, ud.clonedir)
+ if submodules:
+ self.update_submodules(ud, d)
+
+ def clone_shallow_local(self, ud, dest, d):
+ super(GitSM, self).clone_shallow_local(ud, dest, d)
+
+ runfetchcmd('cp -fpPRH "%s/modules" "%s/"' % (ud.clonedir, os.path.join(dest, '.git')), d)
+
+ def unpack(self, ud, destdir, d):
+ Git.unpack(self, ud, destdir, d)
+
+ if self.uses_submodules(ud, d, ud.destdir):
+ runfetchcmd(ud.basecmd + " checkout " + ud.revisions[ud.names[0]], d, workdir=ud.destdir)
+ runfetchcmd(ud.basecmd + " submodule update --init --recursive", d, workdir=ud.destdir)
diff --git a/poky/bitbake/lib/bb/fetch2/hg.py b/poky/bitbake/lib/bb/fetch2/hg.py
new file mode 100644
index 000000000..d0857e63f
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/hg.py
@@ -0,0 +1,270 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementation for mercurial DRCS (hg).
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2004 Marcin Juszkiewicz
+# Copyright (C) 2007 Robert Schuster
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import os
+import sys
+import logging
+import bb
+import errno
+from bb.fetch2 import FetchMethod
+from bb.fetch2 import FetchError
+from bb.fetch2 import MissingParameterError
+from bb.fetch2 import runfetchcmd
+from bb.fetch2 import logger
+
+class Hg(FetchMethod):
+ """Class to fetch from mercurial repositories"""
+ def supports(self, ud, d):
+ """
+ Check to see if a given url can be fetched with mercurial.
+ """
+ return ud.type in ['hg']
+
+ def supports_checksum(self, urldata):
+ """
+ Don't require checksums for local archives created from
+ repository checkouts.
+ """
+ return False
+
+ def urldata_init(self, ud, d):
+ """
+ init hg specific variable within url data
+ """
+        if "module" not in ud.parm:
+ raise MissingParameterError('module', ud.url)
+
+ ud.module = ud.parm["module"]
+
+ if 'protocol' in ud.parm:
+ ud.proto = ud.parm['protocol']
+ elif not ud.host:
+ ud.proto = 'file'
+ else:
+ ud.proto = "hg"
+
+ ud.setup_revisions(d)
+
+ if 'rev' in ud.parm:
+ ud.revision = ud.parm['rev']
+ elif not ud.revision:
+ ud.revision = self.latest_revision(ud, d)
+
+ # Create paths to mercurial checkouts
+ hgsrcname = '%s_%s_%s' % (ud.module.replace('/', '.'), \
+ ud.host, ud.path.replace('/', '.'))
+ mirrortarball = 'hg_%s.tar.gz' % hgsrcname
+ ud.fullmirror = os.path.join(d.getVar("DL_DIR"), mirrortarball)
+ ud.mirrortarballs = [mirrortarball]
+
+ hgdir = d.getVar("HGDIR") or (d.getVar("DL_DIR") + "/hg/")
+ ud.pkgdir = os.path.join(hgdir, hgsrcname)
+ ud.moddir = os.path.join(ud.pkgdir, ud.module)
+ ud.localfile = ud.moddir
+ ud.basecmd = d.getVar("FETCHCMD_hg") or "/usr/bin/env hg"
+
+ ud.write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS")
+
+ def need_update(self, ud, d):
+ revTag = ud.parm.get('rev', 'tip')
+ if revTag == "tip":
+ return True
+ if not os.path.exists(ud.localpath):
+ return True
+ return False
+
+ def try_premirror(self, ud, d):
+ # If we don't do this, updating an existing checkout with only premirrors
+ # is not possible
+ if d.getVar("BB_FETCH_PREMIRRORONLY") is not None:
+ return True
+ if os.path.exists(ud.moddir):
+ return False
+ return True
+
+ def _buildhgcommand(self, ud, d, command):
+ """
+ Build up an hg commandline based on ud
+        command is "fetch", "pull", "update" or "info"
+ """
+
+ proto = ud.parm.get('protocol', 'http')
+
+ host = ud.host
+ if proto == "file":
+ host = "/"
+ ud.host = "localhost"
+
+ if not ud.user:
+ hgroot = host + ud.path
+ else:
+ if ud.pswd:
+ hgroot = ud.user + ":" + ud.pswd + "@" + host + ud.path
+ else:
+ hgroot = ud.user + "@" + host + ud.path
+
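+        # Illustrative: proto "http", hgroot "hg.example.com/repo" and module
+        # "mod" would give a clone command of roughly:
+        #   hg clone http://hg.example.com/repo/mod mod
+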
+ if command == "info":
+ return "%s identify -i %s://%s/%s" % (ud.basecmd, proto, hgroot, ud.module)
+
+        options = []
+
+ # Don't specify revision for the fetch; clone the entire repo.
+ # This avoids an issue if the specified revision is a tag, because
+ # the tag actually exists in the specified revision + 1, so it won't
+ # be available when used in any successive commands.
+ if ud.revision and command != "fetch":
+ options.append("-r %s" % ud.revision)
+
+ if command == "fetch":
+ if ud.user and ud.pswd:
+ cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" clone %s %s://%s/%s %s" % (ud.basecmd, ud.user, ud.pswd, proto, " ".join(options), proto, hgroot, ud.module, ud.module)
+ else:
+ cmd = "%s clone %s %s://%s/%s %s" % (ud.basecmd, " ".join(options), proto, hgroot, ud.module, ud.module)
+ elif command == "pull":
+            # do not pass the options list; limiting pull to the rev causes the
+            # local repo not to contain it and the immediately following
+            # "update" command will crash
+ if ud.user and ud.pswd:
+ cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" pull" % (ud.basecmd, ud.user, ud.pswd, proto)
+ else:
+ cmd = "%s pull" % (ud.basecmd)
+ elif command == "update":
+ if ud.user and ud.pswd:
+ cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" update -C %s" % (ud.basecmd, ud.user, ud.pswd, proto, " ".join(options))
+ else:
+ cmd = "%s update -C %s" % (ud.basecmd, " ".join(options))
+ else:
+ raise FetchError("Invalid hg command %s" % command, ud.url)
+
+ return cmd
+
+ def download(self, ud, d):
+ """Fetch url"""
+
+ logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
+
+ # If the checkout doesn't exist and the mirror tarball does, extract it
+ if not os.path.exists(ud.pkgdir) and os.path.exists(ud.fullmirror):
+ bb.utils.mkdirhier(ud.pkgdir)
+ runfetchcmd("tar -xzf %s" % (ud.fullmirror), d, workdir=ud.pkgdir)
+
+ if os.access(os.path.join(ud.moddir, '.hg'), os.R_OK):
+            # Found the source; check whether we need to pull
+ updatecmd = self._buildhgcommand(ud, d, "update")
+ logger.debug(1, "Running %s", updatecmd)
+ try:
+ runfetchcmd(updatecmd, d, workdir=ud.moddir)
+ except bb.fetch2.FetchError:
+                # Running pull in the repo
+ pullcmd = self._buildhgcommand(ud, d, "pull")
+ logger.info("Pulling " + ud.url)
+ # update sources there
+ logger.debug(1, "Running %s", pullcmd)
+ bb.fetch2.check_network_access(d, pullcmd, ud.url)
+ runfetchcmd(pullcmd, d, workdir=ud.moddir)
+ try:
+ os.unlink(ud.fullmirror)
+ except OSError as exc:
+ if exc.errno != errno.ENOENT:
+ raise
+
+ # No source found, clone it.
+ if not os.path.exists(ud.moddir):
+ fetchcmd = self._buildhgcommand(ud, d, "fetch")
+ logger.info("Fetch " + ud.url)
+ # check out sources there
+ bb.utils.mkdirhier(ud.pkgdir)
+ logger.debug(1, "Running %s", fetchcmd)
+ bb.fetch2.check_network_access(d, fetchcmd, ud.url)
+ runfetchcmd(fetchcmd, d, workdir=ud.pkgdir)
+
+            # Even when we clone (fetch), we still need to update as hg's clone
+            # won't check out the specified revision if it's on a branch
+ updatecmd = self._buildhgcommand(ud, d, "update")
+ logger.debug(1, "Running %s", updatecmd)
+ runfetchcmd(updatecmd, d, workdir=ud.moddir)
+
+ def clean(self, ud, d):
+ """ Clean the hg dir """
+
+ bb.utils.remove(ud.localpath, True)
+ bb.utils.remove(ud.fullmirror)
+ bb.utils.remove(ud.fullmirror + ".done")
+
+ def supports_srcrev(self):
+ return True
+
+ def _latest_revision(self, ud, d, name):
+ """
+ Compute tip revision for the url
+ """
+ bb.fetch2.check_network_access(d, self._buildhgcommand(ud, d, "info"), ud.url)
+ output = runfetchcmd(self._buildhgcommand(ud, d, "info"), d)
+ return output.strip()
+
+ def _build_revision(self, ud, d, name):
+ return ud.revision
+
+ def _revision_key(self, ud, d, name):
+ """
+ Return a unique key for the url
+ """
+ return "hg:" + ud.moddir
+
+ def build_mirror_data(self, ud, d):
+ # Generate a mirror tarball if needed
+ if ud.write_tarballs == "1" and not os.path.exists(ud.fullmirror):
+            # it's possible that this symlink points to a read-only filesystem with PREMIRROR
+ if os.path.islink(ud.fullmirror):
+ os.unlink(ud.fullmirror)
+
+ logger.info("Creating tarball of hg repository")
+ runfetchcmd("tar -czf %s %s" % (ud.fullmirror, ud.module), d, workdir=ud.pkgdir)
+ runfetchcmd("touch %s.done" % (ud.fullmirror), d, workdir=ud.pkgdir)
+
+ def localpath(self, ud, d):
+ return ud.pkgdir
+
+ def unpack(self, ud, destdir, d):
+ """
+ Make a local clone or export for the url
+ """
+
+ revflag = "-r %s" % ud.revision
+ subdir = ud.parm.get("destsuffix", ud.module)
+ codir = "%s/%s" % (destdir, subdir)
+
+ scmdata = ud.parm.get("scmdata", "")
+ if scmdata != "nokeep":
+ if not os.access(os.path.join(codir, '.hg'), os.R_OK):
+ logger.debug(2, "Unpack: creating new hg repository in '" + codir + "'")
+ runfetchcmd("%s init %s" % (ud.basecmd, codir), d)
+ logger.debug(2, "Unpack: updating source in '" + codir + "'")
+ runfetchcmd("%s pull %s" % (ud.basecmd, ud.moddir), d, workdir=codir)
+ runfetchcmd("%s up -C %s" % (ud.basecmd, revflag), d, workdir=codir)
+ else:
+ logger.debug(2, "Unpack: extracting source to '" + codir + "'")
+ runfetchcmd("%s archive -t files %s %s" % (ud.basecmd, revflag, codir), d, workdir=ud.moddir)
diff --git a/poky/bitbake/lib/bb/fetch2/local.py b/poky/bitbake/lib/bb/fetch2/local.py
new file mode 100644
index 000000000..a114ac12e
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/local.py
@@ -0,0 +1,119 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementations
+
+Classes for obtaining upstream sources for the
+BitBake build tools.
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import os
+import urllib.request, urllib.parse, urllib.error
+import bb
+import bb.utils
+from bb.fetch2 import FetchMethod, FetchError
+from bb.fetch2 import logger
+
+class Local(FetchMethod):
+ def supports(self, urldata, d):
+ """
+ Check to see if a given url represents a local fetch.
+ """
+ return urldata.type in ['file']
+
+ def urldata_init(self, ud, d):
+ # We don't set localfile as for this fetcher the file is already local!
+ ud.decodedurl = urllib.parse.unquote(ud.url.split("://")[1].split(";")[0])
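+        # Illustrative: a URL "file://0001-fix.patch;striplevel=1" decodes to
+        # "0001-fix.patch"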
+ ud.basename = os.path.basename(ud.decodedurl)
+ ud.basepath = ud.decodedurl
+ ud.needdonestamp = False
+ return
+
+ def localpath(self, urldata, d):
+ """
+ Return the local filename of a given url assuming a successful fetch.
+ """
+ return self.localpaths(urldata, d)[-1]
+
+ def localpaths(self, urldata, d):
+ """
+        Return the local filenames searched for a given url; the last entry is
+        the path a successful fetch would use.
+ """
+ searched = []
+ path = urldata.decodedurl
+ newpath = path
+ if path[0] == "/":
+ return [path]
+ filespath = d.getVar('FILESPATH')
+ if filespath:
+ logger.debug(2, "Searching for %s in paths:\n %s" % (path, "\n ".join(filespath.split(":"))))
+ newpath, hist = bb.utils.which(filespath, path, history=True)
+ searched.extend(hist)
+ if (not newpath or not os.path.exists(newpath)) and path.find("*") != -1:
+            # For expressions using '*', the best we can do is take the first directory in FILESPATH that exists
+ newpath, hist = bb.utils.which(filespath, ".", history=True)
+ searched.extend(hist)
+ logger.debug(2, "Searching for %s in path: %s" % (path, newpath))
+ return searched
+ if not os.path.exists(newpath):
+ dldirfile = os.path.join(d.getVar("DL_DIR"), path)
+ logger.debug(2, "Defaulting to %s for %s" % (dldirfile, path))
+ bb.utils.mkdirhier(os.path.dirname(dldirfile))
+ searched.append(dldirfile)
+ return searched
+ return searched
+
+ def need_update(self, ud, d):
+ if ud.url.find("*") != -1:
+ return False
+ if os.path.exists(ud.localpath):
+ return False
+ return True
+
+ def download(self, urldata, d):
+ """Fetch urls (no-op for Local method)"""
+ # no need to fetch local files, we'll deal with them in place.
+ if self.supports_checksum(urldata) and not os.path.exists(urldata.localpath):
+ locations = []
+ filespath = d.getVar('FILESPATH')
+ if filespath:
+ locations = filespath.split(":")
+ locations.append(d.getVar("DL_DIR"))
+
+ msg = "Unable to find file " + urldata.url + " anywhere. The paths that were searched were:\n " + "\n ".join(locations)
+ raise FetchError(msg)
+
+ return True
+
+ def checkstatus(self, fetch, urldata, d):
+ """
+ Check the status of the url
+ """
+ if urldata.localpath.find("*") != -1:
+ logger.info("URL %s looks like a glob and was therefore not checked.", urldata.url)
+ return True
+ if os.path.exists(urldata.localpath):
+ return True
+ return False
+
+ def clean(self, urldata, d):
+ return
+
diff --git a/poky/bitbake/lib/bb/fetch2/npm.py b/poky/bitbake/lib/bb/fetch2/npm.py
new file mode 100644
index 000000000..730c346a9
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/npm.py
@@ -0,0 +1,309 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' NPM implementation
+
+The NPM fetcher is used to retrieve files from the npmjs repository
+
+Usage in the recipe:
+
+ SRC_URI = "npm://registry.npmjs.org/;name=${PN};version=${PV}"
+    Supported SRC_URI options are:
+
+ - name
+ - version
+
+ npm://registry.npmjs.org/${PN}/-/${PN}-${PV}.tgz would become npm://registry.npmjs.org;name=${PN};version=${PV}
+    The fetcher triggers off the existence of ud.localpath. If that exists and has the ".done" stamp, the fetch is assumed to be good/done
+
+"""
+
+import os
+import sys
+import urllib.request, urllib.parse, urllib.error
+import json
+import subprocess
+import signal
+import bb
+from bb.fetch2 import FetchMethod
+from bb.fetch2 import FetchError
+from bb.fetch2 import ChecksumError
+from bb.fetch2 import runfetchcmd
+from bb.fetch2 import logger
+from bb.fetch2 import UnpackError
+from bb.fetch2 import ParameterError
+from distutils import spawn
+
+def subprocess_setup():
+ # Python installs a SIGPIPE handler by default. This is usually not what
+ # non-Python subprocesses expect.
+ # SIGPIPE errors are known issues with gzip/bash
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+class Npm(FetchMethod):
+
+ """Class to fetch urls via 'npm'"""
+ def init(self, d):
+ pass
+
+ def supports(self, ud, d):
+ """
+ Check to see if a given url can be fetched with npm
+ """
+ return ud.type in ['npm']
+
+ def debug(self, msg):
+ logger.debug(1, "NpmFetch: %s", msg)
+
+ def clean(self, ud, d):
+ logger.debug(2, "Calling cleanup %s" % ud.pkgname)
+ bb.utils.remove(ud.localpath, False)
+ bb.utils.remove(ud.pkgdatadir, True)
+ bb.utils.remove(ud.fullmirror, False)
+
+ def urldata_init(self, ud, d):
+ """
+ init NPM specific variable within url data
+ """
+ if 'downloadfilename' in ud.parm:
+ ud.basename = ud.parm['downloadfilename']
+ else:
+ ud.basename = os.path.basename(ud.path)
+
+        # can't call it ud.name otherwise fetcher base class will start doing sha1 stuff
+ # TODO: find a way to get an sha1/sha256 manifest of pkg & all deps
+ ud.pkgname = ud.parm.get("name", None)
+ if not ud.pkgname:
+ raise ParameterError("NPM fetcher requires a name parameter", ud.url)
+ ud.version = ud.parm.get("version", None)
+ if not ud.version:
+ raise ParameterError("NPM fetcher requires a version parameter", ud.url)
+ ud.bbnpmmanifest = "%s-%s.deps.json" % (ud.pkgname, ud.version)
+ ud.bbnpmmanifest = ud.bbnpmmanifest.replace('/', '-')
+ ud.registry = "http://%s" % (ud.url.replace('npm://', '', 1).split(';'))[0]
+ prefixdir = "npm/%s" % ud.pkgname
+ ud.pkgdatadir = d.expand("${DL_DIR}/%s" % prefixdir)
+ if not os.path.exists(ud.pkgdatadir):
+ bb.utils.mkdirhier(ud.pkgdatadir)
+ ud.localpath = d.expand("${DL_DIR}/npm/%s" % ud.bbnpmmanifest)
+
+ self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -O -t 2 -T 30 -nv --passive-ftp --no-check-certificate "
+ ud.prefixdir = prefixdir
+
+ ud.write_tarballs = ((d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0") != "0")
+ mirrortarball = 'npm_%s-%s.tar.xz' % (ud.pkgname, ud.version)
+ mirrortarball = mirrortarball.replace('/', '-')
+ ud.fullmirror = os.path.join(d.getVar("DL_DIR"), mirrortarball)
+ ud.mirrortarballs = [mirrortarball]
+
+ def need_update(self, ud, d):
+ if os.path.exists(ud.localpath):
+ return False
+ return True
+
+ def _runwget(self, ud, d, command, quiet):
+ logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command))
+ bb.fetch2.check_network_access(d, command, ud.url)
+ dldir = d.getVar("DL_DIR")
+ runfetchcmd(command, d, quiet, workdir=dldir)
+
+ def _unpackdep(self, ud, pkg, data, destdir, dldir, d):
+ file = data[pkg]['tgz']
+ logger.debug(2, "file to extract is %s" % file)
+ if file.endswith('.tgz') or file.endswith('.tar.gz') or file.endswith('.tar.Z'):
+ cmd = 'tar xz --strip 1 --no-same-owner --warning=no-unknown-keyword -f %s/%s' % (dldir, file)
+ else:
+            bb.fatal("Downloaded NPM package %s is not a tarball!" % file)
+
+ # Change to subdir before executing command
+ if not os.path.exists(destdir):
+ os.makedirs(destdir)
+ path = d.getVar('PATH')
+ if path:
+ cmd = "PATH=\"%s\" %s" % (path, cmd)
+ bb.note("Unpacking %s to %s/" % (file, destdir))
+ ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True, cwd=destdir)
+
+ if ret != 0:
+ raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), ud.url)
+
+ if 'deps' not in data[pkg]:
+ return
+ for dep in data[pkg]['deps']:
+ self._unpackdep(ud, dep, data[pkg]['deps'], "%s/node_modules/%s" % (destdir, dep), dldir, d)
+
+
+ def unpack(self, ud, destdir, d):
+ dldir = d.getVar("DL_DIR")
+ with open("%s/npm/%s" % (dldir, ud.bbnpmmanifest)) as datafile:
+ workobj = json.load(datafile)
+ dldir = "%s/%s" % (os.path.dirname(ud.localpath), ud.pkgname)
+
+ if 'subdir' in ud.parm:
+ unpackdir = '%s/%s' % (destdir, ud.parm.get('subdir'))
+ else:
+ unpackdir = '%s/npmpkg' % destdir
+
+ self._unpackdep(ud, ud.pkgname, workobj, unpackdir, dldir, d)
+
+ def _parse_view(self, output):
+ '''
+ Parse the output of npm view --json; the last JSON result
+ is assumed to be the one that we're interested in.
+ '''
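+        # Illustrative: if the output ends with two JSON objects, e.g.
+        #   {"a": 1}
+        #   {"b": 2}
+        # only the last one, {"b": 2}, is parsed and returned.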
+ pdata = None
+ outdeps = {}
+ datalines = []
+ bracelevel = 0
+ for line in output.splitlines():
+ if bracelevel:
+ datalines.append(line)
+ elif '{' in line:
+ datalines = []
+ datalines.append(line)
+ bracelevel = bracelevel + line.count('{') - line.count('}')
+ if datalines:
+ pdata = json.loads('\n'.join(datalines))
+ return pdata
+
+ def _getdependencies(self, pkg, data, version, d, ud, optional=False, fetchedlist=None):
+ if fetchedlist is None:
+ fetchedlist = []
+ pkgfullname = pkg
+ if version != '*' and not '/' in version:
+ pkgfullname += "@'%s'" % version
+ logger.debug(2, "Calling getdeps on %s" % pkg)
+ fetchcmd = "npm view %s --json --registry %s" % (pkgfullname, ud.registry)
+ output = runfetchcmd(fetchcmd, d, True)
+ pdata = self._parse_view(output)
+ if not pdata:
+ raise FetchError("The command '%s' returned no output" % fetchcmd)
+ if optional:
+ pkg_os = pdata.get('os', None)
+ if pkg_os:
+ if not isinstance(pkg_os, list):
+ pkg_os = [pkg_os]
+ blacklist = False
+ for item in pkg_os:
+ if item.startswith('!'):
+ blacklist = True
+ break
+ if (not blacklist and 'linux' not in pkg_os) or '!linux' in pkg_os:
+ logger.debug(2, "Skipping %s since it's incompatible with Linux" % pkg)
+ return
+ #logger.debug(2, "Output URL is %s - %s - %s" % (ud.basepath, ud.basename, ud.localfile))
+ outputurl = pdata['dist']['tarball']
+ data[pkg] = {}
+ data[pkg]['tgz'] = os.path.basename(outputurl)
+ if outputurl in fetchedlist:
+ return
+
+ self._runwget(ud, d, "%s --directory-prefix=%s %s" % (self.basecmd, ud.prefixdir, outputurl), False)
+ fetchedlist.append(outputurl)
+
+ dependencies = pdata.get('dependencies', {})
+ optionalDependencies = pdata.get('optionalDependencies', {})
+ dependencies.update(optionalDependencies)
+ depsfound = {}
+ optdepsfound = {}
+ data[pkg]['deps'] = {}
+ for dep in dependencies:
+ if dep in optionalDependencies:
+ optdepsfound[dep] = dependencies[dep]
+ else:
+ depsfound[dep] = dependencies[dep]
+ for dep, version in optdepsfound.items():
+ self._getdependencies(dep, data[pkg]['deps'], version, d, ud, optional=True, fetchedlist=fetchedlist)
+ for dep, version in depsfound.items():
+ self._getdependencies(dep, data[pkg]['deps'], version, d, ud, fetchedlist=fetchedlist)
+
+ def _getshrinkeddependencies(self, pkg, data, version, d, ud, lockdown, manifest, toplevel=True):
+        logger.debug(2, "NPM shrinkwrap data is %s" % data)
+ if toplevel:
+ name = data.get('name', None)
+ if name and name != pkg:
+ for obj in data.get('dependencies', []):
+ if obj == pkg:
+ self._getshrinkeddependencies(obj, data['dependencies'][obj], data['dependencies'][obj]['version'], d, ud, lockdown, manifest, False)
+ return
+ outputurl = "invalid"
+ if ('resolved' not in data) or (not data['resolved'].startswith('http')):
+ # will be the case for ${PN}
+ fetchcmd = "npm view %s@%s dist.tarball --registry %s" % (pkg, version, ud.registry)
+ logger.debug(2, "Found this matching URL: %s" % str(fetchcmd))
+ outputurl = runfetchcmd(fetchcmd, d, True)
+ else:
+ outputurl = data['resolved']
+ self._runwget(ud, d, "%s --directory-prefix=%s %s" % (self.basecmd, ud.prefixdir, outputurl), False)
+ manifest[pkg] = {}
+ manifest[pkg]['tgz'] = os.path.basename(outputurl).rstrip()
+ manifest[pkg]['deps'] = {}
+
+ if pkg in lockdown:
+ sha1_expected = lockdown[pkg][version]
+ sha1_data = bb.utils.sha1_file("npm/%s/%s" % (ud.pkgname, manifest[pkg]['tgz']))
+ if sha1_expected != sha1_data:
+ msg = "\nFile: '%s' has %s checksum %s when %s was expected" % (manifest[pkg]['tgz'], 'sha1', sha1_data, sha1_expected)
+ raise ChecksumError('Checksum mismatch!%s' % msg)
+ else:
+ logger.debug(2, "No lockdown data for %s@%s" % (pkg, version))
+
+ if 'dependencies' in data:
+ for obj in data['dependencies']:
+ logger.debug(2, "Found dep is %s" % str(obj))
+ self._getshrinkeddependencies(obj, data['dependencies'][obj], data['dependencies'][obj]['version'], d, ud, lockdown, manifest[pkg]['deps'], False)
+
+ def download(self, ud, d):
+ """Fetch url"""
+ jsondepobj = {}
+ shrinkobj = {}
+ lockdown = {}
+
+ if not os.listdir(ud.pkgdatadir) and os.path.exists(ud.fullmirror):
+ dest = d.getVar("DL_DIR")
+ bb.utils.mkdirhier(dest)
+ runfetchcmd("tar -xJf %s" % (ud.fullmirror), d, workdir=dest)
+ return
+
+ if ud.parm.get("noverify", None) != '1':
+ shwrf = d.getVar('NPM_SHRINKWRAP')
+ logger.debug(2, "NPM shrinkwrap file is %s" % shwrf)
+ if shwrf:
+ try:
+ with open(shwrf) as datafile:
+ shrinkobj = json.load(datafile)
+ except Exception as e:
+ raise FetchError('Error loading NPM_SHRINKWRAP file "%s" for %s: %s' % (shwrf, ud.pkgname, str(e)))
+ elif not ud.ignore_checksums:
+ logger.warning('Missing shrinkwrap file in NPM_SHRINKWRAP for %s, this will lead to unreliable builds!' % ud.pkgname)
+ lckdf = d.getVar('NPM_LOCKDOWN')
+ logger.debug(2, "NPM lockdown file is %s" % lckdf)
+ if lckdf:
+ try:
+ with open(lckdf) as datafile:
+ lockdown = json.load(datafile)
+ except Exception as e:
+ raise FetchError('Error loading NPM_LOCKDOWN file "%s" for %s: %s' % (lckdf, ud.pkgname, str(e)))
+ elif not ud.ignore_checksums:
+ logger.warning('Missing lockdown file in NPM_LOCKDOWN for %s, this will lead to unreproducible builds!' % ud.pkgname)
+
+ if ('name' not in shrinkobj):
+ self._getdependencies(ud.pkgname, jsondepobj, ud.version, d, ud)
+ else:
+ self._getshrinkeddependencies(ud.pkgname, shrinkobj, ud.version, d, ud, lockdown, jsondepobj)
+
+ with open(ud.localpath, 'w') as outfile:
+ json.dump(jsondepobj, outfile)
+
+ def build_mirror_data(self, ud, d):
+ # Generate a mirror tarball if needed
+ if ud.write_tarballs and not os.path.exists(ud.fullmirror):
+ # it's possible that this symlink points to read-only filesystem with PREMIRROR
+ if os.path.islink(ud.fullmirror):
+ os.unlink(ud.fullmirror)
+
+ dldir = d.getVar("DL_DIR")
+ logger.info("Creating tarball of npm data")
+ runfetchcmd("tar -cJf %s npm/%s npm/%s" % (ud.fullmirror, ud.bbnpmmanifest, ud.pkgname), d,
+ workdir=dldir)
+ runfetchcmd("touch %s.done" % (ud.fullmirror), d, workdir=dldir)
diff --git a/poky/bitbake/lib/bb/fetch2/osc.py b/poky/bitbake/lib/bb/fetch2/osc.py
new file mode 100644
index 000000000..2b4f7d9c1
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/osc.py
@@ -0,0 +1,132 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+Bitbake "Fetch" implementation for osc (Opensuse build service client).
+Based on the svn "Fetch" implementation.
+
+"""
+
+import os
+import sys
+import logging
+import bb
+from bb.fetch2 import FetchMethod
+from bb.fetch2 import FetchError
+from bb.fetch2 import MissingParameterError
+from bb.fetch2 import runfetchcmd
+from bb.fetch2 import logger
+
+class Osc(FetchMethod):
+ """Class to fetch a module or modules from Opensuse build server
+ repositories."""
+
+ def supports(self, ud, d):
+ """
+ Check to see if a given url can be fetched with osc.
+ """
+ return ud.type in ['osc']
+
+ def urldata_init(self, ud, d):
+ if not "module" in ud.parm:
+ raise MissingParameterError('module', ud.url)
+
+ ud.module = ud.parm["module"]
+
+ # Create paths to osc checkouts
+ relpath = self._strip_leading_slashes(ud.path)
+ ud.pkgdir = os.path.join(d.getVar('OSCDIR'), ud.host)
+ ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module)
+
+ if 'rev' in ud.parm:
+ ud.revision = ud.parm['rev']
+ else:
+ pv = d.getVar("PV", False)
+ rev = bb.fetch2.srcrev_internal_helper(ud, d)
+ if rev and rev != True:
+ ud.revision = rev
+ else:
+ ud.revision = ""
+
+ ud.localfile = d.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision))
+
+ def _buildosccommand(self, ud, d, command):
+ """
+        Build up an osc commandline based on ud
+        command is "fetch" or "update"
+ """
+
+ basecmd = d.expand('${FETCHCMD_osc}')
+
+        proto = ud.parm.get('protocol', 'osc')
+
+ options = []
+
+ config = "-c %s" % self.generate_config(ud, d)
+
+ if ud.revision:
+ options.append("-r %s" % ud.revision)
+
+ coroot = self._strip_leading_slashes(ud.path)
+
+ if command == "fetch":
+ osccmd = "%s %s co %s/%s %s" % (basecmd, config, coroot, ud.module, " ".join(options))
+ elif command == "update":
+ osccmd = "%s %s up %s" % (basecmd, config, " ".join(options))
+ else:
+ raise FetchError("Invalid osc command %s" % command, ud.url)
+
+ return osccmd
+
+ def download(self, ud, d):
+ """
+ Fetch url
+ """
+
+ logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
+
+ if os.access(os.path.join(d.getVar('OSCDIR'), ud.path, ud.module), os.R_OK):
+ oscupdatecmd = self._buildosccommand(ud, d, "update")
+ logger.info("Update "+ ud.url)
+ # update sources there
+ logger.debug(1, "Running %s", oscupdatecmd)
+ bb.fetch2.check_network_access(d, oscupdatecmd, ud.url)
+ runfetchcmd(oscupdatecmd, d, workdir=ud.moddir)
+ else:
+ oscfetchcmd = self._buildosccommand(ud, d, "fetch")
+ logger.info("Fetch " + ud.url)
+ # check out sources there
+ bb.utils.mkdirhier(ud.pkgdir)
+ logger.debug(1, "Running %s", oscfetchcmd)
+ bb.fetch2.check_network_access(d, oscfetchcmd, ud.url)
+ runfetchcmd(oscfetchcmd, d, workdir=ud.pkgdir)
+
+ # tar them up to a defined filename
+ runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d,
+ cleanup=[ud.localpath], workdir=os.path.join(ud.pkgdir + ud.path))
+
+ def supports_srcrev(self):
+ return False
+
+ def generate_config(self, ud, d):
+ """
+ Generate a .oscrc to be used for this run.
+ """
+
+ config_path = os.path.join(d.getVar('OSCDIR'), "oscrc")
+ if (os.path.exists(config_path)):
+ os.remove(config_path)
+
+ f = open(config_path, 'w')
+ f.write("[general]\n")
+ f.write("apisrv = %s\n" % ud.host)
+ f.write("scheme = http\n")
+ f.write("su-wrapper = su -c\n")
+ f.write("build-root = %s\n" % d.getVar('WORKDIR'))
+ f.write("urllist = %s\n" % d.getVar("OSCURLLIST"))
+ f.write("extra-pkgs = gzip\n")
+ f.write("\n")
+ f.write("[%s]\n" % ud.host)
+ f.write("user = %s\n" % ud.parm["user"])
+ f.write("pass = %s\n" % ud.parm["pswd"])
+ f.close()
+
+ return config_path
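
Pulling the parameters together from urldata_init() and generate_config() above
(module is required; user/pswd feed the generated oscrc), an illustrative
SRC_URI might look like this (host and project names hypothetical):

    SRC_URI = "osc://api.opensuse.org/home:myproject;module=mypackage;rev=5;user=myuser;pswd=mypass"
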
diff --git a/poky/bitbake/lib/bb/fetch2/perforce.py b/poky/bitbake/lib/bb/fetch2/perforce.py
new file mode 100644
index 000000000..3debad59f
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/perforce.py
@@ -0,0 +1,209 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementation for perforce
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2016 Kodak Alaris, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import os
+import logging
+import bb
+from bb.fetch2 import FetchMethod
+from bb.fetch2 import FetchError
+from bb.fetch2 import logger
+from bb.fetch2 import runfetchcmd
+
+class Perforce(FetchMethod):
+ """ Class to fetch from perforce repositories """
+ def supports(self, ud, d):
+ """ Check to see if a given url can be fetched with perforce. """
+ return ud.type in ['p4']
+
+ def urldata_init(self, ud, d):
+ """
+ Initialize perforce specific variables within url data. If P4CONFIG is
+ provided by the env, use it. If P4PORT is specified by the recipe, use
+ its values, which may override the settings in P4CONFIG.
+ """
+ ud.basecmd = d.getVar('FETCHCMD_p4')
+ if not ud.basecmd:
+ ud.basecmd = "/usr/bin/env p4"
+
+ ud.dldir = d.getVar('P4DIR')
+ if not ud.dldir:
+ ud.dldir = '%s/%s' % (d.getVar('DL_DIR'), 'p4')
+
+ path = ud.url.split('://')[1]
+ path = path.split(';')[0]
+        delim = path.find('@')
+ if delim != -1:
+ (ud.user, ud.pswd) = path.split('@')[0].split(':')
+ ud.path = path.split('@')[1]
+ else:
+ ud.path = path
+
+ ud.usingp4config = False
+ p4port = d.getVar('P4PORT')
+
+ if p4port:
+ logger.debug(1, 'Using recipe provided P4PORT: %s' % p4port)
+ ud.host = p4port
+ else:
+ logger.debug(1, 'Trying to use P4CONFIG to automatically set P4PORT...')
+ ud.usingp4config = True
+ p4cmd = '%s info | grep "Server address"' % ud.basecmd
+ bb.fetch2.check_network_access(d, p4cmd, ud.url)
+ ud.host = runfetchcmd(p4cmd, d, True)
+ ud.host = ud.host.split(': ')[1].strip()
+ logger.debug(1, 'Determined P4PORT to be: %s' % ud.host)
+ if not ud.host:
+ raise FetchError('Could not determine P4PORT from P4CONFIG')
+
+ if ud.path.find('/...') >= 0:
+ ud.pathisdir = True
+ else:
+ ud.pathisdir = False
+
+ cleanedpath = ud.path.replace('/...', '').replace('/', '.')
+ cleanedhost = ud.host.replace(':', '.')
+ ud.pkgdir = os.path.join(ud.dldir, cleanedhost, cleanedpath)
+
+ ud.setup_revisions(d)
+
+ ud.localfile = d.expand('%s_%s_%s.tar.gz' % (cleanedhost, cleanedpath, ud.revision))
+
+ def _buildp4command(self, ud, d, command, depot_filename=None):
+ """
+ Build a p4 commandline. Valid commands are "changes", "print", and
+ "files". depot_filename is the full path to the file in the depot
+ including the trailing '#rev' value.
+ """
+ p4opt = ""
+
+ if ud.user:
+ p4opt += ' -u "%s"' % (ud.user)
+
+ if ud.pswd:
+ p4opt += ' -P "%s"' % (ud.pswd)
+
+ if ud.host and not ud.usingp4config:
+ p4opt += ' -p %s' % (ud.host)
+
+ if hasattr(ud, 'revision') and ud.revision:
+ pathnrev = '%s@%s' % (ud.path, ud.revision)
+ else:
+ pathnrev = '%s' % (ud.path)
+
+ if depot_filename:
+ if ud.pathisdir: # Remove leading path to obtain filename
+ filename = depot_filename[len(ud.path)-1:]
+ else:
+ filename = depot_filename[depot_filename.rfind('/'):]
+ filename = filename[:filename.find('#')] # Remove trailing '#rev'
+
+ if command == 'changes':
+ p4cmd = '%s%s changes -m 1 //%s' % (ud.basecmd, p4opt, pathnrev)
+ elif command == 'print':
+            if depot_filename is not None:
+ p4cmd = '%s%s print -o "p4/%s" "%s"' % (ud.basecmd, p4opt, filename, depot_filename)
+ else:
+ raise FetchError('No depot file name provided to p4 %s' % command, ud.url)
+ elif command == 'files':
+ p4cmd = '%s%s files //%s' % (ud.basecmd, p4opt, pathnrev)
+ else:
+ raise FetchError('Invalid p4 command %s' % command, ud.url)
+
+ return p4cmd
+
+ def _p4listfiles(self, ud, d):
+ """
+ Return a list of the file names which are present in the depot using the
+ 'p4 files' command, including trailing '#rev' file revision indicator
+ """
+ p4cmd = self._buildp4command(ud, d, 'files')
+ bb.fetch2.check_network_access(d, p4cmd, ud.url)
+ p4fileslist = runfetchcmd(p4cmd, d, True)
+ p4fileslist = [f.rstrip() for f in p4fileslist.splitlines()]
+
+ if not p4fileslist:
+ raise FetchError('Unable to fetch listing of p4 files from %s@%s' % (ud.host, ud.path))
+
+ count = 0
+ filelist = []
+
+ for filename in p4fileslist:
+ item = filename.split(' - ')
+ lastaction = item[1].split()
+ logger.debug(1, 'File: %s Last Action: %s' % (item[0], lastaction[0]))
+ if lastaction[0] == 'delete':
+ continue
+ filelist.append(item[0])
+
+ return filelist
+
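
For reference, 'p4 files' emits lines shaped roughly like the following
(samples illustrative); the split on ' - ' above separates the depot path from
the last action, and the second line would be skipped by the 'delete' check:

    //depot/project/src/main.c#4 - edit change 12345 (text)
    //depot/project/old.c#7 - delete change 12000 (text)
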
+ def download(self, ud, d):
+ """ Get the list of files, fetch each one """
+ filelist = self._p4listfiles(ud, d)
+ if not filelist:
+ raise FetchError('No files found in depot %s@%s' % (ud.host, ud.path))
+
+ bb.utils.remove(ud.pkgdir, True)
+ bb.utils.mkdirhier(ud.pkgdir)
+
+ for afile in filelist:
+ p4fetchcmd = self._buildp4command(ud, d, 'print', afile)
+ bb.fetch2.check_network_access(d, p4fetchcmd, ud.url)
+ runfetchcmd(p4fetchcmd, d, workdir=ud.pkgdir)
+
+ runfetchcmd('tar -czf %s p4' % (ud.localpath), d, cleanup=[ud.localpath], workdir=ud.pkgdir)
+
+ def clean(self, ud, d):
+ """ Cleanup p4 specific files and dirs"""
+ bb.utils.remove(ud.localpath)
+ bb.utils.remove(ud.pkgdir, True)
+
+ def supports_srcrev(self):
+ return True
+
+ def _revision_key(self, ud, d, name):
+ """ Return a unique key for the url """
+ return 'p4:%s' % ud.pkgdir
+
+ def _latest_revision(self, ud, d, name):
+ """ Return the latest upstream scm revision number """
+ p4cmd = self._buildp4command(ud, d, "changes")
+ bb.fetch2.check_network_access(d, p4cmd, ud.url)
+ tip = runfetchcmd(p4cmd, d, True)
+
+ if not tip:
+ raise FetchError('Could not determine the latest perforce changelist')
+
+ tipcset = tip.split(' ')[1]
+ logger.debug(1, 'p4 tip found to be changelist %s' % tipcset)
+ return tipcset
+
+ def sortable_revision(self, ud, d, name):
+ """ Return a sortable revision number """
+ return False, self._build_revision(ud, d)
+
+ def _build_revision(self, ud, d):
+ return ud.revision
+
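
Given the URL parsing in urldata_init() above, an illustrative recipe fragment
might look like this (server and depot path hypothetical); note the server
address comes from P4PORT or P4CONFIG, not from the URL itself:

    P4PORT = "tcp:perforce.example.com:1666"
    SRC_URI = "p4://user:passwd@depot/project/..."
    SRCREV = "${AUTOREV}"
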
diff --git a/poky/bitbake/lib/bb/fetch2/repo.py b/poky/bitbake/lib/bb/fetch2/repo.py
new file mode 100644
index 000000000..c22d9b557
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/repo.py
@@ -0,0 +1,97 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake "Fetch" repo (git) implementation
+
+"""
+
+# Copyright (C) 2009 Tom Rini <trini@embeddedalley.com>
+#
+# Based on git.py which is:
+#Copyright (C) 2005 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import bb
+from bb.fetch2 import FetchMethod
+from bb.fetch2 import runfetchcmd
+from bb.fetch2 import logger
+
+class Repo(FetchMethod):
+ """Class to fetch a module or modules from repo (git) repositories"""
+ def supports(self, ud, d):
+ """
+ Check to see if a given url can be fetched with repo.
+ """
+ return ud.type in ["repo"]
+
+ def urldata_init(self, ud, d):
+ """
+ We don"t care about the git rev of the manifests repository, but
+ we do care about the manifest to use. The default is "default".
+ We also care about the branch or tag to be used. The default is
+ "master".
+ """
+
+ ud.proto = ud.parm.get('protocol', 'git')
+ ud.branch = ud.parm.get('branch', 'master')
+ ud.manifest = ud.parm.get('manifest', 'default.xml')
+ if not ud.manifest.endswith('.xml'):
+ ud.manifest += '.xml'
+
+ ud.localfile = d.expand("repo_%s%s_%s_%s.tar.gz" % (ud.host, ud.path.replace("/", "."), ud.manifest, ud.branch))
+
+ def download(self, ud, d):
+ """Fetch url"""
+
+ if os.access(os.path.join(d.getVar("DL_DIR"), ud.localfile), os.R_OK):
+ logger.debug(1, "%s already exists (or was stashed). Skipping repo init / sync.", ud.localpath)
+ return
+
+ gitsrcname = "%s%s" % (ud.host, ud.path.replace("/", "."))
+ repodir = d.getVar("REPODIR") or os.path.join(d.getVar("DL_DIR"), "repo")
+ codir = os.path.join(repodir, gitsrcname, ud.manifest)
+
+ if ud.user:
+ username = ud.user + "@"
+ else:
+ username = ""
+
+ repodir = os.path.join(codir, "repo")
+ bb.utils.mkdirhier(repodir)
+ if not os.path.exists(os.path.join(repodir, ".repo")):
+ bb.fetch2.check_network_access(d, "repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), ud.url)
+ runfetchcmd("repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), d, workdir=repodir)
+
+ bb.fetch2.check_network_access(d, "repo sync %s" % ud.url, ud.url)
+ runfetchcmd("repo sync", d, workdir=repodir)
+
+ scmdata = ud.parm.get("scmdata", "")
+ if scmdata == "keep":
+ tar_flags = ""
+ else:
+ tar_flags = "--exclude='.repo' --exclude='.git'"
+
+ # Create a cache
+ runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.join(".", "*") ), d, workdir=codir)
+
+ def supports_srcrev(self):
+ return False
+
+ def _build_revision(self, ud, d):
+ return ud.manifest
+
+ def _want_sortable_revision(self, ud, d):
+ return False
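
An illustrative SRC_URI for this fetcher, using the protocol/branch/manifest
parameters read in urldata_init() above (host and path hypothetical):

    SRC_URI = "repo://android.googlesource.com/platform/manifest;protocol=https;branch=master;manifest=default.xml"
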
diff --git a/poky/bitbake/lib/bb/fetch2/s3.py b/poky/bitbake/lib/bb/fetch2/s3.py
new file mode 100644
index 000000000..162928862
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/s3.py
@@ -0,0 +1,98 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementation for Amazon AWS S3.
+
+Class for fetching files from Amazon S3 using the AWS Command Line Interface.
+The aws tool must be correctly installed and configured prior to use.
+
+"""
+
+# Copyright (C) 2017, Andre McCurdy <armccurdy@gmail.com>
+#
+# Based in part on bb.fetch2.wget:
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import os
+import bb
+import urllib.request, urllib.parse, urllib.error
+from bb.fetch2 import FetchMethod
+from bb.fetch2 import FetchError
+from bb.fetch2 import runfetchcmd
+
+class S3(FetchMethod):
+ """Class to fetch urls via 'aws s3'"""
+
+ def supports(self, ud, d):
+ """
+ Check to see if a given url can be fetched with s3.
+ """
+ return ud.type in ['s3']
+
+ def recommends_checksum(self, urldata):
+ return True
+
+ def urldata_init(self, ud, d):
+ if 'downloadfilename' in ud.parm:
+ ud.basename = ud.parm['downloadfilename']
+ else:
+ ud.basename = os.path.basename(ud.path)
+
+ ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
+
+ ud.basecmd = d.getVar("FETCHCMD_s3") or "/usr/bin/env aws s3"
+
+ def download(self, ud, d):
+ """
+ Fetch urls
+ Assumes localpath was called first
+ """
+
+ cmd = '%s cp s3://%s%s %s' % (ud.basecmd, ud.host, ud.path, ud.localpath)
+ bb.fetch2.check_network_access(d, cmd, ud.url)
+ runfetchcmd(cmd, d)
+
+        # Additional sanity checks copied from the wget class (although there
+        # are no known issues which make these strictly required, treat the
+        # aws cli tool with a little healthy suspicion).
+
+ if not os.path.exists(ud.localpath):
+ raise FetchError("The aws cp command returned success for s3://%s%s but %s doesn't exist?!" % (ud.host, ud.path, ud.localpath))
+
+ if os.path.getsize(ud.localpath) == 0:
+ os.remove(ud.localpath)
+ raise FetchError("The aws cp command for s3://%s%s resulted in a zero size file?! Deleting and failing since this isn't right." % (ud.host, ud.path))
+
+ return True
+
+ def checkstatus(self, fetch, ud, d):
+ """
+ Check the status of a URL
+ """
+
+ cmd = '%s ls s3://%s%s' % (ud.basecmd, ud.host, ud.path)
+ bb.fetch2.check_network_access(d, cmd, ud.url)
+ output = runfetchcmd(cmd, d)
+
+ # "aws s3 ls s3://mybucket/foo" will exit with success even if the file
+ # is not found, so check output of the command to confirm success.
+
+ if not output:
+ raise FetchError("The aws ls command for s3://%s%s gave empty output" % (ud.host, ud.path))
+
+ return True
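
Since recommends_checksum() returns True above, a recipe would normally pair
the URL with a checksum; an illustrative fragment (bucket, path and digest all
hypothetical):

    SRC_URI = "s3://my-bucket/path/to/archive.tar.gz"
    SRC_URI[sha256sum] = "<real sha256 digest here>"
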
diff --git a/poky/bitbake/lib/bb/fetch2/sftp.py b/poky/bitbake/lib/bb/fetch2/sftp.py
new file mode 100644
index 000000000..81884a6aa
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/sftp.py
@@ -0,0 +1,125 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake SFTP Fetch implementation
+
+Class for fetching files via SFTP. It tries to adhere to the (now
+expired) IETF Internet Draft for "Uniform Resource Identifier (URI)
+Scheme for Secure File Transfer Protocol (SFTP) and Secure Shell
+(SSH)" (SECSH URI).
+
+It uses SFTP (so as to adhere to the SECSH URI specification). It only
+supports key-based authentication, not passwords. This class, unlike
+the SSH fetcher, does not support fetching a directory tree from the
+remote.
+
+ http://tools.ietf.org/html/draft-ietf-secsh-scp-sftp-ssh-uri-04
+ https://www.iana.org/assignments/uri-schemes/prov/sftp
+ https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13
+
+Please note that '/' is used as the host/path separator, and not ":"
+as you may be used to from the scp/sftp commands. You can use a
+~ (tilde) to specify a path relative to your home directory.
+(The /~user/ syntax, for specifying a path relative to another
+user's home directory, is not supported.) Note that the tilde must
+still follow the host/path separator ("/"). See examples below.
+
+Example SRC_URIs:
+
+SRC_URI = "sftp://host.example.com/dir/path.file.txt"
+
+A path relative to your home directory.
+
+SRC_URI = "sftp://host.example.com/~/dir/path.file.txt"
+
+You can also specify a username (specyfing password in the
+URI is not supported, use SSH keys to authenticate):
+
+SRC_URI = "sftp://user@host.example.com/dir/path.file.txt"
+
+"""
+
+# Copyright (C) 2013, Olof Johansson <olof.johansson@axis.com>
+#
+# Based in part on bb.fetch2.wget:
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import os
+import bb
+import urllib.request, urllib.parse, urllib.error
+from bb.fetch2 import URI
+from bb.fetch2 import FetchMethod
+from bb.fetch2 import runfetchcmd
+
+class SFTP(FetchMethod):
+ """Class to fetch urls via 'sftp'"""
+
+ def supports(self, ud, d):
+ """
+ Check to see if a given url can be fetched with sftp.
+ """
+ return ud.type in ['sftp']
+
+ def recommends_checksum(self, urldata):
+ return True
+
+ def urldata_init(self, ud, d):
+ if 'protocol' in ud.parm and ud.parm['protocol'] == 'git':
+ raise bb.fetch2.ParameterError(
+ "Invalid protocol - if you wish to fetch from a " +
+ "git repository using ssh, you need to use the " +
+ "git:// prefix with protocol=ssh", ud.url)
+
+ if 'downloadfilename' in ud.parm:
+ ud.basename = ud.parm['downloadfilename']
+ else:
+ ud.basename = os.path.basename(ud.path)
+
+ ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
+
+ def download(self, ud, d):
+ """Fetch urls"""
+
+ urlo = URI(ud.url)
+ basecmd = 'sftp -oBatchMode=yes'
+ port = ''
+ if urlo.port:
+ port = '-P %d' % urlo.port
+ urlo.port = None
+
+ dldir = d.getVar('DL_DIR')
+ lpath = os.path.join(dldir, ud.localfile)
+
+ user = ''
+ if urlo.userinfo:
+ user = urlo.userinfo + '@'
+
+ path = urlo.path
+
+        # Support URIs relative to the user's home directory, with
+ # the tilde syntax. (E.g. <sftp://example.com/~/foo.diff>).
+ if path[:3] == '/~/':
+ path = path[3:]
+
+ remote = '%s%s:%s' % (user, urlo.hostname, path)
+
+ cmd = '%s %s %s %s' % (basecmd, port, remote, lpath)
+
+ bb.fetch2.check_network_access(d, cmd, ud.url)
+ runfetchcmd(cmd, d)
+ return True
diff --git a/poky/bitbake/lib/bb/fetch2/ssh.py b/poky/bitbake/lib/bb/fetch2/ssh.py
new file mode 100644
index 000000000..6047ee417
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/ssh.py
@@ -0,0 +1,125 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+'''
+BitBake 'Fetch' implementations
+
+This implementation is for Secure Shell (SSH), and attempts to comply with the
+IETF secsh internet draft:
+ http://tools.ietf.org/wg/secsh/draft-ietf-secsh-scp-sftp-ssh-uri/
+
+ Currently does not support the sftp parameters, as this uses scp
+ Also does not support the 'fingerprint' connection parameter.
+
+    Please note that '/' is used as the host/path separator, not ':' as you
+    may be used to; also, '~' can be used to specify the user's HOME, but
+    again only after '/'.
+
+ Example SRC_URI:
+ SRC_URI = "ssh://user@host.example.com/dir/path/file.txt"
+ SRC_URI = "ssh://user@host.example.com/~/file.txt"
+'''
+
+# Copyright (C) 2006 OpenedHand Ltd.
+#
+#
+# Based in part on svk.py:
+# Copyright (C) 2006 Holger Hans Peter Freyther
+# Based on svn.py:
+# Copyright (C) 2003, 2004 Chris Larson
+# Based on functions from the base bb module:
+# Copyright 2003 Holger Schurig
+#
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import re, os
+from bb.fetch2 import FetchMethod
+from bb.fetch2 import FetchError
+from bb.fetch2 import logger
+from bb.fetch2 import runfetchcmd
+
+
+__pattern__ = re.compile(r'''
+ \s* # Skip leading whitespace
+ ssh:// # scheme
+ ( # Optional username/password block
+ (?P<user>\S+) # username
+ (:(?P<pass>\S+))? # colon followed by the password (optional)
+ )?
+ (?P<cparam>(;[^;]+)*)? # connection parameters block (optional)
+ @
+ (?P<host>\S+?) # non-greedy match of the host
+ (:(?P<port>[0-9]+))? # colon followed by the port (optional)
+ /
+ (?P<path>[^;]+) # path on the remote system, may be absolute or relative,
+ # and may include the use of '~' to reference the remote home
+ # directory
+ (?P<sparam>(;[^;]+)*)? # parameters block (optional)
+ $
+''', re.VERBOSE)
+
+class SSH(FetchMethod):
+ '''Class to fetch a module or modules via Secure Shell'''
+
+ def supports(self, urldata, d):
+        return __pattern__.match(urldata.url) is not None
+
+ def supports_checksum(self, urldata):
+ return False
+
+ def urldata_init(self, urldata, d):
+ if 'protocol' in urldata.parm and urldata.parm['protocol'] == 'git':
+ raise bb.fetch2.ParameterError(
+ "Invalid protocol - if you wish to fetch from a git " +
+ "repository using ssh, you need to use " +
+ "git:// prefix with protocol=ssh", urldata.url)
+ m = __pattern__.match(urldata.url)
+ path = m.group('path')
+ host = m.group('host')
+ urldata.localpath = os.path.join(d.getVar('DL_DIR'),
+ os.path.basename(os.path.normpath(path)))
+
+ def download(self, urldata, d):
+ dldir = d.getVar('DL_DIR')
+
+ m = __pattern__.match(urldata.url)
+ path = m.group('path')
+ host = m.group('host')
+ port = m.group('port')
+ user = m.group('user')
+ password = m.group('pass')
+
+ if port:
+ portarg = '-P %s' % port
+ else:
+ portarg = ''
+
+ if user:
+ fr = user
+ if password:
+ fr += ':%s' % password
+ fr += '@%s' % host
+ else:
+ fr = host
+ fr += ':%s' % path
+
+ cmd = 'scp -B -r %s %s %s/' % (
+ portarg,
+ fr,
+ dldir
+ )
+
+ bb.fetch2.check_network_access(d, cmd, urldata.url)
+
+ runfetchcmd(cmd, d)
+
diff --git a/poky/bitbake/lib/bb/fetch2/svn.py b/poky/bitbake/lib/bb/fetch2/svn.py
new file mode 100644
index 000000000..3f172eec9
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/svn.py
@@ -0,0 +1,193 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementation for svn.
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2004 Marcin Juszkiewicz
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import os
+import sys
+import logging
+import bb
+import re
+from bb.fetch2 import FetchMethod
+from bb.fetch2 import FetchError
+from bb.fetch2 import MissingParameterError
+from bb.fetch2 import runfetchcmd
+from bb.fetch2 import logger
+
+class Svn(FetchMethod):
+ """Class to fetch a module or modules from svn repositories"""
+ def supports(self, ud, d):
+ """
+ Check to see if a given url can be fetched with svn.
+ """
+ return ud.type in ['svn']
+
+ def urldata_init(self, ud, d):
+ """
+ init svn specific variable within url data
+ """
+ if not "module" in ud.parm:
+ raise MissingParameterError('module', ud.url)
+
+ ud.basecmd = d.getVar('FETCHCMD_svn')
+
+ ud.module = ud.parm["module"]
+
+ if not "path_spec" in ud.parm:
+ ud.path_spec = ud.module
+ else:
+ ud.path_spec = ud.parm["path_spec"]
+
+ # Create paths to svn checkouts
+ relpath = self._strip_leading_slashes(ud.path)
+ ud.pkgdir = os.path.join(d.expand('${SVNDIR}'), ud.host, relpath)
+ ud.moddir = os.path.join(ud.pkgdir, ud.module)
+
+ ud.setup_revisions(d)
+
+ if 'rev' in ud.parm:
+ ud.revision = ud.parm['rev']
+
+ ud.localfile = d.expand('%s_%s_%s_%s_.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision))
+
+ def _buildsvncommand(self, ud, d, command):
+ """
+ Build up an svn commandline based on ud
+ command is "fetch", "update", "info"
+ """
+
+ proto = ud.parm.get('protocol', 'svn')
+
+ svn_ssh = None
+ if proto == "svn+ssh" and "ssh" in ud.parm:
+ svn_ssh = ud.parm["ssh"]
+
+ svnroot = ud.host + ud.path
+
+ options = []
+
+ options.append("--no-auth-cache")
+
+ if ud.user:
+ options.append("--username %s" % ud.user)
+
+ if ud.pswd:
+ options.append("--password %s" % ud.pswd)
+
+ if command == "info":
+ svncmd = "%s info %s %s://%s/%s/" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module)
+ elif command == "log1":
+ svncmd = "%s log --limit 1 %s %s://%s/%s/" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module)
+ else:
+ suffix = ""
+ if ud.revision:
+ options.append("-r %s" % ud.revision)
+ suffix = "@%s" % (ud.revision)
+
+ if command == "fetch":
+ transportuser = ud.parm.get("transportuser", "")
+ svncmd = "%s co %s %s://%s%s/%s%s %s" % (ud.basecmd, " ".join(options), proto, transportuser, svnroot, ud.module, suffix, ud.path_spec)
+ elif command == "update":
+ svncmd = "%s update %s" % (ud.basecmd, " ".join(options))
+ else:
+ raise FetchError("Invalid svn command %s" % command, ud.url)
+
+ if svn_ssh:
+ svncmd = "SVN_SSH=\"%s\" %s" % (svn_ssh, svncmd)
+
+ return svncmd
+
+ def download(self, ud, d):
+ """Fetch url"""
+
+ logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
+
+ if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK):
+ svnupdatecmd = self._buildsvncommand(ud, d, "update")
+ logger.info("Update " + ud.url)
+            # We need to attempt to run svn upgrade first in case it's an older working copy format
+ try:
+ runfetchcmd(ud.basecmd + " upgrade", d, workdir=ud.moddir)
+ except FetchError:
+ pass
+ logger.debug(1, "Running %s", svnupdatecmd)
+ bb.fetch2.check_network_access(d, svnupdatecmd, ud.url)
+ runfetchcmd(svnupdatecmd, d, workdir=ud.moddir)
+ else:
+ svnfetchcmd = self._buildsvncommand(ud, d, "fetch")
+ logger.info("Fetch " + ud.url)
+ # check out sources there
+ bb.utils.mkdirhier(ud.pkgdir)
+ logger.debug(1, "Running %s", svnfetchcmd)
+ bb.fetch2.check_network_access(d, svnfetchcmd, ud.url)
+ runfetchcmd(svnfetchcmd, d, workdir=ud.pkgdir)
+
+ scmdata = ud.parm.get("scmdata", "")
+ if scmdata == "keep":
+ tar_flags = ""
+ else:
+ tar_flags = "--exclude='.svn'"
+
+ # tar them up to a defined filename
+ runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.path_spec), d,
+ cleanup=[ud.localpath], workdir=ud.pkgdir)
+
+ def clean(self, ud, d):
+ """ Clean SVN specific files and dirs """
+
+ bb.utils.remove(ud.localpath)
+ bb.utils.remove(ud.moddir, True)
+
+
+ def supports_srcrev(self):
+ return True
+
+ def _revision_key(self, ud, d, name):
+ """
+ Return a unique key for the url
+ """
+ return "svn:" + ud.moddir
+
+ def _latest_revision(self, ud, d, name):
+ """
+ Return the latest upstream revision number
+ """
+ bb.fetch2.check_network_access(d, self._buildsvncommand(ud, d, "log1"), ud.url)
+
+ output = runfetchcmd("LANG=C LC_ALL=C " + self._buildsvncommand(ud, d, "log1"), d, True)
+
+ # skip the first line, as per output of svn log
+ # then we expect the revision on the 2nd line
+ revision = re.search('^r([0-9]*)', output.splitlines()[1]).group(1)
+
+ return revision
+
+ def sortable_revision(self, ud, d, name):
+ """
+ Return a sortable revision number which in our case is the revision number
+ """
+
+ return False, self._build_revision(ud, d)
+
+ def _build_revision(self, ud, d):
+ return ud.revision
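
An illustrative SRC_URI for this fetcher, exercising the module (required),
protocol and rev parameters handled in urldata_init() and _buildsvncommand()
above (host and paths hypothetical):

    SRC_URI = "svn://svn.example.com/repos;module=trunk;protocol=http;rev=1234"
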
diff --git a/poky/bitbake/lib/bb/fetch2/wget.py b/poky/bitbake/lib/bb/fetch2/wget.py
new file mode 100644
index 000000000..8f505b6de
--- /dev/null
+++ b/poky/bitbake/lib/bb/fetch2/wget.py
@@ -0,0 +1,626 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementations
+
+Classes for obtaining upstream sources for the
+BitBake build tools.
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import re
+import tempfile
+import subprocess
+import os
+import logging
+import errno
+import bb
+import bb.progress
+import urllib.request, urllib.parse, urllib.error
+from bb.fetch2 import FetchMethod
+from bb.fetch2 import FetchError
+from bb.fetch2 import logger
+from bb.fetch2 import runfetchcmd
+from bb.utils import export_proxies
+from bs4 import BeautifulSoup
+from bs4 import SoupStrainer
+
+class WgetProgressHandler(bb.progress.LineFilterProgressHandler):
+ """
+ Extract progress information from wget output.
+ Note: relies on --progress=dot (with -v or without -q/-nv) being
+ specified on the wget command line.
+ """
+ def __init__(self, d):
+ super(WgetProgressHandler, self).__init__(d)
+ # Send an initial progress event so the bar gets shown
+ self._fire_progress(0)
+
+ def writeline(self, line):
+ percs = re.findall(r'(\d+)%\s+([\d.]+[A-Z])', line)
+ if percs:
+ progress = int(percs[-1][0])
+ rate = percs[-1][1] + '/s'
+ self.update(progress, rate)
+ return False
+ return True
+
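
The regex in writeline() pulls the last percentage/rate pair out of wget's
dot-progress output; a quick standalone check (sample line illustrative):

    import re

    line = "  3100K .......... .......... 65% 1.2M 3s"
    percs = re.findall(r'(\d+)%\s+([\d.]+[A-Z])', line)
    # percs[-1] == ('65', '1.2M'); the handler reports 65% at '1.2M/s'
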
+
+class Wget(FetchMethod):
+ """Class to fetch urls via 'wget'"""
+ def supports(self, ud, d):
+ """
+ Check to see if a given url can be fetched with wget.
+ """
+ return ud.type in ['http', 'https', 'ftp']
+
+ def recommends_checksum(self, urldata):
+ return True
+
+ def urldata_init(self, ud, d):
+ if 'protocol' in ud.parm:
+ if ud.parm['protocol'] == 'git':
+ raise bb.fetch2.ParameterError("Invalid protocol - if you wish to fetch from a git repository using http, you need to instead use the git:// prefix with protocol=http", ud.url)
+
+ if 'downloadfilename' in ud.parm:
+ ud.basename = ud.parm['downloadfilename']
+ else:
+ ud.basename = os.path.basename(ud.path)
+
+ ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
+ if not ud.localfile:
+ ud.localfile = d.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", "."))
+
+ self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30 --passive-ftp --no-check-certificate"
+
+ def _runwget(self, ud, d, command, quiet, workdir=None):
+
+ progresshandler = WgetProgressHandler(d)
+
+ logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command))
+ bb.fetch2.check_network_access(d, command, ud.url)
+ runfetchcmd(command + ' --progress=dot -v', d, quiet, log=progresshandler, workdir=workdir)
+
+ def download(self, ud, d):
+ """Fetch urls"""
+
+ fetchcmd = self.basecmd
+
+ if 'downloadfilename' in ud.parm:
+ dldir = d.getVar("DL_DIR")
+ bb.utils.mkdirhier(os.path.dirname(dldir + os.sep + ud.localfile))
+ fetchcmd += " -O " + dldir + os.sep + ud.localfile
+
+ if ud.user and ud.pswd:
+ fetchcmd += " --user=%s --password=%s --auth-no-challenge" % (ud.user, ud.pswd)
+
+ uri = ud.url.split(";")[0]
+ if os.path.exists(ud.localpath):
+            # file exists, but we didn't complete it, so resume the download
+ fetchcmd += d.expand(" -c -P ${DL_DIR} '%s'" % uri)
+ else:
+ fetchcmd += d.expand(" -P ${DL_DIR} '%s'" % uri)
+
+ self._runwget(ud, d, fetchcmd, False)
+
+ # Sanity check since wget can pretend it succeed when it didn't
+ # Also, this used to happen if sourceforge sent us to the mirror page
+ if not os.path.exists(ud.localpath):
+ raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, ud.localpath), uri)
+
+ if os.path.getsize(ud.localpath) == 0:
+ os.remove(ud.localpath)
+ raise FetchError("The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right." % (uri), uri)
+
+ return True
+
+ def checkstatus(self, fetch, ud, d, try_again=True):
+ import urllib.request, urllib.error, urllib.parse, socket, http.client
+ from urllib.response import addinfourl
+ from bb.fetch2 import FetchConnectionCache
+
+ class HTTPConnectionCache(http.client.HTTPConnection):
+ if fetch.connection_cache:
+ def connect(self):
+ """Connect to the host and port specified in __init__."""
+
+ sock = fetch.connection_cache.get_connection(self.host, self.port)
+ if sock:
+ self.sock = sock
+ else:
+ self.sock = socket.create_connection((self.host, self.port),
+ self.timeout, self.source_address)
+ fetch.connection_cache.add_connection(self.host, self.port, self.sock)
+
+ if self._tunnel_host:
+ self._tunnel()
+
+ class CacheHTTPHandler(urllib.request.HTTPHandler):
+ def http_open(self, req):
+ return self.do_open(HTTPConnectionCache, req)
+
+ def do_open(self, http_class, req):
+ """Return an addinfourl object for the request, using http_class.
+
+ http_class must implement the HTTPConnection API from httplib.
+ The addinfourl return value is a file-like object. It also
+ has methods and attributes including:
+ - info(): return a mimetools.Message object for the headers
+ - geturl(): return the original request URL
+ - code: HTTP status code
+ """
+ host = req.host
+ if not host:
+                    raise urllib.error.URLError('no host given')
+
+ h = http_class(host, timeout=req.timeout) # will parse host:port
+ h.set_debuglevel(self._debuglevel)
+
+ headers = dict(req.unredirected_hdrs)
+ headers.update(dict((k, v) for k, v in list(req.headers.items())
+ if k not in headers))
+
+ # We want to make an HTTP/1.1 request, but the addinfourl
+ # class isn't prepared to deal with a persistent connection.
+ # It will try to read all remaining data from the socket,
+ # which will block while the server waits for the next request.
+ # So make sure the connection gets closed after the (only)
+ # request.
+
+ # Don't close connection when connection_cache is enabled,
+ if fetch.connection_cache is None:
+ headers["Connection"] = "close"
+ else:
+ headers["Connection"] = "Keep-Alive" # Works for HTTP/1.0
+
+ headers = dict(
+ (name.title(), val) for name, val in list(headers.items()))
+
+ if req._tunnel_host:
+ tunnel_headers = {}
+ proxy_auth_hdr = "Proxy-Authorization"
+ if proxy_auth_hdr in headers:
+ tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr]
+ # Proxy-Authorization should not be sent to origin
+ # server.
+ del headers[proxy_auth_hdr]
+ h.set_tunnel(req._tunnel_host, headers=tunnel_headers)
+
+ try:
+ h.request(req.get_method(), req.selector, req.data, headers)
+ except socket.error as err: # XXX what error?
+ # Don't close connection when cache is enabled.
+ # Instead, try to detect connections that are no longer
+ # usable (for example, closed unexpectedly) and remove
+ # them from the cache.
+ if fetch.connection_cache is None:
+ h.close()
+ elif isinstance(err, OSError) and err.errno == errno.EBADF:
+ # This happens when the server closes the connection despite the Keep-Alive.
+ # Apparently urllib then uses the file descriptor, expecting it to be
+ # connected, when in reality the connection is already gone.
+ # We let the request fail and expect it to be
+ # tried once more ("try_again" in check_status()),
+ # with the dead connection removed from the cache.
+                    # If it still fails, we give up, which can happen with bad
+ # HTTP proxy settings.
+ fetch.connection_cache.remove_connection(h.host, h.port)
+ raise urllib.error.URLError(err)
+ else:
+ try:
+ r = h.getresponse(buffering=True)
+ except TypeError: # buffering kw not supported
+ r = h.getresponse()
+
+ # Pick apart the HTTPResponse object to get the addinfourl
+ # object initialized properly.
+
+ # Wrap the HTTPResponse object in socket's file object adapter
+ # for Windows. That adapter calls recv(), so delegate recv()
+ # to read(). This weird wrapping allows the returned object to
+ # have readline() and readlines() methods.
+
+ # XXX It might be better to extract the read buffering code
+ # out of socket._fileobject() and into a base class.
+ r.recv = r.read
+
+ # no data, just have to read
+ r.read()
+ class fp_dummy(object):
+ def read(self):
+ return ""
+ def readline(self):
+ return ""
+ def close(self):
+ pass
+ closed = False
+
+ resp = addinfourl(fp_dummy(), r.msg, req.get_full_url())
+ resp.code = r.status
+ resp.msg = r.reason
+
+ # Close connection when server request it.
+ if fetch.connection_cache is not None:
+ if 'Connection' in r.msg and r.msg['Connection'] == 'close':
+ fetch.connection_cache.remove_connection(h.host, h.port)
+
+ return resp
+
+ class HTTPMethodFallback(urllib.request.BaseHandler):
+ """
+ Fallback to GET if HEAD is not allowed (405 HTTP error)
+ """
+ def http_error_405(self, req, fp, code, msg, headers):
+ fp.read()
+ fp.close()
+
+ newheaders = dict((k,v) for k,v in list(req.headers.items())
+ if k.lower() not in ("content-length", "content-type"))
+ return self.parent.open(urllib.request.Request(req.get_full_url(),
+ headers=newheaders,
+ origin_req_host=req.origin_req_host,
+ unverifiable=True))
+
+ """
+ Some servers (e.g. GitHub archives, hosted on Amazon S3) return 403
+ Forbidden when they actually mean 405 Method Not Allowed.
+ """
+ http_error_403 = http_error_405
+
+
+ class FixedHTTPRedirectHandler(urllib.request.HTTPRedirectHandler):
+ """
+ urllib2.HTTPRedirectHandler resets the method to GET on redirect,
+ when we want to follow redirects using the original method.
+ """
+ def redirect_request(self, req, fp, code, msg, headers, newurl):
+ newreq = urllib.request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, newurl)
+ newreq.get_method = lambda: req.get_method()
+ return newreq
+ exported_proxies = export_proxies(d)
+
+ handlers = [FixedHTTPRedirectHandler, HTTPMethodFallback]
+        if exported_proxies:
+ handlers.append(urllib.request.ProxyHandler())
+ handlers.append(CacheHTTPHandler())
+ # XXX: Since Python 2.7.9 ssl cert validation is enabled by default
+ # see PEP-0476, this causes verification errors on some https servers
+ # so disable by default.
+ import ssl
+ if hasattr(ssl, '_create_unverified_context'):
+ handlers.append(urllib.request.HTTPSHandler(context=ssl._create_unverified_context()))
+ opener = urllib.request.build_opener(*handlers)
+
+ try:
+ uri = ud.url.split(";")[0]
+ r = urllib.request.Request(uri)
+ r.get_method = lambda: "HEAD"
+ # Some servers (FusionForge, as used on Alioth) require that the
+ # optional Accept header is set.
+ r.add_header("Accept", "*/*")
+ def add_basic_auth(login_str, request):
+ '''Adds Basic auth to http request, pass in login:password as string'''
+ import base64
+ encodeuser = base64.b64encode(login_str.encode('utf-8')).decode("utf-8")
+ authheader = "Basic %s" % encodeuser
+ r.add_header("Authorization", authheader)
+
+ if ud.user:
+ add_basic_auth(ud.user, r)
+
+ try:
+ import netrc, urllib.parse
+ n = netrc.netrc()
+ login, unused, password = n.authenticators(urllib.parse.urlparse(uri).hostname)
+ add_basic_auth("%s:%s" % (login, password), r)
+ except (TypeError, ImportError, IOError, netrc.NetrcParseError):
+ pass
+
+ with opener.open(r) as response:
+ pass
+ except urllib.error.URLError as e:
+ if try_again:
+ logger.debug(2, "checkstatus: trying again")
+ return self.checkstatus(fetch, ud, d, False)
+ else:
+ # debug for now to avoid spamming the logs in e.g. remote sstate searches
+ logger.debug(2, "checkstatus() urlopen failed: %s" % e)
+ return False
+ return True
+
+ def _parse_path(self, regex, s):
+ """
+ Find and group name, version and archive type in the given string s
+ """
+
+ m = regex.search(s)
+ if m:
+ pname = ''
+ pver = ''
+ ptype = ''
+
+ mdict = m.groupdict()
+ if 'name' in mdict.keys():
+ pname = mdict['name']
+ if 'pver' in mdict.keys():
+ pver = mdict['pver']
+ if 'type' in mdict.keys():
+ ptype = mdict['type']
+
+ bb.debug(3, "_parse_path: %s, %s, %s" % (pname, pver, ptype))
+
+ return (pname, pver, ptype)
+
+ return None
+
+ def _modelate_version(self, version):
+ if version[0] in ['.', '-']:
+ if version[1].isdigit():
+ version = version[1] + version[0] + version[2:len(version)]
+ else:
+ version = version[1:len(version)]
+
+ version = re.sub('-', '.', version)
+ version = re.sub('_', '.', version)
+ version = re.sub('(rc)+', '.1000.', version)
+ version = re.sub('(beta)+', '.100.', version)
+ version = re.sub('(alpha)+', '.10.', version)
+ if version[0] == 'v':
+ version = version[1:len(version)]
+ return version
+
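
To make the normalization rules above concrete, here is a self-contained
mirror with a few hand-checked examples (the standalone helper name is
hypothetical):

    import re

    def modelate(version):
        # Mirrors Wget._modelate_version() above, for demonstration only.
        if version[0] in ['.', '-']:
            version = version[1] + version[0] + version[2:] if version[1].isdigit() else version[1:]
        version = re.sub('-', '.', version)
        version = re.sub('_', '.', version)
        version = re.sub('(rc)+', '.1000.', version)
        version = re.sub('(beta)+', '.100.', version)
        version = re.sub('(alpha)+', '.10.', version)
        return version[1:] if version[0] == 'v' else version

    assert modelate("v2.0") == "2.0"
    assert modelate("1_2-3") == "1.2.3"
    assert modelate("3.0rc2") == "3.0.1000.2"
    assert modelate("1.0beta1") == "1.0.100.1"
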
+ def _vercmp(self, old, new):
+ """
+        Check whether the 'new' version is newer than the 'old' version. We use the
+        existing vercmp() for this purpose. PE is cleared in the comparison as it's not
+        used for the build, and PR is cleared too for simplicity, as it's somewhat
+        difficult to extract from the various upstream formats
+ """
+
+ (oldpn, oldpv, oldsuffix) = old
+ (newpn, newpv, newsuffix) = new
+
+ """
+ Check for a new suffix type that we have never heard of before
+ """
+ if (newsuffix):
+ m = self.suffix_regex_comp.search(newsuffix)
+ if not m:
+ bb.warn("%s has a possible unknown suffix: %s" % (newpn, newsuffix))
+ return False
+
+ """
+ Not our package so ignore it
+ """
+ if oldpn != newpn:
+ return False
+
+ oldpv = self._modelate_version(oldpv)
+ newpv = self._modelate_version(newpv)
+
+ return bb.utils.vercmp(("0", oldpv, ""), ("0", newpv, ""))
+
+ def _fetch_index(self, uri, ud, d):
+ """
+ Run fetch checkstatus to get directory information
+ """
+        with tempfile.TemporaryDirectory(prefix="wget-index-") as workdir, tempfile.NamedTemporaryFile(dir=workdir, prefix="wget-listing-") as f:
+ agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12"
+ fetchcmd = self.basecmd
+ fetchcmd += " -O " + f.name + " --user-agent='" + agent + "' '" + uri + "'"
+ try:
+ self._runwget(ud, d, fetchcmd, True, workdir=workdir)
+ fetchresult = f.read()
+ except bb.fetch2.BBFetchException:
+ fetchresult = ""
+
+ return fetchresult
+
+ def _check_latest_version(self, url, package, package_regex, current_version, ud, d):
+ """
+ Return the latest version of a package inside a given directory path
+ If error or no version, return ""
+ """
+ valid = 0
+ version = ['', '', '']
+
+ bb.debug(3, "VersionURL: %s" % (url))
+ soup = BeautifulSoup(self._fetch_index(url, ud, d), "html.parser", parse_only=SoupStrainer("a"))
+ if not soup:
+ bb.debug(3, "*** %s NO SOUP" % (url))
+ return ""
+
+ for line in soup.find_all('a', href=True):
+ bb.debug(3, "line['href'] = '%s'" % (line['href']))
+ bb.debug(3, "line = '%s'" % (str(line)))
+
+ newver = self._parse_path(package_regex, line['href'])
+ if not newver:
+ newver = self._parse_path(package_regex, str(line))
+
+ if newver:
+ bb.debug(3, "Upstream version found: %s" % newver[1])
+ if valid == 0:
+ version = newver
+ valid = 1
+ elif self._vercmp(version, newver) < 0:
+ version = newver
+
+ pupver = re.sub('_', '.', version[1])
+
+ bb.debug(3, "*** %s -> UpstreamVersion = %s (CurrentVersion = %s)" %
+ (package, pupver or "N/A", current_version[1]))
+
+ if valid:
+ return pupver
+
+ return ""
+
+ def _check_latest_version_by_dir(self, dirver, package, package_regex,
+ current_version, ud, d):
+ """
+ Scan every directory in order to get upstream version.
+ """
+ version_dir = ['', '', '']
+ version = ['', '', '']
+
+ dirver_regex = re.compile("(?P<pfx>\D*)(?P<ver>(\d+[\.\-_])+(\d+))")
+ s = dirver_regex.search(dirver)
+ if s:
+ version_dir[1] = s.group('ver')
+ else:
+ version_dir[1] = dirver
+
+ dirs_uri = bb.fetch.encodeurl([ud.type, ud.host,
+ ud.path.split(dirver)[0], ud.user, ud.pswd, {}])
+ bb.debug(3, "DirURL: %s, %s" % (dirs_uri, package))
+
+ soup = BeautifulSoup(self._fetch_index(dirs_uri, ud, d), "html.parser", parse_only=SoupStrainer("a"))
+ if not soup:
+ return version[1]
+
+ for line in soup.find_all('a', href=True):
+ s = dirver_regex.search(line['href'].strip("/"))
+ if s:
+ sver = s.group('ver')
+
+                # When the prefix is part of the version directory, make sure
+                # that only the version directory is used, so strip off any
+                # preceding directories.
+ #
+ # Example: pfx = '/dir1/dir2/v' and version = '2.5' the expected
+ # result is v2.5.
+ spfx = s.group('pfx').split('/')[-1]
+
+ version_dir_new = ['', sver, '']
+ if self._vercmp(version_dir, version_dir_new) <= 0:
+ dirver_new = spfx + sver
+ path = ud.path.replace(dirver, dirver_new, True) \
+ .split(package)[0]
+ uri = bb.fetch.encodeurl([ud.type, ud.host, path,
+ ud.user, ud.pswd, {}])
+
+ pupver = self._check_latest_version(uri,
+ package, package_regex, current_version, ud, d)
+ if pupver:
+ version[1] = pupver
+
+ version_dir = version_dir_new
+
+ return version[1]
+
+ def _init_regexes(self, package, ud, d):
+ """
+ Match as many patterns as possible such as:
+ gnome-common-2.20.0.tar.gz (most common format)
+ gtk+-2.90.1.tar.gz
+ xf86-input-synaptics-12.6.9.tar.gz
+ dri2proto-2.3.tar.gz
+ blktool_4.orig.tar.gz
+ libid3tag-0.15.1b.tar.gz
+ unzip552.tar.gz
+ icu4c-3_6-src.tgz
+ genext2fs_1.3.orig.tar.gz
+ gst-fluendo-mp3
+ """
+ # match most patterns which uses "-" as separator to version digits
+ pn_prefix1 = "[a-zA-Z][a-zA-Z0-9]*([-_][a-zA-Z]\w+)*\+?[-_]"
+ # a loose pattern such as for unzip552.tar.gz
+ pn_prefix2 = "[a-zA-Z]+"
+ # a loose pattern such as for 80325-quicky-0.4.tar.gz
+ pn_prefix3 = "[0-9]+[-]?[a-zA-Z]+"
+ # Save the Package Name (pn) Regex for use later
+ pn_regex = "(%s|%s|%s)" % (pn_prefix1, pn_prefix2, pn_prefix3)
+
+ # match version
+ pver_regex = "(([A-Z]*\d+[a-zA-Z]*[\.\-_]*)+)"
+
+ # match arch
+ parch_regex = "-source|_all_"
+
+ # src.rpm extension was added only for rpm package. Can be removed if the rpm
+ # packaged will always be considered as having to be manually upgraded
+ psuffix_regex = "(tar\.gz|tgz|tar\.bz2|zip|xz|tar\.lz|rpm|bz2|orig\.tar\.gz|tar\.xz|src\.tar\.gz|src\.tgz|svnr\d+\.tar\.bz2|stable\.tar\.gz|src\.rpm)"
+
+ # match name, version and archive type of a package
+ package_regex_comp = re.compile("(?P<name>%s?\.?v?)(?P<pver>%s)(?P<arch>%s)?[\.-](?P<type>%s$)"
+ % (pn_regex, pver_regex, parch_regex, psuffix_regex))
+ self.suffix_regex_comp = re.compile(psuffix_regex)
+
+ # compile regex, can be specific by package or generic regex
+ pn_regex = d.getVar('UPSTREAM_CHECK_REGEX')
+ if pn_regex:
+ package_custom_regex_comp = re.compile(pn_regex)
+ else:
+ version = self._parse_path(package_regex_comp, package)
+ if version:
+ package_custom_regex_comp = re.compile(
+ "(?P<name>%s)(?P<pver>%s)(?P<arch>%s)?[\.-](?P<type>%s)" %
+ (re.escape(version[0]), pver_regex, parch_regex, psuffix_regex))
+ else:
+ package_custom_regex_comp = None
+
+ return package_custom_regex_comp
+
+ def latest_versionstring(self, ud, d):
+ """
+ Manipulate the URL and try to obtain the latest package version
+
+ sanity check to ensure same name and type.
+ """
+ package = ud.path.split("/")[-1]
+ current_version = ['', d.getVar('PV'), '']
+
+ """possible to have no version in pkg name, such as spectrum-fw"""
+ if not re.search("\d+", package):
+ current_version[1] = re.sub('_', '.', current_version[1])
+ current_version[1] = re.sub('-', '.', current_version[1])
+ return (current_version[1], '')
+
+ package_regex = self._init_regexes(package, ud, d)
+ if package_regex is None:
+ bb.warn("latest_versionstring: package %s don't match pattern" % (package))
+ return ('', '')
+ bb.debug(3, "latest_versionstring, regex: %s" % (package_regex.pattern))
+
+ uri = ""
+ regex_uri = d.getVar("UPSTREAM_CHECK_URI")
+ if not regex_uri:
+ path = ud.path.split(package)[0]
+
+ # search for version matches on folders inside the path, like:
+ # "5.7" in http://download.gnome.org/sources/${PN}/5.7/${PN}-${PV}.tar.gz
+ dirver_regex = re.compile("(?P<dirver>[^/]*(\d+\.)*\d+([-_]r\d+)*)/")
+ m = dirver_regex.search(path)
+ if m:
+ pn = d.getVar('PN')
+ dirver = m.group('dirver')
+
+ dirver_pn_regex = re.compile("%s\d?" % (re.escape(pn)))
+ if not dirver_pn_regex.search(dirver):
+ return (self._check_latest_version_by_dir(dirver,
+ package, package_regex, current_version, ud, d), '')
+
+ uri = bb.fetch.encodeurl([ud.type, ud.host, path, ud.user, ud.pswd, {}])
+ else:
+ uri = regex_uri
+
+ return (self._check_latest_version(uri, package, package_regex,
+ current_version, ud, d), '')
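
latest_versionstring() honours two recipe variables read above; an
illustrative override for a package whose releases all live in one directory
(URL and regex hypothetical; the pver named group is what _parse_path()
expects):

    UPSTREAM_CHECK_URI = "https://ftp.example.org/releases/hello/"
    UPSTREAM_CHECK_REGEX = "hello-(?P<pver>\d+(\.\d+)+)\.tar\.gz"
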
diff --git a/poky/bitbake/lib/bb/main.py b/poky/bitbake/lib/bb/main.py
new file mode 100755
index 000000000..f4474e410
--- /dev/null
+++ b/poky/bitbake/lib/bb/main.py
@@ -0,0 +1,508 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer
+# Copyright (C) 2005 Holger Hans Peter Freyther
+# Copyright (C) 2005 ROAD GmbH
+# Copyright (C) 2006 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import sys
+import logging
+import optparse
+import warnings
+import fcntl
+import time
+import traceback
+
+import bb
+from bb import event
+import bb.msg
+from bb import cooker
+from bb import ui
+from bb import server
+from bb import cookerdata
+
+import bb.server.process
+import bb.server.xmlrpcclient
+
+logger = logging.getLogger("BitBake")
+
+class BBMainException(Exception):
+ pass
+
+class BBMainFatal(bb.BBHandledException):
+ pass
+
+def present_options(optionlist):
+ if len(optionlist) > 1:
+ return ' or '.join([', '.join(optionlist[:-1]), optionlist[-1]])
+ else:
+ return optionlist[0]
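+# For example, present_options(['knotty', 'ncurses', 'taskexp'])
+# returns "knotty, ncurses or taskexp".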
+
+class BitbakeHelpFormatter(optparse.IndentedHelpFormatter):
+ def format_option(self, option):
+ # We need to do this here rather than in the text we supply to
+ # add_option() because we don't want to call list_extension_modules()
+ # on every execution (since it imports all of the modules)
+ # Note also that we modify option.help rather than the returned text
+ # - this is so that we don't have to re-format the text ourselves
+ if option.dest == 'ui':
+ valid_uis = list_extension_modules(bb.ui, 'main')
+ option.help = option.help.replace('@CHOICES@', present_options(valid_uis))
+
+ return optparse.IndentedHelpFormatter.format_option(self, option)
+
+def list_extension_modules(pkg, checkattr):
+ """
+ Lists extension modules in a specific Python package
+ (e.g. UIs, servers). NOTE: Calling this function will import all of the
+ submodules of the specified module in order to check for the specified
+ attribute; this can have unusual side-effects. As a result, this should
+ only be called when displaying help text or error messages.
+ Parameters:
+ pkg: previously imported Python package to list
+ checkattr: attribute to look for in module to determine if it's valid
+ as the type of extension you are looking for
+ """
+ import pkgutil
+ pkgdir = os.path.dirname(pkg.__file__)
+
+ modules = []
+ for _, modulename, _ in pkgutil.iter_modules([pkgdir]):
+ if os.path.isdir(os.path.join(pkgdir, modulename)):
+ # ignore directories
+ continue
+ try:
+ module = __import__(pkg.__name__, fromlist=[modulename])
+ except:
+ # If we can't import it, it's not valid
+ continue
+ module_if = getattr(module, modulename)
+ if getattr(module_if, 'hidden_extension', False):
+ continue
+ if not checkattr or hasattr(module_if, checkattr):
+ modules.append(modulename)
+ return modules
+
+def import_extension_module(pkg, modulename, checkattr):
+ try:
+ # Dynamically load the UI based on the ui name. Although we
+ # suggest a fixed set this allows you to have flexibility in which
+ # ones are available.
+ module = __import__(pkg.__name__, fromlist=[modulename])
+ return getattr(module, modulename)
+ except AttributeError:
+ modules = present_options(list_extension_modules(pkg, checkattr))
+ raise BBMainException('FATAL: Unable to import extension module "%s" from %s. '
+ 'Valid extension modules: %s' % (modulename, pkg.__name__, modules))
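+
+# Typical usage, mirroring the UI setup in setup_bitbake() below:
+#   ui_module = import_extension_module(bb.ui, 'knotty', 'main')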
+
+# Display bitbake/OE warnings via the BitBake.Warnings logger, ignoring others
+warnlog = logging.getLogger("BitBake.Warnings")
+_warnings_showwarning = warnings.showwarning
+def _showwarning(message, category, filename, lineno, file=None, line=None):
+ if file is not None:
+ if _warnings_showwarning is not None:
+ _warnings_showwarning(message, category, filename, lineno, file, line)
+ else:
+ s = warnings.formatwarning(message, category, filename, lineno)
+ warnlog.warning(s)
+
+warnings.showwarning = _showwarning
+warnings.filterwarnings("ignore")
+warnings.filterwarnings("default", module="(<string>$|(oe|bb)\.)")
+warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
+warnings.filterwarnings("ignore", category=ImportWarning)
+warnings.filterwarnings("ignore", category=DeprecationWarning, module="<string>$")
+warnings.filterwarnings("ignore", message="With-statements now directly support multiple context managers")
+
+class BitBakeConfigParameters(cookerdata.ConfigParameters):
+
+ def parseCommandLine(self, argv=sys.argv):
+ parser = optparse.OptionParser(
+ formatter=BitbakeHelpFormatter(),
+ version="BitBake Build Tool Core version %s" % bb.__version__,
+ usage="""%prog [options] [recipename/target recipe:do_task ...]
+
+ Executes the specified task (default is 'build') for a given set of target recipes (.bb files).
+ It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH which
+ will provide the layer, BBFILES and other configuration information.""")
+
+ parser.add_option("-b", "--buildfile", action="store", dest="buildfile", default=None,
+ help="Execute tasks from a specific .bb recipe directly. WARNING: Does "
+ "not handle any dependencies from other recipes.")
+
+ parser.add_option("-k", "--continue", action="store_false", dest="abort", default=True,
+ help="Continue as much as possible after an error. While the target that "
+ "failed and anything depending on it cannot be built, as much as "
+ "possible will be built before stopping.")
+
+ parser.add_option("-f", "--force", action="store_true", dest="force", default=False,
+ help="Force the specified targets/task to run (invalidating any "
+ "existing stamp file).")
+
+ parser.add_option("-c", "--cmd", action="store", dest="cmd",
+ help="Specify the task to execute. The exact options available "
+ "depend on the metadata. Some examples might be 'compile'"
+ " or 'populate_sysroot' or 'listtasks' may give a list of "
+ "the tasks available.")
+
+ parser.add_option("-C", "--clear-stamp", action="store", dest="invalidate_stamp",
+ help="Invalidate the stamp for the specified task such as 'compile' "
+ "and then run the default task for the specified target(s).")
+
+ parser.add_option("-r", "--read", action="append", dest="prefile", default=[],
+ help="Read the specified file before bitbake.conf.")
+
+ parser.add_option("-R", "--postread", action="append", dest="postfile", default=[],
+ help="Read the specified file after bitbake.conf.")
+
+ parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
+ help="Enable tracing of shell tasks (with 'set -x'). "
+ "Also print bb.note(...) messages to stdout (in "
+ "addition to writing them to ${T}/log.do_<task>).")
+
+ parser.add_option("-D", "--debug", action="count", dest="debug", default=0,
+ help="Increase the debug level. You can specify this "
+ "more than once. -D sets the debug level to 1, "
+ "where only bb.debug(1, ...) messages are printed "
+ "to stdout; -DD sets the debug level to 2, where "
+ "both bb.debug(1, ...) and bb.debug(2, ...) "
+ "messages are printed; etc. Without -D, no debug "
+ "messages are printed. Note that -D only affects "
+ "output to stdout. All debug messages are written "
+ "to ${T}/log.do_taskname, regardless of the debug "
+ "level.")
+
+ parser.add_option("-q", "--quiet", action="count", dest="quiet", default=0,
+ help="Output less log message data to the terminal. You can specify this more than once.")
+
+ parser.add_option("-n", "--dry-run", action="store_true", dest="dry_run", default=False,
+ help="Don't execute, just go through the motions.")
+
+ parser.add_option("-S", "--dump-signatures", action="append", dest="dump_signatures",
+ default=[], metavar="SIGNATURE_HANDLER",
+ help="Dump out the signature construction information, with no task "
+ "execution. The SIGNATURE_HANDLER parameter is passed to the "
+ "handler. Two common values are none and printdiff but the handler "
+ "may define more/less. none means only dump the signature, printdiff"
+ " means compare the dumped signature with the cached one.")
+
+ parser.add_option("-p", "--parse-only", action="store_true",
+ dest="parse_only", default=False,
+ help="Quit after parsing the BB recipes.")
+
+ parser.add_option("-s", "--show-versions", action="store_true",
+ dest="show_versions", default=False,
+ help="Show current and preferred versions of all recipes.")
+
+ parser.add_option("-e", "--environment", action="store_true",
+ dest="show_environment", default=False,
+ help="Show the global or per-recipe environment complete with information"
+ " about where variables were set/changed.")
+
+ parser.add_option("-g", "--graphviz", action="store_true", dest="dot_graph", default=False,
+ help="Save dependency tree information for the specified "
+ "targets in the dot syntax.")
+
+ parser.add_option("-I", "--ignore-deps", action="append",
+ dest="extra_assume_provided", default=[],
+ help="Assume these dependencies don't exist and are already provided "
+ "(equivalent to ASSUME_PROVIDED). Useful to make dependency "
+ "graphs more appealing")
+
+ parser.add_option("-l", "--log-domains", action="append", dest="debug_domains", default=[],
+ help="Show debug logging for the specified logging domains")
+
+ parser.add_option("-P", "--profile", action="store_true", dest="profile", default=False,
+ help="Profile the command and save reports.")
+
+ # @CHOICES@ is substituted out by BitbakeHelpFormatter above
+ parser.add_option("-u", "--ui", action="store", dest="ui",
+ default=os.environ.get('BITBAKE_UI', 'knotty'),
+ help="The user interface to use (@CHOICES@ - default %default).")
+
+ parser.add_option("", "--token", action="store", dest="xmlrpctoken",
+ default=os.environ.get("BBTOKEN"),
+ help="Specify the connection token to be used when connecting "
+ "to a remote server.")
+
+ parser.add_option("", "--revisions-changed", action="store_true",
+ dest="revisions_changed", default=False,
+ help="Set the exit code depending on whether upstream floating "
+ "revisions have changed or not.")
+
+ parser.add_option("", "--server-only", action="store_true",
+ dest="server_only", default=False,
+ help="Run bitbake without a UI, only starting a server "
+ "(cooker) process.")
+
+ parser.add_option("-B", "--bind", action="store", dest="bind", default=False,
+ help="The name/address for the bitbake xmlrpc server to bind to.")
+
+ parser.add_option("-T", "--idle-timeout", type=float, dest="server_timeout",
+ default=os.getenv("BB_SERVER_TIMEOUT"),
+ help="Set timeout to unload bitbake server due to inactivity, "
+ "set to -1 means no unload, "
+ "default: Environment variable BB_SERVER_TIMEOUT.")
+
+ parser.add_option("", "--no-setscene", action="store_true",
+ dest="nosetscene", default=False,
+ help="Do not run any setscene tasks. sstate will be ignored and "
+ "everything needed, built.")
+
+ parser.add_option("", "--setscene-only", action="store_true",
+ dest="setsceneonly", default=False,
+ help="Only run setscene tasks, don't run any real tasks.")
+
+ parser.add_option("", "--remote-server", action="store", dest="remote_server",
+ default=os.environ.get("BBSERVER"),
+ help="Connect to the specified server.")
+
+ parser.add_option("-m", "--kill-server", action="store_true",
+ dest="kill_server", default=False,
+ help="Terminate any running bitbake server.")
+
+ parser.add_option("", "--observe-only", action="store_true",
+ dest="observe_only", default=False,
+ help="Connect to a server as an observing-only client.")
+
+ parser.add_option("", "--status-only", action="store_true",
+ dest="status_only", default=False,
+ help="Check the status of the remote bitbake server.")
+
+ parser.add_option("-w", "--write-log", action="store", dest="writeeventlog",
+ default=os.environ.get("BBEVENTLOG"),
+ help="Writes the event log of the build to a bitbake event json file. "
+ "Use '' (empty string) to assign the name automatically.")
+
+ parser.add_option("", "--runall", action="append", dest="runall",
+ help="Run the specified task for any recipe in the taskgraph of the specified target (even if it wouldn't otherwise have run).")
+
+ parser.add_option("", "--runonly", action="append", dest="runonly",
+ help="Run only the specified task within the taskgraph of the specified targets (and any task dependencies those tasks may have).")
+
+
+ options, targets = parser.parse_args(argv)
+
+ if options.quiet and options.verbose:
+ parser.error("options --quiet and --verbose are mutually exclusive")
+
+ if options.quiet and options.debug:
+ parser.error("options --quiet and --debug are mutually exclusive")
+
+ # use configuration files from environment variables
+ if "BBPRECONF" in os.environ:
+ options.prefile.append(os.environ["BBPRECONF"])
+
+ if "BBPOSTCONF" in os.environ:
+ options.postfile.append(os.environ["BBPOSTCONF"])
+
+ # fill in proper log name if not supplied
+ if options.writeeventlog is not None and len(options.writeeventlog) == 0:
+ from datetime import datetime
+ eventlog = "bitbake_eventlog_%s.json" % datetime.now().strftime("%Y%m%d%H%M%S")
+ options.writeeventlog = eventlog
+
+ if options.bind:
+ try:
+ # Check that the bind value is a ':'-delimited host:port pair with a numeric port
+ (host, port) = options.bind.split(':')
+ port = int(port)
+ except (ValueError,IndexError):
+ raise BBMainException("FATAL: Malformed host:port bind parameter")
+ options.xmlrpcinterface = (host, port)
+ else:
+ options.xmlrpcinterface = (None, 0)
+
+ return options, targets[1:]
+
+
+def bitbake_main(configParams, configuration):
+
+ # Python multiprocessing requires /dev/shm on Linux
+ if sys.platform.startswith('linux') and not os.access('/dev/shm', os.W_OK | os.X_OK):
+ raise BBMainException("FATAL: /dev/shm does not exist or is not writable")
+
+ # Unbuffer stdout to avoid log truncation in the event
+ # of a disorderly exit, as well as to provide timely
+ # updates to log files for use with tail
+ try:
+ if sys.stdout.name == '<stdout>':
+ # Reopen with O_SYNC (unbuffered)
+ fl = fcntl.fcntl(sys.stdout.fileno(), fcntl.F_GETFL)
+ fl |= os.O_SYNC
+ fcntl.fcntl(sys.stdout.fileno(), fcntl.F_SETFL, fl)
+ except:
+ pass
+
+ configuration.setConfigParameters(configParams)
+
+ if configParams.server_only and configParams.remote_server:
+ raise BBMainException("FATAL: The '--server-only' option conflicts with %s.\n" %
+ ("the BBSERVER environment variable" if "BBSERVER" in os.environ \
+ else "the '--remote-server' option"))
+
+ if configParams.observe_only and not (configParams.remote_server or configParams.bind):
+ raise BBMainException("FATAL: '--observe-only' can only be used by UI clients "
+ "connecting to a server.\n")
+
+ if "BBDEBUG" in os.environ:
+ level = int(os.environ["BBDEBUG"])
+ if level > configuration.debug:
+ configuration.debug = level
+
+ bb.msg.init_msgconfig(configParams.verbose, configuration.debug,
+ configuration.debug_domains)
+
+ server_connection, ui_module = setup_bitbake(configParams, configuration)
+ # No server connection
+ if server_connection is None:
+ if configParams.status_only:
+ return 1
+ if configParams.kill_server:
+ return 0
+
+ if not configParams.server_only:
+ if configParams.status_only:
+ server_connection.terminate()
+ return 0
+
+ try:
+ for event in bb.event.ui_queue:
+ server_connection.events.queue_event(event)
+ bb.event.ui_queue = []
+
+ return ui_module.main(server_connection.connection, server_connection.events,
+ configParams)
+ finally:
+ server_connection.terminate()
+ else:
+ return 0
+
+ return 1
+
+def setup_bitbake(configParams, configuration, extrafeatures=None):
+ # Ensure logging messages get sent to the UI as events
+ handler = bb.event.LogHandler()
+ if not configParams.status_only:
+ # In status only mode there are no logs and no UI
+ logger.addHandler(handler)
+
+ # Clear away any spurious environment variables while we stoke up the cooker
+ cleanedvars = bb.utils.clean_environment()
+
+ if configParams.server_only:
+ featureset = []
+ ui_module = None
+ else:
+ ui_module = import_extension_module(bb.ui, configParams.ui, 'main')
+ # Collect the feature set for the UI
+ featureset = getattr(ui_module, "featureSet", [])
+
+ if extrafeatures:
+ for feature in extrafeatures:
+ if not feature in featureset:
+ featureset.append(feature)
+
+ server_connection = None
+
+ if configParams.remote_server:
+ # Connect to a remote XMLRPC server
+ server_connection = bb.server.xmlrpcclient.connectXMLRPC(configParams.remote_server, featureset,
+ configParams.observe_only, configParams.xmlrpctoken)
+ else:
+ retries = 8
+ while retries:
+ try:
+ topdir, lock = lockBitbake()
+ sockname = topdir + "/bitbake.sock"
+ if lock:
+ if configParams.status_only or configParams.kill_server:
+ logger.info("bitbake server is not running.")
+ lock.close()
+ return None, None
+ # we start a server with a given configuration
+ logger.info("Starting bitbake server...")
+ # Clear the event queue since we already displayed messages
+ bb.event.ui_queue = []
+ server = bb.server.process.BitBakeServer(lock, sockname, configuration, featureset)
+
+ else:
+ logger.info("Reconnecting to bitbake server...")
+ if not os.path.exists(sockname):
+ print("Previous bitbake instance shutting down?, waiting to retry...")
+ i = 0
+ lock = None
+ # Wait for 5s or until we can get the lock
+ while not lock and i < 50:
+ time.sleep(0.1)
+ _, lock = lockBitbake()
+ i += 1
+ if lock:
+ bb.utils.unlockfile(lock)
+ raise bb.server.process.ProcessTimeout("Bitbake still shutting down as socket exists but no lock?")
+ if not configParams.server_only:
+ try:
+ server_connection = bb.server.process.connectProcessServer(sockname, featureset)
+ except EOFError:
+ # The server may have been shutting down but not closed the socket yet. If that happened,
+ # ignore it.
+ pass
+
+ if server_connection or configParams.server_only:
+ break
+ except BBMainFatal:
+ raise
+ except (Exception, bb.server.process.ProcessTimeout) as e:
+ if not retries:
+ raise
+ retries -= 1
+ if isinstance(e, (bb.server.process.ProcessTimeout, BrokenPipeError)):
+ logger.info("Retrying server connection...")
+ else:
+ logger.info("Retrying server connection... (%s)" % traceback.format_exc())
+ if not retries:
+ bb.fatal("Unable to connect to bitbake server, or start one")
+ if retries < 5:
+ time.sleep(5)
+
+ if configParams.kill_server:
+ server_connection.connection.terminateServer()
+ server_connection.terminate()
+ bb.event.ui_queue = []
+ logger.info("Terminated bitbake server.")
+ return None, None
+
+ # Restore the environment in case the UI needs it
+ for k in cleanedvars:
+ os.environ[k] = cleanedvars[k]
+
+ logger.removeHandler(handler)
+
+ return server_connection, ui_module
+
+def lockBitbake():
+ topdir = bb.cookerdata.findTopdir()
+ if not topdir:
+ bb.error("Unable to find conf/bblayers.conf or conf/bitbake.conf. BBAPTH is unset and/or not in a build directory?")
+ raise BBMainFatal
+ lockfile = topdir + "/bitbake.lock"
+ return topdir, bb.utils.lockfile(lockfile, False, False)
+
diff --git a/poky/bitbake/lib/bb/methodpool.py b/poky/bitbake/lib/bb/methodpool.py
new file mode 100644
index 000000000..49aed3338
--- /dev/null
+++ b/poky/bitbake/lib/bb/methodpool.py
@@ -0,0 +1,40 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+#
+# Copyright (C) 2006 Holger Hans Peter Freyther
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from bb.utils import better_compile, better_exec
+
+def insert_method(modulename, code, fn, lineno):
+ """
+ Add the code of a module. The methods are
+ simply added; no checking is done
+ """
+ comp = better_compile(code, modulename, fn, lineno=lineno)
+ better_exec(comp, None, code, fn)
+
+compilecache = {}
+
+def compile_cache(code):
+ h = hash(code)
+ if h in compilecache:
+ return compilecache[h]
+ return None
+
+def compile_cache_add(code, compileobj):
+ h = hash(code)
+ compilecache[h] = compileobj
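+
+# A minimal usage sketch (the module and file names are illustrative):
+#   comp = compile_cache(code)
+#   if not comp:
+#       comp = better_compile(code, "modulename", "recipe.bb")
+#       compile_cache_add(code, comp)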
diff --git a/poky/bitbake/lib/bb/monitordisk.py b/poky/bitbake/lib/bb/monitordisk.py
new file mode 100644
index 000000000..833cd3d34
--- /dev/null
+++ b/poky/bitbake/lib/bb/monitordisk.py
@@ -0,0 +1,268 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (C) 2012 Robert Yang
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os, logging, re, sys
+import bb
+logger = logging.getLogger("BitBake.Monitor")
+
+def printErr(info):
+ logger.error("%s\n Disk space monitor will NOT be enabled" % info)
+
+def convertGMK(unit):
+
+ """ Convert the space unit G, M, K, the unit is case-insensitive """
+
+ unitG = re.match('([1-9][0-9]*)[gG]\s?$', unit)
+ if unitG:
+ return int(unitG.group(1)) * (1024 ** 3)
+ unitM = re.match('([1-9][0-9]*)[mM]\s?$', unit)
+ if unitM:
+ return int(unitM.group(1)) * (1024 ** 2)
+ unitK = re.match('([1-9][0-9]*)[kK]\s?$', unit)
+ if unitK:
+ return int(unitK.group(1)) * 1024
+ unitN = re.match('([1-9][0-9]*)\s?$', unit)
+ if unitN:
+ return int(unitN.group(1))
+ else:
+ return None
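+# For example: convertGMK("1G") == 1024 ** 3, convertGMK("10k") == 10240,
+# convertGMK("500") == 500, while convertGMK("1.5G") returns None because
+# only whole numbers are accepted.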
+
+def getMountedDev(path):
+
+ """ Get the device mounted at the path, uses /proc/mounts """
+
+ # Get the mount point of the filesystem containing path
+ # st_dev is the ID of device containing file
+ parentDev = os.stat(path).st_dev
+ currentDev = parentDev
+ # When the current directory's device is different from the
+ # parent's, then the current directory is a mount point
+ while parentDev == currentDev:
+ mountPoint = path
+ # Use dirname to get the parent's directory
+ path = os.path.dirname(path)
+ # Reach the "/"
+ if path == mountPoint:
+ break
+ parentDev = os.stat(path).st_dev
+
+ try:
+ with open("/proc/mounts", "r") as ifp:
+ for line in ifp:
+ procLines = line.rstrip('\n').split()
+ if procLines[1] == mountPoint:
+ return procLines[0]
+ except EnvironmentError:
+ pass
+ return None
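+# For example, getMountedDev("/home/user/build/tmp") walks up to the
+# enclosing mount point and returns its device from /proc/mounts,
+# e.g. "/dev/sda2" (path and device here are hypothetical).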
+
+def getDiskData(BBDirs, configuration):
+
+ """Prepare disk data for disk space monitor"""
+
+ # Save the device IDs, need the ID to be unique (the dictionary's key is
+ # unique), so that when more than one directory is located on the same
+ # device, we just monitor it once
+ devDict = {}
+ for pathSpaceInode in BBDirs.split():
+ # The input format is: "action,dir,space,inode"; action and dir
+ # are required, space and inode are optional
+ pathSpaceInodeRe = re.match('([^,]*),([^,]*),([^,]*),?(.*)', pathSpaceInode)
+ if not pathSpaceInodeRe:
+ printErr("Invalid value in BB_DISKMON_DIRS: %s" % pathSpaceInode)
+ return None
+
+ action = pathSpaceInodeRe.group(1)
+ if action not in ("ABORT", "STOPTASKS", "WARN"):
+ printErr("Unknown disk space monitor action: %s" % action)
+ return None
+
+ path = os.path.realpath(pathSpaceInodeRe.group(2))
+ if not path:
+ printErr("Invalid path value in BB_DISKMON_DIRS: %s" % pathSpaceInode)
+ return None
+
+ # The disk space or inode is optional, but it should have a correct
+ # value once it is specified
+ minSpace = pathSpaceInodeRe.group(3)
+ if minSpace:
+ minSpace = convertGMK(minSpace)
+ if not minSpace:
+ printErr("Invalid disk space value in BB_DISKMON_DIRS: %s" % pathSpaceInodeRe.group(3))
+ return None
+ else:
+ # None means that it is not specified
+ minSpace = None
+
+ minInode = pathSpaceInodeRe.group(4)
+ if minInode:
+ minInode = convertGMK(minInode)
+ if not minInode:
+ printErr("Invalid inode value in BB_DISKMON_DIRS: %s" % pathSpaceInodeRe.group(4))
+ return None
+ else:
+ # None means that it is not specified
+ minInode = None
+
+ if minSpace is None and minInode is None:
+ printErr("No disk space or inode value in found BB_DISKMON_DIRS: %s" % pathSpaceInode)
+ return None
+ # mkdir for the directory since it may not exist, for example the
+ # DL_DIR may not exist at the very beginning
+ if not os.path.exists(path):
+ bb.utils.mkdirhier(path)
+ dev = getMountedDev(path)
+ # Use path/action as the key
+ devDict[(path, action)] = [dev, minSpace, minInode]
+
+ return devDict
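+# An illustrative BB_DISKMON_DIRS value (the directories are examples):
+#   BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},1G,100K ABORT,${DL_DIR},200M"
+# yields one devDict entry per (path, action) pair, holding the mounted
+# device and the minimum space/inode thresholds.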
+
+def getInterval(configuration):
+
+ """ Get the disk space interval """
+
+ # The default values are 50M and 5K.
+ spaceDefault = 50 * 1024 * 1024
+ inodeDefault = 5 * 1024
+
+ interval = configuration.getVar("BB_DISKMON_WARNINTERVAL")
+ if not interval:
+ return spaceDefault, inodeDefault
+ else:
+ # The disk space or inode interval is optional, but it should
+ # have a correct value once it is specified
+ intervalRe = re.match('([^,]*),?\s*(.*)', interval)
+ if intervalRe:
+ intervalSpace = intervalRe.group(1)
+ if intervalSpace:
+ intervalSpace = convertGMK(intervalSpace)
+ if not intervalSpace:
+ printErr("Invalid disk space interval value in BB_DISKMON_WARNINTERVAL: %s" % intervalRe.group(1))
+ return None, None
+ else:
+ intervalSpace = spaceDefault
+ intervalInode = intervalRe.group(2)
+ if intervalInode:
+ intervalInode = convertGMK(intervalInode)
+ if not intervalInode:
+ printErr("Invalid disk inode interval value in BB_DISKMON_WARNINTERVAL: %s" % intervalRe.group(2))
+ return None, None
+ else:
+ intervalInode = inodeDefault
+ return intervalSpace, intervalInode
+ else:
+ printErr("Invalid interval value in BB_DISKMON_WARNINTERVAL: %s" % interval)
+ return None, None
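+# For example, BB_DISKMON_WARNINTERVAL = "50M,5K" yields
+# (50 * 1024 * 1024, 5 * 1024); leaving it unset gives the same defaults.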
+
+class diskMonitor:
+
+ """Prepare the disk space monitor data"""
+
+ def __init__(self, configuration):
+
+ self.enableMonitor = False
+ self.configuration = configuration
+
+ BBDirs = configuration.getVar("BB_DISKMON_DIRS") or None
+ if BBDirs:
+ self.devDict = getDiskData(BBDirs, configuration)
+ if self.devDict:
+ self.spaceInterval, self.inodeInterval = getInterval(configuration)
+ if self.spaceInterval and self.inodeInterval:
+ self.enableMonitor = True
+ # These are for saving the previous disk free space and inode, we
+ # use them to avoid printing too many warning messages
+ self.preFreeS = {}
+ self.preFreeI = {}
+ # This is for STOPTASKS and ABORT, to avoid printing the message
+ # repeatedly while waiting for the tasks to finish
+ self.checked = {}
+ for k in self.devDict:
+ self.preFreeS[k] = 0
+ self.preFreeI[k] = 0
+ self.checked[k] = False
+ if self.spaceInterval is None and self.inodeInterval is None:
+ self.enableMonitor = False
+
+ def check(self, rq):
+
+ """ Take action for the monitor """
+
+ if self.enableMonitor:
+ diskUsage = {}
+ for k, attributes in self.devDict.items():
+ path, action = k
+ dev, minSpace, minInode = attributes
+
+ st = os.statvfs(path)
+
+ # The available free space, integer number
+ freeSpace = st.f_bavail * st.f_frsize
+
+ # Send all relevant information in the event.
+ freeSpaceRoot = st.f_bfree * st.f_frsize
+ totalSpace = st.f_blocks * st.f_frsize
+ diskUsage[dev] = bb.event.DiskUsageSample(freeSpace, freeSpaceRoot, totalSpace)
+
+ if minSpace and freeSpace < minSpace:
+ # Always show warning, the self.checked would always be False if the action is WARN
+ if self.preFreeS[k] == 0 or self.preFreeS[k] - freeSpace > self.spaceInterval and not self.checked[k]:
+ logger.warning("The free space of %s (%s) is running low (%.3fGB left)" % \
+ (path, dev, freeSpace / 1024 / 1024 / 1024.0))
+ self.preFreeS[k] = freeSpace
+
+ if action == "STOPTASKS" and not self.checked[k]:
+ logger.error("No new tasks can be executed since the disk space monitor action is \"STOPTASKS\"!")
+ self.checked[k] = True
+ rq.finish_runqueue(False)
+ bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, path), self.configuration)
+ elif action == "ABORT" and not self.checked[k]:
+ logger.error("Immediately abort since the disk space monitor action is \"ABORT\"!")
+ self.checked[k] = True
+ rq.finish_runqueue(True)
+ bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, path), self.configuration)
+
+ # The free inodes, integer number
+ freeInode = st.f_favail
+
+ if minInode and freeInode < minInode:
+ # Some filesystems use dynamic inodes so can't run out
+ # (e.g. btrfs). This is reported by the inode count being 0.
+ if st.f_files == 0:
+ self.devDict[k][2] = None
+ continue
+ # Always show warning, the self.checked would always be False if the action is WARN
+ if self.preFreeI[k] == 0 or self.preFreeI[k] - freeInode > self.inodeInterval and not self.checked[k]:
+ logger.warning("The free inode of %s (%s) is running low (%.3fK left)" % \
+ (path, dev, freeInode / 1024.0))
+ self.preFreeI[k] = freeInode
+
+ if action == "STOPTASKS" and not self.checked[k]:
+ logger.error("No new tasks can be executed since the disk space monitor action is \"STOPTASKS\"!")
+ self.checked[k] = True
+ rq.finish_runqueue(False)
+ bb.event.fire(bb.event.DiskFull(dev, 'inode', freeInode, path), self.configuration)
+ elif action == "ABORT" and not self.checked[k]:
+ logger.error("Immediately abort since the disk space monitor action is \"ABORT\"!")
+ self.checked[k] = True
+ rq.finish_runqueue(True)
+ bb.event.fire(bb.event.DiskFull(dev, 'inode', freeInode, path), self.configuration)
+
+ bb.event.fire(bb.event.MonitorDiskEvent(diskUsage), self.configuration)
+ return
diff --git a/poky/bitbake/lib/bb/msg.py b/poky/bitbake/lib/bb/msg.py
new file mode 100644
index 000000000..f1723be79
--- /dev/null
+++ b/poky/bitbake/lib/bb/msg.py
@@ -0,0 +1,225 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'msg' implementation
+
+Message handling infrastructure for bitbake
+
+"""
+
+# Copyright (C) 2006 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import copy
+import logging
+import collections
+from itertools import groupby
+import warnings
+import bb
+import bb.event
+
+class BBLogFormatter(logging.Formatter):
+ """Formatter which ensures that our 'plain' messages (logging.INFO + 1) are used as is"""
+
+ DEBUG3 = logging.DEBUG - 2
+ DEBUG2 = logging.DEBUG - 1
+ DEBUG = logging.DEBUG
+ VERBOSE = logging.INFO - 1
+ NOTE = logging.INFO
+ PLAIN = logging.INFO + 1
+ ERROR = logging.ERROR
+ WARNING = logging.WARNING
+ CRITICAL = logging.CRITICAL
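+ # With the stdlib bases (DEBUG == 10, INFO == 20) these order as:
+ # DEBUG3(8) < DEBUG2(9) < DEBUG(10) < VERBOSE(19) < NOTE(20) < PLAIN(21)
+ # < WARNING(30) < ERROR(40) < CRITICAL(50)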
+
+ levelnames = {
+ DEBUG3 : 'DEBUG',
+ DEBUG2 : 'DEBUG',
+ DEBUG : 'DEBUG',
+ VERBOSE: 'NOTE',
+ NOTE : 'NOTE',
+ PLAIN : '',
+ WARNING : 'WARNING',
+ ERROR : 'ERROR',
+ CRITICAL: 'ERROR',
+ }
+
+ color_enabled = False
+ BASECOLOR, BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(29,38))
+
+ COLORS = {
+ DEBUG3 : CYAN,
+ DEBUG2 : CYAN,
+ DEBUG : CYAN,
+ VERBOSE : BASECOLOR,
+ NOTE : BASECOLOR,
+ PLAIN : BASECOLOR,
+ WARNING : YELLOW,
+ ERROR : RED,
+ CRITICAL: RED,
+ }
+
+ BLD = '\033[1;%dm'
+ STD = '\033[%dm'
+ RST = '\033[0m'
+
+ def getLevelName(self, levelno):
+ try:
+ return self.levelnames[levelno]
+ except KeyError:
+ self.levelnames[levelno] = value = 'Level %d' % levelno
+ return value
+
+ def format(self, record):
+ record.levelname = self.getLevelName(record.levelno)
+ if record.levelno == self.PLAIN:
+ msg = record.getMessage()
+ else:
+ if self.color_enabled:
+ record = self.colorize(record)
+ msg = logging.Formatter.format(self, record)
+ if hasattr(record, 'bb_exc_formatted'):
+ msg += '\n' + ''.join(record.bb_exc_formatted)
+ elif hasattr(record, 'bb_exc_info'):
+ etype, value, tb = record.bb_exc_info
+ formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
+ msg += '\n' + ''.join(formatted)
+ return msg
+
+ def colorize(self, record):
+ color = self.COLORS[record.levelno]
+ if self.color_enabled and color is not None:
+ record = copy.copy(record)
+ record.levelname = "".join([self.BLD % color, record.levelname, self.RST])
+ record.msg = "".join([self.STD % color, record.msg, self.RST])
+ return record
+
+ def enable_color(self):
+ self.color_enabled = True
+
+class BBLogFilter(object):
+ def __init__(self, handler, level, debug_domains):
+ self.stdlevel = level
+ self.debug_domains = debug_domains
+ loglevel = level
+ for domain in debug_domains:
+ if debug_domains[domain] < loglevel:
+ loglevel = debug_domains[domain]
+ handler.setLevel(loglevel)
+ handler.addFilter(self)
+
+ def filter(self, record):
+ if record.levelno >= self.stdlevel:
+ return True
+ if record.name in self.debug_domains and record.levelno >= self.debug_domains[record.name]:
+ return True
+ return False
+
+class BBLogFilterStdErr(BBLogFilter):
+ def filter(self, record):
+ if not BBLogFilter.filter(self, record):
+ return False
+ if record.levelno >= logging.ERROR:
+ return True
+ return False
+
+class BBLogFilterStdOut(BBLogFilter):
+ def filter(self, record):
+ if not BBLogFilter.filter(self, record):
+ return False
+ if record.levelno < logging.ERROR:
+ return True
+ return False
+
+# Message control functions
+#
+
+loggerDefaultDebugLevel = 0
+loggerDefaultVerbose = False
+loggerVerboseLogs = False
+loggerDefaultDomains = []
+
+def init_msgconfig(verbose, debug, debug_domains=None):
+ """
+ Set the default verbosity and debug levels and configure the logger
+ """
+ bb.msg.loggerDefaultDebugLevel = debug
+ bb.msg.loggerDefaultVerbose = verbose
+ if verbose:
+ bb.msg.loggerVerboseLogs = True
+ if debug_domains:
+ bb.msg.loggerDefaultDomains = debug_domains
+ else:
+ bb.msg.loggerDefaultDomains = []
+
+def constructLogOptions():
+ debug = loggerDefaultDebugLevel
+ verbose = loggerDefaultVerbose
+ domains = loggerDefaultDomains
+
+ if debug:
+ level = BBLogFormatter.DEBUG - debug + 1
+ elif verbose:
+ level = BBLogFormatter.VERBOSE
+ else:
+ level = BBLogFormatter.NOTE
+
+ debug_domains = {}
+ for (domainarg, iterator) in groupby(domains):
+ dlevel = len(tuple(iterator))
+ debug_domains["BitBake.%s" % domainarg] = logging.DEBUG - dlevel + 1
+ return level, debug_domains
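+# For example, -DD gives debug == 2, so level becomes
+# BBLogFormatter.DEBUG - 1 (i.e. DEBUG2); passing -l foo -l foo yields
+# debug_domains == {"BitBake.foo": logging.DEBUG - 1}.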
+
+def addDefaultlogFilter(handler, cls = BBLogFilter, forcelevel=None):
+ level, debug_domains = constructLogOptions()
+
+ if forcelevel is not None:
+ level = forcelevel
+
+ cls(handler, level, debug_domains)
+
+#
+# Message handling functions
+#
+
+def fatal(msgdomain, msg):
+ if msgdomain:
+ logger = logging.getLogger("BitBake.%s" % msgdomain)
+ else:
+ logger = logging.getLogger("BitBake")
+ logger.critical(msg)
+ sys.exit(1)
+
+def logger_create(name, output=sys.stderr, level=logging.INFO, preserve_handlers=False, color='auto'):
+ """Standalone logger creation function"""
+ logger = logging.getLogger(name)
+ console = logging.StreamHandler(output)
+ format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
+ if color == 'always' or (color == 'auto' and output.isatty()):
+ format.enable_color()
+ console.setFormatter(format)
+ if preserve_handlers:
+ logger.addHandler(console)
+ else:
+ logger.handlers = [console]
+ logger.setLevel(level)
+ return logger
+
+def has_console_handler(logger):
+ for handler in logger.handlers:
+ if isinstance(handler, logging.StreamHandler):
+ if handler.stream in [sys.stderr, sys.stdout]:
+ return True
+ return False
diff --git a/poky/bitbake/lib/bb/namedtuple_with_abc.py b/poky/bitbake/lib/bb/namedtuple_with_abc.py
new file mode 100644
index 000000000..32f2fc642
--- /dev/null
+++ b/poky/bitbake/lib/bb/namedtuple_with_abc.py
@@ -0,0 +1,255 @@
+# http://code.activestate.com/recipes/577629-namedtupleabc-abstract-base-class-mix-in-for-named/
+#!/usr/bin/env python
+# Copyright (c) 2011 Jan Kaliszewski (zuo). Available under the MIT License.
+
+"""
+namedtuple_with_abc.py:
+* named tuple mix-in + ABC (abstract base class) recipe,
+* works under Python 2.6, 2.7 as well as 3.x.
+
+Import this module to patch collections.namedtuple() factory function
+-- enriching it with the 'abc' attribute (an abstract base class + mix-in
+for named tuples) and decorating it with a wrapper that registers each
+newly created named tuple as a subclass of namedtuple.abc.
+
+How to import:
+ import collections, namedtuple_with_abc
+or:
+ import namedtuple_with_abc
+ from collections import namedtuple
+ # ^ in this variant you must import namedtuple function
+ # *after* importing namedtuple_with_abc module
+or simply:
+ from namedtuple_with_abc import namedtuple
+
+Simple usage example:
+ class Credentials(namedtuple.abc):
+ _fields = 'username password'
+ def __str__(self):
+ return ('{0.__class__.__name__}'
+ '(username={0.username}, password=...)'.format(self))
+ print(Credentials("alice", "Alice's password"))
+
+For more advanced examples -- see below the "if __name__ == '__main__':".
+"""
+
+import collections
+from abc import ABCMeta, abstractproperty
+from functools import wraps
+from sys import version_info
+
+__all__ = ('namedtuple',)
+_namedtuple = collections.namedtuple
+
+
+class _NamedTupleABCMeta(ABCMeta):
+ '''The metaclass for the abstract base class + mix-in for named tuples.'''
+ def __new__(mcls, name, bases, namespace):
+ fields = namespace.get('_fields')
+ for base in bases:
+ if fields is not None:
+ break
+ fields = getattr(base, '_fields', None)
+ if not isinstance(fields, abstractproperty):
+ basetuple = _namedtuple(name, fields)
+ bases = (basetuple,) + bases
+ namespace.pop('_fields', None)
+ namespace.setdefault('__doc__', basetuple.__doc__)
+ namespace.setdefault('__slots__', ())
+ return ABCMeta.__new__(mcls, name, bases, namespace)
+
+
+exec(
+ # Python 2.x metaclass declaration syntax
+ """class _NamedTupleABC(object):
+ '''The abstract base class + mix-in for named tuples.'''
+ __metaclass__ = _NamedTupleABCMeta
+ _fields = abstractproperty()""" if version_info[0] < 3 else
+ # Python 3.x metaclass declaration syntax
+ """class _NamedTupleABC(metaclass=_NamedTupleABCMeta):
+ '''The abstract base class + mix-in for named tuples.'''
+ _fields = abstractproperty()"""
+)
+
+
+_namedtuple.abc = _NamedTupleABC
+#_NamedTupleABC.register(type(version_info)) # (and similar, in the future...)
+
+@wraps(_namedtuple)
+def namedtuple(*args, **kwargs):
+ '''Named tuple factory with namedtuple.abc subclass registration.'''
+ cls = _namedtuple(*args, **kwargs)
+ _NamedTupleABC.register(cls)
+ return cls
+
+collections.namedtuple = namedtuple
+
+
+
+
+if __name__ == '__main__':
+
+ '''Examples and explanations'''
+
+ # Simple usage
+
+ class MyRecord(namedtuple.abc):
+ _fields = 'x y z' # such form will be transformed into ('x', 'y', 'z')
+ def _my_custom_method(self):
+ return list(self._asdict().items())
+ # (the '_fields' attribute belongs to the named tuple public API anyway)
+
+ rec = MyRecord(1, 2, 3)
+ print(rec)
+ print(rec._my_custom_method())
+ print(rec._replace(y=222))
+ print(rec._replace(y=222)._my_custom_method())
+
+ # Custom abstract classes...
+
+ class MyAbstractRecord(namedtuple.abc):
+ def _my_custom_method(self):
+ return list(self._asdict().items())
+
+ try:
+ MyAbstractRecord() # (abstract classes cannot be instantiated)
+ except TypeError as exc:
+ print(exc)
+
+ class AnotherAbstractRecord(MyAbstractRecord):
+ def __str__(self):
+ return '<<<{0}>>>'.format(super(AnotherAbstractRecord,
+ self).__str__())
+
+ # ...and their non-abstract subclasses
+
+ class MyRecord2(MyAbstractRecord):
+ _fields = 'a, b'
+
+ class MyRecord3(AnotherAbstractRecord):
+ _fields = 'p', 'q', 'r'
+
+ rec2 = MyRecord2('foo', 'bar')
+ print(rec2)
+ print(rec2._my_custom_method())
+ print(rec2._replace(b=222))
+ print(rec2._replace(b=222)._my_custom_method())
+
+ rec3 = MyRecord3('foo', 'bar', 'baz')
+ print(rec3)
+ print(rec3._my_custom_method())
+ print(rec3._replace(q=222))
+ print(rec3._replace(q=222)._my_custom_method())
+
+ # You can also subclass non-abstract ones...
+
+ class MyRecord33(MyRecord3):
+ def __str__(self):
+ return '< {0!r}, ..., {0!r} >'.format(self.p, self.r)
+
+ rec33 = MyRecord33('foo', 'bar', 'baz')
+ print(rec33)
+ print(rec33._my_custom_method())
+ print(rec33._replace(q=222))
+ print(rec33._replace(q=222)._my_custom_method())
+
+ # ...and even override the magic '_fields' attribute again
+
+ class MyRecord345(MyRecord3):
+ _fields = 'e f g h i j k'
+
+ rec345 = MyRecord345(1, 2, 3, 4, 3, 2, 1)
+ print(rec345)
+ print(rec345._my_custom_method())
+ print(rec345._replace(f=222))
+ print(rec345._replace(f=222)._my_custom_method())
+
+ # Mixing-in some other classes is also possible:
+
+ class MyMixIn(object):
+ def method(self):
+ return "MyMixIn.method() called"
+ def _my_custom_method(self):
+ return "MyMixIn._my_custom_method() called"
+ def count(self, item):
+ return "MyMixIn.count({0}) called".format(item)
+ def _asdict(self): # (cannot override a namedtuple method, see below)
+ return "MyMixIn._asdict() called"
+
+ class MyRecord4(MyRecord33, MyMixIn): # mix-in on the right
+ _fields = 'j k l x'
+
+ class MyRecord5(MyMixIn, MyRecord33): # mix-in on the left
+ _fields = 'j k l x y'
+
+ rec4 = MyRecord4(1, 2, 3, 2)
+ print(rec4)
+ print(rec4.method())
+ print(rec4._my_custom_method()) # MyRecord33's
+ print(rec4.count(2)) # tuple's
+ print(rec4._replace(k=222))
+ print(rec4._replace(k=222).method())
+ print(rec4._replace(k=222)._my_custom_method()) # MyRecord33's
+ print(rec4._replace(k=222).count(8)) # tuple's
+
+ rec5 = MyRecord5(1, 2, 3, 2, 1)
+ print(rec5)
+ print(rec5.method())
+ print(rec5._my_custom_method()) # MyMixIn's
+ print(rec5.count(2)) # MyMixIn's
+ print(rec5._replace(k=222))
+ print(rec5._replace(k=222).method())
+ print(rec5._replace(k=222)._my_custom_method()) # MyMixIn's
+ print(rec5._replace(k=222).count(2)) # MyMixIn's
+
+ # Note that behavior: the standard namedtuple methods cannot be
+ # overridden by a foreign mix-in -- even if the mix-in is declared
+ # as the leftmost base class (but, obviously, you can override them
+ # in the defined class or its subclasses):
+
+ print(rec4._asdict()) # (returns a dict, not "MyMixIn._asdict() called")
+ print(rec5._asdict()) # (returns a dict, not "MyMixIn._asdict() called")
+
+ class MyRecord6(MyRecord33):
+ _fields = 'j k l x y z'
+ def _asdict(self):
+ return "MyRecord6._asdict() called"
+ rec6 = MyRecord6(1, 2, 3, 1, 2, 3)
+ print(rec6._asdict()) # (this returns "MyRecord6._asdict() called")
+
+ # All that record classes are real subclasses of namedtuple.abc:
+
+ assert issubclass(MyRecord, namedtuple.abc)
+ assert issubclass(MyAbstractRecord, namedtuple.abc)
+ assert issubclass(AnotherAbstractRecord, namedtuple.abc)
+ assert issubclass(MyRecord2, namedtuple.abc)
+ assert issubclass(MyRecord3, namedtuple.abc)
+ assert issubclass(MyRecord33, namedtuple.abc)
+ assert issubclass(MyRecord345, namedtuple.abc)
+ assert issubclass(MyRecord4, namedtuple.abc)
+ assert issubclass(MyRecord5, namedtuple.abc)
+ assert issubclass(MyRecord6, namedtuple.abc)
+
+ # ...but abstract ones are not subclasses of tuple
+ # (and this is what you probably want):
+
+ assert not issubclass(MyAbstractRecord, tuple)
+ assert not issubclass(AnotherAbstractRecord, tuple)
+
+ assert issubclass(MyRecord, tuple)
+ assert issubclass(MyRecord2, tuple)
+ assert issubclass(MyRecord3, tuple)
+ assert issubclass(MyRecord33, tuple)
+ assert issubclass(MyRecord345, tuple)
+ assert issubclass(MyRecord4, tuple)
+ assert issubclass(MyRecord5, tuple)
+ assert issubclass(MyRecord6, tuple)
+
+ # Named tuple classes created with namedtuple() factory function
+ # (in the "traditional" way) are registered as "virtual" subclasses
+ # of namedtuple.abc:
+
+ MyTuple = namedtuple('MyTuple', 'a b c')
+ mt = MyTuple(1, 2, 3)
+ assert issubclass(MyTuple, namedtuple.abc)
+ assert isinstance(mt, namedtuple.abc)
diff --git a/poky/bitbake/lib/bb/parse/__init__.py b/poky/bitbake/lib/bb/parse/__init__.py
new file mode 100644
index 000000000..5397d57a5
--- /dev/null
+++ b/poky/bitbake/lib/bb/parse/__init__.py
@@ -0,0 +1,175 @@
+"""
+BitBake Parsers
+
+File parsers for the BitBake build tools.
+
+"""
+
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+handlers = []
+
+import errno
+import logging
+import os
+import stat
+import bb
+import bb.utils
+import bb.siggen
+
+logger = logging.getLogger("BitBake.Parsing")
+
+class ParseError(Exception):
+ """Exception raised when parsing fails"""
+ def __init__(self, msg, filename, lineno=0):
+ self.msg = msg
+ self.filename = filename
+ self.lineno = lineno
+ Exception.__init__(self, msg, filename, lineno)
+
+ def __str__(self):
+ if self.lineno:
+ return "ParseError at %s:%d: %s" % (self.filename, self.lineno, self.msg)
+ else:
+ return "ParseError in %s: %s" % (self.filename, self.msg)
+
+class SkipRecipe(Exception):
+ """Exception raised to skip this recipe"""
+
+class SkipPackage(SkipRecipe):
+ """Exception raised to skip this recipe (use SkipRecipe in new code)"""
+
+__mtime_cache = {}
+def cached_mtime(f):
+ if f not in __mtime_cache:
+ __mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
+ return __mtime_cache[f]
+
+def cached_mtime_noerror(f):
+ if f not in __mtime_cache:
+ try:
+ __mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
+ except OSError:
+ return 0
+ return __mtime_cache[f]
+
+def update_mtime(f):
+ try:
+ __mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
+ except OSError:
+ if f in __mtime_cache:
+ del __mtime_cache[f]
+ return 0
+ return __mtime_cache[f]
+
+def update_cache(f):
+ if f in __mtime_cache:
+ logger.debug(1, "Updating mtime cache for %s" % f)
+ update_mtime(f)
+
+def clear_cache():
+ global __mtime_cache
+ __mtime_cache = {}
+
+def mark_dependency(d, f):
+ if f.startswith('./'):
+ f = "%s/%s" % (os.getcwd(), f[2:])
+ deps = (d.getVar('__depends', False) or [])
+ s = (f, cached_mtime_noerror(f))
+ if s not in deps:
+ deps.append(s)
+ d.setVar('__depends', deps)
+
+def check_dependency(d, f):
+ s = (f, cached_mtime_noerror(f))
+ deps = (d.getVar('__depends', False) or [])
+ return s in deps
+
+def supports(fn, data):
+ """Returns true if we have a handler for this file, false otherwise"""
+ for h in handlers:
+ if h['supports'](fn, data):
+ return 1
+ return 0
+
+def handle(fn, data, include = 0):
+ """Call the handler that is appropriate for this file"""
+ for h in handlers:
+ if h['supports'](fn, data):
+ with data.inchistory.include(fn):
+ return h['handle'](fn, data, include)
+ raise ParseError("not a BitBake file", fn)
+
+def init(fn, data):
+ for h in handlers:
+ if h['supports'](fn):
+ return h['init'](data)
+
+def init_parser(d):
+ bb.parse.siggen = bb.siggen.init(d)
+
+def resolve_file(fn, d):
+ if not os.path.isabs(fn):
+ bbpath = d.getVar("BBPATH")
+ newfn, attempts = bb.utils.which(bbpath, fn, history=True)
+ for af in attempts:
+ mark_dependency(d, af)
+ if not newfn:
+ raise IOError(errno.ENOENT, "file %s not found in %s" % (fn, bbpath))
+ fn = newfn
+ else:
+ mark_dependency(d, fn)
+
+ if not os.path.isfile(fn):
+ raise IOError(errno.ENOENT, "file %s not found" % fn)
+
+ return fn
+
+# Used by OpenEmbedded metadata
+__pkgsplit_cache__={}
+def vars_from_file(mypkg, d):
+ if not mypkg or not mypkg.endswith((".bb", ".bbappend")):
+ return (None, None, None)
+ if mypkg in __pkgsplit_cache__:
+ return __pkgsplit_cache__[mypkg]
+
+ myfile = os.path.splitext(os.path.basename(mypkg))
+ parts = myfile[0].split('_')
+ __pkgsplit_cache__[mypkg] = parts
+ if len(parts) > 3:
+ raise ParseError("Unable to generate default variables from filename (too many underscores)", mypkg)
+ exp = 3 - len(parts)
+ tmplist = []
+ while exp != 0:
+ exp -= 1
+ tmplist.append(None)
+ parts.extend(tmplist)
+ return parts
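+# For example, vars_from_file("/path/to/foo_1.0.bb", d) returns
+# ['foo', '1.0', None]: the name and version parts padded to three entries.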
+
+def get_file_depends(d):
+ '''Return the dependent files'''
+ dep_files = []
+ depends = d.getVar('__base_depends', False) or []
+ depends = depends + (d.getVar('__depends', False) or [])
+ for (fn, _) in depends:
+ dep_files.append(os.path.abspath(fn))
+ return " ".join(dep_files)
+
+from bb.parse.parse_py import __version__, ConfHandler, BBHandler
diff --git a/poky/bitbake/lib/bb/parse/ast.py b/poky/bitbake/lib/bb/parse/ast.py
new file mode 100644
index 000000000..6690dc51c
--- /dev/null
+++ b/poky/bitbake/lib/bb/parse/ast.py
@@ -0,0 +1,442 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+ AbstractSyntaxTree classes for the Bitbake language
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+# Copyright (C) 2009 Holger Hans Peter Freyther
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+import re
+import string
+import logging
+import bb
+import itertools
+from bb import methodpool
+from bb.parse import logger
+
+class StatementGroup(list):
+ def eval(self, data):
+ for statement in self:
+ statement.eval(data)
+
+class AstNode(object):
+ def __init__(self, filename, lineno):
+ self.filename = filename
+ self.lineno = lineno
+
+class IncludeNode(AstNode):
+ def __init__(self, filename, lineno, what_file, force):
+ AstNode.__init__(self, filename, lineno)
+ self.what_file = what_file
+ self.force = force
+
+ def eval(self, data):
+ """
+ Include the file and evaluate the statements
+ """
+ s = data.expand(self.what_file)
+ logger.debug(2, "CONF %s:%s: including %s", self.filename, self.lineno, s)
+
+ # TODO: Cache those includes... maybe not here though
+ if self.force:
+ bb.parse.ConfHandler.include(self.filename, s, self.lineno, data, "include required")
+ else:
+ bb.parse.ConfHandler.include(self.filename, s, self.lineno, data, False)
+
+class ExportNode(AstNode):
+ def __init__(self, filename, lineno, var):
+ AstNode.__init__(self, filename, lineno)
+ self.var = var
+
+ def eval(self, data):
+ data.setVarFlag(self.var, "export", 1, op = 'exported')
+
+class UnsetNode(AstNode):
+ def __init__(self, filename, lineno, var):
+ AstNode.__init__(self, filename, lineno)
+ self.var = var
+
+ def eval(self, data):
+ loginfo = {
+ 'variable': self.var,
+ 'file': self.filename,
+ 'line': self.lineno,
+ }
+ data.delVar(self.var,**loginfo)
+
+class UnsetFlagNode(AstNode):
+ def __init__(self, filename, lineno, var, flag):
+ AstNode.__init__(self, filename, lineno)
+ self.var = var
+ self.flag = flag
+
+ def eval(self, data):
+ loginfo = {
+ 'variable': self.var,
+ 'file': self.filename,
+ 'line': self.lineno,
+ }
+ data.delVarFlag(self.var, self.flag, **loginfo)
+
+class DataNode(AstNode):
+ """
+ Various data-related updates. For the sake of sanity
+ we have one class doing all of this. This means that all
+ of it needs to be re-evaluated... we might be able to do
+ that faster with multiple classes.
+ """
+ def __init__(self, filename, lineno, groupd):
+ AstNode.__init__(self, filename, lineno)
+ self.groupd = groupd
+
+ def getFunc(self, key, data):
+ if 'flag' in self.groupd and self.groupd['flag'] != None:
+ return data.getVarFlag(key, self.groupd['flag'], expand=False, noweakdefault=True)
+ else:
+ return data.getVar(key, False, noweakdefault=True, parsing=True)
+
+ def eval(self, data):
+ groupd = self.groupd
+ key = groupd["var"]
+ loginfo = {
+ 'variable': key,
+ 'file': self.filename,
+ 'line': self.lineno,
+ }
+ if "exp" in groupd and groupd["exp"] != None:
+ data.setVarFlag(key, "export", 1, op = 'exported', **loginfo)
+
+ op = "set"
+ if "ques" in groupd and groupd["ques"] != None:
+ val = self.getFunc(key, data)
+ op = "set?"
+ if val == None:
+ val = groupd["value"]
+ elif "colon" in groupd and groupd["colon"] != None:
+ e = data.createCopy()
+ op = "immediate"
+ val = e.expand(groupd["value"], key + "[:=]")
+ elif "append" in groupd and groupd["append"] != None:
+ op = "append"
+ val = "%s %s" % ((self.getFunc(key, data) or ""), groupd["value"])
+ elif "prepend" in groupd and groupd["prepend"] != None:
+ op = "prepend"
+ val = "%s %s" % (groupd["value"], (self.getFunc(key, data) or ""))
+ elif "postdot" in groupd and groupd["postdot"] != None:
+ op = "postdot"
+ val = "%s%s" % ((self.getFunc(key, data) or ""), groupd["value"])
+ elif "predot" in groupd and groupd["predot"] != None:
+ op = "predot"
+ val = "%s%s" % (groupd["value"], (self.getFunc(key, data) or ""))
+ else:
+ val = groupd["value"]
+
+ flag = None
+ if 'flag' in groupd and groupd['flag'] != None:
+ flag = groupd['flag']
+ elif groupd["lazyques"]:
+ flag = "_defaultval"
+
+ loginfo['op'] = op
+ loginfo['detail'] = groupd["value"]
+
+ if flag:
+ data.setVarFlag(key, flag, val, **loginfo)
+ else:
+ data.setVar(key, val, parsing=True, **loginfo)
+
+class MethodNode(AstNode):
+ tr_tbl = str.maketrans('/.+-@%&', '_______')
+
+ def __init__(self, filename, lineno, func_name, body, python, fakeroot):
+ AstNode.__init__(self, filename, lineno)
+ self.func_name = func_name
+ self.body = body
+ self.python = python
+ self.fakeroot = fakeroot
+
+ def eval(self, data):
+ text = '\n'.join(self.body)
+ funcname = self.func_name
+ if self.func_name == "__anonymous":
+ funcname = ("__anon_%s_%s" % (self.lineno, self.filename.translate(MethodNode.tr_tbl)))
+ self.python = True
+ text = "def %s(d):\n" % (funcname) + text
+ bb.methodpool.insert_method(funcname, text, self.filename, self.lineno - len(self.body))
+ anonfuncs = data.getVar('__BBANONFUNCS', False) or []
+ anonfuncs.append(funcname)
+ data.setVar('__BBANONFUNCS', anonfuncs)
+ if data.getVar(funcname, False):
+ # clean up old version of this piece of metadata, as its
+ # flags could cause problems
+ data.delVarFlag(funcname, 'python')
+ data.delVarFlag(funcname, 'fakeroot')
+ if self.python:
+ data.setVarFlag(funcname, "python", "1")
+ if self.fakeroot:
+ data.setVarFlag(funcname, "fakeroot", "1")
+ data.setVarFlag(funcname, "func", 1)
+ data.setVar(funcname, text, parsing=True)
+ data.setVarFlag(funcname, 'filename', self.filename)
+ data.setVarFlag(funcname, 'lineno', str(self.lineno - len(self.body)))
+
+class PythonMethodNode(AstNode):
+ def __init__(self, filename, lineno, function, modulename, body):
+ AstNode.__init__(self, filename, lineno)
+ self.function = function
+ self.modulename = modulename
+ self.body = body
+
+ def eval(self, data):
+ # Note we will add root to parsedmethods after having parsed
+ # 'this' file. This means we will not parse methods from
+ # bb classes twice
+ text = '\n'.join(self.body)
+ bb.methodpool.insert_method(self.modulename, text, self.filename, self.lineno - len(self.body) - 1)
+ data.setVarFlag(self.function, "func", 1)
+ data.setVarFlag(self.function, "python", 1)
+ data.setVar(self.function, text, parsing=True)
+ data.setVarFlag(self.function, 'filename', self.filename)
+ data.setVarFlag(self.function, 'lineno', str(self.lineno - len(self.body) - 1))
+
+class ExportFuncsNode(AstNode):
+ def __init__(self, filename, lineno, fns, classname):
+ AstNode.__init__(self, filename, lineno)
+ self.n = fns.split()
+ self.classname = classname
+
+ def eval(self, data):
+
+ for func in self.n:
+ calledfunc = self.classname + "_" + func
+
+ if data.getVar(func, False) and not data.getVarFlag(func, 'export_func', False):
+ continue
+
+ if data.getVar(func, False):
+ data.setVarFlag(func, 'python', None)
+ data.setVarFlag(func, 'func', None)
+
+ for flag in [ "func", "python" ]:
+ if data.getVarFlag(calledfunc, flag, False):
+ data.setVarFlag(func, flag, data.getVarFlag(calledfunc, flag, False))
+ for flag in [ "dirs" ]:
+ if data.getVarFlag(func, flag, False):
+ data.setVarFlag(calledfunc, flag, data.getVarFlag(func, flag, False))
+ data.setVarFlag(func, "filename", "autogenerated")
+ data.setVarFlag(func, "lineno", 1)
+
+ if data.getVarFlag(calledfunc, "python", False):
+ data.setVar(func, " bb.build.exec_func('" + calledfunc + "', d)\n", parsing=True)
+ else:
+ if "-" in self.classname:
+                    bb.fatal("The classname %s contains a dash character and is calling an sh function %s using EXPORT_FUNCTIONS. Since a dash is illegal in sh function names, this cannot work; please rename the class or don't use EXPORT_FUNCTIONS." % (self.classname, calledfunc))
+ data.setVar(func, " " + calledfunc + "\n", parsing=True)
+ data.setVarFlag(func, 'export_func', '1')
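+
+        # Illustrative sketch (not part of the original source): a class
+        # "autotools.bbclass" containing "EXPORT_FUNCTIONS do_compile" gives
+        # an inheriting recipe a do_compile wrapper that simply runs the
+        # class implementation, roughly:
+        #
+        #   do_compile() {
+        #       autotools_do_compile
+        #   }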
+
+class AddTaskNode(AstNode):
+ def __init__(self, filename, lineno, func, before, after):
+ AstNode.__init__(self, filename, lineno)
+ self.func = func
+ self.before = before
+ self.after = after
+
+ def eval(self, data):
+ bb.build.addtask(self.func, self.before, self.after, data)
+
+class DelTaskNode(AstNode):
+ def __init__(self, filename, lineno, func):
+ AstNode.__init__(self, filename, lineno)
+ self.func = func
+
+ def eval(self, data):
+ bb.build.deltask(self.func, data)
+
+class BBHandlerNode(AstNode):
+ def __init__(self, filename, lineno, fns):
+ AstNode.__init__(self, filename, lineno)
+ self.hs = fns.split()
+
+ def eval(self, data):
+ bbhands = data.getVar('__BBHANDLERS', False) or []
+ for h in self.hs:
+ bbhands.append(h)
+ data.setVarFlag(h, "handler", 1)
+ data.setVar('__BBHANDLERS', bbhands)
+
+class InheritNode(AstNode):
+ def __init__(self, filename, lineno, classes):
+ AstNode.__init__(self, filename, lineno)
+ self.classes = classes
+
+ def eval(self, data):
+ bb.parse.BBHandler.inherit(self.classes, self.filename, self.lineno, data)
+
+def handleInclude(statements, filename, lineno, m, force):
+ statements.append(IncludeNode(filename, lineno, m.group(1), force))
+
+def handleExport(statements, filename, lineno, m):
+ statements.append(ExportNode(filename, lineno, m.group(1)))
+
+def handleUnset(statements, filename, lineno, m):
+ statements.append(UnsetNode(filename, lineno, m.group(1)))
+
+def handleUnsetFlag(statements, filename, lineno, m):
+ statements.append(UnsetFlagNode(filename, lineno, m.group(1), m.group(2)))
+
+def handleData(statements, filename, lineno, groupd):
+ statements.append(DataNode(filename, lineno, groupd))
+
+def handleMethod(statements, filename, lineno, func_name, body, python, fakeroot):
+ statements.append(MethodNode(filename, lineno, func_name, body, python, fakeroot))
+
+def handlePythonMethod(statements, filename, lineno, funcname, modulename, body):
+ statements.append(PythonMethodNode(filename, lineno, funcname, modulename, body))
+
+def handleExportFuncs(statements, filename, lineno, m, classname):
+ statements.append(ExportFuncsNode(filename, lineno, m.group(1), classname))
+
+def handleAddTask(statements, filename, lineno, m):
+ func = m.group("func")
+ before = m.group("before")
+ after = m.group("after")
+ if func is None:
+ return
+
+ statements.append(AddTaskNode(filename, lineno, func, before, after))
+
+def handleDelTask(statements, filename, lineno, m):
+ func = m.group("func")
+ if func is None:
+ return
+
+ statements.append(DelTaskNode(filename, lineno, func))
+
+def handleBBHandlers(statements, filename, lineno, m):
+ statements.append(BBHandlerNode(filename, lineno, m.group(1)))
+
+def handleInherit(statements, filename, lineno, m):
+ classes = m.group(1)
+ statements.append(InheritNode(filename, lineno, classes))
+
+def runAnonFuncs(d):
+ code = []
+ for funcname in d.getVar("__BBANONFUNCS", False) or []:
+ code.append("%s(d)" % funcname)
+ bb.utils.better_exec("\n".join(code), {"d": d})
+
+def finalize(fn, d, variant = None):
+ saved_handlers = bb.event.get_handlers().copy()
+
+ for var in d.getVar('__BBHANDLERS', False) or []:
+ # try to add the handler
+ handlerfn = d.getVarFlag(var, "filename", False)
+ if not handlerfn:
+ bb.fatal("Undefined event handler function '%s'" % var)
+ handlerln = int(d.getVarFlag(var, "lineno", False))
+ bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)
+
+ bb.event.fire(bb.event.RecipePreFinalise(fn), d)
+
+ bb.data.expandKeys(d)
+ runAnonFuncs(d)
+
+ tasklist = d.getVar('__BBTASKS', False) or []
+ bb.event.fire(bb.event.RecipeTaskPreProcess(fn, list(tasklist)), d)
+ bb.build.add_tasks(tasklist, d)
+
+ bb.parse.siggen.finalise(fn, d, variant)
+
+ d.setVar('BBINCLUDED', bb.parse.get_file_depends(d))
+
+ bb.event.fire(bb.event.RecipeParsed(fn), d)
+ bb.event.set_handlers(saved_handlers)
+
+def _create_variants(datastores, names, function, onlyfinalise):
+ def create_variant(name, orig_d, arg = None):
+ if onlyfinalise and name not in onlyfinalise:
+ return
+ new_d = bb.data.createCopy(orig_d)
+ function(arg or name, new_d)
+ datastores[name] = new_d
+
+ for variant in list(datastores.keys()):
+ for name in names:
+ if not variant:
+ # Based on main recipe
+ create_variant(name, datastores[""])
+ else:
+ create_variant("%s-%s" % (variant, name), datastores[variant], name)
+
+def multi_finalize(fn, d):
+ appends = (d.getVar("__BBAPPEND") or "").split()
+ for append in appends:
+ logger.debug(1, "Appending .bbappend file %s to %s", append, fn)
+ bb.parse.BBHandler.handle(append, d, True)
+
+ onlyfinalise = d.getVar("__ONLYFINALISE", False)
+
+ safe_d = d
+ d = bb.data.createCopy(safe_d)
+ try:
+ finalize(fn, d)
+ except bb.parse.SkipRecipe as e:
+ d.setVar("__SKIPPED", e.args[0])
+ datastores = {"": safe_d}
+
+ extended = d.getVar("BBCLASSEXTEND") or ""
+ if extended:
+        # the following is to support bbextends with arguments, e.g. multilib.
+ # an example is as follows:
+ # BBCLASSEXTEND = "multilib:lib32"
+ # it will create foo-lib32, inheriting multilib.bbclass and set
+ # BBEXTENDCURR to "multilib" and BBEXTENDVARIANT to "lib32"
+ extendedmap = {}
+ variantmap = {}
+
+ for ext in extended.split():
+ eext = ext.split(':', 2)
+ if len(eext) > 1:
+ extendedmap[ext] = eext[0]
+ variantmap[ext] = eext[1]
+ else:
+ extendedmap[ext] = ext
+
+ pn = d.getVar("PN")
+ def extendfunc(name, d):
+ if name != extendedmap[name]:
+ d.setVar("BBEXTENDCURR", extendedmap[name])
+ d.setVar("BBEXTENDVARIANT", variantmap[name])
+ else:
+ d.setVar("PN", "%s-%s" % (pn, name))
+ bb.parse.BBHandler.inherit(extendedmap[name], fn, 0, d)
+
+ safe_d.setVar("BBCLASSEXTEND", extended)
+ _create_variants(datastores, extendedmap.keys(), extendfunc, onlyfinalise)
+
+ for variant in datastores.keys():
+ if variant:
+ try:
+ if not onlyfinalise or variant in onlyfinalise:
+ finalize(fn, datastores[variant], variant)
+ except bb.parse.SkipRecipe as e:
+ datastores[variant].setVar("__SKIPPED", e.args[0])
+
+ datastores[""] = d
+ return datastores
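+
+# Rough sketch of the resulting variant naming (illustrative, not part of the
+# original module): with BBCLASSEXTEND = "native multilib:lib32" and PN "foo",
+# multi_finalize() returns datastores keyed:
+#   ""               -> the base recipe
+#   "native"         -> inherits native.bbclass; PN becomes "foo-native"
+#   "multilib:lib32" -> inherits multilib.bbclass, with BBEXTENDCURR set to
+#                       "multilib" and BBEXTENDVARIANT set to "lib32"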
diff --git a/poky/bitbake/lib/bb/parse/parse_py/BBHandler.py b/poky/bitbake/lib/bb/parse/parse_py/BBHandler.py
new file mode 100644
index 000000000..e5039e3bd
--- /dev/null
+++ b/poky/bitbake/lib/bb/parse/parse_py/BBHandler.py
@@ -0,0 +1,251 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+ class for handling .bb files
+
+ Reads a .bb file and obtains its metadata
+
+"""
+
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+import re, bb, os
+import logging
+import bb.build, bb.utils
+from bb import data
+
+from . import ConfHandler
+from .. import resolve_file, ast, logger, ParseError
+from .ConfHandler import include, init
+
+# For compatibility
+bb.deprecate_import(__name__, "bb.parse", ["vars_from_file"])
+
+__func_start_regexp__ = re.compile( r"(((?P<py>python)|(?P<fr>fakeroot))\s*)*(?P<func>[\w\.\-\+\{\}\$]+)?\s*\(\s*\)\s*{$" )
+__inherit_regexp__ = re.compile( r"inherit\s+(.+)" )
+__export_func_regexp__ = re.compile( r"EXPORT_FUNCTIONS\s+(.+)" )
+__addtask_regexp__ = re.compile(r"addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
+__deltask_regexp__ = re.compile(r"deltask\s+(?P<func>\w+)")
+__addhandler_regexp__ = re.compile( r"addhandler\s+(.+)" )
+__def_regexp__ = re.compile( r"def\s+(\w+).*:" )
+__python_func_regexp__ = re.compile( r"(\s+.*)|(^$)" )
+
+__infunc__ = []
+__inpython__ = False
+__body__ = []
+__classname__ = ""
+
+cached_statements = {}
+
+def supports(fn, d):
+ """Return True if fn has a supported extension"""
+ return os.path.splitext(fn)[-1] in [".bb", ".bbclass", ".inc"]
+
+def inherit(files, fn, lineno, d):
+ __inherit_cache = d.getVar('__inherit_cache', False) or []
+ files = d.expand(files).split()
+ for file in files:
+ if not os.path.isabs(file) and not file.endswith(".bbclass"):
+ file = os.path.join('classes', '%s.bbclass' % file)
+
+ if not os.path.isabs(file):
+ bbpath = d.getVar("BBPATH")
+ abs_fn, attempts = bb.utils.which(bbpath, file, history=True)
+ for af in attempts:
+ if af != abs_fn:
+ bb.parse.mark_dependency(d, af)
+ if abs_fn:
+ file = abs_fn
+
+        if file not in __inherit_cache:
+ logger.debug(1, "Inheriting %s (from %s:%d)" % (file, fn, lineno))
+ __inherit_cache.append( file )
+ d.setVar('__inherit_cache', __inherit_cache)
+ include(fn, file, lineno, d, "inherit")
+ __inherit_cache = d.getVar('__inherit_cache', False) or []
+
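+# For illustration (not part of the original source): a recipe line such as
+#   inherit autotools pkgconfig
+# reaches inherit() with files="autotools pkgconfig"; each bare name is mapped
+# to "classes/<name>.bbclass" and resolved against BBPATH before being passed
+# to include().
+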
+def get_statements(filename, absolute_filename, base_name):
+ global cached_statements
+
+ try:
+ return cached_statements[absolute_filename]
+ except KeyError:
+ with open(absolute_filename, 'r') as f:
+ statements = ast.StatementGroup()
+
+ lineno = 0
+ while True:
+ lineno = lineno + 1
+ s = f.readline()
+ if not s: break
+ s = s.rstrip()
+ feeder(lineno, s, filename, base_name, statements)
+
+ if __inpython__:
+ # add a blank line to close out any python definition
+ feeder(lineno, "", filename, base_name, statements, eof=True)
+
+ if filename.endswith(".bbclass") or filename.endswith(".inc"):
+ cached_statements[absolute_filename] = statements
+ return statements
+
+def handle(fn, d, include):
+ global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __residue__, __classname__
+ __body__ = []
+ __infunc__ = []
+ __classname__ = ""
+ __residue__ = []
+
+ base_name = os.path.basename(fn)
+ (root, ext) = os.path.splitext(base_name)
+ init(d)
+
+ if ext == ".bbclass":
+ __classname__ = root
+ __inherit_cache = d.getVar('__inherit_cache', False) or []
+        if fn not in __inherit_cache:
+ __inherit_cache.append(fn)
+ d.setVar('__inherit_cache', __inherit_cache)
+
+ if include != 0:
+ oldfile = d.getVar('FILE', False)
+ else:
+ oldfile = None
+
+ abs_fn = resolve_file(fn, d)
+
+ # actual loading
+ statements = get_statements(fn, abs_fn, base_name)
+
+ # DONE WITH PARSING... time to evaluate
+ if ext != ".bbclass" and abs_fn != oldfile:
+ d.setVar('FILE', abs_fn)
+
+ try:
+ statements.eval(d)
+ except bb.parse.SkipRecipe:
+ d.setVar("__SKIPPED", True)
+ if include == 0:
+ return { "" : d }
+
+ if __infunc__:
+ raise ParseError("Shell function %s is never closed" % __infunc__[0], __infunc__[1], __infunc__[2])
+ if __residue__:
+        raise ParseError("Leftover unparsed (incomplete?) data %s from %s" % (__residue__, fn), fn)
+
+ if ext != ".bbclass" and include == 0:
+ return ast.multi_finalize(fn, d)
+
+ if ext != ".bbclass" and oldfile and abs_fn != oldfile:
+ d.setVar("FILE", oldfile)
+
+ return d
+
+def feeder(lineno, s, fn, root, statements, eof=False):
+ global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__, __infunc__, __body__, bb, __residue__, __classname__
+ if __infunc__:
+ if s == '}':
+ __body__.append('')
+ ast.handleMethod(statements, fn, lineno, __infunc__[0], __body__, __infunc__[3], __infunc__[4])
+ __infunc__ = []
+ __body__ = []
+ else:
+ __body__.append(s)
+ return
+
+ if __inpython__:
+ m = __python_func_regexp__.match(s)
+ if m and not eof:
+ __body__.append(s)
+ return
+ else:
+ ast.handlePythonMethod(statements, fn, lineno, __inpython__,
+ root, __body__)
+ __body__ = []
+ __inpython__ = False
+
+ if eof:
+ return
+
+ if s and s[0] == '#':
+ if len(__residue__) != 0 and __residue__[0][0] != "#":
+            bb.fatal("There is a comment on line %s of file %s (%s) which is in the middle of a multiline expression.\nBitBake used to ignore these but no longer does so; please fix your metadata, as errors are likely as a result of this change." % (lineno, fn, s))
+
+ if len(__residue__) != 0 and __residue__[0][0] == "#" and (not s or s[0] != "#"):
+ bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s))
+
+ if s and s[-1] == '\\':
+ __residue__.append(s[:-1])
+ return
+
+ s = "".join(__residue__) + s
+ __residue__ = []
+
+ # Skip empty lines
+ if s == '':
+ return
+
+ # Skip comments
+ if s[0] == '#':
+ return
+
+ m = __func_start_regexp__.match(s)
+ if m:
+ __infunc__ = [m.group("func") or "__anonymous", fn, lineno, m.group("py") is not None, m.group("fr") is not None]
+ return
+
+ m = __def_regexp__.match(s)
+ if m:
+ __body__.append(s)
+ __inpython__ = m.group(1)
+
+ return
+
+ m = __export_func_regexp__.match(s)
+ if m:
+ ast.handleExportFuncs(statements, fn, lineno, m, __classname__)
+ return
+
+ m = __addtask_regexp__.match(s)
+ if m:
+ ast.handleAddTask(statements, fn, lineno, m)
+ return
+
+ m = __deltask_regexp__.match(s)
+ if m:
+ ast.handleDelTask(statements, fn, lineno, m)
+ return
+
+ m = __addhandler_regexp__.match(s)
+ if m:
+ ast.handleBBHandlers(statements, fn, lineno, m)
+ return
+
+ m = __inherit_regexp__.match(s)
+ if m:
+ ast.handleInherit(statements, fn, lineno, m)
+ return
+
+ return ConfHandler.feeder(lineno, s, fn, statements)
+
+# Add us to the handlers list
+from .. import handlers
+handlers.append({'supports': supports, 'handle': handle, 'init': init})
+del handlers
diff --git a/poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py b/poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py
new file mode 100644
index 000000000..9d3ebe16f
--- /dev/null
+++ b/poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py
@@ -0,0 +1,210 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+ class for handling configuration data files
+
+ Reads a .conf file and obtains its metadata
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import errno
+import re
+import os
+import bb.utils
+from bb.parse import ParseError, resolve_file, ast, logger, handle
+
+__config_regexp__ = re.compile( r"""
+ ^
+ (?P<exp>export\s+)?
+ (?P<var>[a-zA-Z0-9\-_+.${}/~]+?)
+ (\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?
+
+ \s* (
+ (?P<colon>:=) |
+ (?P<lazyques>\?\?=) |
+ (?P<ques>\?=) |
+ (?P<append>\+=) |
+ (?P<prepend>=\+) |
+ (?P<predot>=\.) |
+ (?P<postdot>\.=) |
+ =
+ ) \s*
+
+ (?!'[^']*'[^']*'$)
+ (?!\"[^\"]*\"[^\"]*\"$)
+ (?P<apo>['\"])
+ (?P<value>.*)
+ (?P=apo)
+ $
+ """, re.X)
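+# Worked example (illustrative, not part of the original source): matching
+# 'export FOO ?= "bar"' against __config_regexp__ yields a groupdict with
+# exp="export ", var="FOO", ques="?=", apo='"' and value="bar", the remaining
+# operator groups being None; DataNode.eval() in bb.parse.ast then dispatches
+# on those groups.
+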
+__include_regexp__ = re.compile( r"include\s+(.+)" )
+__require_regexp__ = re.compile( r"require\s+(.+)" )
+__export_regexp__ = re.compile( r"export\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
+__unset_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
+__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)\[([a-zA-Z0-9\-_+.]+)\]$" )
+
+def init(data):
+ topdir = data.getVar('TOPDIR', False)
+ if not topdir:
+ data.setVar('TOPDIR', os.getcwd())
+
+
+def supports(fn, d):
+ return fn[-5:] == ".conf"
+
+def include(parentfn, fns, lineno, data, error_out):
+ """
+ error_out: A string indicating the verb (e.g. "include", "inherit") to be
+ used in a ParseError that will be raised if the file to be included could
+ not be included. Specify False to avoid raising an error in this case.
+ """
+ fns = data.expand(fns)
+ parentfn = data.expand(parentfn)
+
+ # "include" or "require" accept zero to n space-separated file names to include.
+ for fn in fns.split():
+ include_single_file(parentfn, fn, lineno, data, error_out)
+
+def include_single_file(parentfn, fn, lineno, data, error_out):
+ """
+ Helper function for include() which does not expand or split its parameters.
+ """
+ if parentfn == fn: # prevent infinite recursion
+ return None
+
+ if not os.path.isabs(fn):
+ dname = os.path.dirname(parentfn)
+ bbpath = "%s:%s" % (dname, data.getVar("BBPATH"))
+ abs_fn, attempts = bb.utils.which(bbpath, fn, history=True)
+ if abs_fn and bb.parse.check_dependency(data, abs_fn):
+ logger.warning("Duplicate inclusion for %s in %s" % (abs_fn, data.getVar('FILE')))
+ for af in attempts:
+ bb.parse.mark_dependency(data, af)
+ if abs_fn:
+ fn = abs_fn
+ elif bb.parse.check_dependency(data, fn):
+ logger.warning("Duplicate inclusion for %s in %s" % (fn, data.getVar('FILE')))
+
+ try:
+ bb.parse.handle(fn, data, True)
+ except (IOError, OSError) as exc:
+ if exc.errno == errno.ENOENT:
+ if error_out:
+ raise ParseError("Could not %s file %s" % (error_out, fn), parentfn, lineno)
+ logger.debug(2, "CONF file '%s' not found", fn)
+ else:
+ if error_out:
+ raise ParseError("Could not %s file %s: %s" % (error_out, fn, exc.strerror), parentfn, lineno)
+ else:
+ raise ParseError("Error parsing %s: %s" % (fn, exc.strerror), parentfn, lineno)
+
+# We have an issue where a UI might want to enforce particular settings such as
+# an empty DISTRO variable. If configuration files do something like assigning
+# a weak default, it turns out to be very difficult to filter out these changes,
+# particularly when the weak default might appear halfway through parsing a chain
+# of configuration files. We therefore let the UIs hook into configuration file
+# parsing. This turns out to be a hard problem to solve any other way.
+confFilters = []
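+# A hypothetical filter (sketch only; the hook signature mirrors the
+# f(fn, data) call in handle() below):
+#
+#   def enforce_empty_distro(fn, data):
+#       if data.getVar('DISTRO', False):
+#           data.setVar('DISTRO', '')
+#
+#   confFilters.append(enforce_empty_distro)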
+
+def handle(fn, data, include):
+ init(data)
+
+ if include == 0:
+ oldfile = None
+ else:
+ oldfile = data.getVar('FILE', False)
+
+ abs_fn = resolve_file(fn, data)
+ f = open(abs_fn, 'r')
+
+ statements = ast.StatementGroup()
+ lineno = 0
+ while True:
+ lineno = lineno + 1
+ s = f.readline()
+ if not s:
+ break
+ w = s.strip()
+ # skip empty lines
+ if not w:
+ continue
+ s = s.rstrip()
+ while s[-1] == '\\':
+ s2 = f.readline().strip()
+ lineno = lineno + 1
+            if (not s2 or s2[0] != "#") and s[0] == "#":
+ bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s))
+ s = s[:-1] + s2
+ # skip comments
+ if s[0] == '#':
+ continue
+ feeder(lineno, s, abs_fn, statements)
+
+ # DONE WITH PARSING... time to evaluate
+ data.setVar('FILE', abs_fn)
+ statements.eval(data)
+ if oldfile:
+ data.setVar('FILE', oldfile)
+
+ f.close()
+
+ for f in confFilters:
+ f(fn, data)
+
+ return data
+
+def feeder(lineno, s, fn, statements):
+ m = __config_regexp__.match(s)
+ if m:
+ groupd = m.groupdict()
+ ast.handleData(statements, fn, lineno, groupd)
+ return
+
+ m = __include_regexp__.match(s)
+ if m:
+ ast.handleInclude(statements, fn, lineno, m, False)
+ return
+
+ m = __require_regexp__.match(s)
+ if m:
+ ast.handleInclude(statements, fn, lineno, m, True)
+ return
+
+ m = __export_regexp__.match(s)
+ if m:
+ ast.handleExport(statements, fn, lineno, m)
+ return
+
+ m = __unset_regexp__.match(s)
+ if m:
+ ast.handleUnset(statements, fn, lineno, m)
+ return
+
+ m = __unset_flag_regexp__.match(s)
+ if m:
+ ast.handleUnsetFlag(statements, fn, lineno, m)
+ return
+
+    raise ParseError("unparsed line: '%s'" % s, fn, lineno)
+
+# Add us to the handlers list
+from bb.parse import handlers
+handlers.append({'supports': supports, 'handle': handle, 'init': init})
+del handlers
diff --git a/poky/bitbake/lib/bb/parse/parse_py/__init__.py b/poky/bitbake/lib/bb/parse/parse_py/__init__.py
new file mode 100644
index 000000000..3e658d0de
--- /dev/null
+++ b/poky/bitbake/lib/bb/parse/parse_py/__init__.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake Parsers
+
+File parsers for the BitBake build tools.
+
+"""
+
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+from __future__ import absolute_import
+from . import ConfHandler
+from . import BBHandler
+
+__version__ = '1.0'
diff --git a/poky/bitbake/lib/bb/persist_data.py b/poky/bitbake/lib/bb/persist_data.py
new file mode 100644
index 000000000..bef701861
--- /dev/null
+++ b/poky/bitbake/lib/bb/persist_data.py
@@ -0,0 +1,214 @@
+"""BitBake Persistent Data Store
+
+Used to store data in a central location such that other threads/tasks can
+access them at some future date. Acts as a convenience wrapper around sqlite,
+currently providing a key/value store accessed by 'domain'.
+"""
+
+# Copyright (C) 2007 Richard Purdie
+# Copyright (C) 2010 Chris Larson <chris_larson@mentor.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import logging
+import os.path
+import sys
+import warnings
+from bb.compat import total_ordering
+from collections.abc import Mapping, MutableMapping
+import sqlite3
+
+sqlversion = sqlite3.sqlite_version_info
+if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
+ raise Exception("sqlite3 version 3.3.0 or later is required.")
+
+
+logger = logging.getLogger("BitBake.PersistData")
+if hasattr(sqlite3, 'enable_shared_cache'):
+ try:
+ sqlite3.enable_shared_cache(True)
+ except sqlite3.OperationalError:
+ pass
+
+
+@total_ordering
+class SQLTable(MutableMapping):
+ """Object representing a table/domain in the database"""
+ def __init__(self, cachefile, table):
+ self.cachefile = cachefile
+ self.table = table
+ self.cursor = connect(self.cachefile)
+
+ self._execute("CREATE TABLE IF NOT EXISTS %s(key TEXT, value TEXT);"
+ % table)
+
+ def _execute(self, *query):
+ """Execute a query, waiting to acquire a lock if necessary"""
+ count = 0
+ while True:
+ try:
+ return self.cursor.execute(*query)
+ except sqlite3.OperationalError as exc:
+ if 'database is locked' in str(exc) and count < 500:
+ count = count + 1
+ self.cursor.close()
+ self.cursor = connect(self.cachefile)
+ continue
+ raise
+
+ def __enter__(self):
+ self.cursor.__enter__()
+ return self
+
+ def __exit__(self, *excinfo):
+ self.cursor.__exit__(*excinfo)
+
+ def __getitem__(self, key):
+ data = self._execute("SELECT * from %s where key=?;" %
+ self.table, [key])
+ for row in data:
+ return row[1]
+ raise KeyError(key)
+
+ def __delitem__(self, key):
+ if key not in self:
+ raise KeyError(key)
+ self._execute("DELETE from %s where key=?;" % self.table, [key])
+
+ def __setitem__(self, key, value):
+ if not isinstance(key, str):
+ raise TypeError('Only string keys are supported')
+ elif not isinstance(value, str):
+ raise TypeError('Only string values are supported')
+
+ data = self._execute("SELECT * from %s where key=?;" %
+ self.table, [key])
+ exists = len(list(data))
+ if exists:
+ self._execute("UPDATE %s SET value=? WHERE key=?;" % self.table,
+ [value, key])
+ else:
+ self._execute("INSERT into %s(key, value) values (?, ?);" %
+ self.table, [key, value])
+
+ def __contains__(self, key):
+ return key in set(self)
+
+ def __len__(self):
+ data = self._execute("SELECT COUNT(key) FROM %s;" % self.table)
+ for row in data:
+ return row[0]
+
+ def __iter__(self):
+ data = self._execute("SELECT key FROM %s;" % self.table)
+ return (row[0] for row in data)
+
+    def __lt__(self, other):
+        if not isinstance(other, Mapping):
+            return NotImplemented
+
+        return len(self) < len(other)
+
+ def get_by_pattern(self, pattern):
+ data = self._execute("SELECT * FROM %s WHERE key LIKE ?;" %
+ self.table, [pattern])
+ return [row[1] for row in data]
+
+ def values(self):
+ return list(self.itervalues())
+
+ def itervalues(self):
+ data = self._execute("SELECT value FROM %s;" % self.table)
+ return (row[0] for row in data)
+
+ def items(self):
+ return list(self.iteritems())
+
+ def iteritems(self):
+ return self._execute("SELECT * FROM %s;" % self.table)
+
+ def clear(self):
+ self._execute("DELETE FROM %s;" % self.table)
+
+ def has_key(self, key):
+ return key in self
+
+
+class PersistData(object):
+ """Deprecated representation of the bitbake persistent data store"""
+ def __init__(self, d):
+ warnings.warn("Use of PersistData is deprecated. Please use "
+ "persist(domain, d) instead.",
+ category=DeprecationWarning,
+ stacklevel=2)
+
+ self.data = persist(d)
+ logger.debug(1, "Using '%s' as the persistent data cache",
+ self.data.filename)
+
+ def addDomain(self, domain):
+ """
+ Add a domain (pending deprecation)
+ """
+ return self.data[domain]
+
+ def delDomain(self, domain):
+ """
+ Removes a domain and all the data it contains
+ """
+ del self.data[domain]
+
+ def getKeyValues(self, domain):
+ """
+ Return a list of key + value pairs for a domain
+ """
+ return list(self.data[domain].items())
+
+ def getValue(self, domain, key):
+ """
+ Return the value of a key for a domain
+ """
+ return self.data[domain][key]
+
+ def setValue(self, domain, key, value):
+ """
+ Sets the value of a key for a domain
+ """
+ self.data[domain][key] = value
+
+ def delValue(self, domain, key):
+ """
+ Deletes a key/value pair
+ """
+ del self.data[domain][key]
+
+def connect(database):
+ connection = sqlite3.connect(database, timeout=5, isolation_level=None)
+ connection.execute("pragma synchronous = off;")
+ connection.text_factory = str
+ return connection
+
+def persist(domain, d):
+ """Convenience factory for SQLTable objects based upon metadata"""
+ import bb.utils
+ cachedir = (d.getVar("PERSISTENT_DIR") or
+ d.getVar("CACHE"))
+ if not cachedir:
+ logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
+ sys.exit(1)
+
+ bb.utils.mkdirhier(cachedir)
+ cachefile = os.path.join(cachedir, "bb_persist_data.sqlite3")
+ return SQLTable(cachefile, domain)
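+
+if __name__ == '__main__':
+    # Minimal smoke test (an illustrative addition, not part of the original
+    # module): exercise SQLTable directly against a throwaway database file.
+    import tempfile
+    with tempfile.TemporaryDirectory() as tmpdir:
+        table = SQLTable(os.path.join(tmpdir, "demo.sqlite3"), "demo")
+        table["answer"] = "42"
+        assert table["answer"] == "42"
+        assert "answer" in table and len(table) == 1
+        del table["answer"]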
diff --git a/poky/bitbake/lib/bb/process.py b/poky/bitbake/lib/bb/process.py
new file mode 100644
index 000000000..e69697cb6
--- /dev/null
+++ b/poky/bitbake/lib/bb/process.py
@@ -0,0 +1,179 @@
+import logging
+import signal
+import subprocess
+import errno
+import select
+
+logger = logging.getLogger('BitBake.Process')
+
+def subprocess_setup():
+ # Python installs a SIGPIPE handler by default. This is usually not what
+ # non-Python subprocesses expect.
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+class CmdError(RuntimeError):
+ def __init__(self, command, msg=None):
+ self.command = command
+ self.msg = msg
+
+ def __str__(self):
+ if not isinstance(self.command, str):
+ cmd = subprocess.list2cmdline(self.command)
+ else:
+ cmd = self.command
+
+ msg = "Execution of '%s' failed" % cmd
+ if self.msg:
+ msg += ': %s' % self.msg
+ return msg
+
+class NotFoundError(CmdError):
+ def __str__(self):
+ return CmdError.__str__(self) + ": command not found"
+
+class ExecutionError(CmdError):
+ def __init__(self, command, exitcode, stdout = None, stderr = None):
+ CmdError.__init__(self, command)
+ self.exitcode = exitcode
+ self.stdout = stdout
+ self.stderr = stderr
+
+ def __str__(self):
+ message = ""
+ if self.stderr:
+ message += self.stderr
+ if self.stdout:
+ message += self.stdout
+ if message:
+ message = ":\n" + message
+ return (CmdError.__str__(self) +
+ " with exit code %s" % self.exitcode + message)
+
+class Popen(subprocess.Popen):
+ defaults = {
+ "close_fds": True,
+ "preexec_fn": subprocess_setup,
+ "stdout": subprocess.PIPE,
+ "stderr": subprocess.STDOUT,
+ "stdin": subprocess.PIPE,
+ "shell": False,
+ }
+
+ def __init__(self, *args, **kwargs):
+ options = dict(self.defaults)
+ options.update(kwargs)
+ subprocess.Popen.__init__(self, *args, **options)
+
+def _logged_communicate(pipe, log, input, extrafiles):
+    import bb.utils  # used below for nonblockingfd(); imported here to avoid an import cycle
+    if pipe.stdin:
+ if input is not None:
+ pipe.stdin.write(input)
+ pipe.stdin.close()
+
+ outdata, errdata = [], []
+ rin = []
+
+ if pipe.stdout is not None:
+ bb.utils.nonblockingfd(pipe.stdout.fileno())
+ rin.append(pipe.stdout)
+ if pipe.stderr is not None:
+ bb.utils.nonblockingfd(pipe.stderr.fileno())
+ rin.append(pipe.stderr)
+ for fobj, _ in extrafiles:
+ bb.utils.nonblockingfd(fobj.fileno())
+ rin.append(fobj)
+
+    def readextras(selected):
+        for fobj, func in extrafiles:
+            if fobj in selected:
+                try:
+                    data = fobj.read()
+                except IOError as err:
+                    if err.errno not in (errno.EAGAIN, errno.EWOULDBLOCK):
+                        raise
+                    data = None
+                if data is not None:
+                    func(data)
+
+ def read_all_pipes(log, rin, outdata, errdata):
+ rlist = rin
+ stdoutbuf = b""
+ stderrbuf = b""
+
+        try:
+            r, w, e = select.select(rlist, [], [], 1)
+        except OSError as exc:
+            if exc.errno != errno.EINTR:
+                raise
+            return
+
+        readextras(r)
+
+ if pipe.stdout in r:
+ data = stdoutbuf + pipe.stdout.read()
+ if data is not None and len(data) > 0:
+ try:
+ data = data.decode("utf-8")
+ outdata.append(data)
+ log.write(data)
+ log.flush()
+ stdoutbuf = b""
+ except UnicodeDecodeError:
+ stdoutbuf = data
+
+ if pipe.stderr in r:
+ data = stderrbuf + pipe.stderr.read()
+ if data is not None and len(data) > 0:
+ try:
+ data = data.decode("utf-8")
+ errdata.append(data)
+ log.write(data)
+ log.flush()
+ stderrbuf = b""
+ except UnicodeDecodeError:
+ stderrbuf = data
+
+ try:
+ # Read all pipes while the process is open
+ while pipe.poll() is None:
+ read_all_pipes(log, rin, outdata, errdata)
+
+        # Process closed, drain all pipes...
+ read_all_pipes(log, rin, outdata, errdata)
+ finally:
+ log.flush()
+
+ if pipe.stdout is not None:
+ pipe.stdout.close()
+ if pipe.stderr is not None:
+ pipe.stderr.close()
+ return ''.join(outdata), ''.join(errdata)
+
+def run(cmd, input=None, log=None, extrafiles=None, **options):
+ """Convenience function to run a command and return its output, raising an
+ exception when the command fails"""
+
+ if not extrafiles:
+ extrafiles = []
+
+    if isinstance(cmd, str) and "shell" not in options:
+ options["shell"] = True
+
+ try:
+ pipe = Popen(cmd, **options)
+ except OSError as exc:
+ if exc.errno == 2:
+ raise NotFoundError(cmd)
+ else:
+ raise CmdError(cmd, exc)
+
+ if log:
+ stdout, stderr = _logged_communicate(pipe, log, input, extrafiles)
+ else:
+ stdout, stderr = pipe.communicate(input)
+        if stdout is not None:
+            stdout = stdout.decode("utf-8")
+        if stderr is not None:
+            stderr = stderr.decode("utf-8")
+
+ if pipe.returncode != 0:
+ raise ExecutionError(cmd, pipe.returncode, stdout, stderr)
+ return stdout, stderr
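+
+if __name__ == "__main__":
+    # Usage sketch (an illustrative addition, not part of the original
+    # module; assumes a POSIX system providing echo and false). run()
+    # raises ExecutionError on a non-zero exit status and NotFoundError
+    # when the executable does not exist.
+    out, err = run(["echo", "hello"])
+    print(out.strip())  # -> hello
+    try:
+        run(["false"])
+    except ExecutionError as exc:
+        print("failed with exit code", exc.exitcode)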
diff --git a/poky/bitbake/lib/bb/progress.py b/poky/bitbake/lib/bb/progress.py
new file mode 100644
index 000000000..f54d1c76f
--- /dev/null
+++ b/poky/bitbake/lib/bb/progress.py
@@ -0,0 +1,276 @@
+"""
+BitBake progress handling code
+"""
+
+# Copyright (C) 2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import re
+import time
+import inspect
+import bb.event
+import bb.build
+
+class ProgressHandler(object):
+ """
+ Base class that can pretend to be a file object well enough to be
+ used to build objects to intercept console output and determine the
+ progress of some operation.
+ """
+ def __init__(self, d, outfile=None):
+ self._progress = 0
+ self._data = d
+ self._lastevent = 0
+ if outfile:
+ self._outfile = outfile
+ else:
+ self._outfile = sys.stdout
+
+ def _fire_progress(self, taskprogress, rate=None):
+ """Internal function to fire the progress event"""
+ bb.event.fire(bb.build.TaskProgress(taskprogress, rate), self._data)
+
+ def write(self, string):
+ self._outfile.write(string)
+
+ def flush(self):
+ self._outfile.flush()
+
+ def update(self, progress, rate=None):
+ ts = time.time()
+ if progress > 100:
+ progress = 100
+ if progress != self._progress or self._lastevent + 1 < ts:
+ self._fire_progress(progress, rate)
+ self._lastevent = ts
+ self._progress = progress
+
+class LineFilterProgressHandler(ProgressHandler):
+ """
+ A ProgressHandler variant that provides the ability to filter out
+ the lines if they contain progress information. Additionally, it
+    filters out anything before the last carriage return on a line. This can
+ be used to keep the logs clean of output that we've only enabled for
+ getting progress, assuming that that can be done on a per-line
+ basis.
+ """
+ def __init__(self, d, outfile=None):
+ self._linebuffer = ''
+ super(LineFilterProgressHandler, self).__init__(d, outfile)
+
+ def write(self, string):
+ self._linebuffer += string
+ while True:
+ breakpos = self._linebuffer.find('\n') + 1
+ if breakpos == 0:
+ break
+ line = self._linebuffer[:breakpos]
+ self._linebuffer = self._linebuffer[breakpos:]
+            # Drop any carriage returns and anything that precedes them
+ lbreakpos = line.rfind('\r') + 1
+ if lbreakpos:
+ line = line[lbreakpos:]
+ if self.writeline(line):
+ super(LineFilterProgressHandler, self).write(line)
+
+ def writeline(self, line):
+ return True
+
+class BasicProgressHandler(ProgressHandler):
+ def __init__(self, d, regex=r'(\d+)%', outfile=None):
+ super(BasicProgressHandler, self).__init__(d, outfile)
+ self._regex = re.compile(regex)
+ # Send an initial progress event so the bar gets shown
+ self._fire_progress(0)
+
+ def write(self, string):
+ percs = self._regex.findall(string)
+ if percs:
+ progress = int(percs[-1])
+ self.update(progress)
+ super(BasicProgressHandler, self).write(string)
+
+class OutOfProgressHandler(ProgressHandler):
+ def __init__(self, d, regex, outfile=None):
+ super(OutOfProgressHandler, self).__init__(d, outfile)
+ self._regex = re.compile(regex)
+ # Send an initial progress event so the bar gets shown
+ self._fire_progress(0)
+
+ def write(self, string):
+ nums = self._regex.findall(string)
+ if nums:
+ progress = (float(nums[-1][0]) / float(nums[-1][1])) * 100
+ self.update(progress)
+ super(OutOfProgressHandler, self).write(string)
+
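+# For example (illustrative, not part of the original source):
+#   OutOfProgressHandler(d, r'(\d+) of (\d+)')
+# turns a line containing "4 of 20" into a 20% progress event.
+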
+class MultiStageProgressReporter(object):
+ """
+ Class which allows reporting progress without the caller
+ having to know where they are in the overall sequence. Useful
+ for tasks made up of python code spread across multiple
+ classes / functions - the progress reporter object can
+ be passed around or stored at the object level and calls
+    to next_stage() and update() made wherever needed.
+ """
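+    # Usage sketch (illustrative, not part of the original class):
+    #
+    #   progress = MultiStageProgressReporter(d, [2, 10])
+    #   progress.next_stage()               # enter the first stage (weight 2)
+    #   ...                                 # stage 1 work
+    #   progress.next_stage(stage_total=50)
+    #   for i in range(50):
+    #       ...                             # stage 2 work
+    #       progress.update(i)              # i is measured out of stage_total
+    #   progress.finish()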
+ def __init__(self, d, stage_weights, debug=False):
+ """
+ Initialise the progress reporter.
+
+ Parameters:
+ * d: the datastore (needed for firing the events)
+ * stage_weights: a list of weight values, one for each stage.
+ The value is scaled internally so you only need to specify
+ values relative to other values in the list, so if there
+ are two stages and the first takes 2s and the second takes
+ 10s you would specify [2, 10] (or [1, 5], it doesn't matter).
+ * debug: specify True (and ensure you call finish() at the end)
+ in order to show a printout of the calculated stage weights
+ based on timing each stage. Use this to determine what the
+ weights should be when you're not sure.
+ """
+ self._data = d
+ total = sum(stage_weights)
+ self._stage_weights = [float(x)/total for x in stage_weights]
+ self._stage = -1
+ self._base_progress = 0
+ # Send an initial progress event so the bar gets shown
+ self._fire_progress(0)
+ self._debug = debug
+ self._finished = False
+ if self._debug:
+ self._last_time = time.time()
+ self._stage_times = []
+ self._stage_total = None
+ self._callers = []
+
+ def _fire_progress(self, taskprogress):
+ bb.event.fire(bb.build.TaskProgress(taskprogress), self._data)
+
+ def next_stage(self, stage_total=None):
+ """
+ Move to the next stage.
+ Parameters:
+ * stage_total: optional total for progress within the stage,
+ see update() for details
+ NOTE: you need to call this before the first stage.
+ """
+ self._stage += 1
+ self._stage_total = stage_total
+ if self._stage == 0:
+ # First stage
+ if self._debug:
+ self._last_time = time.time()
+ else:
+ if self._stage < len(self._stage_weights):
+ self._base_progress = sum(self._stage_weights[:self._stage]) * 100
+ if self._debug:
+ currtime = time.time()
+ self._stage_times.append(currtime - self._last_time)
+ self._last_time = currtime
+ self._callers.append(inspect.getouterframes(inspect.currentframe())[1])
+ elif not self._debug:
+ bb.warn('ProgressReporter: current stage beyond declared number of stages')
+ self._base_progress = 100
+ self._fire_progress(self._base_progress)
+
+ def update(self, stage_progress):
+ """
+ Update progress within the current stage.
+ Parameters:
+ * stage_progress: progress value within the stage. If stage_total
+ was specified when next_stage() was last called, then this
+ value is considered to be out of stage_total, otherwise it should
+ be a percentage value from 0 to 100.
+ """
+ if self._stage_total:
+ stage_progress = (float(stage_progress) / self._stage_total) * 100
+ if self._stage < 0:
+ bb.warn('ProgressReporter: update called before first call to next_stage()')
+ elif self._stage < len(self._stage_weights):
+ progress = self._base_progress + (stage_progress * self._stage_weights[self._stage])
+ else:
+ progress = self._base_progress
+ if progress > 100:
+ progress = 100
+ self._fire_progress(progress)
+
+ def finish(self):
+ if self._finished:
+ return
+ self._finished = True
+ if self._debug:
+ import math
+ self._stage_times.append(time.time() - self._last_time)
+ mintime = max(min(self._stage_times), 0.01)
+ self._callers.append(None)
+ stage_weights = [int(math.ceil(x / mintime)) for x in self._stage_times]
+ bb.warn('Stage weights: %s' % stage_weights)
+ out = []
+ for stage_weight, caller in zip(stage_weights, self._callers):
+ if caller:
+ out.append('Up to %s:%d: %d' % (caller[1], caller[2], stage_weight))
+ else:
+ out.append('Up to finish: %d' % stage_weight)
+ bb.warn('Stage times:\n %s' % '\n '.join(out))
+
+class MultiStageProcessProgressReporter(MultiStageProgressReporter):
+ """
+ Version of MultiStageProgressReporter intended for use with
+ standalone processes (such as preparing the runqueue)
+ """
+ def __init__(self, d, processname, stage_weights, debug=False):
+ self._processname = processname
+ self._started = False
+ MultiStageProgressReporter.__init__(self, d, stage_weights, debug)
+
+ def start(self):
+ if not self._started:
+ bb.event.fire(bb.event.ProcessStarted(self._processname, 100), self._data)
+ self._started = True
+
+ def _fire_progress(self, taskprogress):
+ if taskprogress == 0:
+ self.start()
+ return
+ bb.event.fire(bb.event.ProcessProgress(self._processname, taskprogress), self._data)
+
+ def finish(self):
+ MultiStageProgressReporter.finish(self)
+ bb.event.fire(bb.event.ProcessFinished(self._processname), self._data)
+
+class DummyMultiStageProcessProgressReporter(MultiStageProgressReporter):
+ """
+ MultiStageProcessProgressReporter that takes the calls and does nothing
+ with them (to avoid a bunch of "if progress_reporter:" checks)
+ """
+ def __init__(self):
+ MultiStageProcessProgressReporter.__init__(self, "", None, [])
+
+ def _fire_progress(self, taskprogress, rate=None):
+ pass
+
+ def start(self):
+ pass
+
+ def next_stage(self, stage_total=None):
+ pass
+
+ def update(self, stage_progress):
+ pass
+
+ def finish(self):
+ pass
diff --git a/poky/bitbake/lib/bb/providers.py b/poky/bitbake/lib/bb/providers.py
new file mode 100644
index 000000000..c2aa98c06
--- /dev/null
+++ b/poky/bitbake/lib/bb/providers.py
@@ -0,0 +1,430 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (C) 2003, 2004 Chris Larson
+# Copyright (C) 2003, 2004 Phil Blundell
+# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer
+# Copyright (C) 2005 Holger Hans Peter Freyther
+# Copyright (C) 2005 ROAD GmbH
+# Copyright (C) 2006 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import re
+import logging
+from bb import data, utils
+from collections import defaultdict
+import bb
+
+logger = logging.getLogger("BitBake.Provider")
+
+class NoProvider(bb.BBHandledException):
+ """Exception raised when no provider of a build dependency can be found"""
+
+class NoRProvider(bb.BBHandledException):
+ """Exception raised when no provider of a runtime dependency can be found"""
+
+class MultipleRProvider(bb.BBHandledException):
+ """Exception raised when multiple providers of a runtime dependency can be found"""
+
+def findProviders(cfgData, dataCache, pkg_pn = None):
+ """
+ Convenience function to get latest and preferred providers in pkg_pn
+ """
+
+ if not pkg_pn:
+ pkg_pn = dataCache.pkg_pn
+
+ # Need to ensure data store is expanded
+ localdata = data.createCopy(cfgData)
+ bb.data.expandKeys(localdata)
+
+ preferred_versions = {}
+ latest_versions = {}
+
+ for pn in pkg_pn:
+ (last_ver, last_file, pref_ver, pref_file) = findBestProvider(pn, localdata, dataCache, pkg_pn)
+ preferred_versions[pn] = (pref_ver, pref_file)
+ latest_versions[pn] = (last_ver, last_file)
+
+ return (latest_versions, preferred_versions)
+
+
+def allProviders(dataCache):
+ """
+ Find all providers for each pn
+ """
+ all_providers = defaultdict(list)
+ for (fn, pn) in dataCache.pkg_fn.items():
+ ver = dataCache.pkg_pepvpr[fn]
+ all_providers[pn].append((ver, fn))
+ return all_providers
+
+
+def sortPriorities(pn, dataCache, pkg_pn = None):
+ """
+ Reorder pkg_pn by file priority and default preference
+ """
+
+ if not pkg_pn:
+ pkg_pn = dataCache.pkg_pn
+
+ files = pkg_pn[pn]
+ priorities = {}
+ for f in files:
+ priority = dataCache.bbfile_priority[f]
+ preference = dataCache.pkg_dp[f]
+ if priority not in priorities:
+ priorities[priority] = {}
+ if preference not in priorities[priority]:
+ priorities[priority][preference] = []
+ priorities[priority][preference].append(f)
+ tmp_pn = []
+ for pri in sorted(priorities):
+ tmp_pref = []
+ for pref in sorted(priorities[pri]):
+ tmp_pref.extend(priorities[pri][pref])
+ tmp_pn = [tmp_pref] + tmp_pn
+
+ return tmp_pn
+
+def preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r):
+ """
+ Check if the version pe,pv,pr is the preferred one.
+    If a preferred version is defined and it ends with '%', then pv has to start with that version once the '%' is removed.
+ """
+    if pr == preferred_r or preferred_r is None:
+        if pe == preferred_e or preferred_e is None:
+            if preferred_v == pv:
+                return True
+            if preferred_v is not None and preferred_v.endswith('%') and pv.startswith(preferred_v[:-1]):
+                return True
+    return False
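+
+# Illustrative calls (not part of the original source):
+#   preferredVersionMatch("0", "1.2.3", "r0", "0", "1.2.%", "r0")  -> True
+#   preferredVersionMatch("0", "1.3.0", "r0", "0", "1.2.%", "r0")  -> False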
+
+def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
+ """
+ Find the first provider in pkg_pn with a PREFERRED_VERSION set.
+ """
+
+ preferred_file = None
+ preferred_ver = None
+
+    # pn can contain '_', e.g. gcc-cross-x86_64, while an override cannot,
+    # hence we do this manually rather than use OVERRIDES
+ preferred_v = cfgData.getVar("PREFERRED_VERSION_pn-%s" % pn)
+ if not preferred_v:
+ preferred_v = cfgData.getVar("PREFERRED_VERSION_%s" % pn)
+ if not preferred_v:
+ preferred_v = cfgData.getVar("PREFERRED_VERSION")
+
+ if preferred_v:
+        m = re.match(r'(\d+:)*(.*)(_.*)*', preferred_v)
+ if m:
+ if m.group(1):
+ preferred_e = m.group(1)[:-1]
+ else:
+ preferred_e = None
+ preferred_v = m.group(2)
+ if m.group(3):
+ preferred_r = m.group(3)[1:]
+ else:
+ preferred_r = None
+ else:
+ preferred_e = None
+ preferred_r = None
+
+ for file_set in pkg_pn:
+ for f in file_set:
+ pe, pv, pr = dataCache.pkg_pepvpr[f]
+ if preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r):
+ preferred_file = f
+ preferred_ver = (pe, pv, pr)
+ break
+ if preferred_file:
+            break
+ if preferred_r:
+ pv_str = '%s-%s' % (preferred_v, preferred_r)
+ else:
+ pv_str = preferred_v
+    if preferred_e is not None:
+ pv_str = '%s:%s' % (preferred_e, pv_str)
+ itemstr = ""
+ if item:
+ itemstr = " (for item %s)" % item
+ if preferred_file is None:
+ logger.info("preferred version %s of %s not available%s", pv_str, pn, itemstr)
+ available_vers = []
+ for file_set in pkg_pn:
+ for f in file_set:
+ pe, pv, pr = dataCache.pkg_pepvpr[f]
+ ver_str = pv
+ if pe:
+ ver_str = "%s:%s" % (pe, ver_str)
+                if ver_str not in available_vers:
+ available_vers.append(ver_str)
+ if available_vers:
+ available_vers.sort()
+ logger.info("versions of %s available: %s", pn, ' '.join(available_vers))
+ else:
+ logger.debug(1, "selecting %s as PREFERRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr)
+
+ return (preferred_ver, preferred_file)
+
+
+def findLatestProvider(pn, cfgData, dataCache, file_set):
+ """
+ Return the highest version of the providers in file_set.
+ Take default preferences into account.
+ """
+ latest = None
+ latest_p = 0
+ latest_f = None
+ for file_name in file_set:
+ pe, pv, pr = dataCache.pkg_pepvpr[file_name]
+ dp = dataCache.pkg_dp[file_name]
+
+ if (latest is None) or ((latest_p == dp) and (utils.vercmp(latest, (pe, pv, pr)) < 0)) or (dp > latest_p):
+ latest = (pe, pv, pr)
+ latest_f = file_name
+ latest_p = dp
+
+ return (latest, latest_f)
+
+
+def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
+ """
+ If there is a PREFERRED_VERSION, find the highest-priority bbfile
+ providing that version. If not, find the latest version provided by
+    a bbfile in the highest-priority set.
+ """
+
+ sortpkg_pn = sortPriorities(pn, dataCache, pkg_pn)
+ # Find the highest priority provider with a PREFERRED_VERSION set
+ (preferred_ver, preferred_file) = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn, item)
+ # Find the latest version of the highest priority provider
+ (latest, latest_f) = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[0])
+
+ if preferred_file is None:
+ preferred_file = latest_f
+ preferred_ver = latest
+
+ return (latest, latest_f, preferred_ver, preferred_file)
+
+
+def _filterProviders(providers, item, cfgData, dataCache):
+ """
+ Take a list of providers and filter/reorder according to the
+ environment variables
+ """
+ eligible = []
+ preferred_versions = {}
+ sortpkg_pn = {}
+
+ # The order of providers depends on the order of the files on the disk
+ # up to here. Sort pkg_pn to make dependency issues reproducible rather
+ # than effectively random.
+ providers.sort()
+
+ # Collate providers by PN
+ pkg_pn = {}
+ for p in providers:
+ pn = dataCache.pkg_fn[p]
+ if pn not in pkg_pn:
+ pkg_pn[pn] = []
+ pkg_pn[pn].append(p)
+
+ logger.debug(1, "providers for %s are: %s", item, list(sorted(pkg_pn.keys())))
+
+ # First add PREFERRED_VERSIONS
+ for pn in sorted(pkg_pn):
+ sortpkg_pn[pn] = sortPriorities(pn, dataCache, pkg_pn)
+ preferred_versions[pn] = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item)
+ if preferred_versions[pn][1]:
+ eligible.append(preferred_versions[pn][1])
+
+ # Now add latest versions
+ for pn in sorted(sortpkg_pn):
+ if pn in preferred_versions and preferred_versions[pn][1]:
+ continue
+ preferred_versions[pn] = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[pn][0])
+ eligible.append(preferred_versions[pn][1])
+
+    if len(eligible) == 0:
+        logger.error("no eligible providers for %s", item)
+        return eligible
+
+ # If pn == item, give it a slight default preference
+ # This means PREFERRED_PROVIDER_foobar defaults to foobar if available
+ for p in providers:
+ pn = dataCache.pkg_fn[p]
+ if pn != item:
+ continue
+ (newvers, fn) = preferred_versions[pn]
+        if fn not in eligible:
+ continue
+ eligible.remove(fn)
+ eligible = [fn] + eligible
+
+ return eligible
+
+
+def filterProviders(providers, item, cfgData, dataCache):
+ """
+ Take a list of providers and filter/reorder according to the
+ environment variables
+ Takes a "normal" target item
+ """
+
+ eligible = _filterProviders(providers, item, cfgData, dataCache)
+
+ prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % item)
+ if prefervar:
+ dataCache.preferred[item] = prefervar
+
+ foundUnique = False
+ if item in dataCache.preferred:
+ for p in eligible:
+ pn = dataCache.pkg_fn[p]
+ if dataCache.preferred[item] == pn:
+ logger.verbose("selecting %s to satisfy %s due to PREFERRED_PROVIDERS", pn, item)
+ eligible.remove(p)
+ eligible = [p] + eligible
+ foundUnique = True
+ break
+
+ logger.debug(1, "sorted providers for %s are: %s", item, eligible)
+
+ return eligible, foundUnique
+
+def filterProvidersRunTime(providers, item, cfgData, dataCache):
+ """
+ Take a list of providers and filter/reorder according to the
+ environment variables
+ Takes a "runtime" target item
+ """
+
+ eligible = _filterProviders(providers, item, cfgData, dataCache)
+
+ # First try and match any PREFERRED_RPROVIDER entry
+ prefervar = cfgData.getVar('PREFERRED_RPROVIDER_%s' % item)
+ foundUnique = False
+ if prefervar:
+ for p in eligible:
+ pn = dataCache.pkg_fn[p]
+ if prefervar == pn:
+ logger.verbose("selecting %s to satisfy %s due to PREFERRED_RPROVIDER", pn, item)
+ eligible.remove(p)
+ eligible = [p] + eligible
+ foundUnique = True
+ numberPreferred = 1
+ break
+
+ # If we didn't find an RPROVIDER entry, try and infer the provider from PREFERRED_PROVIDER entries
+ # by looking through the provides of each eligible recipe and seeing if a PREFERRED_PROVIDER was set.
+    # This is most useful for virtual/ entries rather than having an RPROVIDER per entry.
+ if not foundUnique:
+ # Should use dataCache.preferred here?
+ preferred = []
+ preferred_vars = []
+ pns = {}
+ for p in eligible:
+ pns[dataCache.pkg_fn[p]] = p
+ for p in eligible:
+ pn = dataCache.pkg_fn[p]
+ provides = dataCache.pn_provides[pn]
+ for provide in provides:
+ prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % provide)
+ #logger.debug(1, "checking PREFERRED_PROVIDER_%s (value %s) against %s", provide, prefervar, pns.keys())
+ if prefervar in pns and pns[prefervar] not in preferred:
+ var = "PREFERRED_PROVIDER_%s = %s" % (provide, prefervar)
+ logger.verbose("selecting %s to satisfy runtime %s due to %s", prefervar, item, var)
+ preferred_vars.append(var)
+ pref = pns[prefervar]
+ eligible.remove(pref)
+ eligible = [pref] + eligible
+ preferred.append(pref)
+ break
+
+ numberPreferred = len(preferred)
+
+ if numberPreferred > 1:
+ logger.error("Trying to resolve runtime dependency %s resulted in conflicting PREFERRED_PROVIDER entries being found.\nThe providers found were: %s\nThe PREFERRED_PROVIDER entries resulting in this conflict were: %s. You could set PREFERRED_RPROVIDER_%s" % (item, preferred, preferred_vars, item))
+
+ logger.debug(1, "sorted runtime providers for %s are: %s", item, eligible)
+
+ return eligible, numberPreferred
+
+regexp_cache = {}
+
+def getRuntimeProviders(dataCache, rdepend):
+ """
+ Return any providers of runtime dependency
+ """
+ rproviders = []
+
+ if rdepend in dataCache.rproviders:
+ rproviders += dataCache.rproviders[rdepend]
+
+ if rdepend in dataCache.packages:
+ rproviders += dataCache.packages[rdepend]
+
+ if rproviders:
+ return rproviders
+
+ # Only search dynamic packages if we can't find anything in other variables
+ for pattern in dataCache.packages_dynamic:
+        pattern = pattern.replace('+', r"\+")
+ if pattern in regexp_cache:
+ regexp = regexp_cache[pattern]
+ else:
+ try:
+ regexp = re.compile(pattern)
+            except re.error:
+ logger.error("Error parsing regular expression '%s'", pattern)
+ raise
+ regexp_cache[pattern] = regexp
+ if regexp.match(rdepend):
+ rproviders += dataCache.packages_dynamic[pattern]
+            logger.debug(1, "Assuming %s is a dynamic package, but it may not exist", rdepend)
+
+ return rproviders
+
+
+def buildWorldTargetList(dataCache, task=None):
+ """
+ Build package list for "bitbake world"
+ """
+ if dataCache.world_target:
+ return
+
+ logger.debug(1, "collating packages for \"world\"")
+ for f in dataCache.possible_world:
+ terminal = True
+ pn = dataCache.pkg_fn[f]
+ if task and task not in dataCache.task_deps[f]['tasks']:
+ logger.debug(2, "World build skipping %s as task %s doesn't exist", f, task)
+ terminal = False
+
+ for p in dataCache.pn_provides[pn]:
+ if p.startswith('virtual/'):
+ logger.debug(2, "World build skipping %s due to %s provider starting with virtual/", f, p)
+ terminal = False
+ break
+ for pf in dataCache.providers[p]:
+ if dataCache.pkg_fn[pf] != pn:
+ logger.debug(2, "World build skipping %s due to both us and %s providing %s", f, pf, p)
+ terminal = False
+ break
+ if terminal:
+ dataCache.world_target.add(pn)
diff --git a/poky/bitbake/lib/bb/pysh/__init__.py b/poky/bitbake/lib/bb/pysh/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/poky/bitbake/lib/bb/pysh/__init__.py
diff --git a/poky/bitbake/lib/bb/pysh/builtin.py b/poky/bitbake/lib/bb/pysh/builtin.py
new file mode 100644
index 000000000..a8814dc33
--- /dev/null
+++ b/poky/bitbake/lib/bb/pysh/builtin.py
@@ -0,0 +1,710 @@
+# builtin.py - builtins and utilities definitions for pysh.
+#
+# Copyright 2007 Patrick Mezard
+#
+# This software may be used and distributed according to the terms
+# of the GNU General Public License, incorporated herein by reference.
+
+"""Builtin and internal utilities implementations.
+
+- Beware not to use the Python interpreter environment as if it were the shell
+environment. For instance, a command's working directory must be explicitly
+handled through env['PWD'] instead of relying on the Python working directory.
+"""
+import errno
+import optparse
+import os
+import re
+import subprocess
+import sys
+import time
+
+def has_subprocess_bug():
+ return getattr(subprocess, 'list2cmdline', None) and \
+ ( subprocess.list2cmdline(['']) == '' or \
+ subprocess.list2cmdline(['foo|bar']) == 'foo|bar')
+
+# Detect python bug 1634343: "subprocess swallows empty arguments under win32"
+# <http://sourceforge.net/tracker/index.php?func=detail&aid=1634343&group_id=5470&atid=105470>
+# Also detect: "[ 1710802 ] subprocess must escape redirection characters under win32"
+# <http://sourceforge.net/tracker/index.php?func=detail&aid=1710802&group_id=5470&atid=105470>
+if has_subprocess_bug():
+ import subprocess_fix
+ subprocess.list2cmdline = subprocess_fix.list2cmdline
+
+from sherrors import *
+
+class NonExitingParser(optparse.OptionParser):
+ """OptionParser default behaviour upon error is to print the error message and
+ exit. Raise a utility error instead.
+ """
+ def error(self, msg):
+ raise UtilityError(msg)
+
+#-------------------------------------------------------------------------------
+# set special builtin
+#-------------------------------------------------------------------------------
+OPT_SET = NonExitingParser(usage="set - set or unset options and positional parameters")
+OPT_SET.add_option( '-f', action='store_true', dest='has_f', default=False,
+ help='The shell shall disable pathname expansion.')
+OPT_SET.add_option('-e', action='store_true', dest='has_e', default=False,
+ help="""When this option is on, if a simple command fails for any of the \
+ reasons listed in Consequences of Shell Errors or returns an exit status \
+ value >0, and is not part of the compound list following a while, until, \
+ or if keyword, and is not a part of an AND or OR list, and is not a \
+ pipeline preceded by the ! reserved word, then the shell shall immediately \
+ exit.""")
+OPT_SET.add_option('-x', action='store_true', dest='has_x', default=False,
+ help="""The shell shall write to standard error a trace for each command \
+ after it expands the command and before it executes it. It is unspecified \
+ whether the command that turns tracing off is traced.""")
+
+def builtin_set(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ option, args = OPT_SET.parse_args(args)
+ env = interp.get_env()
+
+ if option.has_f:
+ env.set_opt('-f')
+ if option.has_e:
+ env.set_opt('-e')
+ if option.has_x:
+ env.set_opt('-x')
+ return 0
+
+#-------------------------------------------------------------------------------
+# shift special builtin
+#-------------------------------------------------------------------------------
+def builtin_shift(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ params = interp.get_env().get_positional_args()
+ if args:
+ try:
+ n = int(args[0])
+ if n > len(params):
+ raise ValueError()
+ except ValueError:
+ return 1
+ else:
+ n = 1
+
+ params[:n] = []
+ interp.get_env().set_positional_args(params)
+ return 0
+
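+# Behaviour sketch: with positional args a b c, "shift 2" leaves $1 == 'c'
+# and "$#" == '1'; "shift 4" exceeds "$#", so it returns status 1 and leaves
+# the parameters untouched.
+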
+#-------------------------------------------------------------------------------
+# export special builtin
+#-------------------------------------------------------------------------------
+OPT_EXPORT = NonExitingParser(usage="export - set the export attribute for variables")
+OPT_EXPORT.add_option('-p', action='store_true', dest='has_p', default=False)
+
+def builtin_export(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ option, args = OPT_EXPORT.parse_args(args)
+ if option.has_p:
+ raise NotImplementedError()
+
+ for arg in args:
+ try:
+ name, value = arg.split('=', 1)
+ except ValueError:
+ name, value = arg, None
+ env = interp.get_env().export(name, value)
+
+ return 0
+
+#-------------------------------------------------------------------------------
+# return special builtin
+#-------------------------------------------------------------------------------
+def builtin_return(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+ res = 0
+ if args:
+ try:
+ res = int(args[0])
+ except ValueError:
+ res = 0
+ if not 0<=res<=255:
+ res = 0
+
+ # BUG: should be last executed command exit code
+ raise ReturnSignal(res)
+
+#-------------------------------------------------------------------------------
+# trap special builtin
+#-------------------------------------------------------------------------------
+def builtin_trap(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+ if len(args) < 2:
+ stderr.write('trap: usage: trap [[arg] signal_spec ...]\n')
+ return 2
+
+ action = args[0]
+ for sig in args[1:]:
+ try:
+ env.traps[sig] = action
+ except Exception as e:
+ stderr.write('trap: %s\n' % str(e))
+ return 0
+
+#-------------------------------------------------------------------------------
+# unset special builtin
+#-------------------------------------------------------------------------------
+OPT_UNSET = NonExitingParser("unset - unset values and attributes of variables and functions")
+OPT_UNSET.add_option( '-f', action='store_true', dest='has_f', default=False)
+OPT_UNSET.add_option( '-v', action='store_true', dest='has_v', default=False)
+
+def builtin_unset(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ option, args = OPT_UNSET.parse_args(args)
+
+ status = 0
+ env = interp.get_env()
+ for arg in args:
+ try:
+ if option.has_f:
+ env.remove_function(arg)
+ else:
+ del env[arg]
+ except KeyError:
+ pass
+ except VarAssignmentError:
+ status = 1
+
+ return status
+
+#-------------------------------------------------------------------------------
+# wait special builtin
+#-------------------------------------------------------------------------------
+def builtin_wait(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ return interp.wait([int(arg) for arg in args])
+
+#-------------------------------------------------------------------------------
+# cat utility
+#-------------------------------------------------------------------------------
+def utility_cat(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ if not args:
+ args = ['-']
+
+ status = 0
+ for arg in args:
+ if arg == '-':
+ data = stdin.read()
+ else:
+ path = os.path.join(env['PWD'], arg)
+ try:
+ f = file(path, 'rb')
+ try:
+ data = f.read()
+ finally:
+ f.close()
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ status = 1
+ continue
+ stdout.write(data)
+ stdout.flush()
+ return status
+
+#-------------------------------------------------------------------------------
+# cd utility
+#-------------------------------------------------------------------------------
+OPT_CD = NonExitingParser("cd - change the working directory")
+
+def utility_cd(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ option, args = OPT_CD.parse_args(args)
+ env = interp.get_env()
+
+ directory = None
+ printdir = False
+ if not args:
+ home = env.get('HOME')
+ if not home:
+ # HOME unset or empty: behaviour is unspecified, do nothing
+ return 0
+ else:
+ directory = home
+ elif len(args)==1:
+ directory = args[0]
+ if directory=='-':
+ if 'OLDPWD' not in env:
+ raise UtilityError("OLDPWD not set")
+ printdir = True
+ directory = env['OLDPWD']
+ else:
+ raise UtilityError("too many arguments")
+
+ curpath = None
+ # Absolute directories will be handled correctly by the os.path.join call.
+ if not directory.startswith('.') and not directory.startswith('..'):
+ cdpaths = env.get('CDPATH', '.').split(';')
+ for cdpath in cdpaths:
+ p = os.path.join(cdpath, directory)
+ if os.path.isdir(p):
+ curpath = p
+ break
+
+ if curpath is None:
+ curpath = directory
+ curpath = os.path.join(env['PWD'], curpath)
+
+ env['OLDPWD'] = env['PWD']
+ env['PWD'] = curpath
+ if printdir:
+ stdout.write('%s\n' % curpath)
+ return 0
+
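+# Resolution sketch (hypothetical values): with CDPATH='/src;/tmp' and
+# directory='proj', the loop above tries '/src/proj' then '/tmp/proj' and
+# keeps the first existing directory; if none matches, the name is resolved
+# relative to env['PWD'].
+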
+#-------------------------------------------------------------------------------
+# colon utility
+#-------------------------------------------------------------------------------
+def utility_colon(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+ return 0
+
+#-------------------------------------------------------------------------------
+# echo utility
+#-------------------------------------------------------------------------------
+def utility_echo(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ # Echo only takes arguments, no options. Use printf if you need fancy stuff.
+ output = ' '.join(args) + '\n'
+ stdout.write(output)
+ stdout.flush()
+ return 0
+
+#-------------------------------------------------------------------------------
+# egrep utility
+#-------------------------------------------------------------------------------
+# egrep is usually a shell script.
+# Unfortunately, pysh does not support shell scripts *with arguments* right now,
+# so the call is forwarded to 'grep -E' here, assuming grep is available.
+def utility_egrep(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ return run_command('grep', ['-E'] + args, interp, env, stdin, stdout,
+ stderr, debugflags)
+
+#-------------------------------------------------------------------------------
+# env utility
+#-------------------------------------------------------------------------------
+def utility_env(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ if args and args[0]=='-i':
+ raise NotImplementedError('env: -i option is not implemented')
+
+ i = 0
+ for arg in args:
+ if '=' not in arg:
+ break
+ # Update the current environment
+ name, value = arg.split('=', 1)
+ env[name] = value
+ i += 1
+
+ if args[i:]:
+ # Find then execute the specified interpreter
+ utility = env.find_in_path(args[i])
+ if not utility:
+ return 127
+ args[i:i+1] = utility
+ name = args[i]
+ args = args[i+1:]
+ try:
+ return run_command(name, args, interp, env, stdin, stdout, stderr,
+ debugflags)
+ except UtilityError:
+ stderr.write('env: failed to execute %s\n' % ' '.join([name]+args))
+ return 126
+ else:
+ for pair in env.get_variables().iteritems():
+ stdout.write('%s=%s\n' % pair)
+ return 0
+
+#-------------------------------------------------------------------------------
+# exit utility
+#-------------------------------------------------------------------------------
+def utility_exit(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ res = None
+ if args:
+ try:
+ res = int(args[0])
+ except ValueError:
+ res = None
+ if not 0<=res<=255:
+ res = None
+
+ if res is None:
+ # BUG: should be last executed command exit code
+ res = 0
+
+ raise ExitSignal(res)
+
+#-------------------------------------------------------------------------------
+# fgrep utility
+#-------------------------------------------------------------------------------
+# see egrep
+def utility_fgrep(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ return run_command('grep', ['-F'] + args, interp, env, stdin, stdout,
+ stderr, debugflags)
+
+#-------------------------------------------------------------------------------
+# gunzip utility
+#-------------------------------------------------------------------------------
+# see egrep
+def utility_gunzip(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ return run_command('gzip', ['-d'] + args, interp, env, stdin, stdout,
+ stderr, debugflags)
+
+#-------------------------------------------------------------------------------
+# kill utility
+#-------------------------------------------------------------------------------
+def utility_kill(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ for arg in args:
+ pid = int(arg)
+ status = subprocess.call(['pskill', '/T', str(pid)],
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ # pskill is asynchronous, hence the stupid polling loop
+ while 1:
+ p = subprocess.Popen(['pslist', str(pid)],
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = p.communicate()[0]
+ if ('process %d was not' % pid) in output:
+ break
+ time.sleep(1)
+ return status
+
+#-------------------------------------------------------------------------------
+# mkdir utility
+#-------------------------------------------------------------------------------
+OPT_MKDIR = NonExitingParser("mkdir - make directories.")
+OPT_MKDIR.add_option('-p', action='store_true', dest='has_p', default=False)
+
+def utility_mkdir(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ # TODO: implement umask
+ # TODO: implement proper utility error report
+ option, args = OPT_MKDIR.parse_args(args)
+ for arg in args:
+ path = os.path.join(env['PWD'], arg)
+ if option.has_p:
+ try:
+ os.makedirs(path)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+ else:
+ os.mkdir(path)
+ return 0
+
+#-------------------------------------------------------------------------------
+# netstat utility
+#-------------------------------------------------------------------------------
+def utility_netstat(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ # Do you really expect me to implement netstat?
+ # This empty form is enough for Mercurial tests since it's
+ # supposed to generate nothing upon success. Faking this test
+ # is not a big deal either.
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+ return 0
+
+#-------------------------------------------------------------------------------
+# pwd utility
+#-------------------------------------------------------------------------------
+OPT_PWD = NonExitingParser("pwd - return working directory name")
+OPT_PWD.add_option('-L', action='store_true', dest='has_L', default=True,
+ help="""If the PWD environment variable contains an absolute pathname of \
+ the current directory that does not contain the filenames dot or dot-dot, \
+ pwd shall write this pathname to standard output. Otherwise, the -L option \
+ shall behave as the -P option.""")
+OPT_PWD.add_option('-P', action='store_false', dest='has_L',
+ help="""The absolute pathname written shall not contain filenames that, in \
+ the context of the pathname, refer to files of type symbolic link.""")
+
+def utility_pwd(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ option, args = OPT_PWD.parse_args(args)
+ stdout.write('%s\n' % env['PWD'])
+ return 0
+
+#-------------------------------------------------------------------------------
+# printf utility
+#-------------------------------------------------------------------------------
+RE_UNESCAPE = re.compile(r'(\\x[a-zA-Z0-9]{2}|\\[0-7]{1,3}|\\.)')
+
+def utility_printf(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ def replace(m):
+ assert m.group()
+ g = m.group()[1:]
+ if g.startswith('x'):
+ return chr(int(g[1:], 16))
+ if len(g) <= 3 and len([c for c in g if c in '01234567']) == len(g):
+ # Yay, an octal number
+ return chr(int(g, 8))
+ return {
+ 'a': '\a',
+ 'b': '\b',
+ 'f': '\f',
+ 'n': '\n',
+ 'r': '\r',
+ 't': '\t',
+ 'v': '\v',
+ '\\': '\\',
+ }.get(g)
+
+ # Convert escape sequences
+ format = re.sub(RE_UNESCAPE, replace, args[0])
+ stdout.write(format % tuple(args[1:]))
+ return 0
+
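+# Escape-handling sketch: inside utility_printf, the substitution turns the
+# literal text a\tb\x41\012 into 'a', TAB, 'b', 'A', NEWLINE
+# (\t -> TAB, \x41 -> 'A', octal \012 -> NEWLINE).
+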
+#-------------------------------------------------------------------------------
+# true utility
+#-------------------------------------------------------------------------------
+def utility_true(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+ return 0
+
+#-------------------------------------------------------------------------------
+# sed utility
+#-------------------------------------------------------------------------------
+RE_SED = re.compile(r'^s(.).*\1[a-zA-Z]*$')
+
+# cygwin sed fails with some expressions when they do not end with a single space.
+# See unit tests for details. Interestingly, the same expressions work perfectly
+# in the cygwin shell.
+def utility_sed(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ # Scan pattern arguments and append a space if necessary
+ for i in range(len(args)):
+ if not RE_SED.search(args[i]):
+ continue
+ args[i] = args[i] + ' '
+
+ return run_command(name, args, interp, env, stdin, stdout,
+ stderr, debugflags)
+
+#-------------------------------------------------------------------------------
+# sleep utility
+#-------------------------------------------------------------------------------
+def utility_sleep(name, args, interp, env, stdin, stdout, stderr, debugflags):
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+ time.sleep(int(args[0]))
+ return 0
+
+#-------------------------------------------------------------------------------
+# sort utility
+#-------------------------------------------------------------------------------
+OPT_SORT = NonExitingParser("sort - sort, merge, or sequence check text files")
+
+def utility_sort(name, args, interp, env, stdin, stdout, stderr, debugflags):
+
+ def sort(path):
+ if path == '-':
+ lines = stdin.readlines()
+ else:
+ try:
+ f = file(path)
+ try:
+ lines = f.readlines()
+ finally:
+ f.close()
+ except IOError as e:
+ stderr.write(str(e) + '\n')
+ # Report the error but return no lines: the caller concatenates
+ # this result into its line list, so an int would crash it
+ return []
+
+ if lines and lines[-1][-1]!='\n':
+ lines[-1] = lines[-1] + '\n'
+ return lines
+
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ option, args = OPT_SORT.parse_args(args)
+ alllines = []
+
+ if len(args)<=0:
+ args += ['-']
+
+ # Load all files lines
+ curdir = os.getcwd()
+ try:
+ os.chdir(env['PWD'])
+ for path in args:
+ alllines += sort(path)
+ finally:
+ os.chdir(curdir)
+
+ alllines.sort()
+ for line in alllines:
+ stdout.write(line)
+ return 0
+
+#-------------------------------------------------------------------------------
+# hg utility
+#-------------------------------------------------------------------------------
+
+hgcommands = [
+ 'add',
+ 'addremove',
+ 'commit', 'ci',
+ 'debugrename',
+ 'debugwalk',
+ 'falabala', # Dummy command used in a mercurial test
+ 'incoming',
+ 'locate',
+ 'pull',
+ 'push',
+ 'qinit',
+ 'remove', 'rm',
+ 'rename', 'mv',
+ 'revert',
+ 'showconfig',
+ 'status', 'st',
+ 'strip',
+ ]
+
+def rewriteslashes(name, args):
+ # Several hg commands output file paths, rewrite the separators
+ if len(args) > 1 and name.lower().endswith('python') \
+ and args[0].endswith('hg'):
+ for cmd in hgcommands:
+ if cmd in args[1:]:
+ return True
+
+ # svn output contains many paths with OS specific separators.
+ # Normalize these to unix paths.
+ base = os.path.basename(name)
+ if base.startswith('svn'):
+ return True
+
+ return False
+
+def rewritehg(output):
+ if not output:
+ return output
+ # Rewrite os specific messages
+ output = output.replace(': The system cannot find the file specified',
+ ': No such file or directory')
+ output = re.sub(': Access is denied.*$', ': Permission denied', output)
+ output = output.replace(': No connection could be made because the target machine actively refused it',
+ ': Connection refused')
+ return output
+
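+# Example rewrite:
+# >>> rewritehg('abort: The system cannot find the file specified')
+# 'abort: No such file or directory'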
+
+def run_command(name, args, interp, env, stdin, stdout,
+ stderr, debugflags):
+ # Execute the command
+ if 'debug-utility' in debugflags:
+ print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
+
+ hgbin = interp.options().hgbinary
+ ishg = hgbin and ('hg' in name or args and 'hg' in args[0])
+ unixoutput = 'cygwin' in name or ishg
+
+ exec_env = env.get_variables()
+ try:
+ # BUG: comparing file descriptors is clearly not a reliable way to tell
+ # whether they point at the same underlying object. But in pysh's limited
+ # scope this is usually right; we do not expect complicated redirections
+ # besides the usual 2>&1.
+ # Still, there is one case we cannot deal with: when stdout and stderr
+ # are redirected *by the pysh caller*. This is the reason for the
+ # --redirect pysh() option.
+ # Now, we want to know whether they are the same because we sometimes need
+ # to transform the command output, mostly removing CR-LF to ensure that
+ # it is unix-like. Cygwin utilities are a special case because they
+ # explicitly set their output streams to binary mode, so we have nothing
+ # to do. For all other commands, we have to guess whether they are sending
+ # text data, in which case the transformation must be done.
+ # Again, the NUL character test is unreliable but should be enough for
+ # hg tests.
+ redirected = stdout.fileno()==stderr.fileno()
+ if not redirected:
+ p = subprocess.Popen([name] + args, cwd=env['PWD'], env=exec_env,
+ stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ else:
+ p = subprocess.Popen([name] + args, cwd=env['PWD'], env=exec_env,
+ stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ out, err = p.communicate()
+ except WindowsError as e:
+ raise UtilityError(str(e))
+
+ if not unixoutput:
+ def encode(s):
+ if '\0' in s:
+ return s
+ return s.replace('\r\n', '\n')
+ else:
+ encode = lambda s: s
+
+ if rewriteslashes(name, args):
+ encode1_ = encode
+ def encode(s):
+ s = encode1_(s)
+ s = s.replace('\\\\', '\\')
+ s = s.replace('\\', '/')
+ return s
+
+ if ishg:
+ encode2_ = encode
+ def encode(s):
+ return rewritehg(encode2_(s))
+
+ stdout.write(encode(out))
+ if not redirected:
+ stderr.write(encode(err))
+ return p.returncode
+
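+# Post-processing sketch: for a non-cygwin, non-hg command producing text
+# output, encode('a\r\nb') returns 'a\nb'; when rewriteslashes() applies,
+# backslashes are additionally converted to forward slashes, and hg output
+# is run through rewritehg().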
diff --git a/poky/bitbake/lib/bb/pysh/interp.py b/poky/bitbake/lib/bb/pysh/interp.py
new file mode 100644
index 000000000..d14ecf3c6
--- /dev/null
+++ b/poky/bitbake/lib/bb/pysh/interp.py
@@ -0,0 +1,1367 @@
+# interp.py - shell interpreter for pysh.
+#
+# Copyright 2007 Patrick Mezard
+#
+# This software may be used and distributed according to the terms
+# of the GNU General Public License, incorporated herein by reference.
+
+"""Implement the shell interpreter.
+
+Most references are made to "The Open Group Base Specifications Issue 6".
+<http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html>
+"""
+# TODO: document the fact that input streams must implement fileno() so Popen will work correctly.
+# It requires non-stdin streams to be implemented as files. Still to be tested...
+# DOC: pathsep is used in PATH instead of ':'. Clearly, there are path syntax issues here.
+# TODO: stop command execution upon error.
+# TODO: sort out the filename/io_number mess. It should be possible to use filenames only.
+# TODO: review subshell implementation
+# TODO: test environment cloning for non-special builtins
+# TODO: set -x should not rebuild commands from tokens, assignments/redirections are lost
+# TODO: unit test for variable assignment
+# TODO: test error management wrt error type/utility type
+# TODO: test for binary output everywhere
+# BUG: debug-parsing does not pass log file to PLY. Maybe a PLY upgrade is necessary.
+import base64
+import cPickle as pickle
+import errno
+import glob
+import os
+import re
+import subprocess
+import sys
+import tempfile
+
+try:
+ s = set()
+ del s
+except NameError:
+ from sets import Set as set
+
+import builtin
+from sherrors import *
+import pyshlex
+import pyshyacc
+
+def mappend(func, *args, **kargs):
+ """Like map but assume func returns a list. Returned lists are merged into
+ a single one.
+ """
+ return reduce(lambda a,b: a+b, map(func, *args, **kargs), [])
+
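+# Example:
+# >>> mappend(lambda c: [c, c], 'ab')
+# ['a', 'a', 'b', 'b']
+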
+class FileWrapper:
+ """File object wrapper to ease debugging.
+
+ Allow mode checking and implement file duplication through a simple
+ reference counting scheme. Not sure the latter is really useful since
+ only real file descriptors can be used.
+ """
+ def __init__(self, mode, file, close=True):
+ if mode not in ('r', 'w', 'a'):
+ raise IOError('invalid mode: %s' % mode)
+ self._mode = mode
+ self._close = close
+ if isinstance(file, FileWrapper):
+ if file._refcount[0] <= 0:
+ raise IOError(0, 'Error')
+ self._refcount = file._refcount
+ self._refcount[0] += 1
+ self._file = file._file
+ else:
+ self._refcount = [1]
+ self._file = file
+
+ def dup(self):
+ return FileWrapper(self._mode, self, self._close)
+
+ def fileno(self):
+ """fileno() should be only necessary for input streams."""
+ return self._file.fileno()
+
+ def read(self, size=-1):
+ if self._mode!='r':
+ raise IOError(0, 'Error')
+ return self._file.read(size)
+
+ def readlines(self, *args, **kwargs):
+ return self._file.readlines(*args, **kwargs)
+
+ def write(self, s):
+ if self._mode not in ('w', 'a'):
+ raise IOError(0, 'Error')
+ return self._file.write(s)
+
+ def flush(self):
+ self._file.flush()
+
+ def close(self):
+ if not self._refcount:
+ return
+ assert self._refcount[0] > 0
+
+ self._refcount[0] -= 1
+ if self._refcount[0] == 0:
+ self._mode = 'c'
+ if self._close:
+ self._file.close()
+ self._refcount = None
+
+ def mode(self):
+ return self._mode
+
+ def __getattr__(self, name):
+ if name == 'name':
+ self.name = getattr(self._file, name)
+ return self.name
+ else:
+ raise AttributeError(name)
+
+ def __del__(self):
+ self.close()
+
+
+def win32_open_devnull(mode):
+ return open('NUL', mode)
+
+
+class Redirections:
+ """Stores open files and their mapping to pseudo-sh file descriptor.
+ """
+ # BUG: redirections are not handled correctly: 1>&3 2>&3 3>&4 does
+ # not make 1 redirect to 4
+ def __init__(self, stdin=None, stdout=None, stderr=None):
+ self._descriptors = {}
+ if stdin is not None:
+ self._add_descriptor(0, stdin)
+ if stdout is not None:
+ self._add_descriptor(1, stdout)
+ if stderr is not None:
+ self._add_descriptor(2, stderr)
+
+ def add_here_document(self, interp, name, content, io_number=None):
+ if io_number is None:
+ io_number = 0
+
+ if name==pyshlex.unquote_wordtree(name):
+ content = interp.expand_here_document(('TOKEN', content))
+
+ # Write document content in a temporary file
+ tmp = tempfile.TemporaryFile()
+ try:
+ tmp.write(content)
+ tmp.flush()
+ tmp.seek(0)
+ self._add_descriptor(io_number, FileWrapper('r', tmp))
+ except:
+ tmp.close()
+ raise
+
+ def add(self, interp, op, filename, io_number=None):
+ if op not in ('<', '>', '>|', '>>', '>&'):
+ # TODO: add descriptor duplication and here_documents
+ raise RedirectionError('Unsupported redirection operator "%s"' % op)
+
+ if io_number is not None:
+ io_number = int(io_number)
+
+ if (op == '>&' and filename.isdigit()) or filename=='-':
+ # No expansion for file descriptors, quote them if you want a filename
+ fullname = filename
+ else:
+ if filename.startswith('/'):
+ # TODO: win32 kludge
+ if filename=='/dev/null':
+ fullname = 'NUL'
+ else:
+ # TODO: handle absolute pathnames, they are unlikely to exist on the
+ # current platform (win32 for instance).
+ raise NotImplementedError()
+ else:
+ fullname = interp.expand_redirection(('TOKEN', filename))
+ if not fullname:
+ raise RedirectionError('%s: ambiguous redirect' % filename)
+ # Build absolute path based on PWD
+ fullname = os.path.join(interp.get_env()['PWD'], fullname)
+
+ if op=='<':
+ return self._add_input_redirection(interp, fullname, io_number)
+ elif op in ('>', '>|'):
+ clobber = ('>|'==op)
+ return self._add_output_redirection(interp, fullname, io_number, clobber)
+ elif op=='>>':
+ return self._add_output_appending(interp, fullname, io_number)
+ elif op=='>&':
+ return self._dup_output_descriptor(fullname, io_number)
+
+ def close(self):
+ if self._descriptors is not None:
+ for desc in self._descriptors.itervalues():
+ desc.flush()
+ desc.close()
+ self._descriptors = None
+
+ def stdin(self):
+ return self._descriptors[0]
+
+ def stdout(self):
+ return self._descriptors[1]
+
+ def stderr(self):
+ return self._descriptors[2]
+
+ def clone(self):
+ clone = Redirections()
+ for desc, fileobj in self._descriptors.iteritems():
+ clone._descriptors[desc] = fileobj.dup()
+ return clone
+
+ def _add_output_redirection(self, interp, filename, io_number, clobber):
+ if io_number is None:
+ # io_number defaults to standard output
+ io_number = 1
+
+ if not clobber and interp.get_env().has_opt('-C') and os.path.isfile(filename):
+ # File already exists in no-clobber mode, bail out
+ raise RedirectionError('File "%s" already exists' % filename)
+
+ # Open and register
+ self._add_file_descriptor(io_number, filename, 'w')
+
+ def _add_output_appending(self, interp, filename, io_number):
+ if io_number is None:
+ io_number = 1
+ self._add_file_descriptor(io_number, filename, 'a')
+
+ def _add_input_redirection(self, interp, filename, io_number):
+ if io_number is None:
+ io_number = 0
+ self._add_file_descriptor(io_number, filename, 'r')
+
+ def _add_file_descriptor(self, io_number, filename, mode):
+ try:
+ if filename.startswith('/'):
+ if filename=='/dev/null':
+ f = win32_open_devnull(mode+'b')
+ else:
+ # TODO: handle absolute pathnames, they are unlikely to exist on the
+ # current platform (win32 for instance).
+ raise NotImplementedError('cannot open absolute path %s' % repr(filename))
+ else:
+ f = file(filename, mode+'b')
+ except IOError as e:
+ raise RedirectionError(str(e))
+
+ wrapper = None
+ try:
+ wrapper = FileWrapper(mode, f)
+ f = None
+ self._add_descriptor(io_number, wrapper)
+ except:
+ if f: f.close()
+ if wrapper: wrapper.close()
+ raise
+
+ def _dup_output_descriptor(self, source_fd, dest_fd):
+ if source_fd is None:
+ source_fd = 1
+ self._dup_file_descriptor(source_fd, dest_fd, 'w')
+
+ def _dup_file_descriptor(self, source_fd, dest_fd, mode):
+ source_fd = int(source_fd)
+ if source_fd not in self._descriptors:
+ raise RedirectionError('"%s" is not a valid file descriptor' % str(source_fd))
+ source = self._descriptors[source_fd]
+
+ if source.mode()!=mode:
+ raise RedirectionError('Descriptor %s cannot be duplicated in mode "%s"' % (str(source), mode))
+
+ if dest_fd=='-':
+ # Close the source descriptor
+ del self._descriptors[source_fd]
+ source.close()
+ else:
+ dest_fd = int(dest_fd)
+ if dest_fd not in self._descriptors:
+ raise RedirectionError('Cannot replace file descriptor %s' % str(dest_fd))
+
+ dest = self._descriptors[dest_fd]
+ if dest.mode()!=mode:
+ raise RedirectionError('Descriptor %s cannot be redirected in mode "%s"' % (str(dest), mode))
+
+ self._descriptors[dest_fd] = source.dup()
+ dest.close()
+
+ def _add_descriptor(self, io_number, file):
+ io_number = int(io_number)
+
+ if io_number in self._descriptors:
+ # Close the current descriptor
+ d = self._descriptors[io_number]
+ del self._descriptors[io_number]
+ d.close()
+
+ self._descriptors[io_number] = file
+
+ def __str__(self):
+ names = [('%d=%r' % (k, getattr(v, 'name', None))) for k,v
+ in self._descriptors.iteritems()]
+ names = ','.join(names)
+ return 'Redirections(%s)' % names
+
+ def __del__(self):
+ self.close()
+
+def cygwin_to_windows_path(path):
+ """Turn /cygdrive/c/foo into c:/foo, or return path if it
+ is not a cygwin path.
+ """
+ if not path.startswith('/cygdrive/'):
+ return path
+ path = path[len('/cygdrive/'):]
+ path = path[:1] + ':' + path[1:]
+ return path
+
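+# Example:
+# >>> cygwin_to_windows_path('/cygdrive/c/foo')
+# 'c:/foo'
+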
+def win32_to_unix_path(path):
+ if path is not None:
+ path = path.replace('\\', '/')
+ return path
+
+_RE_SHEBANG = re.compile(r'^\#!\s?([^\s]+)(?:\s([^\s]+))?')
+_SHEBANG_CMDS = {
+ '/usr/bin/env': 'env',
+ '/bin/sh': 'pysh',
+ 'python': 'python',
+}
+
+def resolve_shebang(path, ignoreshell=False):
+ """Return a list of arguments as shebang interpreter call or an empty list
+ if path does not refer to an executable script.
+ See <http://www.opengroup.org/austin/docs/austin_51r2.txt>.
+
+ ignoreshell - set to True to ignore sh shebangs. Return an empty list instead.
+ """
+ try:
+ f = file(path)
+ try:
+ # At most 80 characters in the first line
+ header = f.read(80).splitlines()[0]
+ finally:
+ f.close()
+
+ m = _RE_SHEBANG.search(header)
+ if not m:
+ return []
+ cmd, arg = m.group(1,2)
+ if os.path.isfile(cmd):
+ # Keep this one, the hg script for instance contains a weird windows
+ # shebang referencing the current python install.
+ cmdfile = os.path.basename(cmd).lower()
+ if cmdfile == 'python.exe':
+ cmd = 'python'
+ elif cmd not in _SHEBANG_CMDS:
+ raise CommandNotFound('Unknown interpreter "%s" referenced in '\
+ 'shebang' % header)
+ cmd = _SHEBANG_CMDS.get(cmd)
+ if cmd is None or (ignoreshell and cmd == 'pysh'):
+ return []
+ if arg is None:
+ return [cmd, win32_to_unix_path(path)]
+ return [cmd, arg, win32_to_unix_path(path)]
+ except IOError as e:
+ if e.errno!=errno.ENOENT and \
+ not (e.errno==errno.EPERM and os.path.isdir(path)): # Opening a directory raises EPERM
+ raise
+ return []
+
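+# Resolution sketch (hypothetical script path): a file whose first line is
+# '#!/usr/bin/env python' resolves to ['env', 'python', <unix path>]; a
+# '#!/bin/sh' shebang maps to the 'pysh' interpreter (or [] with
+# ignoreshell=True); an unknown interpreter raises CommandNotFound.
+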
+def win32_find_in_path(name, path):
+ if isinstance(path, str):
+ path = path.split(os.pathsep)
+
+ exts = os.environ.get('PATHEXT', '').lower().split(os.pathsep)
+ for p in path:
+ p_name = os.path.join(p, name)
+
+ prefix = resolve_shebang(p_name)
+ if prefix:
+ return prefix
+
+ for ext in exts:
+ p_name_ext = p_name + ext
+ if os.path.exists(p_name_ext):
+ return [win32_to_unix_path(p_name_ext)]
+ return []
+
+class Traps(dict):
+ def __setitem__(self, key, value):
+ if key not in ('EXIT',):
+ raise NotImplementedError()
+ super(Traps, self).__setitem__(key, value)
+
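+# Only the EXIT trap is supported:
+# >>> t = Traps(); t['EXIT'] = 'echo bye' # accepted
+# >>> t['INT'] = ':' # raises NotImplementedError
+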
+# IFS whitespace character class
+_IFS_WHITESPACES = (' ', '\t', '\n')
+
+class Environment:
+ """Environment holds environment variables, export table, function
+ definitions and whatever is defined in 2.12 "Shell Execution Environment",
+ redirection excepted.
+ """
+ def __init__(self, pwd):
+ self._opt = set() #Shell options
+
+ self._functions = {}
+ self._env = {'?': '0', '#': '0'}
+ self._exported = set([
+ 'HOME', 'IFS', 'PATH'
+ ])
+
+ # Set environment vars with side-effects
+ self._ifs_ws = None # Set of IFS whitespace characters
+ self._ifs_re = None # Regular expression used to split between words using IFS classes
+ self['IFS'] = ''.join(_IFS_WHITESPACES) #Default environment values
+ self['PWD'] = pwd
+ self.traps = Traps()
+
+ def clone(self, subshell=False):
+ env = Environment(self['PWD'])
+ env._opt = set(self._opt)
+ for k,v in self.get_variables().iteritems():
+ if k in self._exported:
+ env.export(k,v)
+ elif subshell:
+ env[k] = v
+
+ if subshell:
+ env._functions = dict(self._functions)
+
+ return env
+
+ def __getitem__(self, key):
+ if key in ('@', '*', '-', '$'):
+ raise NotImplementedError('%s is not implemented' % repr(key))
+ return self._env[key]
+
+ def get(self, key, defval=None):
+ try:
+ return self[key]
+ except KeyError:
+ return defval
+
+ def __setitem__(self, key, value):
+ if key=='IFS':
+ # Update the whitespace/non-whitespace classes
+ self._update_ifs(value)
+ elif key=='PWD':
+ pwd = os.path.abspath(value)
+ if not os.path.isdir(pwd):
+ raise VarAssignmentError('Invalid directory %s' % value)
+ value = pwd
+ elif key in ('?', '!'):
+ value = str(int(value))
+ self._env[key] = value
+
+ def __delitem__(self, key):
+ if key in ('IFS', 'PWD', '?'):
+ raise VarAssignmentError('%s cannot be unset' % key)
+ del self._env[key]
+
+ def __contains__(self, item):
+ return item in self._env
+
+ def set_positional_args(self, args):
+ """Set the content of 'args' as positional argument from 1 to len(args).
+ Return previous argument as a list of strings.
+ """
+ # Save and remove previous arguments
+ prevargs = []
+ for i in range(int(self._env['#'])):
+ i = str(i+1)
+ prevargs.append(self._env[i])
+ del self._env[i]
+ self._env['#'] = '0'
+
+ #Set new ones
+ for i,arg in enumerate(args):
+ self._env[str(i+1)] = str(arg)
+ self._env['#'] = str(len(args))
+
+ return prevargs
+
+ def get_positional_args(self):
+ return [self._env[str(i+1)] for i in range(int(self._env['#']))]
+
+ def get_variables(self):
+ return dict(self._env)
+
+ def export(self, key, value=None):
+ if value is not None:
+ self[key] = value
+ self._exported.add(key)
+
+ def get_exported(self):
+ return [(k,self._env.get(k)) for k in self._exported]
+
+ def split_fields(self, word):
+ if not self._ifs_ws or not word:
+ return [word]
+ return re.split(self._ifs_re, word)
+
+ def _update_ifs(self, value):
+ """Update the split_fields related variables when IFS character set is
+ changed.
+ """
+ # TODO: handle NULL IFS
+
+ # Separate characters in whitespace and non-whitespace
+ chars = set(value)
+ ws = [c for c in chars if c in _IFS_WHITESPACES]
+ nws = [c for c in chars if c not in _IFS_WHITESPACES]
+
+ # Keep whitespaces in a string for left and right stripping
+ self._ifs_ws = ''.join(ws)
+
+ # Build a regexp to split fields
+ trailing = '[' + ''.join([re.escape(c) for c in ws]) + ']'
+ if nws:
+ # A single non-whitespace separator, optionally surrounded by whitespace...
+ nws = '[' + ''.join([re.escape(c) for c in nws]) + ']'
+ nws = '(?:' + trailing + '*' + nws + trailing + '*' + '|' + trailing + '+)'
+ else:
+ # ...or, for a whitespace-only IFS, just a run of whitespace
+ nws = trailing + '+'
+ self._ifs_re = re.compile(nws)
+
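+ # Splitting sketch with the default IFS (space, tab, newline):
+ # >>> env = Environment(os.getcwd())
+ # >>> env.split_fields(' a b\tc ')
+ # ['', 'a', 'b', 'c', '']
+ # Leading/trailing blanks yield empty fields here; callers strip them
+ # using the characters saved in _ifs_ws.
+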
+ def has_opt(self, opt, val=None):
+ return (opt, val) in self._opt
+
+ def set_opt(self, opt, val=None):
+ self._opt.add((opt, val))
+
+ def find_in_path(self, name, pwd=False):
+ path = self._env.get('PATH', '').split(os.pathsep)
+ if pwd:
+ path[:0] = [self['PWD']]
+ if os.name == 'nt':
+ return win32_find_in_path(name, path)
+ else:
+ raise NotImplementedError()
+
+ def define_function(self, name, body):
+ if not is_name(name):
+ raise ShellSyntaxError('%s is not a valid function name' % repr(name))
+ self._functions[name] = body
+
+ def remove_function(self, name):
+ del self._functions[name]
+
+ def is_function(self, name):
+ return name in self._functions
+
+ def get_function(self, name):
+ return self._functions.get(name)
+
+
+name_charset = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
+name_charset = dict(zip(name_charset,name_charset))
+
+def match_name(s):
+ """Return the length in characters of the longest prefix made of name
+ allowed characters in s.
+ """
+ for i,c in enumerate(s):
+ if c not in name_charset:
+ return s[:i]
+ return s
+
+def is_name(s):
+ return len([c for c in s if c not in name_charset])<=0
+
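+# Examples:
+# >>> match_name('foo_bar-baz')
+# 'foo_bar'
+# >>> is_name('foo_bar'), is_name('foo-bar')
+# (True, False)
+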
+def is_special_param(c):
+ return len(c)==1 and c in ('@','*','#','?','-','$','!','0')
+
+def utility_not_implemented(name, *args, **kwargs):
+ raise NotImplementedError('%s utility is not implemented' % name)
+
+
+class Utility:
+ """Define utilities properties:
+ func -- utility callable. See builtin module for utility samples.
+ is_special -- see XCU 2.8.
+ """
+ def __init__(self, func, is_special=0):
+ self.func = func
+ self.is_special = bool(is_special)
+
+
+def encodeargs(args):
+ def encodearg(s):
+ # encodestring() wraps its output with newlines; strip them
+ return ''.join(base64.encodestring(s).splitlines())
+
+ s = pickle.dumps(args)
+ return encodearg(s)
+
+def decodeargs(s):
+ s = base64.decodestring(s)
+ return pickle.loads(s)
+
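+# Round-trip sketch:
+# >>> decodeargs(encodeargs(['ls', '-l']))
+# ['ls', '-l']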
+
+class GlobError(Exception):
+ pass
+
+class Options:
+ def __init__(self):
+ # True if Mercurial operates with binary streams
+ self.hgbinary = True
+
+class Interpreter:
+ # Implementation is very basic: the execute() method just makes a DFS on the
+ # AST and executes nodes one by one. Nodes are tuples (name, obj) where name
+ # is a string identifier and obj the AST element returned by the parser.
+ #
+ # Handlers are named after the node identifiers.
+ # TODO: check node names and remove the switch in execute with some
+ # dynamic getattr() call to find node handlers.
+ """Shell interpreter.
+
+ The following debugging flags can be passed:
+ debug-parsing - enable PLY debugging.
+ debug-tree - print the generated AST.
+ debug-cmd - trace command execution before word expansion, plus exit status.
+ debug-utility - trace utility execution.
+ """
+
+ # List supported commands.
+ COMMANDS = {
+ 'cat': Utility(builtin.utility_cat,),
+ 'cd': Utility(builtin.utility_cd,),
+ ':': Utility(builtin.utility_colon,),
+ 'echo': Utility(builtin.utility_echo),
+ 'env': Utility(builtin.utility_env),
+ 'exit': Utility(builtin.utility_exit),
+ 'export': Utility(builtin.builtin_export, is_special=1),
+ 'egrep': Utility(builtin.utility_egrep),
+ 'fgrep': Utility(builtin.utility_fgrep),
+ 'gunzip': Utility(builtin.utility_gunzip),
+ 'kill': Utility(builtin.utility_kill),
+ 'mkdir': Utility(builtin.utility_mkdir),
+ 'netstat': Utility(builtin.utility_netstat),
+ 'printf': Utility(builtin.utility_printf),
+ 'pwd': Utility(builtin.utility_pwd),
+ 'return': Utility(builtin.builtin_return, is_special=1),
+ 'sed': Utility(builtin.utility_sed,),
+ 'set': Utility(builtin.builtin_set,),
+ 'shift': Utility(builtin.builtin_shift,),
+ 'sleep': Utility(builtin.utility_sleep,),
+ 'sort': Utility(builtin.utility_sort,),
+ 'trap': Utility(builtin.builtin_trap, is_special=1),
+ 'true': Utility(builtin.utility_true),
+ 'unset': Utility(builtin.builtin_unset, is_special=1),
+ 'wait': Utility(builtin.builtin_wait, is_special=1),
+ }
+
+ def __init__(self, pwd, debugflags = [], env=None, redirs=None, stdin=None,
+ stdout=None, stderr=None, opts=Options()):
+ self._env = env
+ if self._env is None:
+ self._env = Environment(pwd)
+ self._children = {}
+
+ self._redirs = redirs
+ self._close_redirs = False
+
+ if self._redirs is None:
+ if stdin is None:
+ stdin = sys.stdin
+ if stdout is None:
+ stdout = sys.stdout
+ if stderr is None:
+ stderr = sys.stderr
+ stdin = FileWrapper('r', stdin, False)
+ stdout = FileWrapper('w', stdout, False)
+ stderr = FileWrapper('w', stderr, False)
+ self._redirs = Redirections(stdin, stdout, stderr)
+ self._close_redirs = True
+
+ self._debugflags = list(debugflags)
+ self._logfile = sys.stderr
+ self._options = opts
+
+ def close(self):
+ """Must be called when the interpreter is no longer used."""
+ script = self._env.traps.get('EXIT')
+ if script:
+ try:
+ self.execute_script(script=script)
+ except Exception:
+ pass
+
+ if self._redirs is not None and self._close_redirs:
+ self._redirs.close()
+ self._redirs = None
+
+ def log(self, s):
+ self._logfile.write(s)
+ self._logfile.flush()
+
+ def __getitem__(self, key):
+ return self._env[key]
+
+ def __setitem__(self, key, value):
+ self._env[key] = value
+
+ def options(self):
+ return self._options
+
+ def redirect(self, redirs, ios):
+ def add_redir(io):
+ if isinstance(io, pyshyacc.IORedirect):
+ redirs.add(self, io.op, io.filename, io.io_number)
+ else:
+ redirs.add_here_document(self, io.name, io.content, io.io_number)
+
+ map(add_redir, ios)
+ return redirs
+
+ def execute_script(self, script=None, ast=None, sourced=False,
+ scriptpath=None):
+ """If script is not None, parse the input. Otherwise takes the supplied
+ AST. Then execute the AST.
+ Return the script exit status.
+ """
+ try:
+ if scriptpath is not None:
+ self._env['0'] = os.path.abspath(scriptpath)
+
+ if script is not None:
+ debug_parsing = ('debug-parsing' in self._debugflags)
+ cmds, script = pyshyacc.parse(script, True, debug_parsing)
+ if 'debug-tree' in self._debugflags:
+ pyshyacc.print_commands(cmds, self._logfile)
+ self._logfile.flush()
+ else:
+ cmds, script = ast, ''
+
+ status = 0
+ for cmd in cmds:
+ try:
+ status = self.execute(cmd)
+ except ExitSignal as e:
+ if sourced:
+ raise
+ status = int(e.args[0])
+ return status
+ except ShellError:
+ self._env['?'] = 1
+ raise
+ if 'debug-utility' in self._debugflags or 'debug-cmd' in self._debugflags:
+ self.log('returncode ' + str(status)+ '\n')
+ return status
+ except CommandNotFound as e:
+ print >>self._redirs.stderr, str(e)
+ self._redirs.stderr.flush()
+ # Command not found by non-interactive shell
+ # return 127
+ raise
+ except RedirectionError as e:
+ # TODO: should be handled depending on the utility status
+ print >>self._redirs.stderr, str(e)
+ self._redirs.stderr.flush()
+ # Command not found by non-interactive shell
+ # return 127
+ raise
+
+ def dotcommand(self, env, args):
+ if len(args) < 1:
+ raise ShellError('. expects at least one argument')
+ path = args[0]
+ if '/' not in path:
+ found = env.find_in_path(args[0], True)
+ if found:
+ path = found[0]
+ script = file(path).read()
+ return self.execute_script(script=script, sourced=True)
+
+ def execute(self, token, redirs=None):
+ """Execute and AST subtree with supplied redirections overriding default
+ interpreter ones.
+ Return the exit status.
+ """
+ if not token:
+ return 0
+
+ if redirs is None:
+ redirs = self._redirs
+
+ if isinstance(token, list):
+ # Commands sequence
+ res = 0
+ for t in token:
+ res = self.execute(t, redirs)
+ return res
+
+ type, value = token
+ status = 0
+ if type=='simple_command':
+ redirs_copy = redirs.clone()
+ try:
+ # TODO: define and handle command return values
+ # TODO: implement set -e
+ status = self._execute_simple_command(value, redirs_copy)
+ finally:
+ redirs_copy.close()
+ elif type=='pipeline':
+ status = self._execute_pipeline(value, redirs)
+ elif type=='and_or':
+ status = self._execute_and_or(value, redirs)
+ elif type=='for_clause':
+ status = self._execute_for_clause(value, redirs)
+ elif type=='while_clause':
+ status = self._execute_while_clause(value, redirs)
+ elif type=='function_definition':
+ status = self._execute_function_definition(value, redirs)
+ elif type=='brace_group':
+ status = self._execute_brace_group(value, redirs)
+ elif type=='if_clause':
+ status = self._execute_if_clause(value, redirs)
+ elif type=='subshell':
+ status = self.subshell(ast=value.cmds, redirs=redirs)
+ elif type=='async':
+ status = self._asynclist(value)
+ elif type=='redirect_list':
+ redirs_copy = self.redirect(redirs.clone(), value.redirs)
+ try:
+ status = self.execute(value.cmd, redirs_copy)
+ finally:
+ redirs_copy.close()
+ else:
+ raise NotImplementedError('Unsupported token type ' + type)
+
+ if status < 0:
+ status = 255
+ return status
+
+ def _execute_if_clause(self, if_clause, redirs):
+ cond_status = self.execute(if_clause.cond, redirs)
+ if cond_status==0:
+ return self.execute(if_clause.if_cmds, redirs)
+ else:
+ return self.execute(if_clause.else_cmds, redirs)
+
+ def _execute_brace_group(self, group, redirs):
+ status = 0
+ for cmd in group.cmds:
+ status = self.execute(cmd, redirs)
+ return status
+
+ def _execute_function_definition(self, fundef, redirs):
+ self._env.define_function(fundef.name, fundef.body)
+ return 0
+
+ def _execute_while_clause(self, while_clause, redirs):
+ status = 0
+ while 1:
+ cond_status = 0
+ for cond in while_clause.condition:
+ cond_status = self.execute(cond, redirs)
+
+ if cond_status:
+ break
+
+ for cmd in while_clause.cmds:
+ status = self.execute(cmd, redirs)
+
+ return status
+
+ def _execute_for_clause(self, for_clause, redirs):
+ if not is_name(for_clause.name):
+ raise ShellSyntaxError('%s is not a valid name' % repr(for_clause.name))
+ items = mappend(self.expand_token, for_clause.items)
+
+ status = 0
+ for item in items:
+ self._env[for_clause.name] = item
+ for cmd in for_clause.cmds:
+ status = self.execute(cmd, redirs)
+ return status
+
+ def _execute_and_or(self, or_and, redirs):
+ res = self.execute(or_and.left, redirs)
+ if (or_and.op=='&&' and res==0) or (or_and.op!='&&' and res!=0):
+ res = self.execute(or_and.right, redirs)
+ return res
+
+ def _execute_pipeline(self, pipeline, redirs):
+ if len(pipeline.commands)==1:
+ status = self.execute(pipeline.commands[0], redirs)
+ else:
+ # Execute all commands one after the other
+ status = 0
+ inpath, outpath = None, None
+ try:
+ # Command inputs and outputs cannot really be plugged together as done
+ # by a real shell. Run commands sequentially and chain their
+ # input/output through temporary files.
+ tmpfd, inpath = tempfile.mkstemp()
+ os.close(tmpfd)
+ tmpfd, outpath = tempfile.mkstemp()
+ os.close(tmpfd)
+
+ inpath = win32_to_unix_path(inpath)
+ outpath = win32_to_unix_path(outpath)
+
+ for i, cmd in enumerate(pipeline.commands):
+ call_redirs = redirs.clone()
+ try:
+ if i!=0:
+ call_redirs.add(self, '<', inpath)
+ if i!=len(pipeline.commands)-1:
+ call_redirs.add(self, '>', outpath)
+
+ status = self.execute(cmd, call_redirs)
+
+ # Chain inputs/outputs
+ inpath, outpath = outpath, inpath
+ finally:
+ call_redirs.close()
+ finally:
+ if inpath: os.remove(inpath)
+ if outpath: os.remove(outpath)
+
+ if pipeline.reverse_status:
+ status = int(not status)
+ self._env['?'] = status
+ return status
+
+ def _execute_function(self, name, args, interp, env, stdin, stdout, stderr, *others):
+ assert interp is self
+
+ func = env.get_function(name)
+ #Set positional parameters
+ prevargs = None
+ try:
+ prevargs = env.set_positional_args(args)
+ try:
+ redirs = Redirections(stdin.dup(), stdout.dup(), stderr.dup())
+ try:
+ status = self.execute(func, redirs)
+ finally:
+ redirs.close()
+ except ReturnSignal as e:
+ status = int(e.args[0])
+ env['?'] = status
+ return status
+ finally:
+ #Reset positional parameters
+ if prevargs is not None:
+ env.set_positional_args(prevargs)
+
+ def _execute_simple_command(self, token, redirs):
+ """Can raise ReturnSignal when return builtin is called, ExitSignal when
+ exit is called, and other shell exceptions upon builtin failures.
+ """
+ debug_command = 'debug-cmd' in self._debugflags
+ if debug_command:
+ self.log('word' + repr(token.words) + '\n')
+ self.log('assigns' + repr(token.assigns) + '\n')
+ self.log('redirs' + repr(token.redirs) + '\n')
+
+ is_special = None
+ env = self._env
+
+ try:
+ # Word expansion
+ args = []
+ for word in token.words:
+ args += self.expand_token(word)
+ if is_special is None and args:
+ is_special = env.is_function(args[0]) or \
+ (args[0] in self.COMMANDS and self.COMMANDS[args[0]].is_special)
+
+ if debug_command:
+ self.log('_execute_simple_command' + str(args) + '\n')
+
+ if not args:
+ # Redirections happen in a subshell
+ redirs = redirs.clone()
+ elif not is_special:
+ env = self._env.clone()
+
+ # Redirections
+ self.redirect(redirs, token.redirs)
+
+ # Variables assignments
+ res = 0
+ for type,(k,v) in token.assigns:
+ status, expanded = self.expand_variable((k,v))
+ if status is not None:
+ res = status
+ if args:
+ env.export(k, expanded)
+ else:
+ env[k] = expanded
+
+ if args and args[0] in ('.', 'source'):
+ res = self.dotcommand(env, args[1:])
+ elif args:
+ if args[0] in self.COMMANDS:
+ command = self.COMMANDS[args[0]]
+ elif env.is_function(args[0]):
+ command = Utility(self._execute_function, is_special=True)
+ else:
+ if '/' not in args[0].replace('\\', '/'):
+ cmd = env.find_in_path(args[0])
+ if not cmd:
+ # TODO: test error code on unknown command => 127
+ raise CommandNotFound('Unknown command: "%s"' % args[0])
+ else:
+ # Handle commands like '/cygdrive/c/foo.bat'
+ cmd = cygwin_to_windows_path(args[0])
+ if not os.path.exists(cmd):
+ raise CommandNotFound('%s: No such file or directory' % args[0])
+ shebang = resolve_shebang(cmd)
+ if shebang:
+ cmd = shebang
+ else:
+ cmd = [cmd]
+ args[0:1] = cmd
+ command = Utility(builtin.run_command)
+
+ # Command execution
+ if 'debug-cmd' in self._debugflags:
+ self.log('redirections ' + str(redirs) + '\n')
+
+ res = command.func(args[0], args[1:], self, env,
+ redirs.stdin(), redirs.stdout(),
+ redirs.stderr(), self._debugflags)
+
+ if self._env.has_opt('-x'):
+ # Trace command execution in shell environment
+ # BUG: would be hard to reproduce a real shell behaviour since
+ # the AST is not annotated with source lines/tokens.
+ self._redirs.stdout().write(' '.join(args))
+
+ except ReturnSignal:
+ raise
+ except ShellError as e:
+ if is_special or isinstance(e, (ExitSignal,
+ ShellSyntaxError, ExpansionError)):
+ raise e
+ self._redirs.stderr().write(str(e)+'\n')
+ return 1
+
+ return res
+
+ def expand_token(self, word):
+ """Expand a word as specified in [2.6 Word Expansions]. Return the list
+ of expanded words.
+ """
+ status, wtrees = self._expand_word(word)
+ return map(pyshlex.wordtree_as_string, wtrees)
+
+ def expand_variable(self, word):
+ """Return a status code (or None if no command expansion occurred)
+ and a single word.
+ """
+ status, wtrees = self._expand_word(word, pathname=False, split=False)
+ words = map(pyshlex.wordtree_as_string, wtrees)
+ assert len(words)==1
+ return status, words[0]
+
+ def expand_here_document(self, word):
+ """Return the expanded document as a single word. The here document is
+ assumed to be unquoted.
+ """
+ status, wtrees = self._expand_word(word, pathname=False,
+ split=False, here_document=True)
+ words = map(pyshlex.wordtree_as_string, wtrees)
+ assert len(words)==1
+ return words[0]
+
+ def expand_redirection(self, word):
+ """Return a single word."""
+ return self.expand_variable(word)[1]
+
+ def get_env(self):
+ return self._env
+
+ def _expand_word(self, token, pathname=True, split=True, here_document=False):
+ wtree = pyshlex.make_wordtree(token[1], here_document=here_document)
+
+ # TODO: implement tilde expansion
+ def expand(wtree):
+ """Return a pseudo wordtree: the tree or its subelements can be empty
+ lists when no value results from the expansion.
+ """
+ status = None
+ for part in wtree:
+ if not isinstance(part, list):
+ continue
+ if part[0] in ("'", '\\'):
+ continue
+ elif part[0] in ('`', '$('):
+ status, result = self._expand_command(part)
+ part[:] = result
+ elif part[0] in ('$', '${'):
+ part[:] = self._expand_parameter(part, wtree[0]=='"', split)
+ elif part[0] in ('', '"'):
+ status, result = expand(part)
+ part[:] = result
+ else:
+ raise NotImplementedError('%s expansion is not implemented'
+ % part[0])
+ # [] is returned when an expansion results in no field,
+ # like an empty $@
+ wtree = [p for p in wtree if p != []]
+ if len(wtree) < 3:
+ return status, []
+ return status, wtree
+
+ status, wtree = expand(wtree)
+ if len(wtree) == 0:
+ return status, wtree
+ wtree = pyshlex.normalize_wordtree(wtree)
+
+ if split:
+ wtrees = self._split_fields(wtree)
+ else:
+ wtrees = [wtree]
+
+ if pathname:
+ wtrees = mappend(self._expand_pathname, wtrees)
+
+ wtrees = map(self._remove_quotes, wtrees)
+ return status, wtrees
+
+ def _expand_command(self, wtree):
+ # BUG: there is something to do with backslashes and quoted
+ # characters here
+ command = pyshlex.wordtree_as_string(wtree[1:-1])
+ status, output = self.subshell_output(command)
+ return status, ['', output, '']
+
+ def _expand_parameter(self, wtree, quoted=False, split=False):
+ """Return a valid wtree or an empty list when no parameter results."""
+ # Get the parameter name
+ # TODO: implement weird expansion rules with ':'
+ name = pyshlex.wordtree_as_string(wtree[1:-1])
+ if not is_name(name) and not is_special_param(name):
+ raise ExpansionError('Bad substitution "%s"' % name)
+ # TODO: implement special parameters
+ if name in ('@', '*'):
+ args = self._env.get_positional_args()
+ if len(args) == 0:
+ return []
+ if len(args)<2:
+ return ['', ''.join(args), '']
+
+ sep = self._env.get('IFS', '')[:1]
+ if split and quoted and name=='@':
+ # Introduce a new token to tell the caller that these parameters
+ # cause a split as specified in 2.5.2
+ return ['@'] + args + ['']
+ else:
+ return ['', sep.join(args), '']
+
+ return ['', self._env.get(name, ''), '']
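+
+    # Illustrative sketch, not in the original source: with x=foo in the
+    # environment, _expand_parameter(['${', 'x', '}']) returns the wtree
+    # ['', 'foo', ''], while an unset name yields ['', '', ''].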
+
+ def _split_fields(self, wtree):
+ def is_empty(split):
+ return split==['', '', '']
+
+ def split_positional(quoted):
+            # Return a list of wtrees split according to positional parameter rules.
+ # All remaining '@' groups are removed.
+ assert quoted[0]=='"'
+
+ splits = [[]]
+ for part in quoted:
+ if not isinstance(part, list) or part[0]!='@':
+ splits[-1].append(part)
+ else:
+                    # Empty or single-argument lists were dealt with already
+ assert len(part)>3
+ # First argument must join with the beginning part of the original word
+ splits[-1].append(part[1])
+                    # Create double-quoted expressions for every argument after the first
+ for arg in part[2:-1]:
+ splits[-1].append('"')
+ splits.append(['"', arg])
+ return splits
+
+        # At this point, all expansions but pathnames have occurred. Only quoted
+ # and positional sequences remain. Thus, all candidates for field splitting
+ # are in the tree root, or are positional splits ('@') and lie in root
+ # children.
+ if not wtree or wtree[0] not in ('', '"'):
+ # The whole token is quoted or empty, nothing to split
+ return [wtree]
+
+ if wtree[0]=='"':
+ wtree = ['', wtree, '']
+
+ result = [['', '']]
+ for part in wtree[1:-1]:
+ if isinstance(part, list):
+ if part[0]=='"':
+ splits = split_positional(part)
+ if len(splits)<=1:
+ result[-1] += [part, '']
+ else:
+ # Terminate the current split
+ result[-1] += [splits[0], '']
+ result += splits[1:-1]
+ # Create a new split
+ result += [['', splits[-1], '']]
+ else:
+ result[-1] += [part, '']
+ else:
+ splits = self._env.split_fields(part)
+ if len(splits)<=1:
+ # No split
+ result[-1][-1] += part
+ else:
+ # Terminate the current resulting part and create a new one
+ result[-1][-1] += splits[0]
+ result[-1].append('')
+ result += [['', r, ''] for r in splits[1:-1]]
+ result += [['', splits[-1]]]
+ result[-1].append('')
+
+ # Leading and trailing empty groups come from leading/trailing blanks
+ if result and is_empty(result[-1]):
+ result[-1:] = []
+ if result and is_empty(result[0]):
+ result[:1] = []
+ return result
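+
+    # Illustrative sketch, not in the original source: with a blank in IFS,
+    # splitting the unquoted wtree ['', 'a b', ''] yields the two wtrees
+    # ['', 'a', ''] and ['', 'b', ''], while fully quoted trees are returned
+    # unsplit.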
+
+ def _expand_pathname(self, wtree):
+ """See [2.6.6 Pathname Expansion]."""
+ if self._env.has_opt('-f'):
+ return [wtree]
+
+ # All expansions have been performed, only quoted sequences should remain
+ # in the tree. Generate the pattern by folding the tree, escaping special
+        # characters where they appear quoted
+ special_chars = '*?[]'
+
+ def make_pattern(wtree):
+ subpattern = []
+ for part in wtree[1:-1]:
+ if isinstance(part, list):
+ part = make_pattern(part)
+ elif wtree[0]!='':
+ for c in part:
+ # Meta-characters cannot be quoted
+ if c in special_chars:
+ raise GlobError()
+ subpattern.append(part)
+ return ''.join(subpattern)
+
+ def pwd_glob(pattern):
+ cwd = os.getcwd()
+ os.chdir(self._env['PWD'])
+ try:
+ return glob.glob(pattern)
+ finally:
+ os.chdir(cwd)
+
+ #TODO: check working directory issues here wrt relative patterns
+ try:
+ pattern = make_pattern(wtree)
+ paths = pwd_glob(pattern)
+ except GlobError:
+            # BUG: Meta-characters were found in quoted sequences. They should
+            # have been used literally but this is unsupported in the current
+            # glob module.
+ # Instead we consider the whole tree must be used literally and
+ # therefore there is no point in globbing. This is wrong when meta
+ # characters are mixed with quoted meta in the same pattern like:
+ # < foo*"py*" >
+ paths = []
+
+ if not paths:
+ return [wtree]
+ return [['', path, ''] for path in paths]
+
+ def _remove_quotes(self, wtree):
+ """See [2.6.7 Quote Removal]."""
+
+ def unquote(wtree):
+ unquoted = []
+ for part in wtree[1:-1]:
+ if isinstance(part, list):
+ part = unquote(part)
+ unquoted.append(part)
+ return ''.join(unquoted)
+
+ return ['', unquote(wtree), '']
+
+ def subshell(self, script=None, ast=None, redirs=None):
+ """Execute the script or AST in a subshell, with inherited redirections
+ if redirs is not None.
+ """
+ if redirs:
+ sub_redirs = redirs
+ else:
+            sub_redirs = self._redirs.clone()
+
+ subshell = None
+ try:
+ subshell = Interpreter(None, self._debugflags, self._env.clone(True),
+ sub_redirs, opts=self._options)
+ return subshell.execute_script(script, ast)
+ finally:
+ if not redirs: sub_redirs.close()
+ if subshell: subshell.close()
+
+ def subshell_output(self, script):
+ """Execute the script in a subshell and return the captured output."""
+ # Create temporary file to capture subshell output
+ tmpfd, tmppath = tempfile.mkstemp()
+ try:
+ tmpfile = os.fdopen(tmpfd, 'wb')
+ stdout = FileWrapper('w', tmpfile)
+
+ redirs = Redirections(self._redirs.stdin().dup(),
+ stdout,
+ self._redirs.stderr().dup())
+ try:
+ status = self.subshell(script=script, redirs=redirs)
+ finally:
+ redirs.close()
+ redirs = None
+
+ # Extract subshell standard output
+ tmpfile = open(tmppath, 'rb')
+ try:
+ output = tmpfile.read()
+ return status, output.rstrip('\n')
+ finally:
+ tmpfile.close()
+ finally:
+ os.remove(tmppath)
+
+ def _asynclist(self, cmd):
+ args = (self._env.get_variables(), cmd)
+ arg = encodeargs(args)
+        assert len(arg) < 30*1024
+ cmd = ['pysh.bat', '--ast', '-c', arg]
+ p = subprocess.Popen(cmd, cwd=self._env['PWD'])
+ self._children[p.pid] = p
+ self._env['!'] = p.pid
+ return 0
+
+ def wait(self, pids=None):
+ if not pids:
+            pids = list(self._children.keys())
+
+ status = 127
+ for pid in pids:
+ if pid not in self._children:
+ continue
+ p = self._children.pop(pid)
+ status = p.wait()
+
+ return status
+
diff --git a/poky/bitbake/lib/bb/pysh/lsprof.py b/poky/bitbake/lib/bb/pysh/lsprof.py
new file mode 100644
index 000000000..b1831c22a
--- /dev/null
+++ b/poky/bitbake/lib/bb/pysh/lsprof.py
@@ -0,0 +1,116 @@
+#! /usr/bin/env python
+
+import sys
+from _lsprof import Profiler, profiler_entry
+
+__all__ = ['profile', 'Stats']
+
+def profile(f, *args, **kwds):
+ """XXX docstring"""
+ p = Profiler()
+ p.enable(subcalls=True, builtins=True)
+ try:
+ f(*args, **kwds)
+ finally:
+ p.disable()
+ return Stats(p.getstats())
+
+
+class Stats(object):
+ """XXX docstring"""
+
+ def __init__(self, data):
+ self.data = data
+
+ def sort(self, crit="inlinetime"):
+ """XXX docstring"""
+ if crit not in profiler_entry.__dict__:
+ raise ValueError("Can't sort by %s" % crit)
+ self.data.sort(lambda b, a: cmp(getattr(a, crit),
+ getattr(b, crit)))
+ for e in self.data:
+ if e.calls:
+ e.calls.sort(lambda b, a: cmp(getattr(a, crit),
+ getattr(b, crit)))
+
+ def pprint(self, top=None, file=None, limit=None, climit=None):
+ """XXX docstring"""
+ if file is None:
+ file = sys.stdout
+ d = self.data
+ if top is not None:
+ d = d[:top]
+ cols = "% 12s %12s %11.4f %11.4f %s\n"
+ hcols = "% 12s %12s %12s %12s %s\n"
+ cols2 = "+%12s %12s %11.4f %11.4f + %s\n"
+ file.write(hcols % ("CallCount", "Recursive", "Total(ms)",
+ "Inline(ms)", "module:lineno(function)"))
+ count = 0
+ for e in d:
+ file.write(cols % (e.callcount, e.reccallcount, e.totaltime,
+ e.inlinetime, label(e.code)))
+ count += 1
+ if limit is not None and count == limit:
+ return
+ ccount = 0
+ if e.calls:
+ for se in e.calls:
+ file.write(cols % ("+%s" % se.callcount, se.reccallcount,
+ se.totaltime, se.inlinetime,
+ "+%s" % label(se.code)))
+ count += 1
+ ccount += 1
+ if limit is not None and count == limit:
+ return
+ if climit is not None and ccount == climit:
+ break
+
+ def freeze(self):
+ """Replace all references to code objects with string
+ descriptions; this makes it possible to pickle the instance."""
+
+ # this code is probably rather ickier than it needs to be!
+ for i in range(len(self.data)):
+ e = self.data[i]
+ if not isinstance(e.code, str):
+ self.data[i] = type(e)((label(e.code),) + e[1:])
+ if e.calls:
+ for j in range(len(e.calls)):
+ se = e.calls[j]
+ if not isinstance(se.code, str):
+ e.calls[j] = type(se)((label(se.code),) + se[1:])
+
+_fn2mod = {}
+
+def label(code):
+ if isinstance(code, str):
+ return code
+ try:
+ mname = _fn2mod[code.co_filename]
+ except KeyError:
+ for k, v in sys.modules.items():
+ if v is None:
+ continue
+ if not hasattr(v, '__file__'):
+ continue
+ if not isinstance(v.__file__, str):
+ continue
+ if v.__file__.startswith(code.co_filename):
+ mname = _fn2mod[code.co_filename] = k
+ break
+ else:
+ mname = _fn2mod[code.co_filename] = '<%s>'%code.co_filename
+
+ return '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name)
+
+
+if __name__ == '__main__':
+ import os
+ sys.argv = sys.argv[1:]
+ if not sys.argv:
+ print >> sys.stderr, "usage: lsprof.py <script> <arguments...>"
+ sys.exit(2)
+ sys.path.insert(0, os.path.abspath(os.path.dirname(sys.argv[0])))
+ stats = profile(execfile, sys.argv[0], globals(), locals())
+ stats.sort()
+ stats.pprint()
diff --git a/poky/bitbake/lib/bb/pysh/pysh.py b/poky/bitbake/lib/bb/pysh/pysh.py
new file mode 100644
index 000000000..b4e6145b5
--- /dev/null
+++ b/poky/bitbake/lib/bb/pysh/pysh.py
@@ -0,0 +1,167 @@
+# pysh.py - command processing for pysh.
+#
+# Copyright 2007 Patrick Mezard
+#
+# This software may be used and distributed according to the terms
+# of the GNU General Public License, incorporated herein by reference.
+
+import optparse
+import os
+import sys
+
+import interp
+
+SH_OPT = optparse.OptionParser(prog='pysh', usage="%prog [OPTIONS]", version='0.1')
+SH_OPT.add_option('-c', action='store_true', dest='command_string', default=None,
+ help='A string that shall be interpreted by the shell as one or more commands')
+SH_OPT.add_option('--redirect-to', dest='redirect_to', default=None,
+ help='Redirect script commands stdout and stderr to the specified file')
+# See utility_command in builtin.py about the reason for this flag.
+SH_OPT.add_option('--redirected', dest='redirected', action='store_true', default=False,
+ help='Tell the interpreter that stdout and stderr are actually the same objects, which is really stdout')
+SH_OPT.add_option('--debug-parsing', action='store_true', dest='debug_parsing', default=False,
+ help='Trace PLY execution')
+SH_OPT.add_option('--debug-tree', action='store_true', dest='debug_tree', default=False,
+ help='Display the generated syntax tree.')
+SH_OPT.add_option('--debug-cmd', action='store_true', dest='debug_cmd', default=False,
+ help='Trace command execution before parameters expansion and exit status.')
+SH_OPT.add_option('--debug-utility', action='store_true', dest='debug_utility', default=False,
+ help='Trace utility calls, after parameters expansions')
+SH_OPT.add_option('--ast', action='store_true', dest='ast', default=False,
+ help='Encoded commands to execute in a subprocess')
+SH_OPT.add_option('--profile', action='store_true', default=False,
+ help='Profile pysh run')
+
+
+def split_args(args):
+ # Separate shell arguments from command ones
+    # Just stop at the first argument not starting with a dash. I know, this is completely broken:
+    # it ignores files starting with a dash and may treat option values as the command file. This
+    # is not supposed to happen for now.
+ command_index = len(args)
+ for i,arg in enumerate(args):
+ if not arg.startswith('-'):
+ command_index = i
+ break
+
+ return args[:command_index], args[command_index:]
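+
+# Illustrative sketch, not in the original source: the split happens at the
+# first non-dash argument, e.g.
+#     split_args(['--debug-cmd', 'foo.sh', '-x'])
+# returns (['--debug-cmd'], ['foo.sh', '-x']).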
+
+
+def fixenv(env):
+ path = env.get('PATH')
+ if path is not None:
+ parts = path.split(os.pathsep)
+ # Remove Windows utilities from PATH, they are useless at best and
+ # some of them (find) may be confused with other utilities.
+ parts = [p for p in parts if 'system32' not in p.lower()]
+ env['PATH'] = os.pathsep.join(parts)
+ if env.get('HOME') is None:
+ # Several utilities, including cvsps, cannot work without
+ # a defined HOME directory.
+ env['HOME'] = os.path.expanduser('~')
+ return env
+
+def _sh(cwd, shargs, cmdargs, options, debugflags=None, env=None):
+ if os.environ.get('PYSH_TEXT') != '1':
+ import msvcrt
+ for fp in (sys.stdin, sys.stdout, sys.stderr):
+ msvcrt.setmode(fp.fileno(), os.O_BINARY)
+
+ hgbin = os.environ.get('PYSH_HGTEXT') != '1'
+
+ if debugflags is None:
+ debugflags = []
+ if options.debug_parsing: debugflags.append('debug-parsing')
+ if options.debug_utility: debugflags.append('debug-utility')
+ if options.debug_cmd: debugflags.append('debug-cmd')
+ if options.debug_tree: debugflags.append('debug-tree')
+
+ if env is None:
+ env = fixenv(dict(os.environ))
+ if cwd is None:
+ cwd = os.getcwd()
+
+ if not cmdargs:
+ # Nothing to do
+ return 0
+
+ ast = None
+ command_file = None
+ if options.command_string:
+ input = cmdargs[0]
+ if not options.ast:
+ input += '\n'
+ else:
+ args, input = interp.decodeargs(input), None
+ env, ast = args
+ cwd = env.get('PWD', cwd)
+ else:
+ command_file = cmdargs[0]
+ arguments = cmdargs[1:]
+
+ prefix = interp.resolve_shebang(command_file, ignoreshell=True)
+ if prefix:
+ input = ' '.join(prefix + [command_file] + arguments)
+ else:
+ # Read commands from file
+ f = file(command_file)
+ try:
+ # Trailing newline to help the parser
+ input = f.read() + '\n'
+ finally:
+ f.close()
+
+ redirect = None
+ try:
+ if options.redirected:
+ stdout = sys.stdout
+ stderr = stdout
+ elif options.redirect_to:
+ redirect = open(options.redirect_to, 'wb')
+ stdout = redirect
+ stderr = redirect
+ else:
+ stdout = sys.stdout
+ stderr = sys.stderr
+
+ # TODO: set arguments to environment variables
+ opts = interp.Options()
+ opts.hgbinary = hgbin
+ ip = interp.Interpreter(cwd, debugflags, stdout=stdout, stderr=stderr,
+ opts=opts)
+ try:
+ # Export given environment in shell object
+ for k,v in env.iteritems():
+ ip.get_env().export(k,v)
+ return ip.execute_script(input, ast, scriptpath=command_file)
+ finally:
+ ip.close()
+ finally:
+ if redirect is not None:
+ redirect.close()
+
+def sh(cwd=None, args=None, debugflags=None, env=None):
+ if args is None:
+ args = sys.argv[1:]
+ shargs, cmdargs = split_args(args)
+ options, shargs = SH_OPT.parse_args(shargs)
+
+ if options.profile:
+ import lsprof
+ p = lsprof.Profiler()
+ p.enable(subcalls=True)
+ try:
+ return _sh(cwd, shargs, cmdargs, options, debugflags, env)
+ finally:
+ p.disable()
+ stats = lsprof.Stats(p.getstats())
+ stats.sort()
+ stats.pprint(top=10, file=sys.stderr, climit=5)
+ else:
+ return _sh(cwd, shargs, cmdargs, options, debugflags, env)
+
+def main():
+ sys.exit(sh())
+
+if __name__=='__main__':
+ main()
diff --git a/poky/bitbake/lib/bb/pysh/pyshlex.py b/poky/bitbake/lib/bb/pysh/pyshlex.py
new file mode 100644
index 000000000..fbf094b7a
--- /dev/null
+++ b/poky/bitbake/lib/bb/pysh/pyshlex.py
@@ -0,0 +1,888 @@
+# pyshlex.py - PLY compatible lexer for pysh.
+#
+# Copyright 2007 Patrick Mezard
+#
+# This software may be used and distributed according to the terms
+# of the GNU General Public License, incorporated herein by reference.
+
+# TODO:
+# - review all "char in 'abc'" snippets: the empty string can be matched
+# - test line continuations within quoted/expansion strings
+# - eof is buggy wrt sublexers
+# - the lexer cannot really work in pull mode as it would be required to run
+# PLY in pull mode. It was designed to work incrementally and it would not be
+# that hard to enable pull mode.
+import re
+try:
+ s = set()
+ del s
+except NameError:
+    from sets import Set as set
+
+from ply import lex
+from bb.pysh.sherrors import *
+
+class NeedMore(Exception):
+ pass
+
+def is_blank(c):
+ return c in (' ', '\t')
+
+_RE_DIGITS = re.compile(r'^\d+$')
+
+def are_digits(s):
+ return _RE_DIGITS.search(s) is not None
+
+_OPERATORS = dict([
+ ('&&', 'AND_IF'),
+ ('||', 'OR_IF'),
+ (';;', 'DSEMI'),
+ ('<<', 'DLESS'),
+ ('>>', 'DGREAT'),
+ ('<&', 'LESSAND'),
+ ('>&', 'GREATAND'),
+ ('<>', 'LESSGREAT'),
+ ('<<-', 'DLESSDASH'),
+ ('>|', 'CLOBBER'),
+ ('&', 'AMP'),
+ (';', 'COMMA'),
+ ('<', 'LESS'),
+ ('>', 'GREATER'),
+ ('(', 'LPARENS'),
+ (')', 'RPARENS'),
+])
+
+#Make a function to silence pychecker "Local variable shadows global"
+def make_partial_ops():
+ partials = {}
+ for k in _OPERATORS:
+ for i in range(1, len(k)+1):
+ partials[k[:i]] = None
+ return partials
+
+_PARTIAL_OPERATORS = make_partial_ops()
+
+def is_partial_op(s):
+ """Return True if s matches a non-empty subpart of an operator starting
+ at its first character.
+ """
+ return s in _PARTIAL_OPERATORS
+
+def is_op(s):
+ """If s matches an operator, returns the operator identifier. Return None
+ otherwise.
+ """
+ return _OPERATORS.get(s)
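+
+# Illustrative sketch, not in the original source:
+#     is_op('&&')         -> 'AND_IF'
+#     is_op(';')          -> 'COMMA' (the identifier this table uses for ';')
+#     is_partial_op('<<') -> True, since '<<' is a prefix of '<<-'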
+
+_RESERVEDS = dict([
+ ('if', 'If'),
+ ('then', 'Then'),
+ ('else', 'Else'),
+ ('elif', 'Elif'),
+ ('fi', 'Fi'),
+ ('do', 'Do'),
+ ('done', 'Done'),
+ ('case', 'Case'),
+ ('esac', 'Esac'),
+ ('while', 'While'),
+ ('until', 'Until'),
+ ('for', 'For'),
+ ('{', 'Lbrace'),
+ ('}', 'Rbrace'),
+ ('!', 'Bang'),
+ ('in', 'In'),
+ ('|', 'PIPE'),
+])
+
+def get_reserved(s):
+ return _RESERVEDS.get(s)
+
+_RE_NAME = re.compile(r'^[0-9a-zA-Z_]+$')
+
+def is_name(s):
+ return _RE_NAME.search(s) is not None
+
+def find_chars(seq, chars):
+ for i,v in enumerate(seq):
+ if v in chars:
+ return i,v
+ return -1, None
+
+class WordLexer:
+ """WordLexer parse quoted or expansion expressions and return an expression
+ tree. The input string can be any well formed sequence beginning with quoting
+ or expansion character. Embedded expressions are handled recursively. The
+ resulting tree is made of lists and strings. Lists represent quoted or
+ expansion expressions. Each list first element is the opening separator,
+ the last one the closing separator. In-between can be any number of strings
+ or lists for sub-expressions. Non quoted/expansion expression can written as
+ strings or as lists with empty strings as starting and ending delimiters.
+ """
+
+ NAME_CHARSET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
+ NAME_CHARSET = dict(zip(NAME_CHARSET, NAME_CHARSET))
+
+ SPECIAL_CHARSET = '@*#?-$!0'
+
+    #Which characters can be escaped depends on the current delimiters
+ ESCAPABLE = {
+ '`': set(['$', '\\', '`']),
+ '"': set(['$', '\\', '`', '"']),
+ "'": set(),
+ }
+
+ def __init__(self, heredoc = False):
+ # _buffer is the unprocessed input characters buffer
+ self._buffer = []
+ # _stack is empty or contains a quoted list being processed
+ # (this is the DFS path to the quoted expression being evaluated).
+ self._stack = []
+ self._escapable = None
+ # True when parsing unquoted here documents
+ self._heredoc = heredoc
+
+ def add(self, data, eof=False):
+ """Feed the lexer with more data. If the quoted expression can be
+ delimited, return a tuple (expr, remaining) containing the expression
+ tree and the unconsumed data.
+ Otherwise, raise NeedMore.
+ """
+ self._buffer += list(data)
+ self._parse(eof)
+
+ result = self._stack[0]
+ remaining = ''.join(self._buffer)
+ self._stack = []
+ self._buffer = []
+ return result, remaining
+
+ def _is_escapable(self, c, delim=None):
+ if delim is None:
+ if self._heredoc:
+                # Backslashes work as if they were double quoted in unquoted
+ # here-documents
+ delim = '"'
+ else:
+ if len(self._stack)<=1:
+ return True
+ delim = self._stack[-2][0]
+
+ escapables = self.ESCAPABLE.get(delim, None)
+ return escapables is None or c in escapables
+
+ def _parse_squote(self, buf, result, eof):
+ if not buf:
+ raise NeedMore()
+ try:
+ pos = buf.index("'")
+ except ValueError:
+ raise NeedMore()
+ result[-1] += ''.join(buf[:pos])
+ result += ["'"]
+ return pos+1, True
+
+ def _parse_bquote(self, buf, result, eof):
+ if not buf:
+ raise NeedMore()
+
+ if buf[0]=='\n':
+ #Remove line continuations
+ result[:] = ['', '', '']
+ elif self._is_escapable(buf[0]):
+ result[-1] += buf[0]
+ result += ['']
+ else:
+ #Keep as such
+ result[:] = ['', '\\'+buf[0], '']
+
+ return 1, True
+
+ def _parse_dquote(self, buf, result, eof):
+ if not buf:
+ raise NeedMore()
+ pos, sep = find_chars(buf, '$\\`"')
+ if pos==-1:
+ raise NeedMore()
+
+ result[-1] += ''.join(buf[:pos])
+ if sep=='"':
+ result += ['"']
+ return pos+1, True
+ else:
+ #Keep everything until the separator and defer processing
+ return pos, False
+
+ def _parse_command(self, buf, result, eof):
+ if not buf:
+ raise NeedMore()
+
+ chars = '$\\`"\''
+ if result[0] == '$(':
+ chars += ')'
+ pos, sep = find_chars(buf, chars)
+ if pos == -1:
+ raise NeedMore()
+
+ result[-1] += ''.join(buf[:pos])
+ if (result[0]=='$(' and sep==')') or (result[0]=='`' and sep=='`'):
+ result += [sep]
+ return pos+1, True
+ else:
+ return pos, False
+
+ def _parse_parameter(self, buf, result, eof):
+ if not buf:
+ raise NeedMore()
+
+ pos, sep = find_chars(buf, '$\\`"\'}')
+ if pos==-1:
+ raise NeedMore()
+
+ result[-1] += ''.join(buf[:pos])
+ if sep=='}':
+ result += [sep]
+ return pos+1, True
+ else:
+ return pos, False
+
+ def _parse_dollar(self, buf, result, eof):
+ sep = result[0]
+ if sep=='$':
+ if not buf:
+ #TODO: handle empty $
+ raise NeedMore()
+ if buf[0]=='(':
+ if len(buf)==1:
+ raise NeedMore()
+
+ if buf[1]=='(':
+ result[0] = '$(('
+ buf[:2] = []
+ else:
+ result[0] = '$('
+ buf[:1] = []
+
+ elif buf[0]=='{':
+ result[0] = '${'
+ buf[:1] = []
+ else:
+ if buf[0] in self.SPECIAL_CHARSET:
+ result[-1] = buf[0]
+ read = 1
+ else:
+ for read,c in enumerate(buf):
+ if c not in self.NAME_CHARSET:
+ break
+ else:
+ if not eof:
+ raise NeedMore()
+ read += 1
+
+ result[-1] += ''.join(buf[0:read])
+
+ if not result[-1]:
+ result[:] = ['', result[0], '']
+ else:
+ result += ['']
+ return read,True
+
+ sep = result[0]
+ if sep=='$(':
+ parsefunc = self._parse_command
+ elif sep=='${':
+ parsefunc = self._parse_parameter
+ else:
+ raise NotImplementedError(sep)
+
+ pos, closed = parsefunc(buf, result, eof)
+ return pos, closed
+
+ def _parse(self, eof):
+ buf = self._buffer
+ stack = self._stack
+ recurse = False
+
+ while 1:
+ if not stack or recurse:
+ if not buf:
+ raise NeedMore()
+ if buf[0] not in ('"\\`$\''):
+ raise ShellSyntaxError('Invalid quoted string sequence')
+ stack.append([buf[0], ''])
+ buf[:1] = []
+ recurse = False
+
+ result = stack[-1]
+ if result[0]=="'":
+ parsefunc = self._parse_squote
+ elif result[0]=='\\':
+ parsefunc = self._parse_bquote
+ elif result[0]=='"':
+ parsefunc = self._parse_dquote
+ elif result[0]=='`':
+ parsefunc = self._parse_command
+ elif result[0][0]=='$':
+ parsefunc = self._parse_dollar
+ else:
+ raise NotImplementedError()
+
+ read, closed = parsefunc(buf, result, eof)
+
+ buf[:read] = []
+ if closed:
+ if len(stack)>1:
+ #Merge in parent expression
+ parsed = stack.pop()
+ stack[-1] += [parsed]
+ stack[-1] += ['']
+ else:
+ break
+ else:
+ recurse = True
+
+def normalize_wordtree(wtree):
+ """Fold back every literal sequence (delimited with empty strings) into
+    its parent sequence.
+ """
+ def normalize(wtree):
+ result = []
+ for part in wtree[1:-1]:
+ if isinstance(part, list):
+ part = normalize(part)
+ if part[0]=='':
+ #Move the part content back at current level
+ result += part[1:-1]
+ continue
+ elif not part:
+ #Remove empty strings
+ continue
+ result.append(part)
+ if not result:
+ result = ['']
+ return [wtree[0]] + result + [wtree[-1]]
+
+ return normalize(wtree)
+
+
+def make_wordtree(token, here_document=False):
+ """Parse a delimited token and return a tree similar to the ones returned by
+    WordLexer. token may contain any combination of expansion/quoted fields and
+    plain, unquoted ones.
+ """
+ tree = ['']
+ remaining = token
+ delimiters = '\\$`'
+ if not here_document:
+ delimiters += '\'"'
+
+ while 1:
+ pos, sep = find_chars(remaining, delimiters)
+ if pos==-1:
+ tree += [remaining, '']
+ return normalize_wordtree(tree)
+ tree.append(remaining[:pos])
+ remaining = remaining[pos:]
+
+ try:
+ result, remaining = WordLexer(heredoc = here_document).add(remaining, True)
+ except NeedMore:
+            raise ShellSyntaxError('Invalid token "%s"' % token)
+ tree.append(result)
+
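+# Illustrative sketch, not in the original source: plain text and quoted
+# fields fold into one normalized tree, e.g.
+#     make_wordtree('a"b"') -> ['', 'a', ['"', 'b', '"'], '']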
+
+def wordtree_as_string(wtree):
+ """Rewrite an expression tree generated by make_wordtree as string."""
+ def visit(node, output):
+ for child in node:
+ if isinstance(child, list):
+ visit(child, output)
+ else:
+ output.append(child)
+
+ output = []
+ visit(wtree, output)
+ return ''.join(output)
+
+
+def unquote_wordtree(wtree):
+ """Fold the word tree while removing quotes everywhere. Other expansion
+ sequences are joined as such.
+ """
+ def unquote(wtree):
+ unquoted = []
+ if wtree[0] in ('', "'", '"', '\\'):
+ wtree = wtree[1:-1]
+
+ for part in wtree:
+ if isinstance(part, list):
+ part = unquote(part)
+ unquoted.append(part)
+ return ''.join(unquoted)
+
+ return unquote(wtree)
+
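+# Illustrative sketch, not in the original source: quote removal strips the
+# delimiters but keeps their content, e.g.
+#     unquote_wordtree(make_wordtree('"a b"')) -> 'a b'
+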
+
+class HereDocLexer:
+ """HereDocLexer delimits whatever comes from the here-document starting newline
+ not included to the closing delimiter line included.
+ """
+ def __init__(self, op, delim):
+ assert op in ('<<', '<<-')
+ if not delim:
+ raise ShellSyntaxError('invalid here document delimiter %s' % str(delim))
+
+ self._op = op
+ self._delim = delim
+ self._buffer = []
+ self._token = []
+
+ def add(self, data, eof):
+ """If the here-document was delimited, return a tuple (content, remaining).
+ Raise NeedMore() otherwise.
+ """
+ self._buffer += list(data)
+ self._parse(eof)
+ token = ''.join(self._token)
+ remaining = ''.join(self._buffer)
+        self._token, self._buffer = [], []
+ return token, remaining
+
+ def _parse(self, eof):
+ while 1:
+ #Look for first unescaped newline. Quotes may be ignored
+ escaped = False
+ for i,c in enumerate(self._buffer):
+ if escaped:
+ escaped = False
+ elif c=='\\':
+ escaped = True
+ elif c=='\n':
+ break
+ else:
+ i = -1
+
+ if i==-1 or self._buffer[i]!='\n':
+ if not eof:
+ raise NeedMore()
+                #No more data, maybe the last line is the closing delimiter
+ line = ''.join(self._buffer)
+ eol = ''
+ self._buffer[:] = []
+ else:
+ line = ''.join(self._buffer[:i])
+ eol = self._buffer[i]
+ self._buffer[:i+1] = []
+
+ if self._op=='<<-':
+ line = line.lstrip('\t')
+
+ if line==self._delim:
+ break
+
+ self._token += [line, eol]
+ if i==-1:
+ break
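+
+    # Illustrative sketch, not in the original source:
+    #     HereDocLexer('<<', 'EOF').add('hello\nEOF\ntail', eof=True)
+    # returns ('hello\n', 'tail'): the content up to the delimiter line and
+    # the unconsumed input after it.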
+
+class Token:
+ #TODO: check this is still in use
+ OPERATOR = 'OPERATOR'
+ WORD = 'WORD'
+
+ def __init__(self):
+ self.value = ''
+ self.type = None
+
+ def __getitem__(self, key):
+ #Behave like a two elements tuple
+ if key==0:
+ return self.type
+ if key==1:
+ return self.value
+ raise IndexError(key)
+
+
+class HereDoc:
+ def __init__(self, op, name=None):
+ self.op = op
+ self.name = name
+ self.pendings = []
+
+TK_COMMA = 'COMMA'
+TK_AMPERSAND = 'AMP'
+TK_OP = 'OP'
+TK_TOKEN = 'TOKEN'
+TK_COMMENT = 'COMMENT'
+TK_NEWLINE = 'NEWLINE'
+TK_IONUMBER = 'IO_NUMBER'
+TK_ASSIGNMENT = 'ASSIGNMENT_WORD'
+TK_HERENAME = 'HERENAME'
+
+class Lexer:
+ """Main lexer.
+
+ Call add() until the script AST is returned.
+ """
+    # Here-document handling makes the whole thing more complex because it basically
+    # forces tokens to be reordered: here-content must come right after the operator
+ # and the here-document name, while some other tokens might be following the
+ # here-document expression on the same line.
+ #
+ # So, here-doc states are basically:
+ # *self._state==ST_NORMAL
+ # - self._heredoc.op is None: no here-document
+ # - self._heredoc.op is not None but name is: here-document operator matched,
+ # waiting for the document name/delimiter
+ # - self._heredoc.op and name are not None: here-document is ready, following
+ # tokens are being stored and will be pushed again when the document is
+ # completely parsed.
+ # *self._state==ST_HEREDOC
+ # - The here-document is being delimited by self._herelexer. Once it is done
+ # the content is pushed in front of the pending token list then all these
+ # tokens are pushed once again.
+ ST_NORMAL = 'ST_NORMAL'
+ ST_OP = 'ST_OP'
+ ST_BACKSLASH = 'ST_BACKSLASH'
+ ST_QUOTED = 'ST_QUOTED'
+ ST_COMMENT = 'ST_COMMENT'
+ ST_HEREDOC = 'ST_HEREDOC'
+
+ #Match end of backquote strings
+ RE_BACKQUOTE_END = re.compile(r'(?<!\\)(`)')
+
+ def __init__(self, parent_state = None):
+ self._input = []
+ self._pos = 0
+
+ self._token = ''
+ self._type = TK_TOKEN
+
+ self._state = self.ST_NORMAL
+ self._parent_state = parent_state
+ self._wordlexer = None
+
+ self._heredoc = HereDoc(None)
+ self._herelexer = None
+
+        ### Following attributes are not used for delimiting tokens and can safely
+        ### be changed after here-document detection (see _push_token)
+
+ # Count the number of tokens following a 'For' reserved word. Needed to
+ # return an 'In' reserved word if it comes in third place.
+ self._for_count = None
+
+ def add(self, data, eof=False):
+ """Feed the lexer with data.
+
+ When eof is set to True, returns unconsumed data or raise if the lexer
+ is in the middle of a delimiting operation.
+ Raise NeedMore otherwise.
+ """
+ self._input += list(data)
+ self._parse(eof)
+ self._input[:self._pos] = []
+ return ''.join(self._input)
+
+ def _parse(self, eof):
+ while self._state:
+ if self._pos>=len(self._input):
+ if not eof:
+ raise NeedMore()
+ elif self._state not in (self.ST_OP, self.ST_QUOTED, self.ST_HEREDOC):
+ #Delimit the current token and leave cleanly
+ self._push_token('')
+ break
+ else:
+                    #Let the sublexer handle the eof itself
+ pass
+
+ if self._state==self.ST_NORMAL:
+ self._parse_normal()
+ elif self._state==self.ST_COMMENT:
+ self._parse_comment()
+ elif self._state==self.ST_OP:
+ self._parse_op(eof)
+ elif self._state==self.ST_QUOTED:
+ self._parse_quoted(eof)
+ elif self._state==self.ST_HEREDOC:
+ self._parse_heredoc(eof)
+ else:
+ assert False, "Unknown state " + str(self._state)
+
+ if self._heredoc.op is not None:
+ raise ShellSyntaxError('missing here-document delimiter')
+
+ def _parse_normal(self):
+ c = self._input[self._pos]
+ if c=='\n':
+ self._push_token(c)
+ self._token = c
+ self._type = TK_NEWLINE
+ self._push_token('')
+ self._pos += 1
+ elif c in ('\\', '\'', '"', '`', '$'):
+ self._state = self.ST_QUOTED
+ elif is_partial_op(c):
+ self._push_token(c)
+
+ self._type = TK_OP
+ self._token += c
+ self._pos += 1
+ self._state = self.ST_OP
+ elif is_blank(c):
+ self._push_token(c)
+
+ #Discard blanks
+ self._pos += 1
+ elif self._token:
+ self._token += c
+ self._pos += 1
+ elif c=='#':
+ self._state = self.ST_COMMENT
+ self._type = TK_COMMENT
+ self._pos += 1
+ else:
+ self._pos += 1
+ self._token += c
+
+ def _parse_op(self, eof):
+ assert self._token
+
+ while 1:
+ if self._pos>=len(self._input):
+ if not eof:
+ raise NeedMore()
+ c = ''
+ else:
+ c = self._input[self._pos]
+
+ op = self._token + c
+ if c and is_partial_op(op):
+ #Still parsing an operator
+ self._token = op
+ self._pos += 1
+ else:
+ #End of operator
+ self._push_token(c)
+ self._state = self.ST_NORMAL
+ break
+
+ def _parse_comment(self):
+ while 1:
+ if self._pos>=len(self._input):
+ raise NeedMore()
+
+ c = self._input[self._pos]
+ if c=='\n':
+ #End of comment, do not consume the end of line
+ self._state = self.ST_NORMAL
+ break
+ else:
+ self._token += c
+ self._pos += 1
+
+ def _parse_quoted(self, eof):
+ """Precondition: the starting backquote/dollar is still in the input queue."""
+ if not self._wordlexer:
+ self._wordlexer = WordLexer()
+
+ if self._pos<len(self._input):
+ #Transfer input queue character into the subparser
+ input = self._input[self._pos:]
+ self._pos += len(input)
+
+ wtree, remaining = self._wordlexer.add(input, eof)
+ self._wordlexer = None
+ self._token += wordtree_as_string(wtree)
+
+ #Put unparsed character back in the input queue
+ if remaining:
+ self._input[self._pos:self._pos] = list(remaining)
+ self._state = self.ST_NORMAL
+
+ def _parse_heredoc(self, eof):
+ assert not self._token
+
+ if self._herelexer is None:
+ self._herelexer = HereDocLexer(self._heredoc.op, self._heredoc.name)
+
+ if self._pos<len(self._input):
+ #Transfer input queue character into the subparser
+ input = self._input[self._pos:]
+ self._pos += len(input)
+
+ self._token, remaining = self._herelexer.add(input, eof)
+
+ #Reset here-document state
+ self._herelexer = None
+ heredoc, self._heredoc = self._heredoc, HereDoc(None)
+ if remaining:
+ self._input[self._pos:self._pos] = list(remaining)
+ self._state = self.ST_NORMAL
+
+ #Push pending tokens
+ heredoc.pendings[:0] = [(self._token, self._type, heredoc.name)]
+ for token, type, delim in heredoc.pendings:
+ self._token = token
+ self._type = type
+ self._push_token(delim)
+
+ def _push_token(self, delim):
+ if not self._token:
+ return 0
+
+ if self._heredoc.op is not None:
+ if self._heredoc.name is None:
+ #Here-document name
+ if self._type!=TK_TOKEN:
+ raise ShellSyntaxError("expecting here-document name, got '%s'" % self._token)
+ self._heredoc.name = unquote_wordtree(make_wordtree(self._token))
+ self._type = TK_HERENAME
+ else:
+ #Capture all tokens until the newline starting the here-document
+ if self._type==TK_NEWLINE:
+ assert self._state==self.ST_NORMAL
+ self._state = self.ST_HEREDOC
+
+ self._heredoc.pendings.append((self._token, self._type, delim))
+ self._token = ''
+ self._type = TK_TOKEN
+ return 1
+
+ # BEWARE: do not change parser state from here to the end of the function:
+        # when parsing between a here-document operator and the end of the line,
+ # tokens are stored in self._heredoc.pendings. Therefore, they will not
+ # reach the section below.
+
+ #Check operators
+ if self._type==TK_OP:
+ #False positive because of partial op matching
+ op = is_op(self._token)
+ if not op:
+ self._type = TK_TOKEN
+ else:
+ #Map to the specific operator
+ self._type = op
+ if self._token in ('<<', '<<-'):
+ #Done here rather than in _parse_op because there is no need
+ #to change the parser state since we are still waiting for
+ #the here-document name
+ if self._heredoc.op is not None:
+ raise ShellSyntaxError("syntax error near token '%s'" % self._token)
+ assert self._heredoc.op is None
+ self._heredoc.op = self._token
+
+ if self._type==TK_TOKEN:
+ if '=' in self._token and not delim:
+ if self._token.startswith('='):
+ #Token is a WORD... a TOKEN that is.
+ pass
+ else:
+ prev = self._token[:self._token.find('=')]
+ if is_name(prev):
+ self._type = TK_ASSIGNMENT
+ else:
+ #Just a token (unspecified)
+ pass
+ else:
+ reserved = get_reserved(self._token)
+ if reserved is not None:
+ if reserved=='In' and self._for_count!=2:
+ #Sorry, not a reserved word after all
+ pass
+ else:
+ self._type = reserved
+ if reserved in ('For', 'Case'):
+ self._for_count = 0
+ elif are_digits(self._token) and delim in ('<', '>'):
+ #Detect IO_NUMBER
+ self._type = TK_IONUMBER
+ elif self._token==';':
+ self._type = TK_COMMA
+ elif self._token=='&':
+ self._type = TK_AMPERSAND
+ elif self._type==TK_COMMENT:
+ #Comments are not part of sh grammar, ignore them
+ self._token = ''
+ self._type = TK_TOKEN
+ return 0
+
+ if self._for_count is not None:
+ #Track token count in 'For' expression to detect 'In' reserved words.
+ #Can only be in third position, no need to go beyond
+ self._for_count += 1
+ if self._for_count==3:
+ self._for_count = None
+
+ self.on_token((self._token, self._type))
+ self._token = ''
+ self._type = TK_TOKEN
+ return 1
+
+ def on_token(self, token):
+ raise NotImplementedError
+
+
+tokens = [
+ TK_TOKEN,
+# To silence yacc unused token warnings
+# TK_COMMENT,
+ TK_NEWLINE,
+ TK_IONUMBER,
+ TK_ASSIGNMENT,
+ TK_HERENAME,
+]
+
+#Add specific operators
+tokens += _OPERATORS.values()
+#Add reserved words
+tokens += _RESERVEDS.values()
+
+class PLYLexer(Lexer):
+ """Bridge Lexer and PLY lexer interface."""
+ def __init__(self):
+ Lexer.__init__(self)
+ self._tokens = []
+ self._current = 0
+ self.lineno = 0
+
+ def on_token(self, token):
+ value, type = token
+
+ self.lineno = 0
+ t = lex.LexToken()
+ t.value = value
+ t.type = type
+ t.lexer = self
+ t.lexpos = 0
+ t.lineno = 0
+
+ self._tokens.append(t)
+
+ def is_empty(self):
+ return not bool(self._tokens)
+
+ #PLY compliant interface
+ def token(self):
+ if self._current>=len(self._tokens):
+ return None
+ t = self._tokens[self._current]
+ self._current += 1
+ return t
+
+
+def get_tokens(s):
+ """Parse the input string and return a tuple (tokens, unprocessed) where
+ tokens is a list of parsed tokens and unprocessed is the part of the input
+ string left untouched by the lexer.
+ """
+ lexer = PLYLexer()
+ untouched = lexer.add(s, True)
+ tokens = []
+ while 1:
+ token = lexer.token()
+ if token is None:
+ break
+ tokens.append(token)
+
+ tokens = [(t.value, t.type) for t in tokens]
+ return tokens, untouched
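+
+# Illustrative sketch, not in the original source:
+#     get_tokens('echo foo\n')
+# would return roughly ([('echo', 'TOKEN'), ('foo', 'TOKEN'),
+# ('\n', 'NEWLINE')], ''), with nothing left unprocessed.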
diff --git a/poky/bitbake/lib/bb/pysh/pyshyacc.py b/poky/bitbake/lib/bb/pysh/pyshyacc.py
new file mode 100644
index 000000000..ba4cefdcb
--- /dev/null
+++ b/poky/bitbake/lib/bb/pysh/pyshyacc.py
@@ -0,0 +1,779 @@
+# pyshyacc.py - PLY grammar definition for pysh
+#
+# Copyright 2007 Patrick Mezard
+#
+# This software may be used and distributed according to the terms
+# of the GNU General Public License, incorporated herein by reference.
+
+"""PLY grammar file.
+"""
+import os.path
+import sys
+
+import bb.pysh.pyshlex as pyshlex
+tokens = pyshlex.tokens
+
+from ply import yacc
+import bb.pysh.sherrors as sherrors
+
+class IORedirect:
+ def __init__(self, op, filename, io_number=None):
+ self.op = op
+ self.filename = filename
+ self.io_number = io_number
+
+class HereDocument:
+ def __init__(self, op, name, content, io_number=None):
+ self.op = op
+ self.name = name
+ self.content = content
+ self.io_number = io_number
+
+def make_io_redirect(p):
+ """Make an IORedirect instance from the input 'io_redirect' production."""
+ name, io_number, io_target = p
+ assert name=='io_redirect'
+
+ if io_target[0]=='io_file':
+ io_type, io_op, io_file = io_target
+ return IORedirect(io_op, io_file, io_number)
+ elif io_target[0]=='io_here':
+ io_type, io_op, io_name, io_content = io_target
+ return HereDocument(io_op, io_name, io_content, io_number)
+ else:
+ assert False, "Invalid IO redirection token %s" % repr(io_type)
+
+class SimpleCommand:
+ """
+ assigns contains (name, value) pairs.
+ """
+ def __init__(self, words, redirs, assigns):
+ self.words = list(words)
+ self.redirs = list(redirs)
+ self.assigns = list(assigns)
+
+class Pipeline:
+ def __init__(self, commands, reverse_status=False):
+ self.commands = list(commands)
+ assert self.commands #Grammar forbids this
+ self.reverse_status = reverse_status
+
+class AndOr:
+ def __init__(self, op, left, right):
+ self.op = str(op)
+ self.left = left
+ self.right = right
+
+class ForLoop:
+ def __init__(self, name, items, cmds):
+ self.name = str(name)
+ self.items = list(items)
+ self.cmds = list(cmds)
+
+class WhileLoop:
+ def __init__(self, condition, cmds):
+ self.condition = list(condition)
+ self.cmds = list(cmds)
+
+class UntilLoop:
+ def __init__(self, condition, cmds):
+ self.condition = list(condition)
+ self.cmds = list(cmds)
+
+class FunDef:
+ def __init__(self, name, body):
+ self.name = str(name)
+ self.body = body
+
+class BraceGroup:
+ def __init__(self, cmds):
+ self.cmds = list(cmds)
+
+class IfCond:
+ def __init__(self, cond, if_cmds, else_cmds):
+ self.cond = list(cond)
+ self.if_cmds = if_cmds
+ self.else_cmds = else_cmds
+
+class Case:
+ def __init__(self, name, items):
+ self.name = name
+ self.items = items
+
+class SubShell:
+ def __init__(self, cmds):
+ self.cmds = cmds
+
+class RedirectList:
+ def __init__(self, cmd, redirs):
+ self.cmd = cmd
+ self.redirs = list(redirs)
+
+def get_production(productions, ptype):
+ """productions must be a list of production tuples like (name, obj) where
+ name is the production string identifier.
+    Return the first production named 'ptype'. Raise KeyError if none can be
+ found.
+ """
+ for production in productions:
+ if production is not None and production[0]==ptype:
+ return production
+ raise KeyError(ptype)
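+
+# Illustrative sketch, not in the original source:
+#     get_production([('term', 't'), ('separator', ';')], 'separator')
+# returns ('separator', ';'), while an absent name raises KeyError.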
+
+#-------------------------------------------------------------------------------
+# PLY grammar definition
+#-------------------------------------------------------------------------------
+
+def p_multiple_commands(p):
+ """multiple_commands : newline_sequence
+ | complete_command
+ | multiple_commands complete_command"""
+ if len(p)==2:
+ if p[1] is not None:
+ p[0] = [p[1]]
+ else:
+ p[0] = []
+ else:
+ p[0] = p[1] + [p[2]]
+
+def p_complete_command(p):
+ """complete_command : list separator
+ | list"""
+ if len(p)==3 and p[2] and p[2][1] == '&':
+ p[0] = ('async', p[1])
+ else:
+ p[0] = p[1]
+
+def p_list(p):
+ """list : list separator_op and_or
+ | and_or"""
+ if len(p)==2:
+ p[0] = [p[1]]
+ else:
+ #if p[2]!=';':
+ # raise NotImplementedError('AND-OR list asynchronous execution is not implemented')
+ p[0] = p[1] + [p[3]]
+
+def p_and_or(p):
+ """and_or : pipeline
+ | and_or AND_IF linebreak pipeline
+ | and_or OR_IF linebreak pipeline"""
+ if len(p)==2:
+ p[0] = p[1]
+ else:
+ p[0] = ('and_or', AndOr(p[2], p[1], p[4]))
+
+def p_maybe_bang_word(p):
+ """maybe_bang_word : Bang"""
+ p[0] = ('maybe_bang_word', p[1])
+
+def p_pipeline(p):
+ """pipeline : pipe_sequence
+ | bang_word pipe_sequence"""
+ if len(p)==3:
+ p[0] = ('pipeline', Pipeline(p[2][1:], True))
+ else:
+ p[0] = ('pipeline', Pipeline(p[1][1:]))
+
+def p_pipe_sequence(p):
+ """pipe_sequence : command
+ | pipe_sequence PIPE linebreak command"""
+ if len(p)==2:
+ p[0] = ['pipe_sequence', p[1]]
+ else:
+ p[0] = p[1] + [p[4]]
+
+def p_command(p):
+ """command : simple_command
+ | compound_command
+ | compound_command redirect_list
+ | function_definition"""
+
+ if p[1][0] in ( 'simple_command',
+ 'for_clause',
+ 'while_clause',
+ 'until_clause',
+ 'case_clause',
+ 'if_clause',
+ 'function_definition',
+ 'subshell',
+ 'brace_group',):
+ if len(p) == 2:
+ p[0] = p[1]
+ else:
+ p[0] = ('redirect_list', RedirectList(p[1], p[2][1:]))
+ else:
+ raise NotImplementedError('%s command is not implemented' % repr(p[1][0]))
+
+def p_compound_command(p):
+ """compound_command : brace_group
+ | subshell
+ | for_clause
+ | case_clause
+ | if_clause
+ | while_clause
+ | until_clause"""
+ p[0] = p[1]
+
+def p_subshell(p):
+ """subshell : LPARENS compound_list RPARENS"""
+ p[0] = ('subshell', SubShell(p[2][1:]))
+
+def p_compound_list(p):
+ """compound_list : term
+ | newline_list term
+ | term separator
+ | newline_list term separator"""
+ productions = p[1:]
+ try:
+ sep = get_production(productions, 'separator')
+ if sep[1]!=';':
+ raise NotImplementedError()
+ except KeyError:
+ pass
+ term = get_production(productions, 'term')
+ p[0] = ['compound_list'] + term[1:]
+
+def p_term(p):
+ """term : term separator and_or
+ | and_or"""
+ if len(p)==2:
+ p[0] = ['term', p[1]]
+ else:
+ if p[2] is not None and p[2][1] == '&':
+ p[0] = ['term', ('async', p[1][1:])] + [p[3]]
+ else:
+ p[0] = p[1] + [p[3]]
+
+def p_maybe_for_word(p):
+ # Rearrange 'For' priority wrt TOKEN. See p_for_word
+ """maybe_for_word : For"""
+ p[0] = ('maybe_for_word', p[1])
+
+def p_for_clause(p):
+ """for_clause : for_word name linebreak do_group
+ | for_word name linebreak in sequential_sep do_group
+ | for_word name linebreak in wordlist sequential_sep do_group"""
+ productions = p[1:]
+ do_group = get_production(productions, 'do_group')
+ try:
+ items = get_production(productions, 'in')[1:]
+ except KeyError:
+ raise NotImplementedError('"in" omission is not implemented')
+
+ try:
+ items = get_production(productions, 'wordlist')[1:]
+ except KeyError:
+ items = []
+
+ name = p[2]
+ p[0] = ('for_clause', ForLoop(name, items, do_group[1:]))
+
+def p_name(p):
+ """name : token""" #Was NAME instead of token
+ p[0] = p[1]
+
+def p_in(p):
+ """in : In"""
+ p[0] = ('in', p[1])
+
+def p_wordlist(p):
+ """wordlist : wordlist token
+ | token"""
+ if len(p)==2:
+ p[0] = ['wordlist', ('TOKEN', p[1])]
+ else:
+ p[0] = p[1] + [('TOKEN', p[2])]
+
+def p_case_clause(p):
+ """case_clause : Case token linebreak in linebreak case_list Esac
+ | Case token linebreak in linebreak case_list_ns Esac
+ | Case token linebreak in linebreak Esac"""
+ if len(p) < 8:
+ items = []
+ else:
+ items = p[6][1:]
+ name = p[2]
+ p[0] = ('case_clause', Case(name, [c[1] for c in items]))
+
+def p_case_list_ns(p):
+ """case_list_ns : case_list case_item_ns
+ | case_item_ns"""
+ p_case_list(p)
+
+def p_case_list(p):
+ """case_list : case_list case_item
+ | case_item"""
+ if len(p)==2:
+ p[0] = ['case_list', p[1]]
+ else:
+ p[0] = p[1] + [p[2]]
+
+def p_case_item_ns(p):
+ """case_item_ns : pattern RPARENS linebreak
+ | pattern RPARENS compound_list linebreak
+ | LPARENS pattern RPARENS linebreak
+ | LPARENS pattern RPARENS compound_list linebreak"""
+ p_case_item(p)
+
+def p_case_item(p):
+ """case_item : pattern RPARENS linebreak DSEMI linebreak
+ | pattern RPARENS compound_list DSEMI linebreak
+ | LPARENS pattern RPARENS linebreak DSEMI linebreak
+ | LPARENS pattern RPARENS compound_list DSEMI linebreak"""
+ if len(p) < 7:
+ name = p[1][1:]
+ else:
+ name = p[2][1:]
+
+ try:
+ cmds = get_production(p[1:], "compound_list")[1:]
+ except KeyError:
+ cmds = []
+
+ p[0] = ('case_item', (name, cmds))
+
+def p_pattern(p):
+ """pattern : token
+ | pattern PIPE token"""
+ if len(p)==2:
+ p[0] = ['pattern', ('TOKEN', p[1])]
+ else:
+ p[0] = p[1] + [('TOKEN', p[2])]
+
+def p_maybe_if_word(p):
+ # Rearrange 'If' priority wrt TOKEN. See p_if_word
+ """maybe_if_word : If"""
+ p[0] = ('maybe_if_word', p[1])
+
+def p_maybe_then_word(p):
+ # Rearrange 'Then' priority wrt TOKEN. See p_then_word
+ """maybe_then_word : Then"""
+ p[0] = ('maybe_then_word', p[1])
+
+def p_if_clause(p):
+ """if_clause : if_word compound_list then_word compound_list else_part Fi
+ | if_word compound_list then_word compound_list Fi"""
+ else_part = []
+ if len(p)==7:
+ else_part = p[5]
+ p[0] = ('if_clause', IfCond(p[2][1:], p[4][1:], else_part))
+
+def p_else_part(p):
+ """else_part : Elif compound_list then_word compound_list else_part
+ | Elif compound_list then_word compound_list
+ | Else compound_list"""
+ if len(p)==3:
+ p[0] = p[2][1:]
+ else:
+ else_part = []
+ if len(p)==6:
+ else_part = p[5]
+ p[0] = ('elif', IfCond(p[2][1:], p[4][1:], else_part))
+
+def p_while_clause(p):
+ """while_clause : While compound_list do_group"""
+ p[0] = ('while_clause', WhileLoop(p[2][1:], p[3][1:]))
+
+def p_maybe_until_word(p):
+ # Rearrange 'Until' priority wrt TOKEN. See p_until_word
+ """maybe_until_word : Until"""
+ p[0] = ('maybe_until_word', p[1])
+
+def p_until_clause(p):
+ """until_clause : until_word compound_list do_group"""
+ p[0] = ('until_clause', UntilLoop(p[2][1:], p[3][1:]))
+
+def p_function_definition(p):
+ """function_definition : fname LPARENS RPARENS linebreak function_body"""
+ p[0] = ('function_definition', FunDef(p[1], p[5]))
+
+def p_function_body(p):
+ """function_body : compound_command
+ | compound_command redirect_list"""
+ if len(p)!=2:
+        raise NotImplementedError('function redirection lists are not implemented')
+ p[0] = p[1]
+
+def p_fname(p):
+ """fname : TOKEN""" #Was NAME instead of token
+ p[0] = p[1]
+
+def p_brace_group(p):
+ """brace_group : Lbrace compound_list Rbrace"""
+ p[0] = ('brace_group', BraceGroup(p[2][1:]))
+
+def p_maybe_done_word(p):
+ #See p_assignment_word for details.
+ """maybe_done_word : Done"""
+ p[0] = ('maybe_done_word', p[1])
+
+def p_maybe_do_word(p):
+ """maybe_do_word : Do"""
+ p[0] = ('maybe_do_word', p[1])
+
+def p_do_group(p):
+ """do_group : do_word compound_list done_word"""
+ #Do group contains a list of AndOr
+ p[0] = ['do_group'] + p[2][1:]
+
+def p_simple_command(p):
+ """simple_command : cmd_prefix cmd_word cmd_suffix
+ | cmd_prefix cmd_word
+ | cmd_prefix
+ | cmd_name cmd_suffix
+ | cmd_name"""
+ words, redirs, assigns = [], [], []
+ for e in p[1:]:
+ name = e[0]
+ if name in ('cmd_prefix', 'cmd_suffix'):
+ for sube in e[1:]:
+ subname = sube[0]
+ if subname=='io_redirect':
+ redirs.append(make_io_redirect(sube))
+ elif subname=='ASSIGNMENT_WORD':
+ assigns.append(sube)
+ else:
+ words.append(sube)
+ elif name in ('cmd_word', 'cmd_name'):
+ words.append(e)
+
+ cmd = SimpleCommand(words, redirs, assigns)
+ p[0] = ('simple_command', cmd)
+
+def p_cmd_name(p):
+ """cmd_name : TOKEN"""
+ p[0] = ('cmd_name', p[1])
+
+def p_cmd_word(p):
+ """cmd_word : token"""
+ p[0] = ('cmd_word', p[1])
+
+def p_maybe_assignment_word(p):
+ #See p_assignment_word for details.
+ """maybe_assignment_word : ASSIGNMENT_WORD"""
+ p[0] = ('maybe_assignment_word', p[1])
+
+def p_cmd_prefix(p):
+ """cmd_prefix : io_redirect
+ | cmd_prefix io_redirect
+ | assignment_word
+ | cmd_prefix assignment_word"""
+ try:
+ prefix = get_production(p[1:], 'cmd_prefix')
+ except KeyError:
+ prefix = ['cmd_prefix']
+
+ try:
+ value = get_production(p[1:], 'assignment_word')[1]
+ value = ('ASSIGNMENT_WORD', value.split('=', 1))
+ except KeyError:
+ value = get_production(p[1:], 'io_redirect')
+ p[0] = prefix + [value]
+
+def p_cmd_suffix(p):
+ """cmd_suffix : io_redirect
+ | cmd_suffix io_redirect
+ | token
+ | cmd_suffix token
+ | maybe_for_word
+ | cmd_suffix maybe_for_word
+ | maybe_done_word
+ | cmd_suffix maybe_done_word
+ | maybe_do_word
+ | cmd_suffix maybe_do_word
+ | maybe_until_word
+ | cmd_suffix maybe_until_word
+ | maybe_assignment_word
+ | cmd_suffix maybe_assignment_word
+ | maybe_if_word
+ | cmd_suffix maybe_if_word
+ | maybe_then_word
+ | cmd_suffix maybe_then_word
+ | maybe_bang_word
+ | cmd_suffix maybe_bang_word"""
+ try:
+ suffix = get_production(p[1:], 'cmd_suffix')
+ token = p[2]
+ except KeyError:
+ suffix = ['cmd_suffix']
+ token = p[1]
+
+ if isinstance(token, tuple):
+ if token[0]=='io_redirect':
+ p[0] = suffix + [token]
+ else:
+ #Convert maybe_* to TOKEN if necessary
+ p[0] = suffix + [('TOKEN', token[1])]
+ else:
+ p[0] = suffix + [('TOKEN', token)]
+
+def p_redirect_list(p):
+ """redirect_list : io_redirect
+ | redirect_list io_redirect"""
+ if len(p) == 2:
+ p[0] = ['redirect_list', make_io_redirect(p[1])]
+ else:
+ p[0] = p[1] + [make_io_redirect(p[2])]
+
+def p_io_redirect(p):
+ """io_redirect : io_file
+ | IO_NUMBER io_file
+ | io_here
+ | IO_NUMBER io_here"""
+ if len(p)==3:
+ p[0] = ('io_redirect', p[1], p[2])
+ else:
+ p[0] = ('io_redirect', None, p[1])
+
+def p_io_file(p):
+ #Return the tuple (operator, filename)
+ """io_file : LESS filename
+ | LESSAND filename
+ | GREATER filename
+ | GREATAND filename
+ | DGREAT filename
+ | LESSGREAT filename
+ | CLOBBER filename"""
+ #Extract the filename from the file
+ p[0] = ('io_file', p[1], p[2][1])
+
+def p_filename(p):
+ #Return the filename
+ """filename : TOKEN"""
+ p[0] = ('filename', p[1])
+
+def p_io_here(p):
+ """io_here : DLESS here_end
+ | DLESSDASH here_end"""
+ p[0] = ('io_here', p[1], p[2][1], p[2][2])
+
+def p_here_end(p):
+ """here_end : HERENAME TOKEN"""
+ p[0] = ('here_document', p[1], p[2])
+
+def p_newline_sequence(p):
+ # Nothing in the grammar can handle leading NEWLINE productions, so add
+    # this one with the lowest possible priority relative to newline_list.
+ """newline_sequence : newline_list"""
+ p[0] = None
+
+def p_newline_list(p):
+ """newline_list : NEWLINE
+ | newline_list NEWLINE"""
+ p[0] = None
+
+def p_linebreak(p):
+ """linebreak : newline_list
+ | empty"""
+ p[0] = None
+
+def p_separator_op(p):
+ """separator_op : COMMA
+ | AMP"""
+ p[0] = p[1]
+
+def p_separator(p):
+ """separator : separator_op linebreak
+ | newline_list"""
+ if len(p)==2:
+ #Ignore newlines
+ p[0] = None
+ else:
+ #Keep the separator operator
+ p[0] = ('separator', p[1])
+
+def p_sequential_sep(p):
+ """sequential_sep : COMMA linebreak
+ | newline_list"""
+ p[0] = None
+
+# Low priority TOKEN => for_word conversion.
+# Let maybe_for_word be used as a token when necessary in higher priority
+# rules.
+def p_for_word(p):
+ """for_word : maybe_for_word"""
+ p[0] = p[1]
+
+def p_if_word(p):
+ """if_word : maybe_if_word"""
+ p[0] = p[1]
+
+def p_then_word(p):
+ """then_word : maybe_then_word"""
+ p[0] = p[1]
+
+def p_done_word(p):
+ """done_word : maybe_done_word"""
+ p[0] = p[1]
+
+def p_do_word(p):
+ """do_word : maybe_do_word"""
+ p[0] = p[1]
+
+def p_until_word(p):
+ """until_word : maybe_until_word"""
+ p[0] = p[1]
+
+def p_assignment_word(p):
+ """assignment_word : maybe_assignment_word"""
+ p[0] = ('assignment_word', p[1][1])
+
+def p_bang_word(p):
+ """bang_word : maybe_bang_word"""
+ p[0] = ('bang_word', p[1][1])
+
+def p_token(p):
+ """token : TOKEN
+ | Fi"""
+ p[0] = p[1]
+
+def p_empty(p):
+ 'empty :'
+ p[0] = None
+
+# Error rule for syntax errors
+def p_error(p):
+ msg = []
+ w = msg.append
+ w('%r\n' % p)
+ w('followed by:\n')
+ for i in range(5):
+ n = yacc.token()
+ if not n:
+ break
+ w(' %r\n' % n)
+ raise sherrors.ShellSyntaxError(''.join(msg))
+
+# Build the parser
+try:
+ import pyshtables
+except ImportError:
+ outputdir = os.path.dirname(__file__)
+ if not os.access(outputdir, os.W_OK):
+ outputdir = ''
+ yacc.yacc(tabmodule = 'pyshtables', outputdir = outputdir, debug = 0)
+else:
+ yacc.yacc(tabmodule = 'pysh.pyshtables', write_tables = 0, debug = 0)
+
+
+def parse(input, eof=False, debug=False):
+ """Parse a whole script at once and return the generated AST and unconsumed
+ data in a tuple.
+
+ NOTE: eof is probably meaningless for now, the parser being unable to work
+ in pull mode. It should be set to True.
+ """
+ lexer = pyshlex.PLYLexer()
+ remaining = lexer.add(input, eof)
+ if lexer.is_empty():
+ return [], remaining
+ if debug:
+ debug = 2
+ return yacc.parse(lexer=lexer, debug=debug), remaining
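+
+# Illustrative sketch, not in the original source:
+#     cmds, remaining = parse('echo hello\n', eof=True)
+# cmds nests tuples such as ('pipeline', Pipeline(...)) wrapping
+# SimpleCommand instances, and remaining is the unconsumed input ('').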
+
+#-------------------------------------------------------------------------------
+# AST rendering helpers
+#-------------------------------------------------------------------------------
+
+def format_commands(v):
+ """Return a tree made of strings and lists. Make command trees easier to
+ display.
+ """
+ if isinstance(v, list):
+ return [format_commands(c) for c in v]
+ if isinstance(v, tuple):
+ if len(v)==2 and isinstance(v[0], str) and not isinstance(v[1], str):
+ if v[0] == 'async':
+ return ['AsyncList', map(format_commands, v[1])]
+ else:
+ #Avoid decomposing tuples like ('pipeline', Pipeline(...))
+ return format_commands(v[1])
+ return format_commands(list(v))
+ elif isinstance(v, IfCond):
+ name = ['IfCond']
+ name += ['if', map(format_commands, v.cond)]
+ name += ['then', map(format_commands, v.if_cmds)]
+ name += ['else', map(format_commands, v.else_cmds)]
+ return name
+ elif isinstance(v, ForLoop):
+ name = ['ForLoop']
+ name += [repr(v.name)+' in ', map(str, v.items)]
+ name += ['commands', map(format_commands, v.cmds)]
+ return name
+ elif isinstance(v, AndOr):
+ return [v.op, format_commands(v.left), format_commands(v.right)]
+ elif isinstance(v, Pipeline):
+ name = 'Pipeline'
+ if v.reverse_status:
+ name = '!' + name
+ return [name, format_commands(v.commands)]
+    elif isinstance(v, Case):
+        name = ['Case']
+        name += [v.name, format_commands(v.items)]
+        return name
+ elif isinstance(v, SimpleCommand):
+ name = ['SimpleCommand']
+ if v.words:
+ name += ['words', map(str, v.words)]
+ if v.assigns:
+ assigns = [tuple(a[1]) for a in v.assigns]
+ name += ['assigns', map(str, assigns)]
+ if v.redirs:
+ name += ['redirs', map(format_commands, v.redirs)]
+ return name
+ elif isinstance(v, RedirectList):
+ name = ['RedirectList']
+ if v.redirs:
+ name += ['redirs', map(format_commands, v.redirs)]
+ name += ['command', format_commands(v.cmd)]
+ return name
+ elif isinstance(v, IORedirect):
+ return ' '.join(map(str, (v.io_number, v.op, v.filename)))
+ elif isinstance(v, HereDocument):
+ return ' '.join(map(str, (v.io_number, v.op, repr(v.name), repr(v.content))))
+ elif isinstance(v, SubShell):
+ return ['SubShell', map(format_commands, v.cmds)]
+ else:
+ return repr(v)
+
+def print_commands(cmds, output=sys.stdout):
+ """Pretty print a command tree."""
+ def print_tree(cmd, spaces, output):
+ if isinstance(cmd, list):
+ for c in cmd:
+ print_tree(c, spaces + 3, output)
+ else:
+ print >>output, ' '*spaces + str(cmd)
+
+ formatted = format_commands(cmds)
+ print_tree(formatted, 0, output)
+
+
+def stringify_commands(cmds):
+ """Serialize a command tree as a string.
+
+ Returned string is not pretty and is currently used for unit tests only.
+ """
+ def stringify(value):
+ output = []
+ if isinstance(value, list):
+ formatted = []
+ for v in value:
+ formatted.append(stringify(v))
+ formatted = ' '.join(formatted)
+ output.append(''.join(['<', formatted, '>']))
+ else:
+ output.append(value)
+ return ' '.join(output)
+
+ return stringify(format_commands(cmds))
+
+
+def visit_commands(cmds, callable):
+ """Visit the command tree and execute callable on every Pipeline and
+ SimpleCommand instances.
+ """
+ if isinstance(cmds, (tuple, list)):
+ map(lambda c: visit_commands(c,callable), cmds)
+ elif isinstance(cmds, (Pipeline, SimpleCommand)):
+ callable(cmds)
diff --git a/poky/bitbake/lib/bb/pysh/sherrors.py b/poky/bitbake/lib/bb/pysh/sherrors.py
new file mode 100644
index 000000000..49d0533de
--- /dev/null
+++ b/poky/bitbake/lib/bb/pysh/sherrors.py
@@ -0,0 +1,41 @@
+# sherrors.py - shell errors and signals
+#
+# Copyright 2007 Patrick Mezard
+#
+# This software may be used and distributed according to the terms
+# of the GNU General Public License, incorporated herein by reference.
+
+"""Define shell exceptions and error codes.
+"""
+
+class ShellError(Exception):
+ pass
+
+class ShellSyntaxError(ShellError):
+ pass
+
+class UtilityError(ShellError):
+ """Raised upon utility syntax error (option or operand error)."""
+ pass
+
+class ExpansionError(ShellError):
+ pass
+
+class CommandNotFound(ShellError):
+ """Specified command was not found."""
+ pass
+
+class RedirectionError(ShellError):
+ pass
+
+class VarAssignmentError(ShellError):
+ """Variable assignment error."""
+ pass
+
+class ExitSignal(ShellError):
+ """Exit signal."""
+ pass
+
+class ReturnSignal(ShellError):
+    """Return signal."""
+    pass
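Because everything above derives from ShellError, one handler can catch the whole family; a minimal sketch:

    try:
        raise CommandNotFound('frobnicate')  # hypothetical missing command
    except ShellError as exc:
        # Also catches syntax, expansion, redirection and assignment
        # errors, plus the exit/return signals.
        print('shell error: %s' % exc)
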
diff --git a/poky/bitbake/lib/bb/pysh/subprocess_fix.py b/poky/bitbake/lib/bb/pysh/subprocess_fix.py
new file mode 100644
index 000000000..46eca2280
--- /dev/null
+++ b/poky/bitbake/lib/bb/pysh/subprocess_fix.py
@@ -0,0 +1,77 @@
+# subprocess - Subprocesses with accessible I/O streams
+#
+# For more information about this module, see PEP 324.
+#
+# This module should remain compatible with Python 2.2, see PEP 291.
+#
+# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
+#
+# Licensed to PSF under a Contributor Agreement.
+# See http://www.python.org/2.4/license for licensing details.
+
+def list2cmdline(seq):
+ """
+ Translate a sequence of arguments into a command line
+ string, using the same rules as the MS C runtime:
+
+ 1) Arguments are delimited by white space, which is either a
+ space or a tab.
+
+ 2) A string surrounded by double quotation marks is
+ interpreted as a single argument, regardless of white space
+ contained within. A quoted string can be embedded in an
+ argument.
+
+ 3) A double quotation mark preceded by a backslash is
+ interpreted as a literal double quotation mark.
+
+ 4) Backslashes are interpreted literally, unless they
+ immediately precede a double quotation mark.
+
+ 5) If backslashes immediately precede a double quotation mark,
+ every pair of backslashes is interpreted as a literal
+ backslash. If the number of backslashes is odd, the last
+ backslash escapes the next double quotation mark as
+ described in rule 3.
+ """
+
+ # See
+ # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
+ result = []
+ needquote = False
+ for arg in seq:
+ bs_buf = []
+
+ # Add a space to separate this argument from the others
+ if result:
+ result.append(' ')
+
+ needquote = (" " in arg) or ("\t" in arg) or ("|" in arg) or arg == ""
+ if needquote:
+ result.append('"')
+
+ for c in arg:
+ if c == '\\':
+ # Don't know if we need to double yet.
+ bs_buf.append(c)
+ elif c == '"':
+                # Double any preceding backslashes, then escape the quote.
+ result.append('\\' * len(bs_buf)*2)
+ bs_buf = []
+ result.append('\\"')
+ else:
+ # Normal char
+ if bs_buf:
+ result.extend(bs_buf)
+ bs_buf = []
+ result.append(c)
+
+        # Add remaining backslashes, if any.
+ if bs_buf:
+ result.extend(bs_buf)
+
+ if needquote:
+ result.extend(bs_buf)
+ result.append('"')
+
+ return ''.join(result)
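The rules above, spelled out as assertions:

    assert list2cmdline(['ab']) == 'ab'            # nothing needs quoting
    assert list2cmdline(['a b']) == '"a b"'        # rules 1-2: whitespace forces quotes
    assert list2cmdline(['a"b']) == 'a\\"b'        # rule 3: backslash-escape the quote
    assert list2cmdline(['a b\\']) == '"a b\\\\"'  # rule 5: trailing backslash doubled
                                                   # before the closing quote
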
diff --git a/poky/bitbake/lib/bb/remotedata.py b/poky/bitbake/lib/bb/remotedata.py
new file mode 100644
index 000000000..68ecffc19
--- /dev/null
+++ b/poky/bitbake/lib/bb/remotedata.py
@@ -0,0 +1,116 @@
+"""
+BitBake 'remotedata' module
+
+Provides support for using a datastore from the bitbake client
+"""
+
+# Copyright (C) 2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import bb.data
+
+class RemoteDatastores:
+ """Used on the server side to manage references to server-side datastores"""
+ def __init__(self, cooker):
+ self.cooker = cooker
+ self.datastores = {}
+ self.locked = []
+ self.nextindex = 1
+
+ def __len__(self):
+ return len(self.datastores)
+
+ def __getitem__(self, key):
+ if key is None:
+ return self.cooker.data
+ else:
+ return self.datastores[key]
+
+ def items(self):
+ return self.datastores.items()
+
+ def store(self, d, locked=False):
+ """
+ Put a datastore into the collection. If locked=True then the datastore
+ is understood to be managed externally and cannot be released by calling
+ release().
+ """
+ idx = self.nextindex
+ self.datastores[idx] = d
+ if locked:
+ self.locked.append(idx)
+ self.nextindex += 1
+ return idx
+
+ def check_store(self, d, locked=False):
+ """
+ Put a datastore into the collection if it's not already in there;
+ in either case return the index
+ """
+ for key, val in self.datastores.items():
+ if val is d:
+ idx = key
+ break
+ else:
+ idx = self.store(d, locked)
+ return idx
+
+ def release(self, idx):
+ """Discard a datastore in the collection"""
+ if idx in self.locked:
+ raise Exception('Tried to release locked datastore %d' % idx)
+ del self.datastores[idx]
+
+ def receive_datastore(self, remote_data):
+ """Receive a datastore object sent from the client (as prepared by transmit_datastore())"""
+ dct = dict(remote_data)
+ d = bb.data_smart.DataSmart()
+ d.dict = dct
+ while True:
+ if '_remote_data' in dct:
+ dsindex = dct['_remote_data']['_content']
+ del dct['_remote_data']
+ if dsindex is None:
+ dct['_data'] = self.cooker.data.dict
+ else:
+ dct['_data'] = self.datastores[dsindex].dict
+ break
+ elif '_data' in dct:
+ idct = dict(dct['_data'])
+ dct['_data'] = idct
+ dct = idct
+ else:
+ break
+ return d
+
+ @staticmethod
+ def transmit_datastore(d):
+ """Prepare a datastore object for sending over IPC from the client end"""
+ # FIXME content might be a dict, need to turn that into a list as well
+ def copy_dicts(dct):
+ if '_remote_data' in dct:
+ dsindex = dct['_remote_data']['_content'].dsindex
+ newdct = dct.copy()
+ newdct['_remote_data'] = {'_content': dsindex}
+ return list(newdct.items())
+ elif '_data' in dct:
+ newdct = dct.copy()
+ newdata = copy_dicts(dct['_data'])
+ if newdata:
+ newdct['_data'] = newdata
+ return list(newdct.items())
+ return None
+ main_dict = copy_dicts(d.dict)
+ return main_dict
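A minimal sketch of the index bookkeeping, using a stand-in cooker object (only its data attribute is touched here):

    class FakeCooker:                       # stand-in, not the real cooker
        data = object()

    rd = RemoteDatastores(FakeCooker())
    d = {'PN': 'foo'}                       # any object can be tracked
    idx = rd.store(d)                       # indices start at 1
    assert rd.check_store(d) == idx         # identity match reuses the index
    assert rd[None] is FakeCooker.data      # None maps to the cooker datastore
    rd.release(idx)                         # unlocked entries can be dropped
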
diff --git a/poky/bitbake/lib/bb/runqueue.py b/poky/bitbake/lib/bb/runqueue.py
new file mode 100644
index 000000000..f2e52cf75
--- /dev/null
+++ b/poky/bitbake/lib/bb/runqueue.py
@@ -0,0 +1,2682 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'RunQueue' implementation
+
+Handles preparation and execution of a queue of tasks
+"""
+
+# Copyright (C) 2006-2007 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import copy
+import os
+import sys
+import signal
+import stat
+import fcntl
+import errno
+import logging
+import re
+import bb
+from bb import msg, data, event
+from bb import monitordisk
+import subprocess
+import pickle
+from multiprocessing import Process
+
+bblogger = logging.getLogger("BitBake")
+logger = logging.getLogger("BitBake.RunQueue")
+
+__find_md5__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{32}(?![a-z0-9])' )
+
+def fn_from_tid(tid):
+ return tid.rsplit(":", 1)[0]
+
+def taskname_from_tid(tid):
+ return tid.rsplit(":", 1)[1]
+
+def split_tid(tid):
+ (mc, fn, taskname, _) = split_tid_mcfn(tid)
+ return (mc, fn, taskname)
+
+def split_tid_mcfn(tid):
+ if tid.startswith('multiconfig:'):
+ elems = tid.split(':')
+ mc = elems[1]
+ fn = ":".join(elems[2:-1])
+ taskname = elems[-1]
+ mcfn = "multiconfig:" + mc + ":" + fn
+ else:
+ tid = tid.rsplit(":", 1)
+ mc = ""
+ fn = tid[0]
+ taskname = tid[1]
+ mcfn = fn
+
+ return (mc, fn, taskname, mcfn)
+
+def build_tid(mc, fn, taskname):
+ if mc:
+ return "multiconfig:" + mc + ":" + fn + ":" + taskname
+ return fn + ":" + taskname
+
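The tid helpers round-trip as follows (recipe paths are hypothetical):

    tid = build_tid('', '/path/to/recipe.bb', 'do_compile')
    assert tid == '/path/to/recipe.bb:do_compile'
    assert fn_from_tid(tid) == '/path/to/recipe.bb'
    assert taskname_from_tid(tid) == 'do_compile'
    assert split_tid_mcfn('multiconfig:qemu:/path/to/recipe.bb:do_compile') == \
        ('qemu', '/path/to/recipe.bb', 'do_compile',
         'multiconfig:qemu:/path/to/recipe.bb')
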
+class RunQueueStats:
+ """
+ Holds statistics on the tasks handled by the associated runQueue
+ """
+ def __init__(self, total):
+ self.completed = 0
+ self.skipped = 0
+ self.failed = 0
+ self.active = 0
+ self.total = total
+
+ def copy(self):
+ obj = self.__class__(self.total)
+ obj.__dict__.update(self.__dict__)
+ return obj
+
+ def taskFailed(self):
+ self.active = self.active - 1
+ self.failed = self.failed + 1
+
+ def taskCompleted(self, number = 1):
+ self.active = self.active - number
+ self.completed = self.completed + number
+
+ def taskSkipped(self, number = 1):
+ self.active = self.active + number
+ self.skipped = self.skipped + number
+
+ def taskActive(self):
+ self.active = self.active + 1
+
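The counters move like this (note that taskSkipped() also raises the active count, which a later taskCompleted() is expected to bring back down):

    stats = RunQueueStats(total=3)
    stats.taskActive()       # active=1
    stats.taskCompleted()    # active=0, completed=1
    stats.taskSkipped()      # active=1, skipped=1
    assert (stats.completed, stats.skipped, stats.active) == (1, 1, 1)
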
+# These values indicate the next step due to be run in the
+# runQueue state machine
+runQueuePrepare = 2
+runQueueSceneInit = 3
+runQueueSceneRun = 4
+runQueueRunInit = 5
+runQueueRunning = 6
+runQueueFailed = 7
+runQueueCleanUp = 8
+runQueueComplete = 9
+
+class RunQueueScheduler(object):
+ """
+ Control the order tasks are scheduled in.
+ """
+ name = "basic"
+
+ def __init__(self, runqueue, rqdata):
+ """
+ The default scheduler just returns the first buildable task (the
+ priority map is sorted by task number)
+ """
+ self.rq = runqueue
+ self.rqdata = rqdata
+ self.numTasks = len(self.rqdata.runtaskentries)
+
+        self.prio_map = list(self.rqdata.runtaskentries.keys())
+
+ self.buildable = []
+ self.stamps = {}
+ for tid in self.rqdata.runtaskentries:
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
+ if tid in self.rq.runq_buildable:
+ self.buildable.append(tid)
+
+ self.rev_prio_map = None
+
+ def next_buildable_task(self):
+ """
+ Return the id of the first task we find that is buildable
+ """
+ self.buildable = [x for x in self.buildable if x not in self.rq.runq_running]
+ if not self.buildable:
+ return None
+ if len(self.buildable) == 1:
+ tid = self.buildable[0]
+ stamp = self.stamps[tid]
+ if stamp not in self.rq.build_stamps.values():
+ return tid
+
+ if not self.rev_prio_map:
+ self.rev_prio_map = {}
+ for tid in self.rqdata.runtaskentries:
+ self.rev_prio_map[tid] = self.prio_map.index(tid)
+
+ best = None
+ bestprio = None
+ for tid in self.buildable:
+ prio = self.rev_prio_map[tid]
+ if bestprio is None or bestprio > prio:
+ stamp = self.stamps[tid]
+ if stamp in self.rq.build_stamps.values():
+ continue
+ bestprio = prio
+ best = tid
+
+ return best
+
+ def next(self):
+ """
+ Return the id of the task we should build next
+ """
+ if self.rq.stats.active < self.rq.number_tasks:
+ return self.next_buildable_task()
+
+ def newbuildable(self, task):
+ self.buildable.append(task)
+
+ def describe_task(self, taskid):
+ result = 'ID %s' % taskid
+ if self.rev_prio_map:
+ result = result + (' pri %d' % self.rev_prio_map[taskid])
+ return result
+
+ def dump_prio(self, comment):
+ bb.debug(3, '%s (most important first):\n%s' %
+ (comment,
+ '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
+ index, taskid in enumerate(self.prio_map)])))
+
+class RunQueueSchedulerSpeed(RunQueueScheduler):
+ """
+ A scheduler optimised for speed. The priority map is sorted by task weight,
+ heavier weighted tasks (tasks needed by the most other tasks) are run first.
+ """
+ name = "speed"
+
+ def __init__(self, runqueue, rqdata):
+ """
+ The priority map is sorted by task weight.
+ """
+ RunQueueScheduler.__init__(self, runqueue, rqdata)
+
+ weights = {}
+ for tid in self.rqdata.runtaskentries:
+ weight = self.rqdata.runtaskentries[tid].weight
+ if not weight in weights:
+ weights[weight] = []
+ weights[weight].append(tid)
+
+ self.prio_map = []
+ for weight in sorted(weights):
+ for w in weights[weight]:
+ self.prio_map.append(w)
+
+ self.prio_map.reverse()
+
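The bucket sort above, applied to toy weights (heavier tasks end up first):

    weights = {}
    for tid, weight in [('a.bb:do_x', 3), ('b.bb:do_y', 1), ('c.bb:do_z', 3)]:
        weights.setdefault(weight, []).append(tid)
    prio_map = [tid for w in sorted(weights) for tid in weights[w]]
    prio_map.reverse()
    assert prio_map == ['c.bb:do_z', 'a.bb:do_x', 'b.bb:do_y']
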
+class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
+ """
+ A scheduler optimised to complete .bb files as quickly as possible. The
+ priority map is sorted by task weight, but then reordered so once a given
+ .bb file starts to build, it's completed as quickly as possible by
+    running all tasks related to the same .bb file one after the other.
+ This works well where disk space is at a premium and classes like OE's
+ rm_work are in force.
+ """
+ name = "completion"
+
+ def __init__(self, runqueue, rqdata):
+ super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)
+
+ # Extract list of tasks for each recipe, with tasks sorted
+ # ascending from "must run first" (typically do_fetch) to
+ # "runs last" (do_build). The speed scheduler prioritizes
+ # tasks that must run first before the ones that run later;
+ # this is what we depend on here.
+ task_lists = {}
+ for taskid in self.prio_map:
+ fn, taskname = taskid.rsplit(':', 1)
+ task_lists.setdefault(fn, []).append(taskname)
+
+ # Now unify the different task lists. The strategy is that
+ # common tasks get skipped and new ones get inserted after the
+        # preceding common one(s) as they are found. Because task
+ # lists should differ only by their number of tasks, but not
+ # the ordering of the common tasks, this should result in a
+ # deterministic result that is a superset of the individual
+ # task ordering.
+ all_tasks = []
+ for recipe, new_tasks in task_lists.items():
+ index = 0
+ old_task = all_tasks[index] if index < len(all_tasks) else None
+ for new_task in new_tasks:
+ if old_task == new_task:
+ # Common task, skip it. This is the fast-path which
+ # avoids a full search.
+ index += 1
+ old_task = all_tasks[index] if index < len(all_tasks) else None
+ else:
+ try:
+ index = all_tasks.index(new_task)
+ # Already present, just not at the current
+                        # place. We re-synchronize by changing the
+ # index so that it matches again. Now
+ # move on to the next existing task.
+ index += 1
+ old_task = all_tasks[index] if index < len(all_tasks) else None
+ except ValueError:
+ # Not present. Insert before old_task, which
+ # remains the same (but gets shifted back).
+ all_tasks.insert(index, new_task)
+ index += 1
+ bb.debug(3, 'merged task list: %s' % all_tasks)
+
+ # Now reverse the order so that tasks that finish the work on one
+        # recipe are considered more important (= come first). The ordering
+ # is now so that do_build is most important.
+ all_tasks.reverse()
+
+ # Group tasks of the same kind before tasks of less important
+ # kinds at the head of the queue (because earlier = lower
+ # priority number = runs earlier), while preserving the
+ # ordering by recipe. If recipe foo is more important than
+ # bar, then the goal is to work on foo's do_populate_sysroot
+ # before bar's do_populate_sysroot and on the more important
+ # tasks of foo before any of the less important tasks in any
+ # other recipe (if those other recipes are more important than
+ # foo).
+ #
+        # All of this only applies when tasks are runnable. Explicit
+ # dependencies still override this ordering by priority.
+ #
+ # Here's an example why this priority re-ordering helps with
+ # minimizing disk usage. Consider a recipe foo with a higher
+ # priority than bar where foo DEPENDS on bar. Then the
+ # implicit rule (from base.bbclass) is that foo's do_configure
+ # depends on bar's do_populate_sysroot. This ensures that
+ # bar's do_populate_sysroot gets done first. Normally the
+ # tasks from foo would continue to run once that is done, and
+ # bar only gets completed and cleaned up later. By ordering
+        # bar's tasks that depend on bar's do_populate_sysroot before foo's
+ # do_configure, that problem gets avoided.
+ task_index = 0
+ self.dump_prio('original priorities')
+ for task in all_tasks:
+ for index in range(task_index, self.numTasks):
+ taskid = self.prio_map[index]
+ taskname = taskid.rsplit(':', 1)[1]
+ if taskname == task:
+ del self.prio_map[index]
+ self.prio_map.insert(task_index, taskid)
+ task_index += 1
+ self.dump_prio('completion priorities')
+
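The task-list merge used above, reduced to a standalone sketch on two hypothetical recipes:

    task_lists = {'foo.bb': ['do_fetch', 'do_compile', 'do_build'],
                  'bar.bb': ['do_fetch', 'do_build']}
    all_tasks = []
    for new_tasks in task_lists.values():
        index = 0
        old_task = all_tasks[index] if index < len(all_tasks) else None
        for new_task in new_tasks:
            if old_task == new_task:
                index += 1                                  # common task, skip it
            else:
                try:
                    index = all_tasks.index(new_task) + 1   # re-synchronize
                except ValueError:
                    all_tasks.insert(index, new_task)       # new task, insert it
                    index += 1
            old_task = all_tasks[index] if index < len(all_tasks) else None
    assert all_tasks == ['do_fetch', 'do_compile', 'do_build']
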
+class RunTaskEntry(object):
+ def __init__(self):
+ self.depends = set()
+ self.revdeps = set()
+ self.hash = None
+ self.task = None
+ self.weight = 1
+
+class RunQueueData:
+ """
+ BitBake Run Queue implementation
+ """
+ def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
+ self.cooker = cooker
+ self.dataCaches = dataCaches
+ self.taskData = taskData
+ self.targets = targets
+ self.rq = rq
+ self.warn_multi_bb = False
+
+ self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
+ self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
+ self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
+ self.setscenewhitelist_checked = False
+ self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
+ self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()
+
+ self.reset()
+
+ def reset(self):
+ self.runtaskentries = {}
+
+ def runq_depends_names(self, ids):
+ import re
+ ret = []
+ for id in ids:
+ nam = os.path.basename(id)
+ nam = re.sub("_[^,]*,", ",", nam)
+ ret.extend([nam])
+ return ret
+
+ def get_task_hash(self, tid):
+ return self.runtaskentries[tid].hash
+
+ def get_user_idstring(self, tid, task_name_suffix = ""):
+ return tid + task_name_suffix
+
+ def get_short_user_idstring(self, task, task_name_suffix = ""):
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
+ pn = self.dataCaches[mc].pkg_fn[taskfn]
+ taskname = taskname_from_tid(task) + task_name_suffix
+ return "%s:%s" % (pn, taskname)
+
+ def circular_depchains_handler(self, tasks):
+ """
+ Some tasks aren't buildable, likely due to circular dependency issues.
+ Identify the circular dependencies and print them in a user readable format.
+ """
+ from copy import deepcopy
+
+ valid_chains = []
+ explored_deps = {}
+ msgs = []
+
+ def chain_reorder(chain):
+ """
+ Reorder a dependency chain so the lowest task id is first
+ """
+ lowest = 0
+ new_chain = []
+ for entry in range(len(chain)):
+ if chain[entry] < chain[lowest]:
+ lowest = entry
+ new_chain.extend(chain[lowest:])
+ new_chain.extend(chain[:lowest])
+ return new_chain
+
+ def chain_compare_equal(chain1, chain2):
+ """
+ Compare two dependency chains and see if they're the same
+ """
+ if len(chain1) != len(chain2):
+ return False
+ for index in range(len(chain1)):
+ if chain1[index] != chain2[index]:
+ return False
+ return True
+
+ def chain_array_contains(chain, chain_array):
+ """
+ Return True if chain_array contains chain
+ """
+ for ch in chain_array:
+ if chain_compare_equal(ch, chain):
+ return True
+ return False
+
+ def find_chains(tid, prev_chain):
+ prev_chain.append(tid)
+ total_deps = []
+ total_deps.extend(self.runtaskentries[tid].revdeps)
+ for revdep in self.runtaskentries[tid].revdeps:
+ if revdep in prev_chain:
+ idx = prev_chain.index(revdep)
+ # To prevent duplicates, reorder the chain to start with the lowest taskid
+ # and search through an array of those we've already printed
+ chain = prev_chain[idx:]
+ new_chain = chain_reorder(chain)
+ if not chain_array_contains(new_chain, valid_chains):
+ valid_chains.append(new_chain)
+ msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
+ for dep in new_chain:
+ msgs.append(" Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
+ msgs.append("\n")
+ if len(valid_chains) > 10:
+                            msgs.append("Aborted dependency loop search after 10 matches.\n")
+ return msgs
+ continue
+ scan = False
+ if revdep not in explored_deps:
+ scan = True
+ elif revdep in explored_deps[revdep]:
+ scan = True
+ else:
+ for dep in prev_chain:
+ if dep in explored_deps[revdep]:
+ scan = True
+ if scan:
+ find_chains(revdep, copy.deepcopy(prev_chain))
+ for dep in explored_deps[revdep]:
+ if dep not in total_deps:
+ total_deps.append(dep)
+
+ explored_deps[tid] = total_deps
+
+ for task in tasks:
+ find_chains(task, [])
+
+ return msgs
+
+ def calculate_task_weights(self, endpoints):
+ """
+ Calculate a number representing the "weight" of each task. Heavier weighted tasks
+ have more dependencies and hence should be executed sooner for maximum speed.
+
+        This function also sanity checks the task list, finding tasks that
+        are impossible to execute due to circular dependencies.
+ """
+
+ numTasks = len(self.runtaskentries)
+ weight = {}
+ deps_left = {}
+ task_done = {}
+
+ for tid in self.runtaskentries:
+ task_done[tid] = False
+ weight[tid] = 1
+ deps_left[tid] = len(self.runtaskentries[tid].revdeps)
+
+ for tid in endpoints:
+ weight[tid] = 10
+ task_done[tid] = True
+
+ while True:
+ next_points = []
+ for tid in endpoints:
+ for revdep in self.runtaskentries[tid].depends:
+ weight[revdep] = weight[revdep] + weight[tid]
+ deps_left[revdep] = deps_left[revdep] - 1
+ if deps_left[revdep] == 0:
+ next_points.append(revdep)
+ task_done[revdep] = True
+ endpoints = next_points
+ if len(next_points) == 0:
+ break
+
+ # Circular dependency sanity check
+ problem_tasks = []
+ for tid in self.runtaskentries:
+ if task_done[tid] is False or deps_left[tid] != 0:
+ problem_tasks.append(tid)
+ logger.debug(2, "Task %s is not buildable", tid)
+ logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
+ self.runtaskentries[tid].weight = weight[tid]
+
+ if problem_tasks:
+ message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
+ message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
+ message = message + "Identifying dependency loops (this may take a short while)...\n"
+ logger.error(message)
+
+ msgs = self.circular_depchains_handler(problem_tasks)
+
+ message = "\n"
+ for msg in msgs:
+ message = message + msg
+ bb.msg.fatal("RunQueue", message)
+
+ return weight
+
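Weight propagation, reduced to a standalone sketch on a three-task chain (a depends on b, b depends on c; nothing depends on a, so a is the endpoint):

    # Endpoints start at weight 10; every other task starts at 1 and
    # accumulates the weight of each task that depends on it.
    depends = {'a': ['b'], 'b': ['c'], 'c': []}
    revdeps = {'a': [], 'b': ['a'], 'c': ['b']}
    weight = {tid: 1 for tid in depends}
    deps_left = {tid: len(revdeps[tid]) for tid in depends}
    endpoints = [tid for tid in depends if not revdeps[tid]]
    for tid in endpoints:
        weight[tid] = 10
    while endpoints:
        next_points = []
        for tid in endpoints:
            for dep in depends[tid]:
                weight[dep] += weight[tid]
                deps_left[dep] -= 1
                if deps_left[dep] == 0:
                    next_points.append(dep)
        endpoints = next_points
    assert weight == {'a': 10, 'b': 11, 'c': 12}   # deeper tasks run sooner
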
+ def prepare(self):
+ """
+ Turn a set of taskData into a RunQueue and compute data needed
+ to optimise the execution order.
+ """
+
+ runq_build = {}
+ recursivetasks = {}
+ recursiveitasks = {}
+ recursivetasksselfref = set()
+
+ taskData = self.taskData
+
+ found = False
+ for mc in self.taskData:
+ if len(taskData[mc].taskentries) > 0:
+ found = True
+ break
+ if not found:
+ # Nothing to do
+ return 0
+
+ self.init_progress_reporter.start()
+ self.init_progress_reporter.next_stage()
+
+ # Step A - Work out a list of tasks to run
+ #
+ # Taskdata gives us a list of possible providers for every build and run
+ # target ordered by priority. It also gives information on each of those
+ # providers.
+ #
+ # To create the actual list of tasks to execute we fix the list of
+ # providers and then resolve the dependencies into task IDs. This
+ # process is repeated for each type of dependency (tdepends, deptask,
+        # rdeptask, recrdeptask, idepends).
+
+ def add_build_dependencies(depids, tasknames, depends, mc):
+ for depname in depids:
+ # Won't be in build_targets if ASSUME_PROVIDED
+ if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
+ continue
+ depdata = taskData[mc].build_targets[depname][0]
+ if depdata is None:
+ continue
+ for taskname in tasknames:
+ t = depdata + ":" + taskname
+ if t in taskData[mc].taskentries:
+ depends.add(t)
+
+ def add_runtime_dependencies(depids, tasknames, depends, mc):
+ for depname in depids:
+ if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
+ continue
+ depdata = taskData[mc].run_targets[depname][0]
+ if depdata is None:
+ continue
+ for taskname in tasknames:
+ t = depdata + ":" + taskname
+ if t in taskData[mc].taskentries:
+ depends.add(t)
+
+ for mc in taskData:
+ for tid in taskData[mc].taskentries:
+
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ #runtid = build_tid(mc, fn, taskname)
+
+ #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
+
+ depends = set()
+ task_deps = self.dataCaches[mc].task_deps[taskfn]
+
+ self.runtaskentries[tid] = RunTaskEntry()
+
+ if fn in taskData[mc].failed_fns:
+ continue
+
+ # Resolve task internal dependencies
+ #
+ # e.g. addtask before X after Y
+ for t in taskData[mc].taskentries[tid].tdepends:
+ (_, depfn, deptaskname, _) = split_tid_mcfn(t)
+ depends.add(build_tid(mc, depfn, deptaskname))
+
+ # Resolve 'deptask' dependencies
+ #
+ # e.g. do_sometask[deptask] = "do_someothertask"
+ # (makes sure sometask runs after someothertask of all DEPENDS)
+ if 'deptask' in task_deps and taskname in task_deps['deptask']:
+ tasknames = task_deps['deptask'][taskname].split()
+ add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
+
+ # Resolve 'rdeptask' dependencies
+ #
+ # e.g. do_sometask[rdeptask] = "do_someothertask"
+ # (makes sure sometask runs after someothertask of all RDEPENDS)
+ if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
+ tasknames = task_deps['rdeptask'][taskname].split()
+ add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
+
+ # Resolve inter-task dependencies
+ #
+ # e.g. do_sometask[depends] = "targetname:do_someothertask"
+ # (makes sure sometask runs after targetname's someothertask)
+ idepends = taskData[mc].taskentries[tid].idepends
+ for (depname, idependtask) in idepends:
+ if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
+ # Won't be in build_targets if ASSUME_PROVIDED
+ depdata = taskData[mc].build_targets[depname][0]
+ if depdata is not None:
+ t = depdata + ":" + idependtask
+ depends.add(t)
+ if t not in taskData[mc].taskentries:
+ bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
+ irdepends = taskData[mc].taskentries[tid].irdepends
+ for (depname, idependtask) in irdepends:
+ if depname in taskData[mc].run_targets:
+ # Won't be in run_targets if ASSUME_PROVIDED
+ if not taskData[mc].run_targets[depname]:
+ continue
+ depdata = taskData[mc].run_targets[depname][0]
+ if depdata is not None:
+ t = depdata + ":" + idependtask
+ depends.add(t)
+ if t not in taskData[mc].taskentries:
+ bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
+
+ # Resolve recursive 'recrdeptask' dependencies (Part A)
+ #
+ # e.g. do_sometask[recrdeptask] = "do_someothertask"
+ # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
+ # We cover the recursive part of the dependencies below
+ if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
+ tasknames = task_deps['recrdeptask'][taskname].split()
+ recursivetasks[tid] = tasknames
+ add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
+ add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
+ if taskname in tasknames:
+ recursivetasksselfref.add(tid)
+
+ if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
+ recursiveitasks[tid] = []
+ for t in task_deps['recideptask'][taskname].split():
+ newdep = build_tid(mc, fn, t)
+ recursiveitasks[tid].append(newdep)
+
+ self.runtaskentries[tid].depends = depends
+ # Remove all self references
+ self.runtaskentries[tid].depends.discard(tid)
+
+ #self.dump_data()
+
+ self.init_progress_reporter.next_stage()
+
+ # Resolve recursive 'recrdeptask' dependencies (Part B)
+ #
+ # e.g. do_sometask[recrdeptask] = "do_someothertask"
+ # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
+ # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
+
+        # Generating/iterating recursive lists of dependencies is painful and potentially slow
+ # Precompute recursive task dependencies here by:
+ # a) create a temp list of reverse dependencies (revdeps)
+ # b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
+ # c) combine the total list of dependencies in cumulativedeps
+ # d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)
+
+
+ revdeps = {}
+ deps = {}
+ cumulativedeps = {}
+ for tid in self.runtaskentries:
+ deps[tid] = set(self.runtaskentries[tid].depends)
+ revdeps[tid] = set()
+ cumulativedeps[tid] = set()
+ # Generate a temp list of reverse dependencies
+ for tid in self.runtaskentries:
+ for dep in self.runtaskentries[tid].depends:
+ revdeps[dep].add(tid)
+ # Find the dependency chain endpoints
+ endpoints = set()
+ for tid in self.runtaskentries:
+ if len(deps[tid]) == 0:
+ endpoints.add(tid)
+ # Iterate the chains collating dependencies
+ while endpoints:
+ next = set()
+ for tid in endpoints:
+ for dep in revdeps[tid]:
+ cumulativedeps[dep].add(fn_from_tid(tid))
+ cumulativedeps[dep].update(cumulativedeps[tid])
+ if tid in deps[dep]:
+ deps[dep].remove(tid)
+ if len(deps[dep]) == 0:
+ next.add(dep)
+ endpoints = next
+ #for tid in deps:
+ # if len(deps[tid]) != 0:
+ # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))
+
+ # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
+ # resolve these recursively until we aren't adding any further extra dependencies
+ extradeps = True
+ while extradeps:
+ extradeps = 0
+ for tid in recursivetasks:
+ tasknames = recursivetasks[tid]
+
+ totaldeps = set(self.runtaskentries[tid].depends)
+ if tid in recursiveitasks:
+ totaldeps.update(recursiveitasks[tid])
+ for dep in recursiveitasks[tid]:
+ if dep not in self.runtaskentries:
+ continue
+ totaldeps.update(self.runtaskentries[dep].depends)
+
+ deps = set()
+ for dep in totaldeps:
+ if dep in cumulativedeps:
+ deps.update(cumulativedeps[dep])
+
+ for t in deps:
+ for taskname in tasknames:
+ newtid = t + ":" + taskname
+ if newtid == tid:
+ continue
+ if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
+ extradeps += 1
+ self.runtaskentries[tid].depends.add(newtid)
+
+ # Handle recursive tasks which depend upon other recursive tasks
+ deps = set()
+ for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
+ deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
+ for newtid in deps:
+ for taskname in tasknames:
+ if not newtid.endswith(":" + taskname):
+ continue
+ if newtid in self.runtaskentries:
+ extradeps += 1
+ self.runtaskentries[tid].depends.add(newtid)
+
+ bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)
+
+ # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
+ for tid in recursivetasksselfref:
+ self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)
+
+ self.init_progress_reporter.next_stage()
+
+ #self.dump_data()
+
+ # Step B - Mark all active tasks
+ #
+ # Start with the tasks we were asked to run and mark all dependencies
+ # as active too. If the task is to be 'forced', clear its stamp. Once
+ # all active tasks are marked, prune the ones we don't need.
+
+ logger.verbose("Marking Active Tasks")
+
+ def mark_active(tid, depth):
+ """
+ Mark an item as active along with its depends
+ (calls itself recursively)
+ """
+
+ if tid in runq_build:
+ return
+
+ runq_build[tid] = 1
+
+ depends = self.runtaskentries[tid].depends
+ for depend in depends:
+ mark_active(depend, depth+1)
+
+ self.target_tids = []
+ for (mc, target, task, fn) in self.targets:
+
+ if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
+ continue
+
+ if target in taskData[mc].failed_deps:
+ continue
+
+ parents = False
+ if task.endswith('-'):
+ parents = True
+ task = task[:-1]
+
+ if fn in taskData[mc].failed_fns:
+ continue
+
+ # fn already has mc prefix
+ tid = fn + ":" + task
+ self.target_tids.append(tid)
+ if tid not in taskData[mc].taskentries:
+ import difflib
+ tasks = []
+ for x in taskData[mc].taskentries:
+ if x.startswith(fn + ":"):
+ tasks.append(taskname_from_tid(x))
+ close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
+ if close_matches:
+ extra = ". Close matches:\n %s" % "\n ".join(close_matches)
+ else:
+ extra = ""
+ bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))
+
+            # For tasks called "XXXX-", only run their dependencies
+ if parents:
+ for i in self.runtaskentries[tid].depends:
+ mark_active(i, 1)
+ else:
+ mark_active(tid, 1)
+
+ self.init_progress_reporter.next_stage()
+
+ # Step C - Prune all inactive tasks
+ #
+ # Once all active tasks are marked, prune the ones we don't need.
+
+ delcount = {}
+ for tid in list(self.runtaskentries.keys()):
+ if tid not in runq_build:
+ delcount[tid] = self.runtaskentries[tid]
+ del self.runtaskentries[tid]
+
+ # Handle --runall
+ if self.cooker.configuration.runall:
+ # re-run the mark_active and then drop unused tasks from new list
+ runq_build = {}
+
+ for task in self.cooker.configuration.runall:
+ runall_tids = set()
+ for tid in list(self.runtaskentries):
+ wanttid = fn_from_tid(tid) + ":do_%s" % task
+ if wanttid in delcount:
+ self.runtaskentries[wanttid] = delcount[wanttid]
+ if wanttid in self.runtaskentries:
+ runall_tids.add(wanttid)
+
+ for tid in list(runall_tids):
+ mark_active(tid,1)
+
+ for tid in list(self.runtaskentries.keys()):
+ if tid not in runq_build:
+ delcount[tid] = self.runtaskentries[tid]
+ del self.runtaskentries[tid]
+
+ if len(self.runtaskentries) == 0:
+ bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
+
+ self.init_progress_reporter.next_stage()
+
+ # Handle runonly
+ if self.cooker.configuration.runonly:
+ # re-run the mark_active and then drop unused tasks from new list
+ runq_build = {}
+
+ for task in self.cooker.configuration.runonly:
+ runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == "do_%s" % task }
+
+ for tid in list(runonly_tids):
+ mark_active(tid,1)
+
+ for tid in list(self.runtaskentries.keys()):
+ if tid not in runq_build:
+ delcount[tid] = self.runtaskentries[tid]
+ del self.runtaskentries[tid]
+
+ if len(self.runtaskentries) == 0:
+ bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))
+
+ #
+ # Step D - Sanity checks and computation
+ #
+
+ # Check to make sure we still have tasks to run
+ if len(self.runtaskentries) == 0:
+ if not taskData[''].abort:
+ bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
+ else:
+ bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
+
+ logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))
+
+ logger.verbose("Assign Weightings")
+
+ self.init_progress_reporter.next_stage()
+
+ # Generate a list of reverse dependencies to ease future calculations
+ for tid in self.runtaskentries:
+ for dep in self.runtaskentries[tid].depends:
+ self.runtaskentries[dep].revdeps.add(tid)
+
+ self.init_progress_reporter.next_stage()
+
+ # Identify tasks at the end of dependency chains
+ # Error on circular dependency loops (length two)
+ endpoints = []
+ for tid in self.runtaskentries:
+ revdeps = self.runtaskentries[tid].revdeps
+ if len(revdeps) == 0:
+ endpoints.append(tid)
+ for dep in revdeps:
+ if dep in self.runtaskentries[tid].depends:
+ bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))
+
+
+ logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))
+
+ self.init_progress_reporter.next_stage()
+
+ # Calculate task weights
+ # Check of higher length circular dependencies
+ self.runq_weight = self.calculate_task_weights(endpoints)
+
+ self.init_progress_reporter.next_stage()
+
+ # Sanity Check - Check for multiple tasks building the same provider
+ for mc in self.dataCaches:
+ prov_list = {}
+ seen_fn = []
+ for tid in self.runtaskentries:
+ (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ if taskfn in seen_fn:
+ continue
+ if mc != tidmc:
+ continue
+ seen_fn.append(taskfn)
+ for prov in self.dataCaches[mc].fn_provides[taskfn]:
+ if prov not in prov_list:
+ prov_list[prov] = [taskfn]
+ elif taskfn not in prov_list[prov]:
+ prov_list[prov].append(taskfn)
+ for prov in prov_list:
+ if len(prov_list[prov]) < 2:
+ continue
+ if prov in self.multi_provider_whitelist:
+ continue
+ seen_pn = []
+                # If two versions of the same PN are being built it's fatal; we don't support it.
+ for fn in prov_list[prov]:
+ pn = self.dataCaches[mc].pkg_fn[fn]
+ if pn not in seen_pn:
+ seen_pn.append(pn)
+ else:
+ bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
+ msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
+ #
+ # Construct a list of things which uniquely depend on each provider
+ # since this may help the user figure out which dependency is triggering this warning
+ #
+ msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
+ deplist = {}
+ commondeps = None
+ for provfn in prov_list[prov]:
+ deps = set()
+ for tid in self.runtaskentries:
+ fn = fn_from_tid(tid)
+ if fn != provfn:
+ continue
+ for dep in self.runtaskentries[tid].revdeps:
+ fn = fn_from_tid(dep)
+ if fn == provfn:
+ continue
+ deps.add(dep)
+ if not commondeps:
+ commondeps = set(deps)
+ else:
+ commondeps &= deps
+ deplist[provfn] = deps
+ for provfn in deplist:
+ msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
+ #
+ # Construct a list of provides and runtime providers for each recipe
+ # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
+ #
+ msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
+ provide_results = {}
+ rprovide_results = {}
+ commonprovs = None
+ commonrprovs = None
+ for provfn in prov_list[prov]:
+ provides = set(self.dataCaches[mc].fn_provides[provfn])
+ rprovides = set()
+ for rprovide in self.dataCaches[mc].rproviders:
+ if provfn in self.dataCaches[mc].rproviders[rprovide]:
+ rprovides.add(rprovide)
+ for package in self.dataCaches[mc].packages:
+ if provfn in self.dataCaches[mc].packages[package]:
+ rprovides.add(package)
+ for package in self.dataCaches[mc].packages_dynamic:
+ if provfn in self.dataCaches[mc].packages_dynamic[package]:
+ rprovides.add(package)
+ if not commonprovs:
+ commonprovs = set(provides)
+ else:
+ commonprovs &= provides
+ provide_results[provfn] = provides
+ if not commonrprovs:
+ commonrprovs = set(rprovides)
+ else:
+ commonrprovs &= rprovides
+ rprovide_results[provfn] = rprovides
+ #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
+ #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
+ for provfn in prov_list[prov]:
+ msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
+ msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
+
+ if self.warn_multi_bb:
+ logger.warning(msg)
+ else:
+ logger.error(msg)
+
+ self.init_progress_reporter.next_stage()
+
+ # Create a whitelist usable by the stamp checks
+ self.stampfnwhitelist = {}
+ for mc in self.taskData:
+ self.stampfnwhitelist[mc] = []
+ for entry in self.stampwhitelist.split():
+ if entry not in self.taskData[mc].build_targets:
+ continue
+                fn = self.taskData[mc].build_targets[entry][0]
+ self.stampfnwhitelist[mc].append(fn)
+
+ self.init_progress_reporter.next_stage()
+
+ # Iterate over the task list looking for tasks with a 'setscene' function
+ self.runq_setscene_tids = []
+ if not self.cooker.configuration.nosetscene:
+ for tid in self.runtaskentries:
+ (mc, fn, taskname, _) = split_tid_mcfn(tid)
+ setscenetid = tid + "_setscene"
+ if setscenetid not in taskData[mc].taskentries:
+ continue
+ self.runq_setscene_tids.append(tid)
+
+ def invalidate_task(tid, error_nostamp):
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ taskdep = self.dataCaches[mc].task_deps[taskfn]
+ if fn + ":" + taskname not in taskData[mc].taskentries:
+ logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
+ if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
+ if error_nostamp:
+ bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
+ else:
+ bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
+ else:
+ logger.verbose("Invalidate task %s, %s", taskname, fn)
+ bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], fn)
+
+ self.init_progress_reporter.next_stage()
+
+ # Invalidate task if force mode active
+ if self.cooker.configuration.force:
+ for tid in self.target_tids:
+ invalidate_task(tid, False)
+
+ # Invalidate task if invalidate mode active
+ if self.cooker.configuration.invalidate_stamp:
+ for tid in self.target_tids:
+ fn = fn_from_tid(tid)
+ for st in self.cooker.configuration.invalidate_stamp.split(','):
+ if not st.startswith("do_"):
+ st = "do_%s" % st
+ invalidate_task(fn + ":" + st, True)
+
+ self.init_progress_reporter.next_stage()
+
+ # Create and print to the logs a virtual/xxxx -> PN (fn) table
+ for mc in taskData:
+ virtmap = taskData[mc].get_providermap(prefix="virtual/")
+ virtpnmap = {}
+ for v in virtmap:
+ virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
+ bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
+ if hasattr(bb.parse.siggen, "tasks_resolved"):
+ bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
+
+ self.init_progress_reporter.next_stage()
+
+ # Iterate over the task list and call into the siggen code
+ dealtwith = set()
+ todeal = set(self.runtaskentries)
+ while len(todeal) > 0:
+ for tid in todeal.copy():
+ if len(self.runtaskentries[tid].depends - dealtwith) == 0:
+ dealtwith.add(tid)
+ todeal.remove(tid)
+ procdep = []
+ for dep in self.runtaskentries[tid].depends:
+ procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep))
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(taskfn, taskname, procdep, self.dataCaches[mc])
+ task = self.runtaskentries[tid].task
+
+ bb.parse.siggen.writeout_file_checksum_cache()
+
+ #self.dump_data()
+ return len(self.runtaskentries)
+
+ def dump_data(self):
+ """
+ Dump some debug information on the internal data structures
+ """
+ logger.debug(3, "run_tasks:")
+ for tid in self.runtaskentries:
+ logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
+ self.runtaskentries[tid].weight,
+ self.runtaskentries[tid].depends,
+ self.runtaskentries[tid].revdeps)
+
+class RunQueueWorker():
+ def __init__(self, process, pipe):
+ self.process = process
+ self.pipe = pipe
+
+class RunQueue:
+ def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
+
+ self.cooker = cooker
+ self.cfgData = cfgData
+ self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)
+
+ self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
+ self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
+ self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION2") or None
+ self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None
+
+ self.state = runQueuePrepare
+
+ # For disk space monitor
+ # Invoked at regular time intervals via the bitbake heartbeat event
+ # while the build is running. We generate a unique name for the handler
+ # here, just in case that there ever is more than one RunQueue instance,
+ # start the handler when reaching runQueueSceneRun, and stop it when
+ # done with the build.
+ self.dm = monitordisk.diskMonitor(cfgData)
+ self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
+ self.dm_event_handler_registered = False
+ self.rqexe = None
+ self.worker = {}
+ self.fakeworker = {}
+
+ def _start_worker(self, mc, fakeroot = False, rqexec = None):
+ logger.debug(1, "Starting bitbake-worker")
+ magic = "decafbad"
+ if self.cooker.configuration.profile:
+ magic = "decafbadbad"
+ if fakeroot:
+ magic = magic + "beef"
+ mcdata = self.cooker.databuilder.mcdata[mc]
+ fakerootcmd = mcdata.getVar("FAKEROOTCMD")
+ fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
+ env = os.environ.copy()
+ for key, value in (var.split('=') for var in fakerootenv):
+ env[key] = value
+ worker = subprocess.Popen([fakerootcmd, "bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
+ else:
+ worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
+ bb.utils.nonblockingfd(worker.stdout)
+ workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
+
+ runqhash = {}
+ for tid in self.rqdata.runtaskentries:
+ runqhash[tid] = self.rqdata.runtaskentries[tid].hash
+
+ workerdata = {
+ "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
+ "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
+ "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
+ "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
+ "sigdata" : bb.parse.siggen.get_taskdata(),
+ "runq_hash" : runqhash,
+ "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
+ "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
+ "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
+ "logdefaultdomain" : bb.msg.loggerDefaultDomains,
+ "prhost" : self.cooker.prhost,
+ "buildname" : self.cfgData.getVar("BUILDNAME"),
+ "date" : self.cfgData.getVar("DATE"),
+ "time" : self.cfgData.getVar("TIME"),
+ }
+
+ worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
+ worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
+ worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
+ worker.stdin.flush()
+
+ return RunQueueWorker(worker, workerpipe)
+
+ def _teardown_worker(self, worker):
+ if not worker:
+ return
+ logger.debug(1, "Teardown for bitbake-worker")
+ try:
+ worker.process.stdin.write(b"<quit></quit>")
+ worker.process.stdin.flush()
+ worker.process.stdin.close()
+ except IOError:
+ pass
+ while worker.process.returncode is None:
+ worker.pipe.read()
+ worker.process.poll()
+ while worker.pipe.read():
+ continue
+ worker.pipe.close()
+
+ def start_worker(self):
+ if self.worker:
+ self.teardown_workers()
+ self.teardown = False
+ for mc in self.rqdata.dataCaches:
+ self.worker[mc] = self._start_worker(mc)
+
+ def start_fakeworker(self, rqexec, mc):
+ if not mc in self.fakeworker:
+ self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
+
+ def teardown_workers(self):
+ self.teardown = True
+ for mc in self.worker:
+ self._teardown_worker(self.worker[mc])
+ self.worker = {}
+ for mc in self.fakeworker:
+ self._teardown_worker(self.fakeworker[mc])
+ self.fakeworker = {}
+
+ def read_workers(self):
+ for mc in self.worker:
+ self.worker[mc].pipe.read()
+ for mc in self.fakeworker:
+ self.fakeworker[mc].pipe.read()
+
+ def active_fds(self):
+ fds = []
+ for mc in self.worker:
+ fds.append(self.worker[mc].pipe.input)
+ for mc in self.fakeworker:
+ fds.append(self.fakeworker[mc].pipe.input)
+ return fds
+
+ def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
+ def get_timestamp(f):
+ try:
+ if not os.access(f, os.F_OK):
+ return None
+ return os.stat(f)[stat.ST_MTIME]
+ except:
+ return None
+
+ (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
+ if taskname is None:
+ taskname = tn
+
+ if self.stamppolicy == "perfile":
+ fulldeptree = False
+ else:
+ fulldeptree = True
+ stampwhitelist = []
+ if self.stamppolicy == "whitelist":
+ stampwhitelist = self.rqdata.stampfnwhitelist[mc]
+
+ stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
+
+ # If the stamp is missing, it's not current
+ if not os.access(stampfile, os.F_OK):
+ logger.debug(2, "Stampfile %s not available", stampfile)
+ return False
+ # If it's a 'nostamp' task, it's not current
+ taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
+ if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
+ logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
+ return False
+
+ if taskname != "do_setscene" and taskname.endswith("_setscene"):
+ return True
+
+ if cache is None:
+ cache = {}
+
+ iscurrent = True
+ t1 = get_timestamp(stampfile)
+ for dep in self.rqdata.runtaskentries[tid].depends:
+ if iscurrent:
+ (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
+ stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
+ stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
+ t2 = get_timestamp(stampfile2)
+ t3 = get_timestamp(stampfile3)
+ if t3 and not t2:
+ continue
+ if t3 and t3 > t2:
+ continue
+ if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
+ if not t2:
+ logger.debug(2, 'Stampfile %s does not exist', stampfile2)
+ iscurrent = False
+ break
+ if t1 < t2:
+ logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
+ iscurrent = False
+ break
+ if recurse and iscurrent:
+ if dep in cache:
+ iscurrent = cache[dep]
+ if not iscurrent:
+ logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
+ else:
+ iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
+ cache[dep] = iscurrent
+ if recurse:
+ cache[tid] = iscurrent
+ return iscurrent
+
+ def _execute_runqueue(self):
+ """
+ Run the tasks in a queue prepared by rqdata.prepare()
+ Upon failure, optionally try to recover the build using any alternate providers
+ (if the abort on failure configuration option isn't set)
+ """
+
+ retval = True
+
+ if self.state is runQueuePrepare:
+ self.rqexe = RunQueueExecuteDummy(self)
+ # NOTE: if you add, remove or significantly refactor the stages of this
+ # process then you should recalculate the weightings here. This is quite
+ # easy to do - just change the next line temporarily to pass debug=True as
+ # the last parameter and you'll get a printout of the weightings as well
+ # as a map to the lines where next_stage() was called. Of course this isn't
+ # critical, but it helps to keep the progress reporting accurate.
+ self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
+ "Initialising tasks",
+ [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
+ if self.rqdata.prepare() == 0:
+ self.state = runQueueComplete
+ else:
+ self.state = runQueueSceneInit
+ self.rqdata.init_progress_reporter.next_stage()
+
+ # we are ready to run, emit dependency info to any UI or class which
+ # needs it
+ depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
+ self.rqdata.init_progress_reporter.next_stage()
+ bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)
+
+ if self.state is runQueueSceneInit:
+ dump = self.cooker.configuration.dump_signatures
+ if dump:
+ self.rqdata.init_progress_reporter.finish()
+ if 'printdiff' in dump:
+ invalidtasks = self.print_diffscenetasks()
+ self.dump_signatures(dump)
+ if 'printdiff' in dump:
+ self.write_diffscenetasks(invalidtasks)
+ self.state = runQueueComplete
+ else:
+ self.rqdata.init_progress_reporter.next_stage()
+ self.start_worker()
+ self.rqdata.init_progress_reporter.next_stage()
+ self.rqexe = RunQueueExecuteScenequeue(self)
+
+ if self.state is runQueueSceneRun:
+ if not self.dm_event_handler_registered:
+ res = bb.event.register(self.dm_event_handler_name,
+ lambda x: self.dm.check(self) if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp] else False,
+ ('bb.event.HeartbeatEvent',))
+ self.dm_event_handler_registered = True
+ retval = self.rqexe.execute()
+
+ if self.state is runQueueRunInit:
+ if self.cooker.configuration.setsceneonly:
+ self.state = runQueueComplete
+ else:
+ # Just in case we didn't setscene
+ self.rqdata.init_progress_reporter.finish()
+ logger.info("Executing RunQueue Tasks")
+ self.rqexe = RunQueueExecuteTasks(self)
+ self.state = runQueueRunning
+
+ if self.state is runQueueRunning:
+ retval = self.rqexe.execute()
+
+ if self.state is runQueueCleanUp:
+ retval = self.rqexe.finish()
+
+ build_done = self.state is runQueueComplete or self.state is runQueueFailed
+
+ if build_done and self.dm_event_handler_registered:
+ bb.event.remove(self.dm_event_handler_name, None)
+ self.dm_event_handler_registered = False
+
+ if build_done and self.rqexe:
+ self.teardown_workers()
+ if self.rqexe.stats.failed:
+ logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
+ else:
+ # Let's avoid the word "failed" if nothing actually did
+ logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
+
+ if self.state is runQueueFailed:
+ raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
+
+ if self.state is runQueueComplete:
+ # All done
+ return False
+
+ # Loop
+ return retval
+
+ def execute_runqueue(self):
+ # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
+ try:
+ return self._execute_runqueue()
+ except bb.runqueue.TaskFailure:
+ raise
+ except SystemExit:
+ raise
+ except bb.BBHandledException:
+ try:
+ self.teardown_workers()
+ except:
+ pass
+ self.state = runQueueComplete
+ raise
+ except Exception as err:
+ logger.exception("An uncaught exception occurred in runqueue")
+ try:
+ self.teardown_workers()
+ except:
+ pass
+ self.state = runQueueComplete
+ raise
+
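A hypothetical driver loop; cooker, cfgData, dataCaches, taskData and targets are assumed to have been set up by the usual cooker machinery:

    rq = RunQueue(cooker, cfgData, dataCaches, taskData, targets)
    while rq.execute_runqueue():
        pass    # each call advances the state machine; False means the build is done
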
+ def finish_runqueue(self, now = False):
+ if not self.rqexe:
+ self.state = runQueueComplete
+ return
+
+ if now:
+ self.rqexe.finish_now()
+ else:
+ self.rqexe.finish()
+
+ def rq_dump_sigfn(self, fn, options):
+ bb_cache = bb.cache.NoCache(self.cooker.databuilder)
+ the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
+ siggen = bb.parse.siggen
+ dataCaches = self.rqdata.dataCaches
+ siggen.dump_sigfn(fn, dataCaches, options)
+
+ def dump_signatures(self, options):
+ fns = set()
+ bb.note("Reparsing files to collect dependency data")
+
+ for tid in self.rqdata.runtaskentries:
+ fn = fn_from_tid(tid)
+ fns.add(fn)
+
+ max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
+ # We cannot use the real multiprocessing.Pool easily due to some local data
+ # that can't be pickled. This is a cheap multi-process solution.
+ launched = []
+ while fns:
+ if len(launched) < max_process:
+ p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
+ p.start()
+ launched.append(p)
+ for q in launched:
+ # The finished processes are joined when calling is_alive()
+ if not q.is_alive():
+ launched.remove(q)
+ for p in launched:
+ p.join()
+
+ bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
+
+ return
+
+ def print_diffscenetasks(self):
+
+ valid = []
+ sq_hash = []
+ sq_hashfn = []
+ sq_fn = []
+ sq_taskname = []
+ sq_task = []
+ noexec = []
+ stamppresent = []
+ valid_new = set()
+
+ for tid in self.rqdata.runtaskentries:
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
+
+ if 'noexec' in taskdep and taskname in taskdep['noexec']:
+ noexec.append(tid)
+ continue
+
+ sq_fn.append(fn)
+ sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
+ sq_hash.append(self.rqdata.runtaskentries[tid].hash)
+ sq_taskname.append(taskname)
+ sq_task.append(tid)
+ locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
+ try:
+ call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=True)"
+ valid = bb.utils.better_eval(call, locs)
+ # Handle version with no siginfo parameter
+ except TypeError:
+ call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
+ valid = bb.utils.better_eval(call, locs)
+ for v in valid:
+ valid_new.add(sq_task[v])
+
+ # Tasks which are both setscene and noexec never care about dependencies
+ # We therefore find tasks which are setscene and noexec and mark their
+ # unique dependencies as valid.
+ for tid in noexec:
+ if tid not in self.rqdata.runq_setscene_tids:
+ continue
+ for dep in self.rqdata.runtaskentries[tid].depends:
+ hasnoexecparents = True
+ for dep2 in self.rqdata.runtaskentries[dep].revdeps:
+ if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
+ continue
+ hasnoexecparents = False
+ break
+ if hasnoexecparents:
+ valid_new.add(dep)
+
+ invalidtasks = set()
+ for tid in self.rqdata.runtaskentries:
+ if tid not in valid_new and tid not in noexec:
+ invalidtasks.add(tid)
+
+ found = set()
+ processed = set()
+ for tid in invalidtasks:
+ toprocess = set([tid])
+ while toprocess:
+ next = set()
+ for t in toprocess:
+ for dep in self.rqdata.runtaskentries[t].depends:
+ if dep in invalidtasks:
+ found.add(tid)
+ if dep not in processed:
+ processed.add(dep)
+ next.add(dep)
+ toprocess = next
+ if tid in found:
+ toprocess = set()
+
+ tasklist = []
+ for tid in invalidtasks.difference(found):
+ tasklist.append(tid)
+
+ if tasklist:
+ bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))
+
+ return invalidtasks.difference(found)
+
+ def write_diffscenetasks(self, invalidtasks):
+
+ # Define recursion callback
+ def recursecb(key, hash1, hash2):
+ hashes = [hash1, hash2]
+ hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)
+
+ recout = []
+ if len(hashfiles) == 2:
+ out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
+ recout.extend(list(' ' + l for l in out2))
+ else:
+ recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
+
+ return recout
+
+
+ for tid in invalidtasks:
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
+ h = self.rqdata.runtaskentries[tid].hash
+ matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
+ match = None
+ for m in matches:
+ if h in m:
+ match = m
+ if match is None:
+                bb.fatal("Can't find a task we're supposed to have written out (hash: %s)" % h)
+ matches = {k : v for k, v in iter(matches.items()) if h not in k}
+ if matches:
+ latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
+ prevh = __find_md5__.search(latestmatch).group(0)
+ output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
+ bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))
+
+class RunQueueExecute:
+
+ def __init__(self, rq):
+ self.rq = rq
+ self.cooker = rq.cooker
+ self.cfgData = rq.cfgData
+ self.rqdata = rq.rqdata
+
+ self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
+ self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"
+
+ self.runq_buildable = set()
+ self.runq_running = set()
+ self.runq_complete = set()
+
+ self.build_stamps = {}
+ self.build_stamps2 = []
+ self.failed_tids = []
+
+ self.stampcache = {}
+
+ for mc in rq.worker:
+ rq.worker[mc].pipe.setrunqueueexec(self)
+ for mc in rq.fakeworker:
+ rq.fakeworker[mc].pipe.setrunqueueexec(self)
+
+ if self.number_tasks <= 0:
+ bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)
+
+ def runqueue_process_waitpid(self, task, status):
+
+        # self.build_stamps[task] may not exist when using a shared work directory.
+ if task in self.build_stamps:
+ self.build_stamps2.remove(self.build_stamps[task])
+ del self.build_stamps[task]
+
+ if status != 0:
+ self.task_fail(task, status)
+ else:
+ self.task_complete(task)
+ return True
+
+ def finish_now(self):
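+        # Ask every worker to stop immediately. <finishnow> is one of the
+        # tag-framed messages spoken over the worker's stdin; the
+        # bitbake-worker process parses these markers on its end.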
+ for mc in self.rq.worker:
+ try:
+ self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
+ self.rq.worker[mc].process.stdin.flush()
+ except IOError:
+ # worker must have died?
+ pass
+ for mc in self.rq.fakeworker:
+ try:
+ self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
+ self.rq.fakeworker[mc].process.stdin.flush()
+ except IOError:
+ # worker must have died?
+ pass
+
+ if len(self.failed_tids) != 0:
+ self.rq.state = runQueueFailed
+ return
+
+ self.rq.state = runQueueComplete
+ return
+
+ def finish(self):
+ self.rq.state = runQueueCleanUp
+
+ if self.stats.active > 0:
+ bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
+ self.rq.read_workers()
+ return self.rq.active_fds()
+
+ if len(self.failed_tids) != 0:
+ self.rq.state = runQueueFailed
+ return True
+
+ self.rq.state = runQueueComplete
+ return True
+
+ def check_dependencies(self, task, taskdeps, setscene = False):
+ if not self.rq.depvalidate:
+ return False
+
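+        # self.rq.depvalidate names the function configured through
+        # BB_SETSCENE_DEPVALID (e.g. setscene_depvalid in OE-Core); it
+        # decides which dependencies of a setscene task are really needed.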
+ taskdata = {}
+ taskdeps.add(task)
+ for dep in taskdeps:
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
+ pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
+ taskdata[dep] = [pn, taskname, fn]
+ call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
+ locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
+ valid = bb.utils.better_eval(call, locs)
+ return valid
+
+class RunQueueExecuteDummy(RunQueueExecute):
+ def __init__(self, rq):
+ self.rq = rq
+ self.stats = RunQueueStats(0)
+
+ def finish(self):
+ self.rq.state = runQueueComplete
+ return
+
+class RunQueueExecuteTasks(RunQueueExecute):
+ def __init__(self, rq):
+ RunQueueExecute.__init__(self, rq)
+
+ self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
+
+ self.stampcache = {}
+
+ initial_covered = self.rq.scenequeue_covered.copy()
+
+ # Mark initial buildable tasks
+ for tid in self.rqdata.runtaskentries:
+ if len(self.rqdata.runtaskentries[tid].depends) == 0:
+ self.runq_buildable.add(tid)
+ if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
+ self.rq.scenequeue_covered.add(tid)
+
+ found = True
+ while found:
+ found = False
+ for tid in self.rqdata.runtaskentries:
+ if tid in self.rq.scenequeue_covered:
+ continue
+ logger.debug(1, 'Considering %s: %s' % (tid, str(self.rqdata.runtaskentries[tid].revdeps)))
+
+ if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
+ if tid in self.rq.scenequeue_notcovered:
+ continue
+ found = True
+ self.rq.scenequeue_covered.add(tid)
+
+ logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))
+
+ # Allow the metadata to elect for setscene tasks to run anyway
+ covered_remove = set()
+ if self.rq.setsceneverify:
+ invalidtasks = []
+ tasknames = {}
+ fns = {}
+ for tid in self.rqdata.runtaskentries:
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
+ fns[tid] = taskfn
+ tasknames[tid] = taskname
+ if 'noexec' in taskdep and taskname in taskdep['noexec']:
+ continue
+ if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
+ logger.debug(2, 'Setscene stamp current for task %s', tid)
+ continue
+ if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
+ logger.debug(2, 'Normal stamp current for task %s', tid)
+ continue
+ invalidtasks.append(tid)
+
+ call = self.rq.setsceneverify + "(covered, tasknames, fns, d, invalidtasks=invalidtasks)"
+ locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : tasknames, "fns" : fns, "d" : self.cooker.data, "invalidtasks" : invalidtasks }
+ covered_remove = bb.utils.better_eval(call, locs)
+
+ def removecoveredtask(tid):
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ taskname = taskname + '_setscene'
+ bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
+ self.rq.scenequeue_covered.remove(tid)
+
+ toremove = covered_remove
+ for task in toremove:
+ logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
+ while toremove:
+ covered_remove = []
+ for task in toremove:
+ removecoveredtask(task)
+ for deptask in self.rqdata.runtaskentries[task].depends:
+ if deptask not in self.rq.scenequeue_covered:
+ continue
+ if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
+ continue
+ logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
+ covered_remove.append(deptask)
+ toremove = covered_remove
+
+ logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)
+
+
+ for mc in self.rqdata.dataCaches:
+ target_pairs = []
+ for tid in self.rqdata.target_tids:
+ (tidmc, fn, taskname, _) = split_tid_mcfn(tid)
+ if tidmc == mc:
+ target_pairs.append((fn, taskname))
+
+ event.fire(bb.event.StampUpdate(target_pairs, self.rqdata.dataCaches[mc].stamp), self.cfgData)
+
+ schedulers = self.get_schedulers()
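+        # Pick the scheduler whose name matches BB_SCHEDULER; the for/else
+        # falls through to bb.fatal only when no scheduler matched.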
+ for scheduler in schedulers:
+ if self.scheduler == scheduler.name:
+ self.sched = scheduler(self, self.rqdata)
+ logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
+ break
+ else:
+ bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
+ (self.scheduler, ", ".join(obj.name for obj in schedulers)))
+
+ def get_schedulers(self):
+ schedulers = set(obj for obj in globals().values()
+ if type(obj) is type and
+ issubclass(obj, RunQueueScheduler))
+
+ user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
+ if user_schedulers:
+ for sched in user_schedulers.split():
+                if "." not in sched:
+ bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
+ continue
+
+ modname, name = sched.rsplit(".", 1)
+ try:
+ module = __import__(modname, fromlist=(name,))
+ except ImportError as exc:
+ logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
+ raise SystemExit(1)
+ else:
+ schedulers.add(getattr(module, name))
+ return schedulers
+
+ def setbuildable(self, task):
+ self.runq_buildable.add(task)
+ self.sched.newbuildable(task)
+
+ def task_completeoutright(self, task):
+ """
+ Mark a task as completed
+ Look at the reverse dependencies and mark any task with
+ completed dependencies as buildable
+ """
+ self.runq_complete.add(task)
+ for revdep in self.rqdata.runtaskentries[task].revdeps:
+ if revdep in self.runq_running:
+ continue
+ if revdep in self.runq_buildable:
+ continue
+            alldeps = True
+            for dep in self.rqdata.runtaskentries[revdep].depends:
+                if dep not in self.runq_complete:
+                    alldeps = False
+                    break
+            if alldeps:
+ self.setbuildable(revdep)
+ fn = fn_from_tid(revdep)
+ taskname = taskname_from_tid(revdep)
+ logger.debug(1, "Marking task %s as buildable", revdep)
+
+ def task_complete(self, task):
+ self.stats.taskCompleted()
+ bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
+ self.task_completeoutright(task)
+
+ def task_fail(self, task, exitcode):
+ """
+ Called when a task has failed
+ Updates the state engine with the failure
+ """
+ self.stats.taskFailed()
+ self.failed_tids.append(task)
+ bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
+ if self.rqdata.taskData[''].abort:
+ self.rq.state = runQueueCleanUp
+
+ def task_skip(self, task, reason):
+ self.runq_running.add(task)
+ self.setbuildable(task)
+ bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
+ self.task_completeoutright(task)
+ self.stats.taskCompleted()
+ self.stats.taskSkipped()
+
+ def execute(self):
+ """
+ Run the tasks in a queue prepared by rqdata.prepare()
+ """
+
+ if self.rqdata.setscenewhitelist is not None and not self.rqdata.setscenewhitelist_checked:
+ self.rqdata.setscenewhitelist_checked = True
+
+ # Check tasks that are going to run against the whitelist
+ def check_norun_task(tid, showerror=False):
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ # Ignore covered tasks
+ if tid in self.rq.scenequeue_covered:
+ return False
+ # Ignore stamped tasks
+ if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
+ return False
+ # Ignore noexec tasks
+ taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
+ if 'noexec' in taskdep and taskname in taskdep['noexec']:
+ return False
+
+ pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
+ if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
+ if showerror:
+ if tid in self.rqdata.runq_setscene_tids:
+ logger.error('Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname))
+ else:
+ logger.error('Task %s.%s attempted to execute unexpectedly' % (pn, taskname))
+ return True
+ return False
+ # Look to see if any tasks that we think shouldn't run are going to
+ unexpected = False
+ for tid in self.rqdata.runtaskentries:
+ if check_norun_task(tid):
+ unexpected = True
+ break
+ if unexpected:
+ # Run through the tasks in the rough order they'd have executed and print errors
+ # (since the order can be useful - usually missing sstate for the last few tasks
+ # is the cause of the problem)
+ task = self.sched.next()
+ while task is not None:
+ check_norun_task(task, showerror=True)
+ self.task_skip(task, 'Setscene enforcement check')
+ task = self.sched.next()
+
+ self.rq.state = runQueueCleanUp
+ return True
+
+ self.rq.read_workers()
+
+ if self.stats.total == 0:
+ # nothing to do
+ self.rq.state = runQueueCleanUp
+
+ task = self.sched.next()
+ if task is not None:
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
+
+ if task in self.rq.scenequeue_covered:
+ logger.debug(2, "Setscene covered task %s", task)
+ self.task_skip(task, "covered")
+ return True
+
+ if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
+ logger.debug(2, "Stamp current task %s", task)
+
+ self.task_skip(task, "existing")
+ return True
+
+ taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
+ if 'noexec' in taskdep and taskname in taskdep['noexec']:
+ startevent = runQueueTaskStarted(task, self.stats, self.rq,
+ noexec=True)
+ bb.event.fire(startevent, self.cfgData)
+ self.runq_running.add(task)
+ self.stats.taskActive()
+ if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
+ bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
+ self.task_complete(task)
+ return True
+ else:
+ startevent = runQueueTaskStarted(task, self.stats, self.rq)
+ bb.event.fire(startevent, self.cfgData)
+
+ taskdepdata = self.build_taskdepdata(task)
+
+ taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
+ if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
+                if mc not in self.rq.fakeworker:
+ try:
+ self.rq.start_fakeworker(self, mc)
+ except OSError as exc:
+ logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
+ self.rq.state = runQueueFailed
+ self.stats.taskFailed()
+ return True
+ self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
+ self.rq.fakeworker[mc].process.stdin.flush()
+ else:
+ self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
+ self.rq.worker[mc].process.stdin.flush()
+
+ self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
+ self.build_stamps2.append(self.build_stamps[task])
+ self.runq_running.add(task)
+ self.stats.taskActive()
+ if self.stats.active < self.number_tasks:
+ return True
+
+ if self.stats.active > 0:
+ self.rq.read_workers()
+ return self.rq.active_fds()
+
+ if len(self.failed_tids) != 0:
+ self.rq.state = runQueueFailed
+ return True
+
+ # Sanity Checks
+ for task in self.rqdata.runtaskentries:
+ if task not in self.runq_buildable:
+ logger.error("Task %s never buildable!", task)
+ if task not in self.runq_running:
+ logger.error("Task %s never ran!", task)
+ if task not in self.runq_complete:
+ logger.error("Task %s never completed!", task)
+ self.rq.state = runQueueComplete
+
+ return True
+
+ def build_taskdepdata(self, task):
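+        # Breadth-first walk over the task's dependency closure. Each entry
+        # maps a tid to [pn, taskname, fn, deps, provides, taskhash], the
+        # structure tasks later see as BB_TASKDEPDATA.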
+ taskdepdata = {}
+ next = self.rqdata.runtaskentries[task].depends
+ next.add(task)
+ while next:
+ additional = []
+ for revdep in next:
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
+ pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
+ deps = self.rqdata.runtaskentries[revdep].depends
+ provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
+ taskhash = self.rqdata.runtaskentries[revdep].hash
+ taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
+ for revdep2 in deps:
+ if revdep2 not in taskdepdata:
+ additional.append(revdep2)
+ next = additional
+
+ #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
+ return taskdepdata
+
+class RunQueueExecuteScenequeue(RunQueueExecute):
+ def __init__(self, rq):
+ RunQueueExecute.__init__(self, rq)
+
+ self.scenequeue_covered = set()
+ self.scenequeue_notcovered = set()
+ self.scenequeue_notneeded = set()
+
+ # If we don't have any setscene functions, skip this step
+ if len(self.rqdata.runq_setscene_tids) == 0:
+ rq.scenequeue_covered = set()
+ rq.state = runQueueRunInit
+ return
+
+ self.stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))
+
+ sq_revdeps = {}
+ sq_revdeps_new = {}
+ sq_revdeps_squash = {}
+ self.sq_harddeps = {}
+ self.stamps = {}
+
+ # We need to construct a dependency graph for the setscene functions. Intermediate
+ # dependencies between the setscene tasks only complicate the code. This code
+ # therefore aims to collapse the huge runqueue dependency tree into a smaller one
+ # only containing the setscene functions.
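+        # For illustration (hypothetical tids): in a chain such as
+        #   a.bb:do_fetch -> a.bb:do_unpack -> a.bb:do_populate_sysroot
+        # where only do_populate_sysroot has a setscene variant, the squashed
+        # graph keeps just the setscene task, and the reverse dependencies of
+        # the folded-away tasks are re-attached to it.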
+
+ self.rqdata.init_progress_reporter.next_stage()
+
+ # First process the chains up to the first setscene task.
+ endpoints = {}
+ for tid in self.rqdata.runtaskentries:
+ sq_revdeps[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
+ sq_revdeps_new[tid] = set()
+ if (len(sq_revdeps[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
+ #bb.warn("Added endpoint %s" % (tid))
+ endpoints[tid] = set()
+
+ self.rqdata.init_progress_reporter.next_stage()
+
+ # Secondly process the chains between setscene tasks.
+ for tid in self.rqdata.runq_setscene_tids:
+ #bb.warn("Added endpoint 2 %s" % (tid))
+ for dep in self.rqdata.runtaskentries[tid].depends:
+ if tid in sq_revdeps[dep]:
+ sq_revdeps[dep].remove(tid)
+ if dep not in endpoints:
+ endpoints[dep] = set()
+ #bb.warn(" Added endpoint 3 %s" % (dep))
+ endpoints[dep].add(tid)
+
+ self.rqdata.init_progress_reporter.next_stage()
+
+ def process_endpoints(endpoints):
+ newendpoints = {}
+ for point, task in endpoints.items():
+ tasks = set()
+ if task:
+ tasks |= task
+ if sq_revdeps_new[point]:
+ tasks |= sq_revdeps_new[point]
+ sq_revdeps_new[point] = set()
+ if point in self.rqdata.runq_setscene_tids:
+ sq_revdeps_new[point] = tasks
+ tasks = set()
+ continue
+ for dep in self.rqdata.runtaskentries[point].depends:
+ if point in sq_revdeps[dep]:
+ sq_revdeps[dep].remove(point)
+ if tasks:
+ sq_revdeps_new[dep] |= tasks
+ if len(sq_revdeps[dep]) == 0 and dep not in self.rqdata.runq_setscene_tids:
+ newendpoints[dep] = task
+ if len(newendpoints) != 0:
+ process_endpoints(newendpoints)
+
+ process_endpoints(endpoints)
+
+ self.rqdata.init_progress_reporter.next_stage()
+
+ # Build a list of setscene tasks which are "unskippable"
+ # These are direct endpoints referenced by the build
+ endpoints2 = {}
+ sq_revdeps2 = {}
+ sq_revdeps_new2 = {}
+ def process_endpoints2(endpoints):
+ newendpoints = {}
+ for point, task in endpoints.items():
+ tasks = set([point])
+ if task:
+ tasks |= task
+ if sq_revdeps_new2[point]:
+ tasks |= sq_revdeps_new2[point]
+ sq_revdeps_new2[point] = set()
+ if point in self.rqdata.runq_setscene_tids:
+ sq_revdeps_new2[point] = tasks
+ for dep in self.rqdata.runtaskentries[point].depends:
+ if point in sq_revdeps2[dep]:
+ sq_revdeps2[dep].remove(point)
+ if tasks:
+ sq_revdeps_new2[dep] |= tasks
+ if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene_tids:
+ newendpoints[dep] = tasks
+ if len(newendpoints) != 0:
+ process_endpoints2(newendpoints)
+ for tid in self.rqdata.runtaskentries:
+ sq_revdeps2[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
+ sq_revdeps_new2[tid] = set()
+ if (len(sq_revdeps2[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
+ endpoints2[tid] = set()
+ process_endpoints2(endpoints2)
+ self.unskippable = []
+ for tid in self.rqdata.runq_setscene_tids:
+ if sq_revdeps_new2[tid]:
+ self.unskippable.append(tid)
+
+ self.rqdata.init_progress_reporter.next_stage(len(self.rqdata.runtaskentries))
+
+ for taskcounter, tid in enumerate(self.rqdata.runtaskentries):
+ if tid in self.rqdata.runq_setscene_tids:
+ deps = set()
+ for dep in sq_revdeps_new[tid]:
+ deps.add(dep)
+ sq_revdeps_squash[tid] = deps
+ elif len(sq_revdeps_new[tid]) != 0:
+ bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
+ self.rqdata.init_progress_reporter.update(taskcounter)
+
+ self.rqdata.init_progress_reporter.next_stage()
+
+ # Resolve setscene inter-task dependencies
+ # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
+ # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
+ for tid in self.rqdata.runq_setscene_tids:
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ realtid = tid + "_setscene"
+ idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
+ self.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
+ for (depname, idependtask) in idepends:
+
+ if depname not in self.rqdata.taskData[mc].build_targets:
+ continue
+
+ depfn = self.rqdata.taskData[mc].build_targets[depname][0]
+ if depfn is None:
+ continue
+ deptid = depfn + ":" + idependtask.replace("_setscene", "")
+ if deptid not in self.rqdata.runtaskentries:
+ bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))
+
+                if deptid not in self.sq_harddeps:
+ self.sq_harddeps[deptid] = set()
+ self.sq_harddeps[deptid].add(tid)
+
+ sq_revdeps_squash[tid].add(deptid)
+ # Have to zero this to avoid circular dependencies
+ sq_revdeps_squash[deptid] = set()
+
+ self.rqdata.init_progress_reporter.next_stage()
+
+ for task in self.sq_harddeps:
+ for dep in self.sq_harddeps[task]:
+ sq_revdeps_squash[dep].add(task)
+
+ self.rqdata.init_progress_reporter.next_stage()
+
+ #for tid in sq_revdeps_squash:
+ # for dep in sq_revdeps_squash[tid]:
+ # data = data + "\n %s" % dep
+        #        bb.warn("Task %s_setscene: is %s " % (tid, data))
+
+ self.sq_deps = {}
+ self.sq_revdeps = sq_revdeps_squash
+ self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)
+
+ for tid in self.sq_revdeps:
+ self.sq_deps[tid] = set()
+ for tid in self.sq_revdeps:
+ for dep in self.sq_revdeps[tid]:
+ self.sq_deps[dep].add(tid)
+
+ self.rqdata.init_progress_reporter.next_stage()
+
+ for tid in self.sq_revdeps:
+ if len(self.sq_revdeps[tid]) == 0:
+ self.runq_buildable.add(tid)
+
+ self.rqdata.init_progress_reporter.finish()
+
+ self.outrightfail = []
+ if self.rq.hashvalidate:
+ sq_hash = []
+ sq_hashfn = []
+ sq_fn = []
+ sq_taskname = []
+ sq_task = []
+ noexec = []
+ stamppresent = []
+ for tid in self.sq_revdeps:
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+
+ taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
+
+ if 'noexec' in taskdep and taskname in taskdep['noexec']:
+ noexec.append(tid)
+ self.task_skip(tid)
+ bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn)
+ continue
+
+ if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
+ logger.debug(2, 'Setscene stamp current for task %s', tid)
+ stamppresent.append(tid)
+ self.task_skip(tid)
+ continue
+
+ if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
+ logger.debug(2, 'Normal stamp current for task %s', tid)
+ stamppresent.append(tid)
+ self.task_skip(tid)
+ continue
+
+ sq_fn.append(fn)
+ sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
+ sq_hash.append(self.rqdata.runtaskentries[tid].hash)
+ sq_taskname.append(taskname)
+ sq_task.append(tid)
+ call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
+ locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
+ valid = bb.utils.better_eval(call, locs)
+
+ valid_new = stamppresent
+ for v in valid:
+ valid_new.append(sq_task[v])
+
+ for tid in self.sq_revdeps:
+ if tid not in valid_new and tid not in noexec:
+ logger.debug(2, 'No package found, so skipping setscene task %s', tid)
+ self.outrightfail.append(tid)
+
+ logger.info('Executing SetScene Tasks')
+
+ self.rq.state = runQueueSceneRun
+
+ def scenequeue_updatecounters(self, task, fail = False):
+ for dep in self.sq_deps[task]:
+ if fail and task in self.sq_harddeps and dep in self.sq_harddeps[task]:
+ logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
+ self.scenequeue_updatecounters(dep, fail)
+ continue
+ if task not in self.sq_revdeps2[dep]:
+ # May already have been removed by the fail case above
+ continue
+ self.sq_revdeps2[dep].remove(task)
+ if len(self.sq_revdeps2[dep]) == 0:
+ self.runq_buildable.add(dep)
+
+ def task_completeoutright(self, task):
+ """
+ Mark a task as completed
+ Look at the reverse dependencies and mark any task with
+ completed dependencies as buildable
+ """
+
+ logger.debug(1, 'Found task %s which could be accelerated', task)
+ self.scenequeue_covered.add(task)
+ self.scenequeue_updatecounters(task)
+
+ def check_taskfail(self, task):
+ if self.rqdata.setscenewhitelist is not None:
+ realtask = task.split('_setscene')[0]
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
+ pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
+ if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
+ logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
+ self.rq.state = runQueueCleanUp
+
+ def task_complete(self, task):
+ self.stats.taskCompleted()
+ bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
+ self.task_completeoutright(task)
+
+ def task_fail(self, task, result):
+ self.stats.taskFailed()
+ bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
+ self.scenequeue_notcovered.add(task)
+ self.scenequeue_updatecounters(task, True)
+ self.check_taskfail(task)
+
+ def task_failoutright(self, task):
+ self.runq_running.add(task)
+ self.runq_buildable.add(task)
+ self.stats.taskCompleted()
+ self.stats.taskSkipped()
+ self.scenequeue_notcovered.add(task)
+ self.scenequeue_updatecounters(task, True)
+
+ def task_skip(self, task):
+ self.runq_running.add(task)
+ self.runq_buildable.add(task)
+ self.task_completeoutright(task)
+ self.stats.taskCompleted()
+ self.stats.taskSkipped()
+
+ def execute(self):
+ """
+ Run the tasks in a queue prepared by prepare_runqueue
+ """
+
+ self.rq.read_workers()
+
+ task = None
+ if self.stats.active < self.number_tasks:
+ # Find the next setscene to run
+ for nexttask in self.rqdata.runq_setscene_tids:
+ if nexttask in self.runq_buildable and nexttask not in self.runq_running and self.stamps[nexttask] not in self.build_stamps.values():
+ if nexttask in self.unskippable:
+ logger.debug(2, "Setscene task %s is unskippable" % nexttask)
+ if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
+ fn = fn_from_tid(nexttask)
+ foundtarget = False
+
+ if nexttask in self.rqdata.target_tids:
+ foundtarget = True
+ if not foundtarget:
+ logger.debug(2, "Skipping setscene for task %s" % nexttask)
+ self.task_skip(nexttask)
+ self.scenequeue_notneeded.add(nexttask)
+ return True
+ if nexttask in self.outrightfail:
+ self.task_failoutright(nexttask)
+ return True
+ task = nexttask
+ break
+ if task is not None:
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
+ taskname = taskname + "_setscene"
+ if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
+ logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
+ self.task_failoutright(task)
+ return True
+
+ if self.cooker.configuration.force:
+ if task in self.rqdata.target_tids:
+ self.task_failoutright(task)
+ return True
+
+ if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
+ logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
+ self.task_skip(task)
+ return True
+
+ startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
+ bb.event.fire(startevent, self.cfgData)
+
+ taskdepdata = self.build_taskdepdata(task)
+
+ taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
+ if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
+                if mc not in self.rq.fakeworker:
+ self.rq.start_fakeworker(self, mc)
+ self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
+ self.rq.fakeworker[mc].process.stdin.flush()
+ else:
+ self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
+ self.rq.worker[mc].process.stdin.flush()
+
+ self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
+ self.build_stamps2.append(self.build_stamps[task])
+ self.runq_running.add(task)
+ self.stats.taskActive()
+ if self.stats.active < self.number_tasks:
+ return True
+
+ if self.stats.active > 0:
+ self.rq.read_workers()
+ return self.rq.active_fds()
+
+ #for tid in self.sq_revdeps:
+ # if tid not in self.runq_running:
+ # buildable = tid in self.runq_buildable
+ # revdeps = self.sq_revdeps[tid]
+ # bb.warn("Found we didn't run %s %s %s" % (tid, buildable, str(revdeps)))
+
+ self.rq.scenequeue_covered = self.scenequeue_covered
+ self.rq.scenequeue_notcovered = self.scenequeue_notcovered
+
+ logger.debug(1, 'We can skip tasks %s', "\n".join(sorted(self.rq.scenequeue_covered)))
+
+ self.rq.state = runQueueRunInit
+
+ completeevent = sceneQueueComplete(self.stats, self.rq)
+ bb.event.fire(completeevent, self.cfgData)
+
+ return True
+
+ def runqueue_process_waitpid(self, task, status):
+ RunQueueExecute.runqueue_process_waitpid(self, task, status)
+
+
+ def build_taskdepdata(self, task):
+ def getsetscenedeps(tid):
+ deps = set()
+ (mc, fn, taskname, _) = split_tid_mcfn(tid)
+ realtid = tid + "_setscene"
+ idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
+ for (depname, idependtask) in idepends:
+ if depname not in self.rqdata.taskData[mc].build_targets:
+ continue
+
+ depfn = self.rqdata.taskData[mc].build_targets[depname][0]
+ if depfn is None:
+ continue
+ deptid = depfn + ":" + idependtask.replace("_setscene", "")
+ deps.add(deptid)
+ return deps
+
+ taskdepdata = {}
+ next = getsetscenedeps(task)
+ next.add(task)
+ while next:
+ additional = []
+ for revdep in next:
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
+ pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
+ deps = getsetscenedeps(revdep)
+ provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
+ taskhash = self.rqdata.runtaskentries[revdep].hash
+ taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
+ for revdep2 in deps:
+ if revdep2 not in taskdepdata:
+ additional.append(revdep2)
+ next = additional
+
+ #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
+ return taskdepdata
+
+class TaskFailure(Exception):
+ """
+ Exception raised when a task in a runqueue fails
+ """
+ def __init__(self, x):
+ self.args = x
+
+
+class runQueueExitWait(bb.event.Event):
+ """
+ Event when waiting for task processes to exit
+ """
+
+ def __init__(self, remain):
+ self.remain = remain
+ self.message = "Waiting for %s active tasks to finish" % remain
+ bb.event.Event.__init__(self)
+
+class runQueueEvent(bb.event.Event):
+ """
+ Base runQueue event class
+ """
+ def __init__(self, task, stats, rq):
+ self.taskid = task
+ self.taskstring = task
+ self.taskname = taskname_from_tid(task)
+ self.taskfile = fn_from_tid(task)
+ self.taskhash = rq.rqdata.get_task_hash(task)
+ self.stats = stats.copy()
+ bb.event.Event.__init__(self)
+
+class sceneQueueEvent(runQueueEvent):
+ """
+ Base sceneQueue event class
+ """
+ def __init__(self, task, stats, rq, noexec=False):
+ runQueueEvent.__init__(self, task, stats, rq)
+ self.taskstring = task + "_setscene"
+ self.taskname = taskname_from_tid(task) + "_setscene"
+ self.taskfile = fn_from_tid(task)
+ self.taskhash = rq.rqdata.get_task_hash(task)
+
+class runQueueTaskStarted(runQueueEvent):
+ """
+ Event notifying a task was started
+ """
+ def __init__(self, task, stats, rq, noexec=False):
+ runQueueEvent.__init__(self, task, stats, rq)
+ self.noexec = noexec
+
+class sceneQueueTaskStarted(sceneQueueEvent):
+ """
+ Event notifying a setscene task was started
+ """
+ def __init__(self, task, stats, rq, noexec=False):
+ sceneQueueEvent.__init__(self, task, stats, rq)
+ self.noexec = noexec
+
+class runQueueTaskFailed(runQueueEvent):
+ """
+ Event notifying a task failed
+ """
+ def __init__(self, task, stats, exitcode, rq):
+ runQueueEvent.__init__(self, task, stats, rq)
+ self.exitcode = exitcode
+
+ def __str__(self):
+ return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
+
+class sceneQueueTaskFailed(sceneQueueEvent):
+ """
+ Event notifying a setscene task failed
+ """
+ def __init__(self, task, stats, exitcode, rq):
+ sceneQueueEvent.__init__(self, task, stats, rq)
+ self.exitcode = exitcode
+
+ def __str__(self):
+ return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)
+
+class sceneQueueComplete(sceneQueueEvent):
+ """
+ Event when all the sceneQueue tasks are complete
+ """
+ def __init__(self, stats, rq):
+ self.stats = stats.copy()
+ bb.event.Event.__init__(self)
+
+class runQueueTaskCompleted(runQueueEvent):
+ """
+ Event notifying a task completed
+ """
+
+class sceneQueueTaskCompleted(sceneQueueEvent):
+ """
+ Event notifying a setscene task completed
+ """
+
+class runQueueTaskSkipped(runQueueEvent):
+ """
+ Event notifying a task was skipped
+ """
+ def __init__(self, task, stats, rq, reason):
+ runQueueEvent.__init__(self, task, stats, rq)
+ self.reason = reason
+
+class runQueuePipe():
+ """
+ Abstraction for a pipe between a worker thread and the server
+ """
+ def __init__(self, pipein, pipeout, d, rq, rqexec):
+ self.input = pipein
+ if pipeout:
+ pipeout.close()
+ bb.utils.nonblockingfd(self.input)
+ self.queue = b""
+ self.d = d
+ self.rq = rq
+ self.rqexec = rqexec
+
+ def setrunqueueexec(self, rqexec):
+ self.rqexec = rqexec
+
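+    # The worker writes pickled payloads framed by <event>...</event> and
+    # <exitcode>...</exitcode> tags; read() below buffers the raw bytes and
+    # dispatches each complete frame it finds.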
+ def read(self):
+ for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
+ for worker in workers.values():
+ worker.process.poll()
+ if worker.process.returncode is not None and not self.rq.teardown:
+ bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
+ self.rq.finish_runqueue(True)
+
+ start = len(self.queue)
+ try:
+ self.queue = self.queue + (self.input.read(102400) or b"")
+ except (OSError, IOError) as e:
+ if e.errno != errno.EAGAIN:
+ raise
+ end = len(self.queue)
+ found = True
+ while found and len(self.queue):
+ found = False
+ index = self.queue.find(b"</event>")
+ while index != -1 and self.queue.startswith(b"<event>"):
+ try:
+ event = pickle.loads(self.queue[7:index])
+ except ValueError as e:
+                    bb.msg.fatal("RunQueue", "failed to load pickle '%s': '%s'" % (e, self.queue[7:index]))
+ bb.event.fire_from_worker(event, self.d)
+ found = True
+ self.queue = self.queue[index+8:]
+ index = self.queue.find(b"</event>")
+ index = self.queue.find(b"</exitcode>")
+ while index != -1 and self.queue.startswith(b"<exitcode>"):
+ try:
+ task, status = pickle.loads(self.queue[10:index])
+ except ValueError as e:
+                    bb.msg.fatal("RunQueue", "failed to load pickle '%s': '%s'" % (e, self.queue[10:index]))
+ self.rqexec.runqueue_process_waitpid(task, status)
+ found = True
+ self.queue = self.queue[index+11:]
+ index = self.queue.find(b"</exitcode>")
+ return (end > start)
+
+ def close(self):
+ while self.read():
+ continue
+ if len(self.queue) > 0:
+ print("Warning, worker left partial message: %s" % self.queue)
+ self.input.close()
+
+def get_setscene_enforce_whitelist(d):
+ if d.getVar('BB_SETSCENE_ENFORCE') != '1':
+ return None
+ whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
+ outlist = []
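+    # A '%:' prefix expands against the command-line targets, e.g.
+    # (hypothetically) '%:do_package' combined with running
+    # 'bitbake core-image-minimal' becomes 'core-image-minimal:do_package'.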
+ for item in whitelist[:]:
+ if item.startswith('%:'):
+ for target in sys.argv[1:]:
+ if not target.startswith('-'):
+ outlist.append(target.split(':')[0] + ':' + item.split(':')[1])
+ else:
+ outlist.append(item)
+ return outlist
+
+def check_setscene_enforce_whitelist(pn, taskname, whitelist):
+ import fnmatch
+ if whitelist is not None:
+ item = '%s:%s' % (pn, taskname)
+ for whitelist_item in whitelist:
+ if fnmatch.fnmatch(item, whitelist_item):
+ return True
+ return False
+ return True
diff --git a/poky/bitbake/lib/bb/server/__init__.py b/poky/bitbake/lib/bb/server/__init__.py
new file mode 100644
index 000000000..5a3fba968
--- /dev/null
+++ b/poky/bitbake/lib/bb/server/__init__.py
@@ -0,0 +1,21 @@
+#
+# BitBake Base Server Code
+#
+# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
+# Copyright (C) 2006 - 2008 Richard Purdie
+# Copyright (C) 2013 Alexandru Damian
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
diff --git a/poky/bitbake/lib/bb/server/process.py b/poky/bitbake/lib/bb/server/process.py
new file mode 100644
index 000000000..828159ed7
--- /dev/null
+++ b/poky/bitbake/lib/bb/server/process.py
@@ -0,0 +1,624 @@
+#
+# BitBake Process based server.
+#
+# Copyright (C) 2010 Bob Foerster <robert@erafx.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ This module implements a multiprocessing.Process based server for bitbake.
+"""
+
+import bb
+import bb.event
+import logging
+import multiprocessing
+import threading
+import array
+import os
+import sys
+import time
+import select
+import socket
+import subprocess
+import errno
+import re
+import datetime
+import pickle
+import bb.server.xmlrpcserver
+from bb import daemonize
+from multiprocessing import queues
+
+logger = logging.getLogger('BitBake')
+
+class ProcessTimeout(SystemExit):
+ pass
+
+class ProcessServer(multiprocessing.Process):
+ profile_filename = "profile.log"
+ profile_processed_filename = "profile.log.processed"
+
+ def __init__(self, lock, sock, sockname):
+ multiprocessing.Process.__init__(self)
+ self.command_channel = False
+ self.command_channel_reply = False
+ self.quit = False
+ self.heartbeat_seconds = 1 # default, BB_HEARTBEAT_EVENT will be checked once we have a datastore.
+ self.next_heartbeat = time.time()
+
+ self.event_handle = None
+ self.haveui = False
+ self.lastui = False
+ self.xmlrpc = False
+
+ self._idlefuns = {}
+
+ self.bitbake_lock = lock
+ self.sock = sock
+ self.sockname = sockname
+
+ def register_idle_function(self, function, data):
+ """Register a function to be called while the server is idle"""
+ assert hasattr(function, '__call__')
+ self._idlefuns[function] = data
+
+ def run(self):
+
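+        # cooker, server_timeout and xmlrpcinterface are attached by
+        # BitBakeServer._startServer() before start() is called.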
+ if self.xmlrpcinterface[0]:
+ self.xmlrpc = bb.server.xmlrpcserver.BitBakeXMLRPCServer(self.xmlrpcinterface, self.cooker, self)
+
+ print("Bitbake XMLRPC server address: %s, server port: %s" % (self.xmlrpc.host, self.xmlrpc.port))
+
+ heartbeat_event = self.cooker.data.getVar('BB_HEARTBEAT_EVENT')
+ if heartbeat_event:
+ try:
+ self.heartbeat_seconds = float(heartbeat_event)
+ except:
+ bb.warn('Ignoring invalid BB_HEARTBEAT_EVENT=%s, must be a float specifying seconds.' % heartbeat_event)
+
+ self.timeout = self.server_timeout or self.cooker.data.getVar('BB_SERVER_TIMEOUT')
+ try:
+ if self.timeout:
+ self.timeout = float(self.timeout)
+ except:
+ bb.warn('Ignoring invalid BB_SERVER_TIMEOUT=%s, must be a float specifying seconds.' % self.timeout)
+
+
+ try:
+ self.bitbake_lock.seek(0)
+ self.bitbake_lock.truncate()
+ if self.xmlrpc:
+ self.bitbake_lock.write("%s %s:%s\n" % (os.getpid(), self.xmlrpc.host, self.xmlrpc.port))
+ else:
+ self.bitbake_lock.write("%s\n" % (os.getpid()))
+ self.bitbake_lock.flush()
+ except Exception as e:
+            print("Error writing to lock file: %s" % str(e))
+
+ if self.cooker.configuration.profile:
+ try:
+ import cProfile as profile
+ except:
+ import profile
+ prof = profile.Profile()
+
+ ret = profile.Profile.runcall(prof, self.main)
+
+ prof.dump_stats("profile.log")
+ bb.utils.process_profilelog("profile.log")
+ print("Raw profiling information saved to profile.log and processed statistics to profile.log.processed")
+
+ else:
+ ret = self.main()
+
+ return ret
+
+ def main(self):
+ self.cooker.pre_serve()
+
+ bb.utils.set_process_name("Cooker")
+
+ ready = []
+
+ self.controllersock = False
+ fds = [self.sock]
+ if self.xmlrpc:
+ fds.append(self.xmlrpc)
+ print("Entering server connection loop")
+
+ def disconnect_client(self, fds):
+ if not self.haveui:
+ return
+ print("Disconnecting Client")
+ fds.remove(self.controllersock)
+ fds.remove(self.command_channel)
+ bb.event.unregister_UIHhandler(self.event_handle, True)
+ self.command_channel_reply.writer.close()
+ self.event_writer.writer.close()
+ del self.event_writer
+ self.controllersock.close()
+ self.controllersock = False
+ self.haveui = False
+ self.lastui = time.time()
+ self.cooker.clientComplete()
+ if self.timeout is None:
+ print("No timeout, exiting.")
+ self.quit = True
+
+ while not self.quit:
+ if self.sock in ready:
+ self.controllersock, address = self.sock.accept()
+ if self.haveui:
+ print("Dropping connection attempt as we have a UI %s" % (str(ready)))
+ self.controllersock.close()
+ else:
+ print("Accepting %s" % (str(ready)))
+ fds.append(self.controllersock)
+ if self.controllersock in ready:
+ try:
+ print("Connecting Client")
+ ui_fds = recvfds(self.controllersock, 3)
+
+ # Where to write events to
+ writer = ConnectionWriter(ui_fds[0])
+ self.event_handle = bb.event.register_UIHhandler(writer, True)
+ self.event_writer = writer
+
+ # Where to read commands from
+ reader = ConnectionReader(ui_fds[1])
+ fds.append(reader)
+ self.command_channel = reader
+
+ # Where to send command return values to
+ writer = ConnectionWriter(ui_fds[2])
+ self.command_channel_reply = writer
+
+ self.haveui = True
+
+ except (EOFError, OSError):
+ disconnect_client(self, fds)
+
+            if self.timeout != -1.0 and not self.haveui and self.lastui and self.timeout and \
+ (self.lastui + self.timeout) < time.time():
+ print("Server timeout, exiting.")
+ self.quit = True
+
+ if self.command_channel in ready:
+ try:
+ command = self.command_channel.get()
+ except EOFError:
+ # Client connection shutting down
+ ready = []
+ disconnect_client(self, fds)
+ continue
+ if command[0] == "terminateServer":
+ self.quit = True
+ continue
+ try:
+ print("Running command %s" % command)
+ self.command_channel_reply.send(self.cooker.command.runCommand(command))
+ except Exception as e:
+ logger.exception('Exception in server main event loop running command %s (%s)' % (command, str(e)))
+
+ if self.xmlrpc in ready:
+ self.xmlrpc.handle_requests()
+
+ ready = self.idle_commands(.1, fds)
+
+ print("Exiting")
+ # Remove the socket file so we don't get any more connections to avoid races
+ os.unlink(self.sockname)
+ self.sock.close()
+
+ try:
+ self.cooker.shutdown(True)
+ self.cooker.notifier.stop()
+ self.cooker.confignotifier.stop()
+ except:
+ pass
+
+ self.cooker.post_serve()
+
+ # Finally release the lockfile but warn about other processes holding it open
+ lock = self.bitbake_lock
+ lockfile = lock.name
+ lock.close()
+ lock = None
+
+ while not lock:
+ with bb.utils.timeout(3):
+ lock = bb.utils.lockfile(lockfile, shared=False, retry=False, block=True)
+ if not lock:
+ # Some systems may not have lsof available
+ procs = None
+ try:
+ procs = subprocess.check_output(["lsof", '-w', lockfile], stderr=subprocess.STDOUT)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ if procs is None:
+ # Fall back to fuser if lsof is unavailable
+ try:
+ procs = subprocess.check_output(["fuser", '-v', lockfile], stderr=subprocess.STDOUT)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ msg = "Delaying shutdown due to active processes which appear to be holding bitbake.lock"
+ if procs:
+ msg += ":\n%s" % str(procs)
+ print(msg)
+ return
+ # We hold the lock so we can remove the file (hide stale pid data)
+ bb.utils.remove(lockfile)
+ bb.utils.unlockfile(lock)
+
+ def idle_commands(self, delay, fds=None):
+ nextsleep = delay
+ if not fds:
+ fds = []
+
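+        # Idle functions signal state through their return value: False
+        # removes the function, True means "active, poll again immediately",
+        # a float is a sleep hint, and anything else is treated as extra
+        # file descriptors to select on.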
+ for function, data in list(self._idlefuns.items()):
+ try:
+ retval = function(self, data, False)
+ if retval is False:
+ del self._idlefuns[function]
+ nextsleep = None
+ elif retval is True:
+ nextsleep = None
+ elif isinstance(retval, float) and nextsleep:
+ if (retval < nextsleep):
+ nextsleep = retval
+ elif nextsleep is None:
+ continue
+ else:
+ fds = fds + retval
+ except SystemExit:
+ raise
+ except Exception as exc:
+ if not isinstance(exc, bb.BBHandledException):
+ logger.exception('Running idle function')
+ del self._idlefuns[function]
+ self.quit = True
+
+ # Create new heartbeat event?
+ now = time.time()
+ if now >= self.next_heartbeat:
+ # We might have missed heartbeats. Just trigger once in
+ # that case and continue after the usual delay.
+ self.next_heartbeat += self.heartbeat_seconds
+ if self.next_heartbeat <= now:
+ self.next_heartbeat = now + self.heartbeat_seconds
+ heartbeat = bb.event.HeartbeatEvent(now)
+ bb.event.fire(heartbeat, self.cooker.data)
+ if nextsleep and now + nextsleep > self.next_heartbeat:
+                # Shorten the timeout so that we wake up in time for
+                # the heartbeat.
+ nextsleep = self.next_heartbeat - now
+
+ if nextsleep is not None:
+ if self.xmlrpc:
+ nextsleep = self.xmlrpc.get_timeout(nextsleep)
+ try:
+ return select.select(fds,[],[],nextsleep)[0]
+ except InterruptedError:
+ # Ignore EINTR
+ return []
+ else:
+ return select.select(fds,[],[],0)[0]
+
+
+class ServerCommunicator():
+ def __init__(self, connection, recv):
+ self.connection = connection
+ self.recv = recv
+
+ def runCommand(self, command):
+ self.connection.send(command)
+ if not self.recv.poll(30):
+ raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server")
+ return self.recv.get()
+
+ def updateFeatureSet(self, featureset):
+ _, error = self.runCommand(["setFeatures", featureset])
+ if error:
+ logger.error("Unable to set the cooker to the correct featureset: %s" % error)
+ raise BaseException(error)
+
+ def getEventHandle(self):
+ handle, error = self.runCommand(["getUIHandlerNum"])
+ if error:
+ logger.error("Unable to get UI Handler Number: %s" % error)
+ raise BaseException(error)
+
+ return handle
+
+ def terminateServer(self):
+ self.connection.send(['terminateServer'])
+ return
+
+class BitBakeProcessServerConnection(object):
+ def __init__(self, ui_channel, recv, eq, sock):
+ self.connection = ServerCommunicator(ui_channel, recv)
+ self.events = eq
+ # Save sock so it doesn't get gc'd for the life of our connection
+ self.socket_connection = sock
+
+ def terminate(self):
+ self.socket_connection.close()
+ self.connection.connection.close()
+ self.connection.recv.close()
+ return
+
+class BitBakeServer(object):
+ start_log_format = '--- Starting bitbake server pid %s at %s ---'
+ start_log_datetime_format = '%Y-%m-%d %H:%M:%S.%f'
+
+ def __init__(self, lock, sockname, configuration, featureset):
+
+ self.configuration = configuration
+ self.featureset = featureset
+ self.sockname = sockname
+ self.bitbake_lock = lock
+ self.readypipe, self.readypipein = os.pipe()
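+        # readypipe is a one-shot handshake: the daemonized server writes
+        # "ready" (or "fail") once the cooker is constructed; see
+        # _startServer() below.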
+
+ # Create server control socket
+ if os.path.exists(sockname):
+ os.unlink(sockname)
+
+ self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        # AF_UNIX has path length issues so chdir here as a workaround
+ cwd = os.getcwd()
+ logfile = os.path.join(cwd, "bitbake-cookerdaemon.log")
+
+ try:
+ os.chdir(os.path.dirname(sockname))
+ self.sock.bind(os.path.basename(sockname))
+ finally:
+ os.chdir(cwd)
+ self.sock.listen(1)
+
+ os.set_inheritable(self.sock.fileno(), True)
+ startdatetime = datetime.datetime.now()
+ bb.daemonize.createDaemon(self._startServer, logfile)
+ self.sock.close()
+ self.bitbake_lock.close()
+
+ ready = ConnectionReader(self.readypipe)
+ r = ready.poll(30)
+ if r:
+ r = ready.get()
+ if not r or r != "ready":
+ ready.close()
+ bb.error("Unable to start bitbake server")
+ if os.path.exists(logfile):
+ logstart_re = re.compile(self.start_log_format % ('([0-9]+)', '([0-9-]+ [0-9:.]+)'))
+ started = False
+ lines = []
+ with open(logfile, "r") as f:
+ for line in f:
+ if started:
+ lines.append(line)
+ else:
+ res = logstart_re.match(line.rstrip())
+ if res:
+ ldatetime = datetime.datetime.strptime(res.group(2), self.start_log_datetime_format)
+ if ldatetime >= startdatetime:
+ started = True
+ lines.append(line)
+ if lines:
+ if len(lines) > 10:
+ bb.error("Last 10 lines of server log for this session (%s):\n%s" % (logfile, "".join(lines[-10:])))
+ else:
+ bb.error("Server log for this session (%s):\n%s" % (logfile, "".join(lines)))
+ raise SystemExit(1)
+ ready.close()
+ os.close(self.readypipein)
+
+ def _startServer(self):
+ print(self.start_log_format % (os.getpid(), datetime.datetime.now().strftime(self.start_log_datetime_format)))
+ server = ProcessServer(self.bitbake_lock, self.sock, self.sockname)
+ self.configuration.setServerRegIdleCallback(server.register_idle_function)
+ writer = ConnectionWriter(self.readypipein)
+ try:
+ self.cooker = bb.cooker.BBCooker(self.configuration, self.featureset)
+ writer.send("ready")
+ except:
+ writer.send("fail")
+ raise
+ finally:
+ os.close(self.readypipein)
+ server.cooker = self.cooker
+ server.server_timeout = self.configuration.server_timeout
+ server.xmlrpcinterface = self.configuration.xmlrpcinterface
+ print("Started bitbake server pid %d" % os.getpid())
+ server.start()
+
+def connectProcessServer(sockname, featureset):
+ # Connect to socket
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+    # AF_UNIX has path length issues so chdir here as a workaround
+ cwd = os.getcwd()
+
+ try:
+ os.chdir(os.path.dirname(sockname))
+ sock.connect(os.path.basename(sockname))
+ finally:
+ os.chdir(cwd)
+
+ readfd = writefd = readfd1 = writefd1 = readfd2 = writefd2 = None
+ eq = command_chan_recv = command_chan = None
+
+ try:
+
+ # Send an fd for the remote to write events to
+ readfd, writefd = os.pipe()
+ eq = BBUIEventQueue(readfd)
+        # Send an fd for the remote to receive commands from
+ readfd1, writefd1 = os.pipe()
+ command_chan = ConnectionWriter(writefd1)
+        # Send an fd for the remote to write command results to
+ readfd2, writefd2 = os.pipe()
+ command_chan_recv = ConnectionReader(readfd2)
+
+ sendfds(sock, [writefd, readfd1, writefd2])
+
+ server_connection = BitBakeProcessServerConnection(command_chan, command_chan_recv, eq, sock)
+
+ # Close the ends of the pipes we won't use
+ for i in [writefd, readfd1, writefd2]:
+ os.close(i)
+
+ server_connection.connection.updateFeatureSet(featureset)
+
+ except (Exception, SystemExit) as e:
+ if command_chan_recv:
+ command_chan_recv.close()
+ if command_chan:
+ command_chan.close()
+ for i in [writefd, readfd1, writefd2]:
+ try:
+ os.close(i)
+ except OSError:
+ pass
+ sock.close()
+ raise
+
+ return server_connection
+
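+# sendfds()/recvfds() follow the same scheme as the helpers of the same name
+# in CPython's multiprocessing.reduction: a one-byte message carrying
+# len(fds) % 256 acts as a sanity check, while the descriptors themselves
+# travel as SCM_RIGHTS ancillary data.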
+def sendfds(sock, fds):
+ '''Send an array of fds over an AF_UNIX socket.'''
+ fds = array.array('i', fds)
+ msg = bytes([len(fds) % 256])
+ sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
+
+def recvfds(sock, size):
+ '''Receive an array of fds over an AF_UNIX socket.'''
+ a = array.array('i')
+ bytes_size = a.itemsize * size
+ msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_LEN(bytes_size))
+ if not msg and not ancdata:
+ raise EOFError
+ try:
+ if len(ancdata) != 1:
+ raise RuntimeError('received %d items of ancdata' %
+ len(ancdata))
+ cmsg_level, cmsg_type, cmsg_data = ancdata[0]
+ if (cmsg_level == socket.SOL_SOCKET and
+ cmsg_type == socket.SCM_RIGHTS):
+ if len(cmsg_data) % a.itemsize != 0:
+ raise ValueError
+ a.frombytes(cmsg_data)
+ assert len(a) % 256 == msg[0]
+ return list(a)
+ except (ValueError, IndexError):
+ pass
+ raise RuntimeError('Invalid data received')
+
+class BBUIEventQueue:
+ def __init__(self, readfd):
+
+ self.eventQueue = []
+ self.eventQueueLock = threading.Lock()
+ self.eventQueueNotify = threading.Event()
+
+ self.reader = ConnectionReader(readfd)
+
+ self.t = threading.Thread()
+ self.t.setDaemon(True)
+ self.t.run = self.startCallbackHandler
+ self.t.start()
+
+ def getEvent(self):
+ self.eventQueueLock.acquire()
+
+ if len(self.eventQueue) == 0:
+ self.eventQueueLock.release()
+ return None
+
+ item = self.eventQueue.pop(0)
+
+ if len(self.eventQueue) == 0:
+ self.eventQueueNotify.clear()
+
+ self.eventQueueLock.release()
+ return item
+
+ def waitEvent(self, delay):
+ self.eventQueueNotify.wait(delay)
+ return self.getEvent()
+
+ def queue_event(self, event):
+ self.eventQueueLock.acquire()
+ self.eventQueue.append(event)
+ self.eventQueueNotify.set()
+ self.eventQueueLock.release()
+
+ def send_event(self, event):
+ self.queue_event(pickle.loads(event))
+
+ def startCallbackHandler(self):
+ bb.utils.set_process_name("UIEventQueue")
+ while True:
+ try:
+ self.reader.wait()
+ event = self.reader.get()
+ self.queue_event(event)
+ except EOFError:
+                # Closing the read end raises EOFError here; that is the signal to exit
+ break
+ self.reader.close()
+
+class ConnectionReader(object):
+
+ def __init__(self, fd):
+ self.reader = multiprocessing.connection.Connection(fd, writable=False)
+ self.rlock = multiprocessing.Lock()
+
+ def wait(self, timeout=None):
+ return multiprocessing.connection.wait([self.reader], timeout)
+
+ def poll(self, timeout=None):
+ return self.reader.poll(timeout)
+
+ def get(self):
+ with self.rlock:
+ res = self.reader.recv_bytes()
+ return multiprocessing.reduction.ForkingPickler.loads(res)
+
+ def fileno(self):
+ return self.reader.fileno()
+
+ def close(self):
+ return self.reader.close()
+
+
+class ConnectionWriter(object):
+
+ def __init__(self, fd):
+ self.writer = multiprocessing.connection.Connection(fd, readable=False)
+ self.wlock = multiprocessing.Lock()
+ # Why bb.event needs this I have no idea
+ self.event = self
+
+ def send(self, obj):
+ obj = multiprocessing.reduction.ForkingPickler.dumps(obj)
+ with self.wlock:
+ self.writer.send_bytes(obj)
+
+ def fileno(self):
+ return self.writer.fileno()
+
+ def close(self):
+ return self.writer.close()
diff --git a/poky/bitbake/lib/bb/server/xmlrpcclient.py b/poky/bitbake/lib/bb/server/xmlrpcclient.py
new file mode 100644
index 000000000..4661a9e5a
--- /dev/null
+++ b/poky/bitbake/lib/bb/server/xmlrpcclient.py
@@ -0,0 +1,154 @@
+#
+# BitBake XMLRPC Client Interface
+#
+# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
+# Copyright (C) 2006 - 2008 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import sys
+
+import socket
+import http.client
+import xmlrpc.client
+
+import bb
+from bb.ui import uievent
+
+class BBTransport(xmlrpc.client.Transport):
+ def __init__(self, timeout):
+ self.timeout = timeout
+ self.connection_token = None
+ xmlrpc.client.Transport.__init__(self)
+
+ # Modified from default to pass timeout to HTTPConnection
+ def make_connection(self, host):
+        # return an existing connection if possible. This allows
+        # HTTP/1.1 keep-alive.
+ if self._connection and host == self._connection[0]:
+ return self._connection[1]
+
+        # create an HTTP connection object from a host descriptor
+        chost, self._extra_headers, x509 = self.get_host_info(host)
+        # store the host argument along with the connection object
+ self._connection = host, http.client.HTTPConnection(chost, timeout=self.timeout)
+ return self._connection[1]
+
+ def set_connection_token(self, token):
+ self.connection_token = token
+
+ def send_content(self, h, body):
+ if self.connection_token:
+ h.putheader("Bitbake-token", self.connection_token)
+ xmlrpc.client.Transport.send_content(self, h, body)
+
+def _create_server(host, port, timeout = 60):
+ t = BBTransport(timeout)
+ s = xmlrpc.client.ServerProxy("http://%s:%d/" % (host, port), transport=t, allow_none=True, use_builtin_types=True)
+ return s, t
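+
+# A minimal sketch of talking to a server via the proxy returned above
+# (host and port are placeholders):
+#
+#   server, transport = _create_server("localhost", 8888, timeout=10)
+#   transport.set_connection_token("observer")
+#   ret, err = server.runCommand(['getVariable', 'TOPDIR'])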
+
+def check_connection(remote, timeout):
+ try:
+ host, port = remote.split(":")
+ port = int(port)
+ except Exception as e:
+ bb.warn("Failed to read remote definition (%s)" % str(e))
+ raise e
+
+ server, _transport = _create_server(host, port, timeout)
+ try:
+ ret, err = server.runCommand(['getVariable', 'TOPDIR'])
+ if err or not ret:
+ return False
+ except ConnectionError:
+ return False
+ return True
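+
+# e.g. check_connection("localhost:8888", 10) returns True only if a server
+# at that (placeholder) address answers the TOPDIR query within the timeout.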
+
+class BitBakeXMLRPCServerConnection(object):
+ def __init__(self, host, port, clientinfo=("localhost", 0), observer_only = False, featureset = None):
+ self.connection, self.transport = _create_server(host, port)
+ self.clientinfo = clientinfo
+ self.observer_only = observer_only
+ if featureset:
+ self.featureset = featureset
+ else:
+ self.featureset = []
+
+ self.events = uievent.BBUIEventQueue(self.connection, self.clientinfo)
+
+ _, error = self.connection.runCommand(["setFeatures", self.featureset])
+ if error:
+            # disconnect the client; we can't make setFeatures work
+            self.connection.removeClient()
+            # no need to log the error here, it is sent back to the client
+ raise BaseException(error)
+
+ def connect(self, token = None):
+ if token is None:
+ if self.observer_only:
+ token = "observer"
+ else:
+ token = self.connection.addClient()
+
+ if token is None:
+ return None
+
+ self.transport.set_connection_token(token)
+ return self
+
+ def removeClient(self):
+ if not self.observer_only:
+ self.connection.removeClient()
+
+ def terminate(self):
+ # Don't wait for server indefinitely
+ socket.setdefaulttimeout(2)
+ try:
+ self.events.system_quit()
+ except:
+ pass
+ try:
+ self.connection.removeClient()
+ except:
+ pass
+
+def connectXMLRPC(remote, featureset, observer_only = False, token = None):
+ # The format of "remote" must be "server:port"
+ try:
+ [host, port] = remote.split(":")
+ port = int(port)
+ except Exception as e:
+ bb.warn("Failed to parse remote definition %s (%s)" % (remote, str(e)))
+ raise e
+
+    # We need our IP address for the server connection. Connecting a UDP
+    # socket to the server address lets us read it from the local socket
+    # name without sending any traffic.
+ try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ s.connect((host, port))
+ ip = s.getsockname()[0]
+ s.close()
+ except Exception as e:
+ bb.warn("Could not create socket for %s:%s (%s)" % (host, port, str(e)))
+ raise e
+ try:
+ connection = BitBakeXMLRPCServerConnection(host, port, (ip, 0), observer_only, featureset)
+ return connection.connect(token)
+ except Exception as e:
+ bb.warn("Could not connect to server at %s:%s (%s)" % (host, port, str(e)))
+ raise e
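+
+# A minimal sketch of connecting as a read-only observer (address is a
+# placeholder):
+#
+#   conn = connectXMLRPC("localhost:8888", [], observer_only=True)
+#   if conn:
+#       ret, err = conn.connection.runCommand(['getVariable', 'TOPDIR'])
+#       conn.terminate()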
+
+
+
diff --git a/poky/bitbake/lib/bb/server/xmlrpcserver.py b/poky/bitbake/lib/bb/server/xmlrpcserver.py
new file mode 100644
index 000000000..875b1282e
--- /dev/null
+++ b/poky/bitbake/lib/bb/server/xmlrpcserver.py
@@ -0,0 +1,158 @@
+#
+# BitBake XMLRPC Server Interface
+#
+# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
+# Copyright (C) 2006 - 2008 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import sys
+
+import hashlib
+import time
+import inspect
+from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
+
+import bb
+
+# This request handler checks if the request has a "Bitbake-token" header
+# field (this comes from the client side) and compares it with its internal
+# "Bitbake-token" field (this comes from the server). If the two are not
+# equal, it is assumed that a client is trying to connect to the server
+# while another client is connected to the server. In this case, a 503 error
+# ("service unavailable") is returned to the client. Note that the token
+# comparison in do_POST below is currently disabled by its 'if 0' guard, so
+# mismatched tokens are not actually rejected.
+class BitBakeXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
+ def __init__(self, request, client_address, server):
+ self.server = server
+ SimpleXMLRPCRequestHandler.__init__(self, request, client_address, server)
+
+ def do_POST(self):
+ try:
+ remote_token = self.headers["Bitbake-token"]
+ except:
+ remote_token = None
+ if 0 and remote_token != self.server.connection_token and remote_token != "observer":
+ self.report_503()
+ else:
+ if remote_token == "observer":
+ self.server.readonly = True
+ else:
+ self.server.readonly = False
+ SimpleXMLRPCRequestHandler.do_POST(self)
+
+ def report_503(self):
+ self.send_response(503)
+        response = 'No more clients allowed'
+ self.send_header("Content-type", "text/plain")
+ self.send_header("Content-length", str(len(response)))
+ self.end_headers()
+ self.wfile.write(bytes(response, 'utf-8'))
+
+class BitBakeXMLRPCServer(SimpleXMLRPCServer):
+ # remove this when you're done with debugging
+ # allow_reuse_address = True
+
+ def __init__(self, interface, cooker, parent):
+ # Use auto port configuration
+ if (interface[1] == -1):
+ interface = (interface[0], 0)
+ SimpleXMLRPCServer.__init__(self, interface,
+ requestHandler=BitBakeXMLRPCRequestHandler,
+ logRequests=False, allow_none=True)
+ self.host, self.port = self.socket.getsockname()
+ self.interface = interface
+
+ self.connection_token = None
+ self.commands = BitBakeXMLRPCServerCommands(self)
+ self.register_functions(self.commands, "")
+
+ self.cooker = cooker
+ self.parent = parent
+
+
+ def register_functions(self, context, prefix):
+ """
+        Convenience method for registering all methods of 'context'
+        whose names start with a common prefix
+ """
+ methodlist = inspect.getmembers(context, inspect.ismethod)
+ for name, method in methodlist:
+ if name.startswith(prefix):
+ self.register_function(method, name[len(prefix):])
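+
+    # For example, register_functions(self.commands, "") in __init__ exposes
+    # every method of BitBakeXMLRPCServerCommands (runCommand, addClient, ...)
+    # under its own name; a prefix such as "xmlrpc_" would be stripped from
+    # the registered names instead.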
+
+ def get_timeout(self, delay):
+ socktimeout = self.socket.gettimeout() or delay
+ return min(socktimeout, delay)
+
+ def handle_requests(self):
+ self._handle_request_noblock()
+
+class BitBakeXMLRPCServerCommands():
+
+ def __init__(self, server):
+ self.server = server
+ self.has_client = False
+
+ def registerEventHandler(self, host, port):
+ """
+ Register a remote UI Event Handler
+ """
+ s, t = bb.server.xmlrpcclient._create_server(host, port)
+
+ # we don't allow connections if the cooker is running
+ if (self.server.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]):
+ return None, "Cooker is busy: %s" % bb.cooker.state.get_name(self.server.cooker.state)
+
+ self.event_handle = bb.event.register_UIHhandler(s, True)
+ return self.event_handle, 'OK'
+
+ def unregisterEventHandler(self, handlerNum):
+ """
+ Unregister a remote UI Event Handler
+ """
+ ret = bb.event.unregister_UIHhandler(handlerNum, True)
+ self.event_handle = None
+ return ret
+
+ def runCommand(self, command):
+ """
+ Run a cooker command on the server
+ """
+ return self.server.cooker.command.runCommand(command, self.server.readonly)
+
+ def getEventHandle(self):
+ return self.event_handle
+
+ def terminateServer(self):
+ """
+ Trigger the server to quit
+ """
+ self.server.parent.quit = True
+ print("XMLRPC Server triggering exit")
+ return
+
+ def addClient(self):
+ if self.server.parent.haveui:
+ return None
+ token = hashlib.md5(str(time.time()).encode("utf-8")).hexdigest()
+ self.server.connection_token = token
+ self.server.parent.haveui = True
+ return token
+
+ def removeClient(self):
+ if self.server.parent.haveui:
+ self.server.connection_token = None
+ self.server.parent.haveui = False
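+
+    # Only one UI client may be attached at a time: addClient() hands out a
+    # fresh token (an md5 of the current time) and sets haveui, while
+    # removeClient() clears both so that a new client can attach.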
+
diff --git a/poky/bitbake/lib/bb/siggen.py b/poky/bitbake/lib/bb/siggen.py
new file mode 100644
index 000000000..5ef82d7be
--- /dev/null
+++ b/poky/bitbake/lib/bb/siggen.py
@@ -0,0 +1,729 @@
+import hashlib
+import logging
+import os
+import re
+import tempfile
+import pickle
+import bb.data
+import difflib
+import simplediff
+from bb.checksum import FileChecksumCache
+
+logger = logging.getLogger('BitBake.SigGen')
+
+def init(d):
+ siggens = [obj for obj in globals().values()
+ if type(obj) is type and issubclass(obj, SignatureGenerator)]
+
+ desired = d.getVar("BB_SIGNATURE_HANDLER") or "noop"
+ for sg in siggens:
+ if desired == sg.name:
+ return sg(d)
+ else:
+ logger.error("Invalid signature generator '%s', using default 'noop'\n"
+ "Available generators: %s", desired,
+ ', '.join(obj.name for obj in siggens))
+ return SignatureGenerator(d)
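+
+# Selection is driven by the BB_SIGNATURE_HANDLER variable; for example,
+# BB_SIGNATURE_HANDLER = "basichash" in a configuration file selects
+# SignatureGeneratorBasicHash below, while an unknown value falls back to
+# the noop SignatureGenerator with an error.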
+
+class SignatureGenerator(object):
+ """
+ """
+ name = "noop"
+
+ def __init__(self, data):
+ self.basehash = {}
+ self.taskhash = {}
+ self.runtaskdeps = {}
+ self.file_checksum_values = {}
+ self.taints = {}
+
+    def finalise(self, fn, d, variant):
+ return
+
+ def get_taskhash(self, fn, task, deps, dataCache):
+ return "0"
+
+ def writeout_file_checksum_cache(self):
+ """Write/update the file checksum cache onto disk"""
+ return
+
+ def stampfile(self, stampbase, file_name, taskname, extrainfo):
+ return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.')
+
+ def stampcleanmask(self, stampbase, file_name, taskname, extrainfo):
+ return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.')
+
+ def dump_sigtask(self, fn, task, stampbase, runtime):
+ return
+
+ def invalidate_task(self, task, d, fn):
+ bb.build.del_stamp(task, d, fn)
+
+ def dump_sigs(self, dataCache, options):
+ return
+
+ def get_taskdata(self):
+ return (self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash)
+
+ def set_taskdata(self, data):
+ self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash = data
+
+ def reset(self, data):
+ self.__init__(data)
+
+
+class SignatureGeneratorBasic(SignatureGenerator):
+ """
+ """
+ name = "basic"
+
+ def __init__(self, data):
+ self.basehash = {}
+ self.taskhash = {}
+ self.taskdeps = {}
+ self.runtaskdeps = {}
+ self.file_checksum_values = {}
+ self.taints = {}
+ self.gendeps = {}
+ self.lookupcache = {}
+        self.pkgnameextract = re.compile(r"(?P<fn>.*)\..*")
+ self.basewhitelist = set((data.getVar("BB_HASHBASE_WHITELIST") or "").split())
+ self.taskwhitelist = None
+ self.init_rundepcheck(data)
+ checksum_cache_file = data.getVar("BB_HASH_CHECKSUM_CACHE_FILE")
+ if checksum_cache_file:
+ self.checksum_cache = FileChecksumCache()
+ self.checksum_cache.init_cache(data, checksum_cache_file)
+ else:
+ self.checksum_cache = None
+
+ def init_rundepcheck(self, data):
+ self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST") or None
+ if self.taskwhitelist:
+ self.twl = re.compile(self.taskwhitelist)
+ else:
+ self.twl = None
+
+ def _build_data(self, fn, d):
+
+ ignore_mismatch = ((d.getVar("BB_HASH_IGNORE_MISMATCH") or '') == '1')
+ tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d)
+
+ taskdeps = {}
+ basehash = {}
+
+ for task in tasklist:
+ data = lookupcache[task]
+
+ if data is None:
+ bb.error("Task %s from %s seems to be empty?!" % (task, fn))
+ data = ''
+
+ gendeps[task] -= self.basewhitelist
+ newdeps = gendeps[task]
+ seen = set()
+ while newdeps:
+ nextdeps = newdeps
+ seen |= nextdeps
+ newdeps = set()
+ for dep in nextdeps:
+ if dep in self.basewhitelist:
+ continue
+ gendeps[dep] -= self.basewhitelist
+ newdeps |= gendeps[dep]
+ newdeps -= seen
+
+ alldeps = sorted(seen)
+ for dep in alldeps:
+ data = data + dep
+ var = lookupcache[dep]
+ if var is not None:
+ data = data + str(var)
+ datahash = hashlib.md5(data.encode("utf-8")).hexdigest()
+ k = fn + "." + task
+ if not ignore_mismatch and k in self.basehash and self.basehash[k] != datahash:
+ bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (k, self.basehash[k], datahash))
+ self.basehash[k] = datahash
+ taskdeps[task] = alldeps
+
+ self.taskdeps[fn] = taskdeps
+ self.gendeps[fn] = gendeps
+ self.lookupcache[fn] = lookupcache
+
+ return taskdeps
+
+ def finalise(self, fn, d, variant):
+
+ mc = d.getVar("__BBMULTICONFIG", False) or ""
+ if variant or mc:
+ fn = bb.cache.realfn2virtual(fn, variant, mc)
+
+ try:
+ taskdeps = self._build_data(fn, d)
+ except bb.parse.SkipRecipe:
+ raise
+ except:
+ bb.warn("Error during finalise of %s" % fn)
+ raise
+
+        # Slow, but can be useful for debugging mismatched basehashes
+ #for task in self.taskdeps[fn]:
+ # self.dump_sigtask(fn, task, d.getVar("STAMP"), False)
+
+ for task in taskdeps:
+ d.setVar("BB_BASEHASH_task-%s" % task, self.basehash[fn + "." + task])
+
+ def rundep_check(self, fn, recipename, task, dep, depname, dataCache):
+ # Return True if we should keep the dependency, False to drop it
+ # We only manipulate the dependencies for packages not in the whitelist
+ if self.twl and not self.twl.search(recipename):
+ # then process the actual dependencies
+ if self.twl.search(depname):
+ return False
+ return True
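+
+        # e.g. with BB_HASHTASK_WHITELIST matching ".*-native", a dependency
+        # from a non-matching recipe onto a matching depname is dropped from
+        # the taskhash calculation.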
+
+ def read_taint(self, fn, task, stampbase):
+ taint = None
+ try:
+ with open(stampbase + '.' + task + '.taint', 'r') as taintf:
+ taint = taintf.read()
+ except IOError:
+ pass
+ return taint
+
+ def get_taskhash(self, fn, task, deps, dataCache):
+ k = fn + "." + task
+ data = dataCache.basetaskhash[k]
+ self.basehash[k] = data
+ self.runtaskdeps[k] = []
+ self.file_checksum_values[k] = []
+ recipename = dataCache.pkg_fn[fn]
+
+ for dep in sorted(deps, key=clean_basepath):
+ depname = dataCache.pkg_fn[self.pkgnameextract.search(dep).group('fn')]
+ if not self.rundep_check(fn, recipename, task, dep, depname, dataCache):
+ continue
+ if dep not in self.taskhash:
+ bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?", dep)
+ data = data + self.taskhash[dep]
+ self.runtaskdeps[k].append(dep)
+
+ if task in dataCache.file_checksums[fn]:
+ if self.checksum_cache:
+ checksums = self.checksum_cache.get_checksums(dataCache.file_checksums[fn][task], recipename)
+ else:
+ checksums = bb.fetch2.get_file_checksums(dataCache.file_checksums[fn][task], recipename)
+ for (f,cs) in checksums:
+ self.file_checksum_values[k].append((f,cs))
+ if cs:
+ data = data + cs
+
+ taskdep = dataCache.task_deps[fn]
+ if 'nostamp' in taskdep and task in taskdep['nostamp']:
+ # Nostamp tasks need an implicit taint so that they force any dependent tasks to run
+ import uuid
+ taint = str(uuid.uuid4())
+ data = data + taint
+ self.taints[k] = "nostamp:" + taint
+
+ taint = self.read_taint(fn, task, dataCache.stamp[fn])
+ if taint:
+ data = data + taint
+ self.taints[k] = taint
+ logger.warning("%s is tainted from a forced run" % k)
+
+ h = hashlib.md5(data.encode("utf-8")).hexdigest()
+ self.taskhash[k] = h
+ #d.setVar("BB_TASKHASH_task-%s" % task, taskhash[task])
+ return h
+
+ def writeout_file_checksum_cache(self):
+ """Write/update the file checksum cache onto disk"""
+ if self.checksum_cache:
+ self.checksum_cache.save_extras()
+ self.checksum_cache.save_merge()
+ else:
+ bb.fetch2.fetcher_parse_save()
+ bb.fetch2.fetcher_parse_done()
+
+ def dump_sigtask(self, fn, task, stampbase, runtime):
+
+ k = fn + "." + task
+ referencestamp = stampbase
+ if isinstance(runtime, str) and runtime.startswith("customfile"):
+ sigfile = stampbase
+ referencestamp = runtime[11:]
+ elif runtime and k in self.taskhash:
+ sigfile = stampbase + "." + task + ".sigdata" + "." + self.taskhash[k]
+ else:
+ sigfile = stampbase + "." + task + ".sigbasedata" + "." + self.basehash[k]
+
+ bb.utils.mkdirhier(os.path.dirname(sigfile))
+
+ data = {}
+ data['task'] = task
+ data['basewhitelist'] = self.basewhitelist
+ data['taskwhitelist'] = self.taskwhitelist
+ data['taskdeps'] = self.taskdeps[fn][task]
+ data['basehash'] = self.basehash[k]
+ data['gendeps'] = {}
+ data['varvals'] = {}
+ data['varvals'][task] = self.lookupcache[fn][task]
+ for dep in self.taskdeps[fn][task]:
+ if dep in self.basewhitelist:
+ continue
+ data['gendeps'][dep] = self.gendeps[fn][dep]
+ data['varvals'][dep] = self.lookupcache[fn][dep]
+
+ if runtime and k in self.taskhash:
+ data['runtaskdeps'] = self.runtaskdeps[k]
+ data['file_checksum_values'] = [(os.path.basename(f), cs) for f,cs in self.file_checksum_values[k]]
+ data['runtaskhashes'] = {}
+ for dep in data['runtaskdeps']:
+ data['runtaskhashes'][dep] = self.taskhash[dep]
+ data['taskhash'] = self.taskhash[k]
+
+ taint = self.read_taint(fn, task, referencestamp)
+ if taint:
+ data['taint'] = taint
+
+ if runtime and k in self.taints:
+ if 'nostamp:' in self.taints[k]:
+ data['taint'] = self.taints[k]
+
+ computed_basehash = calc_basehash(data)
+ if computed_basehash != self.basehash[k]:
+ bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[k], k))
+ if runtime and k in self.taskhash:
+ computed_taskhash = calc_taskhash(data)
+ if computed_taskhash != self.taskhash[k]:
+ bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[k], k))
+ sigfile = sigfile.replace(self.taskhash[k], computed_taskhash)
+
+ fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
+ try:
+ with os.fdopen(fd, "wb") as stream:
+                pickle.dump(data, stream, -1)
+ stream.flush()
+ os.chmod(tmpfile, 0o664)
+ os.rename(tmpfile, sigfile)
+ except (OSError, IOError) as err:
+ try:
+ os.unlink(tmpfile)
+ except OSError:
+ pass
+ raise err
+
+ def dump_sigfn(self, fn, dataCaches, options):
+ if fn in self.taskdeps:
+ for task in self.taskdeps[fn]:
+ tid = fn + ":" + task
+ (mc, _, _) = bb.runqueue.split_tid(tid)
+ k = fn + "." + task
+ if k not in self.taskhash:
+ continue
+ if dataCaches[mc].basetaskhash[k] != self.basehash[k]:
+ bb.error("Bitbake's cached basehash does not match the one we just generated (%s)!" % k)
+ bb.error("The mismatched hashes were %s and %s" % (dataCaches[mc].basetaskhash[k], self.basehash[k]))
+ self.dump_sigtask(fn, task, dataCaches[mc].stamp[fn], True)
+
+class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
+ name = "basichash"
+
+ def stampfile(self, stampbase, fn, taskname, extrainfo, clean=False):
+ if taskname != "do_setscene" and taskname.endswith("_setscene"):
+ k = fn + "." + taskname[:-9]
+ else:
+ k = fn + "." + taskname
+ if clean:
+ h = "*"
+ elif k in self.taskhash:
+ h = self.taskhash[k]
+ else:
+            # If k is not in basehash either, the resulting KeyError is a real error
+ h = self.basehash[k]
+ return ("%s.%s.%s.%s" % (stampbase, taskname, h, extrainfo)).rstrip('.')
+
+ def stampcleanmask(self, stampbase, fn, taskname, extrainfo):
+ return self.stampfile(stampbase, fn, taskname, extrainfo, clean=True)
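+
+    # e.g. for do_compile with task hash "0123abcd...", stampfile() yields
+    # "<stampbase>.do_compile.0123abcd....<extrainfo>" while stampcleanmask()
+    # yields "<stampbase>.do_compile.*.<extrainfo>", a glob matching stamps
+    # for any hash.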
+
+ def invalidate_task(self, task, d, fn):
+ bb.note("Tainting hash to force rebuild of task %s, %s" % (fn, task))
+ bb.build.write_taint(task, d, fn)
+
+def dump_this_task(outfile, d):
+ import bb.parse
+ fn = d.getVar("BB_FILENAME")
+ task = "do_" + d.getVar("BB_CURRENTTASK")
+ referencestamp = bb.build.stamp_internal(task, d, None, True)
+ bb.parse.siggen.dump_sigtask(fn, task, outfile, "customfile:" + referencestamp)
+
+def init_colors(enable_color):
+ """Initialise colour dict for passing to compare_sigfiles()"""
+ # First set up the colours
+ colors = {'color_title': '\033[1;37;40m',
+ 'color_default': '\033[0;37;40m',
+ 'color_add': '\033[1;32;40m',
+ 'color_remove': '\033[1;31;40m',
+ }
+ # Leave all keys present but clear the values
+ if not enable_color:
+ for k in colors.keys():
+ colors[k] = ''
+ return colors
+
+def worddiff_str(oldstr, newstr, colors=None):
+ if not colors:
+ colors = init_colors(False)
+ diff = simplediff.diff(oldstr.split(' '), newstr.split(' '))
+ ret = []
+ for change, value in diff:
+ value = ' '.join(value)
+ if change == '=':
+ ret.append(value)
+ elif change == '+':
+ item = '{color_add}{{+{value}+}}{color_default}'.format(value=value, **colors)
+ ret.append(item)
+ elif change == '-':
+ item = '{color_remove}[-{value}-]{color_default}'.format(value=value, **colors)
+ ret.append(item)
+ whitespace_note = ''
+ if oldstr != newstr and ' '.join(oldstr.split()) == ' '.join(newstr.split()):
+ whitespace_note = ' (whitespace changed)'
+ return '"%s"%s' % (' '.join(ret), whitespace_note)
+
+def list_inline_diff(oldlist, newlist, colors=None):
+ if not colors:
+ colors = init_colors(False)
+ diff = simplediff.diff(oldlist, newlist)
+ ret = []
+ for change, value in diff:
+ value = ' '.join(value)
+ if change == '=':
+ ret.append("'%s'" % value)
+ elif change == '+':
+ item = '{color_add}+{value}{color_default}'.format(value=value, **colors)
+ ret.append(item)
+ elif change == '-':
+ item = '{color_remove}-{value}{color_default}'.format(value=value, **colors)
+ ret.append(item)
+ return '[%s]' % (', '.join(ret))
+
+def clean_basepath(a):
+ mc = None
+ if a.startswith("multiconfig:"):
+ _, mc, a = a.split(":", 2)
+ b = a.rsplit("/", 2)[1] + '/' + a.rsplit("/", 2)[2]
+ if a.startswith("virtual:"):
+ b = b + ":" + a.rsplit(":", 1)[0]
+ if mc:
+ b = b + ":multiconfig:" + mc
+ return b
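+
+# A couple of illustrative inputs (paths are placeholders):
+#   clean_basepath("/meta/recipes/foo/foo_1.0.bb")
+#       -> "foo/foo_1.0.bb"
+#   clean_basepath("virtual:native:/meta/recipes/foo/foo_1.0.bb")
+#       -> "foo/foo_1.0.bb:virtual:native"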
+
+def clean_basepaths(a):
+ b = {}
+ for x in a:
+ b[clean_basepath(x)] = a[x]
+ return b
+
+def clean_basepaths_list(a):
+ b = []
+ for x in a:
+ b.append(clean_basepath(x))
+ return b
+
+def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
+ output = []
+
+ colors = init_colors(color)
+ def color_format(formatstr, **values):
+ """
+ Return colour formatted string.
+ NOTE: call with the format string, not an already formatted string
+ containing values (otherwise you could have trouble with { and }
+ characters)
+ """
+ if not formatstr.endswith('{color_default}'):
+ formatstr += '{color_default}'
+ # In newer python 3 versions you can pass both of these directly,
+ # but we only require 3.4 at the moment
+ formatparams = {}
+ formatparams.update(colors)
+ formatparams.update(values)
+ return formatstr.format(**formatparams)
+
+ with open(a, 'rb') as f:
+ p1 = pickle.Unpickler(f)
+ a_data = p1.load()
+ with open(b, 'rb') as f:
+ p2 = pickle.Unpickler(f)
+ b_data = p2.load()
+
+ def dict_diff(a, b, whitelist=set()):
+ sa = set(a.keys())
+ sb = set(b.keys())
+ common = sa & sb
+ changed = set()
+ for i in common:
+ if a[i] != b[i] and i not in whitelist:
+ changed.add(i)
+ added = sb - sa
+ removed = sa - sb
+ return changed, added, removed
+
+ def file_checksums_diff(a, b):
+ from collections import Counter
+ # Handle old siginfo format
+ if isinstance(a, dict):
+ a = [(os.path.basename(f), cs) for f, cs in a.items()]
+ if isinstance(b, dict):
+ b = [(os.path.basename(f), cs) for f, cs in b.items()]
+ # Compare lists, ensuring we can handle duplicate filenames if they exist
+ removedcount = Counter(a)
+ removedcount.subtract(b)
+ addedcount = Counter(b)
+ addedcount.subtract(a)
+ added = []
+ for x in b:
+ if addedcount[x] > 0:
+ addedcount[x] -= 1
+ added.append(x)
+ removed = []
+ changed = []
+ for x in a:
+ if removedcount[x] > 0:
+ removedcount[x] -= 1
+ for y in added:
+ if y[0] == x[0]:
+ changed.append((x[0], x[1], y[1]))
+ added.remove(y)
+ break
+ else:
+ removed.append(x)
+ added = [x[0] for x in added]
+ removed = [x[0] for x in removed]
+ return changed, added, removed
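+
+    # e.g. comparing [("a", "1"), ("b", "2")] against [("a", "3"), ("c", "4")]
+    # gives changed=[("a", "1", "3")], added=["c"], removed=["b"].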
+
+ if 'basewhitelist' in a_data and a_data['basewhitelist'] != b_data['basewhitelist']:
+ output.append(color_format("{color_title}basewhitelist changed{color_default} from '%s' to '%s'") % (a_data['basewhitelist'], b_data['basewhitelist']))
+ if a_data['basewhitelist'] and b_data['basewhitelist']:
+ output.append("changed items: %s" % a_data['basewhitelist'].symmetric_difference(b_data['basewhitelist']))
+
+ if 'taskwhitelist' in a_data and a_data['taskwhitelist'] != b_data['taskwhitelist']:
+ output.append(color_format("{color_title}taskwhitelist changed{color_default} from '%s' to '%s'") % (a_data['taskwhitelist'], b_data['taskwhitelist']))
+ if a_data['taskwhitelist'] and b_data['taskwhitelist']:
+ output.append("changed items: %s" % a_data['taskwhitelist'].symmetric_difference(b_data['taskwhitelist']))
+
+ if a_data['taskdeps'] != b_data['taskdeps']:
+ output.append(color_format("{color_title}Task dependencies changed{color_default} from:\n%s\nto:\n%s") % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps'])))
+
+ if a_data['basehash'] != b_data['basehash'] and not collapsed:
+ output.append(color_format("{color_title}basehash changed{color_default} from %s to %s") % (a_data['basehash'], b_data['basehash']))
+
+ changed, added, removed = dict_diff(a_data['gendeps'], b_data['gendeps'], a_data['basewhitelist'] & b_data['basewhitelist'])
+ if changed:
+ for dep in changed:
+ output.append(color_format("{color_title}List of dependencies for variable %s changed from '{color_default}%s{color_title}' to '{color_default}%s{color_title}'") % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep]))
+ if a_data['gendeps'][dep] and b_data['gendeps'][dep]:
+ output.append("changed items: %s" % a_data['gendeps'][dep].symmetric_difference(b_data['gendeps'][dep]))
+ if added:
+ for dep in added:
+ output.append(color_format("{color_title}Dependency on variable %s was added") % (dep))
+ if removed:
+ for dep in removed:
+ output.append(color_format("{color_title}Dependency on Variable %s was removed") % (dep))
+
+
+ changed, added, removed = dict_diff(a_data['varvals'], b_data['varvals'])
+ if changed:
+ for dep in changed:
+ oldval = a_data['varvals'][dep]
+ newval = b_data['varvals'][dep]
+ if newval and oldval and ('\n' in oldval or '\n' in newval):
+ diff = difflib.unified_diff(oldval.splitlines(), newval.splitlines(), lineterm='')
+ # Cut off the first two lines, since we aren't interested in
+ # the old/new filename (they are blank anyway in this case)
+ difflines = list(diff)[2:]
+ if color:
+ # Add colour to diff output
+ for i, line in enumerate(difflines):
+ if line.startswith('+'):
+ line = color_format('{color_add}{line}', line=line)
+ difflines[i] = line
+ elif line.startswith('-'):
+ line = color_format('{color_remove}{line}', line=line)
+ difflines[i] = line
+ output.append(color_format("{color_title}Variable {var} value changed:{color_default}\n{diff}", var=dep, diff='\n'.join(difflines)))
+ elif newval and oldval and (' ' in oldval or ' ' in newval):
+ output.append(color_format("{color_title}Variable {var} value changed:{color_default}\n{diff}", var=dep, diff=worddiff_str(oldval, newval, colors)))
+ else:
+ output.append(color_format("{color_title}Variable {var} value changed from '{color_default}{oldval}{color_title}' to '{color_default}{newval}{color_title}'{color_default}", var=dep, oldval=oldval, newval=newval))
+
+ if not 'file_checksum_values' in a_data:
+ a_data['file_checksum_values'] = {}
+ if not 'file_checksum_values' in b_data:
+ b_data['file_checksum_values'] = {}
+
+ changed, added, removed = file_checksums_diff(a_data['file_checksum_values'], b_data['file_checksum_values'])
+ if changed:
+ for f, old, new in changed:
+ output.append(color_format("{color_title}Checksum for file %s changed{color_default} from %s to %s") % (f, old, new))
+ if added:
+ for f in added:
+ output.append(color_format("{color_title}Dependency on checksum of file %s was added") % (f))
+ if removed:
+ for f in removed:
+ output.append(color_format("{color_title}Dependency on checksum of file %s was removed") % (f))
+
+ if not 'runtaskdeps' in a_data:
+ a_data['runtaskdeps'] = {}
+ if not 'runtaskdeps' in b_data:
+ b_data['runtaskdeps'] = {}
+
+ if not collapsed:
+        if len(a_data['runtaskdeps']) != len(b_data['runtaskdeps']):
+            changed = ["Number of task dependencies changed"]
+        else:
+            # Only compare pairwise when the lists are the same length,
+            # otherwise indexing b_data below would fail
+            changed = []
+            for idx, task in enumerate(a_data['runtaskdeps']):
+                a = a_data['runtaskdeps'][idx]
+                b = b_data['runtaskdeps'][idx]
+                if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b] and not collapsed:
+                    changed.append("%s with hash %s\n    changed to\n%s with hash %s" % (clean_basepath(a), a_data['runtaskhashes'][a], clean_basepath(b), b_data['runtaskhashes'][b]))
+
+ if changed:
+ clean_a = clean_basepaths_list(a_data['runtaskdeps'])
+ clean_b = clean_basepaths_list(b_data['runtaskdeps'])
+ if clean_a != clean_b:
+ output.append(color_format("{color_title}runtaskdeps changed:{color_default}\n%s") % list_inline_diff(clean_a, clean_b, colors))
+ else:
+ output.append(color_format("{color_title}runtaskdeps changed:"))
+ output.append("\n".join(changed))
+
+
+ if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data:
+ a = a_data['runtaskhashes']
+ b = b_data['runtaskhashes']
+ changed, added, removed = dict_diff(a, b)
+ if added:
+ for dep in added:
+ bdep_found = False
+ if removed:
+ for bdep in removed:
+ if b[dep] == a[bdep]:
+ #output.append("Dependency on task %s was replaced by %s with same hash" % (dep, bdep))
+ bdep_found = True
+ if not bdep_found:
+ output.append(color_format("{color_title}Dependency on task %s was added{color_default} with hash %s") % (clean_basepath(dep), b[dep]))
+ if removed:
+ for dep in removed:
+ adep_found = False
+ if added:
+ for adep in added:
+ if b[adep] == a[dep]:
+ #output.append("Dependency on task %s was replaced by %s with same hash" % (adep, dep))
+ adep_found = True
+ if not adep_found:
+ output.append(color_format("{color_title}Dependency on task %s was removed{color_default} with hash %s") % (clean_basepath(dep), a[dep]))
+ if changed:
+ for dep in changed:
+ if not collapsed:
+ output.append(color_format("{color_title}Hash for dependent task %s changed{color_default} from %s to %s") % (clean_basepath(dep), a[dep], b[dep]))
+ if callable(recursecb):
+ recout = recursecb(dep, a[dep], b[dep])
+ if recout:
+ if collapsed:
+ output.extend(recout)
+ else:
+ # If a dependent hash changed, might as well print the line above and then defer to the changes in
+                            # that hash since, in all likelihood, they're the same changes this task also saw.
+ output = [output[-1]] + recout
+
+ a_taint = a_data.get('taint', None)
+ b_taint = b_data.get('taint', None)
+ if a_taint != b_taint:
+ output.append(color_format("{color_title}Taint (by forced/invalidated task) changed{color_default} from %s to %s") % (a_taint, b_taint))
+
+ return output
+
+
+def calc_basehash(sigdata):
+ task = sigdata['task']
+ basedata = sigdata['varvals'][task]
+
+ if basedata is None:
+ basedata = ''
+
+ alldeps = sigdata['taskdeps']
+ for dep in alldeps:
+ basedata = basedata + dep
+ val = sigdata['varvals'][dep]
+ if val is not None:
+ basedata = basedata + str(val)
+
+ return hashlib.md5(basedata.encode("utf-8")).hexdigest()
+
+def calc_taskhash(sigdata):
+ data = sigdata['basehash']
+
+ for dep in sigdata['runtaskdeps']:
+ data = data + sigdata['runtaskhashes'][dep]
+
+ for c in sigdata['file_checksum_values']:
+ if c[1]:
+ data = data + c[1]
+
+ if 'taint' in sigdata:
+ if 'nostamp:' in sigdata['taint']:
+ data = data + sigdata['taint'][8:]
+ else:
+ data = data + sigdata['taint']
+
+ return hashlib.md5(data.encode("utf-8")).hexdigest()
+
+
+def dump_sigfile(a):
+ output = []
+
+ with open(a, 'rb') as f:
+ p1 = pickle.Unpickler(f)
+ a_data = p1.load()
+
+ output.append("basewhitelist: %s" % (a_data['basewhitelist']))
+
+ output.append("taskwhitelist: %s" % (a_data['taskwhitelist']))
+
+ output.append("Task dependencies: %s" % (sorted(a_data['taskdeps'])))
+
+ output.append("basehash: %s" % (a_data['basehash']))
+
+ for dep in a_data['gendeps']:
+ output.append("List of dependencies for variable %s is %s" % (dep, a_data['gendeps'][dep]))
+
+ for dep in a_data['varvals']:
+ output.append("Variable %s value is %s" % (dep, a_data['varvals'][dep]))
+
+ if 'runtaskdeps' in a_data:
+ output.append("Tasks this task depends on: %s" % (a_data['runtaskdeps']))
+
+ if 'file_checksum_values' in a_data:
+ output.append("This task depends on the checksums of files: %s" % (a_data['file_checksum_values']))
+
+ if 'runtaskhashes' in a_data:
+ for dep in a_data['runtaskhashes']:
+ output.append("Hash for dependent task %s is %s" % (dep, a_data['runtaskhashes'][dep]))
+
+ if 'taint' in a_data:
+ output.append("Tainted (by forced/invalidated task): %s" % a_data['taint'])
+
+ if 'task' in a_data:
+ computed_basehash = calc_basehash(a_data)
+ output.append("Computed base hash is %s and from file %s" % (computed_basehash, a_data['basehash']))
+ else:
+ output.append("Unable to compute base hash")
+
+ computed_taskhash = calc_taskhash(a_data)
+ output.append("Computed task hash is %s" % computed_taskhash)
+
+ return output
diff --git a/poky/bitbake/lib/bb/taskdata.py b/poky/bitbake/lib/bb/taskdata.py
new file mode 100644
index 000000000..0ea6c0bfd
--- /dev/null
+++ b/poky/bitbake/lib/bb/taskdata.py
@@ -0,0 +1,578 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'TaskData' implementation
+
+Task data collection and handling
+
+"""
+
+# Copyright (C) 2006 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import logging
+import re
+import bb
+
+logger = logging.getLogger("BitBake.TaskData")
+
+def re_match_strings(target, strings):
+ """
+    Return True if the string 'target' matches any one of the given
+    strings, each of which may be a regular expression
+ """
+ return any(name == target or re.match(name, target)
+ for name in strings)
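+
+# e.g. re_match_strings("foo-native", ["^foo-.*"]) is True, while
+# re_match_strings("foo", ["bar", "baz"]) is False.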
+
+class TaskEntry:
+ def __init__(self):
+ self.tdepends = []
+ self.idepends = []
+ self.irdepends = []
+
+class TaskData:
+ """
+ BitBake Task Data implementation
+ """
+ def __init__(self, abort = True, skiplist = None, allowincomplete = False):
+ self.build_targets = {}
+ self.run_targets = {}
+
+ self.external_targets = []
+
+ self.seenfns = []
+ self.taskentries = {}
+
+ self.depids = {}
+ self.rdepids = {}
+
+ self.consider_msgs_cache = []
+
+ self.failed_deps = []
+ self.failed_rdeps = []
+ self.failed_fns = []
+
+ self.abort = abort
+ self.allowincomplete = allowincomplete
+
+ self.skiplist = skiplist
+
+ def add_tasks(self, fn, dataCache):
+ """
+ Add tasks for a given fn to the database
+ """
+
+ task_deps = dataCache.task_deps[fn]
+
+ if fn in self.failed_fns:
+ bb.msg.fatal("TaskData", "Trying to re-add a failed file? Something is broken...")
+
+ # Check if we've already seen this fn
+ if fn in self.seenfns:
+ return
+
+ self.seenfns.append(fn)
+
+ self.add_extra_deps(fn, dataCache)
+
+ # Common code for dep_name/depends = 'depends'/idepends and 'rdepends'/irdepends
+ def handle_deps(task, dep_name, depends, seen):
+ if dep_name in task_deps and task in task_deps[dep_name]:
+ ids = []
+ for dep in task_deps[dep_name][task].split():
+ if dep:
+ parts = dep.split(":")
+ if len(parts) != 2:
+ bb.msg.fatal("TaskData", "Error for %s:%s[%s], dependency %s in '%s' does not contain exactly one ':' character.\n Task '%s' should be specified in the form 'packagename:task'" % (fn, task, dep_name, dep, task_deps[dep_name][task], dep_name))
+ ids.append((parts[0], parts[1]))
+ seen(parts[0])
+ depends.extend(ids)
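+
+        # 'depends'/'rdepends' flag values use the "packagename:task" form,
+        # e.g. do_configure[depends] = "quilt-native:do_populate_sysroot"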
+
+ for task in task_deps['tasks']:
+
+ tid = "%s:%s" % (fn, task)
+ self.taskentries[tid] = TaskEntry()
+
+ # Work out task dependencies
+ parentids = []
+ for dep in task_deps['parents'][task]:
+ if dep not in task_deps['tasks']:
+ bb.debug(2, "Not adding dependeny of %s on %s since %s does not exist" % (task, dep, dep))
+ continue
+ parentid = "%s:%s" % (fn, dep)
+ parentids.append(parentid)
+ self.taskentries[tid].tdepends.extend(parentids)
+
+ # Touch all intertask dependencies
+ handle_deps(task, 'depends', self.taskentries[tid].idepends, self.seen_build_target)
+ handle_deps(task, 'rdepends', self.taskentries[tid].irdepends, self.seen_run_target)
+
+ # Work out build dependencies
+ if not fn in self.depids:
+ dependids = set()
+ for depend in dataCache.deps[fn]:
+ dependids.add(depend)
+ self.depids[fn] = list(dependids)
+ logger.debug(2, "Added dependencies %s for %s", str(dataCache.deps[fn]), fn)
+
+ # Work out runtime dependencies
+ if not fn in self.rdepids:
+ rdependids = set()
+ rdepends = dataCache.rundeps[fn]
+ rrecs = dataCache.runrecs[fn]
+ rdependlist = []
+ rreclist = []
+ for package in rdepends:
+ for rdepend in rdepends[package]:
+ rdependlist.append(rdepend)
+ rdependids.add(rdepend)
+ for package in rrecs:
+ for rdepend in rrecs[package]:
+ rreclist.append(rdepend)
+ rdependids.add(rdepend)
+ if rdependlist:
+ logger.debug(2, "Added runtime dependencies %s for %s", str(rdependlist), fn)
+ if rreclist:
+ logger.debug(2, "Added runtime recommendations %s for %s", str(rreclist), fn)
+ self.rdepids[fn] = list(rdependids)
+
+ for dep in self.depids[fn]:
+ self.seen_build_target(dep)
+ if dep in self.failed_deps:
+ self.fail_fn(fn)
+ return
+ for dep in self.rdepids[fn]:
+ self.seen_run_target(dep)
+ if dep in self.failed_rdeps:
+ self.fail_fn(fn)
+ return
+
+ def add_extra_deps(self, fn, dataCache):
+ func = dataCache.extradepsfunc.get(fn, None)
+ if func:
+ bb.providers.buildWorldTargetList(dataCache)
+ pn = dataCache.pkg_fn[fn]
+ params = {'deps': dataCache.deps[fn],
+ 'world_target': dataCache.world_target,
+ 'pkg_pn': dataCache.pkg_pn,
+ 'self_pn': pn}
+ funcname = '_%s_calculate_extra_depends' % pn.replace('-', '_')
+ paramlist = ','.join(params.keys())
+ func = 'def %s(%s):\n%s\n\n%s(%s)' % (funcname, paramlist, func, funcname, paramlist)
+ bb.utils.better_exec(func, params)
+
+
+ def have_build_target(self, target):
+ """
+ Have we a build target matching this name?
+ """
+ if target in self.build_targets and self.build_targets[target]:
+ return True
+ return False
+
+ def have_runtime_target(self, target):
+ """
+ Have we a runtime target matching this name?
+ """
+ if target in self.run_targets and self.run_targets[target]:
+ return True
+ return False
+
+ def seen_build_target(self, name):
+ """
+ Maintain a list of build targets
+ """
+ if name not in self.build_targets:
+ self.build_targets[name] = []
+
+ def add_build_target(self, fn, item):
+ """
+ Add a build target.
+ If already present, append the provider fn to the list
+ """
+ if item in self.build_targets:
+ if fn in self.build_targets[item]:
+ return
+ self.build_targets[item].append(fn)
+ return
+ self.build_targets[item] = [fn]
+
+ def seen_run_target(self, name):
+ """
+ Maintain a list of runtime build targets
+ """
+ if name not in self.run_targets:
+ self.run_targets[name] = []
+
+ def add_runtime_target(self, fn, item):
+ """
+ Add a runtime target.
+ If already present, append the provider fn to the list
+ """
+ if item in self.run_targets:
+ if fn in self.run_targets[item]:
+ return
+ self.run_targets[item].append(fn)
+ return
+ self.run_targets[item] = [fn]
+
+ def mark_external_target(self, target):
+ """
+ Mark a build target as being externally requested
+ """
+ if target not in self.external_targets:
+ self.external_targets.append(target)
+
+ def get_unresolved_build_targets(self, dataCache):
+ """
+        Return a list of build targets whose providers
+        are unknown.
+ """
+ unresolved = []
+ for target in self.build_targets:
+ if re_match_strings(target, dataCache.ignored_dependencies):
+ continue
+ if target in self.failed_deps:
+ continue
+ if not self.build_targets[target]:
+ unresolved.append(target)
+ return unresolved
+
+ def get_unresolved_run_targets(self, dataCache):
+ """
+        Return a list of runtime targets whose providers
+        are unknown.
+ """
+ unresolved = []
+ for target in self.run_targets:
+ if re_match_strings(target, dataCache.ignored_dependencies):
+ continue
+ if target in self.failed_rdeps:
+ continue
+ if not self.run_targets[target]:
+ unresolved.append(target)
+ return unresolved
+
+ def get_provider(self, item):
+ """
+ Return a list of providers of item
+ """
+ return self.build_targets[item]
+
+ def get_dependees(self, item):
+ """
+ Return a list of targets which depend on item
+ """
+ dependees = []
+ for fn in self.depids:
+ if item in self.depids[fn]:
+ dependees.append(fn)
+ return dependees
+
+ def get_rdependees(self, item):
+ """
+ Return a list of targets which depend on runtime item
+ """
+ dependees = []
+ for fn in self.rdepids:
+ if item in self.rdepids[fn]:
+ dependees.append(fn)
+ return dependees
+
+ def get_reasons(self, item, runtime=False):
+ """
+ Get the reason(s) for an item not being provided, if any
+ """
+ reasons = []
+ if self.skiplist:
+ for fn in self.skiplist:
+ skipitem = self.skiplist[fn]
+ if skipitem.pn == item:
+ reasons.append("%s was skipped: %s" % (skipitem.pn, skipitem.skipreason))
+ elif runtime and item in skipitem.rprovides:
+ reasons.append("%s RPROVIDES %s but was skipped: %s" % (skipitem.pn, item, skipitem.skipreason))
+ elif not runtime and item in skipitem.provides:
+ reasons.append("%s PROVIDES %s but was skipped: %s" % (skipitem.pn, item, skipitem.skipreason))
+ return reasons
+
+ def get_close_matches(self, item, provider_list):
+ import difflib
+ if self.skiplist:
+ skipped = []
+ for fn in self.skiplist:
+ skipped.append(self.skiplist[fn].pn)
+ full_list = provider_list + skipped
+ else:
+ full_list = provider_list
+ return difflib.get_close_matches(item, full_list, cutoff=0.7)
+
+ def add_provider(self, cfgData, dataCache, item):
+ try:
+ self.add_provider_internal(cfgData, dataCache, item)
+ except bb.providers.NoProvider:
+ if self.abort:
+ raise
+ self.remove_buildtarget(item)
+
+ self.mark_external_target(item)
+
+ def add_provider_internal(self, cfgData, dataCache, item):
+ """
+ Add the providers of item to the task data
+        Mark entries that were specifically added externally, as opposed to
+        dependencies added internally during dependency resolution
+ """
+
+ if re_match_strings(item, dataCache.ignored_dependencies):
+ return
+
+ if not item in dataCache.providers:
+ close_matches = self.get_close_matches(item, list(dataCache.providers.keys()))
+ # Is it in RuntimeProviders ?
+ all_p = bb.providers.getRuntimeProviders(dataCache, item)
+ for fn in all_p:
+ new = dataCache.pkg_fn[fn] + " RPROVIDES " + item
+ if new not in close_matches:
+ close_matches.append(new)
+ bb.event.fire(bb.event.NoProvider(item, dependees=self.get_dependees(item), reasons=self.get_reasons(item), close_matches=close_matches), cfgData)
+ raise bb.providers.NoProvider(item)
+
+ if self.have_build_target(item):
+ return
+
+ all_p = dataCache.providers[item]
+
+ eligible, foundUnique = bb.providers.filterProviders(all_p, item, cfgData, dataCache)
+ eligible = [p for p in eligible if not p in self.failed_fns]
+
+ if not eligible:
+ bb.event.fire(bb.event.NoProvider(item, dependees=self.get_dependees(item), reasons=["No eligible PROVIDERs exist for '%s'" % item]), cfgData)
+ raise bb.providers.NoProvider(item)
+
+ if len(eligible) > 1 and foundUnique == False:
+ if item not in self.consider_msgs_cache:
+ providers_list = []
+ for fn in eligible:
+ providers_list.append(dataCache.pkg_fn[fn])
+ bb.event.fire(bb.event.MultipleProviders(item, providers_list), cfgData)
+ self.consider_msgs_cache.append(item)
+
+ for fn in eligible:
+ if fn in self.failed_fns:
+ continue
+ logger.debug(2, "adding %s to satisfy %s", fn, item)
+ self.add_build_target(fn, item)
+ self.add_tasks(fn, dataCache)
+
+
+ #item = dataCache.pkg_fn[fn]
+
+ def add_rprovider(self, cfgData, dataCache, item):
+ """
+ Add the runtime providers of item to the task data
+ (takes item names from RDEPENDS/PACKAGES namespace)
+ """
+
+ if re_match_strings(item, dataCache.ignored_dependencies):
+ return
+
+ if self.have_runtime_target(item):
+ return
+
+ all_p = bb.providers.getRuntimeProviders(dataCache, item)
+
+ if not all_p:
+ bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees(item), reasons=self.get_reasons(item, True)), cfgData)
+ raise bb.providers.NoRProvider(item)
+
+ eligible, numberPreferred = bb.providers.filterProvidersRunTime(all_p, item, cfgData, dataCache)
+ eligible = [p for p in eligible if not p in self.failed_fns]
+
+ if not eligible:
+ bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees(item), reasons=["No eligible RPROVIDERs exist for '%s'" % item]), cfgData)
+ raise bb.providers.NoRProvider(item)
+
+ if len(eligible) > 1 and numberPreferred == 0:
+ if item not in self.consider_msgs_cache:
+ providers_list = []
+ for fn in eligible:
+ providers_list.append(dataCache.pkg_fn[fn])
+ bb.event.fire(bb.event.MultipleProviders(item, providers_list, runtime=True), cfgData)
+ self.consider_msgs_cache.append(item)
+
+ if numberPreferred > 1:
+ if item not in self.consider_msgs_cache:
+ providers_list = []
+ for fn in eligible:
+ providers_list.append(dataCache.pkg_fn[fn])
+ bb.event.fire(bb.event.MultipleProviders(item, providers_list, runtime=True), cfgData)
+ self.consider_msgs_cache.append(item)
+ raise bb.providers.MultipleRProvider(item)
+
+ # run through the list until we find one that we can build
+ for fn in eligible:
+ if fn in self.failed_fns:
+ continue
+ logger.debug(2, "adding '%s' to satisfy runtime '%s'", fn, item)
+ self.add_runtime_target(fn, item)
+ self.add_tasks(fn, dataCache)
+
+ def fail_fn(self, fn, missing_list=None):
+ """
+ Mark a file as failed (unbuildable)
+ Remove any references from build and runtime provider lists
+
+ missing_list, A list of missing requirements for this target
+ """
+ if fn in self.failed_fns:
+ return
+ if not missing_list:
+ missing_list = []
+ logger.debug(1, "File '%s' is unbuildable, removing...", fn)
+ self.failed_fns.append(fn)
+ for target in self.build_targets:
+ if fn in self.build_targets[target]:
+ self.build_targets[target].remove(fn)
+ if len(self.build_targets[target]) == 0:
+ self.remove_buildtarget(target, missing_list)
+ for target in self.run_targets:
+ if fn in self.run_targets[target]:
+ self.run_targets[target].remove(fn)
+ if len(self.run_targets[target]) == 0:
+ self.remove_runtarget(target, missing_list)
+
+ def remove_buildtarget(self, target, missing_list=None):
+ """
+ Mark a build target as failed (unbuildable)
+ Trigger removal of any files that have this as a dependency
+ """
+ if not missing_list:
+ missing_list = [target]
+ else:
+ missing_list = [target] + missing_list
+ logger.verbose("Target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s", target, missing_list)
+ self.failed_deps.append(target)
+ dependees = self.get_dependees(target)
+ for fn in dependees:
+ self.fail_fn(fn, missing_list)
+ for tid in self.taskentries:
+ for (idepend, idependtask) in self.taskentries[tid].idepends:
+ if idepend == target:
+ fn = tid.rsplit(":",1)[0]
+ self.fail_fn(fn, missing_list)
+
+ if self.abort and target in self.external_targets:
+ logger.error("Required build target '%s' has no buildable providers.\nMissing or unbuildable dependency chain was: %s", target, missing_list)
+ raise bb.providers.NoProvider(target)
+
+ def remove_runtarget(self, target, missing_list=None):
+ """
+ Mark a run target as failed (unbuildable)
+ Trigger removal of any files that have this as a dependency
+ """
+ if not missing_list:
+ missing_list = [target]
+ else:
+ missing_list = [target] + missing_list
+
+ logger.info("Runtime target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s", target, missing_list)
+ self.failed_rdeps.append(target)
+ dependees = self.get_rdependees(target)
+ for fn in dependees:
+ self.fail_fn(fn, missing_list)
+ for tid in self.taskentries:
+ for (idepend, idependtask) in self.taskentries[tid].irdepends:
+ if idepend == target:
+ fn = tid.rsplit(":",1)[0]
+ self.fail_fn(fn, missing_list)
+
+ def add_unresolved(self, cfgData, dataCache):
+ """
+ Resolve all unresolved build and runtime targets
+ """
+ logger.info("Resolving any missing task queue dependencies")
+ while True:
+ added = 0
+ for target in self.get_unresolved_build_targets(dataCache):
+ try:
+ self.add_provider_internal(cfgData, dataCache, target)
+ added = added + 1
+ except bb.providers.NoProvider:
+ if self.abort and target in self.external_targets and not self.allowincomplete:
+ raise
+ if not self.allowincomplete:
+ self.remove_buildtarget(target)
+ for target in self.get_unresolved_run_targets(dataCache):
+ try:
+ self.add_rprovider(cfgData, dataCache, target)
+ added = added + 1
+ except (bb.providers.NoRProvider, bb.providers.MultipleRProvider):
+ self.remove_runtarget(target)
+ logger.debug(1, "Resolved " + str(added) + " extra dependencies")
+ if added == 0:
+ break
+ # self.dump_data()
+
+ def get_providermap(self, prefix=None):
+ provmap = {}
+ for name in self.build_targets:
+ if prefix and not name.startswith(prefix):
+ continue
+ if self.have_build_target(name):
+ provider = self.get_provider(name)
+ if provider:
+ provmap[name] = provider[0]
+ return provmap
+
+ def dump_data(self):
+ """
+ Dump some debug information on the internal data structures
+ """
+ logger.debug(3, "build_names:")
+ logger.debug(3, ", ".join(self.build_targets))
+
+ logger.debug(3, "run_names:")
+ logger.debug(3, ", ".join(self.run_targets))
+
+ logger.debug(3, "build_targets:")
+ for target in self.build_targets:
+ targets = "None"
+ if target in self.build_targets:
+ targets = self.build_targets[target]
+ logger.debug(3, " %s: %s", target, targets)
+
+ logger.debug(3, "run_targets:")
+ for target in self.run_targets:
+ targets = "None"
+ if target in self.run_targets:
+ targets = self.run_targets[target]
+ logger.debug(3, " %s: %s", target, targets)
+
+ logger.debug(3, "tasks:")
+ for tid in self.taskentries:
+ logger.debug(3, " %s: %s %s %s",
+ tid,
+ self.taskentries[tid].idepends,
+ self.taskentries[tid].irdepends,
+ self.taskentries[tid].tdepends)
+
+ logger.debug(3, "dependency ids (per fn):")
+ for fn in self.depids:
+ logger.debug(3, " %s: %s", fn, self.depids[fn])
+
+ logger.debug(3, "runtime dependency ids (per fn):")
+ for fn in self.rdepids:
+ logger.debug(3, " %s: %s", fn, self.rdepids[fn])
diff --git a/poky/bitbake/lib/bb/tests/__init__.py b/poky/bitbake/lib/bb/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/__init__.py
diff --git a/poky/bitbake/lib/bb/tests/codeparser.py b/poky/bitbake/lib/bb/tests/codeparser.py
new file mode 100644
index 000000000..e30e78c15
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/codeparser.py
@@ -0,0 +1,428 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# BitBake Test for codeparser.py
+#
+# Copyright (C) 2010 Chris Larson
+# Copyright (C) 2012 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+import unittest
+import logging
+import bb
+
+logger = logging.getLogger('BitBake.TestCodeParser')
+
+# bb.data references bb.parse but can't directly import due to circular dependencies.
+# Hack around it for now :(
+import bb.parse
+import bb.data
+
+class ReferenceTest(unittest.TestCase):
+ def setUp(self):
+ self.d = bb.data.init()
+
+ def setEmptyVars(self, varlist):
+ for k in varlist:
+ self.d.setVar(k, "")
+
+ def setValues(self, values):
+ for k, v in values.items():
+ self.d.setVar(k, v)
+
+ def assertReferences(self, refs):
+ self.assertEqual(self.references, refs)
+
+ def assertExecs(self, execs):
+ self.assertEqual(self.execs, execs)
+
+ def assertContains(self, contains):
+ self.assertEqual(self.contains, contains)
+
+class VariableReferenceTest(ReferenceTest):
+
+ def parseExpression(self, exp):
+ parsedvar = self.d.expandWithRefs(exp, None)
+ self.references = parsedvar.references
+
+ def test_simple_reference(self):
+ self.setEmptyVars(["FOO"])
+ self.parseExpression("${FOO}")
+ self.assertReferences(set(["FOO"]))
+
+ def test_nested_reference(self):
+ self.setEmptyVars(["BAR"])
+ self.d.setVar("FOO", "BAR")
+ self.parseExpression("${${FOO}}")
+ self.assertReferences(set(["FOO", "BAR"]))
+
+ def test_python_reference(self):
+ self.setEmptyVars(["BAR"])
+ self.parseExpression("${@d.getVar('BAR') + 'foo'}")
+ self.assertReferences(set(["BAR"]))
+
+class ShellReferenceTest(ReferenceTest):
+
+ def parseExpression(self, exp):
+ parsedvar = self.d.expandWithRefs(exp, None)
+ parser = bb.codeparser.ShellParser("ParserTest", logger)
+ parser.parse_shell(parsedvar.value)
+
+ self.references = parsedvar.references
+ self.execs = parser.execs
+
+ def test_quotes_inside_assign(self):
+ self.parseExpression('foo=foo"bar"baz')
+ self.assertReferences(set([]))
+
+ def test_quotes_inside_arg(self):
+ self.parseExpression('sed s#"bar baz"#"alpha beta"#g')
+ self.assertExecs(set(["sed"]))
+
+ def test_arg_continuation(self):
+ self.parseExpression("sed -i -e s,foo,bar,g \\\n *.pc")
+ self.assertExecs(set(["sed"]))
+
+ def test_dollar_in_quoted(self):
+ self.parseExpression('sed -i -e "foo$" *.pc')
+ self.assertExecs(set(["sed"]))
+
+ def test_quotes_inside_arg_continuation(self):
+ self.setEmptyVars(["bindir", "D", "libdir"])
+ self.parseExpression("""
+sed -i -e s#"moc_location=.*$"#"moc_location=${bindir}/moc4"# \\
+-e s#"uic_location=.*$"#"uic_location=${bindir}/uic4"# \\
+${D}${libdir}/pkgconfig/*.pc
+""")
+ self.assertReferences(set(["bindir", "D", "libdir"]))
+
+ def test_assign_subshell_expansion(self):
+ self.parseExpression("foo=$(echo bar)")
+ self.assertExecs(set(["echo"]))
+
+ def test_shell_unexpanded(self):
+ self.setEmptyVars(["QT_BASE_NAME"])
+ self.parseExpression('echo "${QT_BASE_NAME}"')
+ self.assertExecs(set(["echo"]))
+ self.assertReferences(set(["QT_BASE_NAME"]))
+
+ def test_incomplete_varexp_single_quotes(self):
+ self.parseExpression("sed -i -e 's:IP{:I${:g' $pc")
+ self.assertExecs(set(["sed"]))
+
+
+ def test_until(self):
+ self.parseExpression("until false; do echo true; done")
+ self.assertExecs(set(["false", "echo"]))
+ self.assertReferences(set())
+
+ def test_case(self):
+ self.parseExpression("""
+case $foo in
+*)
+bar
+;;
+esac
+""")
+ self.assertExecs(set(["bar"]))
+ self.assertReferences(set())
+
+ def test_assign_exec(self):
+ self.parseExpression("a=b c='foo bar' alpha 1 2 3")
+ self.assertExecs(set(["alpha"]))
+
+ def test_redirect_to_file(self):
+ self.setEmptyVars(["foo"])
+ self.parseExpression("echo foo >${foo}/bar")
+ self.assertExecs(set(["echo"]))
+ self.assertReferences(set(["foo"]))
+
+ def test_heredoc(self):
+ self.setEmptyVars(["theta"])
+ self.parseExpression("""
+cat <<END
+alpha
+beta
+${theta}
+END
+""")
+ self.assertReferences(set(["theta"]))
+
+ def test_redirect_from_heredoc(self):
+ v = ["B", "SHADOW_MAILDIR", "SHADOW_MAILFILE", "SHADOW_UTMPDIR", "SHADOW_LOGDIR", "bindir"]
+ self.setEmptyVars(v)
+ self.parseExpression("""
+cat <<END >${B}/cachedpaths
+shadow_cv_maildir=${SHADOW_MAILDIR}
+shadow_cv_mailfile=${SHADOW_MAILFILE}
+shadow_cv_utmpdir=${SHADOW_UTMPDIR}
+shadow_cv_logdir=${SHADOW_LOGDIR}
+shadow_cv_passwd_dir=${bindir}
+END
+""")
+ self.assertReferences(set(v))
+ self.assertExecs(set(["cat"]))
+
+# def test_incomplete_command_expansion(self):
+# self.assertRaises(reftracker.ShellSyntaxError, reftracker.execs,
+# bbvalue.shparse("cp foo`", self.d), self.d)
+
+# def test_rogue_dollarsign(self):
+# self.setValues({"D" : "/tmp"})
+# self.parseExpression("install -d ${D}$")
+# self.assertReferences(set(["D"]))
+# self.assertExecs(set(["install"]))
+
+
+class PythonReferenceTest(ReferenceTest):
+
+ def setUp(self):
+ self.d = bb.data.init()
+ if hasattr(bb.utils, "_context"):
+ self.context = bb.utils._context
+ else:
+ import builtins
+ self.context = builtins.__dict__
+
+ def parseExpression(self, exp):
+ parsedvar = self.d.expandWithRefs(exp, None)
+ parser = bb.codeparser.PythonParser("ParserTest", logger)
+ parser.parse_python(parsedvar.value)
+
+ self.references = parsedvar.references | parser.references
+ self.execs = parser.execs
+ self.contains = parser.contains
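+ # references combines variables seen during expansion with the
+ # d.getVar() names the Python parser finds; execs are the callables
+ # invoked, and contains tracks bb.utils.contains()-style queries.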
+
+ @staticmethod
+ def indent(value):
+ """Python Snippets have to be indented, python values don't have to
+be. These unit tests are testing snippets."""
+ return " " + value
+
+ def test_getvar_reference(self):
+ self.parseExpression("d.getVar('foo')")
+ self.assertReferences(set(["foo"]))
+ self.assertExecs(set())
+
+ def test_getvar_computed_reference(self):
+ self.parseExpression("d.getVar('f' + 'o' + 'o')")
+ self.assertReferences(set())
+ self.assertExecs(set())
+
+ def test_getvar_exec_reference(self):
+ self.parseExpression("eval('d.getVar(\"foo\")')")
+ self.assertReferences(set())
+ self.assertExecs(set(["eval"]))
+
+ def test_var_reference(self):
+ self.context["foo"] = lambda x: x
+ self.setEmptyVars(["FOO"])
+ self.parseExpression("foo('${FOO}')")
+ self.assertReferences(set(["FOO"]))
+ self.assertExecs(set(["foo"]))
+ del self.context["foo"]
+
+ def test_var_exec(self):
+ for etype in ("func", "task"):
+ self.d.setVar("do_something", "echo 'hi mom! ${FOO}'")
+ self.d.setVarFlag("do_something", etype, True)
+ self.parseExpression("bb.build.exec_func('do_something', d)")
+ self.assertReferences(set([]))
+ self.assertExecs(set(["do_something"]))
+
+ def test_function_reference(self):
+ self.context["testfunc"] = lambda msg: bb.msg.note(1, None, msg)
+ self.d.setVar("FOO", "Hello, World!")
+ self.parseExpression("testfunc('${FOO}')")
+ self.assertReferences(set(["FOO"]))
+ self.assertExecs(set(["testfunc"]))
+ del self.context["testfunc"]
+
+ def test_qualified_function_reference(self):
+ self.parseExpression("time.time()")
+ self.assertExecs(set(["time.time"]))
+
+ def test_qualified_function_reference_2(self):
+ self.parseExpression("os.path.dirname('/foo/bar')")
+ self.assertExecs(set(["os.path.dirname"]))
+
+ def test_qualified_function_reference_nested(self):
+ self.parseExpression("time.strftime('%Y%m%d',time.gmtime())")
+ self.assertExecs(set(["time.strftime", "time.gmtime"]))
+
+ def test_function_reference_chained(self):
+ self.context["testget"] = lambda: "\tstrip me "
+ self.parseExpression("testget().strip()")
+ self.assertExecs(set(["testget"]))
+ del self.context["testget"]
+
+ def test_contains(self):
+ self.parseExpression('bb.utils.contains("TESTVAR", "one", "true", "false", d)')
+ self.assertContains({'TESTVAR': {'one'}})
+
+ def test_contains_multi(self):
+ self.parseExpression('bb.utils.contains("TESTVAR", "one two", "true", "false", d)')
+ self.assertContains({'TESTVAR': {'one two'}})
+
+ def test_contains_any(self):
+ self.parseExpression('bb.utils.contains_any("TESTVAR", "hello", "true", "false", d)')
+ self.assertContains({'TESTVAR': {'hello'}})
+
+ def test_contains_any_multi(self):
+ self.parseExpression('bb.utils.contains_any("TESTVAR", "one two three", "true", "false", d)')
+ self.assertContains({'TESTVAR': {'one', 'two', 'three'}})
+
+ def test_contains_filter(self):
+ self.parseExpression('bb.utils.filter("TESTVAR", "hello there world", d)')
+ self.assertContains({'TESTVAR': {'hello', 'there', 'world'}})
+
+
+class DependencyReferenceTest(ReferenceTest):
+
+ pydata = """
+d.getVar('somevar')
+def test(d):
+ foo = 'bar %s' % 'foo'
+def test2(d):
+ d.getVar(foo)
+ d.getVar('bar', False)
+ test2(d)
+
+def a():
+ \"\"\"some
+ stuff
+ \"\"\"
+ return "heh"
+
+test(d)
+
+d.expand(d.getVar("something", False))
+d.expand("${inexpand} somethingelse")
+d.getVar(a(), False)
+"""
+
+ def test_python(self):
+ self.d.setVar("FOO", self.pydata)
+ self.setEmptyVars(["inexpand", "a", "test2", "test"])
+ self.d.setVarFlags("FOO", {
+ "func": True,
+ "python": True,
+ "lineno": 1,
+ "filename": "example.bb",
+ })
+
+ deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), self.d)
+
+ self.assertEqual(deps, set(["somevar", "bar", "something", "inexpand", "test", "test2", "a"]))
+
+
+ shelldata = """
+foo () {
+bar
+}
+{
+echo baz
+$(heh)
+eval `moo`
+}
+a=b
+c=d
+(
+true && false
+test -f foo
+testval=something
+$testval
+) || aiee
+! inverted
+echo ${somevar}
+
+case foo in
+bar)
+echo bar
+;;
+baz)
+echo baz
+;;
+foo*)
+echo foo
+;;
+esac
+"""
+
+ def test_shell(self):
+ execs = ["bar", "echo", "heh", "moo", "true", "aiee"]
+ self.d.setVar("somevar", "heh")
+ self.d.setVar("inverted", "echo inverted...")
+ self.d.setVarFlag("inverted", "func", True)
+ self.d.setVar("FOO", self.shelldata)
+ self.d.setVarFlags("FOO", {"func": True})
+ self.setEmptyVars(execs)
+
+ deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), self.d)
+
+ self.assertEqual(deps, set(["somevar", "inverted"] + execs))
+
+ def test_vardeps(self):
+ self.d.setVar("oe_libinstall", "echo test")
+ self.d.setVar("FOO", "foo=oe_libinstall; eval $foo")
+ self.d.setVarFlag("FOO", "vardeps", "oe_libinstall")
+
+ deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), self.d)
+
+ self.assertEqual(deps, set(["oe_libinstall"]))
+
+ def test_vardeps_expand(self):
+ self.d.setVar("oe_libinstall", "echo test")
+ self.d.setVar("FOO", "foo=oe_libinstall; eval $foo")
+ self.d.setVarFlag("FOO", "vardeps", "${@'oe_libinstall'}")
+
+ deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), self.d)
+
+ self.assertEqual(deps, set(["oe_libinstall"]))
+
+ def test_contains_vardeps(self):
+ expr = '${@bb.utils.filter("TESTVAR", "somevalue anothervalue", d)} \
+ ${@bb.utils.contains("TESTVAR", "testval testval2", "yetanothervalue", "", d)} \
+ ${@bb.utils.contains("TESTVAR", "testval2 testval3", "blah", "", d)} \
+ ${@bb.utils.contains_any("TESTVAR", "testval2 testval3", "lastone", "", d)}'
+ parsedvar = self.d.expandWithRefs(expr, None)
+ # Check contains
+ self.assertEqual(parsedvar.contains, {'TESTVAR': {'testval2 testval3', 'anothervalue', 'somevalue', 'testval testval2', 'testval2', 'testval3'}})
+ # Check dependencies
+ self.d.setVar('ANOTHERVAR', expr)
+ self.d.setVar('TESTVAR', 'anothervalue testval testval2')
+ deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), self.d)
+ self.assertEqual(sorted(values.splitlines()),
+ sorted([expr,
+ 'TESTVAR{anothervalue} = Set',
+ 'TESTVAR{somevalue} = Unset',
+ 'TESTVAR{testval testval2} = Set',
+ 'TESTVAR{testval2 testval3} = Unset',
+ 'TESTVAR{testval2} = Set',
+ 'TESTVAR{testval3} = Unset'
+ ]))
+ # Check final value
+ self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['anothervalue', 'yetanothervalue', 'lastone'])
+
+ # Currently no wildcard support
+ #def test_vardeps_wildcards(self):
+ # self.d.setVar("oe_libinstall", "echo test")
+ # self.d.setVar("FOO", "foo=oe_libinstall; eval $foo")
+ # self.d.setVarFlag("FOO", "vardeps", "oe_*")
+ # self.assertEquals(deps, set(["oe_libinstall"]))
+
+
diff --git a/poky/bitbake/lib/bb/tests/cow.py b/poky/bitbake/lib/bb/tests/cow.py
new file mode 100644
index 000000000..d149d84d0
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/cow.py
@@ -0,0 +1,136 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# BitBake Tests for Copy-on-Write (cow.py)
+#
+# Copyright 2006 Holger Freyther <freyther@handhelds.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+import unittest
+import os
+
+class COWTestCase(unittest.TestCase):
+ """
+ Test case for the COW module from mithro
+ """
+
+ def testGetSet(self):
+ """
+ Test get and set
+ """
+ from bb.COW import COWDictBase
+ a = COWDictBase.copy()
+
+ self.assertEqual(False, 'a' in a)
+
+ a['a'] = 'a'
+ a['b'] = 'b'
+ self.assertEqual(True, 'a' in a)
+ self.assertEqual(True, 'b' in a)
+ self.assertEqual('a', a['a'] )
+ self.assertEqual('b', a['b'] )
+
+ def testCopyCopy(self):
+ """
+ Test the copy of copies
+ """
+
+ from bb.COW import COWDictBase
+
+ # create two COW dict 'instances'
+ b = COWDictBase.copy()
+ c = COWDictBase.copy()
+
+ # assign some keys to one instance, some keys to another
+ b['a'] = 10
+ b['c'] = 20
+ c['a'] = 30
+
+ # test separation of the two instances
+ self.assertEqual(False, 'c' in c)
+ self.assertEqual(30, c['a'])
+ self.assertEqual(10, b['a'])
+
+ # test copy
+ b_2 = b.copy()
+ c_2 = c.copy()
+
+ self.assertEqual(False, 'c' in c_2)
+ self.assertEqual(10, b_2['a'])
+
+ b_2['d'] = 40
+ self.assertEqual(False, 'd' in c_2)
+ self.assertEqual(True, 'd' in b_2)
+ self.assertEqual(40, b_2['d'])
+ self.assertEqual(False, 'd' in b)
+ self.assertEqual(False, 'd' in c)
+
+ c_2['d'] = 30
+ self.assertEqual(True, 'd' in c_2)
+ self.assertEqual(True, 'd' in b_2)
+ self.assertEqual(30, c_2['d'])
+ self.assertEqual(40, b_2['d'])
+ self.assertEqual(False, 'd' in b)
+ self.assertEqual(False, 'd' in c)
+
+ # test copy of the copy
+ c_3 = c_2.copy()
+ b_3 = b_2.copy()
+ b_3_2 = b_2.copy()
+
+ c_3['e'] = 4711
+ self.assertEqual(4711, c_3['e'])
+ self.assertEqual(False, 'e' in c_2)
+ self.assertEqual(False, 'e' in b_3)
+ self.assertEqual(False, 'e' in b_3_2)
+ self.assertEqual(False, 'e' in b_2)
+
+ b_3['e'] = 'viel'
+ self.assertEqual('viel', b_3['e'])
+ self.assertEqual(4711, c_3['e'])
+ self.assertEqual(False, 'e' in c_2)
+ self.assertEqual(True, 'e' in b_3)
+ self.assertEqual(False, 'e' in b_3_2)
+ self.assertEqual(False, 'e' in b_2)
+
+ def testCow(self):
+ from bb.COW import COWDictBase
+ c = COWDictBase.copy()
+ c['123'] = 1027
+ c['other'] = 4711
+ c['d'] = { 'abc' : 10, 'bcd' : 20 }
+
+ copy = c.copy()
+
+ self.assertEqual(1027, c['123'])
+ self.assertEqual(4711, c['other'])
+ self.assertEqual({'abc':10, 'bcd':20}, c['d'])
+ self.assertEqual(1027, copy['123'])
+ self.assertEqual(4711, copy['other'])
+ self.assertEqual({'abc':10, 'bcd':20}, copy['d'])
+
+ # cow it now
+ copy['123'] = 1028
+ copy['other'] = 4712
+ copy['d']['abc'] = 20
+
+ self.assertEqual(1027, c['123'])
+ self.assertEqual(4711, c['other'])
+ self.assertEqual({'abc':10, 'bcd':20}, c['d'])
+ self.assertEqual(1028, copy['123'])
+ self.assertEqual(4712, copy['other'])
+ self.assertEqual({'abc':20, 'bcd':20}, copy['d'])
diff --git a/poky/bitbake/lib/bb/tests/data.py b/poky/bitbake/lib/bb/tests/data.py
new file mode 100644
index 000000000..a4a9dd30f
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/data.py
@@ -0,0 +1,607 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# BitBake Tests for the Data Store (data.py/data_smart.py)
+#
+# Copyright (C) 2010 Chris Larson
+# Copyright (C) 2012 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+import unittest
+import bb
+import bb.data
+import bb.parse
+import logging
+
+class LogRecord():
+ def __enter__(self):
+ logs = []
+ class LogHandler(logging.Handler):
+ def emit(self, record):
+ logs.append(record)
+ logger = logging.getLogger("BitBake")
+ handler = LogHandler()
+ self.handler = handler
+ logger.addHandler(handler)
+ return logs
+ def __exit__(self, type, value, traceback):
+ logger = logging.getLogger("BitBake")
+ logger.removeHandler(self.handler)
+ return
+
+def logContains(item, logs):
+ for l in logs:
+ m = l.getMessage()
+ if item in m:
+ return True
+ return False
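+
+# Typical use of these two helpers (see TestKeyExpansion below):
+#   with LogRecord() as logs:
+#       bb.data.expandKeys(d)
+#   assert logContains("replaces original key", logs)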
+
+class DataExpansions(unittest.TestCase):
+ def setUp(self):
+ self.d = bb.data.init()
+ self.d["foo"] = "value_of_foo"
+ self.d["bar"] = "value_of_bar"
+ self.d["value_of_foo"] = "value_of_'value_of_foo'"
+
+ def test_one_var(self):
+ val = self.d.expand("${foo}")
+ self.assertEqual(str(val), "value_of_foo")
+
+ def test_indirect_one_var(self):
+ val = self.d.expand("${${foo}}")
+ self.assertEqual(str(val), "value_of_'value_of_foo'")
+
+ def test_indirect_and_another(self):
+ val = self.d.expand("${${foo}} ${bar}")
+ self.assertEqual(str(val), "value_of_'value_of_foo' value_of_bar")
+
+ def test_python_snippet(self):
+ val = self.d.expand("${@5*12}")
+ self.assertEqual(str(val), "60")
+
+ def test_expand_in_python_snippet(self):
+ val = self.d.expand("${@'boo ' + '${foo}'}")
+ self.assertEqual(str(val), "boo value_of_foo")
+
+ def test_python_snippet_getvar(self):
+ val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
+ self.assertEqual(str(val), "value_of_foo value_of_bar")
+
+ def test_python_unexpanded(self):
+ self.d.setVar("bar", "${unsetvar}")
+ val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
+ self.assertEqual(str(val), "${@d.getVar('foo') + ' ${unsetvar}'}")
+
+ def test_python_snippet_syntax_error(self):
+ self.d.setVar("FOO", "${@foo = 5}")
+ self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
+
+ def test_python_snippet_runtime_error(self):
+ self.d.setVar("FOO", "${@int('test')}")
+ self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
+
+ def test_python_snippet_error_path(self):
+ self.d.setVar("FOO", "foo value ${BAR}")
+ self.d.setVar("BAR", "bar value ${@int('test')}")
+ self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
+
+ def test_value_containing_value(self):
+ val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
+ self.assertEqual(str(val), "value_of_foo value_of_bar")
+
+ def test_reference_undefined_var(self):
+ val = self.d.expand("${undefinedvar} meh")
+ self.assertEqual(str(val), "${undefinedvar} meh")
+
+ def test_double_reference(self):
+ self.d.setVar("BAR", "bar value")
+ self.d.setVar("FOO", "${BAR} foo ${BAR}")
+ val = self.d.getVar("FOO")
+ self.assertEqual(str(val), "bar value foo bar value")
+
+ def test_direct_recursion(self):
+ self.d.setVar("FOO", "${FOO}")
+ self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
+
+ def test_indirect_recursion(self):
+ self.d.setVar("FOO", "${BAR}")
+ self.d.setVar("BAR", "${BAZ}")
+ self.d.setVar("BAZ", "${FOO}")
+ self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
+
+ def test_recursion_exception(self):
+ self.d.setVar("FOO", "${BAR}")
+ self.d.setVar("BAR", "${${@'FOO'}}")
+ self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
+
+ def test_incomplete_varexp_single_quotes(self):
+ self.d.setVar("FOO", "sed -i -e 's:IP{:I${:g' $pc")
+ val = self.d.getVar("FOO")
+ self.assertEqual(str(val), "sed -i -e 's:IP{:I${:g' $pc")
+
+ def test_nonstring(self):
+ self.d.setVar("TEST", 5)
+ val = self.d.getVar("TEST")
+ self.assertEqual(str(val), "5")
+
+ def test_rename(self):
+ self.d.renameVar("foo", "newfoo")
+ self.assertEqual(self.d.getVar("newfoo", False), "value_of_foo")
+ self.assertEqual(self.d.getVar("foo", False), None)
+
+ def test_deletion(self):
+ self.d.delVar("foo")
+ self.assertEqual(self.d.getVar("foo", False), None)
+
+ def test_keys(self):
+ keys = list(self.d.keys())
+ self.assertCountEqual(keys, ['value_of_foo', 'foo', 'bar'])
+
+ def test_keys_deletion(self):
+ newd = bb.data.createCopy(self.d)
+ newd.delVar("bar")
+ keys = list(newd.keys())
+ self.assertCountEqual(keys, ['value_of_foo', 'foo'])
+
+class TestNestedExpansions(unittest.TestCase):
+ def setUp(self):
+ self.d = bb.data.init()
+ self.d["foo"] = "foo"
+ self.d["bar"] = "bar"
+ self.d["value_of_foobar"] = "187"
+
+ def test_refs(self):
+ val = self.d.expand("${value_of_${foo}${bar}}")
+ self.assertEqual(str(val), "187")
+
+ #def test_python_refs(self):
+ # val = self.d.expand("${@${@3}**2 + ${@4}**2}")
+ # self.assertEqual(str(val), "25")
+
+ def test_ref_in_python_ref(self):
+ val = self.d.expand("${@'${foo}' + 'bar'}")
+ self.assertEqual(str(val), "foobar")
+
+ def test_python_ref_in_ref(self):
+ val = self.d.expand("${${@'f'+'o'+'o'}}")
+ self.assertEqual(str(val), "foo")
+
+ def test_deep_nesting(self):
+ depth = 100
+ val = self.d.expand("${" * depth + "foo" + "}" * depth)
+ self.assertEqual(str(val), "foo")
+
+ #def test_deep_python_nesting(self):
+ # depth = 50
+ # val = self.d.expand("${@" * depth + "1" + "+1}" * depth)
+ # self.assertEqual(str(val), str(depth + 1))
+
+ def test_mixed(self):
+ val = self.d.expand("${value_of_${@('${foo}'+'bar')[0:3]}${${@'BAR'.lower()}}}")
+ self.assertEqual(str(val), "187")
+
+ def test_runtime(self):
+ val = self.d.expand("${${@'value_of' + '_f'+'o'+'o'+'b'+'a'+'r'}}")
+ self.assertEqual(str(val), "187")
+
+class TestMemoize(unittest.TestCase):
+ def test_memoized(self):
+ d = bb.data.init()
+ d.setVar("FOO", "bar")
+ self.assertTrue(d.getVar("FOO", False) is d.getVar("FOO", False))
+
+ def test_not_memoized(self):
+ d1 = bb.data.init()
+ d2 = bb.data.init()
+ d1.setVar("FOO", "bar")
+ d2.setVar("FOO", "bar2")
+ self.assertTrue(d1.getVar("FOO", False) is not d2.getVar("FOO", False))
+
+ def test_changed_after_memoized(self):
+ d = bb.data.init()
+ d.setVar("foo", "value of foo")
+ self.assertEqual(str(d.getVar("foo", False)), "value of foo")
+ d.setVar("foo", "second value of foo")
+ self.assertEqual(str(d.getVar("foo", False)), "second value of foo")
+
+ def test_same_value(self):
+ d = bb.data.init()
+ d.setVar("foo", "value of")
+ d.setVar("bar", "value of")
+ self.assertEqual(d.getVar("foo", False),
+ d.getVar("bar", False))
+
+class TestConcat(unittest.TestCase):
+ def setUp(self):
+ self.d = bb.data.init()
+ self.d.setVar("FOO", "foo")
+ self.d.setVar("VAL", "val")
+ self.d.setVar("BAR", "bar")
+
+ def test_prepend(self):
+ self.d.setVar("TEST", "${VAL}")
+ self.d.prependVar("TEST", "${FOO}:")
+ self.assertEqual(self.d.getVar("TEST"), "foo:val")
+
+ def test_append(self):
+ self.d.setVar("TEST", "${VAL}")
+ self.d.appendVar("TEST", ":${BAR}")
+ self.assertEqual(self.d.getVar("TEST"), "val:bar")
+
+ def test_multiple_append(self):
+ self.d.setVar("TEST", "${VAL}")
+ self.d.prependVar("TEST", "${FOO}:")
+ self.d.appendVar("TEST", ":val2")
+ self.d.appendVar("TEST", ":${BAR}")
+ self.assertEqual(self.d.getVar("TEST"), "foo:val:val2:bar")
+
+class TestConcatOverride(unittest.TestCase):
+ def setUp(self):
+ self.d = bb.data.init()
+ self.d.setVar("FOO", "foo")
+ self.d.setVar("VAL", "val")
+ self.d.setVar("BAR", "bar")
+
+ def test_prepend(self):
+ self.d.setVar("TEST", "${VAL}")
+ self.d.setVar("TEST_prepend", "${FOO}:")
+ self.assertEqual(self.d.getVar("TEST"), "foo:val")
+
+ def test_append(self):
+ self.d.setVar("TEST", "${VAL}")
+ self.d.setVar("TEST_append", ":${BAR}")
+ self.assertEqual(self.d.getVar("TEST"), "val:bar")
+
+ def test_multiple_append(self):
+ self.d.setVar("TEST", "${VAL}")
+ self.d.setVar("TEST_prepend", "${FOO}:")
+ self.d.setVar("TEST_append", ":val2")
+ self.d.setVar("TEST_append", ":${BAR}")
+ self.assertEqual(self.d.getVar("TEST"), "foo:val:val2:bar")
+
+ def test_append_unset(self):
+ self.d.setVar("TEST_prepend", "${FOO}:")
+ self.d.setVar("TEST_append", ":val2")
+ self.d.setVar("TEST_append", ":${BAR}")
+ self.assertEqual(self.d.getVar("TEST"), "foo::val2:bar")
+
+ def test_remove(self):
+ self.d.setVar("TEST", "${VAL} ${BAR}")
+ self.d.setVar("TEST_remove", "val")
+ self.assertEqual(self.d.getVar("TEST"), "bar")
+
+ def test_remove_cleared(self):
+ self.d.setVar("TEST", "${VAL} ${BAR}")
+ self.d.setVar("TEST_remove", "val")
+ self.d.setVar("TEST", "${VAL} ${BAR}")
+ self.assertEqual(self.d.getVar("TEST"), "val bar")
+
+ # Ensure the value is unchanged if we have an inactive remove override
+ # (including that whitespace is preserved)
+ def test_remove_inactive_override(self):
+ self.d.setVar("TEST", "${VAL} ${BAR} 123")
+ self.d.setVar("TEST_remove_inactiveoverride", "val")
+ self.assertEqual(self.d.getVar("TEST"), "val bar 123")
+
+ def test_doubleref_remove(self):
+ self.d.setVar("TEST", "${VAL} ${BAR}")
+ self.d.setVar("TEST_remove", "val")
+ self.d.setVar("TEST_TEST", "${TEST} ${TEST}")
+ self.assertEqual(self.d.getVar("TEST_TEST"), "bar bar")
+
+ def test_empty_remove(self):
+ self.d.setVar("TEST", "")
+ self.d.setVar("TEST_remove", "val")
+ self.assertEqual(self.d.getVar("TEST"), "")
+
+ def test_remove_expansion(self):
+ self.d.setVar("BAR", "Z")
+ self.d.setVar("TEST", "${BAR}/X Y")
+ self.d.setVar("TEST_remove", "${BAR}/X")
+ self.assertEqual(self.d.getVar("TEST"), "Y")
+
+ def test_remove_expansion_items(self):
+ self.d.setVar("TEST", "A B C D")
+ self.d.setVar("BAR", "B D")
+ self.d.setVar("TEST_remove", "${BAR}")
+ self.assertEqual(self.d.getVar("TEST"), "A C")
+
+class TestOverrides(unittest.TestCase):
+ def setUp(self):
+ self.d = bb.data.init()
+ self.d.setVar("OVERRIDES", "foo:bar:local")
+ self.d.setVar("TEST", "testvalue")
+
+ def test_no_override(self):
+ self.assertEqual(self.d.getVar("TEST"), "testvalue")
+
+ def test_one_override(self):
+ self.d.setVar("TEST_bar", "testvalue2")
+ self.assertEqual(self.d.getVar("TEST"), "testvalue2")
+
+ def test_one_override_unset(self):
+ self.d.setVar("TEST2_bar", "testvalue2")
+
+ self.assertEqual(self.d.getVar("TEST2"), "testvalue2")
+ self.assertCountEqual(list(self.d.keys()), ['TEST', 'TEST2', 'OVERRIDES', 'TEST2_bar'])
+
+ def test_multiple_override(self):
+ self.d.setVar("TEST_bar", "testvalue2")
+ self.d.setVar("TEST_local", "testvalue3")
+ self.d.setVar("TEST_foo", "testvalue4")
+ self.assertEqual(self.d.getVar("TEST"), "testvalue3")
+ self.assertCountEqual(list(self.d.keys()), ['TEST', 'TEST_foo', 'OVERRIDES', 'TEST_bar', 'TEST_local'])
+
+ def test_multiple_combined_overrides(self):
+ self.d.setVar("TEST_local_foo_bar", "testvalue3")
+ self.assertEqual(self.d.getVar("TEST"), "testvalue3")
+
+ def test_multiple_overrides_unset(self):
+ self.d.setVar("TEST2_local_foo_bar", "testvalue3")
+ self.assertEqual(self.d.getVar("TEST2"), "testvalue3")
+
+ def test_keyexpansion_override(self):
+ self.d.setVar("LOCAL", "local")
+ self.d.setVar("TEST_bar", "testvalue2")
+ self.d.setVar("TEST_${LOCAL}", "testvalue3")
+ self.d.setVar("TEST_foo", "testvalue4")
+ bb.data.expandKeys(self.d)
+ self.assertEqual(self.d.getVar("TEST"), "testvalue3")
+
+ def test_rename_override(self):
+ self.d.setVar("ALTERNATIVE_ncurses-tools_class-target", "a")
+ self.d.setVar("OVERRIDES", "class-target")
+ self.d.renameVar("ALTERNATIVE_ncurses-tools", "ALTERNATIVE_lib32-ncurses-tools")
+ self.assertEqual(self.d.getVar("ALTERNATIVE_lib32-ncurses-tools"), "a")
+
+ def test_underscore_override(self):
+ self.d.setVar("TEST_bar", "testvalue2")
+ self.d.setVar("TEST_some_val", "testvalue3")
+ self.d.setVar("TEST_foo", "testvalue4")
+ self.d.setVar("OVERRIDES", "foo:bar:some_val")
+ self.assertEqual(self.d.getVar("TEST"), "testvalue3")
+
+class TestKeyExpansion(unittest.TestCase):
+ def setUp(self):
+ self.d = bb.data.init()
+ self.d.setVar("FOO", "foo")
+ self.d.setVar("BAR", "foo")
+
+ def test_keyexpand(self):
+ self.d.setVar("VAL_${FOO}", "A")
+ self.d.setVar("VAL_${BAR}", "B")
+ with LogRecord() as logs:
+ bb.data.expandKeys(self.d)
+ self.assertTrue(logContains("Variable key VAL_${FOO} (A) replaces original key VAL_foo (B)", logs))
+ self.assertEqual(self.d.getVar("VAL_foo"), "A")
+
+class TestFlags(unittest.TestCase):
+ def setUp(self):
+ self.d = bb.data.init()
+ self.d.setVar("foo", "value of foo")
+ self.d.setVarFlag("foo", "flag1", "value of flag1")
+ self.d.setVarFlag("foo", "flag2", "value of flag2")
+
+ def test_setflag(self):
+ self.assertEqual(self.d.getVarFlag("foo", "flag1", False), "value of flag1")
+ self.assertEqual(self.d.getVarFlag("foo", "flag2", False), "value of flag2")
+
+ def test_delflag(self):
+ self.d.delVarFlag("foo", "flag2")
+ self.assertEqual(self.d.getVarFlag("foo", "flag1", False), "value of flag1")
+ self.assertEqual(self.d.getVarFlag("foo", "flag2", False), None)
+
+
+class Contains(unittest.TestCase):
+ def setUp(self):
+ self.d = bb.data.init()
+ self.d.setVar("SOMEFLAG", "a b c")
+
+ def test_contains(self):
+ self.assertTrue(bb.utils.contains("SOMEFLAG", "a", True, False, self.d))
+ self.assertTrue(bb.utils.contains("SOMEFLAG", "b", True, False, self.d))
+ self.assertTrue(bb.utils.contains("SOMEFLAG", "c", True, False, self.d))
+
+ self.assertTrue(bb.utils.contains("SOMEFLAG", "a b", True, False, self.d))
+ self.assertTrue(bb.utils.contains("SOMEFLAG", "b c", True, False, self.d))
+ self.assertTrue(bb.utils.contains("SOMEFLAG", "c a", True, False, self.d))
+
+ self.assertTrue(bb.utils.contains("SOMEFLAG", "a b c", True, False, self.d))
+ self.assertTrue(bb.utils.contains("SOMEFLAG", "c b a", True, False, self.d))
+
+ self.assertFalse(bb.utils.contains("SOMEFLAG", "x", True, False, self.d))
+ self.assertFalse(bb.utils.contains("SOMEFLAG", "a x", True, False, self.d))
+ self.assertFalse(bb.utils.contains("SOMEFLAG", "x c b", True, False, self.d))
+ self.assertFalse(bb.utils.contains("SOMEFLAG", "x c b a", True, False, self.d))
+
+ def test_contains_any(self):
+ self.assertTrue(bb.utils.contains_any("SOMEFLAG", "a", True, False, self.d))
+ self.assertTrue(bb.utils.contains_any("SOMEFLAG", "b", True, False, self.d))
+ self.assertTrue(bb.utils.contains_any("SOMEFLAG", "c", True, False, self.d))
+
+ self.assertTrue(bb.utils.contains_any("SOMEFLAG", "a b", True, False, self.d))
+ self.assertTrue(bb.utils.contains_any("SOMEFLAG", "b c", True, False, self.d))
+ self.assertTrue(bb.utils.contains_any("SOMEFLAG", "c a", True, False, self.d))
+
+ self.assertTrue(bb.utils.contains_any("SOMEFLAG", "a x", True, False, self.d))
+ self.assertTrue(bb.utils.contains_any("SOMEFLAG", "x c", True, False, self.d))
+
+ self.assertFalse(bb.utils.contains_any("SOMEFLAG", "x", True, False, self.d))
+ self.assertFalse(bb.utils.contains_any("SOMEFLAG", "x y z", True, False, self.d))
+
+
+class Serialize(unittest.TestCase):
+
+ def test_serialize(self):
+ import tempfile
+ import pickle
+ d = bb.data.init()
+ d.enableTracking()
+ d.setVar('HELLO', 'world')
+ d.setVarFlag('HELLO', 'other', 'planet')
+ with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
+ tmpfilename = tmpfile.name
+ pickle.dump(d, tmpfile)
+
+ with open(tmpfilename, 'rb') as f:
+ newd = pickle.load(f)
+
+ os.remove(tmpfilename)
+
+ self.assertEqual(d, newd)
+ self.assertEqual(newd.getVar('HELLO'), 'world')
+ self.assertEqual(newd.getVarFlag('HELLO', 'other'), 'planet')
+
+
+# Remote datastore tests
+# These really only test the interface, since in actual usage we have a
+# tinfoil connector that does everything over RPC, and this doesn't test
+# that.
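+#
+# A minimal sketch of the wiring (as exercised by Remote.test_remote below):
+#   d1 = bb.data.init()                          # "server" datastore
+#   d2 = bb.data.init()                          # "client" datastore
+#   d2.setVar('_remote_data', TestConnector(d1))
+#   d2.getVar('HELLO')                           # resolved via the connector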
+
+class TestConnector:
+ d = None
+ def __init__(self, d):
+ self.d = d
+ def getVar(self, name):
+ return self.d._findVar(name)
+ def getKeys(self):
+ return set(self.d.keys())
+ def getVarHistory(self, name):
+ return self.d.varhistory.variable(name)
+ def expandPythonRef(self, varname, expr, d):
+ localdata = self.d.createCopy()
+ for key in d.localkeys():
+ localdata.setVar(key, d.getVar(key))
+ varparse = bb.data_smart.VariableParse(varname, localdata)
+ return varparse.python_sub(expr)
+ def setVar(self, name, value):
+ self.d.setVar(name, value)
+ def setVarFlag(self, name, flag, value):
+ self.d.setVarFlag(name, flag, value)
+ def delVar(self, name):
+ self.d.delVar(name)
+ return False
+ def delVarFlag(self, name, flag):
+ self.d.delVarFlag(name, flag)
+ return False
+ def renameVar(self, name, newname):
+ self.d.renameVar(name, newname)
+ return False
+
+class Remote(unittest.TestCase):
+ def test_remote(self):
+
+ d1 = bb.data.init()
+ d1.enableTracking()
+ d2 = bb.data.init()
+ d2.enableTracking()
+ connector = TestConnector(d1)
+
+ d2.setVar('_remote_data', connector)
+
+ d1.setVar('HELLO', 'world')
+ d1.setVarFlag('OTHER', 'flagname', 'flagvalue')
+ self.assertEqual(d2.getVar('HELLO'), 'world')
+ self.assertEqual(d2.expand('${HELLO}'), 'world')
+ self.assertEqual(d2.expand('${@d.getVar("HELLO")}'), 'world')
+ self.assertIn('flagname', d2.getVarFlags('OTHER'))
+ self.assertEqual(d2.getVarFlag('OTHER', 'flagname'), 'flagvalue')
+ self.assertEqual(d1.varhistory.variable('HELLO'), d2.varhistory.variable('HELLO'))
+ # Test setVar on client side affects server
+ d2.setVar('HELLO', 'other-world')
+ self.assertEqual(d1.getVar('HELLO'), 'other-world')
+ # Test setVarFlag on client side affects server
+ d2.setVarFlag('HELLO', 'flagname', 'flagvalue')
+ self.assertEqual(d1.getVarFlag('HELLO', 'flagname'), 'flagvalue')
+ # Test client side data is incorporated in python expansion (which is done on server)
+ d2.setVar('FOO', 'bar')
+ self.assertEqual(d2.expand('${@d.getVar("FOO")}'), 'bar')
+ # Test overrides work
+ d1.setVar('FOO_test', 'baz')
+ d1.appendVar('OVERRIDES', ':test')
+ self.assertEqual(d2.getVar('FOO'), 'baz')
+
+
+# Remote equivalents of local test classes
+# Note that these aren't perfect since we only test in one direction
+
+class RemoteDataExpansions(DataExpansions):
+ def setUp(self):
+ self.d1 = bb.data.init()
+ self.d = bb.data.init()
+ self.d1["foo"] = "value_of_foo"
+ self.d1["bar"] = "value_of_bar"
+ self.d1["value_of_foo"] = "value_of_'value_of_foo'"
+ connector = TestConnector(self.d1)
+ self.d.setVar('_remote_data', connector)
+
+class TestRemoteNestedExpansions(TestNestedExpansions):
+ def setUp(self):
+ self.d1 = bb.data.init()
+ self.d = bb.data.init()
+ self.d1["foo"] = "foo"
+ self.d1["bar"] = "bar"
+ self.d1["value_of_foobar"] = "187"
+ connector = TestConnector(self.d1)
+ self.d.setVar('_remote_data', connector)
+
+class TestRemoteConcat(TestConcat):
+ def setUp(self):
+ self.d1 = bb.data.init()
+ self.d = bb.data.init()
+ self.d1.setVar("FOO", "foo")
+ self.d1.setVar("VAL", "val")
+ self.d1.setVar("BAR", "bar")
+ connector = TestConnector(self.d1)
+ self.d.setVar('_remote_data', connector)
+
+class TestRemoteConcatOverride(TestConcatOverride):
+ def setUp(self):
+ self.d1 = bb.data.init()
+ self.d = bb.data.init()
+ self.d1.setVar("FOO", "foo")
+ self.d1.setVar("VAL", "val")
+ self.d1.setVar("BAR", "bar")
+ connector = TestConnector(self.d1)
+ self.d.setVar('_remote_data', connector)
+
+class TestRemoteOverrides(TestOverrides):
+ def setUp(self):
+ self.d1 = bb.data.init()
+ self.d = bb.data.init()
+ self.d1.setVar("OVERRIDES", "foo:bar:local")
+ self.d1.setVar("TEST", "testvalue")
+ connector = TestConnector(self.d1)
+ self.d.setVar('_remote_data', connector)
+
+class TestRemoteKeyExpansion(TestKeyExpansion):
+ def setUp(self):
+ self.d1 = bb.data.init()
+ self.d = bb.data.init()
+ self.d1.setVar("FOO", "foo")
+ self.d1.setVar("BAR", "foo")
+ connector = TestConnector(self.d1)
+ self.d.setVar('_remote_data', connector)
+
+class TestRemoteFlags(TestFlags):
+ def setUp(self):
+ self.d1 = bb.data.init()
+ self.d = bb.data.init()
+ self.d1.setVar("foo", "value of foo")
+ self.d1.setVarFlag("foo", "flag1", "value of flag1")
+ self.d1.setVarFlag("foo", "flag2", "value of flag2")
+ connector = TestConnector(self.d1)
+ self.d.setVar('_remote_data', connector)
diff --git a/poky/bitbake/lib/bb/tests/event.py b/poky/bitbake/lib/bb/tests/event.py
new file mode 100644
index 000000000..d3a5f6269
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/event.py
@@ -0,0 +1,986 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# BitBake Tests for the Event implementation (event.py)
+#
+# Copyright (C) 2017 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+import unittest
+import bb
+import logging
+import bb.compat
+import bb.event
+import importlib
+import threading
+import time
+import pickle
+from unittest.mock import Mock
+from unittest.mock import call
+from bb.msg import BBLogFormatter
+
+
+class EventQueueStubBase(object):
+ """ Base class for EventQueueStub classes """
+ def __init__(self):
+ self.event_calls = []
+ return
+
+ def _store_event_data_string(self, event):
+ if isinstance(event, logging.LogRecord):
+ formatter = BBLogFormatter("%(levelname)s: %(message)s")
+ self.event_calls.append(formatter.format(event))
+ else:
+ self.event_calls.append(bb.event.getName(event))
+ return
+
+
+class EventQueueStub(EventQueueStubBase):
+ """ Class used as specification for UI event handler queue stub objects """
+ def __init__(self):
+ super(EventQueueStub, self).__init__()
+
+ def send(self, event):
+ super(EventQueueStub, self)._store_event_data_string(event)
+
+
+class PickleEventQueueStub(EventQueueStubBase):
+ """ Class used as specification for UI event handler queue stub objects
+ with sendpickle method """
+ def __init__(self):
+ super(PickleEventQueueStub, self).__init__()
+
+ def sendpickle(self, pickled_event):
+ event = pickle.loads(pickled_event)
+ super(PickleEventQueueStub, self)._store_event_data_string(event)
+
+
+class UIClientStub(object):
+ """ Class used as specification for UI event handler stub objects """
+ def __init__(self):
+ self.event = None
+
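+# The tests below typically wrap these stubs in Mock objects, e.g.
+#   self._test_ui1.event = Mock(spec_set=EventQueueStub)
+# so that calls to send()/sendpickle() can be asserted on.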
+
+class EventHandlingTest(unittest.TestCase):
+ """ Event handling test class """
+
+ def setUp(self):
+ self._test_process = Mock()
+ ui_client1 = UIClientStub()
+ ui_client2 = UIClientStub()
+ self._test_ui1 = Mock(wraps=ui_client1)
+ self._test_ui2 = Mock(wraps=ui_client2)
+ importlib.reload(bb.event)
+
+ def _create_test_handlers(self):
+ """ Method used to create a test handler ordered dictionary """
+ test_handlers = bb.compat.OrderedDict()
+ test_handlers["handler1"] = self._test_process.handler1
+ test_handlers["handler2"] = self._test_process.handler2
+ return test_handlers
+
+ def test_class_handlers(self):
+ """ Test set_class_handlers and get_class_handlers methods """
+ test_handlers = self._create_test_handlers()
+ bb.event.set_class_handlers(test_handlers)
+ self.assertEqual(test_handlers,
+ bb.event.get_class_handlers())
+
+ def test_handlers(self):
+ """ Test set_handlers and get_handlers """
+ test_handlers = self._create_test_handlers()
+ bb.event.set_handlers(test_handlers)
+ self.assertEqual(test_handlers,
+ bb.event.get_handlers())
+
+ def test_clean_class_handlers(self):
+ """ Test clean_class_handlers method """
+ cleanDict = bb.compat.OrderedDict()
+ self.assertEqual(cleanDict,
+ bb.event.clean_class_handlers())
+
+ def test_register(self):
+ """ Test register method for class handlers """
+ result = bb.event.register("handler", self._test_process.handler)
+ self.assertEqual(result, bb.event.Registered)
+ handlers_dict = bb.event.get_class_handlers()
+ self.assertIn("handler", handlers_dict)
+
+ def test_already_registered(self):
+ """ Test detection of an already registed class handler """
+ bb.event.register("handler", self._test_process.handler)
+ handlers_dict = bb.event.get_class_handlers()
+ self.assertIn("handler", handlers_dict)
+ result = bb.event.register("handler", self._test_process.handler)
+ self.assertEqual(result, bb.event.AlreadyRegistered)
+
+ def test_register_from_string(self):
+ """ Test register method receiving code in string """
+ result = bb.event.register("string_handler", " return True")
+ self.assertEqual(result, bb.event.Registered)
+ handlers_dict = bb.event.get_class_handlers()
+ self.assertIn("string_handler", handlers_dict)
+
+ def test_register_with_mask(self):
+ """ Test register method with event masking """
+ mask = ["bb.event.OperationStarted",
+ "bb.event.OperationCompleted"]
+ result = bb.event.register("event_handler",
+ self._test_process.event_handler,
+ mask)
+ self.assertEqual(result, bb.event.Registered)
+ handlers_dict = bb.event.get_class_handlers()
+ self.assertIn("event_handler", handlers_dict)
+
+ def test_remove(self):
+ """ Test remove method for class handlers """
+ test_handlers = self._create_test_handlers()
+ bb.event.set_class_handlers(test_handlers)
+ count = len(test_handlers)
+ bb.event.remove("handler1", None)
+ test_handlers = bb.event.get_class_handlers()
+ self.assertEqual(len(test_handlers), count - 1)
+ with self.assertRaises(KeyError):
+ bb.event.remove("handler1", None)
+
+ def test_execute_handler(self):
+ """ Test execute_handler method for class handlers """
+ mask = ["bb.event.OperationProgress"]
+ result = bb.event.register("event_handler",
+ self._test_process.event_handler,
+ mask)
+ self.assertEqual(result, bb.event.Registered)
+ event = bb.event.OperationProgress(current=10, total=100)
+ bb.event.execute_handler("event_handler",
+ self._test_process.event_handler,
+ event,
+ None)
+ self._test_process.event_handler.assert_called_once_with(event)
+
+ def test_fire_class_handlers(self):
+ """ Test fire_class_handlers method """
+ mask = ["bb.event.OperationStarted"]
+ result = bb.event.register("event_handler1",
+ self._test_process.event_handler1,
+ mask)
+ self.assertEqual(result, bb.event.Registered)
+ result = bb.event.register("event_handler2",
+ self._test_process.event_handler2,
+ "*")
+ self.assertEqual(result, bb.event.Registered)
+ event1 = bb.event.OperationStarted()
+ event2 = bb.event.OperationCompleted(total=123)
+ bb.event.fire_class_handlers(event1, None)
+ bb.event.fire_class_handlers(event2, None)
+ bb.event.fire_class_handlers(event2, None)
+ expected_event_handler1 = [call(event1)]
+ expected_event_handler2 = [call(event1),
+ call(event2),
+ call(event2)]
+ self.assertEqual(self._test_process.event_handler1.call_args_list,
+ expected_event_handler1)
+ self.assertEqual(self._test_process.event_handler2.call_args_list,
+ expected_event_handler2)
+
+ def test_class_handler_filters(self):
+ """ Test filters for class handlers """
+ mask = ["bb.event.OperationStarted"]
+ result = bb.event.register("event_handler1",
+ self._test_process.event_handler1,
+ mask)
+ self.assertEqual(result, bb.event.Registered)
+ result = bb.event.register("event_handler2",
+ self._test_process.event_handler2,
+ "*")
+ self.assertEqual(result, bb.event.Registered)
+ bb.event.set_eventfilter(
+ lambda name, handler, event, d :
+ name == 'event_handler2' and
+ bb.event.getName(event) == "OperationStarted")
+ event1 = bb.event.OperationStarted()
+ event2 = bb.event.OperationCompleted(total=123)
+ bb.event.fire_class_handlers(event1, None)
+ bb.event.fire_class_handlers(event2, None)
+ bb.event.fire_class_handlers(event2, None)
+ expected_event_handler1 = []
+ expected_event_handler2 = [call(event1)]
+ self.assertEqual(self._test_process.event_handler1.call_args_list,
+ expected_event_handler1)
+ self.assertEqual(self._test_process.event_handler2.call_args_list,
+ expected_event_handler2)
+
+ def test_change_handler_event_mapping(self):
+ """ Test changing the event mapping for class handlers """
+ event1 = bb.event.OperationStarted()
+ event2 = bb.event.OperationCompleted(total=123)
+
+ # register handler for all events
+ result = bb.event.register("event_handler1",
+ self._test_process.event_handler1,
+ "*")
+ self.assertEqual(result, bb.event.Registered)
+ bb.event.fire_class_handlers(event1, None)
+ bb.event.fire_class_handlers(event2, None)
+ expected = [call(event1), call(event2)]
+ self.assertEqual(self._test_process.event_handler1.call_args_list,
+ expected)
+
+ # unregister handler and register it only for OperationStarted
+ bb.event.remove("event_handler1",
+ self._test_process.event_handler1)
+ mask = ["bb.event.OperationStarted"]
+ result = bb.event.register("event_handler1",
+ self._test_process.event_handler1,
+ mask)
+ self.assertEqual(result, bb.event.Registered)
+ bb.event.fire_class_handlers(event1, None)
+ bb.event.fire_class_handlers(event2, None)
+ expected = [call(event1), call(event2), call(event1)]
+ self.assertEqual(self._test_process.event_handler1.call_args_list,
+ expected)
+
+ # unregister handler and register it only for OperationCompleted
+ bb.event.remove("event_handler1",
+ self._test_process.event_handler1)
+ mask = ["bb.event.OperationCompleted"]
+ result = bb.event.register("event_handler1",
+ self._test_process.event_handler1,
+ mask)
+ self.assertEqual(result, bb.event.Registered)
+ bb.event.fire_class_handlers(event1, None)
+ bb.event.fire_class_handlers(event2, None)
+ expected = [call(event1), call(event2), call(event1), call(event2)]
+ self.assertEqual(self._test_process.event_handler1.call_args_list,
+ expected)
+
+ def test_register_UIHhandler(self):
+ """ Test register_UIHhandler method """
+ result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+ self.assertEqual(result, 1)
+
+ def test_UIHhandler_already_registered(self):
+ """ Test registering an UIHhandler already existing """
+ result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+ self.assertEqual(result, 1)
+ result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+ self.assertEqual(result, 2)
+
+ def test_unregister_UIHhandler(self):
+ """ Test unregister_UIHhandler method """
+ result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+ self.assertEqual(result, 1)
+ result = bb.event.unregister_UIHhandler(1)
+ self.assertIs(result, None)
+
+ def test_fire_ui_handlers(self):
+ """ Test fire_ui_handlers method """
+ self._test_ui1.event = Mock(spec_set=EventQueueStub)
+ result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+ self.assertEqual(result, 1)
+ self._test_ui2.event = Mock(spec_set=PickleEventQueueStub)
+ result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
+ self.assertEqual(result, 2)
+ event1 = bb.event.OperationStarted()
+ bb.event.fire_ui_handlers(event1, None)
+ expected = [call(event1)]
+ self.assertEqual(self._test_ui1.event.send.call_args_list,
+ expected)
+ expected = [call(pickle.dumps(event1))]
+ self.assertEqual(self._test_ui2.event.sendpickle.call_args_list,
+ expected)
+
+ def test_ui_handler_mask_filter(self):
+ """ Test filters for UI handlers """
+ mask = ["bb.event.OperationStarted"]
+ debug_domains = {}
+ self._test_ui1.event = Mock(spec_set=EventQueueStub)
+ result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+ bb.event.set_UIHmask(result, logging.INFO, debug_domains, mask)
+ self._test_ui2.event = Mock(spec_set=PickleEventQueueStub)
+ result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
+ bb.event.set_UIHmask(result, logging.INFO, debug_domains, mask)
+
+ event1 = bb.event.OperationStarted()
+ event2 = bb.event.OperationCompleted(total=1)
+
+ bb.event.fire_ui_handlers(event1, None)
+ bb.event.fire_ui_handlers(event2, None)
+ expected = [call(event1)]
+ self.assertEqual(self._test_ui1.event.send.call_args_list,
+ expected)
+ expected = [call(pickle.dumps(event1))]
+ self.assertEqual(self._test_ui2.event.sendpickle.call_args_list,
+ expected)
+
+ def test_ui_handler_log_filter(self):
+ """ Test log filters for UI handlers """
+ mask = ["*"]
+ debug_domains = {'BitBake.Foo': logging.WARNING}
+
+ self._test_ui1.event = EventQueueStub()
+ result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+ bb.event.set_UIHmask(result, logging.ERROR, debug_domains, mask)
+ self._test_ui2.event = PickleEventQueueStub()
+ result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
+ bb.event.set_UIHmask(result, logging.ERROR, debug_domains, mask)
+
+ event1 = bb.event.OperationStarted()
+ bb.event.fire_ui_handlers(event1, None) # All events match
+
+ event_log_handler = bb.event.LogHandler()
+ logger = logging.getLogger("BitBake")
+ logger.addHandler(event_log_handler)
+ logger1 = logging.getLogger("BitBake.Foo")
+ logger1.warning("Test warning LogRecord1") # Matches debug_domains level
+ logger1.info("Test info LogRecord") # Filtered out
+ logger2 = logging.getLogger("BitBake.Bar")
+ logger2.error("Test error LogRecord") # Matches filter base level
+ logger2.warning("Test warning LogRecord2") # Filtered out
+ logger.removeHandler(event_log_handler)
+
+ expected = ['OperationStarted',
+ 'WARNING: Test warning LogRecord1',
+ 'ERROR: Test error LogRecord']
+ self.assertEqual(self._test_ui1.event.event_calls, expected)
+ self.assertEqual(self._test_ui2.event.event_calls, expected)
+
+ def test_fire(self):
+ """ Test fire method used to trigger class and ui event handlers """
+ mask = ["bb.event.ConfigParsed"]
+ result = bb.event.register("event_handler1",
+ self._test_process.event_handler1,
+ mask)
+
+ self._test_ui1.event = Mock(spec_set=EventQueueStub)
+ result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+ self.assertEqual(result, 1)
+
+ event1 = bb.event.ConfigParsed()
+ bb.event.fire(event1, None)
+ expected = [call(event1)]
+ self.assertEqual(self._test_process.event_handler1.call_args_list,
+ expected)
+ self.assertEqual(self._test_ui1.event.send.call_args_list,
+ expected)
+
+ def test_fire_from_worker(self):
+ """ Test fire_from_worker method """
+ self._test_ui1.event = Mock(spec_set=EventQueueStub)
+ result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+ self.assertEqual(result, 1)
+ event1 = bb.event.ConfigParsed()
+ bb.event.fire_from_worker(event1, None)
+ expected = [call(event1)]
+ self.assertEqual(self._test_ui1.event.send.call_args_list,
+ expected)
+
+ def test_worker_fire(self):
+ """ Test the triggering of bb.event.worker_fire callback """
+ bb.event.worker_fire = Mock()
+ event = bb.event.Event()
+ bb.event.fire(event, None)
+ expected = [call(event, None)]
+ self.assertEqual(bb.event.worker_fire.call_args_list, expected)
+
+ def test_print_ui_queue(self):
+ """ Test print_ui_queue method """
+ event1 = bb.event.OperationStarted()
+ event2 = bb.event.OperationCompleted(total=123)
+ bb.event.fire(event1, None)
+ bb.event.fire(event2, None)
+ event_log_handler = bb.event.LogHandler()
+ logger = logging.getLogger("BitBake")
+ logger.addHandler(event_log_handler)
+ logger.info("Test info LogRecord")
+ logger.warning("Test warning LogRecord")
+ with self.assertLogs("BitBake", level="INFO") as cm:
+ bb.event.print_ui_queue()
+ logger.removeHandler(event_log_handler)
+ self.assertEqual(cm.output,
+ ["INFO:BitBake:Test info LogRecord",
+ "WARNING:BitBake:Test warning LogRecord"])
+
+ def _set_threadlock_test_mockups(self):
+ """ Create UI event handler mockups used in enable and disable
+ threadlock tests """
+ def ui1_event_send(event):
+ if type(event) is bb.event.ConfigParsed:
+ self._threadlock_test_calls.append("w1_ui1")
+ if type(event) is bb.event.OperationStarted:
+ self._threadlock_test_calls.append("w2_ui1")
+ time.sleep(2)
+
+ def ui2_event_send(event):
+ if type(event) is bb.event.ConfigParsed:
+ self._threadlock_test_calls.append("w1_ui2")
+ if type(event) is bb.event.OperationStarted:
+ self._threadlock_test_calls.append("w2_ui2")
+ time.sleep(2)
+
+ self._threadlock_test_calls = []
+ self._test_ui1.event = EventQueueStub()
+ self._test_ui1.event.send = ui1_event_send
+ result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+ self.assertEqual(result, 1)
+ self._test_ui2.event = EventQueueStub()
+ self._test_ui2.event.send = ui2_event_send
+ result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
+ self.assertEqual(result, 2)
+
+ def _set_and_run_threadlock_test_workers(self):
+ """ Create and run the workers used to trigger events in enable and
+ disable threadlock tests """
+ worker1 = threading.Thread(target=self._thread_lock_test_worker1)
+ worker2 = threading.Thread(target=self._thread_lock_test_worker2)
+ worker1.start()
+ time.sleep(1)
+ worker2.start()
+ worker1.join()
+ worker2.join()
+
+ def _thread_lock_test_worker1(self):
+ """ First worker used to fire the ConfigParsed event for enable and
+ disable threadlocks tests """
+ bb.event.fire(bb.event.ConfigParsed(), None)
+
+ def _thread_lock_test_worker2(self):
+ """ Second worker used to fire the OperationStarted event for enable
+ and disable threadlocks tests """
+ bb.event.fire(bb.event.OperationStarted(), None)
+
+ def test_enable_threadlock(self):
+ """ Test enable_threadlock method """
+ self._set_threadlock_test_mockups()
+ bb.event.enable_threadlock()
+ self._set_and_run_threadlock_test_workers()
+ # Calls to UI handlers should be in order, as all the handlers
+ # registered for the first worker's event should run before the
+ # second worker's event is processed.
+ self.assertEqual(self._threadlock_test_calls,
+ ["w1_ui1", "w1_ui2", "w2_ui1", "w2_ui2"])
+
+ def test_disable_threadlock(self):
+ """ Test disable_threadlock method """
+ self._set_threadlock_test_mockups()
+ bb.event.disable_threadlock()
+ self._set_and_run_threadlock_test_workers()
+ # Calls to UI handlers should be interleaved. Because of the delay in
+ # the handlers registered for the first worker's event, the second
+ # worker's event starts being processed before handling of the first
+ # worker's event has finished.
+ self.assertEqual(self._threadlock_test_calls,
+ ["w1_ui1", "w2_ui1", "w1_ui2", "w2_ui2"])
+
+
+class EventClassesTest(unittest.TestCase):
+ """ Event classes test class """
+
+ _worker_pid = 54321
+
+ def setUp(self):
+ bb.event.worker_pid = EventClassesTest._worker_pid
+
+ def test_Event(self):
+ """ Test the Event base class """
+ event = bb.event.Event()
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_HeartbeatEvent(self):
+ """ Test the HeartbeatEvent class """
+ time = 10
+ event = bb.event.HeartbeatEvent(time)
+ self.assertEqual(event.time, time)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_OperationStarted(self):
+ """ Test OperationStarted event class """
+ msg = "Foo Bar"
+ event = bb.event.OperationStarted(msg)
+ self.assertEqual(event.msg, msg)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_OperationCompleted(self):
+ """ Test OperationCompleted event class """
+ msg = "Foo Bar"
+ total = 123
+ event = bb.event.OperationCompleted(total, msg)
+ self.assertEqual(event.msg, msg)
+ self.assertEqual(event.total, total)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_OperationProgress(self):
+ """ Test OperationProgress event class """
+ msg = "Foo Bar"
+ total = 123
+ current = 111
+ event = bb.event.OperationProgress(current, total, msg)
+ self.assertEqual(event.msg, msg + ": %s/%s" % (current, total))
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_ConfigParsed(self):
+ """ Test the ConfigParsed class """
+ event = bb.event.ConfigParsed()
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_MultiConfigParsed(self):
+ """ Test MultiConfigParsed event class """
+ mcdata = {"foobar": "Foo Bar"}
+ event = bb.event.MultiConfigParsed(mcdata)
+ self.assertEqual(event.mcdata, mcdata)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_RecipeEvent(self):
+ """ Test RecipeEvent event base class """
+ callback = lambda a: 2 * a
+ event = bb.event.RecipeEvent(callback)
+ self.assertEqual(event.fn(1), callback(1))
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_RecipePreFinalise(self):
+ """ Test RecipePreFinalise event class """
+ callback = lambda a: 2 * a
+ event = bb.event.RecipePreFinalise(callback)
+ self.assertEqual(event.fn(1), callback(1))
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_RecipeTaskPreProcess(self):
+ """ Test RecipeTaskPreProcess event class """
+ callback = lambda a: 2 * a
+ tasklist = [("foobar", callback)]
+ event = bb.event.RecipeTaskPreProcess(callback, tasklist)
+ self.assertEqual(event.fn(1), callback(1))
+ self.assertEqual(event.tasklist, tasklist)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_RecipeParsed(self):
+ """ Test RecipeParsed event base class """
+ callback = lambda a: 2 * a
+ event = bb.event.RecipeParsed(callback)
+ self.assertEqual(event.fn(1), callback(1))
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_StampUpdate(self):
+ targets = ["foo", "bar"]
+ stampfns = [lambda:"foobar"]
+ event = bb.event.StampUpdate(targets, stampfns)
+ self.assertEqual(event.targets, targets)
+ self.assertEqual(event.stampPrefix, stampfns)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_BuildBase(self):
+ """ Test base class for bitbake build events """
+ name = "foo"
+ pkgs = ["bar"]
+ failures = 123
+ event = bb.event.BuildBase(name, pkgs, failures)
+ self.assertEqual(event.name, name)
+ self.assertEqual(event.pkgs, pkgs)
+ self.assertEqual(event.getFailures(), failures)
+ name = event.name = "bar"
+ pkgs = event.pkgs = ["foo"]
+ self.assertEqual(event.name, name)
+ self.assertEqual(event.pkgs, pkgs)
+ self.assertEqual(event.getFailures(), failures)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_BuildInit(self):
+ """ Test class for bitbake build invocation events """
+ event = bb.event.BuildInit()
+ self.assertEqual(event.name, None)
+ self.assertEqual(event.pkgs, [])
+ self.assertEqual(event.getFailures(), 0)
+ name = event.name = "bar"
+ pkgs = event.pkgs = ["foo"]
+ self.assertEqual(event.name, name)
+ self.assertEqual(event.pkgs, pkgs)
+ self.assertEqual(event.getFailures(), 0)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_BuildStarted(self):
+ """ Test class for build started events """
+ name = "foo"
+ pkgs = ["bar"]
+ failures = 123
+ event = bb.event.BuildStarted(name, pkgs, failures)
+ self.assertEqual(event.name, name)
+ self.assertEqual(event.pkgs, pkgs)
+ self.assertEqual(event.getFailures(), failures)
+ self.assertEqual(event.msg, "Building Started")
+ name = event.name = "bar"
+ pkgs = event.pkgs = ["foo"]
+ msg = event.msg = "foobar"
+ self.assertEqual(event.name, name)
+ self.assertEqual(event.pkgs, pkgs)
+ self.assertEqual(event.getFailures(), failures)
+ self.assertEqual(event.msg, msg)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_BuildCompleted(self):
+ """ Test class for build completed events """
+ total = 1000
+ name = "foo"
+ pkgs = ["bar"]
+ failures = 123
+ interrupted = 1
+ event = bb.event.BuildCompleted(total, name, pkgs, failures,
+ interrupted)
+ self.assertEqual(event.name, name)
+ self.assertEqual(event.pkgs, pkgs)
+ self.assertEqual(event.getFailures(), failures)
+ self.assertEqual(event.msg, "Building Failed")
+ event2 = bb.event.BuildCompleted(total, name, pkgs)
+ self.assertEqual(event2.name, name)
+ self.assertEqual(event2.pkgs, pkgs)
+ self.assertEqual(event2.getFailures(), 0)
+ self.assertEqual(event2.msg, "Building Succeeded")
+ self.assertEqual(event2.pid, EventClassesTest._worker_pid)
+
+ def test_DiskFull(self):
+ """ Test DiskFull event class """
+ dev = "/dev/foo"
+ fstype = "ext4"
+ freespace = "104M"
+ mountpoint = "/"
+ event = bb.event.DiskFull(dev, fstype, freespace, mountpoint)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_MonitorDiskEvent(self):
+ """ Test MonitorDiskEvent class """
+ available_bytes = 10000000
+ free_bytes = 90000000
+ total_bytes = 1000000000
+ du = bb.event.DiskUsageSample(available_bytes, free_bytes,
+ total_bytes)
+ event = bb.event.MonitorDiskEvent(du)
+ self.assertEqual(event.disk_usage.available_bytes, available_bytes)
+ self.assertEqual(event.disk_usage.free_bytes, free_bytes)
+ self.assertEqual(event.disk_usage.total_bytes, total_bytes)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_NoProvider(self):
+ """ Test NoProvider event class """
+ item = "foobar"
+ event1 = bb.event.NoProvider(item)
+ self.assertEqual(event1.getItem(), item)
+ self.assertEqual(event1.isRuntime(), False)
+ self.assertEqual(str(event1), "Nothing PROVIDES 'foobar'")
+ runtime = True
+ dependees = ["foo", "bar"]
+ reasons = None
+ close_matches = ["foibar", "footbar"]
+ event2 = bb.event.NoProvider(item, runtime, dependees, reasons,
+ close_matches)
+ self.assertEqual(event2.isRuntime(), True)
+ expected = ("Nothing RPROVIDES 'foobar' (but foo, bar RDEPENDS"
+ " on or otherwise requires it). Close matches:\n"
+ " foibar\n"
+ " footbar")
+ self.assertEqual(str(event2), expected)
+ reasons = ["Item does not exist on database"]
+ close_matches = ["foibar", "footbar"]
+ event3 = bb.event.NoProvider(item, runtime, dependees, reasons,
+ close_matches)
+ expected = ("Nothing RPROVIDES 'foobar' (but foo, bar RDEPENDS"
+ " on or otherwise requires it)\n"
+ "Item does not exist on database")
+ self.assertEqual(str(event3), expected)
+ self.assertEqual(event3.pid, EventClassesTest._worker_pid)
+
+ def test_MultipleProviders(self):
+ """ Test MultipleProviders event class """
+ item = "foobar"
+ candidates = ["foobarv1", "foobars"]
+ event1 = bb.event.MultipleProviders(item, candidates)
+ self.assertEqual(event1.isRuntime(), False)
+ self.assertEqual(event1.getItem(), item)
+ self.assertEqual(event1.getCandidates(), candidates)
+ expected = ("Multiple providers are available for foobar (foobarv1,"
+ " foobars)\n"
+ "Consider defining a PREFERRED_PROVIDER entry to match "
+ "foobar")
+ self.assertEqual(str(event1), expected)
+ runtime = True
+ event2 = bb.event.MultipleProviders(item, candidates, runtime)
+ self.assertEqual(event2.isRuntime(), runtime)
+ expected = ("Multiple providers are available for runtime foobar "
+ "(foobarv1, foobars)\n"
+ "Consider defining a PREFERRED_RPROVIDER entry to match "
+ "foobar")
+ self.assertEqual(str(event2), expected)
+ self.assertEqual(event2.pid, EventClassesTest._worker_pid)
+
+ def test_ParseStarted(self):
+ """ Test ParseStarted event class """
+ total = 123
+ event = bb.event.ParseStarted(total)
+ self.assertEqual(event.msg, "Recipe parsing Started")
+ self.assertEqual(event.total, total)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_ParseCompleted(self):
+ """ Test ParseCompleted event class """
+ cached = 10
+ parsed = 13
+ skipped = 7
+ virtuals = 2
+ masked = 1
+ errors = 0
+ total = 23
+ event = bb.event.ParseCompleted(cached, parsed, skipped, masked,
+ virtuals, errors, total)
+ self.assertEqual(event.msg, "Recipe parsing Completed")
+ expected = [cached, parsed, skipped, virtuals, masked, errors,
+ cached + parsed, total]
+ actual = [event.cached, event.parsed, event.skipped, event.virtuals,
+ event.masked, event.errors, event.sofar, event.total]
+ self.assertEqual(actual, expected)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_ParseProgress(self):
+ """ Test ParseProgress event class """
+ current = 10
+ total = 100
+ event = bb.event.ParseProgress(current, total)
+ self.assertEqual(event.msg,
+ "Recipe parsing" + ": %s/%s" % (current, total))
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_CacheLoadStarted(self):
+ """ Test CacheLoadStarted event class """
+ total = 123
+ event = bb.event.CacheLoadStarted(total)
+ self.assertEqual(event.msg, "Loading cache Started")
+ self.assertEqual(event.total, total)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_CacheLoadProgress(self):
+ """ Test CacheLoadProgress event class """
+ current = 10
+ total = 100
+ event = bb.event.CacheLoadProgress(current, total)
+ self.assertEqual(event.msg,
+ "Loading cache" + ": %s/%s" % (current, total))
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_CacheLoadCompleted(self):
+ """ Test CacheLoadCompleted event class """
+ total = 23
+ num_entries = 12
+ event = bb.event.CacheLoadCompleted(total, num_entries)
+ self.assertEqual(event.msg, "Loading cache Completed")
+ expected = [total, num_entries]
+ actual = [event.total, event.num_entries]
+ self.assertEqual(actual, expected)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_TreeDataPreparationStarted(self):
+ """ Test TreeDataPreparationStarted event class """
+ event = bb.event.TreeDataPreparationStarted()
+ self.assertEqual(event.msg, "Preparing tree data Started")
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_TreeDataPreparationProgress(self):
+ """ Test TreeDataPreparationProgress event class """
+ current = 10
+ total = 100
+ event = bb.event.TreeDataPreparationProgress(current, total)
+ self.assertEqual(event.msg,
+ "Preparing tree data" + ": %s/%s" % (current, total))
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_TreeDataPreparationCompleted(self):
+ """ Test TreeDataPreparationCompleted event class """
+ total = 23
+ event = bb.event.TreeDataPreparationCompleted(total)
+ self.assertEqual(event.msg, "Preparing tree data Completed")
+ self.assertEqual(event.total, total)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_DepTreeGenerated(self):
+ """ Test DepTreeGenerated event class """
+ depgraph = Mock()
+ event = bb.event.DepTreeGenerated(depgraph)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_TargetsTreeGenerated(self):
+ """ Test TargetsTreeGenerated event class """
+ model = Mock()
+ event = bb.event.TargetsTreeGenerated(model)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_ReachableStamps(self):
+ """ Test ReachableStamps event class """
+ stamps = [Mock(), Mock()]
+ event = bb.event.ReachableStamps(stamps)
+ self.assertEqual(event.stamps, stamps)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_FilesMatchingFound(self):
+ """ Test FilesMatchingFound event class """
+ pattern = "foo.*bar"
+ matches = ["foobar"]
+ event = bb.event.FilesMatchingFound(pattern, matches)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_ConfigFilesFound(self):
+ """ Test ConfigFilesFound event class """
+ variable = "FOO_BAR"
+ values = ["foo", "bar"]
+ event = bb.event.ConfigFilesFound(variable, values)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_ConfigFilePathFound(self):
+ """ Test ConfigFilePathFound event class """
+ path = "/foo/bar"
+ event = bb.event.ConfigFilePathFound(path)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_message_classes(self):
+ """ Test message event classes """
+ msg = "foobar foo bar"
+ event = bb.event.MsgBase(msg)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+ event = bb.event.MsgDebug(msg)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+ event = bb.event.MsgNote(msg)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+ event = bb.event.MsgWarn(msg)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+ event = bb.event.MsgError(msg)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+ event = bb.event.MsgFatal(msg)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+ event = bb.event.MsgPlain(msg)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_LogExecTTY(self):
+ """ Test LogExecTTY event class """
+ msg = "foo bar"
+ prog = "foo.sh"
+ sleep_delay = 10
+ retries = 3
+ event = bb.event.LogExecTTY(msg, prog, sleep_delay, retries)
+ self.assertEqual(event.msg, msg)
+ self.assertEqual(event.prog, prog)
+ self.assertEqual(event.sleep_delay, sleep_delay)
+ self.assertEqual(event.retries, retries)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def _throw_zero_division_exception(self):
+ 1 / 0
+
+ def _worker_handler(self, event, d):
+ self._returned_event = event
+ return
+
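+ # The test below swaps bb.event.worker_fire for _worker_handler above so
+ # that the event fired for the logged exception can be captured and
+ # round-tripped through pickle.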
+ def test_LogHandler(self):
+ """ Test LogHandler class """
+ logger = logging.getLogger("TestEventClasses")
+ logger.propagate = False
+ handler = bb.event.LogHandler(logging.INFO)
+ logger.addHandler(handler)
+ bb.event.worker_fire = self._worker_handler
+ try:
+ self._throw_zero_division_exception()
+ except ZeroDivisionError as ex:
+ logger.exception(ex)
+ event = self._returned_event
+ try:
+ pe = pickle.dumps(event)
+ newevent = pickle.loads(pe)
+ except Exception:
+ self.fail('Logged event is not serializable')
+ self.assertEqual(event.taskpid, EventClassesTest._worker_pid)
+
+ def test_MetadataEvent(self):
+ """ Test MetadataEvent class """
+ eventtype = "footype"
+ eventdata = {"foo": "bar"}
+ event = bb.event.MetadataEvent(eventtype, eventdata)
+ self.assertEqual(event.type, eventtype)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_ProcessStarted(self):
+ """ Test ProcessStarted class """
+ processname = "foo"
+ total = 9783128974
+ event = bb.event.ProcessStarted(processname, total)
+ self.assertEqual(event.processname, processname)
+ self.assertEqual(event.total, total)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_ProcessProgress(self):
+ """ Test ProcessProgress class """
+ processname = "foo"
+ progress = 243224
+ event = bb.event.ProcessProgress(processname, progress)
+ self.assertEqual(event.processname, processname)
+ self.assertEqual(event.progress, progress)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_ProcessFinished(self):
+ """ Test ProcessFinished class """
+ processname = "foo"
+ event = bb.event.ProcessFinished(processname)
+ self.assertEqual(event.processname, processname)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_SanityCheck(self):
+ """ Test SanityCheck class """
+ event1 = bb.event.SanityCheck()
+ self.assertEqual(event1.generateevents, True)
+ self.assertEqual(event1.pid, EventClassesTest._worker_pid)
+ generateevents = False
+ event2 = bb.event.SanityCheck(generateevents)
+ self.assertEqual(event2.generateevents, generateevents)
+ self.assertEqual(event2.pid, EventClassesTest._worker_pid)
+
+ def test_SanityCheckPassed(self):
+ """ Test SanityCheckPassed class """
+ event = bb.event.SanityCheckPassed()
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+ def test_SanityCheckFailed(self):
+ """ Test SanityCheckFailed class """
+ msg = "The sanity test failed."
+ event1 = bb.event.SanityCheckFailed(msg)
+ self.assertEqual(event1.pid, EventClassesTest._worker_pid)
+ network_error = True
+ event2 = bb.event.SanityCheckFailed(msg, network_error)
+ self.assertEqual(event2.pid, EventClassesTest._worker_pid)
+
+ def test_network_event_classes(self):
+ """ Test network event classes """
+ event1 = bb.event.NetworkTest()
+ generateevents = False
+ self.assertEqual(event1.pid, EventClassesTest._worker_pid)
+ event2 = bb.event.NetworkTest(generateevents)
+ self.assertEqual(event2.pid, EventClassesTest._worker_pid)
+ event3 = bb.event.NetworkTestPassed()
+ self.assertEqual(event3.pid, EventClassesTest._worker_pid)
+ event4 = bb.event.NetworkTestFailed()
+ self.assertEqual(event4.pid, EventClassesTest._worker_pid)
+
+ def test_FindSigInfoResult(self):
+ """ Test FindSigInfoResult event class """
+ result = [Mock()]
+ event = bb.event.FindSigInfoResult(result)
+ self.assertEqual(event.result, result)
+ self.assertEqual(event.pid, EventClassesTest._worker_pid)
diff --git a/poky/bitbake/lib/bb/tests/fetch.py b/poky/bitbake/lib/bb/tests/fetch.py
new file mode 100644
index 000000000..74859f9d3
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/fetch.py
@@ -0,0 +1,1573 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# BitBake Tests for the Fetcher (fetch2/)
+#
+# Copyright (C) 2012 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+import unittest
+import hashlib
+import tempfile
+import subprocess
+import collections
+import os
+from bb.fetch2 import URI
+from bb.fetch2 import FetchMethod
+import bb
+
+def skipIfNoNetwork():
+ if os.environ.get("BB_SKIP_NETTESTS") == "yes":
+ return unittest.skip("Network tests being skipped")
+ return lambda f: f
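+
+# Illustrative use of the decorator factory above (a sketch; the network
+# tests below apply it exactly this way):
+#
+#     @skipIfNoNetwork()
+#     def test_something_networked(self):
+#         ...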
+
+class URITest(unittest.TestCase):
+ test_uris = {
+ "http://www.google.com/index.html" : {
+ 'uri': 'http://www.google.com/index.html',
+ 'scheme': 'http',
+ 'hostname': 'www.google.com',
+ 'port': None,
+ 'hostport': 'www.google.com',
+ 'path': '/index.html',
+ 'userinfo': '',
+ 'username': '',
+ 'password': '',
+ 'params': {},
+ 'query': {},
+ 'relative': False
+ },
+ "http://www.google.com/index.html;param1=value1" : {
+ 'uri': 'http://www.google.com/index.html;param1=value1',
+ 'scheme': 'http',
+ 'hostname': 'www.google.com',
+ 'port': None,
+ 'hostport': 'www.google.com',
+ 'path': '/index.html',
+ 'userinfo': '',
+ 'username': '',
+ 'password': '',
+ 'params': {
+ 'param1': 'value1'
+ },
+ 'query': {},
+ 'relative': False
+ },
+ "http://www.example.org/index.html?param1=value1" : {
+ 'uri': 'http://www.example.org/index.html?param1=value1',
+ 'scheme': 'http',
+ 'hostname': 'www.example.org',
+ 'port': None,
+ 'hostport': 'www.example.org',
+ 'path': '/index.html',
+ 'userinfo': '',
+ 'username': '',
+ 'password': '',
+ 'params': {},
+ 'query': {
+ 'param1': 'value1'
+ },
+ 'relative': False
+ },
+ "http://www.example.org/index.html?qparam1=qvalue1;param2=value2" : {
+ 'uri': 'http://www.example.org/index.html?qparam1=qvalue1;param2=value2',
+ 'scheme': 'http',
+ 'hostname': 'www.example.org',
+ 'port': None,
+ 'hostport': 'www.example.org',
+ 'path': '/index.html',
+ 'userinfo': '',
+ 'username': '',
+ 'password': '',
+ 'params': {
+ 'param2': 'value2'
+ },
+ 'query': {
+ 'qparam1': 'qvalue1'
+ },
+ 'relative': False
+ },
+ "http://www.example.com:8080/index.html" : {
+ 'uri': 'http://www.example.com:8080/index.html',
+ 'scheme': 'http',
+ 'hostname': 'www.example.com',
+ 'port': 8080,
+ 'hostport': 'www.example.com:8080',
+ 'path': '/index.html',
+ 'userinfo': '',
+ 'username': '',
+ 'password': '',
+ 'params': {},
+ 'query': {},
+ 'relative': False
+ },
+ "cvs://anoncvs@cvs.handhelds.org/cvs;module=familiar/dist/ipkg" : {
+ 'uri': 'cvs://anoncvs@cvs.handhelds.org/cvs;module=familiar/dist/ipkg',
+ 'scheme': 'cvs',
+ 'hostname': 'cvs.handhelds.org',
+ 'port': None,
+ 'hostport': 'cvs.handhelds.org',
+ 'path': '/cvs',
+ 'userinfo': 'anoncvs',
+ 'username': 'anoncvs',
+ 'password': '',
+ 'params': {
+ 'module': 'familiar/dist/ipkg'
+ },
+ 'query': {},
+ 'relative': False
+ },
+ "cvs://anoncvs:anonymous@cvs.handhelds.org/cvs;tag=V0-99-81;module=familiar/dist/ipkg": {
+ 'uri': 'cvs://anoncvs:anonymous@cvs.handhelds.org/cvs;tag=V0-99-81;module=familiar/dist/ipkg',
+ 'scheme': 'cvs',
+ 'hostname': 'cvs.handhelds.org',
+ 'port': None,
+ 'hostport': 'cvs.handhelds.org',
+ 'path': '/cvs',
+ 'userinfo': 'anoncvs:anonymous',
+ 'username': 'anoncvs',
+ 'password': 'anonymous',
+ 'params': collections.OrderedDict([
+ ('tag', 'V0-99-81'),
+ ('module', 'familiar/dist/ipkg')
+ ]),
+ 'query': {},
+ 'relative': False
+ },
+ "file://example.diff": { # NOTE: Not RFC compliant!
+ 'uri': 'file:example.diff',
+ 'scheme': 'file',
+ 'hostname': '',
+ 'port': None,
+ 'hostport': '',
+ 'path': 'example.diff',
+ 'userinfo': '',
+ 'username': '',
+ 'password': '',
+ 'params': {},
+ 'query': {},
+ 'relative': True
+ },
+ "file:example.diff": { # NOTE: RFC compliant version of the former
+ 'uri': 'file:example.diff',
+ 'scheme': 'file',
+ 'hostname': '',
+ 'port': None,
+ 'hostport': '',
+ 'path': 'example.diff',
+ 'userinfo': '',
+ 'username': '',
+ 'password': '',
+ 'params': {},
+ 'query': {},
+ 'relative': True
+ },
+ "file:///tmp/example.diff": {
+ 'uri': 'file:///tmp/example.diff',
+ 'scheme': 'file',
+ 'hostname': '',
+ 'port': None,
+ 'hostport': '',
+ 'path': '/tmp/example.diff',
+ 'userinfo': '',
+ 'username': '',
+ 'password': '',
+ 'params': {},
+ 'query': {},
+ 'relative': False
+ },
+ "git:///path/example.git": {
+ 'uri': 'git:///path/example.git',
+ 'scheme': 'git',
+ 'hostname': '',
+ 'port': None,
+ 'hostport': '',
+ 'path': '/path/example.git',
+ 'userinfo': '',
+ 'username': '',
+ 'password': '',
+ 'params': {},
+ 'query': {},
+ 'relative': False
+ },
+ "git:path/example.git": {
+ 'uri': 'git:path/example.git',
+ 'scheme': 'git',
+ 'hostname': '',
+ 'port': None,
+ 'hostport': '',
+ 'path': 'path/example.git',
+ 'userinfo': '',
+ 'username': '',
+ 'password': '',
+ 'params': {},
+ 'query': {},
+ 'relative': True
+ },
+ "git://example.net/path/example.git": {
+ 'uri': 'git://example.net/path/example.git',
+ 'scheme': 'git',
+ 'hostname': 'example.net',
+ 'port': None,
+ 'hostport': 'example.net',
+ 'path': '/path/example.git',
+ 'userinfo': '',
+ 'username': '',
+ 'password': '',
+ 'params': {},
+ 'query': {},
+ 'relative': False
+ },
+ "http://somesite.net;someparam=1": {
+ 'uri': 'http://somesite.net;someparam=1',
+ 'scheme': 'http',
+ 'hostname': 'somesite.net',
+ 'port': None,
+ 'hostport': 'somesite.net',
+ 'path': '',
+ 'userinfo': '',
+ 'username': '',
+ 'password': '',
+ 'params': {"someparam" : "1"},
+ 'query': {},
+ 'relative': False
+ },
+ "file://somelocation;someparam=1": {
+ 'uri': 'file:somelocation;someparam=1',
+ 'scheme': 'file',
+ 'hostname': '',
+ 'port': None,
+ 'hostport': '',
+ 'path': 'somelocation',
+ 'userinfo': '',
+ 'username': '',
+ 'password': '',
+ 'params': {"someparam" : "1"},
+ 'query': {},
+ 'relative': True
+ }
+ }
+
+ def test_uri(self):
+ for test_uri, ref in self.test_uris.items():
+ uri = URI(test_uri)
+
+ self.assertEqual(str(uri), ref['uri'])
+
+ # expected attributes
+ self.assertEqual(uri.scheme, ref['scheme'])
+
+ self.assertEqual(uri.userinfo, ref['userinfo'])
+ self.assertEqual(uri.username, ref['username'])
+ self.assertEqual(uri.password, ref['password'])
+
+ self.assertEqual(uri.hostname, ref['hostname'])
+ self.assertEqual(uri.port, ref['port'])
+ self.assertEqual(uri.hostport, ref['hostport'])
+
+ self.assertEqual(uri.path, ref['path'])
+ self.assertEqual(uri.params, ref['params'])
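+ # every reference dict above also defines 'query'; assert it as well
+ self.assertEqual(uri.query, ref['query'])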
+
+ self.assertEqual(uri.relative, ref['relative'])
+
+ def test_dict(self):
+ for test in self.test_uris.values():
+ uri = URI()
+
+ self.assertEqual(uri.scheme, '')
+ self.assertEqual(uri.userinfo, '')
+ self.assertEqual(uri.username, '')
+ self.assertEqual(uri.password, '')
+ self.assertEqual(uri.hostname, '')
+ self.assertEqual(uri.port, None)
+ self.assertEqual(uri.path, '')
+ self.assertEqual(uri.params, {})
+
+ uri.scheme = test['scheme']
+ self.assertEqual(uri.scheme, test['scheme'])
+
+ uri.userinfo = test['userinfo']
+ self.assertEqual(uri.userinfo, test['userinfo'])
+ self.assertEqual(uri.username, test['username'])
+ self.assertEqual(uri.password, test['password'])
+
+ # make sure changing the values doesn't do anything unexpected
+ uri.username = 'changeme'
+ self.assertEqual(uri.username, 'changeme')
+ self.assertEqual(uri.password, test['password'])
+ uri.password = 'insecure'
+ self.assertEqual(uri.username, 'changeme')
+ self.assertEqual(uri.password, 'insecure')
+
+ # reset back after our trickery
+ uri.userinfo = test['userinfo']
+ self.assertEqual(uri.userinfo, test['userinfo'])
+ self.assertEqual(uri.username, test['username'])
+ self.assertEqual(uri.password, test['password'])
+
+ uri.hostname = test['hostname']
+ self.assertEqual(uri.hostname, test['hostname'])
+ self.assertEqual(uri.hostport, test['hostname'])
+
+ uri.port = test['port']
+ self.assertEqual(uri.port, test['port'])
+ self.assertEqual(uri.hostport, test['hostport'])
+
+ uri.path = test['path']
+ self.assertEqual(uri.path, test['path'])
+
+ uri.params = test['params']
+ self.assertEqual(uri.params, test['params'])
+
+ uri.query = test['query']
+ self.assertEqual(uri.query, test['query'])
+
+ self.assertEqual(str(uri), test['uri'])
+
+ uri.params = {}
+ self.assertEqual(uri.params, {})
+ self.assertEqual(str(uri), (str(uri).split(";"))[0])
+
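+# A minimal round-trip sketch of the URI API exercised above, using only
+# behaviour the tests already assert (attribute writes update hostport,
+# and str() re-encodes the URI):
+#
+#     uri = URI("http://www.example.com:8080/index.html")
+#     uri.port = 8081
+#     assert uri.hostport == "www.example.com:8081"
+#     assert str(uri) == "http://www.example.com:8081/index.html"
+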
+class FetcherTest(unittest.TestCase):
+
+ def setUp(self):
+ self.origdir = os.getcwd()
+ self.d = bb.data.init()
+ self.tempdir = tempfile.mkdtemp()
+ self.dldir = os.path.join(self.tempdir, "download")
+ os.mkdir(self.dldir)
+ self.d.setVar("DL_DIR", self.dldir)
+ self.unpackdir = os.path.join(self.tempdir, "unpacked")
+ os.mkdir(self.unpackdir)
+ persistdir = os.path.join(self.tempdir, "persistdata")
+ self.d.setVar("PERSISTENT_DIR", persistdir)
+
+ def tearDown(self):
+ os.chdir(self.origdir)
+ if os.environ.get("BB_TMPDIR_NOCLEAN") == "yes":
+ print("Not cleaning up %s. Please remove manually." % self.tempdir)
+ else:
+ bb.utils.prunedir(self.tempdir)
+
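+# When a fetcher test fails, tearDown() above honours BB_TMPDIR_NOCLEAN,
+# so the scratch directory can be kept for inspection, e.g. (hypothetical
+# invocation):
+#
+#     BB_TMPDIR_NOCLEAN=yes python3 -m unittest bb.tests.fetch
+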
+class MirrorUriTest(FetcherTest):
+
+ replaceuris = {
+ ("git://git.invalid.infradead.org/mtd-utils.git;tag=1234567890123456789012345678901234567890", "git://.*/.*", "http://somewhere.org/somedir/")
+ : "http://somewhere.org/somedir/git2_git.invalid.infradead.org.mtd-utils.git.tar.gz",
+ ("git://git.invalid.infradead.org/mtd-utils.git;tag=1234567890123456789012345678901234567890", "git://.*/([^/]+/)*([^/]*)", "git://somewhere.org/somedir/\\2;protocol=http")
+ : "git://somewhere.org/somedir/mtd-utils.git;tag=1234567890123456789012345678901234567890;protocol=http",
+ ("git://git.invalid.infradead.org/foo/mtd-utils.git;tag=1234567890123456789012345678901234567890", "git://.*/([^/]+/)*([^/]*)", "git://somewhere.org/somedir/\\2;protocol=http")
+ : "git://somewhere.org/somedir/mtd-utils.git;tag=1234567890123456789012345678901234567890;protocol=http",
+ ("git://git.invalid.infradead.org/foo/mtd-utils.git;tag=1234567890123456789012345678901234567890", "git://.*/([^/]+/)*([^/]*)", "git://somewhere.org/\\2;protocol=http")
+ : "git://somewhere.org/mtd-utils.git;tag=1234567890123456789012345678901234567890;protocol=http",
+ ("git://someserver.org/bitbake;tag=1234567890123456789012345678901234567890", "git://someserver.org/bitbake", "git://git.openembedded.org/bitbake")
+ : "git://git.openembedded.org/bitbake;tag=1234567890123456789012345678901234567890",
+ ("file://sstate-xyz.tgz", "file://.*", "file:///somewhere/1234/sstate-cache")
+ : "file:///somewhere/1234/sstate-cache/sstate-xyz.tgz",
+ ("file://sstate-xyz.tgz", "file://.*", "file:///somewhere/1234/sstate-cache/")
+ : "file:///somewhere/1234/sstate-cache/sstate-xyz.tgz",
+ ("http://somewhere.org/somedir1/somedir2/somefile_1.2.3.tar.gz", "http://.*/.*", "http://somewhere2.org/somedir3")
+ : "http://somewhere2.org/somedir3/somefile_1.2.3.tar.gz",
+ ("http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere2.org/somedir3/somefile_1.2.3.tar.gz")
+ : "http://somewhere2.org/somedir3/somefile_1.2.3.tar.gz",
+ ("http://www.apache.org/dist/subversion/subversion-1.7.1.tar.bz2", "http://www.apache.org/dist", "http://archive.apache.org/dist")
+ : "http://archive.apache.org/dist/subversion/subversion-1.7.1.tar.bz2",
+ ("http://www.apache.org/dist/subversion/subversion-1.7.1.tar.bz2", "http://.*/.*", "file:///somepath/downloads/")
+ : "file:///somepath/downloads/subversion-1.7.1.tar.bz2",
+ ("git://git.invalid.infradead.org/mtd-utils.git;tag=1234567890123456789012345678901234567890", "git://.*/.*", "git://somewhere.org/somedir/BASENAME;protocol=http")
+ : "git://somewhere.org/somedir/mtd-utils.git;tag=1234567890123456789012345678901234567890;protocol=http",
+ ("git://git.invalid.infradead.org/foo/mtd-utils.git;tag=1234567890123456789012345678901234567890", "git://.*/.*", "git://somewhere.org/somedir/BASENAME;protocol=http")
+ : "git://somewhere.org/somedir/mtd-utils.git;tag=1234567890123456789012345678901234567890;protocol=http",
+ ("git://git.invalid.infradead.org/foo/mtd-utils.git;tag=1234567890123456789012345678901234567890", "git://.*/.*", "git://somewhere.org/somedir/MIRRORNAME;protocol=http")
+ : "git://somewhere.org/somedir/git.invalid.infradead.org.foo.mtd-utils.git;tag=1234567890123456789012345678901234567890;protocol=http",
+
+ # Renaming files doesn't work
+ #("http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere2.org/somedir3/somefile_2.3.4.tar.gz") : "http://somewhere2.org/somedir3/somefile_2.3.4.tar.gz"
+ #("file://sstate-xyz.tgz", "file://.*/.*", "file:///somewhere/1234/sstate-cache") : "file:///somewhere/1234/sstate-cache/sstate-xyz.tgz",
+ }
+
+ mirrorvar = "http://.*/.* file:///somepath/downloads/ \n" \
+ "git://someserver.org/bitbake git://git.openembedded.org/bitbake \n" \
+ "https://.*/.* file:///someotherpath/downloads/ \n" \
+ "http://.*/.* file:///someotherpath/downloads/ \n"
+
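+ # For reference, bb.fetch2.mirror_from_string() turns whitespace-separated
+ # pattern/replacement pairs such as mirrorvar above into a list of
+ # two-element entries, roughly (a sketch, not asserted by these tests):
+ #
+ #     [['http://.*/.*', 'file:///somepath/downloads/'],
+ #      ['git://someserver.org/bitbake', 'git://git.openembedded.org/bitbake'],
+ #      ...]
+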
+ def test_urireplace(self):
+ for k, v in self.replaceuris.items():
+ ud = bb.fetch.FetchData(k[0], self.d)
+ ud.setup_localpath(self.d)
+ mirrors = bb.fetch2.mirror_from_string("%s %s" % (k[1], k[2]))
+ newuris, uds = bb.fetch2.build_mirroruris(ud, mirrors, self.d)
+ self.assertEqual([v], newuris)
+
+ def test_urilist1(self):
+ fetcher = bb.fetch.FetchData("http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", self.d)
+ mirrors = bb.fetch2.mirror_from_string(self.mirrorvar)
+ uris, uds = bb.fetch2.build_mirroruris(fetcher, mirrors, self.d)
+ self.assertEqual(uris, ['file:///somepath/downloads/bitbake-1.0.tar.gz', 'file:///someotherpath/downloads/bitbake-1.0.tar.gz'])
+
+ def test_urilist2(self):
+ # Catch https:// -> files:// bug
+ fetcher = bb.fetch.FetchData("https://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", self.d)
+ mirrors = bb.fetch2.mirror_from_string(self.mirrorvar)
+ uris, uds = bb.fetch2.build_mirroruris(fetcher, mirrors, self.d)
+ self.assertEqual(uris, ['file:///someotherpath/downloads/bitbake-1.0.tar.gz'])
+
+ def test_mirror_of_mirror(self):
+ # Test if mirror of a mirror works
+ mirrorvar = self.mirrorvar + " http://.*/.* http://otherdownloads.yoctoproject.org/downloads/ \n"
+ mirrorvar = mirrorvar + " http://otherdownloads.yoctoproject.org/.* http://downloads2.yoctoproject.org/downloads/ \n"
+ fetcher = bb.fetch.FetchData("http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", self.d)
+ mirrors = bb.fetch2.mirror_from_string(mirrorvar)
+ uris, uds = bb.fetch2.build_mirroruris(fetcher, mirrors, self.d)
+ self.assertEqual(uris, ['file:///somepath/downloads/bitbake-1.0.tar.gz',
+ 'file:///someotherpath/downloads/bitbake-1.0.tar.gz',
+ 'http://otherdownloads.yoctoproject.org/downloads/bitbake-1.0.tar.gz',
+ 'http://downloads2.yoctoproject.org/downloads/bitbake-1.0.tar.gz'])
+
+ recmirrorvar = "https://.*/[^/]* http://AAAA/A/A/A/ \n" \
+ "https://.*/[^/]* https://BBBB/B/B/B/ \n"
+
+ def test_recursive(self):
+ fetcher = bb.fetch.FetchData("https://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", self.d)
+ mirrors = bb.fetch2.mirror_from_string(self.recmirrorvar)
+ uris, uds = bb.fetch2.build_mirroruris(fetcher, mirrors, self.d)
+ self.assertEqual(uris, ['http://AAAA/A/A/A/bitbake/bitbake-1.0.tar.gz',
+ 'https://BBBB/B/B/B/bitbake/bitbake-1.0.tar.gz',
+ 'http://AAAA/A/A/A/B/B/bitbake/bitbake-1.0.tar.gz'])
+
+class FetcherLocalTest(FetcherTest):
+ def setUp(self):
+ def touch(fn):
+ with open(fn, 'a'):
+ os.utime(fn, None)
+
+ super(FetcherLocalTest, self).setUp()
+ self.localsrcdir = os.path.join(self.tempdir, 'localsrc')
+ os.makedirs(self.localsrcdir)
+ touch(os.path.join(self.localsrcdir, 'a'))
+ touch(os.path.join(self.localsrcdir, 'b'))
+ os.makedirs(os.path.join(self.localsrcdir, 'dir'))
+ touch(os.path.join(self.localsrcdir, 'dir', 'c'))
+ touch(os.path.join(self.localsrcdir, 'dir', 'd'))
+ os.makedirs(os.path.join(self.localsrcdir, 'dir', 'subdir'))
+ touch(os.path.join(self.localsrcdir, 'dir', 'subdir', 'e'))
+ self.d.setVar("FILESPATH", self.localsrcdir)
+
+ def fetchUnpack(self, uris):
+ fetcher = bb.fetch.Fetch(uris, self.d)
+ fetcher.download()
+ fetcher.unpack(self.unpackdir)
+ flst = []
+ for root, dirs, files in os.walk(self.unpackdir):
+ for f in files:
+ flst.append(os.path.relpath(os.path.join(root, f), self.unpackdir))
+ flst.sort()
+ return flst
+
+ def test_local(self):
+ tree = self.fetchUnpack(['file://a', 'file://dir/c'])
+ self.assertEqual(tree, ['a', 'dir/c'])
+
+ def test_local_wildcard(self):
+ tree = self.fetchUnpack(['file://a', 'file://dir/*'])
+ self.assertEqual(tree, ['a', 'dir/c', 'dir/d', 'dir/subdir/e'])
+
+ def test_local_dir(self):
+ tree = self.fetchUnpack(['file://a', 'file://dir'])
+ self.assertEqual(tree, ['a', 'dir/c', 'dir/d', 'dir/subdir/e'])
+
+ def test_local_subdir(self):
+ tree = self.fetchUnpack(['file://dir/subdir'])
+ self.assertEqual(tree, ['dir/subdir/e'])
+
+ def test_local_subdir_file(self):
+ tree = self.fetchUnpack(['file://dir/subdir/e'])
+ self.assertEqual(tree, ['dir/subdir/e'])
+
+ def test_local_subdirparam(self):
+ tree = self.fetchUnpack(['file://a;subdir=bar', 'file://dir;subdir=foo/moo'])
+ self.assertEqual(tree, ['bar/a', 'foo/moo/dir/c', 'foo/moo/dir/d', 'foo/moo/dir/subdir/e'])
+
+ def test_local_deepsubdirparam(self):
+ tree = self.fetchUnpack(['file://dir/subdir/e;subdir=bar'])
+ self.assertEqual(tree, ['bar/dir/subdir/e'])
+
+ def test_local_absolutedir(self):
+ # Unpacking to an absolute path that is a subdirectory of the root
+ # should work
+ tree = self.fetchUnpack(['file://a;subdir=%s' % os.path.join(self.unpackdir, 'bar')])
+
+ # Unpacking to an absolute path outside of the root should fail
+ with self.assertRaises(bb.fetch2.UnpackError):
+ self.fetchUnpack(['file://a;subdir=/bin/sh'])
+
+class FetcherNoNetworkTest(FetcherTest):
+ def setUp(self):
+ super().setUp()
+ # all test cases are based on not having network
+ self.d.setVar("BB_NO_NETWORK", "1")
+
+ def test_missing(self):
+ string = "this is a test file\n".encode("utf-8")
+ self.d.setVarFlag("SRC_URI", "md5sum", hashlib.md5(string).hexdigest())
+ self.d.setVarFlag("SRC_URI", "sha256sum", hashlib.sha256(string).hexdigest())
+
+ self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
+ self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
+ fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/test-file.tar.gz"], self.d)
+ with self.assertRaises(bb.fetch2.NetworkAccess):
+ fetcher.download()
+
+ def test_valid_missing_donestamp(self):
+ # create the file in the download directory with correct hash
+ string = "this is a test file\n".encode("utf-8")
+ with open(os.path.join(self.dldir, "test-file.tar.gz"), "wb") as f:
+ f.write(string)
+
+ self.d.setVarFlag("SRC_URI", "md5sum", hashlib.md5(string).hexdigest())
+ self.d.setVarFlag("SRC_URI", "sha256sum", hashlib.sha256(string).hexdigest())
+
+ self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
+ self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
+ fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/test-file.tar.gz"], self.d)
+ fetcher.download()
+ self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
+
+ def test_invalid_missing_donestamp(self):
+ # create an invalid file in the download directory with incorrect hash
+ string = "this is a test file\n".encode("utf-8")
+ with open(os.path.join(self.dldir, "test-file.tar.gz"), "wb"):
+ pass
+
+ self.d.setVarFlag("SRC_URI", "md5sum", hashlib.md5(string).hexdigest())
+ self.d.setVarFlag("SRC_URI", "sha256sum", hashlib.sha256(string).hexdigest())
+
+ self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
+ self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
+ fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/test-file.tar.gz"], self.d)
+ with self.assertRaises(bb.fetch2.NetworkAccess):
+ fetcher.download()
+ # the existing file should not exist or should have been moved to "bad-checksum"
+ self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
+
+ def test_nochecksums_missing(self):
+ self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
+ self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
+ # ssh fetch does not support checksums
+ fetcher = bb.fetch.Fetch(["ssh://invalid@invalid.yoctoproject.org/test-file.tar.gz"], self.d)
+ # attempts to download with missing donestamp
+ with self.assertRaises(bb.fetch2.NetworkAccess):
+ fetcher.download()
+
+ def test_nochecksums_missing_donestamp(self):
+ # create a file in the download directory
+ with open(os.path.join(self.dldir, "test-file.tar.gz"), "wb"):
+ pass
+
+ self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
+ self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
+ # ssh fetch does not support checksums
+ fetcher = bb.fetch.Fetch(["ssh://invalid@invalid.yoctoproject.org/test-file.tar.gz"], self.d)
+ # attempts to download with missing donestamp
+ with self.assertRaises(bb.fetch2.NetworkAccess):
+ fetcher.download()
+
+ def test_nochecksums_has_donestamp(self):
+ # create a file in the download directory with the donestamp
+ with open(os.path.join(self.dldir, "test-file.tar.gz"), "wb"):
+ pass
+ with open(os.path.join(self.dldir, "test-file.tar.gz.done"), "wb"):
+ pass
+
+ self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
+ self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
+ # ssh fetch does not support checksums
+ fetcher = bb.fetch.Fetch(["ssh://invalid@invalid.yoctoproject.org/test-file.tar.gz"], self.d)
+ # should not fetch
+ fetcher.download()
+ # both files should still exist
+ self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
+ self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
+
+ def test_nochecksums_missing_has_donestamp(self):
+ # create a file in the download directory with the donestamp
+ with open(os.path.join(self.dldir, "test-file.tar.gz.done"), "wb"):
+ pass
+
+ self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
+ self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
+ # ssh fetch does not support checksums
+ fetcher = bb.fetch.Fetch(["ssh://invalid@invalid.yoctoproject.org/test-file.tar.gz"], self.d)
+ with self.assertRaises(bb.fetch2.NetworkAccess):
+ fetcher.download()
+ # the stale donestamp should have been removed along with the missing file
+ self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
+ self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
+
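+# Taken together, the cases above document the .done stamp contract: a
+# download whose checksum verifies can regenerate a missing stamp without
+# touching the network, while a missing or unverifiable file raises
+# bb.fetch2.NetworkAccess under BB_NO_NETWORK.
+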
+class FetcherNetworkTest(FetcherTest):
+ @skipIfNoNetwork()
+ def test_fetch(self):
+ fetcher = bb.fetch.Fetch(["http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d)
+ fetcher.download()
+ self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
+ self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.1.tar.gz"), 57892)
+ self.d.setVar("BB_NO_NETWORK", "1")
+ fetcher = bb.fetch.Fetch(["http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d)
+ fetcher.download()
+ fetcher.unpack(self.unpackdir)
+ self.assertEqual(len(os.listdir(self.unpackdir + "/bitbake-1.0/")), 9)
+ self.assertEqual(len(os.listdir(self.unpackdir + "/bitbake-1.1/")), 9)
+
+ @skipIfNoNetwork()
+ def test_fetch_mirror(self):
+ self.d.setVar("MIRRORS", "http://.*/.* http://downloads.yoctoproject.org/releases/bitbake")
+ fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
+ fetcher.download()
+ self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
+
+ @skipIfNoNetwork()
+ def test_fetch_mirror_of_mirror(self):
+ self.d.setVar("MIRRORS", "http://.*/.* http://invalid2.yoctoproject.org/ \n http://invalid2.yoctoproject.org/.* http://downloads.yoctoproject.org/releases/bitbake")
+ fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
+ fetcher.download()
+ self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
+
+ @skipIfNoNetwork()
+ def test_fetch_file_mirror_of_mirror(self):
+ self.d.setVar("MIRRORS", "http://.*/.* file:///some1where/ \n file:///some1where/.* file://some2where/ \n file://some2where/.* http://downloads.yoctoproject.org/releases/bitbake")
+ fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
+ os.mkdir(self.dldir + "/some2where")
+ fetcher.download()
+ self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
+
+ @skipIfNoNetwork()
+ def test_fetch_premirror(self):
+ self.d.setVar("PREMIRRORS", "http://.*/.* http://downloads.yoctoproject.org/releases/bitbake")
+ fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
+ fetcher.download()
+ self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
+
+ @skipIfNoNetwork()
+ def gitfetcher(self, url1, url2):
+ def checkrevision(self, fetcher):
+ fetcher.unpack(self.unpackdir)
+ revision = bb.process.run("git rev-parse HEAD", shell=True, cwd=self.unpackdir + "/git")[0].strip()
+ self.assertEqual(revision, "270a05b0b4ba0959fe0624d2a4885d7b70426da5")
+
+ self.d.setVar("BB_GENERATE_MIRROR_TARBALLS", "1")
+ self.d.setVar("SRCREV", "270a05b0b4ba0959fe0624d2a4885d7b70426da5")
+ fetcher = bb.fetch.Fetch([url1], self.d)
+ fetcher.download()
+ checkrevision(self, fetcher)
+ # Wipe out the dldir clone and the unpacked source, turn off the network and check that the mirror tarball works
+ bb.utils.prunedir(self.dldir + "/git2/")
+ bb.utils.prunedir(self.unpackdir)
+ self.d.setVar("BB_NO_NETWORK", "1")
+ fetcher = bb.fetch.Fetch([url2], self.d)
+ fetcher.download()
+ checkrevision(self, fetcher)
+
+ @skipIfNoNetwork()
+ def test_gitfetch(self):
+ url1 = url2 = "git://git.openembedded.org/bitbake"
+ self.gitfetcher(url1, url2)
+
+ @skipIfNoNetwork()
+ def test_gitfetch_goodsrcrev(self):
+ # SRCREV is set and matches the rev= parameter
+ url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5"
+ self.gitfetcher(url1, url2)
+
+ @skipIfNoNetwork()
+ def test_gitfetch_badsrcrev(self):
+ # SRCREV is set but does not match rev= parameter
+ url1 = url2 = "git://git.openembedded.org/bitbake;rev=dead05b0b4ba0959fe0624d2a4885d7b70426da5"
+ self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2)
+
+ @skipIfNoNetwork()
+ def test_gitfetch_tagandrev(self):
+ # specifying both rev= and tag= is expected to fail, even when they agree
+ url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5;tag=270a05b0b4ba0959fe0624d2a4885d7b70426da5"
+ self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2)
+
+ @skipIfNoNetwork()
+ def test_gitfetch_localusehead(self):
+ # Create dummy local Git repo
+ src_dir = tempfile.mkdtemp(dir=self.tempdir,
+ prefix='gitfetch_localusehead_')
+ src_dir = os.path.abspath(src_dir)
+ bb.process.run("git init", cwd=src_dir)
+ bb.process.run("git commit --allow-empty -m'Dummy commit'",
+ cwd=src_dir)
+ # Use other branch than master
+ bb.process.run("git checkout -b my-devel", cwd=src_dir)
+ bb.process.run("git commit --allow-empty -m'Dummy commit 2'",
+ cwd=src_dir)
+ stdout = bb.process.run("git rev-parse HEAD", cwd=src_dir)
+ orig_rev = stdout[0].strip()
+
+ # Fetch and check revision
+ self.d.setVar("SRCREV", "AUTOINC")
+ url = "git://" + src_dir + ";protocol=file;usehead=1"
+ fetcher = bb.fetch.Fetch([url], self.d)
+ fetcher.download()
+ fetcher.unpack(self.unpackdir)
+ stdout = bb.process.run("git rev-parse HEAD",
+ cwd=os.path.join(self.unpackdir, 'git'))
+ unpack_rev = stdout[0].strip()
+ self.assertEqual(orig_rev, unpack_rev)
+
+ @skipIfNoNetwork()
+ def test_gitfetch_remoteusehead(self):
+ url = "git://git.openembedded.org/bitbake;usehead=1"
+ self.assertRaises(bb.fetch.ParameterError, self.gitfetcher, url, url)
+
+ @skipIfNoNetwork()
+ def test_gitfetch_premirror(self):
+ url1 = "git://git.openembedded.org/bitbake"
+ url2 = "git://someserver.org/bitbake"
+ self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake \n")
+ self.gitfetcher(url1, url2)
+
+ @skipIfNoNetwork()
+ def test_gitfetch_premirror2(self):
+ url1 = url2 = "git://someserver.org/bitbake"
+ self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake \n")
+ self.gitfetcher(url1, url2)
+
+ @skipIfNoNetwork()
+ def test_gitfetch_premirror3(self):
+ realurl = "git://git.openembedded.org/bitbake"
+ dummyurl = "git://someserver.org/bitbake"
+ self.sourcedir = self.unpackdir.replace("unpacked", "sourcemirror.git")
+ os.chdir(self.tempdir)
+ bb.process.run("git clone %s %s 2> /dev/null" % (realurl, self.sourcedir), shell=True)
+ self.d.setVar("PREMIRRORS", "%s git://%s;protocol=file \n" % (dummyurl, self.sourcedir))
+ self.gitfetcher(dummyurl, dummyurl)
+
+ @skipIfNoNetwork()
+ def test_git_submodule(self):
+ fetcher = bb.fetch.Fetch(["gitsm://git.yoctoproject.org/git-submodule-test;rev=f12e57f2edf0aa534cf1616fa983d165a92b0842"], self.d)
+ fetcher.download()
+ # Previous cwd has been deleted
+ os.chdir(os.path.dirname(self.unpackdir))
+ fetcher.unpack(self.unpackdir)
+
+
+class TrustedNetworksTest(FetcherTest):
+ def test_trusted_network(self):
+ # Ensure trusted_network returns True when the host IS in the list.
+ url = "git://Someserver.org/foo;rev=1"
+ self.d.setVar("BB_ALLOWED_NETWORKS", "server1.org someserver.org server2.org server3.org")
+ self.assertTrue(bb.fetch.trusted_network(self.d, url))
+
+ def test_wild_trusted_network(self):
+ # Ensure trusted_network returns true when the *.host IS in the list.
+ url = "git://Someserver.org/foo;rev=1"
+ self.d.setVar("BB_ALLOWED_NETWORKS", "server1.org *.someserver.org server2.org server3.org")
+ self.assertTrue(bb.fetch.trusted_network(self.d, url))
+
+ def test_prefix_wild_trusted_network(self):
+ # Ensure trusted_network returns true when the prefix matches *.host.
+ url = "git://git.Someserver.org/foo;rev=1"
+ self.d.setVar("BB_ALLOWED_NETWORKS", "server1.org *.someserver.org server2.org server3.org")
+ self.assertTrue(bb.fetch.trusted_network(self.d, url))
+
+ def test_two_prefix_wild_trusted_network(self):
+ # Ensure trusted_network returns true when the prefix matches *.host.
+ url = "git://something.git.Someserver.org/foo;rev=1"
+ self.d.setVar("BB_ALLOWED_NETWORKS", "server1.org *.someserver.org server2.org server3.org")
+ self.assertTrue(bb.fetch.trusted_network(self.d, url))
+
+ def test_port_trusted_network(self):
+ # Ensure trusted_network returns True, even if the url specifies a port.
+ url = "git://someserver.org:8080/foo;rev=1"
+ self.d.setVar("BB_ALLOWED_NETWORKS", "someserver.org")
+ self.assertTrue(bb.fetch.trusted_network(self.d, url))
+
+ def test_untrusted_network(self):
+ # Ensure trusted_network returns False when the host is NOT in the list.
+ url = "git://someserver.org/foo;rev=1"
+ self.d.setVar("BB_ALLOWED_NETWORKS", "server1.org server2.org server3.org")
+ self.assertFalse(bb.fetch.trusted_network(self.d, url))
+
+ def test_wild_untrusted_network(self):
+ # Ensure trusted_network returns False when the host is NOT in the list.
+ url = "git://*.someserver.org/foo;rev=1"
+ self.d.setVar("BB_ALLOWED_NETWORKS", "server1.org server2.org server3.org")
+ self.assertFalse(bb.fetch.trusted_network(self.d, url))
+
+class URLHandle(unittest.TestCase):
+
+ datatable = {
+ "http://www.google.com/index.html" : ('http', 'www.google.com', '/index.html', '', '', {}),
+ "cvs://anoncvs@cvs.handhelds.org/cvs;module=familiar/dist/ipkg" : ('cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', '', {'module': 'familiar/dist/ipkg'}),
+ "cvs://anoncvs:anonymous@cvs.handhelds.org/cvs;tag=V0-99-81;module=familiar/dist/ipkg" : ('cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', 'anonymous', collections.OrderedDict([('tag', 'V0-99-81'), ('module', 'familiar/dist/ipkg')])),
+ "git://git.openembedded.org/bitbake;branch=@foo" : ('git', 'git.openembedded.org', '/bitbake', '', '', {'branch': '@foo'}),
+ "file://somelocation;someparam=1": ('file', '', 'somelocation', '', '', {'someparam': '1'}),
+ }
+ # we require a pathname to encodeurl but users can still pass such urls to
+ # decodeurl and we need to handle them
+ decodedata = datatable.copy()
+ decodedata.update({
+ "http://somesite.net;someparam=1": ('http', 'somesite.net', '', '', '', {'someparam': '1'}),
+ })
+
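+ # decodeurl() returns 6-tuples laid out as (scheme, host, path, user,
+ # password, params), matching the reference values above; encodeurl()
+ # performs the inverse mapping.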
+ def test_decodeurl(self):
+ for k, v in self.decodedata.items():
+ result = bb.fetch.decodeurl(k)
+ self.assertEqual(result, v)
+
+ def test_encodeurl(self):
+ for k, v in self.datatable.items():
+ result = bb.fetch.encodeurl(v)
+ self.assertEqual(result, k)
+
+class FetchLatestVersionTest(FetcherTest):
+
+ test_git_uris = {
+ # version pattern "X.Y.Z"
+ ("mx-1.0", "git://github.com/clutter-project/mx.git;branch=mx-1.4", "9b1db6b8060bd00b121a692f942404a24ae2960f", "")
+ : "1.99.4",
+ # version pattern "vX.Y"
+ ("mtd-utils", "git://git.infradead.org/mtd-utils.git", "ca39eb1d98e736109c64ff9c1aa2a6ecca222d8f", "")
+ : "1.5.0",
+ # version pattern "pkg_name-X.Y"
+ ("presentproto", "git://anongit.freedesktop.org/git/xorg/proto/presentproto", "24f3a56e541b0a9e6c6ee76081f441221a120ef9", "")
+ : "1.0",
+ # version pattern "pkg_name-vX.Y.Z"
+ ("dtc", "git://git.qemu.org/dtc.git", "65cc4d2748a2c2e6f27f1cf39e07a5dbabd80ebf", "")
+ : "1.4.0",
+ # combination version pattern
+ ("sysprof", "git://git.gnome.org/sysprof", "cd44ee6644c3641507fb53b8a2a69137f2971219", "")
+ : "1.2.0",
+ ("u-boot-mkimage", "git://git.denx.de/u-boot.git;branch=master;protocol=git", "62c175fbb8a0f9a926c88294ea9f7e88eb898f6c", "")
+ : "2014.01",
+ # version pattern "yyyymmdd"
+ ("mobile-broadband-provider-info", "git://git.gnome.org/mobile-broadband-provider-info", "4ed19e11c2975105b71b956440acdb25d46a347d", "")
+ : "20120614",
+ # packages with a valid UPSTREAM_CHECK_GITTAGREGEX
+ ("xf86-video-omap", "git://anongit.freedesktop.org/xorg/driver/xf86-video-omap", "ae0394e687f1a77e966cf72f895da91840dffb8f", "(?P<pver>(\d+\.(\d\.?)*))")
+ : "0.4.3",
+ ("build-appliance-image", "git://git.yoctoproject.org/poky", "b37dd451a52622d5b570183a81583cc34c2ff555", "(?P<pver>(([0-9][\.|_]?)+[0-9]))")
+ : "11.0.0",
+ ("chkconfig-alternatives-native", "git://github.com/kergoth/chkconfig;branch=sysroot", "cd437ecbd8986c894442f8fce1e0061e20f04dee", "chkconfig\-(?P<pver>((\d+[\.\-_]*)+))")
+ : "1.3.59",
+ ("remake", "git://github.com/rocky/remake.git", "f05508e521987c8494c92d9c2871aec46307d51d", "(?P<pver>(\d+\.(\d+\.)*\d*(\+dbg\d+(\.\d+)*)*))")
+ : "3.82+dbg0.9",
+ }
+
+ test_wget_uris = {
+ # packages with versions inside directory name
+ ("util-linux", "http://kernel.org/pub/linux/utils/util-linux/v2.23/util-linux-2.24.2.tar.bz2", "", "")
+ : "2.24.2",
+ ("enchant", "http://www.abisource.com/downloads/enchant/1.6.0/enchant-1.6.0.tar.gz", "", "")
+ : "1.6.0",
+ ("cmake", "http://www.cmake.org/files/v2.8/cmake-2.8.12.1.tar.gz", "", "")
+ : "2.8.12.1",
+ # packages with versions only in current directory
+ ("eglic", "http://downloads.yoctoproject.org/releases/eglibc/eglibc-2.18-svnr23787.tar.bz2", "", "")
+ : "2.19",
+ ("gnu-config", "http://downloads.yoctoproject.org/releases/gnu-config/gnu-config-20120814.tar.bz2", "", "")
+ : "20120814",
+ # packages with "99" in the name of possible version
+ ("pulseaudio", "http://freedesktop.org/software/pulseaudio/releases/pulseaudio-4.0.tar.xz", "", "")
+ : "5.0",
+ ("xserver-xorg", "http://xorg.freedesktop.org/releases/individual/xserver/xorg-server-1.15.1.tar.bz2", "", "")
+ : "1.15.1",
+ # packages with valid UPSTREAM_CHECK_URI and UPSTREAM_CHECK_REGEX
+ ("cups", "http://www.cups.org/software/1.7.2/cups-1.7.2-source.tar.bz2", "https://github.com/apple/cups/releases", "(?P<name>cups\-)(?P<pver>((\d+[\.\-_]*)+))\-source\.tar\.gz")
+ : "2.0.0",
+ ("db", "http://download.oracle.com/berkeley-db/db-5.3.21.tar.gz", "http://www.oracle.com/technetwork/products/berkeleydb/downloads/index-082944.html", "http://download.oracle.com/otn/berkeley-db/(?P<name>db-)(?P<pver>((\d+[\.\-_]*)+))\.tar\.gz")
+ : "6.1.19",
+ }
+
+ @skipIfNoNetwork()
+ def test_git_latest_versionstring(self):
+ for k, v in self.test_git_uris.items():
+ self.d.setVar("PN", k[0])
+ self.d.setVar("SRCREV", k[2])
+ self.d.setVar("UPSTREAM_CHECK_GITTAGREGEX", k[3])
+ ud = bb.fetch2.FetchData(k[1], self.d)
+ pupver = ud.method.latest_versionstring(ud, self.d)
+ verstring = pupver[0]
+ self.assertTrue(verstring, msg="Could not find upstream version for %s" % k[0])
+ r = bb.utils.vercmp_string(v, verstring)
+ self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring))
+
+ @skipIfNoNetwork()
+ def test_wget_latest_versionstring(self):
+ for k, v in self.test_wget_uris.items():
+ self.d.setVar("PN", k[0])
+ self.d.setVar("UPSTREAM_CHECK_URI", k[2])
+ self.d.setVar("UPSTREAM_CHECK_REGEX", k[3])
+ ud = bb.fetch2.FetchData(k[1], self.d)
+ pupver = ud.method.latest_versionstring(ud, self.d)
+ verstring = pupver[0]
+ self.assertTrue(verstring, msg="Could not find upstream version for %s" % k[0])
+ r = bb.utils.vercmp_string(v, verstring)
+ self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring))
+
+
+class FetchCheckStatusTest(FetcherTest):
+ test_wget_uris = ["http://www.cups.org/software/1.7.2/cups-1.7.2-source.tar.bz2",
+ "http://www.cups.org/",
+ "http://downloads.yoctoproject.org/releases/sato/sato-engine-0.1.tar.gz",
+ "http://downloads.yoctoproject.org/releases/sato/sato-engine-0.2.tar.gz",
+ "http://downloads.yoctoproject.org/releases/sato/sato-engine-0.3.tar.gz",
+ "https://yoctoproject.org/",
+ "https://yoctoproject.org/documentation",
+ "http://downloads.yoctoproject.org/releases/opkg/opkg-0.1.7.tar.gz",
+ "http://downloads.yoctoproject.org/releases/opkg/opkg-0.3.0.tar.gz",
+ "ftp://sourceware.org/pub/libffi/libffi-1.20.tar.gz",
+ "http://ftp.gnu.org/gnu/autoconf/autoconf-2.60.tar.gz",
+ "https://ftp.gnu.org/gnu/chess/gnuchess-5.08.tar.gz",
+ "https://ftp.gnu.org/gnu/gmp/gmp-4.0.tar.gz",
+ # GitHub releases are hosted on Amazon S3, which doesn't support HEAD
+ "https://github.com/kergoth/tslib/releases/download/1.1/tslib-1.1.tar.xz"
+ ]
+
+ @skipIfNoNetwork()
+ def test_wget_checkstatus(self):
+ fetch = bb.fetch2.Fetch(self.test_wget_uris, self.d)
+ for u in self.test_wget_uris:
+ with self.subTest(url=u):
+ ud = fetch.ud[u]
+ m = ud.method
+ ret = m.checkstatus(fetch, ud, self.d)
+ self.assertTrue(ret, msg="URI %s, can't check status" % (u))
+
+ @skipIfNoNetwork()
+ def test_wget_checkstatus_connection_cache(self):
+ from bb.fetch2 import FetchConnectionCache
+
+ connection_cache = FetchConnectionCache()
+ fetch = bb.fetch2.Fetch(self.test_wget_uris, self.d,
+ connection_cache = connection_cache)
+
+ for u in self.test_wget_uris:
+ with self.subTest(url=u):
+ ud = fetch.ud[u]
+ m = ud.method
+ ret = m.checkstatus(fetch, ud, self.d)
+ self.assertTrue(ret, msg="URI %s, can't check status" % (u))
+
+ connection_cache.close_connections()
+
+
+class GitMakeShallowTest(FetcherTest):
+ def setUp(self):
+ FetcherTest.setUp(self)
+ self.gitdir = os.path.join(self.tempdir, 'gitshallow')
+ bb.utils.mkdirhier(self.gitdir)
+ bb.process.run('git init', cwd=self.gitdir)
+
+ def assertRefs(self, expected_refs):
+ actual_refs = self.git(['for-each-ref', '--format=%(refname)']).splitlines()
+ full_expected = self.git(['rev-parse', '--symbolic-full-name'] + expected_refs).splitlines()
+ self.assertEqual(sorted(full_expected), sorted(actual_refs))
+
+ def assertRevCount(self, expected_count, args=None):
+ if args is None:
+ args = ['HEAD']
+ revs = self.git(['rev-list'] + args)
+ actual_count = len(revs.splitlines())
+ self.assertEqual(expected_count, actual_count, msg='Object count `%d` is not the expected `%d`' % (actual_count, expected_count))
+
+ def git(self, cmd):
+ if isinstance(cmd, str):
+ cmd = 'git ' + cmd
+ else:
+ cmd = ['git'] + cmd
+ return bb.process.run(cmd, cwd=self.gitdir)[0]
+
+ def make_shallow(self, args=None):
+ if args is None:
+ args = ['HEAD']
+ return bb.process.run([bb.fetch2.git.Git.make_shallow_path] + args, cwd=self.gitdir)
+
+ def add_empty_file(self, path, msg=None):
+ if msg is None:
+ msg = path
+ open(os.path.join(self.gitdir, path), 'w').close()
+ self.git(['add', path])
+ self.git(['commit', '-m', msg, path])
+
+ def test_make_shallow_single_branch_no_merge(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.assertRevCount(2)
+ self.make_shallow()
+ self.assertRevCount(1)
+
+ def test_make_shallow_single_branch_one_merge(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.git('checkout -b a_branch')
+ self.add_empty_file('c')
+ self.git('checkout master')
+ self.add_empty_file('d')
+ self.git('merge --no-ff --no-edit a_branch')
+ self.git('branch -d a_branch')
+ self.add_empty_file('e')
+ self.assertRevCount(6)
+ self.make_shallow(['HEAD~2'])
+ self.assertRevCount(5)
+
+ def test_make_shallow_at_merge(self):
+ self.add_empty_file('a')
+ self.git('checkout -b a_branch')
+ self.add_empty_file('b')
+ self.git('checkout master')
+ self.git('merge --no-ff --no-edit a_branch')
+ self.git('branch -d a_branch')
+ self.assertRevCount(3)
+ self.make_shallow()
+ self.assertRevCount(1)
+
+ def test_make_shallow_annotated_tag(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.git('tag -a -m a_tag a_tag')
+ self.assertRevCount(2)
+ self.make_shallow(['a_tag'])
+ self.assertRevCount(1)
+
+ def test_make_shallow_multi_ref(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.git('checkout -b a_branch')
+ self.add_empty_file('c')
+ self.git('checkout master')
+ self.add_empty_file('d')
+ self.git('checkout -b a_branch_2')
+ self.add_empty_file('a_tag')
+ self.git('tag a_tag')
+ self.git('checkout master')
+ self.git('branch -D a_branch_2')
+ self.add_empty_file('e')
+ self.assertRevCount(6, ['--all'])
+ self.make_shallow()
+ self.assertRevCount(5, ['--all'])
+
+ def test_make_shallow_multi_ref_trim(self):
+ self.add_empty_file('a')
+ self.git('checkout -b a_branch')
+ self.add_empty_file('c')
+ self.git('checkout master')
+ self.assertRevCount(1)
+ self.assertRevCount(2, ['--all'])
+ self.assertRefs(['master', 'a_branch'])
+ self.make_shallow(['-r', 'master', 'HEAD'])
+ self.assertRevCount(1, ['--all'])
+ self.assertRefs(['master'])
+
+ def test_make_shallow_noop(self):
+ self.add_empty_file('a')
+ self.assertRevCount(1)
+ self.make_shallow()
+ self.assertRevCount(1)
+
+ @skipIfNoNetwork()
+ def test_make_shallow_bitbake(self):
+ self.git('remote add origin https://github.com/openembedded/bitbake')
+ self.git('fetch --tags origin')
+ orig_revs = len(self.git('rev-list --all').splitlines())
+ self.make_shallow(['refs/tags/1.10.0'])
+ self.assertRevCount(orig_revs - 1746, ['--all'])
+
+class GitShallowTest(FetcherTest):
+ def setUp(self):
+ FetcherTest.setUp(self)
+ self.gitdir = os.path.join(self.tempdir, 'git')
+ self.srcdir = os.path.join(self.tempdir, 'gitsource')
+
+ bb.utils.mkdirhier(self.srcdir)
+ self.git('init', cwd=self.srcdir)
+ self.d.setVar('WORKDIR', self.tempdir)
+ self.d.setVar('S', self.gitdir)
+ self.d.delVar('PREMIRRORS')
+ self.d.delVar('MIRRORS')
+
+ uri = 'git://%s;protocol=file;subdir=${S}' % self.srcdir
+ self.d.setVar('SRC_URI', uri)
+ self.d.setVar('SRCREV', '${AUTOREV}')
+ self.d.setVar('AUTOREV', '${@bb.fetch2.get_autorev(d)}')
+
+ self.d.setVar('BB_GIT_SHALLOW', '1')
+ self.d.setVar('BB_GENERATE_MIRROR_TARBALLS', '0')
+ self.d.setVar('BB_GENERATE_SHALLOW_TARBALLS', '1')
+
+ def assertRefs(self, expected_refs, cwd=None):
+ if cwd is None:
+ cwd = self.gitdir
+ actual_refs = self.git(['for-each-ref', '--format=%(refname)'], cwd=cwd).splitlines()
+ full_expected = self.git(['rev-parse', '--symbolic-full-name'] + expected_refs, cwd=cwd).splitlines()
+ self.assertEqual(sorted(set(full_expected)), sorted(set(actual_refs)))
+
+ def assertRevCount(self, expected_count, args=None, cwd=None):
+ if args is None:
+ args = ['HEAD']
+ if cwd is None:
+ cwd = self.gitdir
+ revs = self.git(['rev-list'] + args, cwd=cwd)
+ actual_count = len(revs.splitlines())
+        self.assertEqual(expected_count, actual_count, msg='Revision count %d is not the expected %d' % (actual_count, expected_count))
+
+ def git(self, cmd, cwd=None):
+ if isinstance(cmd, str):
+ cmd = 'git ' + cmd
+ else:
+ cmd = ['git'] + cmd
+ if cwd is None:
+ cwd = self.gitdir
+ return bb.process.run(cmd, cwd=cwd)[0]
+
+ def add_empty_file(self, path, cwd=None, msg=None):
+ if msg is None:
+ msg = path
+ if cwd is None:
+ cwd = self.srcdir
+ open(os.path.join(cwd, path), 'w').close()
+ self.git(['add', path], cwd)
+ self.git(['commit', '-m', msg, path], cwd)
+
+ def fetch(self, uri=None):
+ if uri is None:
+ uris = self.d.getVar('SRC_URI', True).split()
+ uri = uris[0]
+ d = self.d
+ else:
+ d = self.d.createCopy()
+ d.setVar('SRC_URI', uri)
+ uri = d.expand(uri)
+ uris = [uri]
+
+ fetcher = bb.fetch2.Fetch(uris, d)
+ fetcher.download()
+ ud = fetcher.ud[uri]
+ return fetcher, ud
+
+ def fetch_and_unpack(self, uri=None):
+ fetcher, ud = self.fetch(uri)
+ fetcher.unpack(self.d.getVar('WORKDIR'))
+ assert os.path.exists(self.d.getVar('S'))
+ return fetcher, ud
+
+ def fetch_shallow(self, uri=None, disabled=False, keepclone=False):
+ """Fetch a uri, generating a shallow tarball, then unpack using it"""
+ fetcher, ud = self.fetch_and_unpack(uri)
+ assert os.path.exists(ud.clonedir), 'Git clone in DLDIR (%s) does not exist for uri %s' % (ud.clonedir, uri)
+
+        # Confirm that the shallow mirror tarball was generated
+ if not disabled:
+ assert os.path.exists(os.path.join(self.dldir, ud.mirrortarballs[0]))
+
+        # Remove the clone and unpacked repo so the next fetch must use the shallow tarball
+ bb.utils.remove(self.gitdir, recurse=True)
+ bb.utils.remove(ud.clonedir, recurse=True)
+
+        # Confirm that the shallow tarball is used when no git clone or full
+        # mirror tarball is available
+ fetcher, ud = self.fetch_and_unpack(uri)
+ if not disabled:
+ assert os.path.exists(os.path.join(self.gitdir, '.git', 'shallow')), 'Unpacked git repository at %s is not shallow' % self.gitdir
+ else:
+ assert not os.path.exists(os.path.join(self.gitdir, '.git', 'shallow')), 'Unpacked git repository at %s is shallow' % self.gitdir
+ return fetcher, ud
+
+ def test_shallow_disabled(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.assertRevCount(2, cwd=self.srcdir)
+
+ self.d.setVar('BB_GIT_SHALLOW', '0')
+ self.fetch_shallow(disabled=True)
+ self.assertRevCount(2)
+
+ def test_shallow_nobranch(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.assertRevCount(2, cwd=self.srcdir)
+
+ srcrev = self.git('rev-parse HEAD', cwd=self.srcdir).strip()
+ self.d.setVar('SRCREV', srcrev)
+ uri = self.d.getVar('SRC_URI', True).split()[0]
+ uri = '%s;nobranch=1;bare=1' % uri
+
+ self.fetch_shallow(uri)
+ self.assertRevCount(1)
+
+ # shallow refs are used to ensure the srcrev sticks around when we
+ # have no other branches referencing it
+ self.assertRefs(['refs/shallow/default'])
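+        # (i.e. the fetcher pins the revision under refs/shallow/default, so
+        # the SRCREV commit stays reachable even with no branch pointing at it)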
+
+ def test_shallow_default_depth_1(self):
+ # Create initial git repo
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.assertRevCount(2, cwd=self.srcdir)
+
+ self.fetch_shallow()
+ self.assertRevCount(1)
+
+ def test_shallow_depth_0_disables(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.assertRevCount(2, cwd=self.srcdir)
+
+ self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
+ self.fetch_shallow(disabled=True)
+ self.assertRevCount(2)
+
+ def test_shallow_depth_default_override(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.assertRevCount(2, cwd=self.srcdir)
+
+ self.d.setVar('BB_GIT_SHALLOW_DEPTH', '2')
+ self.d.setVar('BB_GIT_SHALLOW_DEPTH_default', '1')
+ self.fetch_shallow()
+ self.assertRevCount(1)
+
+ def test_shallow_depth_default_override_disable(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.add_empty_file('c')
+ self.assertRevCount(3, cwd=self.srcdir)
+
+ self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
+ self.d.setVar('BB_GIT_SHALLOW_DEPTH_default', '2')
+ self.fetch_shallow()
+ self.assertRevCount(2)
+
+ def test_current_shallow_out_of_date_clone(self):
+ # Create initial git repo
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.add_empty_file('c')
+ self.assertRevCount(3, cwd=self.srcdir)
+
+ # Clone and generate mirror tarball
+ fetcher, ud = self.fetch()
+
+        # Ensure we have a current mirror tarball but an out-of-date clone
+ self.git('update-ref refs/heads/master refs/heads/master~1', cwd=ud.clonedir)
+ self.assertRevCount(2, cwd=ud.clonedir)
+
+        # Fetch and unpack from the current tarball, not the out-of-date clone
+ bb.utils.remove(self.gitdir, recurse=True)
+ fetcher, ud = self.fetch()
+ fetcher.unpack(self.d.getVar('WORKDIR'))
+ self.assertRevCount(1)
+
+ def test_shallow_single_branch_no_merge(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.assertRevCount(2, cwd=self.srcdir)
+
+ self.fetch_shallow()
+ self.assertRevCount(1)
+ assert os.path.exists(os.path.join(self.gitdir, 'a'))
+ assert os.path.exists(os.path.join(self.gitdir, 'b'))
+
+ def test_shallow_no_dangling(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.assertRevCount(2, cwd=self.srcdir)
+
+ self.fetch_shallow()
+ self.assertRevCount(1)
+ assert not self.git('fsck --dangling')
+
+ def test_shallow_srcrev_branch_truncation(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ b_commit = self.git('rev-parse HEAD', cwd=self.srcdir).rstrip()
+ self.add_empty_file('c')
+ self.assertRevCount(3, cwd=self.srcdir)
+
+ self.d.setVar('SRCREV', b_commit)
+ self.fetch_shallow()
+
+ # The 'c' commit was removed entirely, and 'a' was removed from history
+ self.assertRevCount(1, ['--all'])
+ self.assertEqual(self.git('rev-parse HEAD').strip(), b_commit)
+ assert os.path.exists(os.path.join(self.gitdir, 'a'))
+ assert os.path.exists(os.path.join(self.gitdir, 'b'))
+ assert not os.path.exists(os.path.join(self.gitdir, 'c'))
+
+ def test_shallow_ref_pruning(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.git('branch a_branch', cwd=self.srcdir)
+ self.assertRefs(['master', 'a_branch'], cwd=self.srcdir)
+ self.assertRevCount(2, cwd=self.srcdir)
+
+ self.fetch_shallow()
+
+ self.assertRefs(['master', 'origin/master'])
+ self.assertRevCount(1)
+
+ def test_shallow_submodules(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+
+ smdir = os.path.join(self.tempdir, 'gitsubmodule')
+ bb.utils.mkdirhier(smdir)
+ self.git('init', cwd=smdir)
+ self.add_empty_file('asub', cwd=smdir)
+
+ self.git('submodule init', cwd=self.srcdir)
+ self.git('submodule add file://%s' % smdir, cwd=self.srcdir)
+ self.git('submodule update', cwd=self.srcdir)
+ self.git('commit -m submodule -a', cwd=self.srcdir)
+
+ uri = 'gitsm://%s;protocol=file;subdir=${S}' % self.srcdir
+ fetcher, ud = self.fetch_shallow(uri)
+
+ self.assertRevCount(1)
+ assert './.git/modules/' in bb.process.run('tar -tzf %s' % os.path.join(self.dldir, ud.mirrortarballs[0]))[0]
+ assert os.listdir(os.path.join(self.gitdir, 'gitsubmodule'))
+
+    if any(os.path.exists(os.path.join(p, 'git-annex')) for p in os.environ.get('PATH', '').split(':')):
+ def test_shallow_annex(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.git('annex init', cwd=self.srcdir)
+ open(os.path.join(self.srcdir, 'c'), 'w').close()
+ self.git('annex add c', cwd=self.srcdir)
+ self.git('commit -m annex-c -a', cwd=self.srcdir)
+ bb.process.run('chmod u+w -R %s' % os.path.join(self.srcdir, '.git', 'annex'))
+
+ uri = 'gitannex://%s;protocol=file;subdir=${S}' % self.srcdir
+ fetcher, ud = self.fetch_shallow(uri)
+
+ self.assertRevCount(1)
+ assert './.git/annex/' in bb.process.run('tar -tzf %s' % os.path.join(self.dldir, ud.mirrortarballs[0]))[0]
+ assert os.path.exists(os.path.join(self.gitdir, 'c'))
+
+ def test_shallow_multi_one_uri(self):
+ # Create initial git repo
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.git('checkout -b a_branch', cwd=self.srcdir)
+ self.add_empty_file('c')
+ self.add_empty_file('d')
+ self.git('checkout master', cwd=self.srcdir)
+ self.git('tag v0.0 a_branch', cwd=self.srcdir)
+ self.add_empty_file('e')
+ self.git('merge --no-ff --no-edit a_branch', cwd=self.srcdir)
+ self.add_empty_file('f')
+ self.assertRevCount(7, cwd=self.srcdir)
+
+ uri = self.d.getVar('SRC_URI', True).split()[0]
+ uri = '%s;branch=master,a_branch;name=master,a_branch' % uri
+
+ self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
+ self.d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0')
+ self.d.setVar('SRCREV_master', '${AUTOREV}')
+ self.d.setVar('SRCREV_a_branch', '${AUTOREV}')
+
+ self.fetch_shallow(uri)
+
+ self.assertRevCount(5)
+ self.assertRefs(['master', 'origin/master', 'origin/a_branch'])
+
+ def test_shallow_multi_one_uri_depths(self):
+ # Create initial git repo
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.git('checkout -b a_branch', cwd=self.srcdir)
+ self.add_empty_file('c')
+ self.add_empty_file('d')
+ self.git('checkout master', cwd=self.srcdir)
+ self.add_empty_file('e')
+ self.git('merge --no-ff --no-edit a_branch', cwd=self.srcdir)
+ self.add_empty_file('f')
+ self.assertRevCount(7, cwd=self.srcdir)
+
+ uri = self.d.getVar('SRC_URI', True).split()[0]
+ uri = '%s;branch=master,a_branch;name=master,a_branch' % uri
+
+ self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
+ self.d.setVar('BB_GIT_SHALLOW_DEPTH_master', '3')
+ self.d.setVar('BB_GIT_SHALLOW_DEPTH_a_branch', '1')
+ self.d.setVar('SRCREV_master', '${AUTOREV}')
+ self.d.setVar('SRCREV_a_branch', '${AUTOREV}')
+
+ self.fetch_shallow(uri)
+
+ self.assertRevCount(4, ['--all'])
+ self.assertRefs(['master', 'origin/master', 'origin/a_branch'])
+
+ def test_shallow_clone_preferred_over_shallow(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+
+ # Fetch once to generate the shallow tarball
+ fetcher, ud = self.fetch()
+ assert os.path.exists(os.path.join(self.dldir, ud.mirrortarballs[0]))
+
+ # Fetch and unpack with both the clonedir and shallow tarball available
+ bb.utils.remove(self.gitdir, recurse=True)
+ fetcher, ud = self.fetch_and_unpack()
+
+ # The unpacked tree should *not* be shallow
+ self.assertRevCount(2)
+ assert not os.path.exists(os.path.join(self.gitdir, '.git', 'shallow'))
+
+ def test_shallow_mirrors(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+
+ # Fetch once to generate the shallow tarball
+ fetcher, ud = self.fetch()
+ mirrortarball = ud.mirrortarballs[0]
+ assert os.path.exists(os.path.join(self.dldir, mirrortarball))
+
+ # Set up the mirror
+ mirrordir = os.path.join(self.tempdir, 'mirror')
+ bb.utils.mkdirhier(mirrordir)
+ self.d.setVar('PREMIRRORS', 'git://.*/.* file://%s/\n' % mirrordir)
+
+ os.rename(os.path.join(self.dldir, mirrortarball),
+ os.path.join(mirrordir, mirrortarball))
+
+ # Fetch from the mirror
+ bb.utils.remove(self.dldir, recurse=True)
+ bb.utils.remove(self.gitdir, recurse=True)
+ self.fetch_and_unpack()
+ self.assertRevCount(1)
+
+ def test_shallow_invalid_depth(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+
+ self.d.setVar('BB_GIT_SHALLOW_DEPTH', '-12')
+ with self.assertRaises(bb.fetch2.FetchError):
+ self.fetch()
+
+ def test_shallow_invalid_depth_default(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+
+ self.d.setVar('BB_GIT_SHALLOW_DEPTH_default', '-12')
+ with self.assertRaises(bb.fetch2.FetchError):
+ self.fetch()
+
+ def test_shallow_extra_refs(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.git('branch a_branch', cwd=self.srcdir)
+ self.assertRefs(['master', 'a_branch'], cwd=self.srcdir)
+ self.assertRevCount(2, cwd=self.srcdir)
+
+ self.d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/heads/a_branch')
+ self.fetch_shallow()
+
+ self.assertRefs(['master', 'origin/master', 'origin/a_branch'])
+ self.assertRevCount(1)
+
+ def test_shallow_extra_refs_wildcard(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.git('branch a_branch', cwd=self.srcdir)
+ self.git('tag v1.0', cwd=self.srcdir)
+ self.assertRefs(['master', 'a_branch', 'v1.0'], cwd=self.srcdir)
+ self.assertRevCount(2, cwd=self.srcdir)
+
+ self.d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/tags/*')
+ self.fetch_shallow()
+
+ self.assertRefs(['master', 'origin/master', 'v1.0'])
+ self.assertRevCount(1)
+
+ def test_shallow_missing_extra_refs(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+
+ self.d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/heads/foo')
+ with self.assertRaises(bb.fetch2.FetchError):
+ self.fetch()
+
+ def test_shallow_missing_extra_refs_wildcard(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+
+ self.d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/tags/*')
+ self.fetch()
+
+ def test_shallow_remove_revs(self):
+ # Create initial git repo
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+ self.git('checkout -b a_branch', cwd=self.srcdir)
+ self.add_empty_file('c')
+ self.add_empty_file('d')
+ self.git('checkout master', cwd=self.srcdir)
+ self.git('tag v0.0 a_branch', cwd=self.srcdir)
+ self.add_empty_file('e')
+ self.git('merge --no-ff --no-edit a_branch', cwd=self.srcdir)
+ self.git('branch -d a_branch', cwd=self.srcdir)
+ self.add_empty_file('f')
+ self.assertRevCount(7, cwd=self.srcdir)
+
+ self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
+ self.d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0')
+
+ self.fetch_shallow()
+
+ self.assertRevCount(5)
+
+ def test_shallow_invalid_revs(self):
+ self.add_empty_file('a')
+ self.add_empty_file('b')
+
+ self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
+ self.d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0')
+
+ with self.assertRaises(bb.fetch2.FetchError):
+ self.fetch()
+
+ @skipIfNoNetwork()
+ def test_bitbake(self):
+ self.git('remote add --mirror=fetch origin git://github.com/openembedded/bitbake', cwd=self.srcdir)
+ self.git('config core.bare true', cwd=self.srcdir)
+ self.git('fetch', cwd=self.srcdir)
+
+ self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
+ # Note that the 1.10.0 tag is annotated, so this also tests
+ # reference of an annotated vs unannotated tag
+ self.d.setVar('BB_GIT_SHALLOW_REVS', '1.10.0')
+
+ self.fetch_shallow()
+
+ # Confirm that the history of 1.10.0 was removed
+ orig_revs = len(self.git('rev-list master', cwd=self.srcdir).splitlines())
+ revs = len(self.git('rev-list master').splitlines())
+ self.assertNotEqual(orig_revs, revs)
+ self.assertRefs(['master', 'origin/master'])
+ self.assertRevCount(orig_revs - 1758)
diff --git a/poky/bitbake/lib/bb/tests/parse.py b/poky/bitbake/lib/bb/tests/parse.py
new file mode 100644
index 000000000..8f16ba4f4
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/parse.py
@@ -0,0 +1,185 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# BitBake Test for lib/bb/parse/
+#
+# Copyright (C) 2015 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+import unittest
+import tempfile
+import logging
+import bb
+import os
+
+logger = logging.getLogger('BitBake.TestParse')
+
+import bb.parse
+import bb.data
+import bb.siggen
+
+class ParseTest(unittest.TestCase):
+
+ testfile = """
+A = "1"
+B = "2"
+do_install() {
+ echo "hello"
+}
+
+C = "3"
+"""
+
+ def setUp(self):
+ self.d = bb.data.init()
+ bb.parse.siggen = bb.siggen.init(self.d)
+
+ def parsehelper(self, content, suffix = ".bb"):
+
+ f = tempfile.NamedTemporaryFile(suffix = suffix)
+ f.write(bytes(content, "utf-8"))
+ f.flush()
+ os.chdir(os.path.dirname(f.name))
+ return f
+
+ def test_parse_simple(self):
+ f = self.parsehelper(self.testfile)
+ d = bb.parse.handle(f.name, self.d)['']
+ self.assertEqual(d.getVar("A"), "1")
+ self.assertEqual(d.getVar("B"), "2")
+ self.assertEqual(d.getVar("C"), "3")
+
+ def test_parse_incomplete_function(self):
+ testfileB = self.testfile.replace("}", "")
+ f = self.parsehelper(testfileB)
+ with self.assertRaises(bb.parse.ParseError):
+ d = bb.parse.handle(f.name, self.d)['']
+
+ unsettest = """
+A = "1"
+B = "2"
+B[flag] = "3"
+
+unset A
+unset B[flag]
+"""
+
+ def test_parse_unset(self):
+ f = self.parsehelper(self.unsettest)
+ d = bb.parse.handle(f.name, self.d)['']
+ self.assertEqual(d.getVar("A"), None)
+ self.assertEqual(d.getVarFlag("A","flag"), None)
+ self.assertEqual(d.getVar("B"), "2")
+
+ exporttest = """
+A = "a"
+export B = "b"
+export C
+exportD = "d"
+"""
+
+ def test_parse_exports(self):
+ f = self.parsehelper(self.exporttest)
+ d = bb.parse.handle(f.name, self.d)['']
+ self.assertEqual(d.getVar("A"), "a")
+ self.assertIsNone(d.getVarFlag("A", "export"))
+ self.assertEqual(d.getVar("B"), "b")
+ self.assertEqual(d.getVarFlag("B", "export"), 1)
+ self.assertIsNone(d.getVar("C"))
+ self.assertEqual(d.getVarFlag("C", "export"), 1)
+ self.assertIsNone(d.getVar("D"))
+ self.assertIsNone(d.getVarFlag("D", "export"))
+ self.assertEqual(d.getVar("exportD"), "d")
+ self.assertIsNone(d.getVarFlag("exportD", "export"))
+
+
+ overridetest = """
+RRECOMMENDS_${PN} = "a"
+RRECOMMENDS_${PN}_libc = "b"
+OVERRIDES = "libc:${PN}"
+PN = "gtk+"
+"""
+
+ def test_parse_overrides(self):
+ f = self.parsehelper(self.overridetest)
+ d = bb.parse.handle(f.name, self.d)['']
+ self.assertEqual(d.getVar("RRECOMMENDS"), "b")
+ bb.data.expandKeys(d)
+ self.assertEqual(d.getVar("RRECOMMENDS"), "b")
+ d.setVar("RRECOMMENDS_gtk+", "c")
+ self.assertEqual(d.getVar("RRECOMMENDS"), "c")
+
+ overridetest2 = """
+EXTRA_OECONF = ""
+EXTRA_OECONF_class-target = "b"
+EXTRA_OECONF_append = " c"
+"""
+
+    def test_parse_overrides_append(self):
+ f = self.parsehelper(self.overridetest2)
+ d = bb.parse.handle(f.name, self.d)['']
+ d.appendVar("EXTRA_OECONF", " d")
+ d.setVar("OVERRIDES", "class-target")
+ self.assertEqual(d.getVar("EXTRA_OECONF"), "b c d")
+
+ overridetest3 = """
+DESCRIPTION = "A"
+DESCRIPTION_${PN}-dev = "${DESCRIPTION} B"
+PN = "bc"
+"""
+
+ def test_parse_combinations(self):
+ f = self.parsehelper(self.overridetest3)
+ d = bb.parse.handle(f.name, self.d)['']
+ bb.data.expandKeys(d)
+ self.assertEqual(d.getVar("DESCRIPTION_bc-dev"), "A B")
+ d.setVar("DESCRIPTION", "E")
+ d.setVar("DESCRIPTION_bc-dev", "C D")
+ d.setVar("OVERRIDES", "bc-dev")
+ self.assertEqual(d.getVar("DESCRIPTION"), "C D")
+
+
+ classextend = """
+VAR_var_override1 = "B"
+EXTRA = ":override1"
+OVERRIDES = "nothing${EXTRA}"
+
+BBCLASSEXTEND = "###CLASS###"
+"""
+ classextend_bbclass = """
+EXTRA = ""
+python () {
+ d.renameVar("VAR_var", "VAR_var2")
+}
+"""
+
+    #
+    # Test based upon a real-world data corruption issue: a variable change
+    # made in one data store poked through into a different data store. This
+    # test case replicates that issue, where the value 'B' would become
+    # unset/disappear.
+    #
+ def test_parse_classextend_contamination(self):
+ cls = self.parsehelper(self.classextend_bbclass, suffix=".bbclass")
+ #clsname = os.path.basename(cls.name).replace(".bbclass", "")
+ self.classextend = self.classextend.replace("###CLASS###", cls.name)
+ f = self.parsehelper(self.classextend)
+ alldata = bb.parse.handle(f.name, self.d)
+ d1 = alldata['']
+ d2 = alldata[cls.name]
+ self.assertEqual(d1.getVar("VAR_var"), "B")
+ self.assertEqual(d2.getVar("VAR_var"), None)
+
diff --git a/poky/bitbake/lib/bb/tests/utils.py b/poky/bitbake/lib/bb/tests/utils.py
new file mode 100644
index 000000000..2f4ccf3c6
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/utils.py
@@ -0,0 +1,603 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# BitBake Tests for utils.py
+#
+# Copyright (C) 2012 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+import unittest
+import bb
+import os
+import tempfile
+import re
+
+class VerCmpString(unittest.TestCase):
+
+ def test_vercmpstring(self):
+ result = bb.utils.vercmp_string('1', '2')
+ self.assertTrue(result < 0)
+ result = bb.utils.vercmp_string('2', '1')
+ self.assertTrue(result > 0)
+ result = bb.utils.vercmp_string('1', '1.0')
+ self.assertTrue(result < 0)
+ result = bb.utils.vercmp_string('1', '1.1')
+ self.assertTrue(result < 0)
+ result = bb.utils.vercmp_string('1.1', '1_p2')
+ self.assertTrue(result < 0)
+ result = bb.utils.vercmp_string('1.0', '1.0+1.1-beta1')
+ self.assertTrue(result < 0)
+ result = bb.utils.vercmp_string('1.1', '1.0+1.1-beta1')
+ self.assertTrue(result > 0)
+
+ def test_explode_dep_versions(self):
+ correctresult = {"foo" : ["= 1.10"]}
+ result = bb.utils.explode_dep_versions2("foo (= 1.10)")
+ self.assertEqual(result, correctresult)
+ result = bb.utils.explode_dep_versions2("foo (=1.10)")
+ self.assertEqual(result, correctresult)
+ result = bb.utils.explode_dep_versions2("foo ( = 1.10)")
+ self.assertEqual(result, correctresult)
+ result = bb.utils.explode_dep_versions2("foo ( =1.10)")
+ self.assertEqual(result, correctresult)
+ result = bb.utils.explode_dep_versions2("foo ( = 1.10 )")
+ self.assertEqual(result, correctresult)
+ result = bb.utils.explode_dep_versions2("foo ( =1.10 )")
+ self.assertEqual(result, correctresult)
+
+ def test_vercmp_string_op(self):
+ compareops = [('1', '1', '=', True),
+ ('1', '1', '==', True),
+ ('1', '1', '!=', False),
+ ('1', '1', '>', False),
+ ('1', '1', '<', False),
+ ('1', '1', '>=', True),
+ ('1', '1', '<=', True),
+ ('1', '0', '=', False),
+ ('1', '0', '==', False),
+ ('1', '0', '!=', True),
+ ('1', '0', '>', True),
+ ('1', '0', '<', False),
+ ('1', '0', '>>', True),
+ ('1', '0', '<<', False),
+ ('1', '0', '>=', True),
+ ('1', '0', '<=', False),
+ ('0', '1', '=', False),
+ ('0', '1', '==', False),
+ ('0', '1', '!=', True),
+ ('0', '1', '>', False),
+ ('0', '1', '<', True),
+ ('0', '1', '>>', False),
+ ('0', '1', '<<', True),
+ ('0', '1', '>=', False),
+ ('0', '1', '<=', True)]
+
+ for arg1, arg2, op, correctresult in compareops:
+ result = bb.utils.vercmp_string_op(arg1, arg2, op)
+ self.assertEqual(result, correctresult, 'vercmp_string_op("%s", "%s", "%s") != %s' % (arg1, arg2, op, correctresult))
+
+ # Check that clearly invalid operator raises an exception
+ self.assertRaises(bb.utils.VersionStringException, bb.utils.vercmp_string_op, '0', '0', '$')
+
+
+class Path(unittest.TestCase):
+ def test_unsafe_delete_path(self):
+ checkitems = [('/', True),
+ ('//', True),
+ ('///', True),
+ (os.getcwd().count(os.sep) * ('..' + os.sep), True),
+ (os.environ.get('HOME', '/home/test'), True),
+ ('/home/someone', True),
+ ('/home/other/', True),
+ ('/home/other/subdir', False),
+ ('', False)]
+ for arg1, correctresult in checkitems:
+ result = bb.utils._check_unsafe_delete_path(arg1)
+ self.assertEqual(result, correctresult, '_check_unsafe_delete_path("%s") != %s' % (arg1, correctresult))
+
+
+class EditMetadataFile(unittest.TestCase):
+ _origfile = """
+# A comment
+HELLO = "oldvalue"
+
+THIS = "that"
+
+# Another comment
+NOCHANGE = "samevalue"
+OTHER = 'anothervalue'
+
+MULTILINE = "a1 \\
+ a2 \\
+ a3"
+
+MULTILINE2 := " \\
+ b1 \\
+ b2 \\
+ b3 \\
+ "
+
+
+MULTILINE3 = " \\
+ c1 \\
+ c2 \\
+ c3 \\
+"
+
+do_functionname() {
+ command1 ${VAL1} ${VAL2}
+ command2 ${VAL3} ${VAL4}
+}
+"""
+ def _testeditfile(self, varvalues, compareto, dummyvars=None):
+ if dummyvars is None:
+ dummyvars = []
+ with tempfile.NamedTemporaryFile('w', delete=False) as tf:
+ tf.write(self._origfile)
+ tf.close()
+ try:
+ varcalls = []
+ def handle_file(varname, origvalue, op, newlines):
+ self.assertIn(varname, varvalues, 'Callback called for variable %s not in the list!' % varname)
+ self.assertNotIn(varname, dummyvars, 'Callback called for variable %s in dummy list!' % varname)
+ varcalls.append(varname)
+ return varvalues[varname]
+
+ bb.utils.edit_metadata_file(tf.name, varvalues.keys(), handle_file)
+ with open(tf.name) as f:
+ modfile = f.readlines()
+ # Ensure the output matches the expected output
+ self.assertEqual(compareto.splitlines(True), modfile)
+ # Ensure the callback function was called for every variable we asked for
+ # (plus allow testing behaviour when a requested variable is not present)
+ self.assertEqual(sorted(varvalues.keys()), sorted(varcalls + dummyvars))
+ finally:
+ os.remove(tf.name)
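+
+    # Each varvalues entry is the 4-tuple the callback returns to
+    # bb.utils.edit_metadata_file(): (newvalue, newop, indent, minbreak),
+    # where newvalue may be a string or a list of lines. The field names are
+    # descriptive, inferred from how the tests below use the tuple.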
+
+
+ def test_edit_metadata_file_nochange(self):
+ # Test file doesn't get modified with nothing to do
+ self._testeditfile({}, self._origfile)
+ # Test file doesn't get modified with only dummy variables
+ self._testeditfile({'DUMMY1': ('should_not_set', None, 0, True),
+ 'DUMMY2': ('should_not_set_again', None, 0, True)}, self._origfile, dummyvars=['DUMMY1', 'DUMMY2'])
+        # Test file doesn't get modified when some of the values are the same
+ self._testeditfile({'THIS': ('that', None, 0, True),
+ 'OTHER': ('anothervalue', None, 0, True),
+ 'MULTILINE3': (' c1 c2 c3 ', None, 4, False)}, self._origfile)
+
+ def test_edit_metadata_file_1(self):
+
+ newfile1 = """
+# A comment
+HELLO = "newvalue"
+
+THIS = "that"
+
+# Another comment
+NOCHANGE = "samevalue"
+OTHER = 'anothervalue'
+
+MULTILINE = "a1 \\
+ a2 \\
+ a3"
+
+MULTILINE2 := " \\
+ b1 \\
+ b2 \\
+ b3 \\
+ "
+
+
+MULTILINE3 = " \\
+ c1 \\
+ c2 \\
+ c3 \\
+"
+
+do_functionname() {
+ command1 ${VAL1} ${VAL2}
+ command2 ${VAL3} ${VAL4}
+}
+"""
+ self._testeditfile({'HELLO': ('newvalue', None, 4, True)}, newfile1)
+
+
+ def test_edit_metadata_file_2(self):
+
+ newfile2 = """
+# A comment
+HELLO = "oldvalue"
+
+THIS = "that"
+
+# Another comment
+NOCHANGE = "samevalue"
+OTHER = 'anothervalue'
+
+MULTILINE = " \\
+ d1 \\
+ d2 \\
+ d3 \\
+ "
+
+MULTILINE2 := " \\
+ b1 \\
+ b2 \\
+ b3 \\
+ "
+
+
+MULTILINE3 = "nowsingle"
+
+do_functionname() {
+ command1 ${VAL1} ${VAL2}
+ command2 ${VAL3} ${VAL4}
+}
+"""
+ self._testeditfile({'MULTILINE': (['d1','d2','d3'], None, 4, False),
+ 'MULTILINE3': ('nowsingle', None, 4, True),
+ 'NOTPRESENT': (['a', 'b'], None, 4, False)}, newfile2, dummyvars=['NOTPRESENT'])
+
+
+ def test_edit_metadata_file_3(self):
+
+ newfile3 = """
+# A comment
+HELLO = "oldvalue"
+
+# Another comment
+NOCHANGE = "samevalue"
+OTHER = "yetanothervalue"
+
+MULTILINE = "e1 \\
+ e2 \\
+ e3 \\
+ "
+
+MULTILINE2 := "f1 \\
+\tf2 \\
+\t"
+
+
+MULTILINE3 = " \\
+ c1 \\
+ c2 \\
+ c3 \\
+"
+
+do_functionname() {
+ othercommand_one a b c
+ othercommand_two d e f
+}
+"""
+
+ self._testeditfile({'do_functionname()': (['othercommand_one a b c', 'othercommand_two d e f'], None, 4, False),
+ 'MULTILINE2': (['f1', 'f2'], None, '\t', True),
+ 'MULTILINE': (['e1', 'e2', 'e3'], None, -1, True),
+ 'THIS': (None, None, 0, False),
+ 'OTHER': ('yetanothervalue', None, 0, True)}, newfile3)
+
+
+ def test_edit_metadata_file_4(self):
+
+ newfile4 = """
+# A comment
+HELLO = "oldvalue"
+
+THIS = "that"
+
+# Another comment
+OTHER = 'anothervalue'
+
+MULTILINE = "a1 \\
+ a2 \\
+ a3"
+
+MULTILINE2 := " \\
+ b1 \\
+ b2 \\
+ b3 \\
+ "
+
+
+"""
+
+ self._testeditfile({'NOCHANGE': (None, None, 0, False),
+ 'MULTILINE3': (None, None, 0, False),
+ 'THIS': ('that', None, 0, False),
+ 'do_functionname()': (None, None, 0, False)}, newfile4)
+
+
+ def test_edit_metadata(self):
+ newfile5 = """
+# A comment
+HELLO = "hithere"
+
+# A new comment
+THIS += "that"
+
+# Another comment
+NOCHANGE = "samevalue"
+OTHER = 'anothervalue'
+
+MULTILINE = "a1 \\
+ a2 \\
+ a3"
+
+MULTILINE2 := " \\
+ b1 \\
+ b2 \\
+ b3 \\
+ "
+
+
+MULTILINE3 = " \\
+ c1 \\
+ c2 \\
+ c3 \\
+"
+
+NEWVAR = "value"
+
+do_functionname() {
+ command1 ${VAL1} ${VAL2}
+ command2 ${VAL3} ${VAL4}
+}
+"""
+
+
+ def handle_var(varname, origvalue, op, newlines):
+ if varname == 'THIS':
+ newlines.append('# A new comment\n')
+ elif varname == 'do_functionname()':
+ newlines.append('NEWVAR = "value"\n')
+ newlines.append('\n')
+ valueitem = varvalues.get(varname, None)
+ if valueitem:
+ return valueitem
+ else:
+ return (origvalue, op, 0, True)
+
+ varvalues = {'HELLO': ('hithere', None, 0, True), 'THIS': ('that', '+=', 0, True)}
+ varlist = ['HELLO', 'THIS', 'do_functionname()']
+ (updated, newlines) = bb.utils.edit_metadata(self._origfile.splitlines(True), varlist, handle_var)
+ self.assertTrue(updated, 'List should be updated but isn\'t')
+ self.assertEqual(newlines, newfile5.splitlines(True))
+
+ # Make sure the orig value matches what we expect it to be
+ def test_edit_metadata_origvalue(self):
+ origfile = """
+MULTILINE = " stuff \\
+ morestuff"
+"""
+ expected_value = "stuff morestuff"
+ global value_in_callback
+ value_in_callback = ""
+
+ def handle_var(varname, origvalue, op, newlines):
+ global value_in_callback
+ value_in_callback = origvalue
+ return (origvalue, op, -1, False)
+
+ bb.utils.edit_metadata(origfile.splitlines(True),
+ ['MULTILINE'],
+ handle_var)
+
+        testvalue = re.sub(r'\s+', ' ', value_in_callback.strip())
+ self.assertEqual(expected_value, testvalue)
+
+class EditBbLayersConf(unittest.TestCase):
+
+ def _test_bblayers_edit(self, before, after, add, remove, notadded, notremoved):
+ with tempfile.NamedTemporaryFile('w', delete=False) as tf:
+ tf.write(before)
+ tf.close()
+ try:
+ actual_notadded, actual_notremoved = bb.utils.edit_bblayers_conf(tf.name, add, remove)
+ with open(tf.name) as f:
+ actual_after = f.readlines()
+ self.assertEqual(after.splitlines(True), actual_after)
+ self.assertEqual(notadded, actual_notadded)
+ self.assertEqual(notremoved, actual_notremoved)
+ finally:
+ os.remove(tf.name)
+
+
+ def test_bblayers_remove(self):
+ before = r"""
+# A comment
+
+BBPATH = "${TOPDIR}"
+BBFILES ?= ""
+BBLAYERS = " \
+ /home/user/path/layer1 \
+ /home/user/path/layer2 \
+ /home/user/path/subpath/layer3 \
+ /home/user/path/layer4 \
+ "
+"""
+ after = r"""
+# A comment
+
+BBPATH = "${TOPDIR}"
+BBFILES ?= ""
+BBLAYERS = " \
+ /home/user/path/layer1 \
+ /home/user/path/subpath/layer3 \
+ /home/user/path/layer4 \
+ "
+"""
+ self._test_bblayers_edit(before, after,
+ None,
+ '/home/user/path/layer2',
+ [],
+ [])
+
+
+ def test_bblayers_add(self):
+ before = r"""
+# A comment
+
+BBPATH = "${TOPDIR}"
+BBFILES ?= ""
+BBLAYERS = " \
+ /home/user/path/layer1 \
+ /home/user/path/layer2 \
+ /home/user/path/subpath/layer3 \
+ /home/user/path/layer4 \
+ "
+"""
+ after = r"""
+# A comment
+
+BBPATH = "${TOPDIR}"
+BBFILES ?= ""
+BBLAYERS = " \
+ /home/user/path/layer1 \
+ /home/user/path/layer2 \
+ /home/user/path/subpath/layer3 \
+ /home/user/path/layer4 \
+ /other/path/to/layer5 \
+ "
+"""
+ self._test_bblayers_edit(before, after,
+ '/other/path/to/layer5/',
+ None,
+ [],
+ [])
+
+
+ def test_bblayers_add_remove(self):
+ before = r"""
+# A comment
+
+BBPATH = "${TOPDIR}"
+BBFILES ?= ""
+BBLAYERS = " \
+ /home/user/path/layer1 \
+ /home/user/path/layer2 \
+ /home/user/path/subpath/layer3 \
+ /home/user/path/layer4 \
+ "
+"""
+ after = r"""
+# A comment
+
+BBPATH = "${TOPDIR}"
+BBFILES ?= ""
+BBLAYERS = " \
+ /home/user/path/layer1 \
+ /home/user/path/layer2 \
+ /home/user/path/layer4 \
+ /other/path/to/layer5 \
+ "
+"""
+ self._test_bblayers_edit(before, after,
+ ['/other/path/to/layer5', '/home/user/path/layer2/'], '/home/user/path/subpath/layer3/',
+ ['/home/user/path/layer2'],
+ [])
+
+
+ def test_bblayers_add_remove_home(self):
+ before = r"""
+# A comment
+
+BBPATH = "${TOPDIR}"
+BBFILES ?= ""
+BBLAYERS = " \
+ ~/path/layer1 \
+ ~/path/layer2 \
+ ~/otherpath/layer3 \
+ ~/path/layer4 \
+ "
+"""
+ after = r"""
+# A comment
+
+BBPATH = "${TOPDIR}"
+BBFILES ?= ""
+BBLAYERS = " \
+ ~/path/layer2 \
+ ~/path/layer4 \
+ ~/path2/layer5 \
+ "
+"""
+ self._test_bblayers_edit(before, after,
+ [os.environ['HOME'] + '/path/layer4', '~/path2/layer5'],
+ [os.environ['HOME'] + '/otherpath/layer3', '~/path/layer1', '~/path/notinlist'],
+ [os.environ['HOME'] + '/path/layer4'],
+ ['~/path/notinlist'])
+
+
+ def test_bblayers_add_remove_plusequals(self):
+ before = r"""
+# A comment
+
+BBPATH = "${TOPDIR}"
+BBFILES ?= ""
+BBLAYERS += " \
+ /home/user/path/layer1 \
+ /home/user/path/layer2 \
+ "
+"""
+ after = r"""
+# A comment
+
+BBPATH = "${TOPDIR}"
+BBFILES ?= ""
+BBLAYERS += " \
+ /home/user/path/layer2 \
+ /home/user/path/layer3 \
+ "
+"""
+ self._test_bblayers_edit(before, after,
+ '/home/user/path/layer3',
+ '/home/user/path/layer1',
+ [],
+ [])
+
+
+ def test_bblayers_add_remove_plusequals2(self):
+ before = r"""
+# A comment
+
+BBPATH = "${TOPDIR}"
+BBFILES ?= ""
+BBLAYERS += " \
+ /home/user/path/layer1 \
+ /home/user/path/layer2 \
+ /home/user/path/layer3 \
+ "
+BBLAYERS += "/home/user/path/layer4"
+BBLAYERS += "/home/user/path/layer5"
+"""
+ after = r"""
+# A comment
+
+BBPATH = "${TOPDIR}"
+BBFILES ?= ""
+BBLAYERS += " \
+ /home/user/path/layer2 \
+ /home/user/path/layer3 \
+ "
+BBLAYERS += "/home/user/path/layer5"
+BBLAYERS += "/home/user/otherpath/layer6"
+"""
+ self._test_bblayers_edit(before, after,
+ ['/home/user/otherpath/layer6', '/home/user/path/layer3'], ['/home/user/path/layer1', '/home/user/path/layer4', '/home/user/path/layer7'],
+ ['/home/user/path/layer3'],
+ ['/home/user/path/layer7'])
diff --git a/poky/bitbake/lib/bb/tinfoil.py b/poky/bitbake/lib/bb/tinfoil.py
new file mode 100644
index 000000000..368264f39
--- /dev/null
+++ b/poky/bitbake/lib/bb/tinfoil.py
@@ -0,0 +1,900 @@
+# tinfoil: a simple wrapper around cooker for bitbake-based command-line utilities
+#
+# Copyright (C) 2012-2017 Intel Corporation
+# Copyright (C) 2011 Mentor Graphics Corporation
+# Copyright (C) 2006-2012 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import logging
+import os
+import sys
+import atexit
+import re
+from collections import OrderedDict, defaultdict
+
+import bb.cache
+import bb.cooker
+import bb.providers
+import bb.taskdata
+import bb.utils
+import bb.command
+import bb.remotedata
+from bb.cookerdata import CookerConfiguration, ConfigParameters
+from bb.main import setup_bitbake, BitBakeConfigParameters, BBMainException
+import bb.fetch2
+
+
+# We need this in order to shut down the connection to the bitbake server;
+# otherwise the process will never properly exit.
+_server_connections = []
+def _terminate_connections():
+ for connection in _server_connections:
+ connection.terminate()
+atexit.register(_terminate_connections)
+
+class TinfoilUIException(Exception):
+ """Exception raised when the UI returns non-zero from its main function"""
+ def __init__(self, returncode):
+ self.returncode = returncode
+ def __repr__(self):
+ return 'UI module main returned %d' % self.returncode
+
+class TinfoilCommandFailed(Exception):
+ """Exception raised when run_command fails"""
+
+class TinfoilDataStoreConnector:
+ """Connector object used to enable access to datastore objects via tinfoil"""
+
+ def __init__(self, tinfoil, dsindex):
+ self.tinfoil = tinfoil
+ self.dsindex = dsindex
+ def getVar(self, name):
+ value = self.tinfoil.run_command('dataStoreConnectorFindVar', self.dsindex, name)
+ overrides = None
+ if isinstance(value, dict):
+ if '_connector_origtype' in value:
+ value['_content'] = self.tinfoil._reconvert_type(value['_content'], value['_connector_origtype'])
+ del value['_connector_origtype']
+ if '_connector_overrides' in value:
+ overrides = value['_connector_overrides']
+ del value['_connector_overrides']
+ return value, overrides
+ def getKeys(self):
+ return set(self.tinfoil.run_command('dataStoreConnectorGetKeys', self.dsindex))
+ def getVarHistory(self, name):
+ return self.tinfoil.run_command('dataStoreConnectorGetVarHistory', self.dsindex, name)
+ def expandPythonRef(self, varname, expr, d):
+ ds = bb.remotedata.RemoteDatastores.transmit_datastore(d)
+ ret = self.tinfoil.run_command('dataStoreConnectorExpandPythonRef', ds, varname, expr)
+ return ret
+ def setVar(self, varname, value):
+ if self.dsindex is None:
+ self.tinfoil.run_command('setVariable', varname, value)
+ else:
+ # Not currently implemented - indicate that setting should
+ # be redirected to local side
+ return True
+ def setVarFlag(self, varname, flagname, value):
+ if self.dsindex is None:
+ self.tinfoil.run_command('dataStoreConnectorSetVarFlag', self.dsindex, varname, flagname, value)
+ else:
+ # Not currently implemented - indicate that setting should
+ # be redirected to local side
+ return True
+ def delVar(self, varname):
+ if self.dsindex is None:
+ self.tinfoil.run_command('dataStoreConnectorDelVar', self.dsindex, varname)
+ else:
+ # Not currently implemented - indicate that setting should
+ # be redirected to local side
+ return True
+ def delVarFlag(self, varname, flagname):
+ if self.dsindex is None:
+ self.tinfoil.run_command('dataStoreConnectorDelVar', self.dsindex, varname, flagname)
+ else:
+ # Not currently implemented - indicate that setting should
+ # be redirected to local side
+ return True
+ def renameVar(self, name, newname):
+ if self.dsindex is None:
+ self.tinfoil.run_command('dataStoreConnectorRenameVar', self.dsindex, name, newname)
+ else:
+ # Not currently implemented - indicate that setting should
+ # be redirected to local side
+ return True
+
+class TinfoilCookerAdapter:
+ """
+    Provide an adapter for existing code that expects to access a cooker object
+    via Tinfoil; now that Tinfoil is on the client side it no longer has direct
+    access to the cooker.
+ """
+
+ class TinfoilCookerCollectionAdapter:
+ """ cooker.collection adapter """
+ def __init__(self, tinfoil):
+ self.tinfoil = tinfoil
+ def get_file_appends(self, fn):
+ return self.tinfoil.get_file_appends(fn)
+ def __getattr__(self, name):
+ if name == 'overlayed':
+ return self.tinfoil.get_overlayed_recipes()
+ elif name == 'bbappends':
+ return self.tinfoil.run_command('getAllAppends')
+ else:
+ raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))
+
+ class TinfoilRecipeCacheAdapter:
+ """ cooker.recipecache adapter """
+ def __init__(self, tinfoil):
+ self.tinfoil = tinfoil
+ self._cache = {}
+
+ def get_pkg_pn_fn(self):
+ pkg_pn = defaultdict(list, self.tinfoil.run_command('getRecipes') or [])
+ pkg_fn = {}
+ for pn, fnlist in pkg_pn.items():
+ for fn in fnlist:
+ pkg_fn[fn] = pn
+ self._cache['pkg_pn'] = pkg_pn
+ self._cache['pkg_fn'] = pkg_fn
+
+ def __getattr__(self, name):
+ # Grab these only when they are requested since they aren't always used
+ if name in self._cache:
+ return self._cache[name]
+ elif name == 'pkg_pn':
+ self.get_pkg_pn_fn()
+ return self._cache[name]
+ elif name == 'pkg_fn':
+ self.get_pkg_pn_fn()
+ return self._cache[name]
+ elif name == 'deps':
+ attrvalue = defaultdict(list, self.tinfoil.run_command('getRecipeDepends') or [])
+ elif name == 'rundeps':
+ attrvalue = defaultdict(lambda: defaultdict(list), self.tinfoil.run_command('getRuntimeDepends') or [])
+ elif name == 'runrecs':
+ attrvalue = defaultdict(lambda: defaultdict(list), self.tinfoil.run_command('getRuntimeRecommends') or [])
+ elif name == 'pkg_pepvpr':
+ attrvalue = self.tinfoil.run_command('getRecipeVersions') or {}
+ elif name == 'inherits':
+ attrvalue = self.tinfoil.run_command('getRecipeInherits') or {}
+ elif name == 'bbfile_priority':
+ attrvalue = self.tinfoil.run_command('getBbFilePriority') or {}
+ elif name == 'pkg_dp':
+ attrvalue = self.tinfoil.run_command('getDefaultPreference') or {}
+ elif name == 'fn_provides':
+ attrvalue = self.tinfoil.run_command('getRecipeProvides') or {}
+ elif name == 'packages':
+ attrvalue = self.tinfoil.run_command('getRecipePackages') or {}
+ elif name == 'packages_dynamic':
+ attrvalue = self.tinfoil.run_command('getRecipePackagesDynamic') or {}
+ elif name == 'rproviders':
+ attrvalue = self.tinfoil.run_command('getRProviders') or {}
+ else:
+ raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))
+
+ self._cache[name] = attrvalue
+ return attrvalue
+
+ def __init__(self, tinfoil):
+ self.tinfoil = tinfoil
+ self.collection = self.TinfoilCookerCollectionAdapter(tinfoil)
+ self.recipecaches = {}
+ # FIXME all machines
+ self.recipecaches[''] = self.TinfoilRecipeCacheAdapter(tinfoil)
+ self._cache = {}
+ def __getattr__(self, name):
+ # Grab these only when they are requested since they aren't always used
+ if name in self._cache:
+ return self._cache[name]
+ elif name == 'skiplist':
+ attrvalue = self.tinfoil.get_skipped_recipes()
+ elif name == 'bbfile_config_priorities':
+ ret = self.tinfoil.run_command('getLayerPriorities')
+ bbfile_config_priorities = []
+ for collection, pattern, regex, pri in ret:
+ bbfile_config_priorities.append((collection, pattern, re.compile(regex), pri))
+
+ attrvalue = bbfile_config_priorities
+ else:
+ raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))
+
+ self._cache[name] = attrvalue
+ return attrvalue
+
+ def findBestProvider(self, pn):
+ return self.tinfoil.find_best_provider(pn)
+
+
+class TinfoilRecipeInfo:
+ """
+ Provides a convenient representation of the cached information for a single recipe.
+ Some attributes are set on construction, others are read on-demand (which internally
+ may result in a remote procedure call to the bitbake server the first time).
+ Note that only information which is cached is available through this object - if
+ you need other variable values you will need to parse the recipe using
+ Tinfoil.parse_recipe().
+ """
+ def __init__(self, recipecache, d, pn, fn, fns):
+ self._recipecache = recipecache
+ self._d = d
+ self.pn = pn
+ self.fn = fn
+ self.fns = fns
+ self.inherit_files = recipecache.inherits[fn]
+ self.depends = recipecache.deps[fn]
+ (self.pe, self.pv, self.pr) = recipecache.pkg_pepvpr[fn]
+ self._cached_packages = None
+ self._cached_rprovides = None
+ self._cached_packages_dynamic = None
+
+ def __getattr__(self, name):
+ if name == 'alternates':
+ return [x for x in self.fns if x != self.fn]
+ elif name == 'rdepends':
+ return self._recipecache.rundeps[self.fn]
+ elif name == 'rrecommends':
+ return self._recipecache.runrecs[self.fn]
+ elif name == 'provides':
+ return self._recipecache.fn_provides[self.fn]
+ elif name == 'packages':
+ if self._cached_packages is None:
+ self._cached_packages = []
+ for pkg, fns in self._recipecache.packages.items():
+ if self.fn in fns:
+ self._cached_packages.append(pkg)
+ return self._cached_packages
+ elif name == 'packages_dynamic':
+ if self._cached_packages_dynamic is None:
+ self._cached_packages_dynamic = []
+ for pkg, fns in self._recipecache.packages_dynamic.items():
+ if self.fn in fns:
+ self._cached_packages_dynamic.append(pkg)
+ return self._cached_packages_dynamic
+ elif name == 'rprovides':
+ if self._cached_rprovides is None:
+ self._cached_rprovides = []
+ for pkg, fns in self._recipecache.rproviders.items():
+ if self.fn in fns:
+ self._cached_rprovides.append(pkg)
+ return self._cached_rprovides
+ else:
+ raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))
+ def inherits(self, only_recipe=False):
+ """
+ Get the inherited classes for a recipe. Returns the class names only.
+ Parameters:
+ only_recipe: True to return only the classes inherited by the recipe
+ itself, False to return all classes inherited within
+ the context for the recipe (which includes globally
+ inherited classes).
+ """
+ if only_recipe:
+ global_inherit = [x for x in (self._d.getVar('BBINCLUDED') or '').split() if x.endswith('.bbclass')]
+ else:
+ global_inherit = []
+ for clsfile in self.inherit_files:
+ if only_recipe and clsfile in global_inherit:
+ continue
+ clsname = os.path.splitext(os.path.basename(clsfile))[0]
+ yield clsname
+ def __str__(self):
+ return '%s' % self.pn
+
+
+class Tinfoil:
+ """
+ Tinfoil - an API for scripts and utilities to query
+ BitBake internals and perform build operations.
+ """
+
+ def __init__(self, output=sys.stdout, tracking=False, setup_logging=True):
+ """
+ Create a new tinfoil object.
+ Parameters:
+ output: specifies where console output should be sent. Defaults
+ to sys.stdout.
+ tracking: True to enable variable history tracking, False to
+ disable it (default). Enabling this has a minor
+ performance impact so typically it isn't enabled
+ unless you need to query variable history.
+        setup_logging: True to set up a logger so that things like
+ bb.warn() will work immediately and timeout warnings
+ are visible; False to let BitBake do this itself.
+ """
+ self.logger = logging.getLogger('BitBake')
+ self.config_data = None
+ self.cooker = None
+ self.tracking = tracking
+ self.ui_module = None
+ self.server_connection = None
+ self.recipes_parsed = False
+ self.quiet = 0
+ self.oldhandlers = self.logger.handlers[:]
+ if setup_logging:
+ # This is the *client-side* logger, nothing to do with
+ # logging messages from the server
+ bb.msg.logger_create('BitBake', output)
+ self.localhandlers = []
+ for handler in self.logger.handlers:
+ if handler not in self.oldhandlers:
+ self.localhandlers.append(handler)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.shutdown()
+
+ def prepare(self, config_only=False, config_params=None, quiet=0, extra_features=None):
+ """
+ Prepares the underlying BitBake system to be used via tinfoil.
+ This function must be called prior to calling any of the other
+ functions in the API.
+ NOTE: if you call prepare() you must absolutely call shutdown()
+ before your code terminates. You can use a "with" block to ensure
+ this happens e.g.
+
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare()
+ ...
+
+ Parameters:
+ config_only: True to read only the configuration and not load
+ the cache / parse recipes. This is useful if you just
+ want to query the value of a variable at the global
+ level or you want to do anything else that doesn't
+ involve knowing anything about the recipes in the
+ current configuration. False loads the cache / parses
+ recipes.
+ config_params: optionally specify your own configuration
+ parameters. If not specified an instance of
+ TinfoilConfigParameters will be created internally.
+ quiet: quiet level controlling console output - equivalent
+ to bitbake's -q/--quiet option. Default of 0 gives
+ the same output level as normal bitbake execution.
+ extra_features: extra features to be added to the feature
+ set requested from the server. See
+ CookerFeatures._feature_list for possible
+ features.
+ """
+ self.quiet = quiet
+
+ if self.tracking:
+ extrafeatures = [bb.cooker.CookerFeatures.BASEDATASTORE_TRACKING]
+ else:
+ extrafeatures = []
+
+ if extra_features:
+ extrafeatures += extra_features
+
+ if not config_params:
+ config_params = TinfoilConfigParameters(config_only=config_only, quiet=quiet)
+
+ cookerconfig = CookerConfiguration()
+ cookerconfig.setConfigParameters(config_params)
+
+ if not config_only:
+ # Disable local loggers because the UI module is going to set up its own
+ for handler in self.localhandlers:
+ self.logger.handlers.remove(handler)
+ self.localhandlers = []
+
+ self.server_connection, ui_module = setup_bitbake(config_params,
+ cookerconfig,
+ extrafeatures)
+
+ self.ui_module = ui_module
+
+ # Ensure the path to bitbake's bin directory is in PATH so that things like
+ # bitbake-worker can be run (usually this is the case, but it doesn't have to be)
+ path = os.getenv('PATH').split(':')
+ bitbakebinpath = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'bin'))
+ for entry in path:
+ if entry.endswith(os.sep):
+ entry = entry[:-1]
+ if os.path.abspath(entry) == bitbakebinpath:
+ break
+ else:
+ path.insert(0, bitbakebinpath)
+ os.environ['PATH'] = ':'.join(path)
+
+ if self.server_connection:
+ _server_connections.append(self.server_connection)
+ if config_only:
+ config_params.updateToServer(self.server_connection.connection, os.environ.copy())
+ self.run_command('parseConfiguration')
+ else:
+ self.run_actions(config_params)
+ self.recipes_parsed = True
+
+ self.config_data = bb.data.init()
+ connector = TinfoilDataStoreConnector(self, None)
+ self.config_data.setVar('_remote_data', connector)
+ self.cooker = TinfoilCookerAdapter(self)
+ self.cooker_data = self.cooker.recipecaches['']
+ else:
+ raise Exception('Failed to start bitbake server')
+
+ def run_actions(self, config_params):
+ """
+ Run the actions specified in config_params through the UI.
+ """
+ ret = self.ui_module.main(self.server_connection.connection, self.server_connection.events, config_params)
+ if ret:
+ raise TinfoilUIException(ret)
+
+ def parseRecipes(self):
+ """
+ Legacy function - use parse_recipes() instead.
+ """
+ self.parse_recipes()
+
+ def parse_recipes(self):
+ """
+ Load information on all recipes. Normally you should specify
+ config_only=False when calling prepare() instead of using this
+ function; this function is designed for situations where you need
+ to initialise Tinfoil and use it with config_only=True first and
+ then conditionally call this function to parse recipes later.
+ """
+ config_params = TinfoilConfigParameters(config_only=False)
+ self.run_actions(config_params)
+ self.recipes_parsed = True
+
+ def run_command(self, command, *params):
+ """
+ Run a command on the server (as implemented in bb.command).
+ Note that there are two types of command - synchronous and
+ asynchronous; in order to receive the results of asynchronous
+ commands you will need to set an appropriate event mask
+ using set_event_mask() and listen for the result using
+ wait_event() - with the correct event mask you'll at least get
+ bb.command.CommandCompleted and possibly other events before
+ that depending on the command.
+ """
+ if not self.server_connection:
+ raise Exception('Not connected to server (did you call .prepare()?)')
+
+ commandline = [command]
+ if params:
+ commandline.extend(params)
+ result = self.server_connection.connection.runCommand(commandline)
+ if result[1]:
+ raise TinfoilCommandFailed(result[1])
+ return result[0]
+
+ def set_event_mask(self, eventlist):
+ """Set the event mask which will be applied within wait_event()"""
+ if not self.server_connection:
+ raise Exception('Not connected to server (did you call .prepare()?)')
+ llevel, debug_domains = bb.msg.constructLogOptions()
+ ret = self.run_command('setEventMask', self.server_connection.connection.getEventHandle(), llevel, debug_domains, eventlist)
+ if not ret:
+ raise Exception('setEventMask failed')
+
+ def wait_event(self, timeout=0):
+ """
+ Wait for an event from the server for the specified time.
+ A timeout of 0 means don't wait if there are no events in the queue.
+ Returns the next event in the queue or None if the timeout was
+        reached. Note that in order to receive any events you will
+ first need to set the internal event mask using set_event_mask()
+ (otherwise whatever event mask the UI set up will be in effect).
+ """
+ if not self.server_connection:
+ raise Exception('Not connected to server (did you call .prepare()?)')
+ return self.server_connection.events.waitEvent(timeout)
+
+ def get_overlayed_recipes(self):
+ """
+ Find recipes which are overlayed (i.e. where recipes exist in multiple layers)
+ """
+ return defaultdict(list, self.run_command('getOverlayedRecipes'))
+
+ def get_skipped_recipes(self):
+ """
+ Find recipes which were skipped (i.e. SkipRecipe was raised
+ during parsing).
+ """
+ return OrderedDict(self.run_command('getSkippedRecipes'))
+
+ def get_all_providers(self):
+ return defaultdict(list, self.run_command('allProviders'))
+
+ def find_providers(self):
+ return self.run_command('findProviders')
+
+ def find_best_provider(self, pn):
+ return self.run_command('findBestProvider', pn)
+
+ def get_runtime_providers(self, rdep):
+ return self.run_command('getRuntimeProviders', rdep)
+
+ def get_recipe_file(self, pn):
+ """
+ Get the file name for the specified recipe/target. Raises
+ bb.providers.NoProvider if there is no match or the recipe was
+ skipped.
+ """
+ best = self.find_best_provider(pn)
+ if not best or (len(best) > 3 and not best[3]):
+ skiplist = self.get_skipped_recipes()
+ taskdata = bb.taskdata.TaskData(None, skiplist=skiplist)
+ skipreasons = taskdata.get_reasons(pn)
+ if skipreasons:
+ raise bb.providers.NoProvider('%s is unavailable:\n %s' % (pn, ' \n'.join(skipreasons)))
+ else:
+ raise bb.providers.NoProvider('Unable to find any recipe file matching "%s"' % pn)
+ return best[3]
+
+ def get_file_appends(self, fn):
+ """
+ Find the bbappends for a recipe file
+ """
+ return self.run_command('getFileAppends', fn)
+
+ def all_recipes(self, mc='', sort=True):
+ """
+ Enable iterating over all recipes in the current configuration.
+ Returns an iterator over TinfoilRecipeInfo objects created on demand.
+ Parameters:
+ mc: The multiconfig, default of '' uses the main configuration.
+ sort: True to sort recipes alphabetically (default), False otherwise
+ """
+ recipecache = self.cooker.recipecaches[mc]
+ if sort:
+ recipes = sorted(recipecache.pkg_pn.items())
+ else:
+ recipes = recipecache.pkg_pn.items()
+ for pn, fns in recipes:
+ prov = self.find_best_provider(pn)
+ recipe = TinfoilRecipeInfo(recipecache,
+ self.config_data,
+ pn=pn,
+ fn=prov[3],
+ fns=fns)
+ yield recipe
+
+ def all_recipe_files(self, mc='', variants=True, preferred_only=False):
+ """
+ Enable iterating over all recipe files in the current configuration.
+ Returns an iterator over file paths.
+ Parameters:
+ mc: The multiconfig, default of '' uses the main configuration.
+ variants: True to include variants of recipes created through
+ BBCLASSEXTEND (default) or False to exclude them
+ preferred_only: True to include only the preferred recipe where
+ multiple exist providing the same PN, False to list
+ all recipes
+ """
+ recipecache = self.cooker.recipecaches[mc]
+ if preferred_only:
+ files = []
+ for pn in recipecache.pkg_pn.keys():
+ prov = self.find_best_provider(pn)
+ files.append(prov[3])
+ else:
+ files = recipecache.pkg_fn.keys()
+ for fn in sorted(files):
+ if not variants and fn.startswith('virtual:'):
+ continue
+ yield fn
+
+
+ def get_recipe_info(self, pn, mc=''):
+ """
+ Get information on a specific recipe in the current configuration by name (PN).
+ Returns a TinfoilRecipeInfo object created on demand.
+ Parameters:
+ mc: The multiconfig, default of '' uses the main configuration.
+ """
+ recipecache = self.cooker.recipecaches[mc]
+ prov = self.find_best_provider(pn)
+ fn = prov[3]
+ if fn:
+ actual_pn = recipecache.pkg_fn[fn]
+ recipe = TinfoilRecipeInfo(recipecache,
+ self.config_data,
+ pn=actual_pn,
+ fn=fn,
+ fns=recipecache.pkg_pn[actual_pn])
+ return recipe
+ else:
+ return None
+
+ def parse_recipe(self, pn):
+ """
+ Parse the specified recipe and return a datastore object
+ representing the environment for the recipe.
+ """
+ fn = self.get_recipe_file(pn)
+ return self.parse_recipe_file(fn)
+
+ def parse_recipe_file(self, fn, appends=True, appendlist=None, config_data=None):
+ """
+ Parse the specified recipe file (with or without bbappends)
+ and return a datastore object representing the environment
+ for the recipe.
+ Parameters:
+ fn: recipe file to parse - can be a file path or virtual
+ specification
+ appends: True to apply bbappends, False otherwise
+ appendlist: optional list of bbappend files to apply, if you
+ want to filter them
+ config_data: custom config datastore to use. NOTE: if you
+ specify config_data then you cannot use a virtual
+ specification for fn.
+ """
+ if self.tracking:
+ # Enable history tracking just for the parse operation
+ self.run_command('enableDataTracking')
+ try:
+ if appends and appendlist == []:
+ appends = False
+ if config_data:
+ dctr = bb.remotedata.RemoteDatastores.transmit_datastore(config_data)
+ dscon = self.run_command('parseRecipeFile', fn, appends, appendlist, dctr)
+ else:
+ dscon = self.run_command('parseRecipeFile', fn, appends, appendlist)
+ if dscon:
+ return self._reconvert_type(dscon, 'DataStoreConnectionHandle')
+ else:
+ return None
+ finally:
+ if self.tracking:
+ self.run_command('disableDataTracking')
+
+ def build_file(self, buildfile, task, internal=True):
+ """
+ Runs the specified task for just a single recipe (i.e. no dependencies).
+        This is equivalent to bitbake -b, except that with the default
+        internal=True no warning about dependencies will be produced, normal
+        info messages from the runqueue will be silenced, and BuildInit,
+        BuildStarted and BuildCompleted events will not be fired.
+ """
+ return self.run_command('buildFile', buildfile, task, internal)
+
+ def build_targets(self, targets, task=None, handle_events=True, extra_events=None, event_callback=None):
+ """
+ Builds the specified targets. This is equivalent to a normal invocation
+ of bitbake. Has built-in event handling which is enabled by default and
+ can be extended if needed.
+ Parameters:
+ targets:
+ One or more targets to build. Can be a list or a
+ space-separated string.
+ task:
+ The task to run; if None then the value of BB_DEFAULT_TASK
+ will be used. Default None.
+ handle_events:
+ True to handle events in a similar way to normal bitbake
+ invocation with knotty; False to return immediately (on the
+ assumption that the caller will handle the events instead).
+ Default True.
+ extra_events:
+ An optional list of events to add to the event mask (if
+ handle_events=True). If you add events here you also need
+ to specify a callback function in event_callback that will
+ handle the additional events. Default None.
+ event_callback:
+ An optional function taking a single parameter which
+ will be called first upon receiving any event (if
+ handle_events=True) so that the caller can override or
+ extend the event handling. Default None.
+ """
+ if isinstance(targets, str):
+ targets = targets.split()
+ if not task:
+ task = self.config_data.getVar('BB_DEFAULT_TASK')
+
+ if handle_events:
+ # A reasonable set of default events matching up with those we handle below
+ eventmask = [
+ 'bb.event.BuildStarted',
+ 'bb.event.BuildCompleted',
+ 'logging.LogRecord',
+ 'bb.event.NoProvider',
+ 'bb.command.CommandCompleted',
+ 'bb.command.CommandFailed',
+ 'bb.build.TaskStarted',
+ 'bb.build.TaskFailed',
+ 'bb.build.TaskSucceeded',
+ 'bb.build.TaskFailedSilent',
+ 'bb.build.TaskProgress',
+ 'bb.runqueue.runQueueTaskStarted',
+ 'bb.runqueue.sceneQueueTaskStarted',
+ 'bb.event.ProcessStarted',
+ 'bb.event.ProcessProgress',
+ 'bb.event.ProcessFinished',
+ ]
+ if extra_events:
+ eventmask.extend(extra_events)
+ ret = self.set_event_mask(eventmask)
+
+ includelogs = self.config_data.getVar('BBINCLUDELOGS')
+ loglines = self.config_data.getVar('BBINCLUDELOGS_LINES')
+
+ ret = self.run_command('buildTargets', targets, task)
+ if handle_events:
+ result = False
+            # Borrowed from knotty; somewhat hackily, we use the helper as
+            # the object to store "shutdown" on
+ helper = bb.ui.uihelper.BBUIHelper()
+ # We set up logging optionally in the constructor so now we need to
+ # grab the handlers to pass to TerminalFilter
+ console = None
+ errconsole = None
+ for handler in self.logger.handlers:
+ if isinstance(handler, logging.StreamHandler):
+ if handler.stream == sys.stdout:
+ console = handler
+ elif handler.stream == sys.stderr:
+ errconsole = handler
+ format_str = "%(levelname)s: %(message)s"
+ format = bb.msg.BBLogFormatter(format_str)
+ helper.shutdown = 0
+ parseprogress = None
+ termfilter = bb.ui.knotty.TerminalFilter(helper, helper, console, errconsole, format, quiet=self.quiet)
+ try:
+ while True:
+ try:
+ event = self.wait_event(0.25)
+ if event:
+ if event_callback and event_callback(event):
+ continue
+ if helper.eventHandler(event):
+ if isinstance(event, bb.build.TaskFailedSilent):
+ logger.warning("Logfile for failed setscene task is %s" % event.logfile)
+ elif isinstance(event, bb.build.TaskFailed):
+ bb.ui.knotty.print_event_log(event, includelogs, loglines, termfilter)
+ continue
+ if isinstance(event, bb.event.ProcessStarted):
+ if self.quiet > 1:
+ continue
+ parseprogress = bb.ui.knotty.new_progress(event.processname, event.total)
+ parseprogress.start(False)
+ continue
+ if isinstance(event, bb.event.ProcessProgress):
+ if self.quiet > 1:
+ continue
+ if parseprogress:
+ parseprogress.update(event.progress)
+ else:
+                                bb.warn("Got ProcessProgress event for something that never started?")
+ continue
+ if isinstance(event, bb.event.ProcessFinished):
+ if self.quiet > 1:
+ continue
+ if parseprogress:
+ parseprogress.finish()
+ parseprogress = None
+ continue
+ if isinstance(event, bb.command.CommandCompleted):
+ result = True
+ break
+ if isinstance(event, bb.command.CommandFailed):
+ self.logger.error(str(event))
+ result = False
+ break
+ if isinstance(event, logging.LogRecord):
+ if event.taskpid == 0 or event.levelno > logging.INFO:
+ self.logger.handle(event)
+ continue
+ if isinstance(event, bb.event.NoProvider):
+ self.logger.error(str(event))
+ result = False
+ break
+
+ elif helper.shutdown > 1:
+ break
+ termfilter.updateFooter()
+ except KeyboardInterrupt:
+ termfilter.clearFooter()
+ if helper.shutdown == 1:
+ print("\nSecond Keyboard Interrupt, stopping...\n")
+ ret = self.run_command("stateForceShutdown")
+ if ret and ret[2]:
+ self.logger.error("Unable to cleanly stop: %s" % ret[2])
+ elif helper.shutdown == 0:
+ print("\nKeyboard Interrupt, closing down...\n")
+ interrupted = True
+ ret = self.run_command("stateShutdown")
+ if ret and ret[2]:
+ self.logger.error("Unable to cleanly shutdown: %s" % ret[2])
+ helper.shutdown = helper.shutdown + 1
+ termfilter.clearFooter()
+ finally:
+ termfilter.finish()
+ if helper.failed_tasks:
+ result = False
+ return result
+ else:
+ return ret
+
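+    # Usage sketch (illustrative): extending the event mask with an extra
+    # event class and handling it in a callback; ReachableStamps is merely
+    # one example of an existing event class:
+    #
+    #   def _cb(event):
+    #       if isinstance(event, bb.event.ReachableStamps):
+    #           return True   # handled here, skip the default handling
+    #       return False
+    #
+    #   tinfoil.build_targets('core-image-minimal',
+    #                         extra_events=['bb.event.ReachableStamps'],
+    #                         event_callback=_cb)
+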
+ def shutdown(self):
+ """
+ Shut down tinfoil. Disconnects from the server and gracefully
+ releases any associated resources. You must call this function if
+ prepare() has been called, or use a with... block when you create
+        the tinfoil object, which will ensure that it gets called.
+ """
+ if self.server_connection:
+ self.run_command('clientComplete')
+ _server_connections.remove(self.server_connection)
+ bb.event.ui_queue = []
+ self.server_connection.terminate()
+ self.server_connection = None
+
+        # Restore logging handlers to how they looked when we started
+ if self.oldhandlers:
+ for handler in self.logger.handlers:
+ if handler not in self.oldhandlers:
+ self.logger.handlers.remove(handler)
+
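+    # Usage sketch (illustrative): the with-block form guarantees that
+    # shutdown() runs even if an exception is raised:
+    #
+    #   with bb.tinfoil.Tinfoil() as tinfoil:
+    #       tinfoil.prepare(config_only=True)
+    #       print(tinfoil.config_data.getVar('MACHINE'))
+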
+ def _reconvert_type(self, obj, origtypename):
+ """
+ Convert an object back to the right type, in the case
+ that marshalling has changed it (especially with xmlrpc)
+ """
+ supported_types = {
+ 'set': set,
+ 'DataStoreConnectionHandle': bb.command.DataStoreConnectionHandle,
+ }
+
+ origtype = supported_types.get(origtypename, None)
+ if origtype is None:
+ raise Exception('Unsupported type "%s"' % origtypename)
+ if type(obj) == origtype:
+ newobj = obj
+ elif isinstance(obj, dict):
+ # New style class
+ newobj = origtype()
+ for k,v in obj.items():
+ setattr(newobj, k, v)
+ else:
+ # Assume we can coerce the type
+ newobj = origtype(obj)
+
+ if isinstance(newobj, bb.command.DataStoreConnectionHandle):
+ connector = TinfoilDataStoreConnector(self, newobj.dsindex)
+ newobj = bb.data.init()
+ newobj.setVar('_remote_data', connector)
+
+ return newobj
+
+
+class TinfoilConfigParameters(BitBakeConfigParameters):
+
+ def __init__(self, config_only, **options):
+ self.initial_options = options
+ # Apply some sane defaults
+ if not 'parse_only' in options:
+ self.initial_options['parse_only'] = not config_only
+ #if not 'status_only' in options:
+ # self.initial_options['status_only'] = config_only
+ if not 'ui' in options:
+ self.initial_options['ui'] = 'knotty'
+ if not 'argv' in options:
+ self.initial_options['argv'] = []
+
+ super(TinfoilConfigParameters, self).__init__()
+
+ def parseCommandLine(self, argv=None):
+ # We don't want any parameters parsed from the command line
+ opts = super(TinfoilConfigParameters, self).parseCommandLine([])
+ for key, val in self.initial_options.items():
+ setattr(opts[0], key, val)
+ return opts
diff --git a/poky/bitbake/lib/bb/ui/__init__.py b/poky/bitbake/lib/bb/ui/__init__.py
new file mode 100644
index 000000000..a4805ed02
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/__init__.py
@@ -0,0 +1,17 @@
+#
+# BitBake UI Implementation
+#
+# Copyright (C) 2006-2007 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
diff --git a/poky/bitbake/lib/bb/ui/buildinfohelper.py b/poky/bitbake/lib/bb/ui/buildinfohelper.py
new file mode 100644
index 000000000..524a5b094
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/buildinfohelper.py
@@ -0,0 +1,2002 @@
+#
+# BitBake ToasterUI Implementation
+#
+# Copyright (C) 2013 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import bb
+import re
+import os
+
+import django
+from django.utils import timezone
+
+import toaster
+# Add toaster module to the search path to help django.setup() find the right
+# modules
+sys.path.insert(0, os.path.dirname(toaster.__file__))
+
+# Set the DJANGO_SETTINGS_MODULE if it's not already set
+os.environ["DJANGO_SETTINGS_MODULE"] =\
+ os.environ.get("DJANGO_SETTINGS_MODULE",
+ "toaster.toastermain.settings")
+# Setup django framework (needs to be done before importing modules)
+django.setup()
+
+from orm.models import Build, Task, Recipe, Layer_Version, Layer, Target, LogMessage, HelpText
+from orm.models import Target_Image_File, TargetKernelFile, TargetSDKFile
+from orm.models import Variable, VariableHistory
+from orm.models import Package, Package_File, Target_Installed_Package, Target_File
+from orm.models import Task_Dependency, Package_Dependency
+from orm.models import Recipe_Dependency, Provides
+from orm.models import Project, CustomImagePackage
+from orm.models import signal_runbuilds
+
+from bldcontrol.models import BuildEnvironment, BuildRequest
+from bldcontrol.models import BRLayer
+from bldcontrol import bbcontroller
+
+from bb.msg import BBLogFormatter as formatter
+from django.db import models
+from pprint import pformat
+import logging
+from datetime import datetime, timedelta
+
+from django.db import transaction, connection
+
+
+# pylint: disable=invalid-name
+# the logger name is standard throughout BitBake
+logger = logging.getLogger("ToasterLogger")
+
+class NotExisting(Exception):
+ pass
+
+class ORMWrapper(object):
+ """ This class creates the dictionaries needed to store information in the database
+ following the format defined by the Django models. It is also used to save this
+ information in the database.
+ """
+
+ def __init__(self):
+ self.layer_version_objects = []
+ self.layer_version_built = []
+ self.task_objects = {}
+ self.recipe_objects = {}
+
+ @staticmethod
+ def _build_key(**kwargs):
+ key = "0"
+ for k in sorted(kwargs.keys()):
+ if isinstance(kwargs[k], models.Model):
+ key += "-%d" % kwargs[k].id
+ else:
+ key += "-%s" % str(kwargs[k])
+ return key
+
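+    # For example (illustrative): _build_key(build=<Build pk=3>, name='glibc')
+    # returns "0-3-glibc" - keyword names are sorted, model instances
+    # contribute their primary key, and anything else its string form.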
+
+ def _cached_get_or_create(self, clazz, **kwargs):
+ """ This is a memory-cached get_or_create. We assume that the objects will not be created in the
+ database through any other means.
+ """
+
+ assert issubclass(clazz, models.Model), "_cached_get_or_create needs to get the class as first argument"
+
+ key = ORMWrapper._build_key(**kwargs)
+ dictname = "objects_%s" % clazz.__name__
+ if not dictname in vars(self).keys():
+ vars(self)[dictname] = {}
+
+ created = False
+ if not key in vars(self)[dictname].keys():
+ vars(self)[dictname][key], created = \
+ clazz.objects.get_or_create(**kwargs)
+
+ return (vars(self)[dictname][key], created)
+
+
+ def _cached_get(self, clazz, **kwargs):
+ """ This is a memory-cached get. We assume that the objects will not change in the database between gets.
+ """
+ assert issubclass(clazz, models.Model), "_cached_get needs to get the class as first argument"
+
+ key = ORMWrapper._build_key(**kwargs)
+ dictname = "objects_%s" % clazz.__name__
+
+ if not dictname in vars(self).keys():
+ vars(self)[dictname] = {}
+
+ if not key in vars(self)[dictname].keys():
+ vars(self)[dictname][key] = clazz.objects.get(**kwargs)
+
+ return vars(self)[dictname][key]
+
+ def get_similar_target_with_image_files(self, target):
+ """
+ Get a Target object "similar" to target; i.e. with the same target
+ name ('core-image-minimal' etc.) and machine.
+ """
+ return target.get_similar_target_with_image_files()
+
+ def get_similar_target_with_sdk_files(self, target):
+ return target.get_similar_target_with_sdk_files()
+
+ def clone_image_artifacts(self, target_from, target_to):
+ target_to.clone_image_artifacts_from(target_from)
+
+ def clone_sdk_artifacts(self, target_from, target_to):
+ target_to.clone_sdk_artifacts_from(target_from)
+
+ def _timestamp_to_datetime(self, secs):
+ """
+ Convert timestamp in seconds to Python datetime
+ """
+ return timezone.make_aware(datetime(1970, 1, 1) + timedelta(seconds=secs))
+
+ # pylint: disable=no-self-use
+ # we disable detection of no self use in functions because the methods actually work on the object
+ # even if they don't touch self anywhere
+
+ # pylint: disable=bad-continuation
+ # we do not follow the python conventions for continuation indentation due to long lines here
+
+ def get_or_create_build_object(self, brbe):
+ prj = None
+ buildrequest = None
+ if brbe is not None:
+ # Toaster-triggered build
+ logger.debug(1, "buildinfohelper: brbe is %s" % brbe)
+ br, _ = brbe.split(":")
+ buildrequest = BuildRequest.objects.get(pk=br)
+ prj = buildrequest.project
+ else:
+ # CLI build
+ prj = Project.objects.get_or_create_default_project()
+ logger.debug(1, "buildinfohelper: project is not specified, defaulting to %s" % prj)
+
+ if buildrequest is not None:
+ # reuse existing Build object
+ build = buildrequest.build
+ build.project = prj
+ build.save()
+ else:
+ # create new Build object
+ now = timezone.now()
+ build = Build.objects.create(
+ project=prj,
+ started_on=now,
+ completed_on=now,
+ build_name='')
+
+            logger.debug(1, "buildinfohelper: created build %s" % build)
+
+ if buildrequest is not None:
+ buildrequest.build = build
+ buildrequest.save()
+
+ return build
+
+ def update_build(self, build, data_dict):
+ for key in data_dict:
+ setattr(build, key, data_dict[key])
+ build.save()
+
+ @staticmethod
+ def get_or_create_targets(target_info):
+ """
+ NB get_or_create() is used here because for Toaster-triggered builds,
+ we already created the targets when the build was triggered.
+ """
+ result = []
+ for target in target_info['targets']:
+ task = ''
+ if ':' in target:
+ target, task = target.split(':', 1)
+ if task.startswith('do_'):
+ task = task[3:]
+ if task == 'build':
+ task = ''
+
+ obj, _ = Target.objects.get_or_create(build=target_info['build'],
+ target=target,
+ task=task)
+ result.append(obj)
+ return result
+
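+    # For example (illustrative): a target entry of
+    # 'core-image-minimal:do_populate_sdk' is stored as
+    # target='core-image-minimal', task='populate_sdk', while a plain
+    # 'core-image-minimal' (implicit 'build' task) gets task=''.
+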
+ def update_build_stats_and_outcome(self, build, errors, warnings, taskfailures):
+ assert isinstance(build,Build)
+ assert isinstance(errors, int)
+ assert isinstance(warnings, int)
+
+ if build.outcome == Build.CANCELLED:
+ return
+ try:
+ if build.buildrequest.state == BuildRequest.REQ_CANCELLING:
+ return
+ except AttributeError:
+ # We may not have a buildrequest if this is a command line build
+ pass
+
+ outcome = Build.SUCCEEDED
+ if errors or taskfailures:
+ outcome = Build.FAILED
+
+ build.completed_on = timezone.now()
+ build.outcome = outcome
+ build.save()
+ signal_runbuilds()
+
+ def update_target_set_license_manifest(self, target, license_manifest_path):
+ target.license_manifest_path = license_manifest_path
+ target.save()
+
+ def update_target_set_package_manifest(self, target, package_manifest_path):
+ target.package_manifest_path = package_manifest_path
+ target.save()
+
+ def update_task_object(self, build, task_name, recipe_name, task_stats):
+ """
+ Find the task for build which matches the recipe and task name
+ to be stored
+ """
+ task_to_update = Task.objects.get(
+ build = build,
+ task_name = task_name,
+ recipe__name = recipe_name
+ )
+
+ if 'started' in task_stats and 'ended' in task_stats:
+ task_to_update.started = self._timestamp_to_datetime(task_stats['started'])
+ task_to_update.ended = self._timestamp_to_datetime(task_stats['ended'])
+ task_to_update.elapsed_time = (task_stats['ended'] - task_stats['started'])
+ task_to_update.cpu_time_user = task_stats.get('cpu_time_user')
+ task_to_update.cpu_time_system = task_stats.get('cpu_time_system')
+ if 'disk_io_read' in task_stats and 'disk_io_write' in task_stats:
+ task_to_update.disk_io_read = task_stats['disk_io_read']
+ task_to_update.disk_io_write = task_stats['disk_io_write']
+ task_to_update.disk_io = task_stats['disk_io_read'] + task_stats['disk_io_write']
+
+ task_to_update.save()
+
+ def get_update_task_object(self, task_information, must_exist = False):
+ assert 'build' in task_information
+ assert 'recipe' in task_information
+ assert 'task_name' in task_information
+
+ # we use must_exist info for database look-up optimization
+ task_object, created = self._cached_get_or_create(Task,
+ build=task_information['build'],
+ recipe=task_information['recipe'],
+ task_name=task_information['task_name']
+ )
+ if created and must_exist:
+ task_information['debug'] = "build id %d, recipe id %d" % (task_information['build'].pk, task_information['recipe'].pk)
+ raise NotExisting("Task object created when expected to exist", task_information)
+
+ object_changed = False
+ for v in vars(task_object):
+ if v in task_information.keys():
+ if vars(task_object)[v] != task_information[v]:
+ vars(task_object)[v] = task_information[v]
+ object_changed = True
+
+ # update setscene-related information if the task has a setscene
+ if task_object.outcome == Task.OUTCOME_COVERED and 1 == task_object.get_related_setscene().count():
+ task_object.outcome = Task.OUTCOME_CACHED
+ object_changed = True
+
+ outcome_task_setscene = Task.objects.get(task_executed=True, build = task_object.build,
+ recipe = task_object.recipe, task_name=task_object.task_name+"_setscene").outcome
+ if outcome_task_setscene == Task.OUTCOME_SUCCESS:
+ task_object.sstate_result = Task.SSTATE_RESTORED
+ object_changed = True
+ elif outcome_task_setscene == Task.OUTCOME_FAILED:
+ task_object.sstate_result = Task.SSTATE_FAILED
+ object_changed = True
+
+ if object_changed:
+ task_object.save()
+ return task_object
+
+
+ def get_update_recipe_object(self, recipe_information, must_exist = False):
+ assert 'layer_version' in recipe_information
+ assert 'file_path' in recipe_information
+ assert 'pathflags' in recipe_information
+
+ assert not recipe_information['file_path'].startswith("/") # we should have layer-relative paths at all times
+
+
+ def update_recipe_obj(recipe_object):
+ object_changed = False
+ for v in vars(recipe_object):
+ if v in recipe_information.keys():
+ object_changed = True
+ vars(recipe_object)[v] = recipe_information[v]
+
+ if object_changed:
+ recipe_object.save()
+
+ recipe, created = self._cached_get_or_create(Recipe, layer_version=recipe_information['layer_version'],
+ file_path=recipe_information['file_path'], pathflags = recipe_information['pathflags'])
+
+ update_recipe_obj(recipe)
+
+ built_recipe = None
+        # Create a copy of the recipe for historical purposes and update it
+ for built_layer in self.layer_version_built:
+ if built_layer.layer == recipe_information['layer_version'].layer:
+ built_recipe, c = self._cached_get_or_create(Recipe,
+ layer_version=built_layer,
+ file_path=recipe_information['file_path'],
+ pathflags = recipe_information['pathflags'])
+ update_recipe_obj(built_recipe)
+ break
+
+
+ # If we're in analysis mode or if this is a custom recipe
+ # then we are wholly responsible for the data
+ # and therefore we return the 'real' recipe rather than the build
+ # history copy of the recipe.
+ if recipe_information['layer_version'].build is not None and \
+ recipe_information['layer_version'].build.project == \
+ Project.objects.get_or_create_default_project():
+ return recipe
+
+ if built_recipe is None:
+ return recipe
+
+ return built_recipe
+
+ def get_update_layer_version_object(self, build_obj, layer_obj, layer_version_information):
+ if isinstance(layer_obj, Layer_Version):
+ # We already found our layer version for this build so just
+ # update it with the new build information
+ logger.debug("We found our layer from toaster")
+ layer_obj.local_path = layer_version_information['local_path']
+ layer_obj.save()
+ self.layer_version_objects.append(layer_obj)
+
+ # create a new copy of this layer version as a snapshot for
+ # historical purposes
+ layer_copy, c = Layer_Version.objects.get_or_create(
+ build=build_obj,
+ layer=layer_obj.layer,
+ release=layer_obj.release,
+ branch=layer_version_information['branch'],
+ commit=layer_version_information['commit'],
+ local_path=layer_version_information['local_path'],
+ )
+
+ logger.debug("Created new layer version %s for build history",
+ layer_copy.layer.name)
+
+ self.layer_version_built.append(layer_copy)
+
+ return layer_obj
+
+ assert isinstance(build_obj, Build)
+ assert isinstance(layer_obj, Layer)
+ assert 'branch' in layer_version_information
+ assert 'commit' in layer_version_information
+ assert 'priority' in layer_version_information
+ assert 'local_path' in layer_version_information
+
+ # If we're doing a command line build then associate this new layer with the
+ # project to avoid it 'contaminating' toaster data
+ project = None
+ if build_obj.project == Project.objects.get_or_create_default_project():
+ project = build_obj.project
+
+ layer_version_object, _ = Layer_Version.objects.get_or_create(
+ build = build_obj,
+ layer = layer_obj,
+ branch = layer_version_information['branch'],
+ commit = layer_version_information['commit'],
+ priority = layer_version_information['priority'],
+ local_path = layer_version_information['local_path'],
+ project=project)
+
+ self.layer_version_objects.append(layer_version_object)
+
+ return layer_version_object
+
+ def get_update_layer_object(self, layer_information, brbe):
+ assert 'name' in layer_information
+ assert 'layer_index_url' in layer_information
+
+        # For command line builds we have no brbe, as the request comes
+        # directly from bitbake
+ if brbe is None:
+ # If we don't have git commit sha then we're using a non-git
+ # layer so set the layer_source_dir to identify it as such
+ if not layer_information['version']['commit']:
+ local_source_dir = layer_information["local_path"]
+ else:
+ local_source_dir = None
+
+ layer_object, _ = \
+ Layer.objects.get_or_create(
+ name=layer_information['name'],
+ local_source_dir=local_source_dir,
+ layer_index_url=layer_information['layer_index_url'])
+
+ return layer_object
+ else:
+ br_id, be_id = brbe.split(":")
+
+ # Find the layer version by matching the layer event information
+ # against the metadata we have in Toaster
+
+ try:
+ br_layer = BRLayer.objects.get(req=br_id,
+ name=layer_information['name'])
+ return br_layer.layer_version
+ except (BRLayer.MultipleObjectsReturned, BRLayer.DoesNotExist):
+                # There are multiple layers with the same name, or the name
+                # hasn't been determined by toaster.bbclass,
+                # so let's filter by the local_path
+ bc = bbcontroller.getBuildEnvironmentController(pk=be_id)
+ for br_layer in BRLayer.objects.filter(req=br_id):
+ if br_layer.giturl and \
+ layer_information['local_path'].endswith(
+ bc.getGitCloneDirectory(br_layer.giturl,
+ br_layer.commit)):
+ return br_layer.layer_version
+
+ if br_layer.local_source_dir == \
+ layer_information['local_path']:
+ return br_layer.layer_version
+
+            # We've reached the end of our search and couldn't find the layer;
+            # we can continue, but some data may be missing
+ raise NotExisting("Unidentified layer %s" %
+ pformat(layer_information))
+
+ def save_target_file_information(self, build_obj, target_obj, filedata):
+ assert isinstance(build_obj, Build)
+ assert isinstance(target_obj, Target)
+ dirs = filedata['dirs']
+ files = filedata['files']
+ syms = filedata['syms']
+
+ # always create the root directory as a special case;
+ # note that this is never displayed, so the owner, group,
+ # size, permission are irrelevant
+ tf_obj = Target_File.objects.create(target = target_obj,
+ path = '/',
+ size = 0,
+ owner = '',
+ group = '',
+ permission = '',
+ inodetype = Target_File.ITYPE_DIRECTORY)
+ tf_obj.save()
+
+ # insert directories, ordered by name depth
+ for d in sorted(dirs, key=lambda x:len(x[-1].split("/"))):
+ (user, group, size) = d[1:4]
+ permission = d[0][1:]
+ path = d[4].lstrip(".")
+
+ # we already created the root directory, so ignore any
+ # entry for it
+ if len(path) == 0:
+ continue
+
+ parent_path = "/".join(path.split("/")[:len(path.split("/")) - 1])
+ if len(parent_path) == 0:
+ parent_path = "/"
+ parent_obj = self._cached_get(Target_File, target = target_obj, path = parent_path, inodetype = Target_File.ITYPE_DIRECTORY)
+ tf_obj = Target_File.objects.create(
+ target = target_obj,
+ path = path,
+ size = size,
+ inodetype = Target_File.ITYPE_DIRECTORY,
+ permission = permission,
+ owner = user,
+ group = group,
+ directory = parent_obj)
+
+
+ # we insert files
+ for d in files:
+ (user, group, size) = d[1:4]
+ permission = d[0][1:]
+ path = d[4].lstrip(".")
+ parent_path = "/".join(path.split("/")[:len(path.split("/")) - 1])
+ inodetype = Target_File.ITYPE_REGULAR
+ if d[0].startswith('b'):
+ inodetype = Target_File.ITYPE_BLOCK
+ if d[0].startswith('c'):
+ inodetype = Target_File.ITYPE_CHARACTER
+ if d[0].startswith('p'):
+ inodetype = Target_File.ITYPE_FIFO
+
+ tf_obj = Target_File.objects.create(
+ target = target_obj,
+ path = path,
+ size = size,
+ inodetype = inodetype,
+ permission = permission,
+ owner = user,
+ group = group)
+ parent_obj = self._cached_get(Target_File, target = target_obj, path = parent_path, inodetype = Target_File.ITYPE_DIRECTORY)
+ tf_obj.directory = parent_obj
+ tf_obj.save()
+
+ # we insert symlinks
+ for d in syms:
+ (user, group, size) = d[1:4]
+ permission = d[0][1:]
+ path = d[4].lstrip(".")
+ filetarget_path = d[6]
+
+ parent_path = "/".join(path.split("/")[:len(path.split("/")) - 1])
+ if not filetarget_path.startswith("/"):
+ # we have a relative path, get a normalized absolute one
+ filetarget_path = parent_path + "/" + filetarget_path
+ fcp = filetarget_path.split("/")
+ fcpl = []
+ for i in fcp:
+ if i == "..":
+ fcpl.pop()
+ else:
+ fcpl.append(i)
+ filetarget_path = "/".join(fcpl)
+
+ try:
+ filetarget_obj = Target_File.objects.get(target = target_obj, path = filetarget_path)
+ except Target_File.DoesNotExist:
+ # we might have an invalid link; no way to detect this. just set it to None
+ filetarget_obj = None
+
+ parent_obj = Target_File.objects.get(target = target_obj, path = parent_path, inodetype = Target_File.ITYPE_DIRECTORY)
+
+ tf_obj = Target_File.objects.create(
+ target = target_obj,
+ path = path,
+ size = size,
+ inodetype = Target_File.ITYPE_SYMLINK,
+ permission = permission,
+ owner = user,
+ group = group,
+ directory = parent_obj,
+ sym_target = filetarget_obj)
+
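+    # Worked example (illustrative) of the relative-symlink normalisation
+    # above: a symlink at "/usr/local/bin/foo" pointing at "../../bin/busybox"
+    # has parent_path "/usr/local/bin"; the combined path
+    # "/usr/local/bin/../../bin/busybox" is split on "/" and each ".."
+    # pops the preceding component, yielding "/usr/bin/busybox".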
+
+ def save_target_package_information(self, build_obj, target_obj, packagedict, pkgpnmap, recipes, built_package=False):
+ assert isinstance(build_obj, Build)
+ assert isinstance(target_obj, Target)
+
+ errormsg = ""
+ for p in packagedict:
+            # Search name switches round the installed name vs package name
+ # by default installed name == package name
+ searchname = p
+ if p not in pkgpnmap:
+                logger.warning("Image packages list contains %s, but it is"
+                               " missing from the all-packages list where the"
+                               " metadata comes from. Skipping...", p)
+ continue
+
+ if 'OPKGN' in pkgpnmap[p].keys():
+ searchname = pkgpnmap[p]['OPKGN']
+
+ built_recipe = recipes[pkgpnmap[p]['PN']]
+
+ if built_package:
+ packagedict[p]['object'], created = Package.objects.get_or_create( build = build_obj, name = searchname )
+ recipe = built_recipe
+ else:
+ packagedict[p]['object'], created = \
+ CustomImagePackage.objects.get_or_create(name=searchname)
+ # Clear the Package_Dependency objects as we're going to update
+ # the CustomImagePackage with the latest dependency information
+ packagedict[p]['object'].package_dependencies_target.all().delete()
+ packagedict[p]['object'].package_dependencies_source.all().delete()
+ try:
+ recipe = self._cached_get(
+ Recipe,
+ name=built_recipe.name,
+ layer_version__build=None,
+ layer_version__release=
+ built_recipe.layer_version.release,
+ file_path=built_recipe.file_path,
+ version=built_recipe.version
+ )
+ except (Recipe.DoesNotExist,
+ Recipe.MultipleObjectsReturned) as e:
+                    logger.info("We did not find one recipe for the"
+                                " configuration data package %s %s" % (p, e))
+ continue
+
+            if created or packagedict[p]['object'].size == -1:    # save the data any way we can, not just if it was not created here; bug [YOCTO #6887]
+ # fill in everything we can from the runtime-reverse package data
+ try:
+ packagedict[p]['object'].recipe = recipe
+ packagedict[p]['object'].version = pkgpnmap[p]['PV']
+ packagedict[p]['object'].installed_name = p
+ packagedict[p]['object'].revision = pkgpnmap[p]['PR']
+ packagedict[p]['object'].license = pkgpnmap[p]['LICENSE']
+ packagedict[p]['object'].section = pkgpnmap[p]['SECTION']
+ packagedict[p]['object'].summary = pkgpnmap[p]['SUMMARY']
+ packagedict[p]['object'].description = pkgpnmap[p]['DESCRIPTION']
+ packagedict[p]['object'].size = int(pkgpnmap[p]['PKGSIZE'])
+
+ # no files recorded for this package, so save files info
+ packagefile_objects = []
+ for targetpath in pkgpnmap[p]['FILES_INFO']:
+ targetfilesize = pkgpnmap[p]['FILES_INFO'][targetpath]
+ packagefile_objects.append(Package_File( package = packagedict[p]['object'],
+ path = targetpath,
+ size = targetfilesize))
+ if len(packagefile_objects):
+ Package_File.objects.bulk_create(packagefile_objects)
+ except KeyError as e:
+ errormsg += " stpi: Key error, package %s key %s \n" % ( p, e )
+
+ # save disk installed size
+ packagedict[p]['object'].installed_size = packagedict[p]['size']
+ packagedict[p]['object'].save()
+
+ if built_package:
+ Target_Installed_Package.objects.create(target = target_obj, package = packagedict[p]['object'])
+
+ packagedeps_objs = []
+ for p in packagedict:
+ for (px,deptype) in packagedict[p]['depends']:
+ if deptype == 'depends':
+ tdeptype = Package_Dependency.TYPE_TRDEPENDS
+ elif deptype == 'recommends':
+ tdeptype = Package_Dependency.TYPE_TRECOMMENDS
+
+ try:
+ packagedeps_objs.append(Package_Dependency(
+ package = packagedict[p]['object'],
+ depends_on = packagedict[px]['object'],
+ dep_type = tdeptype,
+ target = target_obj))
+ except KeyError as e:
+ logger.warning("Could not add dependency to the package %s "
+ "because %s is an unknown package", p, px)
+
+ if len(packagedeps_objs) > 0:
+ Package_Dependency.objects.bulk_create(packagedeps_objs)
+ else:
+ logger.info("No package dependencies created")
+
+ if len(errormsg) > 0:
+ logger.warning("buildinfohelper: target_package_info could not identify recipes: \n%s", errormsg)
+
+ def save_target_image_file_information(self, target_obj, file_name, file_size):
+ Target_Image_File.objects.create(target=target_obj,
+ file_name=file_name, file_size=file_size)
+
+ def save_target_kernel_file(self, target_obj, file_name, file_size):
+ """
+ Save kernel file (bzImage, modules*) information for a Target target_obj.
+ """
+ TargetKernelFile.objects.create(target=target_obj,
+ file_name=file_name, file_size=file_size)
+
+ def save_target_sdk_file(self, target_obj, file_name, file_size):
+ """
+ Save SDK artifacts to the database, associating them with a
+ Target object.
+ """
+ TargetSDKFile.objects.create(target=target_obj, file_name=file_name,
+ file_size=file_size)
+
+ def create_logmessage(self, log_information):
+ assert 'build' in log_information
+ assert 'level' in log_information
+ assert 'message' in log_information
+
+ log_object = LogMessage.objects.create(
+ build = log_information['build'],
+ level = log_information['level'],
+ message = log_information['message'])
+
+ for v in vars(log_object):
+ if v in log_information.keys():
+ vars(log_object)[v] = log_information[v]
+
+ return log_object.save()
+
+
+ def save_build_package_information(self, build_obj, package_info, recipes,
+ built_package):
+ # assert isinstance(build_obj, Build)
+
+ if not 'PN' in package_info.keys():
+ # no package data to save (e.g. 'OPKGN'="lib64-*"|"lib32-*")
+ return None
+
+ # create and save the object
+ pname = package_info['PKG']
+ built_recipe = recipes[package_info['PN']]
+ if 'OPKGN' in package_info.keys():
+ pname = package_info['OPKGN']
+
+ if built_package:
+ bp_object, _ = Package.objects.get_or_create( build = build_obj,
+ name = pname )
+ recipe = built_recipe
+ else:
+ bp_object, created = \
+ CustomImagePackage.objects.get_or_create(name=pname)
+ try:
+ recipe = self._cached_get(Recipe,
+ name=built_recipe.name,
+ layer_version__build=None,
+ file_path=built_recipe.file_path,
+ version=built_recipe.version)
+
+ except (Recipe.DoesNotExist, Recipe.MultipleObjectsReturned):
+                logger.debug("We did not find one recipe for the"
+                             " configuration data package %s" % pname)
+ return
+
+ bp_object.installed_name = package_info['PKG']
+ bp_object.recipe = recipe
+ bp_object.version = package_info['PKGV']
+ bp_object.revision = package_info['PKGR']
+ bp_object.summary = package_info['SUMMARY']
+ bp_object.description = package_info['DESCRIPTION']
+ bp_object.size = int(package_info['PKGSIZE'])
+ bp_object.section = package_info['SECTION']
+ bp_object.license = package_info['LICENSE']
+ bp_object.save()
+
+ # save any attached file information
+ packagefile_objects = []
+ for path in package_info['FILES_INFO']:
+ packagefile_objects.append(Package_File( package = bp_object,
+ path = path,
+ size = package_info['FILES_INFO'][path] ))
+ if len(packagefile_objects):
+ Package_File.objects.bulk_create(packagefile_objects)
+
+ def _po_byname(p):
+ if built_package:
+ pkg, created = Package.objects.get_or_create(build=build_obj,
+ name=p)
+ else:
+ pkg, created = CustomImagePackage.objects.get_or_create(name=p)
+
+ if created:
+ pkg.size = -1
+ pkg.save()
+ return pkg
+
+ packagedeps_objs = []
+ # save soft dependency information
+ if 'RDEPENDS' in package_info and package_info['RDEPENDS']:
+ for p in bb.utils.explode_deps(package_info['RDEPENDS']):
+ packagedeps_objs.append(Package_Dependency( package = bp_object,
+ depends_on = _po_byname(p), dep_type = Package_Dependency.TYPE_RDEPENDS))
+ if 'RPROVIDES' in package_info and package_info['RPROVIDES']:
+ for p in bb.utils.explode_deps(package_info['RPROVIDES']):
+ packagedeps_objs.append(Package_Dependency( package = bp_object,
+ depends_on = _po_byname(p), dep_type = Package_Dependency.TYPE_RPROVIDES))
+ if 'RRECOMMENDS' in package_info and package_info['RRECOMMENDS']:
+ for p in bb.utils.explode_deps(package_info['RRECOMMENDS']):
+ packagedeps_objs.append(Package_Dependency( package = bp_object,
+ depends_on = _po_byname(p), dep_type = Package_Dependency.TYPE_RRECOMMENDS))
+ if 'RSUGGESTS' in package_info and package_info['RSUGGESTS']:
+ for p in bb.utils.explode_deps(package_info['RSUGGESTS']):
+ packagedeps_objs.append(Package_Dependency( package = bp_object,
+ depends_on = _po_byname(p), dep_type = Package_Dependency.TYPE_RSUGGESTS))
+ if 'RREPLACES' in package_info and package_info['RREPLACES']:
+ for p in bb.utils.explode_deps(package_info['RREPLACES']):
+ packagedeps_objs.append(Package_Dependency( package = bp_object,
+ depends_on = _po_byname(p), dep_type = Package_Dependency.TYPE_RREPLACES))
+ if 'RCONFLICTS' in package_info and package_info['RCONFLICTS']:
+ for p in bb.utils.explode_deps(package_info['RCONFLICTS']):
+ packagedeps_objs.append(Package_Dependency( package = bp_object,
+ depends_on = _po_byname(p), dep_type = Package_Dependency.TYPE_RCONFLICTS))
+
+ if len(packagedeps_objs) > 0:
+ Package_Dependency.objects.bulk_create(packagedeps_objs)
+
+ return bp_object
+
+ def save_build_variables(self, build_obj, vardump):
+ assert isinstance(build_obj, Build)
+
+ for k in vardump:
+ desc = vardump[k]['doc']
+ if desc is None:
+ var_words = [word for word in k.split('_')]
+ root_var = "_".join([word for word in var_words if word.isupper()])
+ if root_var and root_var != k and root_var in vardump:
+ desc = vardump[root_var]['doc']
+ if desc is None:
+ desc = ''
+ if len(desc):
+ HelpText.objects.get_or_create(build=build_obj,
+ area=HelpText.VARIABLE,
+ key=k, text=desc)
+ if not bool(vardump[k]['func']):
+ value = vardump[k]['v']
+ if value is None:
+ value = ''
+ variable_obj = Variable.objects.create( build = build_obj,
+ variable_name = k,
+ variable_value = value,
+ description = desc)
+
+ varhist_objects = []
+ for vh in vardump[k]['history']:
+ if not 'documentation.conf' in vh['file']:
+ varhist_objects.append(VariableHistory( variable = variable_obj,
+ file_name = vh['file'],
+ line_number = vh['line'],
+ operation = vh['op']))
+ if len(varhist_objects):
+ VariableHistory.objects.bulk_create(varhist_objects)
+
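+    # For example (illustrative): for k = "SRC_URI_append", var_words is
+    # ['SRC', 'URI', 'append'], so root_var is "SRC_URI" and its documentation
+    # is reused as the description for the variant variable.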
+
+class MockEvent(object):
+    """ This object is used to create an event, for which the normal
+        event-processing methods can be used, out of data that does not come
+        via an actual event
+ """
+ def __init__(self):
+ self.msg = None
+ self.levelno = None
+ self.taskname = None
+ self.taskhash = None
+ self.pathname = None
+ self.lineno = None
+
+ def getMessage(self):
+ """
+ Simulate LogRecord message return
+ """
+ return self.msg
+
+
+class BuildInfoHelper(object):
+    """ This class gathers the build information from the server and sends it
+        to the ORM wrapper for storage in the database.
+        It is instantiated once per build.
+        It keeps in memory all data that needs matching up before being
+        written to the database.
+ """
+
+ # tasks which produce image files; note we include '', as we set
+ # the task for a target to '' (i.e. 'build') if no target is
+ # explicitly defined
+ IMAGE_GENERATING_TASKS = ['', 'build', 'image', 'populate_sdk_ext']
+
+ # pylint: disable=protected-access
+ # the code will look into the protected variables of the event; no easy way around this
+ # pylint: disable=bad-continuation
+ # we do not follow the python conventions for continuation indentation due to long lines here
+
+ def __init__(self, server, has_build_history = False, brbe = None):
+ self.internal_state = {}
+ self.internal_state['taskdata'] = {}
+ self.internal_state['targets'] = []
+ self.task_order = 0
+ self.autocommit_step = 1
+ self.server = server
+ # we use manual transactions if the database doesn't autocommit on us
+ if not connection.features.autocommits_when_autocommit_is_off:
+ transaction.set_autocommit(False)
+ self.orm_wrapper = ORMWrapper()
+ self.has_build_history = has_build_history
+ self.tmp_dir = self.server.runCommand(["getVariable", "TMPDIR"])[0]
+
+ # this is set for Toaster-triggered builds by localhostbecontroller
+ # via toasterui
+ self.brbe = brbe
+
+ self.project = None
+
+        logger.debug(1, "buildinfohelper: Build info helper initialised %s" % vars(self))
+
+
+ ###################
+ ## methods to convert event/external info into objects that the ORM layer uses
+
+ def _ensure_build(self):
+ """
+ Ensure the current build object exists and is up to date with
+ data on the bitbake server
+ """
+ if not 'build' in self.internal_state or not self.internal_state['build']:
+ # create the Build object
+ self.internal_state['build'] = \
+ self.orm_wrapper.get_or_create_build_object(self.brbe)
+
+ build = self.internal_state['build']
+
+ # update missing fields on the Build object with found data
+ build_info = {}
+
+ # set to True if at least one field is going to be set
+ changed = False
+
+ if not build.build_name:
+ build_name = self.server.runCommand(["getVariable", "BUILDNAME"])[0]
+
+ # only reset the build name if the one on the server is actually
+ # a valid value for the build_name field
+            if build_name is not None:
+ build_info['build_name'] = build_name
+ changed = True
+
+ if not build.machine:
+ build_info['machine'] = self.server.runCommand(["getVariable", "MACHINE"])[0]
+ changed = True
+
+ if not build.distro:
+ build_info['distro'] = self.server.runCommand(["getVariable", "DISTRO"])[0]
+ changed = True
+
+ if not build.distro_version:
+ build_info['distro_version'] = self.server.runCommand(["getVariable", "DISTRO_VERSION"])[0]
+ changed = True
+
+ if not build.bitbake_version:
+ build_info['bitbake_version'] = self.server.runCommand(["getVariable", "BB_VERSION"])[0]
+ changed = True
+
+ if changed:
+ self.orm_wrapper.update_build(self.internal_state['build'], build_info)
+
+ def _get_task_information(self, event, recipe):
+ assert 'taskname' in vars(event)
+ self._ensure_build()
+
+ task_information = {}
+ task_information['build'] = self.internal_state['build']
+ task_information['outcome'] = Task.OUTCOME_NA
+ task_information['recipe'] = recipe
+ task_information['task_name'] = event.taskname
+ try:
+            # some tasks don't come with a hash, and that's ok
+ task_information['sstate_checksum'] = event.taskhash
+ except AttributeError:
+ pass
+ return task_information
+
+ def _get_layer_version_for_dependency(self, pathRE):
+        """ Returns the layer in the Toaster db whose path is a full regex
+        match for pathRE. pathRE is the layer path passed as a regex in the
+        event; it is created in cooker.py as part of the collection of layer
+        priorities.
+ """
+ self._ensure_build()
+
+ def _sort_longest_path(layer_version):
+ assert isinstance(layer_version, Layer_Version)
+ return len(layer_version.local_path)
+
+        # Our paths don't have a trailing slash
+ if pathRE.endswith("/"):
+ pathRE = pathRE[:-1]
+
+ p = re.compile(pathRE)
+ path=re.sub(r'[$^]',r'',pathRE)
+ # Heuristics: we always match recipe to the deepest layer path in
+ # the discovered layers
+ for lvo in sorted(self.orm_wrapper.layer_version_objects,
+ reverse=True, key=_sort_longest_path):
+ if p.fullmatch(os.path.abspath(lvo.local_path)):
+ return lvo
+ if lvo.layer.local_source_dir:
+ if p.fullmatch(os.path.abspath(lvo.layer.local_source_dir)):
+ return lvo
+            if path.startswith(lvo.local_path):
+ # sub-layer path inside existing layer
+ return lvo
+
+ # if we get here, we didn't read layers correctly;
+ # dump whatever information we have on the error log
+ logger.warning("Could not match layer dependency for path %s : %s",
+ pathRE,
+ self.orm_wrapper.layer_version_objects)
+ return None
+
+ def _get_layer_version_for_path(self, path):
+ self._ensure_build()
+
+ def _slkey_interactive(layer_version):
+ assert isinstance(layer_version, Layer_Version)
+ return len(layer_version.local_path)
+
+ # Heuristics: we always match recipe to the deepest layer path in the discovered layers
+ for lvo in sorted(self.orm_wrapper.layer_version_objects, reverse=True, key=_slkey_interactive):
+ # we can match to the recipe file path
+ if path.startswith(lvo.local_path):
+ return lvo
+ if lvo.layer.local_source_dir and \
+ path.startswith(lvo.layer.local_source_dir):
+ return lvo
+
+        # if we get here, we didn't read layers correctly; dump whatever information we have on the error log
+ logger.warning("Could not match layer version for recipe path %s : %s", path, self.orm_wrapper.layer_version_objects)
+
+        # mock up the new layer
+ unknown_layer, _ = Layer.objects.get_or_create(name="Unidentified layer", layer_index_url="")
+ unknown_layer_version_obj, _ = Layer_Version.objects.get_or_create(layer = unknown_layer, build = self.internal_state['build'])
+
+ # append it so we don't run into this error again and again
+ self.orm_wrapper.layer_version_objects.append(unknown_layer_version_obj)
+
+ return unknown_layer_version_obj
+
+ def _get_recipe_information_from_taskfile(self, taskfile):
+ localfilepath = taskfile.split(":")[-1]
+ filepath_flags = ":".join(sorted(taskfile.split(":")[:-1]))
+ layer_version_obj = self._get_layer_version_for_path(localfilepath)
+
+ recipe_info = {}
+ recipe_info['layer_version'] = layer_version_obj
+ recipe_info['file_path'] = localfilepath
+ recipe_info['pathflags'] = filepath_flags
+
+ if recipe_info['file_path'].startswith(recipe_info['layer_version'].local_path):
+ recipe_info['file_path'] = recipe_info['file_path'][len(recipe_info['layer_version'].local_path):].lstrip("/")
+ else:
+ raise RuntimeError("Recipe file path %s is not under layer version at %s" % (recipe_info['file_path'], recipe_info['layer_version'].local_path))
+
+ return recipe_info
+
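+    # For example (illustrative): a taskfile of
+    # "virtual:native:/home/user/poky/meta/recipes-core/busybox/busybox_1.27.2.bb"
+    # yields localfilepath "/home/user/poky/meta/recipes-core/busybox/busybox_1.27.2.bb"
+    # and pathflags "native:virtual" (note the prefixes are sorted).
+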
+ def _get_path_information(self, task_object):
+ self._ensure_build()
+
+ assert isinstance(task_object, Task)
+ build_stats_format = "{tmpdir}/buildstats/{buildname}/{package}/"
+ build_stats_path = []
+
+ for t in self.internal_state['targets']:
+ buildname = self.internal_state['build'].build_name
+ pe, pv = task_object.recipe.version.split(":",1)
+ if len(pe) > 0:
+ package = task_object.recipe.name + "-" + pe + "_" + pv
+ else:
+ package = task_object.recipe.name + "-" + pv
+
+ build_stats_path.append(build_stats_format.format(tmpdir=self.tmp_dir,
+ buildname=buildname,
+ package=package))
+
+ return build_stats_path
+
+
+ ################################
+    ## externally available methods to store information
+ @staticmethod
+ def _get_data_from_event(event):
+ evdata = None
+ if '_localdata' in vars(event):
+ evdata = event._localdata
+ elif 'data' in vars(event):
+ evdata = event.data
+ else:
+            raise Exception("Event with neither _localdata nor data properties")
+ return evdata
+
+ def store_layer_info(self, event):
+ layerinfos = BuildInfoHelper._get_data_from_event(event)
+ self.internal_state['lvs'] = {}
+ for layer in layerinfos:
+ try:
+ self.internal_state['lvs'][self.orm_wrapper.get_update_layer_object(layerinfos[layer], self.brbe)] = layerinfos[layer]['version']
+ self.internal_state['lvs'][self.orm_wrapper.get_update_layer_object(layerinfos[layer], self.brbe)]['local_path'] = layerinfos[layer]['local_path']
+            except NotExisting as nee:
+                logger.warning("buildinfohelper: cannot identify layer; exception: %s", nee)
+
+ def store_started_build(self):
+ self._ensure_build()
+
+ def save_build_log_file_path(self, build_log_path):
+ self._ensure_build()
+
+ if not self.internal_state['build'].cooker_log_path:
+ data_dict = {'cooker_log_path': build_log_path}
+ self.orm_wrapper.update_build(self.internal_state['build'], data_dict)
+
+ def save_build_targets(self, event):
+ self._ensure_build()
+
+ # create target information
+ assert '_pkgs' in vars(event)
+ target_information = {}
+ target_information['targets'] = event._pkgs
+ target_information['build'] = self.internal_state['build']
+
+ self.internal_state['targets'] = self.orm_wrapper.get_or_create_targets(target_information)
+
+ def save_build_layers_and_variables(self):
+ self._ensure_build()
+
+ build_obj = self.internal_state['build']
+
+ # save layer version information for this build
+ if not 'lvs' in self.internal_state:
+            logger.error("Layer version information not found; check if the bitbake server was configured to inherit toaster.bbclass.")
+ else:
+ for layer_obj in self.internal_state['lvs']:
+ self.orm_wrapper.get_update_layer_version_object(build_obj, layer_obj, self.internal_state['lvs'][layer_obj])
+
+ del self.internal_state['lvs']
+
+ # Save build configuration
+ data = self.server.runCommand(["getAllKeysWithFlags", ["doc", "func"]])[0]
+
+ # convert the paths from absolute to relative to either the build directory or layer checkouts
+ path_prefixes = []
+
+ if self.brbe is not None:
+ _, be_id = self.brbe.split(":")
+ be = BuildEnvironment.objects.get(pk = be_id)
+ path_prefixes.append(be.builddir)
+
+ for layer in sorted(self.orm_wrapper.layer_version_objects, key = lambda x:len(x.local_path), reverse=True):
+ path_prefixes.append(layer.local_path)
+
+ # we strip the prefixes
+ for k in data:
+ if not bool(data[k]['func']):
+ for vh in data[k]['history']:
+ if not 'documentation.conf' in vh['file']:
+ abs_file_name = vh['file']
+ for pp in path_prefixes:
+ if abs_file_name.startswith(pp + "/"):
+ # preserve layer name in relative path
+ vh['file']=abs_file_name[pp.rfind("/")+1:]
+ break
+
+ # save the variables
+ self.orm_wrapper.save_build_variables(build_obj, data)
+
+ return self.brbe
+
+ def set_recipes_to_parse(self, num_recipes):
+ """
+ Set the number of recipes which need to be parsed for this build.
+ This is set the first time ParseStarted is received by toasterui.
+ """
+ self._ensure_build()
+ self.internal_state['build'].recipes_to_parse = num_recipes
+ self.internal_state['build'].save()
+
+ def set_recipes_parsed(self, num_recipes):
+ """
+ Set the number of recipes parsed so far for this build; this is updated
+ each time a ParseProgress or ParseCompleted event is received by
+ toasterui.
+ """
+ self._ensure_build()
+ if num_recipes <= self.internal_state['build'].recipes_to_parse:
+ self.internal_state['build'].recipes_parsed = num_recipes
+ self.internal_state['build'].save()
+
+ def update_target_image_file(self, event):
+ evdata = BuildInfoHelper._get_data_from_event(event)
+
+ for t in self.internal_state['targets']:
+ if t.is_image == True:
+ output_files = list(evdata.keys())
+ for output in output_files:
+ if t.target in output and 'rootfs' in output and not output.endswith(".manifest"):
+ self.orm_wrapper.save_target_image_file_information(t, output, evdata[output])
+
+ def update_artifact_image_file(self, event):
+ self._ensure_build()
+ evdata = BuildInfoHelper._get_data_from_event(event)
+ for artifact_path in evdata.keys():
+ self.orm_wrapper.save_artifact_information(
+ self.internal_state['build'], artifact_path,
+ evdata[artifact_path])
+
+ def update_build_information(self, event, errors, warnings, taskfailures):
+ self._ensure_build()
+ self.orm_wrapper.update_build_stats_and_outcome(
+ self.internal_state['build'], errors, warnings, taskfailures)
+
+ def store_started_task(self, event):
+ assert isinstance(event, (bb.runqueue.sceneQueueTaskStarted, bb.runqueue.runQueueTaskStarted, bb.runqueue.runQueueTaskSkipped))
+ assert 'taskfile' in vars(event)
+ localfilepath = event.taskfile.split(":")[-1]
+ assert localfilepath.startswith("/")
+
+ identifier = event.taskfile + ":" + event.taskname
+
+ recipe_information = self._get_recipe_information_from_taskfile(event.taskfile)
+ recipe = self.orm_wrapper.get_update_recipe_object(recipe_information, True)
+
+ task_information = self._get_task_information(event, recipe)
+ task_information['outcome'] = Task.OUTCOME_NA
+
+ if isinstance(event, bb.runqueue.runQueueTaskSkipped):
+ assert 'reason' in vars(event)
+ task_information['task_executed'] = False
+ if event.reason == "covered":
+ task_information['outcome'] = Task.OUTCOME_COVERED
+ if event.reason == "existing":
+ task_information['outcome'] = Task.OUTCOME_PREBUILT
+ else:
+ task_information['task_executed'] = True
+ if 'noexec' in vars(event) and event.noexec == True:
+ task_information['task_executed'] = False
+ task_information['outcome'] = Task.OUTCOME_EMPTY
+ task_information['script_type'] = Task.CODING_NA
+
+ # do not assign order numbers to scene tasks
+ if not isinstance(event, bb.runqueue.sceneQueueTaskStarted):
+ self.task_order += 1
+ task_information['order'] = self.task_order
+
+ self.orm_wrapper.get_update_task_object(task_information)
+
+ self.internal_state['taskdata'][identifier] = {
+ 'outcome': task_information['outcome'],
+ }
+
+
+ def store_tasks_stats(self, event):
+ self._ensure_build()
+ task_data = BuildInfoHelper._get_data_from_event(event)
+
+ for (task_file, task_name, task_stats, recipe_name) in task_data:
+ build = self.internal_state['build']
+ self.orm_wrapper.update_task_object(build, task_name, recipe_name, task_stats)
+
+ def update_and_store_task(self, event):
+ assert 'taskfile' in vars(event)
+ localfilepath = event.taskfile.split(":")[-1]
+ assert localfilepath.startswith("/")
+
+ identifier = event.taskfile + ":" + event.taskname
+ if not identifier in self.internal_state['taskdata']:
+ if isinstance(event, bb.build.TaskBase):
+ # we do a bit of guessing
+ candidates = [x for x in self.internal_state['taskdata'].keys() if x.endswith(identifier)]
+ if len(candidates) == 1:
+ identifier = candidates[0]
+ elif len(candidates) > 1 and hasattr(event,'_package'):
+ if 'native-' in event._package:
+ identifier = 'native:' + identifier
+ if 'nativesdk-' in event._package:
+ identifier = 'nativesdk:' + identifier
+ candidates = [x for x in self.internal_state['taskdata'].keys() if x.endswith(identifier)]
+ if len(candidates) == 1:
+ identifier = candidates[0]
+
+ assert identifier in self.internal_state['taskdata']
+ identifierlist = identifier.split(":")
+ realtaskfile = ":".join(identifierlist[0:len(identifierlist)-1])
+ recipe_information = self._get_recipe_information_from_taskfile(realtaskfile)
+ recipe = self.orm_wrapper.get_update_recipe_object(recipe_information, True)
+ task_information = self._get_task_information(event,recipe)
+
+ task_information['outcome'] = self.internal_state['taskdata'][identifier]['outcome']
+
+ if 'logfile' in vars(event):
+ task_information['logfile'] = event.logfile
+
+ if '_message' in vars(event):
+ task_information['message'] = event._message
+
+ if 'taskflags' in vars(event):
+ # with TaskStarted, we get even more information
+ if 'python' in event.taskflags.keys() and event.taskflags['python'] == '1':
+ task_information['script_type'] = Task.CODING_PYTHON
+ else:
+ task_information['script_type'] = Task.CODING_SHELL
+
+ if task_information['outcome'] == Task.OUTCOME_NA:
+ if isinstance(event, (bb.runqueue.runQueueTaskCompleted, bb.runqueue.sceneQueueTaskCompleted)):
+ task_information['outcome'] = Task.OUTCOME_SUCCESS
+ del self.internal_state['taskdata'][identifier]
+
+ if isinstance(event, (bb.runqueue.runQueueTaskFailed, bb.runqueue.sceneQueueTaskFailed)):
+ task_information['outcome'] = Task.OUTCOME_FAILED
+ del self.internal_state['taskdata'][identifier]
+
+ if not connection.features.autocommits_when_autocommit_is_off:
+ # we force a sync point here, to get the progress bar to show
+ if self.autocommit_step % 3 == 0:
+ transaction.set_autocommit(True)
+ transaction.set_autocommit(False)
+ self.autocommit_step += 1
+
+ self.orm_wrapper.get_update_task_object(task_information, True) # must exist
+
+
+ def store_missed_state_tasks(self, event):
+ for (fn, taskname, taskhash, sstatefile) in BuildInfoHelper._get_data_from_event(event)['missed']:
+
+ # identifier = fn + taskname + "_setscene"
+ recipe_information = self._get_recipe_information_from_taskfile(fn)
+ recipe = self.orm_wrapper.get_update_recipe_object(recipe_information)
+ mevent = MockEvent()
+ mevent.taskname = taskname
+ mevent.taskhash = taskhash
+ task_information = self._get_task_information(mevent,recipe)
+
+ task_information['start_time'] = timezone.now()
+ task_information['outcome'] = Task.OUTCOME_NA
+ task_information['sstate_checksum'] = taskhash
+ task_information['sstate_result'] = Task.SSTATE_MISS
+ task_information['path_to_sstate_obj'] = sstatefile
+
+ self.orm_wrapper.get_update_task_object(task_information)
+
+ for (fn, taskname, taskhash, sstatefile) in BuildInfoHelper._get_data_from_event(event)['found']:
+
+ # identifier = fn + taskname + "_setscene"
+ recipe_information = self._get_recipe_information_from_taskfile(fn)
+ recipe = self.orm_wrapper.get_update_recipe_object(recipe_information)
+ mevent = MockEvent()
+ mevent.taskname = taskname
+ mevent.taskhash = taskhash
+ task_information = self._get_task_information(mevent,recipe)
+
+ task_information['path_to_sstate_obj'] = sstatefile
+
+ self.orm_wrapper.get_update_task_object(task_information)
+
+
+ def store_target_package_data(self, event):
+ self._ensure_build()
+
+ # for all image targets
+ for target in self.internal_state['targets']:
+ if target.is_image:
+ pkgdata = BuildInfoHelper._get_data_from_event(event)['pkgdata']
+ imgdata = BuildInfoHelper._get_data_from_event(event)['imgdata'].get(target.target, {})
+ filedata = BuildInfoHelper._get_data_from_event(event)['filedata'].get(target.target, {})
+
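+                # save the package data twice: once flagged as built packages
+                # and once as packages installed on the target image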
+ try:
+ self.orm_wrapper.save_target_package_information(self.internal_state['build'], target, imgdata, pkgdata, self.internal_state['recipes'], built_package=True)
+ self.orm_wrapper.save_target_package_information(self.internal_state['build'], target, imgdata.copy(), pkgdata, self.internal_state['recipes'], built_package=False)
+ except KeyError as e:
+ logger.warning("KeyError in save_target_package_information"
+ "%s ", e)
+
+ # only try to find files in the image if the task for this
+ # target is one which produces image files; otherwise, the old
+ # list of files in the files-in-image.txt file will be
+ # appended to the target even if it didn't produce any images
+ if target.task in BuildInfoHelper.IMAGE_GENERATING_TASKS:
+ try:
+ self.orm_wrapper.save_target_file_information(self.internal_state['build'], target, filedata)
+ except KeyError as e:
+ logger.warning("KeyError in save_target_file_information"
+ "%s ", e)
+
+
+
+ def cancel_cli_build(self):
+ """
+ If a build is currently underway, set its state to CANCELLED;
+ note that this only gets called for command line builds which are
+ interrupted, so it doesn't touch any BuildRequest objects
+ """
+ self._ensure_build()
+ self.internal_state['build'].outcome = Build.CANCELLED
+ self.internal_state['build'].save()
+ signal_runbuilds()
+
+ def store_dependency_information(self, event):
+ assert '_depgraph' in vars(event)
+ assert 'layer-priorities' in event._depgraph
+ assert 'pn' in event._depgraph
+ assert 'tdepends' in event._depgraph
+
+ errormsg = ""
+
+ # save layer version priorities
+        if 'layer-priorities' in event._depgraph:
+ for lv in event._depgraph['layer-priorities']:
+ (_, path, _, priority) = lv
+ layer_version_obj = self._get_layer_version_for_dependency(path)
+ if layer_version_obj:
+ layer_version_obj.priority = priority
+ layer_version_obj.save()
+
+ # save recipe information
+ self.internal_state['recipes'] = {}
+ for pn in event._depgraph['pn']:
+
+ file_name = event._depgraph['pn'][pn]['filename'].split(":")[-1]
+ pathflags = ":".join(sorted(event._depgraph['pn'][pn]['filename'].split(":")[:-1]))
+ layer_version_obj = self._get_layer_version_for_path(file_name)
+
+ assert layer_version_obj is not None
+
+ recipe_info = {}
+ recipe_info['name'] = pn
+ recipe_info['layer_version'] = layer_version_obj
+
+ if 'version' in event._depgraph['pn'][pn]:
+ recipe_info['version'] = event._depgraph['pn'][pn]['version'].lstrip(":")
+
+ if 'summary' in event._depgraph['pn'][pn]:
+ recipe_info['summary'] = event._depgraph['pn'][pn]['summary']
+
+ if 'license' in event._depgraph['pn'][pn]:
+ recipe_info['license'] = event._depgraph['pn'][pn]['license']
+
+ if 'description' in event._depgraph['pn'][pn]:
+ recipe_info['description'] = event._depgraph['pn'][pn]['description']
+
+ if 'section' in event._depgraph['pn'][pn]:
+ recipe_info['section'] = event._depgraph['pn'][pn]['section']
+
+ if 'homepage' in event._depgraph['pn'][pn]:
+ recipe_info['homepage'] = event._depgraph['pn'][pn]['homepage']
+
+ if 'bugtracker' in event._depgraph['pn'][pn]:
+ recipe_info['bugtracker'] = event._depgraph['pn'][pn]['bugtracker']
+
+ recipe_info['file_path'] = file_name
+ recipe_info['pathflags'] = pathflags
+
+ if recipe_info['file_path'].startswith(recipe_info['layer_version'].local_path):
+ recipe_info['file_path'] = recipe_info['file_path'][len(recipe_info['layer_version'].local_path):].lstrip("/")
+ else:
+ raise RuntimeError("Recipe file path %s is not under layer version at %s" % (recipe_info['file_path'], recipe_info['layer_version'].local_path))
+
+ recipe = self.orm_wrapper.get_update_recipe_object(recipe_info)
+ recipe.is_image = False
+        if 'inherits' in event._depgraph['pn'][pn]:
+ for cls in event._depgraph['pn'][pn]['inherits']:
+ if cls.endswith('/image.bbclass'):
+ recipe.is_image = True
+ recipe_info['is_image'] = True
+ # Save the is_image state to the relevant recipe objects
+ self.orm_wrapper.get_update_recipe_object(recipe_info)
+ break
+ if recipe.is_image:
+ for t in self.internal_state['targets']:
+ if pn == t.target:
+ t.is_image = True
+ t.save()
+ self.internal_state['recipes'][pn] = recipe
+
+        # we won't get recipes for providers listed in ASSUME_PROVIDED
+
+ assume_provided = self.server.runCommand(["getVariable", "ASSUME_PROVIDED"])[0].split()
+
+        # save buildtime recipe dependencies
+ recipedeps_objects = []
+ for recipe in event._depgraph['depends']:
+ target = self.internal_state['recipes'][recipe]
+ for dep in event._depgraph['depends'][recipe]:
+ if dep in assume_provided:
+ continue
+ via = None
+ if 'providermap' in event._depgraph and dep in event._depgraph['providermap']:
+ deprecipe = event._depgraph['providermap'][dep][0]
+ dependency = self.internal_state['recipes'][deprecipe]
+ via = Provides.objects.get_or_create(name=dep,
+ recipe=dependency)[0]
+ elif dep in self.internal_state['recipes']:
+ dependency = self.internal_state['recipes'][dep]
+ else:
+ errormsg += " stpd: KeyError saving recipe dependency for %s, %s \n" % (recipe, dep)
+ continue
+ recipe_dep = Recipe_Dependency(recipe=target,
+ depends_on=dependency,
+ via=via,
+ dep_type=Recipe_Dependency.TYPE_DEPENDS)
+ recipedeps_objects.append(recipe_dep)
+
+ Recipe_Dependency.objects.bulk_create(recipedeps_objects)
+
+ # save all task information
+ def _save_a_task(taskdesc):
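+            # taskdesc is "<pn>.<taskname>", e.g. "busybox.do_compile"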
+ spec = re.split(r'\.', taskdesc)
+ pn = ".".join(spec[0:-1])
+ taskname = spec[-1]
+ e = event
+ e.taskname = pn
+ recipe = self.internal_state['recipes'][pn]
+ task_info = self._get_task_information(e, recipe)
+ task_info['task_name'] = taskname
+ task_obj = self.orm_wrapper.get_update_task_object(task_info)
+ return task_obj
+
+ # create tasks
+ tasks = {}
+ for taskdesc in event._depgraph['tdepends']:
+ tasks[taskdesc] = _save_a_task(taskdesc)
+
+ # create dependencies between tasks
+ taskdeps_objects = []
+ for taskdesc in event._depgraph['tdepends']:
+ target = tasks[taskdesc]
+ for taskdep in event._depgraph['tdepends'][taskdesc]:
+ if taskdep not in tasks:
+                    # save info for a dependency task not collected previously
+ dep = _save_a_task(taskdep)
+ else:
+ dep = tasks[taskdep]
+ taskdeps_objects.append(Task_Dependency( task = target, depends_on = dep ))
+ Task_Dependency.objects.bulk_create(taskdeps_objects)
+
+ if len(errormsg) > 0:
+ logger.warning("buildinfohelper: dependency info not identify recipes: \n%s", errormsg)
+
+
+ def store_build_package_information(self, event):
+ self._ensure_build()
+
+ package_info = BuildInfoHelper._get_data_from_event(event)
+ self.orm_wrapper.save_build_package_information(
+ self.internal_state['build'],
+ package_info,
+ self.internal_state['recipes'],
+ built_package=True)
+
+ self.orm_wrapper.save_build_package_information(
+ self.internal_state['build'],
+ package_info,
+ self.internal_state['recipes'],
+ built_package=False)
+
+ def _store_build_done(self, errorcode):
+ logger.info("Build exited with errorcode %d", errorcode)
+
+ if not self.brbe:
+ return
+
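+        # brbe has the form "<BuildRequest pk>:<BuildEnvironment pk>"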
+ br_id, be_id = self.brbe.split(":")
+
+ br = BuildRequest.objects.get(pk = br_id)
+
+ # if we're 'done' because we got cancelled update the build outcome
+ if br.state == BuildRequest.REQ_CANCELLING:
+ logger.info("Build cancelled")
+ br.build.outcome = Build.CANCELLED
+ br.build.save()
+ self.internal_state['build'] = br.build
+ errorcode = 0
+
+ if errorcode == 0:
+ # request archival of the project artifacts
+ br.state = BuildRequest.REQ_COMPLETED
+ else:
+ br.state = BuildRequest.REQ_FAILED
+ br.save()
+
+ be = BuildEnvironment.objects.get(pk = be_id)
+ be.lock = BuildEnvironment.LOCK_FREE
+ be.save()
+ signal_runbuilds()
+
+ def store_log_error(self, text):
+ mockevent = MockEvent()
+ mockevent.levelno = formatter.ERROR
+ mockevent.msg = text
+ mockevent.pathname = '-- None'
+ mockevent.lineno = LogMessage.ERROR
+ self.store_log_event(mockevent)
+
+ def store_log_exception(self, text, backtrace = ""):
+ mockevent = MockEvent()
+ mockevent.levelno = -1
+ mockevent.msg = text
+ mockevent.pathname = backtrace
+ mockevent.lineno = -1
+ self.store_log_event(mockevent)
+
+ def store_log_event(self, event):
+ self._ensure_build()
+
+ if event.levelno < formatter.WARNING:
+ return
+
+ # early return for CLI builds
+ if self.brbe is None:
+            if 'backlog' not in self.internal_state:
+ self.internal_state['backlog'] = []
+ self.internal_state['backlog'].append(event)
+ return
+
+ if 'backlog' in self.internal_state:
+ # if we have a backlog of events, do our best to save them here
+ if len(self.internal_state['backlog']):
+ tempevent = self.internal_state['backlog'].pop()
+ logger.debug(1, "buildinfohelper: Saving stored event %s "
+ % tempevent)
+ self.store_log_event(tempevent)
+ else:
+ logger.info("buildinfohelper: All events saved")
+ del self.internal_state['backlog']
+
+ log_information = {}
+ log_information['build'] = self.internal_state['build']
+ if event.levelno == formatter.CRITICAL:
+ log_information['level'] = LogMessage.CRITICAL
+ elif event.levelno == formatter.ERROR:
+ log_information['level'] = LogMessage.ERROR
+ elif event.levelno == formatter.WARNING:
+ log_information['level'] = LogMessage.WARNING
+ elif event.levelno == -2: # toaster self-logging
+ log_information['level'] = -2
+ else:
+ log_information['level'] = LogMessage.INFO
+
+ log_information['message'] = event.getMessage()
+ log_information['pathname'] = event.pathname
+ log_information['lineno'] = event.lineno
+ logger.info("Logging error 2: %s", log_information)
+
+ self.orm_wrapper.create_logmessage(log_information)
+
+ def _get_filenames_from_image_license(self, image_license_manifest_path):
+ """
+ Find the FILES line in the image_license.manifest file,
+ which has the basenames of the bzImage and modules files
+ in this format:
+ FILES: bzImage--4.4.11+git0+3a5f494784_53e84104c5-r0-qemux86-20160603165040.bin modules--4.4.11+git0+3a5f494784_53e84104c5-r0-qemux86-20160603165040.tgz
+ """
+ files = []
+ with open(image_license_manifest_path) as image_license:
+ for line in image_license:
+ if line.startswith('FILES'):
+ files_str = line.split(':')[1].strip()
+ files_str = re.sub(r' {2,}', ' ', files_str)
+
+ # ignore lines like "FILES:" with no filenames
+ if files_str:
+ files += files_str.split(' ')
+ return files
+
+ def _endswith(self, str_to_test, endings):
+ """
+        Returns True if str_to_test ends with one of the strings in the
+        list endings, False otherwise
+ """
+ endswith = False
+ for ending in endings:
+ if str_to_test.endswith(ending):
+ endswith = True
+ break
+ return endswith
+
+ def scan_task_artifacts(self, event):
+ """
+ The 'TaskArtifacts' event passes the manifest file content for the
+ tasks 'do_deploy', 'do_image_complete', 'do_populate_sdk', and
+ 'do_populate_sdk_ext'. The first two will be implemented later.
+ """
+ task_vars = BuildInfoHelper._get_data_from_event(event)
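+        # task_vars['task'] has the form "<prefix>:<task name>"; strip
+        # everything up to the first colon to get the task name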
+ task_name = task_vars['task'][task_vars['task'].find(':')+1:]
+ task_artifacts = task_vars['artifacts']
+
+ if task_name in ['do_populate_sdk', 'do_populate_sdk_ext']:
+ targets = [target for target in self.internal_state['targets'] \
+ if target.task == task_name[3:]]
+ if not targets:
+ logger.warning("scan_task_artifacts: SDK targets not found: %s\n", task_name)
+ return
+ for artifact_path in task_artifacts:
+ if not os.path.isfile(artifact_path):
+ logger.warning("scan_task_artifacts: artifact file not found: %s\n", artifact_path)
+ continue
+ for target in targets:
+ # don't record the file if it's already been added
+ # to this target
+ matching_files = TargetSDKFile.objects.filter(
+ target=target, file_name=artifact_path)
+ if matching_files.count() == 0:
+ artifact_size = os.stat(artifact_path).st_size
+ self.orm_wrapper.save_target_sdk_file(
+ target, artifact_path, artifact_size)
+
+ def _get_image_files(self, deploy_dir_image, image_name, image_file_extensions):
+ """
+ Find files in deploy_dir_image whose basename starts with the
+ string image_name and ends with one of the strings in
+ image_file_extensions.
+
+ Returns a list of file dictionaries like
+
+ [
+ {
+ 'path': '/path/to/image/file',
+ 'size': <file size in bytes>
+ }
+ ]
+ """
+ image_files = []
+
+ for dirpath, _, filenames in os.walk(deploy_dir_image):
+ for filename in filenames:
+ if filename.startswith(image_name) and \
+ self._endswith(filename, image_file_extensions):
+ image_file_path = os.path.join(dirpath, filename)
+ image_file_size = os.stat(image_file_path).st_size
+
+ image_files.append({
+ 'path': image_file_path,
+ 'size': image_file_size
+ })
+
+ return image_files
+
+ def scan_image_artifacts(self):
+ """
+ Scan for built image artifacts in DEPLOY_DIR_IMAGE and associate them
+ with a Target object in self.internal_state['targets'].
+
+ We have two situations to handle:
+
+ 1. This is the first time a target + machine has been built, so
+ add files from the DEPLOY_DIR_IMAGE to the target.
+
+ OR
+
+ 2. There are no new files for the target (they were already produced by
+ a previous build), so copy them from the most recent previous build with
+ the same target, task and machine.
+ """
+ deploy_dir_image = \
+ self.server.runCommand(['getVariable', 'DEPLOY_DIR_IMAGE'])[0]
+
+ # if there's no DEPLOY_DIR_IMAGE, there aren't going to be
+ # any image artifacts, so we can return immediately
+ if not deploy_dir_image:
+ return
+
+ buildname = self.server.runCommand(['getVariable', 'BUILDNAME'])[0]
+ machine = self.server.runCommand(['getVariable', 'MACHINE'])[0]
+ image_name = self.server.runCommand(['getVariable', 'IMAGE_NAME'])[0]
+
+ # location of the manifest files for this build;
+ # note that this file is only produced if an image is produced
+ license_directory = \
+ self.server.runCommand(['getVariable', 'LICENSE_DIRECTORY'])[0]
+
+ # file name extensions for image files
+ image_file_extensions_unique = {}
+ image_fstypes = self.server.runCommand(
+ ['getVariable', 'IMAGE_FSTYPES'])[0]
+        if image_fstypes is not None:
+ image_types_str = image_fstypes.strip()
+ image_file_extensions = re.sub(r' {2,}', ' ', image_types_str)
+ image_file_extensions_unique = set(image_file_extensions.split(' '))
+
+ targets = self.internal_state['targets']
+
+ # filter out anything which isn't an image target
+ image_targets = [target for target in targets if target.is_image]
+
+ for image_target in image_targets:
+ # this is set to True if we find at least one file relating to
+ # this target; if this remains False after the scan, we copy the
+ # files from the most-recent Target with the same target + machine
+ # onto this Target instead
+ has_files = False
+
+            # we construct this because by the time we reach
+            # BuildCompleted, IMAGE_NAME has been reset to
+            # 'defaultpkgname-<MACHINE>-<BUILDNAME>';
+            # we need to change it to
+            # '<TARGET>-<MACHINE>-<BUILDNAME>'
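+            # (e.g. 'core-image-minimal-qemux86-20160603165040')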
+ real_image_name = re.sub(r'^defaultpkgname', image_target.target,
+ image_name)
+
+ image_license_manifest_path = os.path.join(
+ license_directory,
+ real_image_name,
+ 'image_license.manifest')
+
+ # if image_license.manifest exists, we can read the names of
+ # bzImage, modules etc. files for this build from it, then look for
+ # them in the DEPLOY_DIR_IMAGE; note that this file is only produced
+ # if an image file was produced
+ if os.path.isfile(image_license_manifest_path):
+ has_files = True
+
+ basenames = self._get_filenames_from_image_license(
+ image_license_manifest_path)
+
+ for basename in basenames:
+ artifact_path = os.path.join(deploy_dir_image, basename)
+ if not os.path.exists(artifact_path):
+ logger.warning("artifact %s doesn't exist, skipping" % artifact_path)
+ continue
+ artifact_size = os.stat(artifact_path).st_size
+
+ # note that the artifact will only be saved against this
+ # build if it hasn't been already
+ self.orm_wrapper.save_target_kernel_file(image_target,
+ artifact_path, artifact_size)
+
+ # store the license manifest path on the target
+ # (this file is also created any time an image file is created)
+ license_manifest_path = os.path.join(license_directory,
+ real_image_name, 'license.manifest')
+
+ self.orm_wrapper.update_target_set_license_manifest(
+ image_target, license_manifest_path)
+
+ # store the package manifest path on the target (this file
+ # is created any time an image file is created)
+ package_manifest_path = os.path.join(deploy_dir_image,
+ real_image_name + '.rootfs.manifest')
+
+ if os.path.exists(package_manifest_path):
+ self.orm_wrapper.update_target_set_package_manifest(
+ image_target, package_manifest_path)
+
+ # scan the directory for image files relating to this build
+ # (via real_image_name); note that we don't have to set
+ # has_files = True, as searching for the license manifest file
+ # will already have set it to true if at least one image file was
+ # produced; note that the real_image_name includes BUILDNAME, which
+ # in turn includes a timestamp; so if no files were produced for
+ # this timestamp (i.e. the build reused existing image files already
+ # in the directory), no files will be recorded against this target
+ image_files = self._get_image_files(deploy_dir_image,
+ real_image_name, image_file_extensions_unique)
+
+ for image_file in image_files:
+ self.orm_wrapper.save_target_image_file_information(
+ image_target, image_file['path'], image_file['size'])
+
+ if not has_files:
+ # copy image files and build artifacts from the
+ # most-recently-built Target with the
+ # same target + machine as this Target; also copy the license
+ # manifest path, as that is not treated as an artifact and needs
+ # to be set separately
+ similar_target = \
+ self.orm_wrapper.get_similar_target_with_image_files(
+ image_target)
+
+ if similar_target:
+ logger.info('image artifacts for target %s cloned from ' \
+ 'target %s' % (image_target.pk, similar_target.pk))
+ self.orm_wrapper.clone_image_artifacts(similar_target,
+ image_target)
+
+ def _get_sdk_targets(self):
+ """
+ Return targets which could generate SDK artifacts, i.e.
+ "do_populate_sdk" and "do_populate_sdk_ext".
+ """
+ return [target for target in self.internal_state['targets'] \
+ if target.task in ['populate_sdk', 'populate_sdk_ext']]
+
+ def scan_sdk_artifacts(self, event):
+ """
+ Note that we have to intercept an SDKArtifactInfo event from
+ toaster.bbclass (via toasterui) to get hold of the SDK variables we
+ need to be able to scan for files accurately: this is because
+    variables like TOOLCHAIN_OUTPUTNAME have been reset to None by the time
+ BuildCompleted is fired by bitbake, so we have to get those values
+ while the build is still in progress.
+
+ For populate_sdk_ext, this runs twice, with two different
+ TOOLCHAIN_OUTPUTNAME settings, each of which will capture some of the
+ files in the SDK output directory.
+ """
+ sdk_vars = BuildInfoHelper._get_data_from_event(event)
+ toolchain_outputname = sdk_vars['TOOLCHAIN_OUTPUTNAME']
+
+ # targets which might have created SDK artifacts
+ sdk_targets = self._get_sdk_targets()
+
+ # location of SDK artifacts
+ tmpdir = self.server.runCommand(['getVariable', 'TMPDIR'])[0]
+ sdk_dir = os.path.join(tmpdir, 'deploy', 'sdk')
+
+ # all files in the SDK directory
+ artifacts = []
+ for dir_path, _, filenames in os.walk(sdk_dir):
+ for filename in filenames:
+ full_path = os.path.join(dir_path, filename)
+ if not os.path.islink(full_path):
+ artifacts.append(full_path)
+
+ for sdk_target in sdk_targets:
+ # find files in the SDK directory which haven't already been
+ # recorded against a Target and whose basename matches
+ # TOOLCHAIN_OUTPUTNAME
+ for artifact_path in artifacts:
+ basename = os.path.basename(artifact_path)
+
+ toolchain_match = basename.startswith(toolchain_outputname)
+
+ # files which match the name of the target which produced them;
+ # for example,
+ # poky-glibc-x86_64-core-image-sato-i586-toolchain-ext-2.1+snapshot.sh
+ target_match = re.search(sdk_target.target, basename)
+
+ # targets which produce "*-nativesdk-*" files
+ is_ext_sdk_target = sdk_target.task in \
+ ['do_populate_sdk_ext', 'populate_sdk_ext']
+
+ # SDK files which don't match the target name, i.e.
+ # x86_64-nativesdk-libc.*
+ # poky-glibc-x86_64-buildtools-tarball-i586-buildtools-nativesdk-standalone-2.1+snapshot*
+ is_ext_sdk_file = re.search('-nativesdk-', basename)
+
+ file_from_target = (toolchain_match and target_match) or \
+ (is_ext_sdk_target and is_ext_sdk_file)
+
+ if file_from_target:
+ # don't record the file if it's already been added to this
+ # target
+ matching_files = TargetSDKFile.objects.filter(
+ target=sdk_target, file_name=artifact_path)
+
+ if matching_files.count() == 0:
+ artifact_size = os.stat(artifact_path).st_size
+
+ self.orm_wrapper.save_target_sdk_file(
+ sdk_target, artifact_path, artifact_size)
+
+ def clone_required_sdk_artifacts(self):
+ """
+ If an SDK target doesn't have any SDK artifacts, this means that
+ the postfuncs of populate_sdk or populate_sdk_ext didn't fire, which
+ in turn means that the targets of this build didn't generate any new
+ artifacts.
+
+        In this case, clone SDK artifacts onto targets in the current build
+        from similar existing targets which have them.
+ """
+ sdk_targets = self._get_sdk_targets()
+ for sdk_target in sdk_targets:
+ # only clone for SDK targets which have no TargetSDKFiles yet
+ if sdk_target.targetsdkfile_set.all().count() == 0:
+ similar_target = \
+ self.orm_wrapper.get_similar_target_with_sdk_files(
+ sdk_target)
+ if similar_target:
+ logger.info('SDK artifacts for target %s cloned from ' \
+ 'target %s' % (sdk_target.pk, similar_target.pk))
+ self.orm_wrapper.clone_sdk_artifacts(similar_target,
+ sdk_target)
+
+ def close(self, errorcode):
+ self._store_build_done(errorcode)
+
+ if 'backlog' in self.internal_state:
+ # we save missed events in the database for the current build
+ tempevent = self.internal_state['backlog'].pop()
+ self.store_log_event(tempevent)
+
+ if not connection.features.autocommits_when_autocommit_is_off:
+ transaction.set_autocommit(True)
+
+ # unset the brbe; this is to prevent subsequent command-line builds
+ # being incorrectly attached to the previous Toaster-triggered build;
+ # see https://bugzilla.yoctoproject.org/show_bug.cgi?id=9021
+ self.brbe = None
+
+ # unset the internal Build object to prevent it being reused for the
+ # next build
+ self.internal_state['build'] = None
diff --git a/poky/bitbake/lib/bb/ui/icons/images/images_display.png b/poky/bitbake/lib/bb/ui/icons/images/images_display.png
new file mode 100644
index 000000000..a7f87101a
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/images/images_display.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/images/images_hover.png b/poky/bitbake/lib/bb/ui/icons/images/images_hover.png
new file mode 100644
index 000000000..2d9cd99b8
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/images/images_hover.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/indicators/add-hover.png b/poky/bitbake/lib/bb/ui/icons/indicators/add-hover.png
new file mode 100644
index 000000000..526df770d
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/indicators/add-hover.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/indicators/add.png b/poky/bitbake/lib/bb/ui/icons/indicators/add.png
new file mode 100644
index 000000000..31e7090d6
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/indicators/add.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/indicators/alert.png b/poky/bitbake/lib/bb/ui/icons/indicators/alert.png
new file mode 100644
index 000000000..d1c6f55a2
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/indicators/alert.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/indicators/confirmation.png b/poky/bitbake/lib/bb/ui/icons/indicators/confirmation.png
new file mode 100644
index 000000000..3a5402d1e
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/indicators/confirmation.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/indicators/denied.png b/poky/bitbake/lib/bb/ui/icons/indicators/denied.png
new file mode 100644
index 000000000..ee35c7def
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/indicators/denied.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/indicators/error.png b/poky/bitbake/lib/bb/ui/icons/indicators/error.png
new file mode 100644
index 000000000..d06a8c151
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/indicators/error.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/indicators/info.png b/poky/bitbake/lib/bb/ui/icons/indicators/info.png
new file mode 100644
index 000000000..ee8e8d846
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/indicators/info.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/indicators/issues.png b/poky/bitbake/lib/bb/ui/icons/indicators/issues.png
new file mode 100644
index 000000000..b0c746133
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/indicators/issues.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/indicators/refresh.png b/poky/bitbake/lib/bb/ui/icons/indicators/refresh.png
new file mode 100644
index 000000000..eb6c419db
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/indicators/refresh.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/indicators/remove-hover.png b/poky/bitbake/lib/bb/ui/icons/indicators/remove-hover.png
new file mode 100644
index 000000000..aa57c6998
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/indicators/remove-hover.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/indicators/remove.png b/poky/bitbake/lib/bb/ui/icons/indicators/remove.png
new file mode 100644
index 000000000..05c3c293d
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/indicators/remove.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/indicators/tick.png b/poky/bitbake/lib/bb/ui/icons/indicators/tick.png
new file mode 100644
index 000000000..beaad361c
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/indicators/tick.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/info/info_display.png b/poky/bitbake/lib/bb/ui/icons/info/info_display.png
new file mode 100644
index 000000000..5afbba29f
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/info/info_display.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/info/info_hover.png b/poky/bitbake/lib/bb/ui/icons/info/info_hover.png
new file mode 100644
index 000000000..f9d294dfa
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/info/info_hover.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/layers/layers_display.png b/poky/bitbake/lib/bb/ui/icons/layers/layers_display.png
new file mode 100644
index 000000000..b7f9053a9
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/layers/layers_display.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/layers/layers_hover.png b/poky/bitbake/lib/bb/ui/icons/layers/layers_hover.png
new file mode 100644
index 000000000..0bf3ce0db
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/layers/layers_hover.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/packages/packages_display.png b/poky/bitbake/lib/bb/ui/icons/packages/packages_display.png
new file mode 100644
index 000000000..f5d0a5064
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/packages/packages_display.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/packages/packages_hover.png b/poky/bitbake/lib/bb/ui/icons/packages/packages_hover.png
new file mode 100644
index 000000000..c081165f3
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/packages/packages_hover.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/recipe/recipe_display.png b/poky/bitbake/lib/bb/ui/icons/recipe/recipe_display.png
new file mode 100644
index 000000000..e9809bc7d
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/recipe/recipe_display.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/recipe/recipe_hover.png b/poky/bitbake/lib/bb/ui/icons/recipe/recipe_hover.png
new file mode 100644
index 000000000..7e48da9af
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/recipe/recipe_hover.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/settings/settings_display.png b/poky/bitbake/lib/bb/ui/icons/settings/settings_display.png
new file mode 100644
index 000000000..88c464db0
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/settings/settings_display.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/settings/settings_hover.png b/poky/bitbake/lib/bb/ui/icons/settings/settings_hover.png
new file mode 100644
index 000000000..d92a0bf2c
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/settings/settings_hover.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/templates/templates_display.png b/poky/bitbake/lib/bb/ui/icons/templates/templates_display.png
new file mode 100644
index 000000000..153c7afb6
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/templates/templates_display.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/icons/templates/templates_hover.png b/poky/bitbake/lib/bb/ui/icons/templates/templates_hover.png
new file mode 100644
index 000000000..afb7165fe
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/icons/templates/templates_hover.png
Binary files differ
diff --git a/poky/bitbake/lib/bb/ui/knotty.py b/poky/bitbake/lib/bb/ui/knotty.py
new file mode 100644
index 000000000..fa88e6ccd
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/knotty.py
@@ -0,0 +1,728 @@
+#
+# BitBake (No)TTY UI Implementation
+#
+# Handling output to TTYs or files (no TTY)
+#
+# Copyright (C) 2006-2012 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from __future__ import division
+
+import os
+import sys
+import xmlrpc.client as xmlrpclib
+import logging
+import progressbar
+import signal
+import bb.msg
+import time
+import fcntl
+import struct
+import copy
+import atexit
+
+from bb.ui import uihelper
+
+featureSet = [bb.cooker.CookerFeatures.SEND_SANITYEVENTS]
+
+logger = logging.getLogger("BitBake")
+interactive = sys.stdout.isatty()
+
+class BBProgress(progressbar.ProgressBar):
+ def __init__(self, msg, maxval, widgets=None, extrapos=-1, resize_handler=None):
+ self.msg = msg
+ self.extrapos = extrapos
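+        # extrapos is the index into the widgets list that setextra() updates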
+ if not widgets:
+ widgets = [progressbar.Percentage(), ' ', progressbar.Bar(), ' ',
+ progressbar.ETA()]
+ self.extrapos = 4
+
+ if resize_handler:
+ self._resize_default = resize_handler
+ else:
+ self._resize_default = signal.getsignal(signal.SIGWINCH)
+ progressbar.ProgressBar.__init__(self, maxval, [self.msg + ": "] + widgets, fd=sys.stdout)
+
+ def _handle_resize(self, signum=None, frame=None):
+ progressbar.ProgressBar._handle_resize(self, signum, frame)
+ if self._resize_default:
+ self._resize_default(signum, frame)
+
+ def finish(self):
+ progressbar.ProgressBar.finish(self)
+ if self._resize_default:
+ signal.signal(signal.SIGWINCH, self._resize_default)
+
+ def setmessage(self, msg):
+ self.msg = msg
+ self.widgets[0] = msg
+
+ def setextra(self, extra):
+ if self.extrapos > -1:
+ if extra:
+ extrastr = str(extra)
+ if extrastr[0] != ' ':
+ extrastr = ' ' + extrastr
+ else:
+ extrastr = ''
+ self.widgets[self.extrapos] = extrastr
+
+ def _need_update(self):
+ # We always want the bar to print when update() is called
+ return True
+
+class NonInteractiveProgress(object):
+ fobj = sys.stdout
+
+ def __init__(self, msg, maxval):
+ self.msg = msg
+ self.maxval = maxval
+ self.finished = False
+
+ def start(self, update=True):
+ self.fobj.write("%s..." % self.msg)
+ self.fobj.flush()
+ return self
+
+ def update(self, value):
+ pass
+
+ def finish(self):
+ if self.finished:
+ return
+ self.fobj.write("done.\n")
+ self.fobj.flush()
+ self.finished = True
+
+def new_progress(msg, maxval):
+ if interactive:
+ return BBProgress(msg, maxval)
+ else:
+ return NonInteractiveProgress(msg, maxval)
+
+def pluralise(singular, plural, qty):
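+    # e.g. pluralise("%s task failed", "%s tasks failed", qty)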
+    if qty == 1:
+ return singular % qty
+ else:
+ return plural % qty
+
+
+class InteractConsoleLogFilter(logging.Filter):
+ def __init__(self, tf, format):
+ self.tf = tf
+ self.format = format
+
+ def filter(self, record):
+ if record.levelno == self.format.NOTE and (record.msg.startswith("Running") or record.msg.startswith("recipe ")):
+ return False
+ self.tf.clearFooter()
+ return True
+
+class TerminalFilter(object):
+ rows = 25
+ columns = 80
+
+ def sigwinch_handle(self, signum, frame):
+ self.rows, self.columns = self.getTerminalColumns()
+ if self._sigwinch_default:
+ self._sigwinch_default(signum, frame)
+
+ def getTerminalColumns(self):
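+        # try, in order: TIOCGWINSZ on stdout, TIOCGWINSZ on the controlling
+        # terminal, the LINES/COLUMNS environment variables, then 25x80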
+ def ioctl_GWINSZ(fd):
+ try:
+ cr = struct.unpack('hh', fcntl.ioctl(fd, self.termios.TIOCGWINSZ, '1234'))
+ except:
+ return None
+ return cr
+ cr = ioctl_GWINSZ(sys.stdout.fileno())
+ if not cr:
+ try:
+ fd = os.open(os.ctermid(), os.O_RDONLY)
+ cr = ioctl_GWINSZ(fd)
+ os.close(fd)
+ except:
+ pass
+ if not cr:
+ try:
+                cr = (os.environ['LINES'], os.environ['COLUMNS'])
+ except:
+ cr = (25, 80)
+ return cr
+
+ def __init__(self, main, helper, console, errconsole, format, quiet):
+ self.main = main
+ self.helper = helper
+ self.cuu = None
+ self.stdinbackup = None
+ self.interactive = sys.stdout.isatty()
+ self.footer_present = False
+ self.lastpids = []
+ self.lasttime = None
+ self.quiet = quiet
+
+ if not self.interactive:
+ return
+
+ try:
+ import curses
+ except ImportError:
+ sys.exit("FATAL: The knotty ui could not load the required curses python module.")
+
+ import termios
+ self.curses = curses
+ self.termios = termios
+ try:
+ fd = sys.stdin.fileno()
+ self.stdinbackup = termios.tcgetattr(fd)
+ new = copy.deepcopy(self.stdinbackup)
+ new[3] = new[3] & ~termios.ECHO
+ termios.tcsetattr(fd, termios.TCSADRAIN, new)
+ curses.setupterm()
+ if curses.tigetnum("colors") > 2:
+ format.enable_color()
+ self.ed = curses.tigetstr("ed")
+ if self.ed:
+ self.cuu = curses.tigetstr("cuu")
+ try:
+ self._sigwinch_default = signal.getsignal(signal.SIGWINCH)
+ signal.signal(signal.SIGWINCH, self.sigwinch_handle)
+ except:
+ pass
+ self.rows, self.columns = self.getTerminalColumns()
+ except:
+ self.cuu = None
+ if not self.cuu:
+ self.interactive = False
+ bb.note("Unable to use interactive mode for this terminal, using fallback")
+ return
+ if console:
+ console.addFilter(InteractConsoleLogFilter(self, format))
+ if errconsole:
+ errconsole.addFilter(InteractConsoleLogFilter(self, format))
+
+ self.main_progress = None
+
+ def clearFooter(self):
+ if self.footer_present:
+ lines = self.footer_present
+ sys.stdout.buffer.write(self.curses.tparm(self.cuu, lines))
+ sys.stdout.buffer.write(self.curses.tparm(self.ed))
+ sys.stdout.flush()
+ self.footer_present = False
+
+ def updateFooter(self):
+ if not self.cuu:
+ return
+ activetasks = self.helper.running_tasks
+ failedtasks = self.helper.failed_tasks
+ runningpids = self.helper.running_pids
+ currenttime = time.time()
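+        # force a footer refresh at least every five seconds so that the
+        # per-task runtimes keep ticking over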
+ if not self.lasttime or (currenttime - self.lasttime > 5):
+ self.helper.needUpdate = True
+ self.lasttime = currenttime
+ if self.footer_present and not self.helper.needUpdate:
+ return
+ self.helper.needUpdate = False
+ if self.footer_present:
+ self.clearFooter()
+ if (not self.helper.tasknumber_total or self.helper.tasknumber_current == self.helper.tasknumber_total) and not len(activetasks):
+ return
+ tasks = []
+ for t in runningpids:
+ progress = activetasks[t].get("progress", None)
+ if progress is not None:
+ pbar = activetasks[t].get("progressbar", None)
+ rate = activetasks[t].get("rate", None)
+ start_time = activetasks[t].get("starttime", None)
+ if not pbar or pbar.bouncing != (progress < 0):
+ if progress < 0:
+ pbar = BBProgress("0: %s (pid %s) " % (activetasks[t]["title"], t), 100, widgets=[progressbar.BouncingSlider(), ''], extrapos=2, resize_handler=self.sigwinch_handle)
+ pbar.bouncing = True
+ else:
+ pbar = BBProgress("0: %s (pid %s) " % (activetasks[t]["title"], t), 100, widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ''], extrapos=4, resize_handler=self.sigwinch_handle)
+ pbar.bouncing = False
+ activetasks[t]["progressbar"] = pbar
+ tasks.append((pbar, progress, rate, start_time))
+ else:
+ start_time = activetasks[t].get("starttime", None)
+ if start_time:
+ tasks.append("%s - %ds (pid %s)" % (activetasks[t]["title"], currenttime - start_time, t))
+ else:
+ tasks.append("%s (pid %s)" % (activetasks[t]["title"], t))
+
+ if self.main.shutdown:
+ content = "Waiting for %s running tasks to finish:" % len(activetasks)
+ print(content)
+ else:
+ if self.quiet:
+ content = "Running tasks (%s of %s)" % (self.helper.tasknumber_current, self.helper.tasknumber_total)
+ elif not len(activetasks):
+ content = "No currently running tasks (%s of %s)" % (self.helper.tasknumber_current, self.helper.tasknumber_total)
+ else:
+ content = "Currently %2s running tasks (%s of %s)" % (len(activetasks), self.helper.tasknumber_current, self.helper.tasknumber_total)
+ maxtask = self.helper.tasknumber_total
+ if not self.main_progress or self.main_progress.maxval != maxtask:
+ widgets = [' ', progressbar.Percentage(), ' ', progressbar.Bar()]
+ self.main_progress = BBProgress("Running tasks", maxtask, widgets=widgets, resize_handler=self.sigwinch_handle)
+ self.main_progress.start(False)
+ self.main_progress.setmessage(content)
+ progress = self.helper.tasknumber_current - 1
+ if progress < 0:
+ progress = 0
+ content = self.main_progress.update(progress)
+ print('')
+ lines = 1 + int(len(content) / (self.columns + 1))
+ if self.quiet == 0:
+ for tasknum, task in enumerate(tasks[:(self.rows - 2)]):
+ if isinstance(task, tuple):
+ pbar, progress, rate, start_time = task
+ if not pbar.start_time:
+ pbar.start(False)
+ if start_time:
+ pbar.start_time = start_time
+ pbar.setmessage('%s:%s' % (tasknum, pbar.msg.split(':', 1)[1]))
+ if progress > -1:
+ pbar.setextra(rate)
+ content = pbar.update(progress)
+ else:
+ content = pbar.update(1)
+ print('')
+ else:
+ content = "%s: %s" % (tasknum, task)
+ print(content)
+ lines = lines + 1 + int(len(content) / (self.columns + 1))
+ self.footer_present = lines
+ self.lastpids = runningpids[:]
+ self.lastcount = self.helper.tasknumber_current
+
+ def finish(self):
+ if self.stdinbackup:
+ fd = sys.stdin.fileno()
+ self.termios.tcsetattr(fd, self.termios.TCSADRAIN, self.stdinbackup)
+
+def print_event_log(event, includelogs, loglines, termfilter):
+ # FIXME refactor this out further
+ logfile = event.logfile
+ if logfile and os.path.exists(logfile):
+ termfilter.clearFooter()
+ bb.error("Logfile of failure stored in: %s" % logfile)
+ if includelogs and not event.errprinted:
+ print("Log data follows:")
+ f = open(logfile, "r")
+ lines = []
+ while True:
+ l = f.readline()
+ if l == '':
+ break
+ l = l.rstrip()
+ if loglines:
+ lines.append(' | %s' % l)
+ if len(lines) > int(loglines):
+ lines.pop(0)
+ else:
+ print('| %s' % l)
+ f.close()
+ if lines:
+ for line in lines:
+ print(line)
+
+def _log_settings_from_server(server, observe_only):
+ # Get values of variables which control our output
+ includelogs, error = server.runCommand(["getVariable", "BBINCLUDELOGS"])
+ if error:
+ logger.error("Unable to get the value of BBINCLUDELOGS variable: %s" % error)
+ raise BaseException(error)
+ loglines, error = server.runCommand(["getVariable", "BBINCLUDELOGS_LINES"])
+ if error:
+ logger.error("Unable to get the value of BBINCLUDELOGS_LINES variable: %s" % error)
+ raise BaseException(error)
+ if observe_only:
+ cmd = 'getVariable'
+ else:
+ cmd = 'getSetVariable'
+ consolelogfile, error = server.runCommand([cmd, "BB_CONSOLELOG"])
+ if error:
+ logger.error("Unable to get the value of BB_CONSOLELOG variable: %s" % error)
+ raise BaseException(error)
+ return includelogs, loglines, consolelogfile
+
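+# the events this UI asks the server to deliver (see setEventMask below)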
+_evt_list = [ "bb.runqueue.runQueueExitWait", "bb.event.LogExecTTY", "logging.LogRecord",
+ "bb.build.TaskFailed", "bb.build.TaskBase", "bb.event.ParseStarted",
+ "bb.event.ParseProgress", "bb.event.ParseCompleted", "bb.event.CacheLoadStarted",
+ "bb.event.CacheLoadProgress", "bb.event.CacheLoadCompleted", "bb.command.CommandFailed",
+ "bb.command.CommandExit", "bb.command.CommandCompleted", "bb.cooker.CookerExit",
+ "bb.event.MultipleProviders", "bb.event.NoProvider", "bb.runqueue.sceneQueueTaskStarted",
+ "bb.runqueue.runQueueTaskStarted", "bb.runqueue.runQueueTaskFailed", "bb.runqueue.sceneQueueTaskFailed",
+ "bb.event.BuildBase", "bb.build.TaskStarted", "bb.build.TaskSucceeded", "bb.build.TaskFailedSilent",
+ "bb.build.TaskProgress", "bb.event.ProcessStarted", "bb.event.ProcessProgress", "bb.event.ProcessFinished"]
+
+def main(server, eventHandler, params, tf = TerminalFilter):
+
+ if not params.observe_only:
+ params.updateToServer(server, os.environ.copy())
+
+ includelogs, loglines, consolelogfile = _log_settings_from_server(server, params.observe_only)
+
+ if sys.stdin.isatty() and sys.stdout.isatty():
+ log_exec_tty = True
+ else:
+ log_exec_tty = False
+
+ helper = uihelper.BBUIHelper()
+
+ console = logging.StreamHandler(sys.stdout)
+ errconsole = logging.StreamHandler(sys.stderr)
+ format_str = "%(levelname)s: %(message)s"
+ format = bb.msg.BBLogFormatter(format_str)
+ if params.options.quiet == 0:
+ forcelevel = None
+ elif params.options.quiet > 2:
+ forcelevel = bb.msg.BBLogFormatter.ERROR
+ else:
+ forcelevel = bb.msg.BBLogFormatter.WARNING
+ bb.msg.addDefaultlogFilter(console, bb.msg.BBLogFilterStdOut, forcelevel)
+ bb.msg.addDefaultlogFilter(errconsole, bb.msg.BBLogFilterStdErr)
+ console.setFormatter(format)
+ errconsole.setFormatter(format)
+ if not bb.msg.has_console_handler(logger):
+ logger.addHandler(console)
+ logger.addHandler(errconsole)
+
+ bb.utils.set_process_name("KnottyUI")
+
+ if params.options.remote_server and params.options.kill_server:
+ server.terminateServer()
+ return
+
+ consolelog = None
+ if consolelogfile and not params.options.show_environment and not params.options.show_versions:
+ bb.utils.mkdirhier(os.path.dirname(consolelogfile))
+ conlogformat = bb.msg.BBLogFormatter(format_str)
+ consolelog = logging.FileHandler(consolelogfile)
+ bb.msg.addDefaultlogFilter(consolelog)
+ consolelog.setFormatter(conlogformat)
+ logger.addHandler(consolelog)
+ loglink = os.path.join(os.path.dirname(consolelogfile), 'console-latest.log')
+ bb.utils.remove(loglink)
+ try:
+ os.symlink(os.path.basename(consolelogfile), loglink)
+ except OSError:
+ pass
+
+ llevel, debug_domains = bb.msg.constructLogOptions()
+ server.runCommand(["setEventMask", server.getEventHandle(), llevel, debug_domains, _evt_list])
+
+ universe = False
+ if not params.observe_only:
+ params.updateFromServer(server)
+ cmdline = params.parseActions()
+ if not cmdline:
+ print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
+ return 1
+ if 'msg' in cmdline and cmdline['msg']:
+ logger.error(cmdline['msg'])
+ return 1
+ if cmdline['action'][0] == "buildTargets" and "universe" in cmdline['action'][1]:
+ universe = True
+
+ ret, error = server.runCommand(cmdline['action'])
+ if error:
+ logger.error("Command '%s' failed: %s" % (cmdline, error))
+ return 1
+ elif ret != True:
+ logger.error("Command '%s' failed: returned %s" % (cmdline, ret))
+ return 1
+
+
+ parseprogress = None
+ cacheprogress = None
+ main.shutdown = 0
+ interrupted = False
+ return_value = 0
+ errors = 0
+ warnings = 0
+ taskfailures = []
+
+ termfilter = tf(main, helper, console, errconsole, format, params.options.quiet)
+ atexit.register(termfilter.finish)
+
+ while True:
+ try:
+ event = eventHandler.waitEvent(0)
+ if event is None:
+ if main.shutdown > 1:
+ break
+ termfilter.updateFooter()
+ event = eventHandler.waitEvent(0.25)
+ if event is None:
+ continue
+ helper.eventHandler(event)
+ if isinstance(event, bb.runqueue.runQueueExitWait):
+ if not main.shutdown:
+ main.shutdown = 1
+ continue
+ if isinstance(event, bb.event.LogExecTTY):
+ if log_exec_tty:
+ tries = event.retries
+ while tries:
+ print("Trying to run: %s" % event.prog)
+ if os.system(event.prog) == 0:
+ break
+ time.sleep(event.sleep_delay)
+ tries -= 1
+ if tries:
+ continue
+ logger.warning(event.msg)
+ continue
+
+ if isinstance(event, logging.LogRecord):
+ if event.levelno >= format.ERROR:
+ errors = errors + 1
+ return_value = 1
+ elif event.levelno == format.WARNING:
+ warnings = warnings + 1
+
+ if event.taskpid != 0:
+ # For "normal" logging conditions, don't show note logs from tasks
+ # but do show them if the user has changed the default log level to
+ # include verbose/debug messages
+ if event.levelno <= format.NOTE and (event.levelno < llevel or (event.levelno == format.NOTE and llevel != format.VERBOSE)):
+ continue
+
+ # Prefix task messages with recipe/task
+ if event.taskpid in helper.running_tasks and event.levelno != format.PLAIN:
+ taskinfo = helper.running_tasks[event.taskpid]
+ event.msg = taskinfo['title'] + ': ' + event.msg
+ if hasattr(event, 'fn'):
+ event.msg = event.fn + ': ' + event.msg
+ logger.handle(event)
+ continue
+
+ if isinstance(event, bb.build.TaskFailedSilent):
+ logger.warning("Logfile for failed setscene task is %s" % event.logfile)
+ continue
+ if isinstance(event, bb.build.TaskFailed):
+ return_value = 1
+ print_event_log(event, includelogs, loglines, termfilter)
+ if isinstance(event, bb.build.TaskBase):
+ logger.info(event._message)
+ continue
+ if isinstance(event, bb.event.ParseStarted):
+ if params.options.quiet > 1:
+ continue
+ if event.total == 0:
+ continue
+ parseprogress = new_progress("Parsing recipes", event.total).start()
+ continue
+ if isinstance(event, bb.event.ParseProgress):
+ if params.options.quiet > 1:
+ continue
+ if parseprogress:
+ parseprogress.update(event.current)
+ else:
+ bb.warn("Got ParseProgress event for parsing that never started?")
+ continue
+ if isinstance(event, bb.event.ParseCompleted):
+ if params.options.quiet > 1:
+ continue
+ if not parseprogress:
+ continue
+ parseprogress.finish()
+                parseprogress = None
+ if params.options.quiet == 0:
+ print(("Parsing of %d .bb files complete (%d cached, %d parsed). %d targets, %d skipped, %d masked, %d errors."
+ % ( event.total, event.cached, event.parsed, event.virtuals, event.skipped, event.masked, event.errors)))
+ continue
+
+ if isinstance(event, bb.event.CacheLoadStarted):
+ if params.options.quiet > 1:
+ continue
+ cacheprogress = new_progress("Loading cache", event.total).start()
+ continue
+ if isinstance(event, bb.event.CacheLoadProgress):
+ if params.options.quiet > 1:
+ continue
+ cacheprogress.update(event.current)
+ continue
+ if isinstance(event, bb.event.CacheLoadCompleted):
+ if params.options.quiet > 1:
+ continue
+ cacheprogress.finish()
+ if params.options.quiet == 0:
+ print("Loaded %d entries from dependency cache." % event.num_entries)
+ continue
+
+ if isinstance(event, bb.command.CommandFailed):
+ return_value = event.exitcode
+ if event.error:
+ errors = errors + 1
+ logger.error(str(event))
+ main.shutdown = 2
+ continue
+ if isinstance(event, bb.command.CommandExit):
+ if not return_value:
+ return_value = event.exitcode
+ continue
+ if isinstance(event, (bb.command.CommandCompleted, bb.cooker.CookerExit)):
+ main.shutdown = 2
+ continue
+ if isinstance(event, bb.event.MultipleProviders):
+ logger.info(str(event))
+ continue
+ if isinstance(event, bb.event.NoProvider):
+ # For universe builds, only show these as warnings, not errors
+ if not universe:
+ return_value = 1
+ errors = errors + 1
+ logger.error(str(event))
+ else:
+ logger.warning(str(event))
+ continue
+
+ if isinstance(event, bb.runqueue.sceneQueueTaskStarted):
+ logger.info("Running setscene task %d of %d (%s)" % (event.stats.completed + event.stats.active + event.stats.failed + 1, event.stats.total, event.taskstring))
+ continue
+
+ if isinstance(event, bb.runqueue.runQueueTaskStarted):
+ if event.noexec:
+ tasktype = 'noexec task'
+ else:
+ tasktype = 'task'
+ logger.info("Running %s %d of %d (%s)",
+ tasktype,
+ event.stats.completed + event.stats.active +
+ event.stats.failed + 1,
+ event.stats.total, event.taskstring)
+ continue
+
+ if isinstance(event, bb.runqueue.runQueueTaskFailed):
+ return_value = 1
+ taskfailures.append(event.taskstring)
+ logger.error(str(event))
+ continue
+
+ if isinstance(event, bb.runqueue.sceneQueueTaskFailed):
+ logger.warning(str(event))
+ continue
+
+ if isinstance(event, bb.event.DepTreeGenerated):
+ continue
+
+ if isinstance(event, bb.event.ProcessStarted):
+ if params.options.quiet > 1:
+ continue
+ parseprogress = new_progress(event.processname, event.total)
+ parseprogress.start(False)
+ continue
+ if isinstance(event, bb.event.ProcessProgress):
+ if params.options.quiet > 1:
+ continue
+ if parseprogress:
+ parseprogress.update(event.progress)
+ else:
+ bb.warn("Got ProcessProgress event for someting that never started?")
+ continue
+ if isinstance(event, bb.event.ProcessFinished):
+ if params.options.quiet > 1:
+ continue
+ if parseprogress:
+ parseprogress.finish()
+ parseprogress = None
+ continue
+
+ # ignore
+ if isinstance(event, (bb.event.BuildBase,
+ bb.event.MetadataEvent,
+ bb.event.StampUpdate,
+ bb.event.ConfigParsed,
+ bb.event.MultiConfigParsed,
+ bb.event.RecipeParsed,
+ bb.event.RecipePreFinalise,
+ bb.runqueue.runQueueEvent,
+ bb.event.OperationStarted,
+ bb.event.OperationCompleted,
+ bb.event.OperationProgress,
+ bb.event.DiskFull,
+ bb.event.HeartbeatEvent,
+ bb.build.TaskProgress)):
+ continue
+
+ logger.error("Unknown event: %s", event)
+
+ except EnvironmentError as ioerror:
+ termfilter.clearFooter()
+ # ignore interrupted io
+ if ioerror.args[0] == 4:
+ continue
+ sys.stderr.write(str(ioerror))
+ if not params.observe_only:
+ _, error = server.runCommand(["stateForceShutdown"])
+ main.shutdown = 2
+ except KeyboardInterrupt:
+ termfilter.clearFooter()
+ if params.observe_only:
+ print("\nKeyboard Interrupt, exiting observer...")
+ main.shutdown = 2
+ if not params.observe_only and main.shutdown == 1:
+ print("\nSecond Keyboard Interrupt, stopping...\n")
+ _, error = server.runCommand(["stateForceShutdown"])
+ if error:
+ logger.error("Unable to cleanly stop: %s" % error)
+ if not params.observe_only and main.shutdown == 0:
+ print("\nKeyboard Interrupt, closing down...\n")
+ interrupted = True
+ _, error = server.runCommand(["stateShutdown"])
+ if error:
+ logger.error("Unable to cleanly shutdown: %s" % error)
+ main.shutdown = main.shutdown + 1
+ pass
+ except Exception as e:
+ import traceback
+ sys.stderr.write(traceback.format_exc())
+ if not params.observe_only:
+ _, error = server.runCommand(["stateForceShutdown"])
+ main.shutdown = 2
+ return_value = 1
+ try:
+ termfilter.clearFooter()
+ summary = ""
+ if taskfailures:
+ summary += pluralise("\nSummary: %s task failed:",
+ "\nSummary: %s tasks failed:", len(taskfailures))
+ for failure in taskfailures:
+ summary += "\n %s" % failure
+ if warnings:
+ summary += pluralise("\nSummary: There was %s WARNING message shown.",
+ "\nSummary: There were %s WARNING messages shown.", warnings)
+ if return_value and errors:
+ summary += pluralise("\nSummary: There was %s ERROR message shown, returning a non-zero exit code.",
+ "\nSummary: There were %s ERROR messages shown, returning a non-zero exit code.", errors)
+ if summary and params.options.quiet == 0:
+ print(summary)
+
+ if interrupted:
+ print("Execution was interrupted, returning a non-zero exit code.")
+ if return_value == 0:
+ return_value = 1
+ except IOError as e:
+ import errno
+ if e.errno == errno.EPIPE:
+ pass
+
+ if consolelog:
+ logger.removeHandler(consolelog)
+ consolelog.close()
+
+ return return_value
diff --git a/poky/bitbake/lib/bb/ui/ncurses.py b/poky/bitbake/lib/bb/ui/ncurses.py
new file mode 100644
index 000000000..8690c529c
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/ncurses.py
@@ -0,0 +1,373 @@
+#
+# BitBake Curses UI Implementation
+#
+# Implements an ncurses frontend for the BitBake utility.
+#
+# Copyright (C) 2006 Michael 'Mickey' Lauer
+# Copyright (C) 2006-2007 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ We have the following windows:
+
+    1.) Main Window: Shows what we are ultimately building and how far we are. Includes a status bar.
+    2.) Thread Activity Window: Shows one status line for every concurrent bitbake thread.
+    3.) Command Line Window: Contains an interactive command line where you can interact with BitBake.
+
+    The basic window layout is as follows:
+
+ |---------------------------------------------------------|
+ | <Main Window> | <Thread Activity Window> |
+ | | 0: foo do_compile complete|
+ | Building Gtk+-2.6.10 | 1: bar do_patch complete |
+ | Status: 60% | ... |
+ | | ... |
+ | | ... |
+ |---------------------------------------------------------|
+ |<Command Line Window> |
+ |>>> which virtual/kernel |
+ |openzaurus-kernel |
+ |>>> _ |
+ |---------------------------------------------------------|
+
+"""
+
+
+
+import logging
+import os, sys, itertools, time, subprocess
+
+try:
+ import curses
+except ImportError:
+ sys.exit("FATAL: The ncurses ui could not load the required curses python module.")
+
+import bb
+import xmlrpc.client
+from bb import ui
+from bb.ui import uihelper
+
+parsespin = itertools.cycle( '|/-\\' )
+
+X = 0
+Y = 1
+WIDTH = 2
+HEIGHT = 3
+
+MAXSTATUSLENGTH = 32
+
+class NCursesUI:
+ """
+ NCurses UI Class
+ """
+ class Window:
+ """Base Window Class"""
+ def __init__( self, x, y, width, height, fg=curses.COLOR_BLACK, bg=curses.COLOR_WHITE ):
+ self.win = curses.newwin( height, width, y, x )
+ self.dimensions = ( x, y, width, height )
+ """
+ if curses.has_colors():
+ color = 1
+ curses.init_pair( color, fg, bg )
+ self.win.bkgdset( ord(' '), curses.color_pair(color) )
+ else:
+ self.win.bkgdset( ord(' '), curses.A_BOLD )
+ """
+ self.erase()
+ self.setScrolling()
+ self.win.noutrefresh()
+
+ def erase( self ):
+ self.win.erase()
+
+ def setScrolling( self, b = True ):
+ self.win.scrollok( b )
+ self.win.idlok( b )
+
+ def setBoxed( self ):
+ self.boxed = True
+ self.win.box()
+ self.win.noutrefresh()
+
+ def setText( self, x, y, text, *args ):
+ self.win.addstr( y, x, text, *args )
+ self.win.noutrefresh()
+
+ def appendText( self, text, *args ):
+ self.win.addstr( text, *args )
+ self.win.noutrefresh()
+
+ def drawHline( self, y ):
+ self.win.hline( y, 0, curses.ACS_HLINE, self.dimensions[WIDTH] )
+ self.win.noutrefresh()
+
+ class DecoratedWindow( Window ):
+ """Base class for windows with a box and a title bar"""
+ def __init__( self, title, x, y, width, height, fg=curses.COLOR_BLACK, bg=curses.COLOR_WHITE ):
+ NCursesUI.Window.__init__( self, x+1, y+3, width-2, height-4, fg, bg )
+ self.decoration = NCursesUI.Window( x, y, width, height, fg, bg )
+ self.decoration.setBoxed()
+ self.decoration.win.hline( 2, 1, curses.ACS_HLINE, width-2 )
+ self.setTitle( title )
+
+ def setTitle( self, title ):
+ self.decoration.setText( 1, 1, title.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD )
+
+ #-------------------------------------------------------------------------#
+# class TitleWindow( Window ):
+ #-------------------------------------------------------------------------#
+# """Title Window"""
+# def __init__( self, x, y, width, height ):
+# NCursesUI.Window.__init__( self, x, y, width, height )
+# version = bb.__version__
+# title = "BitBake %s" % version
+# credit = "(C) 2003-2007 Team BitBake"
+# #self.win.hline( 2, 1, curses.ACS_HLINE, width-2 )
+# self.win.border()
+# self.setText( 1, 1, title.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD )
+# self.setText( 1, 2, credit.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD )
+
+ #-------------------------------------------------------------------------#
+ class ThreadActivityWindow( DecoratedWindow ):
+ #-------------------------------------------------------------------------#
+ """Thread Activity Window"""
+ def __init__( self, x, y, width, height ):
+ NCursesUI.DecoratedWindow.__init__( self, "Thread Activity", x, y, width, height )
+
+ def setStatus( self, thread, text ):
+ line = "%02d: %s" % ( thread, text )
+ width = self.dimensions[WIDTH]
+ if ( len(line) > width ):
+ line = line[:width-3] + "..."
+ else:
+ line = line.ljust( width )
+ self.setText( 0, thread, line )
+
+ #-------------------------------------------------------------------------#
+ class MainWindow( DecoratedWindow ):
+ #-------------------------------------------------------------------------#
+ """Main Window"""
+ def __init__( self, x, y, width, height ):
+ self.StatusPosition = width - MAXSTATUSLENGTH
+ NCursesUI.DecoratedWindow.__init__( self, None, x, y, width, height )
+ curses.nl()
+
+ def setTitle( self, title ):
+ title = "BitBake %s" % bb.__version__
+ self.decoration.setText( 2, 1, title, curses.A_BOLD )
+ self.decoration.setText( self.StatusPosition - 8, 1, "Status:", curses.A_BOLD )
+
+ def setStatus(self, status):
+            status = status.ljust(MAXSTATUSLENGTH)
+ self.decoration.setText( self.StatusPosition, 1, status, curses.A_BOLD )
+
+
+ #-------------------------------------------------------------------------#
+ class ShellOutputWindow( DecoratedWindow ):
+ #-------------------------------------------------------------------------#
+ """Interactive Command Line Output"""
+ def __init__( self, x, y, width, height ):
+ NCursesUI.DecoratedWindow.__init__( self, "Command Line Window", x, y, width, height )
+
+ #-------------------------------------------------------------------------#
+ class ShellInputWindow( Window ):
+ #-------------------------------------------------------------------------#
+ """Interactive Command Line Input"""
+ def __init__( self, x, y, width, height ):
+ NCursesUI.Window.__init__( self, x, y, width, height )
+
+# TODO: move this import back to the top of the file:
+#            from curses.textpad import Textbox
+# self.textbox = Textbox( self.win )
+# t = threading.Thread()
+# t.run = self.textbox.edit
+# t.start()
+
+ #-------------------------------------------------------------------------#
+ def main(self, stdscr, server, eventHandler, params):
+ #-------------------------------------------------------------------------#
+ height, width = stdscr.getmaxyx()
+
+        # For now, split the screen like this:
+        #    MAIN_y + THREAD_y = 2/3 of the screen at the top
+        #    MAIN_x = 2/3 left, THREAD_x = 1/3 right
+        #    CLI_y = 1/3 of the screen at the bottom
+        #    CLI_x = full width
+
+ main_left = 0
+ main_top = 0
+ main_height = ( height // 3 * 2 )
+ main_width = ( width // 3 ) * 2
+ clo_left = main_left
+ clo_top = main_top + main_height
+ clo_height = height - main_height - main_top - 1
+ clo_width = width
+ cli_left = main_left
+ cli_top = clo_top + clo_height
+ cli_height = 1
+ cli_width = width
+ thread_left = main_left + main_width
+ thread_top = main_top
+ thread_height = main_height
+ thread_width = width - main_width
+
+ #tw = self.TitleWindow( 0, 0, width, main_top )
+ mw = self.MainWindow( main_left, main_top, main_width, main_height )
+ taw = self.ThreadActivityWindow( thread_left, thread_top, thread_width, thread_height )
+ clo = self.ShellOutputWindow( clo_left, clo_top, clo_width, clo_height )
+ cli = self.ShellInputWindow( cli_left, cli_top, cli_width, cli_height )
+ cli.setText( 0, 0, "BB>" )
+
+ mw.setStatus("Idle")
+
+ helper = uihelper.BBUIHelper()
+ shutdown = 0
+
+ try:
+ params.updateFromServer(server)
+ cmdline = params.parseActions()
+ if not cmdline:
+ print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
+ return 1
+ if 'msg' in cmdline and cmdline['msg']:
+ logger.error(cmdline['msg'])
+ return 1
+ cmdline = cmdline['action']
+ ret, error = server.runCommand(cmdline)
+ if error:
+ print("Error running command '%s': %s" % (cmdline, error))
+ return
+ elif ret != True:
+ print("Couldn't get default commandlind! %s" % ret)
+ return
+ except xmlrpc.client.Fault as x:
+ print("XMLRPC Fault getting commandline:\n %s" % x)
+ return
+
+ exitflag = False
+ while not exitflag:
+ try:
+ event = eventHandler.waitEvent(0.25)
+ if not event:
+ continue
+
+ helper.eventHandler(event)
+ if isinstance(event, bb.build.TaskBase):
+ mw.appendText("NOTE: %s\n" % event._message)
+ if isinstance(event, logging.LogRecord):
+ mw.appendText(logging.getLevelName(event.levelno) + ': ' + event.getMessage() + '\n')
+
+ if isinstance(event, bb.event.CacheLoadStarted):
+ self.parse_total = event.total
+ if isinstance(event, bb.event.CacheLoadProgress):
+ x = event.current
+ y = self.parse_total
+ mw.setStatus("Loading Cache: %s [%2d %%]" % ( next(parsespin), x*100/y ) )
+ if isinstance(event, bb.event.CacheLoadCompleted):
+ mw.setStatus("Idle")
+ mw.appendText("Loaded %d entries from dependency cache.\n"
+ % ( event.num_entries))
+
+ if isinstance(event, bb.event.ParseStarted):
+ self.parse_total = event.total
+ if isinstance(event, bb.event.ParseProgress):
+ x = event.current
+ y = self.parse_total
+ mw.setStatus("Parsing Recipes: %s [%2d %%]" % ( next(parsespin), x*100/y ) )
+ if isinstance(event, bb.event.ParseCompleted):
+ mw.setStatus("Idle")
+ mw.appendText("Parsing finished. %d cached, %d parsed, %d skipped, %d masked.\n"
+ % ( event.cached, event.parsed, event.skipped, event.masked ))
+
+# if isinstance(event, bb.build.TaskFailed):
+# if event.logfile:
+# if data.getVar("BBINCLUDELOGS", d):
+# bb.error("log data follows (%s)" % logfile)
+# number_of_lines = data.getVar("BBINCLUDELOGS_LINES", d)
+# if number_of_lines:
+# subprocess.check_call('tail -n%s %s' % (number_of_lines, logfile), shell=True)
+# else:
+# f = open(logfile, "r")
+# while True:
+# l = f.readline()
+# if l == '':
+# break
+# l = l.rstrip()
+# print '| %s' % l
+# f.close()
+# else:
+# bb.error("see log in %s" % logfile)
+
+ if isinstance(event, bb.command.CommandCompleted):
+ # stop so the user can see the result of the build, but
+ # also allow them to now exit with a single ^C
+ shutdown = 2
+ if isinstance(event, bb.command.CommandFailed):
+ mw.appendText(str(event))
+ time.sleep(2)
+ exitflag = True
+ if isinstance(event, bb.command.CommandExit):
+ exitflag = True
+ if isinstance(event, bb.cooker.CookerExit):
+ exitflag = True
+
+ if isinstance(event, bb.event.LogExecTTY):
+ mw.appendText('WARN: ' + event.msg + '\n')
+ if helper.needUpdate:
+ activetasks, failedtasks = helper.getTasks()
+ taw.erase()
+ taw.setText(0, 0, "")
+ if activetasks:
+ taw.appendText("Active Tasks:\n")
+ for task in activetasks.values():
+ taw.appendText(task["title"] + '\n')
+ if failedtasks:
+ taw.appendText("Failed Tasks:\n")
+ for task in failedtasks:
+ taw.appendText(task["title"] + '\n')
+
+ curses.doupdate()
+ except EnvironmentError as ioerror:
+                # ignore interrupted io (errno 4 is EINTR)
+ if ioerror.args[0] == 4:
+ pass
+
+ except KeyboardInterrupt:
+ if shutdown == 2:
+ mw.appendText("Third Keyboard Interrupt, exit.\n")
+ exitflag = True
+ if shutdown == 1:
+ mw.appendText("Second Keyboard Interrupt, stopping...\n")
+ _, error = server.runCommand(["stateForceShutdown"])
+ if error:
+ print("Unable to cleanly stop: %s" % error)
+ if shutdown == 0:
+ mw.appendText("Keyboard Interrupt, closing down...\n")
+ _, error = server.runCommand(["stateShutdown"])
+ if error:
+ print("Unable to cleanly shutdown: %s" % error)
+ shutdown = shutdown + 1
+ pass
+
+def main(server, eventHandler, params):
+ if not os.isatty(sys.stdout.fileno()):
+ print("FATAL: Unable to run 'ncurses' UI without a TTY.")
+ return
+ ui = NCursesUI()
+ try:
+ curses.wrapper(ui.main, server, eventHandler, params)
+ except:
+ import traceback
+ traceback.print_exc()
diff --git a/poky/bitbake/lib/bb/ui/taskexp.py b/poky/bitbake/lib/bb/ui/taskexp.py
new file mode 100644
index 000000000..0e8e9d4cf
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/taskexp.py
@@ -0,0 +1,328 @@
+#
+# BitBake Graphical GTK based Dependency Explorer
+#
+# Copyright (C) 2007 Ross Burton
+# Copyright (C) 2007 - 2008 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import gi
+gi.require_version('Gtk', '3.0')
+from gi.repository import Gtk, Gdk, GObject
+from multiprocessing import Queue
+import threading
+from xmlrpc import client
+import time
+import bb
+import bb.event
+
+# Package Model
+COL_PKG_NAME = 0
+
+# Dependency Model
+(TYPE_DEP, TYPE_RDEP) = (0, 1)
+(COL_DEP_TYPE, COL_DEP_PARENT, COL_DEP_PACKAGE) = (0, 1, 2)
+
+
+class PackageDepView(Gtk.TreeView):
+ def __init__(self, model, dep_type, label):
+ Gtk.TreeView.__init__(self)
+ self.current = None
+ self.dep_type = dep_type
+ self.filter_model = model.filter_new()
+ self.filter_model.set_visible_func(self._filter, data=None)
+ self.set_model(self.filter_model)
+ self.append_column(Gtk.TreeViewColumn(label, Gtk.CellRendererText(), text=COL_DEP_PACKAGE))
+
+ def _filter(self, model, iter, data):
+ this_type = model[iter][COL_DEP_TYPE]
+ package = model[iter][COL_DEP_PARENT]
+ if this_type != self.dep_type: return False
+ return package == self.current
+
+ def set_current_package(self, package):
+ self.current = package
+ self.filter_model.refilter()
+
+
+class PackageReverseDepView(Gtk.TreeView):
+ def __init__(self, model, label):
+ Gtk.TreeView.__init__(self)
+ self.current = None
+ self.filter_model = model.filter_new()
+ self.filter_model.set_visible_func(self._filter)
+ self.sort_model = self.filter_model.sort_new_with_model()
+ self.sort_model.set_sort_column_id(COL_DEP_PARENT, Gtk.SortType.ASCENDING)
+ self.set_model(self.sort_model)
+ self.append_column(Gtk.TreeViewColumn(label, Gtk.CellRendererText(), text=COL_DEP_PARENT))
+
+ def _filter(self, model, iter, data):
+ package = model[iter][COL_DEP_PACKAGE]
+ return package == self.current
+
+ def set_current_package(self, package):
+ self.current = package
+ self.filter_model.refilter()
+
+
+class DepExplorer(Gtk.Window):
+ def __init__(self):
+ Gtk.Window.__init__(self)
+ self.set_title("Task Dependency Explorer")
+ self.set_default_size(500, 500)
+ self.connect("delete-event", Gtk.main_quit)
+
+ # Create the data models
+ self.pkg_model = Gtk.ListStore(GObject.TYPE_STRING)
+ self.pkg_model.set_sort_column_id(COL_PKG_NAME, Gtk.SortType.ASCENDING)
+ self.depends_model = Gtk.ListStore(GObject.TYPE_INT, GObject.TYPE_STRING, GObject.TYPE_STRING)
+ self.depends_model.set_sort_column_id(COL_DEP_PACKAGE, Gtk.SortType.ASCENDING)
+
+ pane = Gtk.HPaned()
+ pane.set_position(250)
+ self.add(pane)
+
+ # The master list of packages
+ scrolled = Gtk.ScrolledWindow()
+ scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
+ scrolled.set_shadow_type(Gtk.ShadowType.IN)
+
+ self.pkg_treeview = Gtk.TreeView(self.pkg_model)
+ self.pkg_treeview.get_selection().connect("changed", self.on_cursor_changed)
+ column = Gtk.TreeViewColumn("Package", Gtk.CellRendererText(), text=COL_PKG_NAME)
+ self.pkg_treeview.append_column(column)
+ pane.add1(scrolled)
+ scrolled.add(self.pkg_treeview)
+
+ box = Gtk.VBox(homogeneous=True, spacing=4)
+
+ # Task Depends
+ scrolled = Gtk.ScrolledWindow()
+ scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
+ scrolled.set_shadow_type(Gtk.ShadowType.IN)
+ self.dep_treeview = PackageDepView(self.depends_model, TYPE_DEP, "Dependencies")
+ self.dep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
+ scrolled.add(self.dep_treeview)
+ box.add(scrolled)
+ pane.add2(box)
+
+ # Reverse Task Depends
+ scrolled = Gtk.ScrolledWindow()
+ scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
+ scrolled.set_shadow_type(Gtk.ShadowType.IN)
+ self.revdep_treeview = PackageReverseDepView(self.depends_model, "Dependent Tasks")
+ self.revdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PARENT)
+ scrolled.add(self.revdep_treeview)
+        box.add(scrolled)
+        # 'box' was already attached as the pane's second child above, so it
+        # does not need to be added again.
+
+ self.show_all()
+
+ def on_package_activated(self, treeview, path, column, data_col):
+ model = treeview.get_model()
+ package = model.get_value(model.get_iter(path), data_col)
+
+ pkg_path = []
+ def finder(model, path, iter, needle):
+ package = model.get_value(iter, COL_PKG_NAME)
+ if package == needle:
+ pkg_path.append(path)
+ return True
+ else:
+ return False
+ self.pkg_model.foreach(finder, package)
+ if pkg_path:
+ self.pkg_treeview.get_selection().select_path(pkg_path[0])
+ self.pkg_treeview.scroll_to_cell(pkg_path[0])
+
+ def on_cursor_changed(self, selection):
+ (model, it) = selection.get_selected()
+ if it is None:
+ current_package = None
+ else:
+ current_package = model.get_value(it, COL_PKG_NAME)
+ self.dep_treeview.set_current_package(current_package)
+ self.revdep_treeview.set_current_package(current_package)
+
+
+ def parse(self, depgraph):
+ for task in depgraph["tdepends"]:
+ self.pkg_model.insert(0, (task,))
+ for depend in depgraph["tdepends"][task]:
+ self.depends_model.insert (0, (TYPE_DEP, task, depend))
+
+
+class gtkthread(threading.Thread):
+ quit = threading.Event()
+ def __init__(self, shutdown):
+ threading.Thread.__init__(self)
+        self.daemon = True
+ self.shutdown = shutdown
+ if not Gtk.init_check()[0]:
+ sys.stderr.write("Gtk+ init failed. Make sure DISPLAY variable is set.\n")
+ gtkthread.quit.set()
+
+ def run(self):
+ GObject.threads_init()
+ Gdk.threads_init()
+ Gtk.main()
+ gtkthread.quit.set()
+
+
+def main(server, eventHandler, params):
+ shutdown = 0
+
+ gtkgui = gtkthread(shutdown)
+ gtkgui.start()
+
+ try:
+ params.updateFromServer(server)
+ cmdline = params.parseActions()
+ if not cmdline:
+ print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
+ return 1
+ if 'msg' in cmdline and cmdline['msg']:
+ print(cmdline['msg'])
+ return 1
+ cmdline = cmdline['action']
+ if not cmdline or cmdline[0] != "generateDotGraph":
+ print("This UI requires the -g option")
+ return 1
+ ret, error = server.runCommand(["generateDepTreeEvent", cmdline[1], cmdline[2]])
+ if error:
+ print("Error running command '%s': %s" % (cmdline, error))
+ return 1
+ elif ret != True:
+ print("Error running command '%s': returned %s" % (cmdline, ret))
+ return 1
+ except client.Fault as x:
+ print("XMLRPC Fault getting commandline:\n %s" % x)
+ return
+
+    if gtkthread.quit.is_set():
+ return
+
+ Gdk.threads_enter()
+ dep = DepExplorer()
+ bardialog = Gtk.Dialog(parent=dep,
+ flags=Gtk.DialogFlags.MODAL|Gtk.DialogFlags.DESTROY_WITH_PARENT)
+ bardialog.set_default_size(400, 50)
+ box = bardialog.get_content_area()
+ pbar = Gtk.ProgressBar()
+ box.pack_start(pbar, True, True, 0)
+ bardialog.show_all()
+ bardialog.connect("delete-event", Gtk.main_quit)
+ Gdk.threads_leave()
+
+ progress_total = 0
+ while True:
+ try:
+ event = eventHandler.waitEvent(0.25)
+            if gtkthread.quit.is_set():
+ _, error = server.runCommand(["stateForceShutdown"])
+ if error:
+ print('Unable to cleanly stop: %s' % error)
+ break
+
+ if event is None:
+ continue
+
+ if isinstance(event, bb.event.CacheLoadStarted):
+ progress_total = event.total
+ Gdk.threads_enter()
+ bardialog.set_title("Loading Cache")
+ pbar.set_fraction(0.0)
+ Gdk.threads_leave()
+
+ if isinstance(event, bb.event.CacheLoadProgress):
+ x = event.current
+ Gdk.threads_enter()
+ pbar.set_fraction(x * 1.0 / progress_total)
+ Gdk.threads_leave()
+ continue
+
+ if isinstance(event, bb.event.CacheLoadCompleted):
+ continue
+
+ if isinstance(event, bb.event.ParseStarted):
+ progress_total = event.total
+ if progress_total == 0:
+ continue
+ Gdk.threads_enter()
+ pbar.set_fraction(0.0)
+ bardialog.set_title("Processing recipes")
+ Gdk.threads_leave()
+
+ if isinstance(event, bb.event.ParseProgress):
+ x = event.current
+ Gdk.threads_enter()
+ pbar.set_fraction(x * 1.0 / progress_total)
+ Gdk.threads_leave()
+ continue
+
+ if isinstance(event, bb.event.ParseCompleted):
+ Gdk.threads_enter()
+ bardialog.set_title("Generating dependency tree")
+ Gdk.threads_leave()
+ continue
+
+ if isinstance(event, bb.event.DepTreeGenerated):
+ Gdk.threads_enter()
+ bardialog.hide()
+ dep.parse(event._depgraph)
+ Gdk.threads_leave()
+
+ if isinstance(event, bb.command.CommandCompleted):
+ continue
+
+ if isinstance(event, bb.event.NoProvider):
+ print(str(event))
+
+ _, error = server.runCommand(["stateShutdown"])
+ if error:
+ print('Unable to cleanly shutdown: %s' % error)
+ break
+
+ if isinstance(event, bb.command.CommandFailed):
+ print(str(event))
+ return event.exitcode
+
+ if isinstance(event, bb.command.CommandExit):
+ return event.exitcode
+
+ if isinstance(event, bb.cooker.CookerExit):
+ break
+
+ continue
+ except EnvironmentError as ioerror:
+            # ignore interrupted io (errno 4 is EINTR)
+ if ioerror.args[0] == 4:
+ pass
+ except KeyboardInterrupt:
+ if shutdown == 2:
+ print("\nThird Keyboard Interrupt, exit.\n")
+ break
+ if shutdown == 1:
+ print("\nSecond Keyboard Interrupt, stopping...\n")
+ _, error = server.runCommand(["stateForceShutdown"])
+ if error:
+ print('Unable to cleanly stop: %s' % error)
+ if shutdown == 0:
+ print("\nKeyboard Interrupt, closing down...\n")
+ _, error = server.runCommand(["stateShutdown"])
+ if error:
+ print('Unable to cleanly shutdown: %s' % error)
+ shutdown = shutdown + 1
+ pass
diff --git a/poky/bitbake/lib/bb/ui/toasterui.py b/poky/bitbake/lib/bb/ui/toasterui.py
new file mode 100644
index 000000000..88cec3759
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/toasterui.py
@@ -0,0 +1,487 @@
+#
+# BitBake ToasterUI Implementation
+# based on (No)TTY UI Implementation by Richard Purdie
+#
+# Handling output to TTYs or files (no TTY)
+#
+# Copyright (C) 2006-2012 Richard Purdie
+# Copyright (C) 2013 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from __future__ import division
+import time
+import sys
+try:
+ import bb
+except RuntimeError as exc:
+ sys.exit(str(exc))
+
+from bb.ui import uihelper
+from bb.ui.buildinfohelper import BuildInfoHelper
+
+import bb.msg
+import logging
+import os
+
+# pylint: disable=invalid-name
+# module properties for UI modules are read by bitbake and the contract should not be broken
+
+
+featureSet = [bb.cooker.CookerFeatures.HOB_EXTRA_CACHES, bb.cooker.CookerFeatures.BASEDATASTORE_TRACKING, bb.cooker.CookerFeatures.SEND_SANITYEVENTS]
+
+logger = logging.getLogger("ToasterLogger")
+interactive = sys.stdout.isatty()
+
+def _log_settings_from_server(server):
+ # Get values of variables which control our output
+ includelogs, error = server.runCommand(["getVariable", "BBINCLUDELOGS"])
+ if error:
+ logger.error("Unable to get the value of BBINCLUDELOGS variable: %s", error)
+ raise BaseException(error)
+ loglines, error = server.runCommand(["getVariable", "BBINCLUDELOGS_LINES"])
+ if error:
+ logger.error("Unable to get the value of BBINCLUDELOGS_LINES variable: %s", error)
+ raise BaseException(error)
+ consolelogfile, error = server.runCommand(["getVariable", "BB_CONSOLELOG"])
+ if error:
+ logger.error("Unable to get the value of BB_CONSOLELOG variable: %s", error)
+ raise BaseException(error)
+ return consolelogfile
+
+# create a log file for a single build and direct the logger at it;
+# log file name is timestamped to the millisecond (depending
+# on system clock accuracy) to ensure it doesn't overlap with
+# other log file names
+#
+# returns (log file, path to log file) for a build
+def _open_build_log(log_dir):
+ format_str = "%(levelname)s: %(message)s"
+
+ now = time.time()
+ now_ms = int((now - int(now)) * 1000)
+ time_str = time.strftime('build_%Y%m%d_%H%M%S', time.localtime(now))
+ log_file_name = time_str + ('.%d.log' % now_ms)
+ build_log_file_path = os.path.join(log_dir, log_file_name)
+
+ build_log = logging.FileHandler(build_log_file_path)
+
+ logformat = bb.msg.BBLogFormatter(format_str)
+ build_log.setFormatter(logformat)
+
+ bb.msg.addDefaultlogFilter(build_log)
+ logger.addHandler(build_log)
+
+ return (build_log, build_log_file_path)
+
+# stop logging to the build log if it exists
+def _close_build_log(build_log):
+ if build_log:
+ build_log.flush()
+ build_log.close()
+ logger.removeHandler(build_log)
+
+_evt_list = [
+ "bb.build.TaskBase",
+ "bb.build.TaskFailed",
+ "bb.build.TaskFailedSilent",
+ "bb.build.TaskStarted",
+ "bb.build.TaskSucceeded",
+ "bb.command.CommandCompleted",
+ "bb.command.CommandExit",
+ "bb.command.CommandFailed",
+ "bb.cooker.CookerExit",
+ "bb.event.BuildInit",
+ "bb.event.BuildCompleted",
+ "bb.event.BuildStarted",
+ "bb.event.CacheLoadCompleted",
+ "bb.event.CacheLoadProgress",
+ "bb.event.CacheLoadStarted",
+ "bb.event.ConfigParsed",
+ "bb.event.DepTreeGenerated",
+ "bb.event.LogExecTTY",
+ "bb.event.MetadataEvent",
+ "bb.event.MultipleProviders",
+ "bb.event.NoProvider",
+ "bb.event.ParseCompleted",
+ "bb.event.ParseProgress",
+ "bb.event.ParseStarted",
+ "bb.event.RecipeParsed",
+ "bb.event.SanityCheck",
+ "bb.event.SanityCheckPassed",
+ "bb.event.TreeDataPreparationCompleted",
+ "bb.event.TreeDataPreparationStarted",
+ "bb.runqueue.runQueueTaskCompleted",
+ "bb.runqueue.runQueueTaskFailed",
+ "bb.runqueue.runQueueTaskSkipped",
+ "bb.runqueue.runQueueTaskStarted",
+ "bb.runqueue.sceneQueueTaskCompleted",
+ "bb.runqueue.sceneQueueTaskFailed",
+ "bb.runqueue.sceneQueueTaskStarted",
+ "logging.LogRecord"]
+
+def main(server, eventHandler, params):
+ # set to a logging.FileHandler instance when a build starts;
+ # see _open_build_log()
+ build_log = None
+
+ # set to the log path when a build starts
+ build_log_file_path = None
+
+ helper = uihelper.BBUIHelper()
+
+ # TODO don't use log output to determine when bitbake has started
+ #
+ # WARNING: this log handler cannot be removed, as localhostbecontroller
+ # relies on output in the toaster_ui.log file to determine whether
+ # the bitbake server has started, which only happens if
+ # this logger is setup here (see the TODO in the loop below)
+ console = logging.StreamHandler(sys.stdout)
+ format_str = "%(levelname)s: %(message)s"
+ formatter = bb.msg.BBLogFormatter(format_str)
+ bb.msg.addDefaultlogFilter(console)
+ console.setFormatter(formatter)
+ logger.addHandler(console)
+ logger.setLevel(logging.INFO)
+ llevel, debug_domains = bb.msg.constructLogOptions()
+ result, error = server.runCommand(["setEventMask", server.getEventHandle(), llevel, debug_domains, _evt_list])
+ if not result or error:
+ logger.error("can't set event mask: %s", error)
+ return 1
+
+ # verify and warn
+ build_history_enabled = True
+ inheritlist, _ = server.runCommand(["getVariable", "INHERIT"])
+
+ if not "buildhistory" in inheritlist.split(" "):
+ logger.warning("buildhistory is not enabled. Please enable INHERIT += \"buildhistory\" to see image details.")
+ build_history_enabled = False
+
+ if not "buildstats" in inheritlist.split(" "):
+ logger.warning("buildstats is not enabled. Please enable INHERIT += \"buildstats\" to generate build statistics.")
+
+ if not params.observe_only:
+ params.updateFromServer(server)
+ params.updateToServer(server, os.environ.copy())
+ cmdline = params.parseActions()
+ if not cmdline:
+ print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
+ return 1
+ if 'msg' in cmdline and cmdline['msg']:
+ logger.error(cmdline['msg'])
+ return 1
+
+ ret, error = server.runCommand(cmdline['action'])
+ if error:
+ logger.error("Command '%s' failed: %s" % (cmdline, error))
+ return 1
+ elif ret != True:
+ logger.error("Command '%s' failed: returned %s" % (cmdline, ret))
+ return 1
+
+ # set to 1 when toasterui needs to shut down
+ main.shutdown = 0
+
+ interrupted = False
+ return_value = 0
+ errors = 0
+ warnings = 0
+ taskfailures = []
+ first = True
+
+ buildinfohelper = BuildInfoHelper(server, build_history_enabled,
+ os.getenv('TOASTER_BRBE'))
+
+ # write our own log files into bitbake's log directory;
+ # we're only interested in the path to the parent directory of
+ # this file, as we're writing our own logs into the same directory
+ consolelogfile = _log_settings_from_server(server)
+ log_dir = os.path.dirname(consolelogfile)
+ bb.utils.mkdirhier(log_dir)
+
+ while True:
+ try:
+ event = eventHandler.waitEvent(0.25)
+ if first:
+ first = False
+
+ # TODO don't use log output to determine when bitbake has started
+ #
+ # this is the line localhostbecontroller needs to
+ # see in toaster_ui.log which it uses to decide whether
+ # the bitbake server has started...
+ logger.info("ToasterUI waiting for events")
+
+ if event is None:
+ if main.shutdown > 0:
+ # if shutting down, close any open build log first
+ _close_build_log(build_log)
+
+ break
+ continue
+
+ helper.eventHandler(event)
+
+ # pylint: disable=protected-access
+ # the code will look into the protected variables of the event; no easy way around this
+
+ if isinstance(event, bb.event.HeartbeatEvent):
+ continue
+
+ if isinstance(event, bb.event.ParseStarted):
+ if not (build_log and build_log_file_path):
+ build_log, build_log_file_path = _open_build_log(log_dir)
+
+ buildinfohelper.store_started_build()
+ buildinfohelper.save_build_log_file_path(build_log_file_path)
+ buildinfohelper.set_recipes_to_parse(event.total)
+ continue
+
+ # create a build object in buildinfohelper from either BuildInit
+ # (if available) or BuildStarted (for jethro and previous versions)
+ if isinstance(event, (bb.event.BuildStarted, bb.event.BuildInit)):
+ if not (build_log and build_log_file_path):
+ build_log, build_log_file_path = _open_build_log(log_dir)
+
+ buildinfohelper.save_build_targets(event)
+ buildinfohelper.save_build_log_file_path(build_log_file_path)
+
+ # get additional data from BuildStarted
+ if isinstance(event, bb.event.BuildStarted):
+ buildinfohelper.save_build_layers_and_variables()
+ continue
+
+ if isinstance(event, bb.event.ParseProgress):
+ buildinfohelper.set_recipes_parsed(event.current)
+ continue
+
+ if isinstance(event, bb.event.ParseCompleted):
+ buildinfohelper.set_recipes_parsed(event.total)
+ continue
+
+ if isinstance(event, (bb.build.TaskStarted, bb.build.TaskSucceeded, bb.build.TaskFailedSilent)):
+ buildinfohelper.update_and_store_task(event)
+ logger.info("Logfile for task %s", event.logfile)
+ continue
+
+ if isinstance(event, bb.build.TaskBase):
+ logger.info(event._message)
+
+ if isinstance(event, bb.event.LogExecTTY):
+ logger.info(event.msg)
+ continue
+
+ if isinstance(event, logging.LogRecord):
+ if event.levelno == -1:
+ event.levelno = formatter.ERROR
+
+ buildinfohelper.store_log_event(event)
+
+ if event.levelno >= formatter.ERROR:
+ errors = errors + 1
+ elif event.levelno == formatter.WARNING:
+ warnings = warnings + 1
+
+ # For "normal" logging conditions, don't show note logs from tasks
+ # but do show them if the user has changed the default log level to
+ # include verbose/debug messages
+ if event.taskpid != 0 and event.levelno <= formatter.NOTE:
+ continue
+
+ logger.handle(event)
+ continue
+
+ if isinstance(event, bb.build.TaskFailed):
+ buildinfohelper.update_and_store_task(event)
+ logfile = event.logfile
+ if logfile and os.path.exists(logfile):
+ bb.error("Logfile of failure stored in: %s" % logfile)
+ continue
+
+            # these events are unprocessed now, but may be used in the future to log
+            # timing and error information from the parsing phase in Toaster
+ if isinstance(event, (bb.event.SanityCheckPassed, bb.event.SanityCheck)):
+ continue
+ if isinstance(event, bb.event.CacheLoadStarted):
+ continue
+ if isinstance(event, bb.event.CacheLoadProgress):
+ continue
+ if isinstance(event, bb.event.CacheLoadCompleted):
+ continue
+ if isinstance(event, bb.event.MultipleProviders):
+ logger.info(str(event))
+ continue
+
+ if isinstance(event, bb.event.NoProvider):
+ errors = errors + 1
+ text = str(event)
+ logger.error(text)
+ buildinfohelper.store_log_error(text)
+ continue
+
+ if isinstance(event, bb.event.ConfigParsed):
+ continue
+ if isinstance(event, bb.event.RecipeParsed):
+ continue
+
+ # end of saved events
+
+ if isinstance(event, (bb.runqueue.sceneQueueTaskStarted, bb.runqueue.runQueueTaskStarted, bb.runqueue.runQueueTaskSkipped)):
+ buildinfohelper.store_started_task(event)
+ continue
+
+ if isinstance(event, bb.runqueue.runQueueTaskCompleted):
+ buildinfohelper.update_and_store_task(event)
+ continue
+
+ if isinstance(event, bb.runqueue.runQueueTaskFailed):
+ buildinfohelper.update_and_store_task(event)
+ taskfailures.append(event.taskstring)
+ logger.error(str(event))
+ continue
+
+ if isinstance(event, (bb.runqueue.sceneQueueTaskCompleted, bb.runqueue.sceneQueueTaskFailed)):
+ buildinfohelper.update_and_store_task(event)
+ continue
+
+
+ if isinstance(event, (bb.event.TreeDataPreparationStarted, bb.event.TreeDataPreparationCompleted)):
+ continue
+
+ if isinstance(event, (bb.event.BuildCompleted, bb.command.CommandFailed)):
+
+ errorcode = 0
+ if isinstance(event, bb.command.CommandFailed):
+ errors += 1
+ errorcode = 1
+ logger.error(str(event))
+ elif isinstance(event, bb.event.BuildCompleted):
+ buildinfohelper.scan_image_artifacts()
+ buildinfohelper.clone_required_sdk_artifacts()
+
+ # turn off logging to the current build log
+ _close_build_log(build_log)
+
+ # reset ready for next BuildStarted
+ build_log = None
+
+ # update the build info helper on BuildCompleted, not on CommandXXX
+ buildinfohelper.update_build_information(event, errors, warnings, taskfailures)
+
+ brbe = buildinfohelper.brbe
+ buildinfohelper.close(errorcode)
+
+ # we start a new build info
+ if params.observe_only:
+ logger.debug("ToasterUI prepared for new build")
+ errors = 0
+ warnings = 0
+ taskfailures = []
+ buildinfohelper = BuildInfoHelper(server, build_history_enabled)
+ else:
+ main.shutdown = 1
+
+ logger.info("ToasterUI build done, brbe: %s", brbe)
+ continue
+
+ if isinstance(event, (bb.command.CommandCompleted,
+ bb.command.CommandFailed,
+ bb.command.CommandExit)):
+ if params.observe_only:
+ errorcode = 0
+ else:
+ main.shutdown = 1
+
+ continue
+
+ if isinstance(event, bb.event.MetadataEvent):
+ if event.type == "SinglePackageInfo":
+ buildinfohelper.store_build_package_information(event)
+ elif event.type == "LayerInfo":
+ buildinfohelper.store_layer_info(event)
+ elif event.type == "BuildStatsList":
+ buildinfohelper.store_tasks_stats(event)
+ elif event.type == "ImagePkgList":
+ buildinfohelper.store_target_package_data(event)
+ elif event.type == "MissedSstate":
+ buildinfohelper.store_missed_state_tasks(event)
+ elif event.type == "SDKArtifactInfo":
+ buildinfohelper.scan_sdk_artifacts(event)
+ elif event.type == "SetBRBE":
+ buildinfohelper.brbe = buildinfohelper._get_data_from_event(event)
+ elif event.type == "TaskArtifacts":
+ buildinfohelper.scan_task_artifacts(event)
+ elif event.type == "OSErrorException":
+ logger.error(event)
+ else:
+ logger.error("Unprocessed MetadataEvent %s", event.type)
+ continue
+
+ if isinstance(event, bb.cooker.CookerExit):
+ # shutdown when bitbake server shuts down
+ main.shutdown = 1
+ continue
+
+ if isinstance(event, bb.event.DepTreeGenerated):
+ buildinfohelper.store_dependency_information(event)
+ continue
+
+ logger.warning("Unknown event: %s", event)
+ return_value += 1
+
+ except EnvironmentError as ioerror:
+ logger.warning("EnvironmentError: %s" % ioerror)
+ # ignore interrupted io system calls
+ if ioerror.args[0] == 4: # errno 4 is EINTR
+ logger.warning("Skipped EINTR: %s" % ioerror)
+ else:
+ raise
+ except KeyboardInterrupt:
+ if params.observe_only:
+ print("\nKeyboard Interrupt, exiting observer...")
+ main.shutdown = 2
+ if not params.observe_only and main.shutdown == 1:
+ print("\nSecond Keyboard Interrupt, stopping...\n")
+ _, error = server.runCommand(["stateForceShutdown"])
+ if error:
+ logger.error("Unable to cleanly stop: %s" % error)
+ if not params.observe_only and main.shutdown == 0:
+ print("\nKeyboard Interrupt, closing down...\n")
+ interrupted = True
+ _, error = server.runCommand(["stateShutdown"])
+ if error:
+ logger.error("Unable to cleanly shutdown: %s" % error)
+ buildinfohelper.cancel_cli_build()
+ main.shutdown = main.shutdown + 1
+ except Exception as e:
+ # print errors to log
+ import traceback
+ from pprint import pformat
+ exception_data = traceback.format_exc()
+ logger.error("%s\n%s" , e, exception_data)
+
+ # save them to database, if possible; if it fails, we already logged to console.
+ try:
+ buildinfohelper.store_log_exception("%s\n%s" % (str(e), exception_data))
+ except Exception as ce:
+ logger.error("CRITICAL - Failed to to save toaster exception to the database: %s", str(ce))
+
+ # make sure we return with an error
+ return_value += 1
+
+ if interrupted and return_value == 0:
+ return_value += 1
+
+ logger.warning("Return value is %d", return_value)
+ return return_value
diff --git a/poky/bitbake/lib/bb/ui/uievent.py b/poky/bitbake/lib/bb/ui/uievent.py
new file mode 100644
index 000000000..9542b911c
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/uievent.py
@@ -0,0 +1,161 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
+# Copyright (C) 2006 - 2007 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+"""
+Use this class to fork off a thread to recieve event callbacks from the bitbake
+server and queue them for the UI to process. This process must be used to avoid
+client/server deadlocks.
+"""
+
+import socket, threading, pickle
+import collections.abc
+import logging
+
+import bb
+import bb.utils
+from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
+
+# 'bb' and 'logger' are used below (bb.warn, bb.utils.set_process_name,
+# logger.error) but were never imported or defined in this file; the logger
+# name is a reasonable assumption following the other BitBake UI modules.
+logger = logging.getLogger("BitBake.UI.UIEvent")
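+
+# Typical usage from a UI main loop (a sketch, mirroring the UIs in this
+# directory; 'server' is the connected BitBake server proxy):
+#   eventHandler = BBUIEventQueue(server)
+#   while True:
+#       event = eventHandler.waitEvent(0.25)
+#       if event is None:
+#           continue
+#       ...  # dispatch on the event type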
+
+class BBUIEventQueue:
+    # clientinfo is an (address, port) tuple; port 0 lets the OS pick a free
+    # port. The original default was the single string "localhost, 0", which
+    # is not a valid socket address.
+    def __init__(self, BBServer, clientinfo=("localhost", 0)):
+
+ self.eventQueue = []
+ self.eventQueueLock = threading.Lock()
+ self.eventQueueNotify = threading.Event()
+
+ self.BBServer = BBServer
+ self.clientinfo = clientinfo
+
+ server = UIXMLRPCServer(self.clientinfo)
+ self.host, self.port = server.socket.getsockname()
+
+ server.register_function( self.system_quit, "event.quit" )
+ server.register_function( self.send_event, "event.sendpickle" )
+ server.socket.settimeout(1)
+
+ self.EventHandle = None
+
+ # the event handler registration may fail here due to cooker being in invalid state
+ # this is a transient situation, and we should retry a couple of times before
+ # giving up
+
+ for count_tries in range(5):
+ ret = self.BBServer.registerEventHandler(self.host, self.port)
+
+            if isinstance(ret, collections.abc.Iterable):
+ self.EventHandle, error = ret
+ else:
+ self.EventHandle = ret
+ error = ""
+
+            if self.EventHandle is not None:
+ break
+
+ errmsg = "Could not register UI event handler. Error: %s, host %s, "\
+ "port %d" % (error, self.host, self.port)
+ bb.warn("%s, retry" % errmsg)
+
+ import time
+ time.sleep(1)
+ else:
+ raise Exception(errmsg)
+
+ self.server = server
+
+        self.t = threading.Thread(target=self.startCallbackHandler)
+        self.t.daemon = True
+        self.t.start()
+
+ def getEvent(self):
+
+ self.eventQueueLock.acquire()
+
+ if len(self.eventQueue) == 0:
+ self.eventQueueLock.release()
+ return None
+
+ item = self.eventQueue.pop(0)
+
+ if len(self.eventQueue) == 0:
+ self.eventQueueNotify.clear()
+
+ self.eventQueueLock.release()
+ return item
+
+ def waitEvent(self, delay):
+ self.eventQueueNotify.wait(delay)
+ return self.getEvent()
+
+ def queue_event(self, event):
+ self.eventQueueLock.acquire()
+ self.eventQueue.append(event)
+ self.eventQueueNotify.set()
+ self.eventQueueLock.release()
+
+ def send_event(self, event):
+ self.queue_event(pickle.loads(event))
+
+ def startCallbackHandler(self):
+
+ self.server.timeout = 1
+ bb.utils.set_process_name("UIEventQueue")
+ while not self.server.quit:
+ try:
+ self.server.handle_request()
+ except Exception as e:
+ import traceback
+ logger.error("BBUIEventQueue.startCallbackHandler: Exception while trying to handle request: %s\n%s" % (e, traceback.format_exc()))
+
+ self.server.server_close()
+
+ def system_quit( self ):
+ """
+ Shut down the callback thread
+ """
+ try:
+ self.BBServer.unregisterEventHandler(self.EventHandle)
+ except:
+ pass
+ self.server.quit = True
+
+class UIXMLRPCServer (SimpleXMLRPCServer):
+
+ def __init__( self, interface ):
+ self.quit = False
+ SimpleXMLRPCServer.__init__( self,
+ interface,
+ requestHandler=SimpleXMLRPCRequestHandler,
+ logRequests=False, allow_none=True, use_builtin_types=True)
+
+ def get_request(self):
+ while not self.quit:
+ try:
+ sock, addr = self.socket.accept()
+ sock.settimeout(1)
+ return (sock, addr)
+ except socket.timeout:
+ pass
+ return (None, None)
+
+ def close_request(self, request):
+ if request is None:
+ return
+ SimpleXMLRPCServer.close_request(self, request)
+
+ def process_request(self, request, client_address):
+ if request is None:
+ return
+ SimpleXMLRPCServer.process_request(self, request, client_address)
+
diff --git a/poky/bitbake/lib/bb/ui/uihelper.py b/poky/bitbake/lib/bb/ui/uihelper.py
new file mode 100644
index 000000000..963c1ea2d
--- /dev/null
+++ b/poky/bitbake/lib/bb/ui/uihelper.py
@@ -0,0 +1,70 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
+# Copyright (C) 2006 - 2007 Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import bb.build
+import time
+
+class BBUIHelper:
+ def __init__(self):
+ self.needUpdate = False
+ self.running_tasks = {}
+ # Running PIDs preserves the order tasks were executed in
+ self.running_pids = []
+ self.failed_tasks = []
+ self.tasknumber_current = 0
+ self.tasknumber_total = 0
+
+ def eventHandler(self, event):
+ if isinstance(event, bb.build.TaskStarted):
+ if event._mc != "default":
+ self.running_tasks[event.pid] = { 'title' : "mc:%s:%s %s" % (event._mc, event._package, event._task), 'starttime' : time.time() }
+ else:
+ self.running_tasks[event.pid] = { 'title' : "%s %s" % (event._package, event._task), 'starttime' : time.time() }
+ self.running_pids.append(event.pid)
+ self.needUpdate = True
+ elif isinstance(event, bb.build.TaskSucceeded):
+ del self.running_tasks[event.pid]
+ self.running_pids.remove(event.pid)
+ self.needUpdate = True
+ elif isinstance(event, bb.build.TaskFailedSilent):
+ del self.running_tasks[event.pid]
+ self.running_pids.remove(event.pid)
+ # Don't add to the failed tasks list since this is e.g. a setscene task failure
+ self.needUpdate = True
+ elif isinstance(event, bb.build.TaskFailed):
+ del self.running_tasks[event.pid]
+ self.running_pids.remove(event.pid)
+ self.failed_tasks.append( { 'title' : "%s %s" % (event._package, event._task)})
+ self.needUpdate = True
+ elif isinstance(event, bb.runqueue.runQueueTaskStarted) or isinstance(event, bb.runqueue.sceneQueueTaskStarted):
+ self.tasknumber_current = event.stats.completed + event.stats.active + event.stats.failed + 1
+ self.tasknumber_total = event.stats.total
+ self.needUpdate = True
+ elif isinstance(event, bb.build.TaskProgress):
+ if event.pid > 0:
+ self.running_tasks[event.pid]['progress'] = event.progress
+ self.running_tasks[event.pid]['rate'] = event.rate
+ self.needUpdate = True
+ else:
+ return False
+ return True
+
+ def getTasks(self):
+ self.needUpdate = False
+ return (self.running_tasks, self.failed_tasks)
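+
+# Usage sketch (illustrative, mirroring the UI main loops above):
+#   helper = uihelper.BBUIHelper()
+#   helper.eventHandler(event)             # feed each event from the server
+#   if helper.needUpdate:
+#       running, failed = helper.getTasks()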
diff --git a/poky/bitbake/lib/bb/utils.py b/poky/bitbake/lib/bb/utils.py
new file mode 100644
index 000000000..378e699e0
--- /dev/null
+++ b/poky/bitbake/lib/bb/utils.py
@@ -0,0 +1,1539 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake Utility Functions
+"""
+
+# Copyright (C) 2004 Michael Lauer
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import re, fcntl, os, string, stat, shutil, time
+import sys
+import errno
+import logging
+import bb
+import bb.msg
+import multiprocessing
+import imp
+import itertools
+import subprocess
+import glob
+import fnmatch
+import traceback
+import signal
+import ast
+import collections
+import copy
+from subprocess import getstatusoutput
+from contextlib import contextmanager
+from ctypes import cdll
+
+logger = logging.getLogger("BitBake.Util")
+python_extensions = [e for e, _, _ in imp.get_suffixes()]
+
+
+def clean_context():
+ return {
+ "os": os,
+ "bb": bb,
+ "time": time,
+ }
+
+def get_context():
+ return _context
+
+
+def set_context(ctx):
+    # 'global' is required here; without it the assignment would only bind a
+    # local variable and the module-level _context would never be replaced.
+    global _context
+    _context = ctx
+
+# Context used in better_exec, eval
+_context = clean_context()
+
+class VersionStringException(Exception):
+ """Exception raised when an invalid version specification is found"""
+
+def explode_version(s):
+ r = []
+ alpha_regexp = re.compile('^([a-zA-Z]+)(.*)$')
+    numeric_regexp = re.compile(r'^(\d+)(.*)$')
+ while (s != ''):
+ if s[0] in string.digits:
+ m = numeric_regexp.match(s)
+ r.append((0, int(m.group(1))))
+ s = m.group(2)
+ continue
+ if s[0] in string.ascii_letters:
+ m = alpha_regexp.match(s)
+ r.append((1, m.group(1)))
+ s = m.group(2)
+ continue
+ if s[0] == '~':
+ r.append((-1, s[0]))
+ else:
+ r.append((2, s[0]))
+ s = s[1:]
+ return r
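+
+# A sketch of the expected behaviour (illustrative): digits become (0, int),
+# letters become (1, str), and '~' becomes (-1, '~') so it sorts lowest:
+#   explode_version("1.2rc3") == [(0, 1), (2, '.'), (0, 2), (1, 'rc'), (0, 3)]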
+
+def split_version(s):
+ """Split a version string into its constituent parts (PE, PV, PR)"""
+ s = s.strip(" <>=")
+ e = 0
+ if s.count(':'):
+ e = int(s.split(":")[0])
+ s = s.split(":")[1]
+ r = ""
+ if s.count('-'):
+ r = s.rsplit("-", 1)[1]
+ s = s.rsplit("-", 1)[0]
+ v = s
+ return (e, v, r)
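+
+# For example (illustrative): split_version("2:1.0-r1") == (2, "1.0", "r1"),
+# i.e. epoch (PE), version (PV) and revision (PR).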
+
+def vercmp_part(a, b):
+ va = explode_version(a)
+ vb = explode_version(b)
+ while True:
+ if va == []:
+ (oa, ca) = (0, None)
+ else:
+ (oa, ca) = va.pop(0)
+ if vb == []:
+ (ob, cb) = (0, None)
+ else:
+ (ob, cb) = vb.pop(0)
+        if (oa, ca) == (0, None) and (ob, cb) == (0, None):
+            return 0
+        if oa < ob:
+            return -1
+        elif oa > ob:
+            return 1
+        # One version may be exhausted here (ca or cb is None). Comparing
+        # None with int/str raises TypeError on Python 3, so treat the
+        # shorter version as the smaller one explicitly.
+        elif ca is None:
+            return -1
+        elif cb is None:
+            return 1
+        elif ca < cb:
+            return -1
+        elif ca > cb:
+            return 1
+
+def vercmp(ta, tb):
+ (ea, va, ra) = ta
+ (eb, vb, rb) = tb
+
+ r = int(ea or 0) - int(eb or 0)
+ if (r == 0):
+ r = vercmp_part(va, vb)
+ if (r == 0):
+ r = vercmp_part(ra, rb)
+ return r
+
+def vercmp_string(a, b):
+ ta = split_version(a)
+ tb = split_version(b)
+ return vercmp(ta, tb)
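+
+# Illustrative behaviour: vercmp_string("1.0", "1.1") < 0,
+# vercmp_string("1.1", "1.1") == 0 and vercmp_string("1.2", "1.1") > 0.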
+
+def vercmp_string_op(a, b, op):
+ """
+ Compare two versions and check if the specified comparison operator matches the result of the comparison.
+ This function is fairly liberal about what operators it will accept since there are a variety of styles
+ depending on the context.
+ """
+ res = vercmp_string(a, b)
+ if op in ('=', '=='):
+ return res == 0
+ elif op == '<=':
+ return res <= 0
+ elif op == '>=':
+ return res >= 0
+ elif op in ('>', '>>'):
+ return res > 0
+ elif op in ('<', '<<'):
+ return res < 0
+ elif op == '!=':
+ return res != 0
+ else:
+ raise VersionStringException('Unsupported comparison operator "%s"' % op)
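+
+# For example (illustrative): vercmp_string_op("1.0", "1.1", "<") is True,
+# while vercmp_string_op("1.0", "1.1", ">=") is False.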
+
+def explode_deps(s):
+ """
+ Take an RDEPENDS style string of format:
+ "DEPEND1 (optional version) DEPEND2 (optional version) ..."
+ and return a list of dependencies.
+ Version information is ignored.
+ """
+ r = []
+ l = s.split()
+ flag = False
+ for i in l:
+ if i[0] == '(':
+ flag = True
+ #j = []
+ if not flag:
+ r.append(i)
+ #else:
+ # j.append(i)
+ if flag and i.endswith(')'):
+ flag = False
+ # Ignore version
+ #r[-1] += ' ' + ' '.join(j)
+ return r
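+
+# For example (illustrative):
+#   explode_deps("foo (>= 1.0) bar") == ['foo', 'bar']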
+
+def explode_dep_versions2(s, *, sort=True):
+ """
+ Take an RDEPENDS style string of format:
+ "DEPEND1 (optional version) DEPEND2 (optional version) ..."
+ and return a dictionary of dependencies and versions.
+ """
+ r = collections.OrderedDict()
+ l = s.replace(",", "").split()
+ lastdep = None
+ lastcmp = ""
+ lastver = ""
+ incmp = False
+ inversion = False
+ for i in l:
+ if i[0] == '(':
+ incmp = True
+ i = i[1:].strip()
+ if not i:
+ continue
+
+ if incmp:
+ incmp = False
+ inversion = True
+ # This list is based on behavior and supported comparisons from deb, opkg and rpm.
+ #
+ # Even though =<, <<, ==, !=, =>, and >> may not be supported,
+ # we list each possibly valid item.
+ # The build system is responsible for validation of what it supports.
+ if i.startswith(('<=', '=<', '<<', '==', '!=', '>=', '=>', '>>')):
+ lastcmp = i[0:2]
+ i = i[2:]
+ elif i.startswith(('<', '>', '=')):
+ lastcmp = i[0:1]
+ i = i[1:]
+            else:
+                # This is an unsupported case!
+                raise VersionStringException('Invalid version specification in "(%s" - invalid or missing operator' % i)
+            i = i.strip()
+ if not i:
+ continue
+
+ if inversion:
+ if i.endswith(')'):
+ i = i[:-1] or ""
+ inversion = False
+ if lastver and i:
+ lastver += " "
+ if i:
+ lastver += i
+ if lastdep not in r:
+ r[lastdep] = []
+ r[lastdep].append(lastcmp + " " + lastver)
+ continue
+
+ #if not inversion:
+ lastdep = i
+ lastver = ""
+ lastcmp = ""
+ if not (i in r and r[i]):
+ r[lastdep] = []
+
+ if sort:
+ r = collections.OrderedDict(sorted(r.items(), key=lambda x: x[0]))
+ return r
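+
+# For example (illustrative):
+#   explode_dep_versions2("foo (>= 1.0) bar")
+#   == OrderedDict([('bar', []), ('foo', ['>= 1.0'])])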
+
+def explode_dep_versions(s):
+ r = explode_dep_versions2(s)
+ for d in r:
+ if not r[d]:
+ r[d] = None
+ continue
+ if len(r[d]) > 1:
+ bb.warn("explode_dep_versions(): Item %s appeared in dependency string '%s' multiple times with different values. explode_dep_versions cannot cope with this." % (d, s))
+ r[d] = r[d][0]
+ return r
+
+def join_deps(deps, commasep=True):
+ """
+ Take the result from explode_dep_versions and generate a dependency string
+ """
+ result = []
+ for dep in deps:
+ if deps[dep]:
+ if isinstance(deps[dep], list):
+ for v in deps[dep]:
+ result.append(dep + " (" + v + ")")
+ else:
+ result.append(dep + " (" + deps[dep] + ")")
+ else:
+ result.append(dep)
+ if commasep:
+ return ", ".join(result)
+ else:
+ return " ".join(result)
+
+def _print_trace(body, line):
+ """
+    Return the surrounding context (four lines either side) of a line in a text body
+ """
+ error = []
+ # print the environment of the method
+ min_line = max(1, line-4)
+ max_line = min(line + 4, len(body))
+ for i in range(min_line, max_line + 1):
+ if line == i:
+ error.append(' *** %.4d:%s' % (i, body[i-1].rstrip()))
+ else:
+ error.append(' %.4d:%s' % (i, body[i-1].rstrip()))
+ return error
+
+def better_compile(text, file, realfile, mode = "exec", lineno = 0):
+ """
+ A better compile method. This method
+ will print the offending lines.
+ """
+ try:
+ cache = bb.methodpool.compile_cache(text)
+ if cache:
+ return cache
+ # We can't add to the linenumbers for compile, we can pad to the correct number of blank lines though
+ text2 = "\n" * int(lineno) + text
+ code = compile(text2, realfile, mode)
+ bb.methodpool.compile_cache_add(text, code)
+ return code
+ except Exception as e:
+ error = []
+ # split the text into lines again
+ body = text.split('\n')
+ error.append("Error in compiling python function in %s, line %s:\n" % (realfile, lineno))
+ if hasattr(e, "lineno"):
+ error.append("The code lines resulting in this error were:")
+ error.extend(_print_trace(body, e.lineno))
+ else:
+ error.append("The function causing this error was:")
+ for line in body:
+ error.append(line)
+ error.append("%s: %s" % (e.__class__.__name__, str(e)))
+
+ logger.error("\n".join(error))
+
+ e = bb.BBHandledException(e)
+ raise e
+
+def _print_exception(t, value, tb, realfile, text, context):
+ error = []
+ try:
+ exception = traceback.format_exception_only(t, value)
+ error.append('Error executing a python function in %s:\n' % realfile)
+
+ # Strip 'us' from the stack (better_exec call) unless that was where the
+ # error came from
+ if tb.tb_next is not None:
+ tb = tb.tb_next
+
+ textarray = text.split('\n')
+
+ linefailed = tb.tb_lineno
+
+ tbextract = traceback.extract_tb(tb)
+ tbformat = traceback.format_list(tbextract)
+ error.append("The stack trace of python calls that resulted in this exception/failure was:")
+ error.append("File: '%s', lineno: %s, function: %s" % (tbextract[0][0], tbextract[0][1], tbextract[0][2]))
+ error.extend(_print_trace(textarray, linefailed))
+
+ # See if this is a function we constructed and has calls back into other functions in
+ # "text". If so, try and improve the context of the error by diving down the trace
+ level = 0
+ nexttb = tb.tb_next
+ while nexttb is not None and (level+1) < len(tbextract):
+ error.append("File: '%s', lineno: %s, function: %s" % (tbextract[level+1][0], tbextract[level+1][1], tbextract[level+1][2]))
+ if tbextract[level][0] == tbextract[level+1][0] and tbextract[level+1][2] == tbextract[level][0]:
+ # The code was possibly in the string we compiled ourselves
+ error.extend(_print_trace(textarray, tbextract[level+1][1]))
+ elif tbextract[level+1][0].startswith("/"):
+ # The code looks like it might be in a file, try and load it
+ try:
+ with open(tbextract[level+1][0], "r") as f:
+ text = f.readlines()
+ error.extend(_print_trace(text, tbextract[level+1][1]))
+ except:
+ error.append(tbformat[level+1])
+ else:
+ error.append(tbformat[level+1])
+ nexttb = tb.tb_next
+ level = level + 1
+
+ error.append("Exception: %s" % ''.join(exception))
+
+        # If the exception is from spawning a task, let's be helpful and display
+ # the output (which hopefully includes stderr).
+ if isinstance(value, subprocess.CalledProcessError) and value.output:
+ error.append("Subprocess output:")
+ error.append(value.output.decode("utf-8", errors="ignore"))
+ finally:
+ logger.error("\n".join(error))
+
+def better_exec(code, context, text = None, realfile = "<code>", pythonexception=False):
+ """
+    Similar to better_compile, better_exec will
+ print the lines that are responsible for the
+ error.
+ """
+ import bb.parse
+ if not text:
+ text = code
+ if not hasattr(code, "co_filename"):
+ code = better_compile(code, realfile, realfile)
+ try:
+ exec(code, get_context(), context)
+ except (bb.BBHandledException, bb.parse.SkipRecipe, bb.build.FuncFailed, bb.data_smart.ExpansionError):
+ # Error already shown so passthrough, no need for traceback
+ raise
+ except Exception as e:
+ if pythonexception:
+ raise
+ (t, value, tb) = sys.exc_info()
+ try:
+ _print_exception(t, value, tb, realfile, text, context)
+ except Exception as e:
+ logger.error("Exception handler error: %s" % str(e))
+
+ e = bb.BBHandledException(e)
+ raise e
+
+def simple_exec(code, context):
+ exec(code, get_context(), context)
+
+def better_eval(source, locals, extraglobals = None):
+ ctx = get_context()
+ if extraglobals:
+ ctx = copy.copy(ctx)
+ for g in extraglobals:
+ ctx[g] = extraglobals[g]
+ return eval(source, ctx, locals)
+
+@contextmanager
+def fileslocked(files):
+ """Context manager for locking and unlocking file locks."""
+ locks = []
+ if files:
+ for lockfile in files:
+ locks.append(bb.utils.lockfile(lockfile))
+
+    try:
+        yield
+    finally:
+        # Release the locks even if the body raises; otherwise an exception
+        # inside the 'with' block would leak every lock acquired above.
+        for lock in locks:
+            bb.utils.unlockfile(lock)
+
+@contextmanager
+def timeout(seconds):
+ def timeout_handler(signum, frame):
+ pass
+
+ original_handler = signal.signal(signal.SIGALRM, timeout_handler)
+
+ try:
+ signal.alarm(seconds)
+ yield
+ finally:
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, original_handler)
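+
+# Usage sketch (illustrative; 'some_blocking_call' is hypothetical):
+#   with timeout(10):
+#       some_blocking_call()
+# The handler deliberately does nothing, so the alarm can only interrupt a
+# blocked system call (EINTR); since PEP 475 many Python calls retry
+# automatically, so the interrupted call must itself notice the timeout.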
+
+def lockfile(name, shared=False, retry=True, block=False):
+ """
+ Use the specified file as a lock file, return when the lock has
+ been acquired. Returns a variable to pass to unlockfile().
+ Parameters:
+ retry: True to re-try locking if it fails, False otherwise
+ block: True to block until the lock succeeds, False otherwise
+ The retry and block parameters are kind of equivalent unless you
+ consider the possibility of sending a signal to the process to break
+ out - at which point you want block=True rather than retry=True.
+ """
+ dirname = os.path.dirname(name)
+ mkdirhier(dirname)
+
+ if not os.access(dirname, os.W_OK):
+ logger.error("Unable to acquire lock '%s', directory is not writable",
+ name)
+ sys.exit(1)
+
+ op = fcntl.LOCK_EX
+ if shared:
+ op = fcntl.LOCK_SH
+ if not retry and not block:
+ op = op | fcntl.LOCK_NB
+
+ while True:
+ # If we leave the lockfiles lying around there is no problem
+ # but we should clean up after ourselves. This gives potential
+ # for races though. To work around this, when we acquire the lock
+ # we check the file we locked was still the lock file on disk.
+ # by comparing inode numbers. If they don't match or the lockfile
+ # no longer exists, we start again.
+
+ # This implementation is unfair since the last person to request the
+ # lock is the most likely to win it.
+
+ try:
+ lf = open(name, 'a+')
+ fileno = lf.fileno()
+ fcntl.flock(fileno, op)
+ statinfo = os.fstat(fileno)
+ if os.path.exists(lf.name):
+ statinfo2 = os.stat(lf.name)
+ if statinfo.st_ino == statinfo2.st_ino:
+ return lf
+ lf.close()
+ except Exception:
+ try:
+ lf.close()
+ except Exception:
+ pass
+ pass
+ if not retry:
+ return None
+
+def unlockfile(lf):
+ """
+ Unlock a file locked using lockfile()
+ """
+ try:
+ # If we had a shared lock, we need to promote to exclusive before
+ # removing the lockfile. Attempt this, ignore failures.
+ fcntl.flock(lf.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
+ os.unlink(lf.name)
+ except (IOError, OSError):
+ pass
+ fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
+ lf.close()
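+
+# Usage sketch for the pair above (the lock path is illustrative):
+#   lf = lockfile("/tmp/bb.demo.lock")
+#   try:
+#       ...  # critical section
+#   finally:
+#       unlockfile(lf)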
+
+def md5_file(filename):
+ """
+ Return the hex string representation of the MD5 checksum of filename.
+ """
+ import hashlib
+ m = hashlib.md5()
+
+ with open(filename, "rb") as f:
+ for line in f:
+ m.update(line)
+ return m.hexdigest()
+
+def sha256_file(filename):
+ """
+ Return the hex string representation of the 256-bit SHA checksum of
+ filename.
+ """
+ import hashlib
+
+ s = hashlib.sha256()
+ with open(filename, "rb") as f:
+ for line in f:
+ s.update(line)
+ return s.hexdigest()
+
+def sha1_file(filename):
+ """
+ Return the hex string representation of the SHA1 checksum of the filename
+ """
+ import hashlib
+
+ s = hashlib.sha1()
+ with open(filename, "rb") as f:
+ for line in f:
+ s.update(line)
+ return s.hexdigest()
+
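+# Illustrative usage sketch (added example, not from the original source;
+# the default filename is hypothetical): the three helpers have identical
+# shapes and differ only in the hash algorithm applied.
+def _example_checksums(filename='/etc/hostname'):
+ return {
+ 'md5': md5_file(filename),
+ 'sha1': sha1_file(filename),
+ 'sha256': sha256_file(filename),
+ }
+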
+def preserved_envvars_exported():
+ """Variables which are taken from the environment and placed in and exported
+ from the metadata"""
+ return [
+ 'BB_TASKHASH',
+ 'HOME',
+ 'LOGNAME',
+ 'PATH',
+ 'PWD',
+ 'SHELL',
+ 'TERM',
+ 'USER',
+ 'LC_ALL',
+ 'BBSERVER',
+ ]
+
+def preserved_envvars():
+ """Variables which are taken from the environment and placed in the metadata"""
+ v = [
+ 'BBPATH',
+ 'BB_PRESERVE_ENV',
+ 'BB_ENV_WHITELIST',
+ 'BB_ENV_EXTRAWHITE',
+ ]
+ return v + preserved_envvars_exported()
+
+def filter_environment(good_vars):
+ """
+ Create a pristine environment for bitbake. This will remove variables that
+ are not known and may influence the build in a negative way.
+ """
+
+ removed_vars = {}
+ for key in list(os.environ):
+ if key in good_vars:
+ continue
+
+ removed_vars[key] = os.environ[key]
+ del os.environ[key]
+
+ # If we spawn a python process, we need to have a UTF-8 locale, else python's file
+ # access methods will use ascii. You can't change that mode once the interpreter is
+ # started so we have to ensure a locale is set. Ideally we'd use C.UTF-8 but not all
+ # distros support that and we need to set something.
+ os.environ["LC_ALL"] = "en_US.UTF-8"
+
+ if removed_vars:
+ logger.debug(1, "Removed the following variables from the environment: %s", ", ".join(removed_vars.keys()))
+
+ return removed_vars
+
+def approved_variables():
+ """
+ Determine and return the list of whitelisted variables which are approved
+ to remain in the environment.
+ """
+ if 'BB_PRESERVE_ENV' in os.environ:
+ return os.environ.keys()
+ approved = []
+ if 'BB_ENV_WHITELIST' in os.environ:
+ approved = os.environ['BB_ENV_WHITELIST'].split()
+ approved.extend(['BB_ENV_WHITELIST'])
+ else:
+ approved = preserved_envvars()
+ if 'BB_ENV_EXTRAWHITE' in os.environ:
+ approved.extend(os.environ['BB_ENV_EXTRAWHITE'].split())
+ if 'BB_ENV_EXTRAWHITE' not in approved:
+ approved.extend(['BB_ENV_EXTRAWHITE'])
+ return approved
+
+def clean_environment():
+ """
+ Clean up any spurious environment variables. This will remove any
+ variables the user hasn't chosen to preserve.
+ """
+ if 'BB_PRESERVE_ENV' not in os.environ:
+ good_vars = approved_variables()
+ return filter_environment(good_vars)
+
+ return {}
+
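+# Illustrative usage sketch (added example, not from the original source;
+# the variable names are hypothetical). Note that this mutates os.environ
+# and, via filter_environment(), forces LC_ALL to en_US.UTF-8.
+def _example_clean_environment():
+ os.environ['BB_ENV_EXTRAWHITE'] = 'MY_EXTRA_VAR'
+ os.environ['MY_EXTRA_VAR'] = 'kept'
+ os.environ['SOME_RANDOM_VAR'] = 'dropped'
+ removed = clean_environment() # SOME_RANDOM_VAR now lives only in 'removed'
+ assert 'MY_EXTRA_VAR' in os.environ
+ return removed
+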
+def empty_environment():
+ """
+ Remove all variables from the environment.
+ """
+ for s in list(os.environ.keys()):
+ os.unsetenv(s)
+ del os.environ[s]
+
+def build_environment(d):
+ """
+ Build an environment from all exported variables.
+ """
+ import bb.data
+ for var in bb.data.keys(d):
+ export = d.getVarFlag(var, "export", False)
+ if export:
+ os.environ[var] = d.getVar(var) or ""
+
+def _check_unsafe_delete_path(path):
+ """
+ Basic safeguard against recursively deleting something we shouldn't. If it returns True,
+ the caller should raise an exception with an appropriate message.
+ NOTE: This is NOT meant to be a security mechanism - just a guard against silly mistakes
+ with potentially disastrous results.
+ """
+ extra = ''
+ # HOME might not be /home/something, so in case we can get it, check against it
+ homedir = os.environ.get('HOME', '')
+ if homedir:
+ extra = '|%s' % homedir
+ if re.match('(/|//|/home|/home/[^/]*%s)$' % extra, os.path.abspath(path)):
+ return True
+ return False
+
+def remove(path, recurse=False):
+ """Equivalent to rm -f or rm -rf"""
+ if not path:
+ return
+ if recurse:
+ for name in glob.glob(path):
+ if _check_unsafe_delete_path(name):
+ raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % name)
+ # shutil.rmtree(name) would be ideal but it's too slow
+ subprocess.check_call(['rm', '-rf'] + glob.glob(path))
+ return
+ for name in glob.glob(path):
+ try:
+ os.unlink(name)
+ except OSError as exc:
+ if exc.errno != errno.ENOENT:
+ raise
+
+def prunedir(topdir):
+ """Delete everything reachable from the directory named in 'topdir'.
+ CAUTION: This is dangerous!
+ """
+ if _check_unsafe_delete_path(topdir):
+ raise Exception('bb.utils.prunedir: called with dangerous path "%s", refusing to delete!' % topdir)
+ for root, dirs, files in os.walk(topdir, topdown = False):
+ for name in files:
+ os.remove(os.path.join(root, name))
+ for name in dirs:
+ if os.path.islink(os.path.join(root, name)):
+ os.remove(os.path.join(root, name))
+ else:
+ os.rmdir(os.path.join(root, name))
+ os.rmdir(topdir)
+
+#
+# Could also use return re.compile("(%s)" % "|".join(map(re.escape, suffixes))).sub(lambda mo: "", var)
+# but that's probably overkill and suffixes is likely to be small
+#
+def prune_suffix(var, suffixes, d):
+ """See if var ends with any of the suffixes listed and
+ remove it if found
+ """
+ for suffix in suffixes:
+ if var.endswith(suffix):
+ return var[:-len(suffix)] # strip only the trailing suffix, not every occurrence
+ return var
+
+def mkdirhier(directory):
+ """Create a directory like 'mkdir -p', but does not complain if
+ directory already exists like os.makedirs
+ """
+
+ try:
+ os.makedirs(directory)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise e
+
+def movefile(src, dest, newmtime = None, sstat = None):
+ """Moves a file from src to dest, preserving all permissions and
+ attributes; mtime will be preserved even when moving across
+ filesystems. Returns true on success and false on failure. Move is
+ atomic.
+ """
+
+ #print "movefile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
+ try:
+ if not sstat:
+ sstat = os.lstat(src)
+ except Exception as e:
+ print("movefile: Stating source file failed...", e)
+ return None
+
+ destexists = 1
+ try:
+ dstat = os.lstat(dest)
+ except:
+ dstat = os.lstat(os.path.dirname(dest))
+ destexists = 0
+
+ if destexists:
+ if stat.S_ISLNK(dstat[stat.ST_MODE]):
+ try:
+ os.unlink(dest)
+ destexists = 0
+ except Exception as e:
+ pass
+
+ if stat.S_ISLNK(sstat[stat.ST_MODE]):
+ try:
+ target = os.readlink(src)
+ if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
+ os.unlink(dest)
+ os.symlink(target, dest)
+ #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+ os.unlink(src)
+ return os.lstat(dest)
+ except Exception as e:
+ print("movefile: failed to properly create symlink:", dest, "->", target, e)
+ return None
+
+ renamefailed = 1
+ # os.rename needs to know the dest path ending with file name
+ # so append the file name to a path only if it's a dir specified
+ srcfname = os.path.basename(src)
+ destpath = os.path.join(dest, srcfname) if os.path.isdir(dest) \
+ else dest
+
+ if sstat[stat.ST_DEV] == dstat[stat.ST_DEV]:
+ try:
+ os.rename(src, destpath)
+ renamefailed = 0
+ except Exception as e:
+ if e.errno != errno.EXDEV: # exceptions are not subscriptable in Python 3
+ # Some random error.
+ print("movefile: Failed to move", src, "to", dest, e)
+ return None
+ # Invalid cross-device link: either a 'bind' mount or a real
+ # cross-device move, so fall through and copy instead
+
+ if renamefailed:
+ didcopy = 0
+ if stat.S_ISREG(sstat[stat.ST_MODE]):
+ try: # For safety copy then move it over.
+ shutil.copyfile(src, destpath + "#new")
+ os.rename(destpath + "#new", destpath)
+ didcopy = 1
+ except Exception as e:
+ print('movefile: copy', src, '->', dest, 'failed.', e)
+ return None
+ else:
+ # We don't yet handle special files, so we need to fall back to /bin/mv
+ a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'")
+ if a[0] != 0:
+ print("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a)
+ return None # failure
+ try:
+ if didcopy:
+ os.lchown(destpath, sstat[stat.ST_UID], sstat[stat.ST_GID])
+ os.chmod(destpath, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
+ os.unlink(src)
+ except Exception as e:
+ print("movefile: Failed to chown/chmod/unlink", dest, e)
+ return None
+
+ if newmtime:
+ os.utime(destpath, (newmtime, newmtime))
+ else:
+ os.utime(destpath, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
+ newmtime = sstat[stat.ST_MTIME]
+ return newmtime
+
+def copyfile(src, dest, newmtime = None, sstat = None):
+ """
+ Copies a file from src to dest, preserving all permissions and
+ attributes; mtime will be preserved even when moving across
+ filesystems. Returns true on success and false on failure.
+ """
+ #print "copyfile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
+ try:
+ if not sstat:
+ sstat = os.lstat(src)
+ except Exception as e:
+ logger.warning("copyfile: stat of %s failed (%s)" % (src, e))
+ return False
+
+ destexists = 1
+ try:
+ dstat = os.lstat(dest)
+ except:
+ dstat = os.lstat(os.path.dirname(dest))
+ destexists = 0
+
+ if destexists:
+ if stat.S_ISLNK(dstat[stat.ST_MODE]):
+ try:
+ os.unlink(dest)
+ destexists = 0
+ except Exception as e:
+ pass
+
+ if stat.S_ISLNK(sstat[stat.ST_MODE]):
+ try:
+ target = os.readlink(src)
+ if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
+ os.unlink(dest)
+ os.symlink(target, dest)
+ #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+ return os.lstat(dest)
+ except Exception as e:
+ logger.warning("copyfile: failed to create symlink %s to %s (%s)" % (dest, target, e))
+ return False
+
+ if stat.S_ISREG(sstat[stat.ST_MODE]):
+ try:
+ srcchown = False
+ if not os.access(src, os.R_OK):
+ # Make sure we can read it
+ srcchown = True
+ os.chmod(src, sstat[stat.ST_MODE] | stat.S_IRUSR)
+
+ # For safety copy then move it over.
+ shutil.copyfile(src, dest + "#new")
+ os.rename(dest + "#new", dest)
+ except Exception as e:
+ logger.warning("copyfile: copy %s to %s failed (%s)" % (src, dest, e))
+ return False
+ finally:
+ if srcchown:
+ os.chmod(src, sstat[stat.ST_MODE])
+ os.utime(src, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
+
+ else:
+ # We don't yet handle special files, so we need to fall back to /bin/cp
+ a = getstatusoutput("/bin/cp -f " + "'" + src + "' '" + dest + "'")
+ if a[0] != 0:
+ logger.warning("copyfile: failed to copy special file %s to %s (%s)" % (src, dest, a))
+ return False # failure
+ try:
+ os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
+ os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
+ except Exception as e:
+ logger.warning("copyfile: failed to chown/chmod %s (%s)" % (dest, e))
+ return False
+
+ if newmtime:
+ os.utime(dest, (newmtime, newmtime))
+ else:
+ os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
+ newmtime = sstat[stat.ST_MTIME]
+ return newmtime
+
+def which(path, item, direction = 0, history = False, executable=False):
+ """
+ Locate `item` in the list of paths `path` (colon separated string like $PATH).
+ If `direction` is non-zero then the list is reversed.
+ If `history` is True then the list of candidates also returned as result,history.
+ If `executable` is True then the candidate has to be an executable file,
+ otherwise the candidate simply has to exist.
+ """
+
+ if executable:
+ is_candidate = lambda p: os.path.isfile(p) and os.access(p, os.X_OK)
+ else:
+ is_candidate = lambda p: os.path.exists(p)
+
+ hist = []
+ paths = (path or "").split(':')
+ if direction != 0:
+ paths.reverse()
+
+ for p in paths:
+ next = os.path.join(p, item)
+ hist.append(next)
+ if is_candidate(next):
+ if not os.path.isabs(next):
+ next = os.path.abspath(next)
+ if history:
+ return next, hist
+ return next
+
+ if history:
+ return "", hist
+ return ""
+
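+# Illustrative usage sketch (added example, not from the original source):
+# look up an executable on $PATH, much like the shell's `which`.
+def _example_which():
+ path = os.environ.get('PATH', '')
+ found = which(path, 'sh', executable=True) # absolute path, or ''
+ found, attempts = which(path, 'sh', executable=True, history=True)
+ return found, attempts
+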
+def to_boolean(string, default=None):
+ if not string:
+ return default
+
+ normalized = string.lower()
+ if normalized in ("y", "yes", "1", "true"):
+ return True
+ elif normalized in ("n", "no", "0", "false"):
+ return False
+ else:
+ raise ValueError("Invalid value for to_boolean: %s" % string)
+
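+# Illustrative usage sketch (added example, not from the original source):
+# to_boolean() normalises common truthy/falsy spellings and raises
+# ValueError for anything it does not recognise.
+def _example_to_boolean():
+ assert to_boolean('Yes') is True
+ assert to_boolean('0') is False
+ assert to_boolean('', default=True) is True
+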
+def contains(variable, checkvalues, truevalue, falsevalue, d):
+ """Check if a variable contains all the values specified.
+
+ Arguments:
+
+ variable -- the variable name. This will be fetched and expanded (using
+ d.getVar(variable)) and then split into a set().
+
+ checkvalues -- if this is a string it is split on whitespace into a set(),
+ otherwise coerced directly into a set().
+
+ truevalue -- the value to return if checkvalues is a subset of variable.
+
+ falsevalue -- the value to return if variable is empty or if checkvalues is
+ not a subset of variable.
+
+ d -- the data store.
+ """
+
+ val = d.getVar(variable)
+ if not val:
+ return falsevalue
+ val = set(val.split())
+ if isinstance(checkvalues, str):
+ checkvalues = set(checkvalues.split())
+ else:
+ checkvalues = set(checkvalues)
+ if checkvalues.issubset(val):
+ return truevalue
+ return falsevalue
+
+def contains_any(variable, checkvalues, truevalue, falsevalue, d):
+ """Check if a variable contains any of the values specified.
+
+ Arguments are as for contains(); returns truevalue if at least one of
+ the checkvalues is present in the variable's value, falsevalue otherwise.
+ """
+ val = d.getVar(variable)
+ if not val:
+ return falsevalue
+ val = set(val.split())
+ if isinstance(checkvalues, str):
+ checkvalues = set(checkvalues.split())
+ else:
+ checkvalues = set(checkvalues)
+ if checkvalues & val:
+ return truevalue
+ return falsevalue
+
+def filter(variable, checkvalues, d):
+ """Return all words in the variable that are present in the checkvalues.
+
+ Arguments:
+
+ variable -- the variable name. This will be fetched and expanded (using
+ d.getVar(variable)) and then split into a set().
+
+ checkvalues -- if this is a string it is split on whitespace into a set(),
+ otherwise coerced directly into a set().
+
+ d -- the data store.
+ """
+
+ val = d.getVar(variable)
+ if not val:
+ return ''
+ val = set(val.split())
+ if isinstance(checkvalues, str):
+ checkvalues = set(checkvalues.split())
+ else:
+ checkvalues = set(checkvalues)
+ return ' '.join(sorted(checkvalues & val))
+
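+# Illustrative usage sketch (added example, not from the original source;
+# assumes a datastore obtained via bb.data.init(), as elsewhere in bitbake,
+# and a hypothetical variable value).
+def _example_contains_filter():
+ import bb.data
+ d = bb.data.init()
+ d.setVar('DISTRO_FEATURES', 'systemd x11 wifi')
+ assert contains('DISTRO_FEATURES', 'systemd x11', 'y', 'n', d) == 'y'
+ assert contains_any('DISTRO_FEATURES', 'wayland x11', 'y', 'n', d) == 'y'
+ assert filter('DISTRO_FEATURES', 'x11 wayland wifi', d) == 'wifi x11'
+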
+def cpu_count():
+ return multiprocessing.cpu_count()
+
+def nonblockingfd(fd):
+ fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+def process_profilelog(fn, pout = None):
+ # Call either with a list of filenames (in which case pout must be set)
+ # or with a single filename (pout is then optional).
+ if not pout:
+ pout = fn + '.processed'
+ pout = open(pout, 'w')
+
+ import pstats
+ if isinstance(fn, list):
+ p = pstats.Stats(*fn, stream=pout)
+ else:
+ p = pstats.Stats(fn, stream=pout)
+ p.sort_stats('time')
+ p.print_stats()
+ p.print_callers()
+ p.sort_stats('cumulative')
+ p.print_stats()
+
+ pout.flush()
+ pout.close()
+
+#
+# Retained to work around multiprocessing pool bugs in python < 2.7.3
+#
+def multiprocessingpool(*args, **kwargs):
+
+ import multiprocessing.pool
+ #import multiprocessing.util
+ #multiprocessing.util.log_to_stderr(10)
+ # Deal with a multiprocessing bug where signals to the processes would be delayed until the work
+ # completes. Putting in a timeout means the signals (like SIGINT/SIGTERM) get processed.
+ def wrapper(func):
+ def wrap(self, timeout=None):
+ return func(self, timeout=timeout if timeout is not None else 1e100)
+ return wrap
+ multiprocessing.pool.IMapIterator.next = wrapper(multiprocessing.pool.IMapIterator.next)
+
+ return multiprocessing.Pool(*args, **kwargs)
+
+def exec_flat_python_func(func, *args, **kwargs):
+ """Execute a flat python function (defined with def funcname(args):...)"""
+ # Prepare a small piece of python code which calls the requested function
+ # To do this we need to prepare two things - a set of variables we can use to pass
+ # the values of arguments into the calling function, and the list of arguments for
+ # the function being called
+ context = {}
+ funcargs = []
+ # Handle unnamed arguments
+ aidx = 1
+ for arg in args:
+ argname = 'arg_%s' % aidx
+ context[argname] = arg
+ funcargs.append(argname)
+ aidx += 1
+ # Handle keyword arguments
+ context.update(kwargs)
+ funcargs.extend(['%s=%s' % (arg, arg) for arg in kwargs.keys()])
+ code = 'retval = %s(%s)' % (func, ', '.join(funcargs))
+ comp = bb.utils.better_compile(code, '<string>', '<string>')
+ bb.utils.better_exec(comp, context, code, '<string>')
+ return context['retval']
+
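+# Illustrative usage sketch (added example, not from the original source):
+# any function reachable from the execution context can be called by name,
+# with positional and keyword arguments passed through.
+def _example_exec_flat_python_func():
+ assert exec_flat_python_func('min', 3, 7) == 3
+ assert exec_flat_python_func('sorted', [2, 1], reverse=True) == [2, 1]
+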
+def edit_metadata(meta_lines, variables, varfunc, match_overrides=False):
+ """Edit lines from a recipe or config file and modify one or more
+ specified variable values set in the file using a specified callback
+ function. Lines are expected to have trailing newlines.
+ Parameters:
+ meta_lines: lines from the file; can be a list or an iterable
+ (e.g. file pointer)
+ variables: a list of variable names to look for. Functions
+ may also be specified, but must be specified with '()' at
+ the end of the name. Note that the function doesn't have
+ any intrinsic understanding of _append, _prepend, _remove,
+ or overrides, so these are considered as part of the name.
+ These values go into a regular expression, so regular
+ expression syntax is allowed.
+ varfunc: callback function called for every variable matching
+ one of the entries in the variables parameter. The function
+ should take four arguments:
+ varname: name of variable matched
+ origvalue: current value in file
+ op: the operator (e.g. '+=')
+ newlines: list of lines up to this point. You can use
+ this to prepend lines before this variable setting
+ if you wish.
+ and should return a four-element tuple:
+ newvalue: new value to substitute in, or None to drop
+ the variable setting entirely. (If the removal
+ results in two consecutive blank lines, one of the
+ blank lines will also be dropped).
+ newop: the operator to use - if you specify None here,
+ the original operation will be used.
+ indent: number of spaces to indent multi-line entries,
+ or -1 to indent up to the level of the assignment
+ and opening quote, or a string to use as the indent.
+ minbreak: True to allow the first element of a
+ multi-line value to continue on the same line as
+ the assignment, False to indent before the first
+ element.
+ To clarify, if you wish not to change the value, then you
+ would return like this: return origvalue, None, 0, True
+ match_overrides: True to match items with _overrides on the end,
+ False otherwise
+ Returns a tuple:
+ updated:
+ True if changes were made, False otherwise.
+ newlines:
+ Lines after processing
+ """
+
+ var_res = {}
+ if match_overrides:
+ override_re = '(_[a-zA-Z0-9-_$(){}]+)?'
+ else:
+ override_re = ''
+ for var in variables:
+ if var.endswith('()'):
+ var_res[var] = re.compile(r'^(%s%s)[ \t]*\([ \t]*\)[ \t]*{' % (var[:-2].rstrip(), override_re))
+ else:
+ var_res[var] = re.compile(r'^(%s%s)[ \t]*[?+:.]*=[+.]*[ \t]*(["\'])' % (var, override_re))
+
+ updated = False
+ varset_start = ''
+ varlines = []
+ newlines = []
+ in_var = None
+ full_value = ''
+ var_end = ''
+
+ def handle_var_end():
+ prerun_newlines = newlines[:]
+ op = varset_start[len(in_var):].strip()
+ (newvalue, newop, indent, minbreak) = varfunc(in_var, full_value, op, newlines)
+ changed = (prerun_newlines != newlines)
+
+ if newvalue is None:
+ # Drop the value
+ return True
+ elif newvalue != full_value or (newop not in [None, op]):
+ if newop not in [None, op]:
+ # Callback changed the operator
+ varset_new = "%s %s" % (in_var, newop)
+ else:
+ varset_new = varset_start
+
+ if isinstance(indent, int):
+ if indent == -1:
+ indentspc = ' ' * (len(varset_new) + 2)
+ else:
+ indentspc = ' ' * indent
+ else:
+ indentspc = indent
+ if in_var.endswith('()'):
+ # A function definition
+ if isinstance(newvalue, list):
+ newlines.append('%s {\n%s%s\n}\n' % (varset_new, indentspc, ('\n%s' % indentspc).join(newvalue)))
+ else:
+ if not newvalue.startswith('\n'):
+ newvalue = '\n' + newvalue
+ if not newvalue.endswith('\n'):
+ newvalue = newvalue + '\n'
+ newlines.append('%s {%s}\n' % (varset_new, newvalue))
+ else:
+ # Normal variable
+ if isinstance(newvalue, list):
+ if not newvalue:
+ # Empty list -> empty string
+ newlines.append('%s ""\n' % varset_new)
+ elif minbreak:
+ # First item on first line
+ if len(newvalue) == 1:
+ newlines.append('%s "%s"\n' % (varset_new, newvalue[0]))
+ else:
+ newlines.append('%s "%s \\\n' % (varset_new, newvalue[0]))
+ for item in newvalue[1:]:
+ newlines.append('%s%s \\\n' % (indentspc, item))
+ newlines.append('%s"\n' % indentspc)
+ else:
+ # No item on first line
+ newlines.append('%s " \\\n' % varset_new)
+ for item in newvalue:
+ newlines.append('%s%s \\\n' % (indentspc, item))
+ newlines.append('%s"\n' % indentspc)
+ else:
+ newlines.append('%s "%s"\n' % (varset_new, newvalue))
+ return True
+ else:
+ # Put the old lines back where they were
+ newlines.extend(varlines)
+ # If newlines was touched by the function, we'll need to return True
+ return changed
+
+ checkspc = False
+
+ for line in meta_lines:
+ if in_var:
+ value = line.rstrip()
+ varlines.append(line)
+ if in_var.endswith('()'):
+ full_value += '\n' + value
+ else:
+ full_value += value[:-1]
+ if value.endswith(var_end):
+ if in_var.endswith('()'):
+ if full_value.count('{') - full_value.count('}') >= 0:
+ continue
+ full_value = full_value[:-1]
+ if handle_var_end():
+ updated = True
+ checkspc = True
+ in_var = None
+ else:
+ skip = False
+ for (varname, var_re) in var_res.items():
+ res = var_re.match(line)
+ if res:
+ isfunc = varname.endswith('()')
+ if isfunc:
+ splitvalue = line.split('{', 1)
+ var_end = '}'
+ else:
+ var_end = res.groups()[-1]
+ splitvalue = line.split(var_end, 1)
+ varset_start = splitvalue[0].rstrip()
+ value = splitvalue[1].rstrip()
+ if not isfunc and value.endswith('\\'):
+ value = value[:-1]
+ full_value = value
+ varlines = [line]
+ in_var = res.group(1)
+ if isfunc:
+ in_var += '()'
+ if value.endswith(var_end):
+ full_value = full_value[:-1]
+ if handle_var_end():
+ updated = True
+ checkspc = True
+ in_var = None
+ skip = True
+ break
+ if not skip:
+ if checkspc:
+ checkspc = False
+ if newlines and newlines[-1] == '\n' and line == '\n':
+ # Squash blank line if there are two consecutive blanks after a removal
+ continue
+ newlines.append(line)
+ return (updated, newlines)
+
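+# Illustrative usage sketch (added example, not from the original source;
+# variable names and values are hypothetical): a varfunc callback that
+# rewrites DESCRIPTION and leaves every other line untouched.
+def _example_edit_metadata():
+ def setdesc(varname, origvalue, op, newlines):
+ if varname == 'DESCRIPTION':
+ return 'A new description', None, 0, True
+ return origvalue, None, 0, True
+ lines = ['DESCRIPTION = "old"\n', 'LICENSE = "MIT"\n']
+ updated, newlines = edit_metadata(lines, ['DESCRIPTION'], setdesc)
+ # updated is True; newlines[0] is 'DESCRIPTION = "A new description"\n'
+ return updated, newlines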
+
+def edit_metadata_file(meta_file, variables, varfunc):
+ """Edit a recipe or config file and modify one or more specified
+ variable values set in the file using a specified callback function.
+ The file is only written to if the value(s) actually change.
+ This is basically the file version of edit_metadata(), see that
+ function's description for parameter/usage information.
+ Returns True if the file was written to, False otherwise.
+ """
+ with open(meta_file, 'r') as f:
+ (updated, newlines) = edit_metadata(f, variables, varfunc)
+ if updated:
+ with open(meta_file, 'w') as f:
+ f.writelines(newlines)
+ return updated
+
+
+def edit_bblayers_conf(bblayers_conf, add, remove):
+ """Edit bblayers.conf, adding and/or removing layers
+ Parameters:
+ bblayers_conf: path to bblayers.conf file to edit
+ add: layer path (or list of layer paths) to add; None or empty
+ list to add nothing
+ remove: layer path (or list of layer paths) to remove; None or
+ empty list to remove nothing
+ Returns a tuple:
+ notadded: list of layers specified to be added but weren't
+ (because they were already in the list)
+ notremoved: list of layers that were specified to be removed
+ but weren't (because they weren't in the list)
+ """
+
+ import fnmatch
+
+ def remove_trailing_sep(pth):
+ if pth and pth[-1] == os.sep:
+ pth = pth[:-1]
+ return pth
+
+ approved = bb.utils.approved_variables()
+ def canonicalise_path(pth):
+ pth = remove_trailing_sep(pth)
+ if 'HOME' in approved and '~' in pth:
+ pth = os.path.expanduser(pth)
+ return pth
+
+ def layerlist_param(value):
+ if not value:
+ return []
+ elif isinstance(value, list):
+ return [remove_trailing_sep(x) for x in value]
+ else:
+ return [remove_trailing_sep(value)]
+
+ addlayers = layerlist_param(add)
+ removelayers = layerlist_param(remove)
+
+ # Need to use a list here because we can't set non-local variables from a callback in python 2.x
+ bblayercalls = []
+ removed = []
+ plusequals = False
+ orig_bblayers = []
+
+ def handle_bblayers_firstpass(varname, origvalue, op, newlines):
+ bblayercalls.append(op)
+ if op == '=':
+ del orig_bblayers[:]
+ orig_bblayers.extend([canonicalise_path(x) for x in origvalue.split()])
+ return (origvalue, None, 2, False)
+
+ def handle_bblayers(varname, origvalue, op, newlines):
+ updated = False
+ bblayers = [remove_trailing_sep(x) for x in origvalue.split()]
+ if removelayers:
+ for removelayer in removelayers:
+ for layer in bblayers:
+ if fnmatch.fnmatch(canonicalise_path(layer), canonicalise_path(removelayer)):
+ updated = True
+ bblayers.remove(layer)
+ removed.append(removelayer)
+ break
+ if addlayers and not plusequals:
+ for addlayer in addlayers:
+ if addlayer not in bblayers:
+ updated = True
+ bblayers.append(addlayer)
+ del addlayers[:]
+
+ if updated:
+ if op == '+=' and not bblayers:
+ bblayers = None
+ return (bblayers, None, 2, False)
+ else:
+ return (origvalue, None, 2, False)
+
+ with open(bblayers_conf, 'r') as f:
+ (_, newlines) = edit_metadata(f, ['BBLAYERS'], handle_bblayers_firstpass)
+
+ if not bblayercalls:
+ raise Exception('Unable to find BBLAYERS in %s' % bblayers_conf)
+
+ # Try to do the "smart" thing depending on how the user has laid out
+ # their bblayers.conf file
+ if bblayercalls.count('+=') > 1:
+ plusequals = True
+
+ removelayers_canon = [canonicalise_path(layer) for layer in removelayers]
+ notadded = []
+ for layer in addlayers:
+ layer_canon = canonicalise_path(layer)
+ if layer_canon in orig_bblayers and not layer_canon in removelayers_canon:
+ notadded.append(layer)
+ notadded_canon = [canonicalise_path(layer) for layer in notadded]
+ addlayers[:] = [layer for layer in addlayers if canonicalise_path(layer) not in notadded_canon]
+
+ (updated, newlines) = edit_metadata(newlines, ['BBLAYERS'], handle_bblayers)
+ if addlayers:
+ # Still need to add these
+ for addlayer in addlayers:
+ newlines.append('BBLAYERS += "%s"\n' % addlayer)
+ updated = True
+
+ if updated:
+ with open(bblayers_conf, 'w') as f:
+ f.writelines(newlines)
+
+ notremoved = list(set(removelayers) - set(removed))
+
+ return (notadded, notremoved)
+
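+# Illustrative usage sketch (added example, not from the original source;
+# all paths are hypothetical): add one layer and remove another in a
+# single pass over bblayers.conf.
+def _example_edit_bblayers_conf():
+ notadded, notremoved = edit_bblayers_conf(
+ '/path/to/build/conf/bblayers.conf',
+ add='/path/to/meta-custom',
+ remove='/path/to/meta-obsolete')
+ return notadded, notremoved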
+
+def get_file_layer(filename, d):
+ """Determine the collection (as defined by a layer's layer.conf file) containing the specified file"""
+ import fnmatch
+ collections = (d.getVar('BBFILE_COLLECTIONS') or '').split()
+ collection_res = {}
+ for collection in collections:
+ collection_res[collection] = d.getVar('BBFILE_PATTERN_%s' % collection) or ''
+
+ def path_to_layer(path):
+ # Use longest path so we handle nested layers
+ matchlen = 0
+ match = None
+ for collection, regex in collection_res.items():
+ if len(regex) > matchlen and re.match(regex, path):
+ matchlen = len(regex)
+ match = collection
+ return match
+
+ result = None
+ bbfiles = (d.getVar('BBFILES') or '').split()
+ bbfilesmatch = False
+ for bbfilesentry in bbfiles:
+ if fnmatch.fnmatch(filename, bbfilesentry):
+ bbfilesmatch = True
+ result = path_to_layer(bbfilesentry)
+
+ if not bbfilesmatch:
+ # Probably a bbclass
+ result = path_to_layer(filename)
+
+ return result
+
+
+# Constant taken from http://linux.die.net/include/linux/prctl.h
+PR_SET_PDEATHSIG = 1
+
+class PrCtlError(Exception):
+ pass
+
+def signal_on_parent_exit(signame):
+ """
+ Trigger signame to be sent when the parent process dies
+ """
+ signum = getattr(signal, signame)
+ # http://linux.die.net/man/2/prctl
+ result = cdll['libc.so.6'].prctl(PR_SET_PDEATHSIG, signum)
+ if result != 0:
+ raise PrCtlError('prctl failed with error code %s' % result)
+
+#
+# Manually call the ioprio syscall. We could depend on other libs like psutil,
+# but this gets us enough of what bitbake needs for now without the extra
+# dependency
+#
+_unamearch = os.uname()[4]
+IOPRIO_WHO_PROCESS = 1
+IOPRIO_CLASS_SHIFT = 13
+
+def ioprio_set(who, cls, value):
+ NR_ioprio_set = None
+ if _unamearch == "x86_64":
+ NR_ioprio_set = 251
+ elif _unamearch[0] == "i" and _unamearch[2:3] == "86":
+ NR_ioprio_set = 289
+
+ if NR_ioprio_set:
+ ioprio = value | (cls << IOPRIO_CLASS_SHIFT)
+ rc = cdll['libc.so.6'].syscall(NR_ioprio_set, IOPRIO_WHO_PROCESS, who, ioprio)
+ if rc != 0:
+ raise ValueError("Unable to set ioprio, syscall returned %s" % rc)
+ else:
+ bb.warn("Unable to set IO Prio for arch %s" % _unamearch)
+
+def set_process_name(name):
+ from ctypes import cdll, byref, create_string_buffer
+ # This is nice to have for debugging, not essential
+ try:
+ libc = cdll.LoadLibrary('libc.so.6')
+ buf = create_string_buffer(bytes(name, 'utf-8'))
+ libc.prctl(15, byref(buf), 0, 0, 0)
+ except:
+ pass
+
+# Export common proxy variables from the datastore to the environment
+def export_proxies(d):
+ import os
+
+ variables = ['http_proxy', 'HTTP_PROXY', 'https_proxy', 'HTTPS_PROXY',
+ 'ftp_proxy', 'FTP_PROXY', 'no_proxy', 'NO_PROXY',
+ 'GIT_PROXY_COMMAND']
+ exported = False
+
+ for v in variables:
+ if v in os.environ.keys():
+ exported = True
+ else:
+ v_proxy = d.getVar(v)
+ if v_proxy is not None:
+ os.environ[v] = v_proxy
+ exported = True
+
+ return exported
+
+
+def load_plugins(logger, plugins, pluginpath):
+ def load_plugin(name):
+ logger.debug(1, 'Loading plugin %s' % name)
+ fp, pathname, description = imp.find_module(name, [pluginpath])
+ try:
+ return imp.load_module(name, fp, pathname, description)
+ finally:
+ if fp:
+ fp.close()
+
+ logger.debug(1, 'Loading plugins from %s...' % pluginpath)
+
+ expanded = (glob.glob(os.path.join(pluginpath, '*' + ext))
+ for ext in python_extensions)
+ files = itertools.chain.from_iterable(expanded)
+ names = set(os.path.splitext(os.path.basename(fn))[0] for fn in files)
+ for name in names:
+ if name != '__init__':
+ plugin = load_plugin(name)
+ if hasattr(plugin, 'plugin_init'):
+ obj = plugin.plugin_init(plugins)
+ plugins.append(obj or plugin)
+ else:
+ plugins.append(plugin)
+
+
+class LogCatcher(logging.Handler):
+ """Logging handler for collecting logged messages so you can check them later"""
+ def __init__(self):
+ self.messages = []
+ logging.Handler.__init__(self, logging.WARNING)
+ def emit(self, record):
+ self.messages.append(bb.build.logformatter.format(record))
+ def contains(self, message):
+ return (message in self.messages)
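+
+# Illustrative usage sketch (added example, not from the original source):
+# attach the handler, run code that may warn, then inspect what was logged.
+# The stored messages are formatted strings (including the level prefix).
+def _example_logcatcher():
+ catcher = LogCatcher()
+ logger.addHandler(catcher)
+ try:
+ logger.warning("disk space is low") # hypothetical message
+ finally:
+ logger.removeHandler(catcher)
+ return catcher.messages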