author     Patrick Williams <patrick@stwcx.xyz>  2016-08-17 14:31:25 -0500
committer  Patrick Williams <patrick@stwcx.xyz>  2016-08-22 16:43:26 +0000
commit     60f9d69e016b11c468c98ea75ba0a60c44afbbc4 (patch)
tree       ecb49581a9e41a37943c22cd9ef3f63451b20ee7 /import-layers/yocto-poky/meta/lib/oe
parent     e18c61205e0234b03697129c20cc69c9b3940efc (diff)
download   talos-openbmc-60f9d69e016b11c468c98ea75ba0a60c44afbbc4.tar.gz
           talos-openbmc-60f9d69e016b11c468c98ea75ba0a60c44afbbc4.zip
yocto-poky: Move to import-layers subdir
We are going to import additional layers, so create a subdir to hold
all of the layers that we import with git-subtree.

Change-Id: I6f732153a22be8ca663035c518837e3cc5ec0799
Signed-off-by: Patrick Williams <patrick@stwcx.xyz>
Diffstat (limited to 'import-layers/yocto-poky/meta/lib/oe')
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/__init__.py | 2
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py | 456
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/cachedpath.py | 233
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/classextend.py | 120
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/classutils.py | 43
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py | 217
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/data.py | 17
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/distro_check.py | 399
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/gpg_sign.py | 116
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/license.py | 217
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/lsb.py | 88
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/maketype.py | 99
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/manifest.py | 345
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/package.py | 157
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/package_manager.py | 2061
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/packagedata.py | 95
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/packagegroup.py | 36
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/patch.py | 704
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/path.py | 243
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/prservice.py | 126
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/qa.py | 151
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/recipeutils.py | 849
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/rootfs.py | 986
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/sdk.py | 367
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/sstatesig.py | 356
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/terminal.py | 275
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/tests/__init__.py | 0
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/tests/test_license.py | 68
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/tests/test_path.py | 89
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/tests/test_types.py | 62
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/tests/test_utils.py | 51
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/types.py | 153
-rw-r--r-- import-layers/yocto-poky/meta/lib/oe/utils.py | 306
33 files changed, 9487 insertions, 0 deletions
diff --git a/import-layers/yocto-poky/meta/lib/oe/__init__.py b/import-layers/yocto-poky/meta/lib/oe/__init__.py
new file mode 100644
index 000000000..3ad9513f4
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/__init__.py
@@ -0,0 +1,2 @@
+from pkgutil import extend_path
+__path__ = extend_path(__path__, __name__)
diff --git a/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py b/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py
new file mode 100644
index 000000000..5395c768a
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py
@@ -0,0 +1,456 @@
+# Report significant differences in the buildhistory repository since a specific revision
+#
+# Copyright (C) 2012 Intel Corporation
+# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
+#
+# Note: requires GitPython 0.3.1+
+#
+# You can use this from the command line by running scripts/buildhistory-diff
+#
+
+import sys
+import os.path
+import difflib
+import git
+import re
+import bb.utils
+
+
+# How to display fields
+list_fields = ['DEPENDS', 'RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS', 'FILES', 'FILELIST', 'USER_CLASSES', 'IMAGE_CLASSES', 'IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
+list_order_fields = ['PACKAGES']
+defaultval_map = {'PKG': 'PKG', 'PKGE': 'PE', 'PKGV': 'PV', 'PKGR': 'PR'}
+numeric_fields = ['PKGSIZE', 'IMAGESIZE']
+# Fields to monitor
+monitor_fields = ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RREPLACES', 'RCONFLICTS', 'PACKAGES', 'FILELIST', 'PKGSIZE', 'IMAGESIZE', 'PKG']
+ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
+# Percentage change to alert for numeric fields
+monitor_numeric_threshold = 10
+# Image files to monitor (note that image-info.txt is handled separately)
+img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
+# Related context fields for reporting (note: PE, PV & PR are always reported for monitored package fields)
+related_fields = {}
+related_fields['RDEPENDS'] = ['DEPENDS']
+related_fields['RRECOMMENDS'] = ['DEPENDS']
+related_fields['FILELIST'] = ['FILES']
+related_fields['PKGSIZE'] = ['FILELIST']
+related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND']
+related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
+
+
+class ChangeRecord:
+ def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
+ self.path = path
+ self.fieldname = fieldname
+ self.oldvalue = oldvalue
+ self.newvalue = newvalue
+ self.monitored = monitored
+ self.related = []
+ self.filechanges = None
+
+ def __str__(self):
+ return self._str_internal(True)
+
+ def _str_internal(self, outer):
+ if outer:
+ if '/image-files/' in self.path:
+ prefix = '%s: ' % self.path.split('/image-files/')[0]
+ else:
+ prefix = '%s: ' % self.path
+ else:
+ prefix = ''
+
+ def pkglist_combine(depver):
+ pkglist = []
+ for k,v in depver.iteritems():
+ if v:
+ pkglist.append("%s (%s)" % (k,v))
+ else:
+ pkglist.append(k)
+ return pkglist
+
+ if self.fieldname in list_fields or self.fieldname in list_order_fields:
+ if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
+ (depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
+ aitems = pkglist_combine(depvera)
+ bitems = pkglist_combine(depverb)
+ else:
+ aitems = self.oldvalue.split()
+ bitems = self.newvalue.split()
+ removed = list(set(aitems) - set(bitems))
+ added = list(set(bitems) - set(aitems))
+
+ if removed or added:
+ if removed and not bitems:
+ out = '%s: removed all items "%s"' % (self.fieldname, ' '.join(removed))
+ else:
+ out = '%s:%s%s' % (self.fieldname, ' removed "%s"' % ' '.join(removed) if removed else '', ' added "%s"' % ' '.join(added) if added else '')
+ else:
+ out = '%s changed order' % self.fieldname
+ elif self.fieldname in numeric_fields:
+ aval = int(self.oldvalue or 0)
+ bval = int(self.newvalue or 0)
+ if aval != 0:
+ percentchg = ((bval - aval) / float(aval)) * 100
+ else:
+ percentchg = 100
+ out = '%s changed from %s to %s (%s%d%%)' % (self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg)
+ elif self.fieldname in defaultval_map:
+ out = '%s changed from %s to %s' % (self.fieldname, self.oldvalue, self.newvalue)
+ if self.fieldname == 'PKG' and '[default]' in self.newvalue:
+ out += ' - may indicate debian renaming failure'
+ elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
+ if self.oldvalue and self.newvalue:
+ out = '%s changed:\n ' % self.fieldname
+ elif self.newvalue:
+ out = '%s added:\n ' % self.fieldname
+ elif self.oldvalue:
+ out = '%s cleared:\n ' % self.fieldname
+ alines = self.oldvalue.splitlines()
+ blines = self.newvalue.splitlines()
+ diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
+ out += '\n '.join(list(diff)[2:])
+ out += '\n --'
+ elif self.fieldname in img_monitor_files or '/image-files/' in self.path:
+ fieldname = self.fieldname
+ if '/image-files/' in self.path:
+ fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
+ out = 'Changes to %s:\n ' % fieldname
+ else:
+ if outer:
+ prefix = 'Changes to %s ' % self.path
+ out = '(%s):\n ' % self.fieldname
+ if self.filechanges:
+ out += '\n '.join(['%s' % i for i in self.filechanges])
+ else:
+ alines = self.oldvalue.splitlines()
+ blines = self.newvalue.splitlines()
+ diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
+ out += '\n '.join(list(diff))
+ out += '\n --'
+ else:
+ out = '%s changed from "%s" to "%s"' % (self.fieldname, self.oldvalue, self.newvalue)
+
+ if self.related:
+ for chg in self.related:
+ if not outer and chg.fieldname in ['PE', 'PV', 'PR']:
+ continue
+ for line in chg._str_internal(False).splitlines():
+ out += '\n * %s' % line
+
+ return '%s%s' % (prefix, out)
+
+class FileChange:
+ changetype_add = 'A'
+ changetype_remove = 'R'
+ changetype_type = 'T'
+ changetype_perms = 'P'
+ changetype_ownergroup = 'O'
+ changetype_link = 'L'
+
+ def __init__(self, path, changetype, oldvalue = None, newvalue = None):
+ self.path = path
+ self.changetype = changetype
+ self.oldvalue = oldvalue
+ self.newvalue = newvalue
+
+ def _ftype_str(self, ftype):
+ if ftype == '-':
+ return 'file'
+ elif ftype == 'd':
+ return 'directory'
+ elif ftype == 'l':
+ return 'symlink'
+ elif ftype == 'c':
+ return 'char device'
+ elif ftype == 'b':
+ return 'block device'
+ elif ftype == 'p':
+ return 'fifo'
+ elif ftype == 's':
+ return 'socket'
+ else:
+ return 'unknown (%s)' % ftype
+
+ def __str__(self):
+ if self.changetype == self.changetype_add:
+ return '%s was added' % self.path
+ elif self.changetype == self.changetype_remove:
+ return '%s was removed' % self.path
+ elif self.changetype == self.changetype_type:
+ return '%s changed type from %s to %s' % (self.path, self._ftype_str(self.oldvalue), self._ftype_str(self.newvalue))
+ elif self.changetype == self.changetype_perms:
+ return '%s changed permissions from %s to %s' % (self.path, self.oldvalue, self.newvalue)
+ elif self.changetype == self.changetype_ownergroup:
+ return '%s changed owner/group from %s to %s' % (self.path, self.oldvalue, self.newvalue)
+ elif self.changetype == self.changetype_link:
+ return '%s changed symlink target from %s to %s' % (self.path, self.oldvalue, self.newvalue)
+ else:
+ return '%s changed (unknown)' % self.path
+
+
+def blob_to_dict(blob):
+ alines = blob.data_stream.read().splitlines()
+ adict = {}
+ for line in alines:
+ splitv = [i.strip() for i in line.split('=',1)]
+ if len(splitv) > 1:
+ adict[splitv[0]] = splitv[1]
+ return adict
+
+
+def file_list_to_dict(lines):
+ adict = {}
+ for line in lines:
+ # Leave the last few fields intact so we handle file names containing spaces
+ splitv = line.split(None,4)
+ # Grab the path and remove the leading .
+ path = splitv[4][1:].strip()
+ # Handle symlinks
+ if(' -> ' in path):
+ target = path.split(' -> ')[1]
+ path = path.split(' -> ')[0]
+ adict[path] = splitv[0:3] + [target]
+ else:
+ adict[path] = splitv[0:3]
+ return adict
+
+
+def compare_file_lists(alines, blines):
+ adict = file_list_to_dict(alines)
+ bdict = file_list_to_dict(blines)
+ filechanges = []
+ for path, splitv in adict.iteritems():
+ newsplitv = bdict.pop(path, None)
+ if newsplitv:
+ # Check type
+ oldvalue = splitv[0][0]
+ newvalue = newsplitv[0][0]
+ if oldvalue != newvalue:
+ filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
+ # Check permissions
+ oldvalue = splitv[0][1:]
+ newvalue = newsplitv[0][1:]
+ if oldvalue != newvalue:
+ filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
+ # Check owner/group
+ oldvalue = '%s/%s' % (splitv[1], splitv[2])
+ newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
+ if oldvalue != newvalue:
+ filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
+ # Check symlink target
+ if newsplitv[0][0] == 'l':
+ if len(splitv) > 3:
+ oldvalue = splitv[3]
+ else:
+ oldvalue = None
+ newvalue = newsplitv[3]
+ if oldvalue != newvalue:
+ filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
+ else:
+ filechanges.append(FileChange(path, FileChange.changetype_remove))
+
+ # Whatever is left over has been added
+ for path in bdict:
+ filechanges.append(FileChange(path, FileChange.changetype_add))
+
+ return filechanges
+
+
+def compare_lists(alines, blines):
+ removed = list(set(alines) - set(blines))
+ added = list(set(blines) - set(alines))
+
+ filechanges = []
+ for pkg in removed:
+ filechanges.append(FileChange(pkg, FileChange.changetype_remove))
+ for pkg in added:
+ filechanges.append(FileChange(pkg, FileChange.changetype_add))
+
+ return filechanges
+
+
+def compare_pkg_lists(astr, bstr):
+ depvera = bb.utils.explode_dep_versions2(astr)
+ depverb = bb.utils.explode_dep_versions2(bstr)
+
+ # Strip out changes where the version has increased
+ remove = []
+ for k in depvera:
+ if k in depverb:
+ dva = depvera[k]
+ dvb = depverb[k]
+ if dva and dvb and len(dva) == len(dvb):
+ # Since length is the same, sort so that prefixes (e.g. >=) will line up
+ dva.sort()
+ dvb.sort()
+ removeit = True
+ for dvai, dvbi in zip(dva, dvb):
+ if dvai != dvbi:
+ aiprefix = dvai.split(' ')[0]
+ biprefix = dvbi.split(' ')[0]
+ if aiprefix == biprefix and aiprefix in ['>=', '=']:
+ if bb.utils.vercmp(bb.utils.split_version(dvai), bb.utils.split_version(dvbi)) > 0:
+ removeit = False
+ break
+ else:
+ removeit = False
+ break
+ if removeit:
+ remove.append(k)
+
+ for k in remove:
+ depvera.pop(k)
+ depverb.pop(k)
+
+ return (depvera, depverb)
+
+
+def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
+ adict = blob_to_dict(ablob)
+ bdict = blob_to_dict(bblob)
+
+ pkgname = os.path.basename(path)
+
+ defaultvals = {}
+ defaultvals['PKG'] = pkgname
+ defaultvals['PKGE'] = '0'
+
+ changes = []
+ keys = list(set(adict.keys()) | set(bdict.keys()) | set(defaultval_map.keys()))
+ for key in keys:
+ astr = adict.get(key, '')
+ bstr = bdict.get(key, '')
+ if key in ver_monitor_fields:
+ monitored = report_ver or astr or bstr
+ else:
+ monitored = key in monitor_fields
+ mapped_key = defaultval_map.get(key, '')
+ if mapped_key:
+ if not astr:
+ astr = '%s [default]' % adict.get(mapped_key, defaultvals.get(key, ''))
+ if not bstr:
+ bstr = '%s [default]' % bdict.get(mapped_key, defaultvals.get(key, ''))
+
+ if astr != bstr:
+ if (not report_all) and key in numeric_fields:
+ aval = int(astr or 0)
+ bval = int(bstr or 0)
+ if aval != 0:
+ percentchg = ((bval - aval) / float(aval)) * 100
+ else:
+ percentchg = 100
+ if abs(percentchg) < monitor_numeric_threshold:
+ continue
+ elif (not report_all) and key in list_fields:
+ if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '':
+ continue
+ if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
+ (depvera, depverb) = compare_pkg_lists(astr, bstr)
+ if depvera == depverb:
+ continue
+ alist = astr.split()
+ alist.sort()
+ blist = bstr.split()
+ blist.sort()
+ # We don't care about the removal of self-dependencies
+ if pkgname in alist and not pkgname in blist:
+ alist.remove(pkgname)
+ if ' '.join(alist) == ' '.join(blist):
+ continue
+
+ chg = ChangeRecord(path, key, astr, bstr, monitored)
+ changes.append(chg)
+ return changes
+
+
+def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False):
+ repo = git.Repo(repopath)
+ assert repo.bare == False
+ commit = repo.commit(revision1)
+ diff = commit.diff(revision2)
+
+ changes = []
+ for d in diff.iter_change_type('M'):
+ path = os.path.dirname(d.a_blob.path)
+ if path.startswith('packages/'):
+ filename = os.path.basename(d.a_blob.path)
+ if filename == 'latest':
+ changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
+ elif filename.startswith('latest.'):
+ chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
+ changes.append(chg)
+ elif path.startswith('images/'):
+ filename = os.path.basename(d.a_blob.path)
+ if filename in img_monitor_files:
+ if filename == 'files-in-image.txt':
+ alines = d.a_blob.data_stream.read().splitlines()
+ blines = d.b_blob.data_stream.read().splitlines()
+ filechanges = compare_file_lists(alines,blines)
+ if filechanges:
+ chg = ChangeRecord(path, filename, None, None, True)
+ chg.filechanges = filechanges
+ changes.append(chg)
+ elif filename == 'installed-package-names.txt':
+ alines = d.a_blob.data_stream.read().splitlines()
+ blines = d.b_blob.data_stream.read().splitlines()
+ filechanges = compare_lists(alines,blines)
+ if filechanges:
+ chg = ChangeRecord(path, filename, None, None, True)
+ chg.filechanges = filechanges
+ changes.append(chg)
+ else:
+ chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
+ changes.append(chg)
+ elif filename == 'image-info.txt':
+ changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
+ elif '/image-files/' in path:
+ chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
+ changes.append(chg)
+
+ # Look for added preinst/postinst/prerm/postrm
+ # (without reporting newly added recipes)
+ addedpkgs = []
+ addedchanges = []
+ for d in diff.iter_change_type('A'):
+ path = os.path.dirname(d.b_blob.path)
+ if path.startswith('packages/'):
+ filename = os.path.basename(d.b_blob.path)
+ if filename == 'latest':
+ addedpkgs.append(path)
+ elif filename.startswith('latest.'):
+ chg = ChangeRecord(path, filename[7:], '', d.b_blob.data_stream.read(), True)
+ addedchanges.append(chg)
+ for chg in addedchanges:
+ found = False
+ for pkg in addedpkgs:
+ if chg.path.startswith(pkg):
+ found = True
+ break
+ if not found:
+ changes.append(chg)
+
+ # Look for cleared preinst/postinst/prerm/postrm
+ for d in diff.iter_change_type('D'):
+ path = os.path.dirname(d.a_blob.path)
+ if path.startswith('packages/'):
+ filename = os.path.basename(d.a_blob.path)
+ if filename != 'latest' and filename.startswith('latest.'):
+ chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read(), '', True)
+ changes.append(chg)
+
+ # Link related changes
+ for chg in changes:
+ if chg.monitored:
+ for chg2 in changes:
+ # (Check dirname in the case of fields from recipe info files)
+ if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
+ if chg2.fieldname in related_fields.get(chg.fieldname, []):
+ chg.related.append(chg2)
+ elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
+ chg.related.append(chg2)
+
+ if report_all:
+ return changes
+ else:
+ return [chg for chg in changes if chg.monitored]
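The supported front end for this module is scripts/buildhistory-diff; as an illustration only, a minimal driver might look like the sketch below (the repository path is a placeholder):

    # Sketch: report monitored changes between the two most recent commits
    # of a buildhistory repository (Python 2, matching the module above).
    import oe.buildhistory_analysis

    changes = oe.buildhistory_analysis.process_changes(
        '/path/to/buildhistory',  # hypothetical buildhistory checkout
        'HEAD~1')                 # revision2 defaults to 'HEAD'
    for chg in changes:
        print(str(chg))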
diff --git a/import-layers/yocto-poky/meta/lib/oe/cachedpath.py b/import-layers/yocto-poky/meta/lib/oe/cachedpath.py
new file mode 100644
index 000000000..0840cc4c3
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/cachedpath.py
@@ -0,0 +1,233 @@
+#
+# Based on standard Python library functions, but avoiding repeated
+# stat calls. It's assumed the files will not change out from under us,
+# so we can cache the stat results.
+#
+
+import os
+import errno
+import stat as statmod
+
+class CachedPath(object):
+ def __init__(self):
+ self.statcache = {}
+ self.lstatcache = {}
+ self.normpathcache = {}
+ return
+
+ def updatecache(self, x):
+ x = self.normpath(x)
+ if x in self.statcache:
+ del self.statcache[x]
+ if x in self.lstatcache:
+ del self.lstatcache[x]
+
+ def normpath(self, path):
+ if path in self.normpathcache:
+ return self.normpathcache[path]
+ newpath = os.path.normpath(path)
+ self.normpathcache[path] = newpath
+ return newpath
+
+ def _callstat(self, path):
+ if path in self.statcache:
+ return self.statcache[path]
+ try:
+ st = os.stat(path)
+ self.statcache[path] = st
+ return st
+ except os.error:
+ self.statcache[path] = False
+ return False
+
+    # We might as well call lstat first, and then only
+    # call stat as well in the symbolic link case,
+    # since this turns out to perform much better
+    # in real-world usage of this cache
+ def callstat(self, path):
+ path = self.normpath(path)
+ self.calllstat(path)
+ return self.statcache[path]
+
+ def calllstat(self, path):
+ path = self.normpath(path)
+ if path in self.lstatcache:
+ return self.lstatcache[path]
+ #bb.error("LStatpath:" + path)
+ try:
+ lst = os.lstat(path)
+ self.lstatcache[path] = lst
+ if not statmod.S_ISLNK(lst.st_mode):
+ self.statcache[path] = lst
+ else:
+ self._callstat(path)
+ return lst
+ except (os.error, AttributeError):
+ self.lstatcache[path] = False
+ self.statcache[path] = False
+ return False
+
+    # This follows symbolic links, so both islink() and isdir() can be true
+    # for the same path on systems that support symlinks
+ def isfile(self, path):
+ """Test whether a path is a regular file"""
+ st = self.callstat(path)
+ if not st:
+ return False
+ return statmod.S_ISREG(st.st_mode)
+
+ # Is a path a directory?
+ # This follows symbolic links, so both islink() and isdir()
+ # can be true for the same path on systems that support symlinks
+ def isdir(self, s):
+ """Return true if the pathname refers to an existing directory."""
+ st = self.callstat(s)
+ if not st:
+ return False
+ return statmod.S_ISDIR(st.st_mode)
+
+ def islink(self, path):
+ """Test whether a path is a symbolic link"""
+ st = self.calllstat(path)
+ if not st:
+ return False
+ return statmod.S_ISLNK(st.st_mode)
+
+ # Does a path exist?
+ # This is false for dangling symbolic links on systems that support them.
+ def exists(self, path):
+ """Test whether a path exists. Returns False for broken symbolic links"""
+ if self.callstat(path):
+ return True
+ return False
+
+ def lexists(self, path):
+ """Test whether a path exists. Returns True for broken symbolic links"""
+ if self.calllstat(path):
+ return True
+ return False
+
+ def stat(self, path):
+ return self.callstat(path)
+
+ def lstat(self, path):
+ return self.calllstat(path)
+
+ def walk(self, top, topdown=True, onerror=None, followlinks=False):
+ # Matches os.walk, not os.path.walk()
+
+ # We may not have read permission for top, in which case we can't
+ # get a list of the files the directory contains. os.path.walk
+ # always suppressed the exception then, rather than blow up for a
+ # minor reason when (say) a thousand readable directories are still
+ # left to visit. That logic is copied here.
+ try:
+ names = os.listdir(top)
+ except os.error as err:
+ if onerror is not None:
+ onerror(err)
+ return
+
+ dirs, nondirs = [], []
+ for name in names:
+ if self.isdir(os.path.join(top, name)):
+ dirs.append(name)
+ else:
+ nondirs.append(name)
+
+ if topdown:
+ yield top, dirs, nondirs
+ for name in dirs:
+ new_path = os.path.join(top, name)
+ if followlinks or not self.islink(new_path):
+ for x in self.walk(new_path, topdown, onerror, followlinks):
+ yield x
+ if not topdown:
+ yield top, dirs, nondirs
+
+ ## realpath() related functions
+ def __is_path_below(self, file, root):
+ return (file + os.path.sep).startswith(root)
+
+ def __realpath_rel(self, start, rel_path, root, loop_cnt, assume_dir):
+ """Calculates real path of symlink 'start' + 'rel_path' below
+ 'root'; no part of 'start' below 'root' must contain symlinks. """
+ have_dir = True
+
+ for d in rel_path.split(os.path.sep):
+ if not have_dir and not assume_dir:
+ raise OSError(errno.ENOENT, "no such directory %s" % start)
+
+ if d == os.path.pardir: # '..'
+ if len(start) >= len(root):
+ # do not follow '..' before root
+ start = os.path.dirname(start)
+ else:
+ # emit warning?
+ pass
+ else:
+ (start, have_dir) = self.__realpath(os.path.join(start, d),
+ root, loop_cnt, assume_dir)
+
+ assert(self.__is_path_below(start, root))
+
+ return start
+
+ def __realpath(self, file, root, loop_cnt, assume_dir):
+ while self.islink(file) and len(file) >= len(root):
+ if loop_cnt == 0:
+ raise OSError(errno.ELOOP, file)
+
+ loop_cnt -= 1
+ target = os.path.normpath(os.readlink(file))
+
+ if not os.path.isabs(target):
+ tdir = os.path.dirname(file)
+ assert(self.__is_path_below(tdir, root))
+ else:
+ tdir = root
+
+ file = self.__realpath_rel(tdir, target, root, loop_cnt, assume_dir)
+
+ try:
+ is_dir = self.isdir(file)
+ except:
+ is_dir = False
+
+ return (file, is_dir)
+
+ def realpath(self, file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
+ """ Returns the canonical path of 'file' with assuming a
+ toplevel 'root' directory. When 'use_physdir' is set, all
+ preceding path components of 'file' will be resolved first;
+ this flag should be set unless it is guaranteed that there is
+ no symlink in the path. When 'assume_dir' is not set, missing
+ path components will raise an ENOENT error"""
+
+ root = os.path.normpath(root)
+ file = os.path.normpath(file)
+
+ if not root.endswith(os.path.sep):
+ # letting root end with '/' makes some things easier
+ root = root + os.path.sep
+
+ if not self.__is_path_below(file, root):
+ raise OSError(errno.EINVAL, "file '%s' is not below root" % file)
+
+ try:
+ if use_physdir:
+ file = self.__realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
+ else:
+ file = self.__realpath(file, root, loop_cnt, assume_dir)[0]
+ except OSError as e:
+ if e.errno == errno.ELOOP:
+            # Make ELOOP more readable; without catching it here, a
+            # backtrace with 100s of OSError exceptions would be printed
+            raise OSError(errno.ELOOP,
+                "too many recursions while resolving '%s'; loop in '%s'" %
+                (file, e.strerror))
+
+ raise
+
+ return file
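As a usage illustration, repeated queries through one CachedPath instance reuse the cached stat()/lstat() results instead of touching the filesystem again (the paths below are examples only):

    import oe.cachedpath

    cpath = oe.cachedpath.CachedPath()
    if cpath.isdir('/tmp'):                  # first query stats the path
        for root, dirs, files in cpath.walk('/tmp'):
            pass                             # per-entry isdir() hits the cache
    cpath.updatecache('/tmp')                # invalidate if the path may have changed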
diff --git a/import-layers/yocto-poky/meta/lib/oe/classextend.py b/import-layers/yocto-poky/meta/lib/oe/classextend.py
new file mode 100644
index 000000000..5107ecde2
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/classextend.py
@@ -0,0 +1,120 @@
+import bb
+
+class ClassExtender(object):
+ def __init__(self, extname, d):
+ self.extname = extname
+ self.d = d
+ self.pkgs_mapping = []
+
+ def extend_name(self, name):
+ if name.startswith("kernel-") or name == "virtual/kernel":
+ return name
+ if name.startswith("rtld"):
+ return name
+ if name.endswith("-crosssdk"):
+ return name
+ if name.endswith("-" + self.extname):
+ name = name.replace("-" + self.extname, "")
+ if name.startswith("virtual/"):
+ subs = name.split("/", 1)[1]
+ if not subs.startswith(self.extname):
+ return "virtual/" + self.extname + "-" + subs
+ return name
+ if not name.startswith(self.extname):
+ return self.extname + "-" + name
+ return name
+
+ def map_variable(self, varname, setvar = True):
+ var = self.d.getVar(varname, True)
+ if not var:
+ return ""
+ var = var.split()
+ newvar = []
+ for v in var:
+ newvar.append(self.extend_name(v))
+ newdata = " ".join(newvar)
+ if setvar:
+ self.d.setVar(varname, newdata)
+ return newdata
+
+ def map_regexp_variable(self, varname, setvar = True):
+ var = self.d.getVar(varname, True)
+ if not var:
+ return ""
+ var = var.split()
+ newvar = []
+ for v in var:
+ if v.startswith("^" + self.extname):
+ newvar.append(v)
+ elif v.startswith("^"):
+ newvar.append("^" + self.extname + "-" + v[1:])
+ else:
+ newvar.append(self.extend_name(v))
+ newdata = " ".join(newvar)
+ if setvar:
+ self.d.setVar(varname, newdata)
+ return newdata
+
+ def map_depends(self, dep):
+ if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('cross-canadian' in dep) or ('-crosssdk-' in dep):
+ return dep
+ else:
+            # Do not extend names that already have a multilib prefix
+ var = self.d.getVar("MULTILIB_VARIANTS", True)
+ if var:
+ var = var.split()
+ for v in var:
+ if dep.startswith(v):
+ return dep
+ return self.extend_name(dep)
+
+ def map_depends_variable(self, varname, suffix = ""):
+ # We need to preserve EXTENDPKGV so it can be expanded correctly later
+ if suffix:
+ varname = varname + "_" + suffix
+ orig = self.d.getVar("EXTENDPKGV", False)
+ self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
+ deps = self.d.getVar(varname, True)
+ if not deps:
+ self.d.setVar("EXTENDPKGV", orig)
+ return
+ deps = bb.utils.explode_dep_versions2(deps)
+ newdeps = {}
+ for dep in deps:
+ newdeps[self.map_depends(dep)] = deps[dep]
+
+ self.d.setVar(varname, bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}"))
+ self.d.setVar("EXTENDPKGV", orig)
+
+ def map_packagevars(self):
+ for pkg in (self.d.getVar("PACKAGES", True).split() + [""]):
+ self.map_depends_variable("RDEPENDS", pkg)
+ self.map_depends_variable("RRECOMMENDS", pkg)
+ self.map_depends_variable("RSUGGESTS", pkg)
+ self.map_depends_variable("RPROVIDES", pkg)
+ self.map_depends_variable("RREPLACES", pkg)
+ self.map_depends_variable("RCONFLICTS", pkg)
+ self.map_depends_variable("PKG", pkg)
+
+ def rename_packages(self):
+ for pkg in (self.d.getVar("PACKAGES", True) or "").split():
+ if pkg.startswith(self.extname):
+ self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
+ continue
+ self.pkgs_mapping.append([pkg, self.extend_name(pkg)])
+
+ self.d.setVar("PACKAGES", " ".join([row[1] for row in self.pkgs_mapping]))
+
+ def rename_package_variables(self, variables):
+ for pkg_mapping in self.pkgs_mapping:
+ for subs in variables:
+ self.d.renameVar("%s_%s" % (subs, pkg_mapping[0]), "%s_%s" % (subs, pkg_mapping[1]))
+
+class NativesdkClassExtender(ClassExtender):
+ def map_depends(self, dep):
+ if dep.startswith(self.extname):
+ return dep
+ if dep.endswith(("-gcc-initial", "-gcc", "-g++")):
+ return dep + "-crosssdk"
+ elif dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
+ return dep
+ else:
+ return self.extend_name(dep)
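To make the renaming rules concrete, a sketch assuming a multilib-style extension named 'lib32' and a BitBake datastore 'd' (as available in recipe context):

    import oe.classextend

    ext = oe.classextend.ClassExtender('lib32', d)
    ext.extend_name('zlib')               # -> 'lib32-zlib'
    ext.extend_name('virtual/libc')       # -> 'virtual/lib32-libc'
    ext.extend_name('kernel-module-foo')  # -> unchanged (kernel-* is skipped)
    ext.map_depends('quilt-native')       # -> unchanged (-native is skipped)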
diff --git a/import-layers/yocto-poky/meta/lib/oe/classutils.py b/import-layers/yocto-poky/meta/lib/oe/classutils.py
new file mode 100644
index 000000000..58188fdd6
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/classutils.py
@@ -0,0 +1,43 @@
+class ClassRegistry(type):
+ """Maintain a registry of classes, indexed by name.
+
+Note that this implementation requires that the names be unique, as it uses
+a dictionary to hold the classes by name.
+
+The name in the registry can be overridden via the 'name' attribute of the
+class, and the 'priority' attribute controls priority. The prioritized()
+method returns the registered classes in priority order.
+
+Subclasses of ClassRegistry may define an 'implemented' property to exert
+control over whether the class will be added to the registry (e.g. to keep
+abstract base classes out of the registry)."""
+ priority = 0
+ class __metaclass__(type):
+ """Give each ClassRegistry their own registry"""
+ def __init__(cls, name, bases, attrs):
+ cls.registry = {}
+ type.__init__(cls, name, bases, attrs)
+
+ def __init__(cls, name, bases, attrs):
+ super(ClassRegistry, cls).__init__(name, bases, attrs)
+ try:
+ if not cls.implemented:
+ return
+ except AttributeError:
+ pass
+
+ try:
+ cls.name
+ except AttributeError:
+ cls.name = name
+ cls.registry[cls.name] = cls
+
+ @classmethod
+ def prioritized(tcls):
+ return sorted(tcls.registry.values(),
+ key=lambda v: v.priority, reverse=True)
+
+ def unregister(cls):
+ for key in cls.registry.keys():
+ if cls.registry[key] is cls:
+ del cls.registry[key]
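A sketch of the registration pattern (Python 2 __metaclass__ syntax to match the module; the terminal classes here are invented for illustration, though oe.terminal uses this same mechanism):

    import oe.classutils

    class Terminal(object):
        __metaclass__ = oe.classutils.ClassRegistry
        implemented = False       # abstract base, kept out of the registry

    class XTerminal(Terminal):
        priority = 3
        implemented = True        # registered under the name 'XTerminal'

    for cls in Terminal.prioritized():  # highest priority first
        print('%s (priority %d)' % (cls.name, cls.priority))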
diff --git a/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py b/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py
new file mode 100644
index 000000000..7b9a0ee06
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py
@@ -0,0 +1,217 @@
+# This class should provide easy access to the different aspects of the
+# buildsystem such as layers, bitbake location, etc.
+import os
+import stat
+import shutil
+
+import bb
+import bb.utils
+
+def _smart_copy(src, dest):
+ # smart_copy will choose the correct function depending on whether the
+ # source is a file or a directory.
+ mode = os.stat(src).st_mode
+ if stat.S_ISDIR(mode):
+ shutil.copytree(src, dest, symlinks=True, ignore=shutil.ignore_patterns('.git'))
+ else:
+ shutil.copyfile(src, dest)
+ shutil.copymode(src, dest)
+
+class BuildSystem(object):
+ def __init__(self, context, d):
+ self.d = d
+ self.context = context
+ self.layerdirs = d.getVar('BBLAYERS', True).split()
+ self.layers_exclude = (d.getVar('SDK_LAYERS_EXCLUDE', True) or "").split()
+
+ def copy_bitbake_and_layers(self, destdir, workspace_name=None):
+ # Copy in all metadata layers + bitbake (as repositories)
+ layers_copied = []
+ bb.utils.mkdirhier(destdir)
+ layers = list(self.layerdirs)
+
+ corebase = self.d.getVar('COREBASE', True)
+ layers.append(corebase)
+
+ # Exclude layers
+ for layer_exclude in self.layers_exclude:
+ if layer_exclude in layers:
+ layers.remove(layer_exclude)
+
+ workspace_newname = workspace_name
+ if workspace_newname:
+ layernames = [os.path.basename(layer) for layer in layers]
+ extranum = 0
+ while workspace_newname in layernames:
+ extranum += 1
+ workspace_newname = '%s-%d' % (workspace_name, extranum)
+
+ corebase_files = self.d.getVar('COREBASE_FILES', True).split()
+ corebase_files = [corebase + '/' +x for x in corebase_files]
+ # Make sure bitbake goes in
+ bitbake_dir = bb.__file__.rsplit('/', 3)[0]
+ corebase_files.append(bitbake_dir)
+
+ for layer in layers:
+ layerconf = os.path.join(layer, 'conf', 'layer.conf')
+ layernewname = os.path.basename(layer)
+ workspace = False
+ if os.path.exists(layerconf):
+ with open(layerconf, 'r') as f:
+ if f.readline().startswith("# ### workspace layer auto-generated by devtool ###"):
+ if workspace_newname:
+ layernewname = workspace_newname
+ workspace = True
+ else:
+ bb.plain("NOTE: Excluding local workspace layer %s from %s" % (layer, self.context))
+ continue
+
+ # If the layer was already under corebase, leave it there
+ # since layers such as meta have issues when moved.
+ layerdestpath = destdir
+ if corebase == os.path.dirname(layer):
+ layerdestpath += '/' + os.path.basename(corebase)
+ layerdestpath += '/' + layernewname
+
+ layer_relative = os.path.relpath(layerdestpath,
+ destdir)
+ layers_copied.append(layer_relative)
+
+ # Treat corebase as special since it typically will contain
+ # build directories or other custom items.
+ if corebase == layer:
+ bb.utils.mkdirhier(layerdestpath)
+ for f in corebase_files:
+ f_basename = os.path.basename(f)
+ destname = os.path.join(layerdestpath, f_basename)
+ _smart_copy(f, destname)
+ else:
+ if os.path.exists(layerdestpath):
+ bb.note("Skipping layer %s, already handled" % layer)
+ else:
+ _smart_copy(layer, layerdestpath)
+
+ if workspace:
+                # Make some adjustments to the original workspace layer
+ # Drop sources (recipe tasks will be locked, so we don't need them)
+ srcdir = os.path.join(layerdestpath, 'sources')
+ if os.path.isdir(srcdir):
+ shutil.rmtree(srcdir)
+ # Drop all bbappends except the one for the image the SDK is being built for
+ # (because of externalsrc, the workspace bbappends will interfere with the
+ # locked signatures if present, and we don't need them anyway)
+ image_bbappend = os.path.splitext(os.path.basename(self.d.getVar('FILE', True)))[0] + '.bbappend'
+ appenddir = os.path.join(layerdestpath, 'appends')
+ if os.path.isdir(appenddir):
+ for fn in os.listdir(appenddir):
+ if fn == image_bbappend:
+ continue
+ else:
+ os.remove(os.path.join(appenddir, fn))
+ # Drop README
+ readme = os.path.join(layerdestpath, 'README')
+ if os.path.exists(readme):
+ os.remove(readme)
+ # Filter out comments in layer.conf and change layer name
+ layerconf = os.path.join(layerdestpath, 'conf', 'layer.conf')
+ with open(layerconf, 'r') as f:
+ origlines = f.readlines()
+ with open(layerconf, 'w') as f:
+ for line in origlines:
+ if line.startswith('#'):
+ continue
+ line = line.replace('workspacelayer', workspace_newname)
+ f.write(line)
+
+ return layers_copied
+
+def generate_locked_sigs(sigfile, d):
+ bb.utils.mkdirhier(os.path.dirname(sigfile))
+ depd = d.getVar('BB_TASKDEPDATA', False)
+ tasks = ['%s.%s' % (v[2], v[1]) for v in depd.itervalues()]
+ bb.parse.siggen.dump_lockedsigs(sigfile, tasks)
+
+def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, pruned_output):
+ with open(lockedsigs, 'r') as infile:
+ bb.utils.mkdirhier(os.path.dirname(pruned_output))
+ with open(pruned_output, 'w') as f:
+ invalue = False
+ for line in infile:
+ if invalue:
+ if line.endswith('\\\n'):
+ splitval = line.strip().split(':')
+ if not splitval[1] in excluded_tasks and not splitval[0] in excluded_targets:
+ f.write(line)
+ else:
+ f.write(line)
+ invalue = False
+ elif line.startswith('SIGGEN_LOCKEDSIGS'):
+ invalue = True
+ f.write(line)
+
+def merge_lockedsigs(copy_tasks, lockedsigs_main, lockedsigs_extra, merged_output, copy_output):
+ merged = {}
+ arch_order = []
+ with open(lockedsigs_main, 'r') as f:
+ invalue = None
+ for line in f:
+ if invalue:
+ if line.endswith('\\\n'):
+ merged[invalue].append(line)
+ else:
+ invalue = None
+ elif line.startswith('SIGGEN_LOCKEDSIGS_t-'):
+ invalue = line[18:].split('=', 1)[0].rstrip()
+ merged[invalue] = []
+ arch_order.append(invalue)
+
+ with open(lockedsigs_extra, 'r') as f:
+ invalue = None
+ tocopy = {}
+ for line in f:
+ if invalue:
+ if line.endswith('\\\n'):
+ if not line in merged[invalue]:
+ target, task = line.strip().split(':')[:2]
+ if not copy_tasks or task in copy_tasks:
+ tocopy[invalue].append(line)
+ merged[invalue].append(line)
+ else:
+ invalue = None
+ elif line.startswith('SIGGEN_LOCKEDSIGS_t-'):
+ invalue = line[18:].split('=', 1)[0].rstrip()
+ if not invalue in merged:
+ merged[invalue] = []
+ arch_order.append(invalue)
+ tocopy[invalue] = []
+
+ def write_sigs_file(fn, types, sigs):
+ fulltypes = []
+ bb.utils.mkdirhier(os.path.dirname(fn))
+ with open(fn, 'w') as f:
+ for typename in types:
+ lines = sigs[typename]
+ if lines:
+ f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % typename)
+ for line in lines:
+ f.write(line)
+ f.write(' "\n')
+ fulltypes.append(typename)
+ f.write('SIGGEN_LOCKEDSIGS_TYPES = "%s"\n' % ' '.join(fulltypes))
+
+ write_sigs_file(copy_output, tocopy.keys(), tocopy)
+ if merged_output:
+ write_sigs_file(merged_output, arch_order, merged)
+
+def create_locked_sstate_cache(lockedsigs, input_sstate_cache, output_sstate_cache, d, fixedlsbstring=""):
+ bb.note('Generating sstate-cache...')
+
+ nativelsbstring = d.getVar('NATIVELSBSTRING', True)
+ bb.process.run("gen-lockedsig-cache %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring))
+ if fixedlsbstring:
+ nativedir = output_sstate_cache + '/' + nativelsbstring
+ if os.path.isdir(nativedir):
+ destdir = os.path.join(output_sstate_cache, fixedlsbstring)
+ bb.utils.mkdirhier(destdir)
+
+ dirlist = os.listdir(nativedir)
+ for i in dirlist:
+ src = os.path.join(nativedir, i)
+ dest = os.path.join(destdir, i)
+ os.rename(src, dest)
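These helpers feed the extensible SDK: task signatures are dumped, optionally pruned or merged, and then used to filter an sstate cache. A rough sketch of the flow, with illustrative paths and a datastore 'd':

    import oe.copy_buildsystem

    # Dump locked signatures for every task in the current build
    oe.copy_buildsystem.generate_locked_sigs('locked-sigs/locked-sigs.inc', d)

    # Drop some tasks from the dump (the exclusion lists are examples)
    oe.copy_buildsystem.prune_lockedsigs(['do_fetch', 'do_unpack'], [],
                                         'locked-sigs/locked-sigs.inc',
                                         'locked-sigs/locked-sigs-pruned.inc')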
diff --git a/import-layers/yocto-poky/meta/lib/oe/data.py b/import-layers/yocto-poky/meta/lib/oe/data.py
new file mode 100644
index 000000000..e49572177
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/data.py
@@ -0,0 +1,17 @@
+import bb
+import oe.maketype
+
+def typed_value(key, d):
+ """Construct a value for the specified metadata variable, using its flags
+ to determine the type and parameters for construction."""
+ var_type = d.getVarFlag(key, 'type', True)
+ flags = d.getVarFlags(key)
+ if flags is not None:
+ flags = dict((flag, d.expand(value))
+ for flag, value in flags.iteritems())
+ else:
+ flags = {}
+
+ try:
+ return oe.maketype.create(d.getVar(key, True) or '', var_type, **flags)
+ except (TypeError, ValueError), exc:
+ bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
diff --git a/import-layers/yocto-poky/meta/lib/oe/distro_check.py b/import-layers/yocto-poky/meta/lib/oe/distro_check.py
new file mode 100644
index 000000000..8655a6fc1
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/distro_check.py
@@ -0,0 +1,399 @@
+import os
+import bb
+from contextlib import contextmanager
+@contextmanager
+def create_socket(url, d):
+ import urllib
+ socket = urllib.urlopen(url, proxies=get_proxies(d))
+ try:
+ yield socket
+ finally:
+ socket.close()
+
+def get_proxies(d):
+ proxies = {}
+ for key in ['http', 'https', 'ftp', 'ftps', 'no', 'all']:
+ proxy = d.getVar(key + '_proxy', True)
+ if proxy:
+ proxies[key] = proxy
+ return proxies
+
+def get_links_from_url(url, d):
+ "Return all the href links found on the web location"
+
+ import sgmllib
+
+ class LinksParser(sgmllib.SGMLParser):
+ def parse(self, s):
+ "Parse the given string 's'."
+ self.feed(s)
+ self.close()
+
+ def __init__(self, verbose=0):
+ "Initialise an object passing 'verbose' to the superclass."
+ sgmllib.SGMLParser.__init__(self, verbose)
+ self.hyperlinks = []
+
+ def start_a(self, attributes):
+ "Process a hyperlink and its 'attributes'."
+ for name, value in attributes:
+ if name == "href":
+ self.hyperlinks.append(value.strip('/'))
+
+ def get_hyperlinks(self):
+ "Return the list of hyperlinks."
+ return self.hyperlinks
+
+ with create_socket(url,d) as sock:
+ webpage = sock.read()
+
+ linksparser = LinksParser()
+ linksparser.parse(webpage)
+ return linksparser.get_hyperlinks()
+
+def find_latest_numeric_release(url, d):
+ "Find the latest listed numeric release on the given url"
+ max=0
+ maxstr=""
+ for link in get_links_from_url(url, d):
+ try:
+ release = float(link)
+ except:
+ release = 0
+ if release > max:
+ max = release
+ maxstr = link
+ return maxstr
+
+def is_src_rpm(name):
+ "Check if the link is pointing to a src.rpm file"
+ if name[-8:] == ".src.rpm":
+ return True
+ else:
+ return False
+
+def package_name_from_srpm(srpm):
+ "Strip out the package name from the src.rpm filename"
+ strings = srpm.split('-')
+ package_name = strings[0]
+ for i in range(1, len (strings) - 1):
+ str = strings[i]
+ if not str[0].isdigit():
+ package_name += '-' + str
+ return package_name
+
+def clean_package_list(package_list):
+ "Removes multiple entries of packages and sorts the list"
+ set = {}
+ map(set.__setitem__, package_list, [])
+ return set.keys()
+
+
+def get_latest_released_meego_source_package_list(d):
+ "Returns list of all the name os packages in the latest meego distro"
+
+ package_names = []
+ try:
+ f = open("/tmp/Meego-1.1", "r")
+ for line in f:
+ package_names.append(line[:-1] + ":" + "main") # Also strip the '\n' at the end
+ except IOError: pass
+ package_list=clean_package_list(package_names)
+ return "1.0", package_list
+
+def get_source_package_list_from_url(url, section, d):
+ "Return a sectioned list of package names from a URL list"
+
+ bb.note("Reading %s: %s" % (url, section))
+ links = get_links_from_url(url, d)
+ srpms = filter(is_src_rpm, links)
+ names_list = map(package_name_from_srpm, srpms)
+
+ new_pkgs = []
+ for pkgs in names_list:
+ new_pkgs.append(pkgs + ":" + section)
+
+ return new_pkgs
+
+def get_latest_released_fedora_source_package_list(d):
+ "Returns list of all the name os packages in the latest fedora distro"
+ latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/", d)
+
+ package_names = get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Fedora/source/SRPMS/" % latest, "main", d)
+
+# package_names += get_source_package_list_from_url("http://download.fedora.redhat.com/pub/fedora/linux/releases/%s/Everything/source/SPRMS/" % latest, "everything")
+ package_names += get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates", d)
+
+ package_list=clean_package_list(package_names)
+
+ return latest, package_list
+
+def get_latest_released_opensuse_source_package_list(d):
+ "Returns list of all the name os packages in the latest opensuse distro"
+ latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/",d)
+
+ package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/%s/repo/oss/suse/src/" % latest, "main", d)
+ package_names += get_source_package_list_from_url("http://download.opensuse.org/update/%s/rpm/src/" % latest, "updates", d)
+
+ package_list=clean_package_list(package_names)
+ return latest, package_list
+
+def get_latest_released_mandriva_source_package_list(d):
+ "Returns list of all the name os packages in the latest mandriva distro"
+ latest = find_latest_numeric_release("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/", d)
+ package_names = get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/release/" % latest, "main", d)
+# package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/contrib/release/" % latest, "contrib")
+ package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/updates/" % latest, "updates", d)
+
+ package_list=clean_package_list(package_names)
+ return latest, package_list
+
+def find_latest_debian_release(url, d):
+ "Find the latest listed debian release on the given url"
+
+ releases = []
+ for link in get_links_from_url(url, d):
+ if link[:6] == "Debian":
+ if ';' not in link:
+ releases.append(link)
+ releases.sort()
+ try:
+ return releases.pop()[6:]
+ except:
+ return "_NotFound_"
+
+def get_debian_style_source_package_list(url, section, d):
+ "Return the list of package-names stored in the debian style Sources.gz file"
+ with create_socket(url,d) as sock:
+ webpage = sock.read()
+ import tempfile
+ tmpfile = tempfile.NamedTemporaryFile(mode='wb', prefix='oecore.', suffix='.tmp', delete=False)
+ tmpfilename=tmpfile.name
+    # 'sock' was already fully read into 'webpage' above; write the buffered data
+    tmpfile.write(webpage)
+ tmpfile.close()
+ import gzip
+ bb.note("Reading %s: %s" % (url, section))
+
+ f = gzip.open(tmpfilename)
+ package_names = []
+ for line in f:
+ if line[:9] == "Package: ":
+ package_names.append(line[9:-1] + ":" + section) # Also strip the '\n' at the end
+ os.unlink(tmpfilename)
+
+ return package_names
+
+def get_latest_released_debian_source_package_list(d):
+ "Returns list of all the name os packages in the latest debian distro"
+ latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/", d)
+ url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz"
+ package_names = get_debian_style_source_package_list(url, "main", d)
+# url = "http://ftp.debian.org/debian/dists/stable/contrib/source/Sources.gz"
+# package_names += get_debian_style_source_package_list(url, "contrib")
+ url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz"
+ package_names += get_debian_style_source_package_list(url, "updates", d)
+ package_list=clean_package_list(package_names)
+ return latest, package_list
+
+def find_latest_ubuntu_release(url, d):
+ "Find the latest listed ubuntu release on the given url"
+ url += "?C=M;O=D" # Descending Sort by Last Modified
+ for link in get_links_from_url(url, d):
+ if link[-8:] == "-updates":
+ return link[:-8]
+ return "_NotFound_"
+
+def get_latest_released_ubuntu_source_package_list(d):
+ "Returns list of all the name os packages in the latest ubuntu distro"
+ latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/", d)
+ url = "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest
+ package_names = get_debian_style_source_package_list(url, "main", d)
+# url = "http://archive.ubuntu.com/ubuntu/dists/%s/multiverse/source/Sources.gz" % latest
+# package_names += get_debian_style_source_package_list(url, "multiverse")
+# url = "http://archive.ubuntu.com/ubuntu/dists/%s/universe/source/Sources.gz" % latest
+# package_names += get_debian_style_source_package_list(url, "universe")
+ url = "http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest
+ package_names += get_debian_style_source_package_list(url, "updates", d)
+ package_list=clean_package_list(package_names)
+ return latest, package_list
+
+def create_distro_packages_list(distro_check_dir, d):
+ pkglst_dir = os.path.join(distro_check_dir, "package_lists")
+ if not os.path.isdir (pkglst_dir):
+ os.makedirs(pkglst_dir)
+ # first clear old stuff
+ for file in os.listdir(pkglst_dir):
+ os.unlink(os.path.join(pkglst_dir, file))
+
+ per_distro_functions = [
+ ["Debian", get_latest_released_debian_source_package_list],
+ ["Ubuntu", get_latest_released_ubuntu_source_package_list],
+ ["Fedora", get_latest_released_fedora_source_package_list],
+ ["OpenSuSE", get_latest_released_opensuse_source_package_list],
+ ["Mandriva", get_latest_released_mandriva_source_package_list],
+ ["Meego", get_latest_released_meego_source_package_list]
+ ]
+
+ from datetime import datetime
+ begin = datetime.now()
+ for distro in per_distro_functions:
+ name = distro[0]
+ release, package_list = distro[1](d)
+ bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
+ package_list_file = os.path.join(pkglst_dir, name + "-" + release)
+ f = open(package_list_file, "w+b")
+ for pkg in package_list:
+ f.write(pkg + "\n")
+ f.close()
+ end = datetime.now()
+ delta = end - begin
+ bb.note("package_list generatiosn took this much time: %d seconds" % delta.seconds)
+
+def update_distro_data(distro_check_dir, datetime, d):
+ """
+    If the distro package list data is old, then rebuild it.
+    The operation has to be protected by a lock so that
+    only one thread performs it at a time.
+ """
+ if not os.path.isdir (distro_check_dir):
+ try:
+ bb.note ("Making new directory: %s" % distro_check_dir)
+ os.makedirs (distro_check_dir)
+ except OSError:
+ raise Exception('Unable to create directory %s' % (distro_check_dir))
+
+
+ datetime_file = os.path.join(distro_check_dir, "build_datetime")
+ saved_datetime = "_invalid_"
+ import fcntl
+ try:
+ if not os.path.exists(datetime_file):
+ open(datetime_file, 'w+b').close() # touch the file so that the next open won't fail
+
+ f = open(datetime_file, "r+b")
+ fcntl.lockf(f, fcntl.LOCK_EX)
+ saved_datetime = f.read()
+ if saved_datetime[0:8] != datetime[0:8]:
+ bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime))
+ bb.note("Regenerating distro package lists")
+ create_distro_packages_list(distro_check_dir, d)
+ f.seek(0)
+ f.write(datetime)
+
+ except OSError:
+ raise Exception('Unable to read/write this file: %s' % (datetime_file))
+ finally:
+ fcntl.lockf(f, fcntl.LOCK_UN)
+ f.close()
+
+def compare_in_distro_packages_list(distro_check_dir, d):
+ if not os.path.isdir(distro_check_dir):
+ raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")
+
+ localdata = bb.data.createCopy(d)
+ pkglst_dir = os.path.join(distro_check_dir, "package_lists")
+ matching_distros = []
+ pn = d.getVar('PN', True)
+ recipe_name = d.getVar('PN', True)
+ bb.note("Checking: %s" % pn)
+
+ trim_dict = dict({"-native":"-native", "-cross":"-cross", "-initial":"-initial"})
+
+ if pn.find("-native") != -1:
+ pnstripped = pn.split("-native")
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+ recipe_name = pnstripped[0]
+
+ if pn.startswith("nativesdk-"):
+ pnstripped = pn.split("nativesdk-")
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+ recipe_name = pnstripped[1]
+
+ if pn.find("-cross") != -1:
+ pnstripped = pn.split("-cross")
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+ recipe_name = pnstripped[0]
+
+ if pn.find("-initial") != -1:
+ pnstripped = pn.split("-initial")
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+ recipe_name = pnstripped[0]
+
+ bb.note("Recipe: %s" % recipe_name)
+ tmp = localdata.getVar('DISTRO_PN_ALIAS', True)
+
+ distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'})
+
+ if tmp:
+ list = tmp.split(' ')
+ for str in list:
+ if str and str.find("=") == -1 and distro_exceptions[str]:
+ matching_distros.append(str)
+
+ distro_pn_aliases = {}
+ if tmp:
+ list = tmp.split(' ')
+ for str in list:
+ if str.find("=") != -1:
+ (dist, pn_alias) = str.split('=')
+ distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()
+
+ for file in os.listdir(pkglst_dir):
+ (distro, distro_release) = file.split("-")
+ f = open(os.path.join(pkglst_dir, file), "rb")
+ for line in f:
+ (pkg, section) = line.split(":")
+ if distro.lower() in distro_pn_aliases:
+ pn = distro_pn_aliases[distro.lower()]
+ else:
+ pn = recipe_name
+ if pn == pkg:
+ matching_distros.append(distro + "-" + section[:-1]) # strip the \n at the end
+ f.close()
+ break
+ f.close()
+
+
+ if tmp != None:
+ list = tmp.split(' ')
+ for item in list:
+ matching_distros.append(item)
+ bb.note("Matching: %s" % matching_distros)
+ return matching_distros
+
+def create_log_file(d, logname):
+ import subprocess
+ logpath = d.getVar('LOG_DIR', True)
+ bb.utils.mkdirhier(logpath)
+ logfn, logsuffix = os.path.splitext(logname)
+ logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME', True), logsuffix))
+ if not os.path.exists(logfile):
+ slogfile = os.path.join(logpath, logname)
+ if os.path.exists(slogfile):
+ os.remove(slogfile)
+ subprocess.call("touch %s" % logfile, shell=True)
+ os.symlink(logfile, slogfile)
+ d.setVar('LOG_FILE', logfile)
+ return logfile
+
+
+def save_distro_check_result(result, datetime, result_file, d):
+ pn = d.getVar('PN', True)
+ logdir = d.getVar('LOG_DIR', True)
+ if not logdir:
+ bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
+ return
+ if not os.path.isdir(logdir):
+ os.makedirs(logdir)
+ line = pn
+ for i in result:
+ line = line + "," + i
+ f = open(result_file, "a")
+ import fcntl
+ fcntl.lockf(f, fcntl.LOCK_EX)
+ f.seek(0, os.SEEK_END) # seek to the end of file
+ f.write(line + "\n")
+ fcntl.lockf(f, fcntl.LOCK_UN)
+ f.close()
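A sketch of the typical calling sequence for these checks, assuming a datastore 'd' (the check directory is illustrative; DATETIME comes from the metadata):

    import oe.distro_check

    datetime = d.getVar('DATETIME', True)
    oe.distro_check.update_distro_data('/tmp/distro_check', datetime, d)
    matches = oe.distro_check.compare_in_distro_packages_list('/tmp/distro_check', d)
    result_file = oe.distro_check.create_log_file(d, 'distrocheck.csv')
    oe.distro_check.save_distro_check_result(matches, datetime, result_file, d)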
diff --git a/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py b/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py
new file mode 100644
index 000000000..b83ee8672
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py
@@ -0,0 +1,116 @@
+"""Helper module for GPG signing"""
+import os
+
+import bb
+import oe.utils
+
+class LocalSigner(object):
+ """Class for handling local (on the build host) signing"""
+ def __init__(self, d):
+ self.gpg_bin = d.getVar('GPG_BIN', True) or \
+ bb.utils.which(os.getenv('PATH'), 'gpg')
+ self.gpg_path = d.getVar('GPG_PATH', True)
+ self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpm")
+
+ def export_pubkey(self, output_file, keyid, armor=True):
+ """Export GPG public key to a file"""
+ cmd = '%s --batch --yes --export -o %s ' % \
+ (self.gpg_bin, output_file)
+ if self.gpg_path:
+ cmd += "--homedir %s " % self.gpg_path
+ if armor:
+ cmd += "--armor "
+ cmd += keyid
+ status, output = oe.utils.getstatusoutput(cmd)
+ if status:
+ raise bb.build.FuncFailed('Failed to export gpg public key (%s): %s' %
+ (keyid, output))
+
+ def sign_rpms(self, files, keyid, passphrase):
+ """Sign RPM files"""
+
+ cmd = self.rpm_bin + " --addsign --define '_gpg_name %s' " % keyid
+ cmd += "--define '_gpg_passphrase %s' " % passphrase
+ if self.gpg_bin:
+ cmd += "--define '%%__gpg %s' " % self.gpg_bin
+ if self.gpg_path:
+ cmd += "--define '_gpg_path %s' " % self.gpg_path
+ cmd += ' '.join(files)
+
+ status, output = oe.utils.getstatusoutput(cmd)
+ if status:
+ raise bb.build.FuncFailed("Failed to sign RPM packages: %s" % output)
+
+ def detach_sign(self, input_file, keyid, passphrase_file, passphrase=None, armor=True):
+ """Create a detached signature of a file"""
+ import subprocess
+
+ if passphrase_file and passphrase:
+ raise Exception("You should use either passphrase_file of passphrase, not both")
+
+ cmd = [self.gpg_bin, '--detach-sign', '--batch', '--no-tty', '--yes',
+ '--passphrase-fd', '0', '-u', keyid]
+
+ if self.gpg_path:
+ cmd += ['--homedir', self.gpg_path]
+ if armor:
+ cmd += ['--armor']
+
+ #gpg > 2.1 supports password pipes only through the loopback interface
+ #gpg < 2.1 errors out if given unknown parameters
+ dots = self.get_gpg_version().split('.')
+ assert len(dots) >= 2
+ if int(dots[0]) >= 2 and int(dots[1]) >= 1:
+ cmd += ['--pinentry-mode', 'loopback']
+
+ cmd += [input_file]
+
+ try:
+ if passphrase_file:
+ with open(passphrase_file) as fobj:
+                    passphrase = fobj.readline()
+
+ job = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
+ (_, stderr) = job.communicate(passphrase)
+
+ if job.returncode:
+ raise bb.build.FuncFailed("GPG exited with code %d: %s" %
+ (job.returncode, stderr))
+
+ except IOError as e:
+ bb.error("IO error (%s): %s" % (e.errno, e.strerror))
+ raise Exception("Failed to sign '%s'" % input_file)
+
+ except OSError as e:
+ bb.error("OS error (%s): %s" % (e.errno, e.strerror))
+ raise Exception("Failed to sign '%s" % input_file)
+
+
+ def get_gpg_version(self):
+ """Return the gpg version"""
+ import subprocess
+ try:
+ return subprocess.check_output((self.gpg_bin, "--version")).split()[2]
+ except subprocess.CalledProcessError as e:
+ raise bb.build.FuncFailed("Could not get gpg version: %s" % e)
+
+
+ def verify(self, sig_file):
+ """Verify signature"""
+ cmd = self.gpg_bin + " --verify "
+ if self.gpg_path:
+ cmd += "--homedir %s " % self.gpg_path
+ cmd += sig_file
+ status, _ = oe.utils.getstatusoutput(cmd)
+ ret = False if status else True
+ return ret
+
+
+def get_signer(d, backend):
+ """Get signer object for the specified backend"""
+ # Use local signing by default
+ if backend == 'local':
+ return LocalSigner(d)
+ else:
+ bb.fatal("Unsupported signing backend '%s'" % backend)
+
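+# Example usage (editor's sketch; the key id and paths here are hypothetical,
+# and 'd' is the usual BitBake datastore):
+#
+#   signer = get_signer(d, 'local')
+#   signer.detach_sign('/tmp/repodata/repomd.xml', 'ExampleKeyId',
+#                      '/path/to/passphrase-file', armor=True)
+#   signer.verify('/tmp/repodata/repomd.xml.asc')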
diff --git a/import-layers/yocto-poky/meta/lib/oe/license.py b/import-layers/yocto-poky/meta/lib/oe/license.py
new file mode 100644
index 000000000..f0f661c3b
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/license.py
@@ -0,0 +1,217 @@
+# vi:sts=4:sw=4:et
+"""Code for parsing OpenEmbedded license strings"""
+
+import ast
+import re
+from fnmatch import fnmatchcase as fnmatch
+
+def license_ok(license, dont_want_licenses):
+ """ Return False if License exist in dont_want_licenses else True """
+ for dwl in dont_want_licenses:
+ # If you want to exclude license named generically 'X', we
+ # surely want to exclude 'X+' as well. In consequence, we
+ # will exclude a trailing '+' character from LICENSE in
+ # case INCOMPATIBLE_LICENSE is not a 'X+' license.
+ lic = license
+ if not re.search('\+$', dwl):
+ lic = re.sub('\+', '', license)
+ if fnmatch(lic, dwl):
+ return False
+ return True
+
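+# For example (editor's illustration):
+#
+#   license_ok('GPL-3.0+', ['GPL-3.0'])  # -> False: the '+' variant is
+#                                        #    excluded along with 'GPL-3.0'
+#   license_ok('MIT', ['GPL-3.0'])       # -> True
+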
+class LicenseError(Exception):
+ pass
+
+class LicenseSyntaxError(LicenseError):
+ def __init__(self, licensestr, exc):
+ self.licensestr = licensestr
+ self.exc = exc
+ LicenseError.__init__(self)
+
+ def __str__(self):
+ return "error in '%s': %s" % (self.licensestr, self.exc)
+
+class InvalidLicense(LicenseError):
+ def __init__(self, license):
+ self.license = license
+ LicenseError.__init__(self)
+
+ def __str__(self):
+ return "invalid characters in license '%s'" % self.license
+
+license_operator_chars = '&|() '
+license_operator = re.compile('([' + license_operator_chars + '])')
+license_pattern = re.compile(r'[a-zA-Z0-9.+_\-]+$')
+
+class LicenseVisitor(ast.NodeVisitor):
+ """Get elements based on OpenEmbedded license strings"""
+ def get_elements(self, licensestr):
+ new_elements = []
+ elements = filter(lambda x: x.strip(), license_operator.split(licensestr))
+ for pos, element in enumerate(elements):
+ if license_pattern.match(element):
+ if pos > 0 and license_pattern.match(elements[pos-1]):
+ new_elements.append('&')
+ element = '"' + element + '"'
+ elif not license_operator.match(element):
+ raise InvalidLicense(element)
+ new_elements.append(element)
+
+ return new_elements
+
+ """Syntax tree visitor which can accept elements previously generated with
+ OpenEmbedded license string"""
+ def visit_elements(self, elements):
+ self.visit(ast.parse(' '.join(elements)))
+
+ """Syntax tree visitor which can accept OpenEmbedded license strings"""
+ def visit_string(self, licensestr):
+ self.visit_elements(self.get_elements(licensestr))
+
+class FlattenVisitor(LicenseVisitor):
+ """Flatten a license tree (parsed from a string) by selecting one of each
+ set of OR options, in the way the user specifies"""
+ def __init__(self, choose_licenses):
+ self.choose_licenses = choose_licenses
+ self.licenses = []
+ LicenseVisitor.__init__(self)
+
+ def visit_Str(self, node):
+ self.licenses.append(node.s)
+
+ def visit_BinOp(self, node):
+ if isinstance(node.op, ast.BitOr):
+ left = FlattenVisitor(self.choose_licenses)
+ left.visit(node.left)
+
+ right = FlattenVisitor(self.choose_licenses)
+ right.visit(node.right)
+
+ selected = self.choose_licenses(left.licenses, right.licenses)
+ self.licenses.extend(selected)
+ else:
+ self.generic_visit(node)
+
+def flattened_licenses(licensestr, choose_licenses):
+ """Given a license string and choose_licenses function, return a flat list of licenses"""
+ flatten = FlattenVisitor(choose_licenses)
+ try:
+ flatten.visit_string(licensestr)
+ except SyntaxError as exc:
+ raise LicenseSyntaxError(licensestr, exc)
+ return flatten.licenses
+
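+# For example (editor's illustration), with a chooser that prefers the option
+# containing MIT:
+#
+#   prefer_mit = lambda a, b: a if 'MIT' in a else b
+#   flattened_licenses('GPLv2 | MIT', prefer_mit)   # -> ['MIT']
+#   flattened_licenses('GPLv2 & MIT', prefer_mit)   # -> ['GPLv2', 'MIT']
+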
+def is_included(licensestr, whitelist=None, blacklist=None):
+ """Given a license string and whitelist and blacklist, determine if the
+ license string matches the whitelist and does not match the blacklist.
+
+    Returns a tuple holding the boolean state and a list of the applicable
+    licenses which were excluded if the state is False, or of the included
+    licenses if the state is True.
+ """
+
+ def include_license(license):
+ return any(fnmatch(license, pattern) for pattern in whitelist)
+
+ def exclude_license(license):
+ return any(fnmatch(license, pattern) for pattern in blacklist)
+
+ def choose_licenses(alpha, beta):
+ """Select the option in an OR which is the 'best' (has the most
+ included licenses)."""
+ alpha_weight = len(filter(include_license, alpha))
+ beta_weight = len(filter(include_license, beta))
+ if alpha_weight > beta_weight:
+ return alpha
+ else:
+ return beta
+
+ if not whitelist:
+ whitelist = ['*']
+
+ if not blacklist:
+ blacklist = []
+
+ licenses = flattened_licenses(licensestr, choose_licenses)
+ excluded = filter(lambda lic: exclude_license(lic), licenses)
+ included = filter(lambda lic: include_license(lic), licenses)
+ if excluded:
+ return False, excluded
+ else:
+ return True, included
+
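+# For example (editor's illustration):
+#
+#   is_included('GPLv2 & MIT', blacklist=['GPLv2'])   # -> (False, ['GPLv2'])
+#   is_included('GPLv2 | MIT', whitelist=['MIT'])     # -> (True, ['MIT'])
+#
+# In the second case the OR is resolved towards the option with more
+# whitelisted licenses, so GPLv2 is never selected.
+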
+class ManifestVisitor(LicenseVisitor):
+ """Walk license tree (parsed from a string) removing the incompatible
+ licenses specified"""
+ def __init__(self, dont_want_licenses, canonical_license, d):
+ self._dont_want_licenses = dont_want_licenses
+ self._canonical_license = canonical_license
+ self._d = d
+ self._operators = []
+
+ self.licenses = []
+ self.licensestr = ''
+
+ LicenseVisitor.__init__(self)
+
+ def visit(self, node):
+ if isinstance(node, ast.Str):
+ lic = node.s
+
+ if license_ok(self._canonical_license(self._d, lic),
+ self._dont_want_licenses) == True:
+ if self._operators:
+ ops = []
+ for op in self._operators:
+ if op == '[':
+ ops.append(op)
+ elif op == ']':
+ ops.append(op)
+ else:
+ if not ops:
+ ops.append(op)
+ elif ops[-1] in ['[', ']']:
+ ops.append(op)
+ else:
+ ops[-1] = op
+
+ for op in ops:
+ if op == '[' or op == ']':
+ self.licensestr += op
+ elif self.licenses:
+ self.licensestr += ' ' + op + ' '
+
+ self._operators = []
+
+ self.licensestr += lic
+ self.licenses.append(lic)
+ elif isinstance(node, ast.BitAnd):
+ self._operators.append("&")
+ elif isinstance(node, ast.BitOr):
+ self._operators.append("|")
+ elif isinstance(node, ast.List):
+ self._operators.append("[")
+ elif isinstance(node, ast.Load):
+ self.licensestr += "]"
+
+ self.generic_visit(node)
+
+def manifest_licenses(licensestr, dont_want_licenses, canonical_license, d):
+ """Given a license string and dont_want_licenses list,
+ return license string filtered and a list of licenses"""
+ manifest = ManifestVisitor(dont_want_licenses, canonical_license, d)
+
+ try:
+ elements = manifest.get_elements(licensestr)
+
+        # Replace '(' and ')' with '[' and ']' so ast parses them as List and Load nodes.
+ elements = ['[' if e == '(' else e for e in elements]
+ elements = [']' if e == ')' else e for e in elements]
+
+ manifest.visit_elements(elements)
+ except SyntaxError as exc:
+ raise LicenseSyntaxError(licensestr, exc)
+
+    # Replace '[' and ']' back with '(' and ')' to output a correct license string.
+ manifest.licensestr = manifest.licensestr.replace('[', '(').replace(']', ')')
+
+ return (manifest.licensestr, manifest.licenses)
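+
+# For example (editor's illustration, using an identity canonicalisation hook):
+#
+#   manifest_licenses('GPLv2 | (MIT & BSD)', ['GPLv2'], lambda d, lic: lic, None)
+#   # -> ('(MIT & BSD)', ['MIT', 'BSD'])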
diff --git a/import-layers/yocto-poky/meta/lib/oe/lsb.py b/import-layers/yocto-poky/meta/lib/oe/lsb.py
new file mode 100644
index 000000000..e0bdfba25
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/lsb.py
@@ -0,0 +1,88 @@
+import os
+
+import bb
+
+def release_dict():
+ """Return the output of lsb_release -ir as a dictionary"""
+ from subprocess import PIPE
+
+ try:
+ output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE)
+ except bb.process.CmdError as exc:
+ return None
+
+ data = {}
+ for line in output.splitlines():
+ if line.startswith("-e"): line = line[3:]
+ try:
+ key, value = line.split(":\t", 1)
+ except ValueError:
+ continue
+ else:
+ data[key] = value
+ return data
+
+def release_dict_file():
+ """ Try to gather LSB release information manually when lsb_release tool is unavailable """
+ data = None
+ try:
+ if os.path.exists('/etc/lsb-release'):
+ data = {}
+ with open('/etc/lsb-release') as f:
+ for line in f:
+ key, value = line.split("=", 1)
+ data[key] = value.strip()
+ elif os.path.exists('/etc/redhat-release'):
+ data = {}
+ with open('/etc/redhat-release') as f:
+ distro = f.readline().strip()
+ import re
+ match = re.match(r'(.*) release (.*) \((.*)\)', distro)
+ if match:
+ data['DISTRIB_ID'] = match.group(1)
+ data['DISTRIB_RELEASE'] = match.group(2)
+ elif os.path.exists('/etc/os-release'):
+ data = {}
+ with open('/etc/os-release') as f:
+ for line in f:
+ if line.startswith('NAME='):
+ data['DISTRIB_ID'] = line[5:].rstrip().strip('"')
+ if line.startswith('VERSION_ID='):
+ data['DISTRIB_RELEASE'] = line[11:].rstrip().strip('"')
+ elif os.path.exists('/etc/SuSE-release'):
+ data = {}
+ data['DISTRIB_ID'] = 'SUSE LINUX'
+ with open('/etc/SuSE-release') as f:
+ for line in f:
+ if line.startswith('VERSION = '):
+ data['DISTRIB_RELEASE'] = line[10:].rstrip()
+ break
+
+ except IOError:
+ return None
+ return data
+
+def distro_identifier(adjust_hook=None):
+ """Return a distro identifier string based upon lsb_release -ri,
+ with optional adjustment via a hook"""
+
+ import re
+
+ lsb_data = release_dict()
+ if lsb_data:
+ distro_id, release = lsb_data['Distributor ID'], lsb_data['Release']
+ else:
+ lsb_data_file = release_dict_file()
+ if lsb_data_file:
+ distro_id, release = lsb_data_file['DISTRIB_ID'], lsb_data_file.get('DISTRIB_RELEASE', None)
+ else:
+ distro_id, release = None, None
+
+ if adjust_hook:
+ distro_id, release = adjust_hook(distro_id, release)
+ if not distro_id:
+ return "Unknown"
+ # Filter out any non-alphanumerics
+ distro_id = re.sub(r'\W', '', distro_id)
+
+ if release:
+ id_str = '{0}-{1}'.format(distro_id, release)
+ else:
+ id_str = distro_id
+ return id_str.replace(' ','-').replace('/','-')
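+
+# For example (editor's illustration): on an Ubuntu 16.04 build host this
+# returns 'Ubuntu-16.04'; if neither lsb_release nor any known release file
+# is usable, it returns 'Unknown'.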
diff --git a/import-layers/yocto-poky/meta/lib/oe/maketype.py b/import-layers/yocto-poky/meta/lib/oe/maketype.py
new file mode 100644
index 000000000..139f33369
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/maketype.py
@@ -0,0 +1,99 @@
+"""OpenEmbedded variable typing support
+
+Types are defined in the metadata by name, using the 'type' flag on a
+variable. Other flags may be utilized in the construction of the types. See
+the arguments of the type's factory for details.
+"""
+
+import inspect
+import oe.types as types
+
+available_types = {}
+
+class MissingFlag(TypeError):
+ """A particular flag is required to construct the type, but has not been
+ provided."""
+ def __init__(self, flag, type):
+ self.flag = flag
+ self.type = type
+ TypeError.__init__(self)
+
+ def __str__(self):
+ return "Type '%s' requires flag '%s'" % (self.type, self.flag)
+
+def factory(var_type):
+ """Return the factory for a specified type."""
+ if var_type is None:
+ raise TypeError("No type specified. Valid types: %s" %
+ ', '.join(available_types))
+ try:
+ return available_types[var_type]
+ except KeyError:
+ raise TypeError("Invalid type '%s':\n Valid types: %s" %
+ (var_type, ', '.join(available_types)))
+
+def create(value, var_type, **flags):
+ """Create an object of the specified type, given the specified flags and
+ string value."""
+ obj = factory(var_type)
+ objflags = {}
+ for flag in obj.flags:
+ if flag not in flags:
+ if flag not in obj.optflags:
+ raise MissingFlag(flag, var_type)
+ else:
+ objflags[flag] = flags[flag]
+
+ return obj(value, **objflags)
+
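+# For example (editor's sketch): with the factories registered from oe.types
+# at the bottom of this module, metadata code can convert a string value
+# according to its declared type, e.g.
+#
+#   create('1', 'boolean')   # -> True
+#
+# A flag required by the factory but not supplied raises MissingFlag.
+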
+def get_callable_args(obj):
+ """Grab all but the first argument of the specified callable, returning
+ the list, as well as a list of which of the arguments have default
+ values."""
+ if type(obj) is type:
+ obj = obj.__init__
+
+ args, varargs, keywords, defaults = inspect.getargspec(obj)
+ flaglist = []
+ if args:
+ if len(args) > 1 and args[0] == 'self':
+ args = args[1:]
+ flaglist.extend(args)
+
+ optional = set()
+ if defaults:
+ optional |= set(flaglist[-len(defaults):])
+ return flaglist, optional
+
+def factory_setup(name, obj):
+ """Prepare a factory for use."""
+ args, optional = get_callable_args(obj)
+ extra_args = args[1:]
+ if extra_args:
+        obj.flags = extra_args
+ obj.optflags = set(optional)
+ else:
+ obj.flags = obj.optflags = ()
+
+ if not hasattr(obj, 'name'):
+ obj.name = name
+
+def register(name, factory):
+ """Register a type, given its name and a factory callable.
+
+ Determines the required and optional flags from the factory's
+ arguments."""
+ factory_setup(name, factory)
+ available_types[factory.name] = factory
+
+
+# Register all our included types
+for name in dir(types):
+ if name.startswith('_'):
+ continue
+
+ obj = getattr(types, name)
+ if not callable(obj):
+ continue
+
+ register(name, obj)
diff --git a/import-layers/yocto-poky/meta/lib/oe/manifest.py b/import-layers/yocto-poky/meta/lib/oe/manifest.py
new file mode 100644
index 000000000..42832f15d
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/manifest.py
@@ -0,0 +1,345 @@
+from abc import ABCMeta, abstractmethod
+import os
+import re
+import bb
+
+
+class Manifest(object):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+ __metaclass__ = ABCMeta
+
+ PKG_TYPE_MUST_INSTALL = "mip"
+ PKG_TYPE_MULTILIB = "mlp"
+ PKG_TYPE_LANGUAGE = "lgp"
+ PKG_TYPE_ATTEMPT_ONLY = "aop"
+
+ MANIFEST_TYPE_IMAGE = "image"
+ MANIFEST_TYPE_SDK_HOST = "sdk_host"
+ MANIFEST_TYPE_SDK_TARGET = "sdk_target"
+
+ var_maps = {
+ MANIFEST_TYPE_IMAGE: {
+ "PACKAGE_INSTALL": PKG_TYPE_MUST_INSTALL,
+ "PACKAGE_INSTALL_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY,
+ "LINGUAS_INSTALL": PKG_TYPE_LANGUAGE
+ },
+ MANIFEST_TYPE_SDK_HOST: {
+ "TOOLCHAIN_HOST_TASK": PKG_TYPE_MUST_INSTALL,
+ "TOOLCHAIN_HOST_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
+ },
+ MANIFEST_TYPE_SDK_TARGET: {
+ "TOOLCHAIN_TARGET_TASK": PKG_TYPE_MUST_INSTALL,
+ "TOOLCHAIN_TARGET_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
+ }
+ }
+
+ INSTALL_ORDER = [
+ PKG_TYPE_LANGUAGE,
+ PKG_TYPE_MUST_INSTALL,
+ PKG_TYPE_ATTEMPT_ONLY,
+ PKG_TYPE_MULTILIB
+ ]
+
+ initial_manifest_file_header = \
+ "# This file was generated automatically and contains the packages\n" \
+ "# passed on to the package manager in order to create the rootfs.\n\n" \
+ "# Format:\n" \
+ "# <package_type>,<package_name>\n" \
+ "# where:\n" \
+ "# <package_type> can be:\n" \
+ "# 'mip' = must install package\n" \
+ "# 'aop' = attempt only package\n" \
+ "# 'mlp' = multilib package\n" \
+ "# 'lgp' = language package\n\n"
+
+ def __init__(self, d, manifest_dir=None, manifest_type=MANIFEST_TYPE_IMAGE):
+ self.d = d
+ self.manifest_type = manifest_type
+
+ if manifest_dir is None:
+ if manifest_type != self.MANIFEST_TYPE_IMAGE:
+ self.manifest_dir = self.d.getVar('SDK_DIR', True)
+ else:
+ self.manifest_dir = self.d.getVar('WORKDIR', True)
+ else:
+ self.manifest_dir = manifest_dir
+
+ bb.utils.mkdirhier(self.manifest_dir)
+
+ self.initial_manifest = os.path.join(self.manifest_dir, "%s_initial_manifest" % manifest_type)
+ self.final_manifest = os.path.join(self.manifest_dir, "%s_final_manifest" % manifest_type)
+ self.full_manifest = os.path.join(self.manifest_dir, "%s_full_manifest" % manifest_type)
+
+ # packages in the following vars will be split in 'must install' and
+ # 'multilib'
+ self.vars_to_split = ["PACKAGE_INSTALL",
+ "TOOLCHAIN_HOST_TASK",
+ "TOOLCHAIN_TARGET_TASK"]
+
+ """
+ This creates a standard initial manifest for core-image-(minimal|sato|sato-sdk).
+ This will be used for testing until the class is implemented properly!
+ """
+ def _create_dummy_initial(self):
+ image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
+ pkg_list = dict()
+ if image_rootfs.find("core-image-sato-sdk") > 0:
+ pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
+ "packagegroup-core-x11-sato-games packagegroup-base-extended " \
+ "packagegroup-core-x11-sato packagegroup-core-x11-base " \
+ "packagegroup-core-sdk packagegroup-core-tools-debug " \
+ "packagegroup-core-boot packagegroup-core-tools-testapps " \
+ "packagegroup-core-eclipse-debug packagegroup-core-qt-demoapps " \
+ "apt packagegroup-core-tools-profile psplash " \
+ "packagegroup-core-standalone-sdk-target " \
+ "packagegroup-core-ssh-openssh dpkg kernel-dev"
+ pkg_list[self.PKG_TYPE_LANGUAGE] = \
+ "locale-base-en-us locale-base-en-gb"
+ elif image_rootfs.find("core-image-sato") > 0:
+ pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
+ "packagegroup-core-ssh-dropbear packagegroup-core-x11-sato-games " \
+ "packagegroup-core-x11-base psplash apt dpkg packagegroup-base-extended " \
+ "packagegroup-core-x11-sato packagegroup-core-boot"
+ pkg_list['lgp'] = \
+ "locale-base-en-us locale-base-en-gb"
+ elif image_rootfs.find("core-image-minimal") > 0:
+ pkg_list[self.PKG_TYPE_MUST_INSTALL] = "run-postinsts packagegroup-core-boot"
+
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for pkg_type in pkg_list:
+ for pkg in pkg_list[pkg_type].split():
+ manifest.write("%s,%s\n" % (pkg_type, pkg))
+
+ """
+    This will create the initial manifest which will be used by the Rootfs class to
+ generate the rootfs
+ """
+ @abstractmethod
+ def create_initial(self):
+ pass
+
+ """
+ This creates the manifest after everything has been installed.
+ """
+ @abstractmethod
+ def create_final(self):
+ pass
+
+ """
+    This creates the manifest after the packages in the initial manifest have
+    been dummy installed. It lists all *to be installed* packages. There is no real
+ installation, just a test.
+ """
+ @abstractmethod
+ def create_full(self, pm):
+ pass
+
+ """
+ The following function parses an initial manifest and returns a dictionary
+ object with the must install, attempt only, multilib and language packages.
+ """
+ def parse_initial_manifest(self):
+ pkgs = dict()
+
+ with open(self.initial_manifest) as manifest:
+ for line in manifest.read().split('\n'):
+ comment = re.match("^#.*", line)
+ pattern = "^(%s|%s|%s|%s),(.*)$" % \
+ (self.PKG_TYPE_MUST_INSTALL,
+ self.PKG_TYPE_ATTEMPT_ONLY,
+ self.PKG_TYPE_MULTILIB,
+ self.PKG_TYPE_LANGUAGE)
+ pkg = re.match(pattern, line)
+
+ if comment is not None:
+ continue
+
+ if pkg is not None:
+ pkg_type = pkg.group(1)
+ pkg_name = pkg.group(2)
+
+                if pkg_type not in pkgs:
+ pkgs[pkg_type] = [pkg_name]
+ else:
+ pkgs[pkg_type].append(pkg_name)
+
+ return pkgs
+
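+    # For example (editor's illustration), a manifest containing:
+    #
+    #   mip,packagegroup-core-boot
+    #   lgp,locale-base-en-us
+    #
+    # parses to {'mip': ['packagegroup-core-boot'],
+    #            'lgp': ['locale-base-en-us']}.
+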
+ '''
+    The following function parses a full manifest and returns a list
+    of packages.
+ '''
+ def parse_full_manifest(self):
+ installed_pkgs = list()
+ if not os.path.exists(self.full_manifest):
+            bb.note('full manifest does not exist')
+ return installed_pkgs
+
+ with open(self.full_manifest, 'r') as manifest:
+ for pkg in manifest.read().split('\n'):
+ installed_pkgs.append(pkg.strip())
+
+ return installed_pkgs
+
+
+class RpmManifest(Manifest):
+ """
+ Returns a dictionary object with mip and mlp packages.
+ """
+ def _split_multilib(self, pkg_list):
+ pkgs = dict()
+
+ for pkg in pkg_list.split():
+ pkg_type = self.PKG_TYPE_MUST_INSTALL
+
+ ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()
+
+ for ml_variant in ml_variants:
+ if pkg.startswith(ml_variant + '-'):
+ pkg_type = self.PKG_TYPE_MULTILIB
+
+            if pkg_type not in pkgs:
+ pkgs[pkg_type] = pkg
+ else:
+ pkgs[pkg_type] += " " + pkg
+
+ return pkgs
+
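+    # For example (editor's illustration): with MULTILIB_VARIANTS = "lib32",
+    # _split_multilib("lib32-glibc bash") returns
+    # {'mlp': 'lib32-glibc', 'mip': 'bash'}.
+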
+ def create_initial(self):
+ pkgs = dict()
+
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for var in self.var_maps[self.manifest_type]:
+ if var in self.vars_to_split:
+ split_pkgs = self._split_multilib(self.d.getVar(var, True))
+ if split_pkgs is not None:
+ pkgs = dict(pkgs.items() + split_pkgs.items())
+ else:
+ pkg_list = self.d.getVar(var, True)
+ if pkg_list is not None:
+ pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True)
+
+ for pkg_type in pkgs:
+ for pkg in pkgs[pkg_type].split():
+ manifest.write("%s,%s\n" % (pkg_type, pkg))
+
+ def create_final(self):
+ pass
+
+ def create_full(self, pm):
+ pass
+
+
+class OpkgManifest(Manifest):
+ """
+ Returns a dictionary object with mip and mlp packages.
+ """
+ def _split_multilib(self, pkg_list):
+ pkgs = dict()
+
+ for pkg in pkg_list.split():
+ pkg_type = self.PKG_TYPE_MUST_INSTALL
+
+ ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()
+
+ for ml_variant in ml_variants:
+ if pkg.startswith(ml_variant + '-'):
+ pkg_type = self.PKG_TYPE_MULTILIB
+
+            if pkg_type not in pkgs:
+ pkgs[pkg_type] = pkg
+ else:
+ pkgs[pkg_type] += " " + pkg
+
+ return pkgs
+
+ def create_initial(self):
+ pkgs = dict()
+
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for var in self.var_maps[self.manifest_type]:
+ if var in self.vars_to_split:
+ split_pkgs = self._split_multilib(self.d.getVar(var, True))
+ if split_pkgs is not None:
+ pkgs = dict(pkgs.items() + split_pkgs.items())
+ else:
+ pkg_list = self.d.getVar(var, True)
+ if pkg_list is not None:
+ pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True)
+
+ for pkg_type in pkgs:
+ for pkg in pkgs[pkg_type].split():
+ manifest.write("%s,%s\n" % (pkg_type, pkg))
+
+ def create_final(self):
+ pass
+
+ def create_full(self, pm):
+ if not os.path.exists(self.initial_manifest):
+ self.create_initial()
+
+ initial_manifest = self.parse_initial_manifest()
+ pkgs_to_install = list()
+ for pkg_type in initial_manifest:
+ pkgs_to_install += initial_manifest[pkg_type]
+ if len(pkgs_to_install) == 0:
+ return
+
+ output = pm.dummy_install(pkgs_to_install)
+
+ with open(self.full_manifest, 'w+') as manifest:
+ pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
+ for line in set(output.split('\n')):
+ m = pkg_re.match(line)
+ if m:
+ manifest.write(m.group(1) + '\n')
+
+ return
+
+
+class DpkgManifest(Manifest):
+ def create_initial(self):
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for var in self.var_maps[self.manifest_type]:
+ pkg_list = self.d.getVar(var, True)
+
+ if pkg_list is None:
+ continue
+
+ for pkg in pkg_list.split():
+ manifest.write("%s,%s\n" %
+ (self.var_maps[self.manifest_type][var], pkg))
+
+ def create_final(self):
+ pass
+
+ def create_full(self, pm):
+ pass
+
+
+def create_manifest(d, final_manifest=False, manifest_dir=None,
+ manifest_type=Manifest.MANIFEST_TYPE_IMAGE):
+ manifest_map = {'rpm': RpmManifest,
+ 'ipk': OpkgManifest,
+ 'deb': DpkgManifest}
+
+ manifest = manifest_map[d.getVar('IMAGE_PKGTYPE', True)](d, manifest_dir, manifest_type)
+
+ if final_manifest:
+ manifest.create_final()
+ else:
+ manifest.create_initial()
+
+
+if __name__ == "__main__":
+ pass
diff --git a/import-layers/yocto-poky/meta/lib/oe/package.py b/import-layers/yocto-poky/meta/lib/oe/package.py
new file mode 100644
index 000000000..288768954
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/package.py
@@ -0,0 +1,157 @@
+import os
+
+import bb
+
+def runstrip(arg):
+    # Function to strip a single file, called from split_and_strip_files below.
+    # Requires a working 'file' utility (one which works on the target
+    # architecture).
+    #
+    # The elftype is a bit pattern (explained in split_and_strip_files) to tell
+    # us what type of file we're processing...
+    # 4 - executable
+    # 8 - shared library
+    # 16 - kernel module
+
+    import stat, subprocess
+
+ (file, elftype, strip) = arg
+
+ newmode = None
+    if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
+ origmode = os.stat(file)[stat.ST_MODE]
+ newmode = origmode | stat.S_IWRITE | stat.S_IREAD
+ os.chmod(file, newmode)
+
+ extraflags = ""
+
+ # kernel module
+ if elftype & 16:
+ extraflags = "--strip-debug --remove-section=.comment --remove-section=.note --preserve-dates"
+ # .so and shared library
+ elif ".so" in file and elftype & 8:
+ extraflags = "--remove-section=.comment --remove-section=.note --strip-unneeded"
+ # shared or executable:
+ elif elftype & 8 or elftype & 4:
+ extraflags = "--remove-section=.comment --remove-section=.note"
+
+ stripcmd = "'%s' %s '%s'" % (strip, extraflags, file)
+ bb.debug(1, "runstrip: %s" % stripcmd)
+
+ try:
+ output = subprocess.check_output(stripcmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.error("runstrip: '%s' strip command failed with %s (%s)" % (stripcmd, e.returncode, e.output))
+
+ if newmode:
+ os.chmod(file, origmode)
+
+ return
+
+
+def file_translate(file):
+ ft = file.replace("@", "@at@")
+ ft = ft.replace(" ", "@space@")
+ ft = ft.replace("\t", "@tab@")
+ ft = ft.replace("[", "@openbrace@")
+ ft = ft.replace("]", "@closebrace@")
+ ft = ft.replace("_", "@underscore@")
+ return ft
+
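+# For example (editor's illustration):
+#
+#   file_translate('/usr/lib/foo_bar [1].so')
+#   # -> '/usr/lib/foo@underscore@bar@space@@openbrace@1@closebrace@.so'
+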
+def filedeprunner(arg):
+ import re, subprocess, shlex
+
+ (pkg, pkgfiles, rpmdeps, pkgdest) = arg
+ provides = {}
+ requires = {}
+
+ r = re.compile(r'[<>=]+ +[^ ]*')
+
+ def process_deps(pipe, pkg, pkgdest, provides, requires):
+ for line in pipe:
+ f = line.split(" ", 1)[0].strip()
+ line = line.split(" ", 1)[1].strip()
+
+ if line.startswith("Requires:"):
+ i = requires
+ elif line.startswith("Provides:"):
+ i = provides
+ else:
+ continue
+
+ file = f.replace(pkgdest + "/" + pkg, "")
+ file = file_translate(file)
+ value = line.split(":", 1)[1].strip()
+ value = r.sub(r'(\g<0>)', value)
+
+ if value.startswith("rpmlib("):
+ continue
+ if value == "python":
+ continue
+ if file not in i:
+ i[file] = []
+ i[file].append(value)
+
+ return provides, requires
+
+ try:
+ dep_popen = subprocess.Popen(shlex.split(rpmdeps) + pkgfiles, stdout=subprocess.PIPE)
+ provides, requires = process_deps(dep_popen.stdout, pkg, pkgdest, provides, requires)
+ except OSError as e:
+ bb.error("rpmdeps: '%s' command failed, '%s'" % (shlex.split(rpmdeps) + pkgfiles, e))
+ raise e
+
+ return (pkg, provides, requires)
+
+
+def read_shlib_providers(d):
+ import re
+
+ shlib_provider = {}
+ shlibs_dirs = d.getVar('SHLIBSDIRS', True).split()
+    list_re = re.compile(r'^(.*)\.list$')
+ # Go from least to most specific since the last one found wins
+ for dir in reversed(shlibs_dirs):
+ bb.debug(2, "Reading shlib providers in %s" % (dir))
+ if not os.path.exists(dir):
+ continue
+ for file in os.listdir(dir):
+ m = list_re.match(file)
+ if m:
+ dep_pkg = m.group(1)
+                with open(os.path.join(dir, file)) as fd:
+                    lines = fd.readlines()
+ for l in lines:
+ s = l.strip().split(":")
+ if s[0] not in shlib_provider:
+ shlib_provider[s[0]] = {}
+ shlib_provider[s[0]][s[1]] = (dep_pkg, s[2])
+ return shlib_provider
+
+
+def npm_split_package_dirs(pkgdir):
+ """
+ Work out the packages fetched and unpacked by BitBake's npm fetcher
+ Returns a dict of packagename -> (relpath, package.json) ordered
+ such that it is suitable for use in PACKAGES and FILES
+ """
+ from collections import OrderedDict
+ import json
+ packages = {}
+ for root, dirs, files in os.walk(pkgdir):
+ if os.path.basename(root) == 'node_modules':
+ for dn in dirs:
+ relpth = os.path.relpath(os.path.join(root, dn), pkgdir)
+ pkgitems = ['${PN}']
+ for pathitem in relpth.split('/'):
+ if pathitem == 'node_modules':
+ continue
+ pkgitems.append(pathitem)
+ pkgname = '-'.join(pkgitems).replace('_', '-')
+ pkgfile = os.path.join(root, dn, 'package.json')
+ data = None
+ if os.path.exists(pkgfile):
+ with open(pkgfile, 'r') as f:
+ data = json.loads(f.read())
+ packages[pkgname] = (relpth, data)
+ # We want the main package for a module sorted *after* its subpackages
+ # (so that it doesn't otherwise steal the files for the subpackage), so
+ # this is a cheap way to do that whilst still having an otherwise
+ # alphabetical sort
+ return OrderedDict((key, packages[key]) for key in sorted(packages, key=lambda pkg: pkg + '~'))
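+
+# For example (editor's illustration): a tree containing
+# node_modules/express/node_modules/accepts yields entries such as
+#
+#   '${PN}-express-accepts': ('node_modules/express/node_modules/accepts', {...})
+#   '${PN}-express':         ('node_modules/express', {...})
+#
+# with each parent module sorted after its subpackages, as the comment above
+# explains.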
diff --git a/import-layers/yocto-poky/meta/lib/oe/package_manager.py b/import-layers/yocto-poky/meta/lib/oe/package_manager.py
new file mode 100644
index 000000000..b4b359a8c
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/package_manager.py
@@ -0,0 +1,2061 @@
+from abc import ABCMeta, abstractmethod
+import os
+import glob
+import subprocess
+import shutil
+import multiprocessing
+import re
+import bb
+import tempfile
+import oe.utils
+import string
+from oe.gpg_sign import get_signer
+
+# this can be used by all PM backends to create the index files in parallel
+def create_index(arg):
+ index_cmd = arg
+
+ try:
+ bb.note("Executing '%s' ..." % index_cmd)
+ result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ return("Index creation command '%s' failed with return code %d:\n%s" %
+ (e.cmd, e.returncode, e.output))
+
+ if result:
+ bb.note(result)
+
+ return None
+
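+# For example (editor's illustration):
+#
+#   create_index('createrepo --update -q /path/to/deploy/rpm/core2_64')
+#
+# runs the command and returns None on success, or an error-message string on
+# failure (the callers collect these and call bb.fatal).
+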
+
+class Indexer(object):
+ __metaclass__ = ABCMeta
+
+ def __init__(self, d, deploy_dir):
+ self.d = d
+ self.deploy_dir = deploy_dir
+
+ @abstractmethod
+ def write_index(self):
+ pass
+
+
+class RpmIndexer(Indexer):
+ def get_ml_prefix_and_os_list(self, arch_var=None, os_var=None):
+ package_archs = {
+ 'default': [],
+ }
+
+ target_os = {
+ 'default': "",
+ }
+
+ if arch_var is not None and os_var is not None:
+ package_archs['default'] = self.d.getVar(arch_var, True).split()
+ package_archs['default'].reverse()
+ target_os['default'] = self.d.getVar(os_var, True).strip()
+ else:
+ package_archs['default'] = self.d.getVar("PACKAGE_ARCHS", True).split()
+ # arch order is reversed. This ensures the -best- match is
+ # listed first!
+ package_archs['default'].reverse()
+ target_os['default'] = self.d.getVar("TARGET_OS", True).strip()
+ multilibs = self.d.getVar('MULTILIBS', True) or ""
+ for ext in multilibs.split():
+ eext = ext.split(':')
+ if len(eext) > 1 and eext[0] == 'multilib':
+ localdata = bb.data.createCopy(self.d)
+ default_tune_key = "DEFAULTTUNE_virtclass-multilib-" + eext[1]
+ default_tune = localdata.getVar(default_tune_key, False)
+ if default_tune is None:
+ default_tune_key = "DEFAULTTUNE_ML_" + eext[1]
+ default_tune = localdata.getVar(default_tune_key, False)
+ if default_tune:
+ localdata.setVar("DEFAULTTUNE", default_tune)
+ bb.data.update_data(localdata)
+ package_archs[eext[1]] = localdata.getVar('PACKAGE_ARCHS',
+ True).split()
+ package_archs[eext[1]].reverse()
+ target_os[eext[1]] = localdata.getVar("TARGET_OS",
+ True).strip()
+
+ ml_prefix_list = dict()
+ for mlib in package_archs:
+ if mlib == 'default':
+ ml_prefix_list[mlib] = package_archs[mlib]
+ else:
+ ml_prefix_list[mlib] = list()
+ for arch in package_archs[mlib]:
+ if arch in ['all', 'noarch', 'any']:
+ ml_prefix_list[mlib].append(arch)
+ else:
+ ml_prefix_list[mlib].append(mlib + "_" + arch)
+
+ return (ml_prefix_list, target_os)
+
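+    # For example (editor's sketch): with MULTILIBS = "multilib:lib32" and a
+    # lib32 PACKAGE_ARCHS of "all x86", this returns something like:
+    #
+    #   ml_prefix_list = {'default': [...], 'lib32': ['lib32_x86', 'all']}
+    #   target_os      = {'default': 'linux', 'lib32': 'linux'}
+    #
+    # with each arch list reversed so that the best match comes first.
+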
+ def write_index(self):
+ sdk_pkg_archs = (self.d.getVar('SDK_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
+ all_mlb_pkg_archs = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
+
+ mlb_prefix_list = self.get_ml_prefix_and_os_list()[0]
+
+ archs = set()
+ for item in mlb_prefix_list:
+ archs = archs.union(set(i.replace('-', '_') for i in mlb_prefix_list[item]))
+
+ if len(archs) == 0:
+ archs = archs.union(set(all_mlb_pkg_archs))
+
+ archs = archs.union(set(sdk_pkg_archs))
+
+ rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo")
+ if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND', True))
+ else:
+ signer = None
+ index_cmds = []
+ repomd_files = []
+ rpm_dirs_found = False
+ for arch in archs:
+ dbpath = os.path.join(self.d.getVar('WORKDIR', True), 'rpmdb', arch)
+ if os.path.exists(dbpath):
+ bb.utils.remove(dbpath, True)
+ arch_dir = os.path.join(self.deploy_dir, arch)
+ if not os.path.isdir(arch_dir):
+ continue
+
+ index_cmds.append("%s --dbpath %s --update -q %s" % \
+ (rpm_createrepo, dbpath, arch_dir))
+ repomd_files.append(os.path.join(arch_dir, 'repodata', 'repomd.xml'))
+
+ rpm_dirs_found = True
+
+ if not rpm_dirs_found:
+ bb.note("There are no packages in %s" % self.deploy_dir)
+ return
+
+ # Create repodata
+ result = oe.utils.multiprocess_exec(index_cmds, create_index)
+ if result:
+ bb.fatal('%s' % ('\n'.join(result)))
+ # Sign repomd
+ if signer:
+ for repomd in repomd_files:
+ feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE', True)
+ is_ascii_sig = (feed_sig_type.upper() != "BIN")
+ signer.detach_sign(repomd,
+ self.d.getVar('PACKAGE_FEED_GPG_NAME', True),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE', True),
+ armor=is_ascii_sig)
+
+
+class OpkgIndexer(Indexer):
+ def write_index(self):
+ arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
+ "SDK_PACKAGE_ARCHS",
+ "MULTILIB_ARCHS"]
+
+ opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
+ if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND', True))
+ else:
+ signer = None
+
+ if not os.path.exists(os.path.join(self.deploy_dir, "Packages")):
+ open(os.path.join(self.deploy_dir, "Packages"), "w").close()
+
+ index_cmds = set()
+ index_sign_files = set()
+ for arch_var in arch_vars:
+ archs = self.d.getVar(arch_var, True)
+ if archs is None:
+ continue
+
+ for arch in archs.split():
+ pkgs_dir = os.path.join(self.deploy_dir, arch)
+ pkgs_file = os.path.join(pkgs_dir, "Packages")
+
+ if not os.path.isdir(pkgs_dir):
+ continue
+
+ if not os.path.exists(pkgs_file):
+ open(pkgs_file, "w").close()
+
+ index_cmds.add('%s -r %s -p %s -m %s' %
+ (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
+
+ index_sign_files.add(pkgs_file)
+
+ if len(index_cmds) == 0:
+ bb.note("There are no packages in %s!" % self.deploy_dir)
+ return
+
+ result = oe.utils.multiprocess_exec(index_cmds, create_index)
+ if result:
+ bb.fatal('%s' % ('\n'.join(result)))
+
+ if signer:
+ feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE', True)
+ is_ascii_sig = (feed_sig_type.upper() != "BIN")
+ for f in index_sign_files:
+ signer.detach_sign(f,
+ self.d.getVar('PACKAGE_FEED_GPG_NAME', True),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE', True),
+ armor=is_ascii_sig)
+
+
+class DpkgIndexer(Indexer):
+ def _create_configs(self):
+ bb.utils.mkdirhier(self.apt_conf_dir)
+ bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial"))
+ bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d"))
+ bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d"))
+
+ with open(os.path.join(self.apt_conf_dir, "preferences"),
+ "w") as prefs_file:
+ pass
+ with open(os.path.join(self.apt_conf_dir, "sources.list"),
+ "w+") as sources_file:
+ pass
+
+ with open(self.apt_conf_file, "w") as apt_conf:
+ with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
+ "apt", "apt.conf.sample")) as apt_conf_sample:
+ for line in apt_conf_sample.read().split("\n"):
+ line = re.sub("#ROOTFS#", "/dev/null", line)
+ line = re.sub("#APTCONF#", self.apt_conf_dir, line)
+ apt_conf.write(line + "\n")
+
+ def write_index(self):
+ self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"),
+ "apt-ftparchive")
+ self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
+ self._create_configs()
+
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ pkg_archs = self.d.getVar('PACKAGE_ARCHS', True)
+ if pkg_archs is not None:
+ arch_list = pkg_archs.split()
+ sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS', True)
+ if sdk_pkg_archs is not None:
+ for a in sdk_pkg_archs.split():
+ if a not in pkg_archs:
+ arch_list.append(a)
+
+ all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").split()
+ arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
+
+ apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
+ gzip = bb.utils.which(os.getenv('PATH'), "gzip")
+
+ index_cmds = []
+ deb_dirs_found = False
+ for arch in arch_list:
+ arch_dir = os.path.join(self.deploy_dir, arch)
+ if not os.path.isdir(arch_dir):
+ continue
+
+ cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
+
+ cmd += "%s -fc Packages > Packages.gz;" % gzip
+
+ with open(os.path.join(arch_dir, "Release"), "w+") as release:
+ release.write("Label: %s\n" % arch)
+
+ cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
+
+ index_cmds.append(cmd)
+
+ deb_dirs_found = True
+
+ if not deb_dirs_found:
+ bb.note("There are no packages in %s" % self.deploy_dir)
+ return
+
+ result = oe.utils.multiprocess_exec(index_cmds, create_index)
+ if result:
+ bb.fatal('%s' % ('\n'.join(result)))
+ if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
+            raise NotImplementedError('Package feed signing not implemented for dpkg')
+
+
+
+class PkgsList(object):
+ __metaclass__ = ABCMeta
+
+ def __init__(self, d, rootfs_dir):
+ self.d = d
+ self.rootfs_dir = rootfs_dir
+
+ @abstractmethod
+ def list_pkgs(self):
+ pass
+
+
+ """
+    This method parses the output from the package manager
+    and returns a dictionary with the information of the
+    installed packages. This is used when the packages are
+    in deb or ipk format.
+ """
+ def opkg_query(self, cmd_output):
+ verregex = re.compile(' \([=<>]* [^ )]*\)')
+ output = dict()
+ filename = ""
+ dep = []
+ pkg = ""
+ for line in cmd_output.splitlines():
+ line = line.rstrip()
+ if ':' in line:
+ if line.startswith("Package: "):
+ pkg = line.split(": ")[1]
+ elif line.startswith("Architecture: "):
+ arch = line.split(": ")[1]
+ elif line.startswith("Version: "):
+ ver = line.split(": ")[1]
+ elif line.startswith("File: "):
+ filename = line.split(": ")[1]
+ elif line.startswith("Depends: "):
+ depends = verregex.sub('', line.split(": ")[1])
+ for depend in depends.split(", "):
+ dep.append(depend)
+ elif line.startswith("Recommends: "):
+ recommends = verregex.sub('', line.split(": ")[1])
+ for recommend in recommends.split(", "):
+ dep.append("%s [REC]" % recommend)
+ else:
+ # IPK doesn't include the filename
+ if not filename:
+ filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
+ if pkg:
+ output[pkg] = {"arch":arch, "ver":ver,
+ "filename":filename, "deps": dep }
+ pkg = ""
+ filename = ""
+ dep = []
+
+ if pkg:
+ if not filename:
+ filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
+ output[pkg] = {"arch":arch, "ver":ver,
+ "filename":filename, "deps": dep }
+
+ return output
+
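+    # For example (editor's illustration), a status stanza such as:
+    #
+    #   Package: bash
+    #   Version: 4.3
+    #   Architecture: core2-64
+    #
+    # yields:
+    #
+    #   {'bash': {'arch': 'core2-64', 'ver': '4.3',
+    #             'filename': 'bash_4.3_core2-64.ipk', 'deps': []}}
+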
+
+class RpmPkgsList(PkgsList):
+ def __init__(self, d, rootfs_dir, arch_var=None, os_var=None):
+ super(RpmPkgsList, self).__init__(d, rootfs_dir)
+
+ self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+ self.image_rpmlib = os.path.join(self.rootfs_dir, 'var/lib/rpm')
+
+ self.ml_prefix_list, self.ml_os_list = \
+ RpmIndexer(d, rootfs_dir).get_ml_prefix_and_os_list(arch_var, os_var)
+
+ # Determine rpm version
+ cmd = "%s --version" % self.rpm_cmd
+ try:
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Getting rpm version failed. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+ '''
+ Translate the RPM/Smart format names to the OE multilib format names
+ '''
+ def _pkg_translate_smart_to_oe(self, pkg, arch):
+ new_pkg = pkg
+ new_arch = arch
+ fixed_arch = arch.replace('_', '-')
+ found = 0
+ for mlib in self.ml_prefix_list:
+ for cmp_arch in self.ml_prefix_list[mlib]:
+ fixed_cmp_arch = cmp_arch.replace('_', '-')
+ if fixed_arch == fixed_cmp_arch:
+ if mlib == 'default':
+ new_pkg = pkg
+ new_arch = cmp_arch
+ else:
+ new_pkg = mlib + '-' + pkg
+ # We need to strip off the ${mlib}_ prefix on the arch
+ new_arch = cmp_arch.replace(mlib + '_', '')
+
+ # Workaround for bug 3565. Simply look to see if we
+ # know of a package with that name, if not try again!
+ filename = os.path.join(self.d.getVar('PKGDATA_DIR', True),
+ 'runtime-reverse',
+ new_pkg)
+ if os.path.exists(filename):
+ found = 1
+ break
+
+ if found == 1 and fixed_arch == fixed_cmp_arch:
+ break
+ #bb.note('%s, %s -> %s, %s' % (pkg, arch, new_pkg, new_arch))
+ return new_pkg, new_arch
+
+ def _list_pkg_deps(self):
+ cmd = [bb.utils.which(os.getenv('PATH'), "rpmresolve"),
+ "-t", self.image_rpmlib]
+
+ try:
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Cannot get the package dependencies. Command '%s' "
+ "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))
+
+ return output
+
+ def list_pkgs(self):
+ cmd = self.rpm_cmd + ' --root ' + self.rootfs_dir
+ cmd += ' -D "_dbpath /var/lib/rpm" -qa'
+ cmd += " --qf '[%{NAME} %{ARCH} %{VERSION} %{PACKAGEORIGIN}\n]'"
+
+ try:
+ # bb.note(cmd)
+ tmp_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Cannot get the installed packages list. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+ output = dict()
+ deps = dict()
+ dependencies = self._list_pkg_deps()
+
+ # Populate deps dictionary for better manipulation
+ for line in dependencies.splitlines():
+ try:
+ pkg, dep = line.split("|")
+                if pkg not in deps:
+                    deps[pkg] = list()
+                if dep not in deps[pkg]:
+                    deps[pkg].append(dep)
+            except ValueError:
+                # Ignore any other lines; they're debug output or errors
+                pass
+
+ for line in tmp_output.split('\n'):
+ if len(line.strip()) == 0:
+ continue
+ pkg = line.split()[0]
+ arch = line.split()[1]
+ ver = line.split()[2]
+ dep = deps.get(pkg, [])
+
+ # Skip GPG keys
+ if pkg == 'gpg-pubkey':
+ continue
+
+ pkgorigin = line.split()[3]
+ new_pkg, new_arch = self._pkg_translate_smart_to_oe(pkg, arch)
+
+ output[new_pkg] = {"arch":new_arch, "ver":ver,
+ "filename":pkgorigin, "deps":dep}
+
+ return output
+
+
+class OpkgPkgsList(PkgsList):
+ def __init__(self, d, rootfs_dir, config_file):
+ super(OpkgPkgsList, self).__init__(d, rootfs_dir)
+
+ self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
+ self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
+ self.opkg_args += self.d.getVar("OPKG_ARGS", True)
+
+ def list_pkgs(self, format=None):
+ cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args)
+
+ # opkg returns success even when it printed some
+ # "Collected errors:" report to stderr. Mixing stderr into
+ # stdout then leads to random failures later on when
+ # parsing the output. To avoid this we need to collect both
+ # output streams separately and check for empty stderr.
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+ cmd_output, cmd_stderr = p.communicate()
+ if p.returncode or cmd_stderr:
+ bb.fatal("Cannot get the installed packages list. Command '%s' "
+ "returned %d and stderr:\n%s" % (cmd, p.returncode, cmd_stderr))
+
+ return self.opkg_query(cmd_output)
+
+
+class DpkgPkgsList(PkgsList):
+
+ def list_pkgs(self):
+ cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
+ "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
+ "-W"]
+
+ cmd.append("-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n")
+
+ try:
+ cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Cannot get the installed packages list. Command '%s' "
+ "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))
+
+ return self.opkg_query(cmd_output)
+
+
+class PackageManager(object):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+ __metaclass__ = ABCMeta
+
+ def __init__(self, d):
+ self.d = d
+ self.deploy_dir = None
+ self.deploy_lock = None
+ self.feed_uris = self.d.getVar('PACKAGE_FEED_URIS', True) or ""
+ self.feed_base_paths = self.d.getVar('PACKAGE_FEED_BASE_PATHS', True) or ""
+ self.feed_archs = self.d.getVar('PACKAGE_FEED_ARCHS', True)
+
+ """
+ Update the package manager package database.
+ """
+ @abstractmethod
+ def update(self):
+ pass
+
+ """
+ Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
+ True, installation failures are ignored.
+ """
+ @abstractmethod
+ def install(self, pkgs, attempt_only=False):
+ pass
+
+ """
+ Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
+    is False, any dependencies are left in place.
+ """
+ @abstractmethod
+ def remove(self, pkgs, with_dependencies=True):
+ pass
+
+ """
+ This function creates the index files
+ """
+ @abstractmethod
+ def write_index(self):
+ pass
+
+ @abstractmethod
+ def remove_packaging_data(self):
+ pass
+
+ @abstractmethod
+ def list_installed(self):
+ pass
+
+ @abstractmethod
+ def insert_feeds_uris(self):
+ pass
+
+ """
+ Install complementary packages based upon the list of currently installed
+    packages, e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
+    these packages; if they don't exist, no error will occur. Note: every
+    backend needs to call this function explicitly after the normal package
+    installation.
+ """
+ def install_complementary(self, globs=None):
+ # we need to write the list of installed packages to a file because the
+ # oe-pkgdata-util reads it from a file
+ installed_pkgs_file = os.path.join(self.d.getVar('WORKDIR', True),
+ "installed_pkgs.txt")
+ with open(installed_pkgs_file, "w+") as installed_pkgs:
+ pkgs = self.list_installed()
+ output = oe.utils.format_pkg_list(pkgs, "arch")
+ installed_pkgs.write(output)
+
+        if globs is None:
+            globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY', True) or ""
+            split_linguas = set()
+
+            for translation in (self.d.getVar('IMAGE_LINGUAS', True) or "").split():
+                split_linguas.add(translation)
+                split_linguas.add(translation.split('-')[0])
+
+            split_linguas = sorted(split_linguas)
+
+            for lang in split_linguas:
+                globs += " *-locale-%s" % lang
+
+        if not globs:
+            return
+
+ cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
+ "-p", self.d.getVar('PKGDATA_DIR', True), "glob", installed_pkgs_file,
+ globs]
+ exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY', True)
+ if exclude:
+ cmd.extend(['-x', exclude])
+ try:
+ bb.note("Installing complementary packages ...")
+ bb.note('Running %s' % cmd)
+ complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not compute complementary packages list. Command "
+ "'%s' returned %d:\n%s" %
+ (' '.join(cmd), e.returncode, e.output))
+ self.install(complementary_pkgs.split(), attempt_only=True)
+ os.remove(installed_pkgs_file)
+
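+    # For example (editor's illustration): with IMAGE_INSTALL_COMPLEMENTARY
+    # set to "*-dev" and IMAGE_LINGUAS set to "en-us", the globs passed to
+    # oe-pkgdata-util become "*-dev *-locale-en *-locale-en-us".
+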
+ def deploy_dir_lock(self):
+ if self.deploy_dir is None:
+ raise RuntimeError("deploy_dir is not set!")
+
+ lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
+
+ self.deploy_lock = bb.utils.lockfile(lock_file_name)
+
+ def deploy_dir_unlock(self):
+ if self.deploy_lock is None:
+ return
+
+ bb.utils.unlockfile(self.deploy_lock)
+
+ self.deploy_lock = None
+
+ """
+    Construct URIs based on the pattern uri/base_path, where 'uri'
+    and 'base_path' correspond to each element of the corresponding array
+    arguments, leading to len(uris) x len(base_paths) elements in the
+    returned list.
+ """
+ def construct_uris(self, uris, base_paths):
+ def _append(arr1, arr2, sep='/'):
+ res = []
+ narr1 = map(lambda a: string.rstrip(a, sep), arr1)
+ narr2 = map(lambda a: string.lstrip(string.rstrip(a, sep), sep), arr2)
+ for a1 in narr1:
+ if arr2:
+ for a2 in narr2:
+ res.append("%s%s%s" % (a1, sep, a2))
+ else:
+ res.append(a1)
+ return res
+ return _append(uris, base_paths)
+
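+    # For example (editor's illustration):
+    #
+    #   construct_uris(['http://example.com/feed/'], ['rpm/all', 'rpm/core2_64'])
+    #   # -> ['http://example.com/feed/rpm/all',
+    #   #     'http://example.com/feed/rpm/core2_64']
+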
+class RpmPM(PackageManager):
+ def __init__(self,
+ d,
+ target_rootfs,
+ target_vendor,
+ task_name='target',
+ providename=None,
+ arch_var=None,
+ os_var=None):
+ super(RpmPM, self).__init__(d)
+ self.target_rootfs = target_rootfs
+ self.target_vendor = target_vendor
+ self.task_name = task_name
+ self.providename = providename
+ self.fullpkglist = list()
+ self.deploy_dir = self.d.getVar('DEPLOY_DIR_RPM', True)
+ self.etcrpm_dir = os.path.join(self.target_rootfs, "etc/rpm")
+ self.install_dir_name = "oe_install"
+ self.install_dir_path = os.path.join(self.target_rootfs, self.install_dir_name)
+ self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+ self.smart_cmd = bb.utils.which(os.getenv('PATH'), "smart")
+ # 0 = default, only warnings
+ # 1 = --log-level=info (includes information about executing scriptlets and their output)
+ # 2 = --log-level=debug
+ # 3 = --log-level=debug plus dumps of scriplet content and command invocation
+ self.debug_level = int(d.getVar('ROOTFS_RPM_DEBUG', True) or "0")
+ self.smart_opt = "--log-level=%s --data-dir=%s" % \
+ ("warning" if self.debug_level == 0 else
+ "info" if self.debug_level == 1 else
+ "debug",
+ os.path.join(target_rootfs, 'var/lib/smart'))
+ self.scriptlet_wrapper = self.d.expand('${WORKDIR}/scriptlet_wrapper')
+ self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
+ self.task_name)
+ self.saved_rpmlib = self.d.expand('${T}/saved/%s' % self.task_name)
+ self.image_rpmlib = os.path.join(self.target_rootfs, 'var/lib/rpm')
+
+ if not os.path.exists(self.d.expand('${T}/saved')):
+ bb.utils.mkdirhier(self.d.expand('${T}/saved'))
+
+ self.indexer = RpmIndexer(self.d, self.deploy_dir)
+ self.pkgs_list = RpmPkgsList(self.d, self.target_rootfs, arch_var, os_var)
+
+ self.ml_prefix_list, self.ml_os_list = self.indexer.get_ml_prefix_and_os_list(arch_var, os_var)
+
+ def insert_feeds_uris(self):
+ if self.feed_uris == "":
+ return
+
+ arch_list = []
+ if self.feed_archs is not None:
+            # User-defined feed architectures
+ arch_list = self.feed_archs.split()
+ else:
+            # List must be in most preferred to least preferred order
+ default_platform_extra = set()
+ platform_extra = set()
+ bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
+ for mlib in self.ml_os_list:
+ for arch in self.ml_prefix_list[mlib]:
+ plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
+ if mlib == bbextendvariant:
+ default_platform_extra.add(plt)
+ else:
+ platform_extra.add(plt)
+
+ platform_extra = platform_extra.union(default_platform_extra)
+
+ for canonical_arch in platform_extra:
+ arch = canonical_arch.split('-')[0]
+ if not os.path.exists(os.path.join(self.deploy_dir, arch)):
+ continue
+ arch_list.append(arch)
+
+ feed_uris = self.construct_uris(self.feed_uris.split(), self.feed_base_paths.split())
+
+ uri_iterator = 0
+ channel_priority = 10 + 5 * len(feed_uris) * (len(arch_list) if arch_list else 1)
+
+ for uri in feed_uris:
+ if arch_list:
+ for arch in arch_list:
+ bb.note('Note: adding Smart channel url%d%s (%s)' %
+ (uri_iterator, arch, channel_priority))
+ self._invoke_smart('channel --add url%d-%s type=rpm-md baseurl=%s/%s -y'
+ % (uri_iterator, arch, uri, arch))
+ self._invoke_smart('channel --set url%d-%s priority=%d' %
+ (uri_iterator, arch, channel_priority))
+ channel_priority -= 5
+ else:
+ bb.note('Note: adding Smart channel url%d (%s)' %
+ (uri_iterator, channel_priority))
+ self._invoke_smart('channel --add url%d type=rpm-md baseurl=%s -y'
+ % (uri_iterator, uri))
+ self._invoke_smart('channel --set url%d priority=%d' %
+ (uri_iterator, channel_priority))
+ channel_priority -= 5
+
+ uri_iterator += 1
+
+ '''
+    Create configs for rpm and smart; multilib is supported
+ '''
+ def create_configs(self):
+ target_arch = self.d.getVar('TARGET_ARCH', True)
+ platform = '%s%s-%s' % (target_arch.replace('-', '_'),
+ self.target_vendor,
+ self.ml_os_list['default'])
+
+        # List must be in most preferred to least preferred order
+ default_platform_extra = list()
+ platform_extra = list()
+ bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
+ for mlib in self.ml_os_list:
+ for arch in self.ml_prefix_list[mlib]:
+ plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
+ if mlib == bbextendvariant:
+ if plt not in default_platform_extra:
+ default_platform_extra.append(plt)
+ else:
+ if plt not in platform_extra:
+ platform_extra.append(plt)
+ platform_extra = default_platform_extra + platform_extra
+
+ self._create_configs(platform, platform_extra)
+
+ def _invoke_smart(self, args):
+ cmd = "%s %s %s" % (self.smart_cmd, self.smart_opt, args)
+ # bb.note(cmd)
+ try:
+ complementary_pkgs = subprocess.check_output(cmd,
+ stderr=subprocess.STDOUT,
+ shell=True)
+ # bb.note(complementary_pkgs)
+ return complementary_pkgs
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not invoke smart. Command "
+ "'%s' returned %d:\n%s" % (cmd, e.returncode, e.output))
+
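+    # For example (editor's illustration):
+    #
+    #   self._invoke_smart('channel --list')
+    #
+    # runs "smart --log-level=... --data-dir=... channel --list" and returns
+    # its output, aborting the build if smart fails.
+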
+ def _search_pkg_name_in_feeds(self, pkg, feed_archs):
+ for arch in feed_archs:
+ arch = arch.replace('-', '_')
+ regex_match = re.compile(r"^%s-[^-]*-[^-]*@%s$" % \
+ (re.escape(pkg), re.escape(arch)))
+ for p in self.fullpkglist:
+ if regex_match.match(p) is not None:
+ # First found is best match
+ # bb.note('%s -> %s' % (pkg, pkg + '@' + arch))
+ return pkg + '@' + arch
+
+ # Search provides if not found by pkgname.
+        bb.note('%s not found by name, searching provides ...' % pkg)
+ cmd = "%s %s query --provides %s --show-format='$name-$version'" % \
+ (self.smart_cmd, self.smart_opt, pkg)
+ cmd += " | sed -ne 's/ *Provides://p'"
+ bb.note('cmd: %s' % cmd)
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ # Found a provider
+ if output:
+ bb.note('Found providers for %s: %s' % (pkg, output))
+ for p in output.split():
+ for arch in feed_archs:
+ arch = arch.replace('-', '_')
+ if p.rstrip().endswith('@' + arch):
+ return p
+
+ return ""
+
+ '''
+    Translate the OE multilib format names to the RPM/Smart format names.
+    It searches for the RPM/Smart format names in the probable multilib
+    feeds first, and then falls back to the default base feed.
+ '''
+ def _pkg_translate_oe_to_smart(self, pkgs, attempt_only=False):
+ new_pkgs = list()
+
+ for pkg in pkgs:
+ new_pkg = pkg
+ # Search new_pkg in probable multilibs first
+ for mlib in self.ml_prefix_list:
+                # Skip the default archs
+ if mlib == 'default':
+ continue
+
+ subst = pkg.replace(mlib + '-', '')
+ # if the pkg in this multilib feed
+ if subst != pkg:
+ feed_archs = self.ml_prefix_list[mlib]
+ new_pkg = self._search_pkg_name_in_feeds(subst, feed_archs)
+ if not new_pkg:
+ # Failed to translate, package not found!
+ err_msg = '%s not found in the %s feeds (%s).\n' % \
+ (pkg, mlib, " ".join(feed_archs))
+ if not attempt_only:
+ err_msg += " ".join(self.fullpkglist)
+ bb.fatal(err_msg)
+ bb.warn(err_msg)
+ else:
+ new_pkgs.append(new_pkg)
+
+ break
+
+ # Apparently not a multilib package...
+ if pkg == new_pkg:
+ # Search new_pkg in default archs
+ default_archs = self.ml_prefix_list['default']
+ new_pkg = self._search_pkg_name_in_feeds(pkg, default_archs)
+ if not new_pkg:
+ err_msg = '%s not found in the base feeds (%s).\n' % \
+ (pkg, ' '.join(default_archs))
+ if not attempt_only:
+ err_msg += " ".join(self.fullpkglist)
+ bb.fatal(err_msg)
+ bb.warn(err_msg)
+ else:
+ new_pkgs.append(new_pkg)
+
+ return new_pkgs
+
+ def _create_configs(self, platform, platform_extra):
+ # Setup base system configuration
+ bb.note("configuring RPM platform settings")
+
+ # Configure internal RPM environment when using Smart
+ os.environ['RPM_ETCRPM'] = self.etcrpm_dir
+ bb.utils.mkdirhier(self.etcrpm_dir)
+
+ # Setup temporary directory -- install...
+ if os.path.exists(self.install_dir_path):
+ bb.utils.remove(self.install_dir_path, True)
+ bb.utils.mkdirhier(os.path.join(self.install_dir_path, 'tmp'))
+
+ channel_priority = 5
+ platform_dir = os.path.join(self.etcrpm_dir, "platform")
+ sdkos = self.d.getVar("SDK_OS", True)
+ with open(platform_dir, "w+") as platform_fd:
+ platform_fd.write(platform + '\n')
+ for pt in platform_extra:
+ channel_priority += 5
+ if sdkos:
+ tmp = re.sub("-%s$" % sdkos, "-%s\n" % sdkos, pt)
+ tmp = re.sub("-linux.*$", "-linux.*\n", tmp)
+ platform_fd.write(tmp)
+
+ # Tell RPM that the "/" directory exist and is available
+ bb.note("configuring RPM system provides")
+ sysinfo_dir = os.path.join(self.etcrpm_dir, "sysinfo")
+ bb.utils.mkdirhier(sysinfo_dir)
+ with open(os.path.join(sysinfo_dir, "Dirnames"), "w+") as dirnames:
+ dirnames.write("/\n")
+
+ if self.providename:
+ providename_dir = os.path.join(sysinfo_dir, "Providename")
+ if not os.path.exists(providename_dir):
+ providename_content = '\n'.join(self.providename)
+ providename_content += '\n'
+ open(providename_dir, "w+").write(providename_content)
+
+ # Configure RPM... we enforce these settings!
+ bb.note("configuring RPM DB settings")
+        # After changing the __db.* cache size, the log file will no longer be
+        # generated automatically, which raises some warnings, so touch an
+        # empty log for rpm to write into.
+ rpmlib_log = os.path.join(self.image_rpmlib, 'log', 'log.0000000001')
+ if not os.path.exists(rpmlib_log):
+ bb.utils.mkdirhier(os.path.join(self.image_rpmlib, 'log'))
+ open(rpmlib_log, 'w+').close()
+
+ DB_CONFIG_CONTENT = "# ================ Environment\n" \
+ "set_data_dir .\n" \
+ "set_create_dir .\n" \
+ "set_lg_dir ./log\n" \
+ "set_tmp_dir ./tmp\n" \
+ "set_flags db_log_autoremove on\n" \
+ "\n" \
+ "# -- thread_count must be >= 8\n" \
+ "set_thread_count 64\n" \
+ "\n" \
+ "# ================ Logging\n" \
+ "\n" \
+ "# ================ Memory Pool\n" \
+ "set_cachesize 0 1048576 0\n" \
+ "set_mp_mmapsize 268435456\n" \
+ "\n" \
+ "# ================ Locking\n" \
+ "set_lk_max_locks 16384\n" \
+ "set_lk_max_lockers 16384\n" \
+ "set_lk_max_objects 16384\n" \
+ "mutex_set_max 163840\n" \
+ "\n" \
+ "# ================ Replication\n"
+
+ db_config_dir = os.path.join(self.image_rpmlib, 'DB_CONFIG')
+ if not os.path.exists(db_config_dir):
+ open(db_config_dir, 'w+').write(DB_CONFIG_CONTENT)
+
+ # Create database so that smart doesn't complain (lazy init)
+ opt = "-qa"
+ cmd = "%s --root %s --dbpath /var/lib/rpm %s > /dev/null" % (
+ self.rpm_cmd, self.target_rootfs, opt)
+ try:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Create rpm database failed. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output))
+ # Import GPG key to RPM database of the target system
+ if self.d.getVar('RPM_SIGN_PACKAGES', True) == '1':
+ pubkey_path = self.d.getVar('RPM_GPG_PUBKEY', True)
+ cmd = "%s --root %s --dbpath /var/lib/rpm --import %s > /dev/null" % (
+ self.rpm_cmd, self.target_rootfs, pubkey_path)
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+
+ # Configure smart
+ bb.note("configuring Smart settings")
+ bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
+ True)
+ self._invoke_smart('config --set rpm-root=%s' % self.target_rootfs)
+ self._invoke_smart('config --set rpm-dbpath=/var/lib/rpm')
+ self._invoke_smart('config --set rpm-extra-macros._var=%s' %
+ self.d.getVar('localstatedir', True))
+ cmd = "config --set rpm-extra-macros._tmppath=/%s/tmp" % (self.install_dir_name)
+
+ prefer_color = self.d.getVar('RPM_PREFER_ELF_ARCH', True)
+ if prefer_color:
+ if prefer_color not in ['0', '1', '2', '4']:
+ bb.fatal("Invalid RPM_PREFER_ELF_ARCH: %s, it should be one of:\n"
+ "\t1: ELF32 wins\n"
+ "\t2: ELF64 wins\n"
+ "\t4: ELF64 N32 wins (mips64 or mips64el only)" %
+ prefer_color)
+ if prefer_color == "4" and self.d.getVar("TUNE_ARCH", True) not in \
+ ['mips64', 'mips64el']:
+ bb.fatal("RPM_PREFER_ELF_ARCH = \"4\" is for mips64 or mips64el "
+ "only.")
+ self._invoke_smart('config --set rpm-extra-macros._prefer_color=%s'
+ % prefer_color)
+
+ self._invoke_smart(cmd)
+ self._invoke_smart('config --set rpm-ignoresize=1')
+
+ # Write common configuration for host and target usage
+ self._invoke_smart('config --set rpm-nolinktos=1')
+ self._invoke_smart('config --set rpm-noparentdirs=1')
+ check_signature = self.d.getVar('RPM_CHECK_SIGNATURES', True)
+ if check_signature and check_signature.strip() == "0":
+ self._invoke_smart('config --set rpm-check-signatures=false')
+ for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split():
+ self._invoke_smart('flag --set ignore-recommends %s' % i)
+
+        # Apply the following configuration here to avoid it being
+        # saved for field upgrades
+ if self.d.getVar('NO_RECOMMENDATIONS', True).strip() == "1":
+ self._invoke_smart('config --set ignore-all-recommends=1')
+ pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
+ for i in pkg_exclude.split():
+ self._invoke_smart('flag --set exclude-packages %s' % i)
+
+ # Optional debugging
+ # self._invoke_smart('config --set rpm-log-level=debug')
+ # cmd = 'config --set rpm-log-file=/tmp/smart-debug-logfile'
+ # self._invoke_smart(cmd)
+ ch_already_added = []
+ for canonical_arch in platform_extra:
+ arch = canonical_arch.split('-')[0]
+ arch_channel = os.path.join(self.deploy_dir, arch)
+ if os.path.exists(arch_channel) and not arch in ch_already_added:
+ bb.note('Note: adding Smart channel %s (%s)' %
+ (arch, channel_priority))
+ self._invoke_smart('channel --add %s type=rpm-md baseurl=%s -y'
+ % (arch, arch_channel))
+ self._invoke_smart('channel --set %s priority=%d' %
+ (arch, channel_priority))
+ channel_priority -= 5
+
+ ch_already_added.append(arch)
+
+ bb.note('adding Smart RPM DB channel')
+ self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
+
+        # Construct the install scriptlet wrapper.
+        # Scripts need to be ordered when executed; this ensures numeric order.
+        # If we ever run into needing more than 899 scripts, we'll have to
+        # change num to start at 1000.
+ #
+ scriptletcmd = "$2 $1/$3 $4\n"
+ scriptpath = "$1/$3"
+
+ # When self.debug_level >= 3, also dump the content of the
+ # executed scriptlets and how they get invoked. We have to
+ # replace "exit 1" and "ERR" because printing those as-is
+ # would trigger a log analysis failure.
+ if self.debug_level >= 3:
+ dump_invocation = 'echo "Executing ${name} ${kind} with: ' + scriptletcmd + '"\n'
+ dump_script = 'cat ' + scriptpath + '| sed -e "s/exit 1/exxxit 1/g" -e "s/ERR/IRR/g"; echo\n'
+ else:
+ dump_invocation = 'echo "Executing ${name} ${kind}"\n'
+ dump_script = ''
+
+ SCRIPTLET_FORMAT = "#!/bin/bash\n" \
+ "\n" \
+ "export PATH=%s\n" \
+ "export D=%s\n" \
+ 'export OFFLINE_ROOT="$D"\n' \
+ 'export IPKG_OFFLINE_ROOT="$D"\n' \
+ 'export OPKG_OFFLINE_ROOT="$D"\n' \
+ "export INTERCEPT_DIR=%s\n" \
+ "export NATIVE_ROOT=%s\n" \
+ "\n" \
+ "name=`head -1 " + scriptpath + " | cut -d\' \' -f 2`\n" \
+ "kind=`head -1 " + scriptpath + " | cut -d\' \' -f 4`\n" \
+ + dump_invocation \
+ + dump_script \
+ + scriptletcmd + \
+ "ret=$?\n" \
+ "echo Result of ${name} ${kind}: ${ret}\n" \
+ "if [ ${ret} -ne 0 ]; then\n" \
+ " if [ $4 -eq 1 ]; then\n" \
+ " mkdir -p $1/etc/rpm-postinsts\n" \
+ " num=100\n" \
+ " while [ -e $1/etc/rpm-postinsts/${num}-* ]; do num=$((num + 1)); done\n" \
+ ' echo "#!$2" > $1/etc/rpm-postinsts/${num}-${name}\n' \
+ ' echo "# Arg: $4" >> $1/etc/rpm-postinsts/${num}-${name}\n' \
+ " cat " + scriptpath + " >> $1/etc/rpm-postinsts/${num}-${name}\n" \
+ " chmod +x $1/etc/rpm-postinsts/${num}-${name}\n" \
+ ' echo "Info: deferring ${name} ${kind} install scriptlet to first boot"\n' \
+ " else\n" \
+ ' echo "Error: ${name} ${kind} remove scriptlet failed"\n' \
+ " fi\n" \
+ "fi\n"
+
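+        # Illustrative note (not part of the original change): if a bash
+        # postinstall for a hypothetical package "foo" fails at rootfs time
+        # with $4 = 1, the wrapper above defers it to e.g.
+        # /etc/rpm-postinsts/100-foo, which then runs on first boot.
+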
+ intercept_dir = self.d.expand('${WORKDIR}/intercept_scripts')
+ native_root = self.d.getVar('STAGING_DIR_NATIVE', True)
+ scriptlet_content = SCRIPTLET_FORMAT % (os.environ['PATH'],
+ self.target_rootfs,
+ intercept_dir,
+ native_root)
+ open(self.scriptlet_wrapper, 'w+').write(scriptlet_content)
+
+ bb.note("Note: configuring RPM cross-install scriptlet_wrapper")
+ os.chmod(self.scriptlet_wrapper, 0755)
+ cmd = 'config --set rpm-extra-macros._cross_scriptlet_wrapper=%s' % \
+ self.scriptlet_wrapper
+ self._invoke_smart(cmd)
+
+ # Debug to show smart config info
+ # bb.note(self._invoke_smart('config --show'))
+
+ def update(self):
+ self._invoke_smart('update rpmsys')
+
+ def get_rdepends_recursively(self, pkgs):
+ # pkgs will be changed during the loop, so use [:] to make a copy.
+ for pkg in pkgs[:]:
+ sub_data = oe.packagedata.read_subpkgdata(pkg, self.d)
+ sub_rdep = sub_data.get("RDEPENDS_" + pkg)
+ if not sub_rdep:
+ continue
+ done = bb.utils.explode_dep_versions2(sub_rdep).keys()
+ next = done
+            # Find all the rdepends along the dependency chain
+ while next:
+ new = []
+ for sub_pkg in next:
+ sub_data = oe.packagedata.read_subpkgdata(sub_pkg, self.d)
+ sub_pkg_rdep = sub_data.get("RDEPENDS_" + sub_pkg)
+ if not sub_pkg_rdep:
+ continue
+ for p in bb.utils.explode_dep_versions2(sub_pkg_rdep):
+ # Already handled, skip it.
+ if p in done or p in pkgs:
+ continue
+ # It's a new dep
+ if oe.packagedata.has_subpkgdata(p, self.d):
+ done.append(p)
+ new.append(p)
+ next = new
+ pkgs.extend(done)
+ return pkgs
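+
+    # A standalone sketch (not part of the original change) of the recursive
+    # walk above, using a hypothetical in-memory RDEPENDS map instead of
+    # oe.packagedata:
+    #
+    #   >>> rdeps = {'a': ['b'], 'b': ['c'], 'c': []}
+    #   >>> def walk(pkgs):
+    #   ...     seen, frontier = list(pkgs), list(pkgs)
+    #   ...     while frontier:
+    #   ...         new = []
+    #   ...         for p in frontier:
+    #   ...             for dep in rdeps.get(p, []):
+    #   ...                 if dep not in seen:
+    #   ...                     seen.append(dep)
+    #   ...                     new.append(dep)
+    #   ...         frontier = new
+    #   ...     return seen
+    #   >>> walk(['a'])
+    #   ['a', 'b', 'c']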
+
+ '''
+    Install pkgs with smart; the package names are in OE format.
+ '''
+ def install(self, pkgs, attempt_only=False):
+
+ if not pkgs:
+ bb.note("There are no packages to install")
+ return
+ bb.note("Installing the following packages: %s" % ' '.join(pkgs))
+ if not attempt_only:
+            # Pull in multilib requires explicitly, since rpm may not pull
+            # them in correctly. For example,
+            # lib32-packagegroup-core-standalone-sdk-target requires
+            # lib32-libc6, but rpm may pull in libc6 rather than lib32-libc6
+            # since it doesn't know the mlprefix (lib32-). Bitbake knows the
+            # prefix and can handle it well, so walking the RDEPENDS chain
+            # here fixes the problem. Both do_rootfs and do_populate_sdk
+            # have this issue.
+            # The attempt_only packages don't need this since they are
+            # based on the installed ones.
+            #
+            # Separate pkgs into two lists: one multilib, the other
+            # non-multilib.
+ ml_pkgs = []
+ non_ml_pkgs = pkgs[:]
+ for pkg in pkgs:
+ for mlib in (self.d.getVar("MULTILIB_VARIANTS", True) or "").split():
+ if pkg.startswith(mlib + '-'):
+ ml_pkgs.append(pkg)
+ non_ml_pkgs.remove(pkg)
+
+ if len(ml_pkgs) > 0 and len(non_ml_pkgs) > 0:
+ # Found both foo and lib-foo
+ ml_pkgs = self.get_rdepends_recursively(ml_pkgs)
+ non_ml_pkgs = self.get_rdepends_recursively(non_ml_pkgs)
+                # A longer list makes smart slower, so only keep the pkgs
+                # that share the same BPN; smart can handle the others
+                # correctly.
+ pkgs_new = []
+ for pkg in non_ml_pkgs:
+ for mlib in (self.d.getVar("MULTILIB_VARIANTS", True) or "").split():
+ mlib_pkg = mlib + "-" + pkg
+ if mlib_pkg in ml_pkgs:
+ pkgs_new.append(pkg)
+ pkgs_new.append(mlib_pkg)
+ for pkg in pkgs:
+ if pkg not in pkgs_new:
+ pkgs_new.append(pkg)
+ pkgs = pkgs_new
+ new_depends = {}
+ deps = bb.utils.explode_dep_versions2(" ".join(pkgs))
+ for depend in deps:
+ data = oe.packagedata.read_subpkgdata(depend, self.d)
+ key = "PKG_%s" % depend
+ if key in data:
+ new_depend = data[key]
+ else:
+ new_depend = depend
+ new_depends[new_depend] = deps[depend]
+ pkgs = bb.utils.join_deps(new_depends, commasep=True).split(', ')
+ pkgs = self._pkg_translate_oe_to_smart(pkgs, attempt_only)
+ if not attempt_only:
+ bb.note('to be installed: %s' % ' '.join(pkgs))
+ cmd = "%s %s install -y %s" % \
+ (self.smart_cmd, self.smart_opt, ' '.join(pkgs))
+ bb.note(cmd)
+ else:
+            bb.note('installing attempt-only packages...')
+ bb.note('Attempting %s' % ' '.join(pkgs))
+ cmd = "%s %s install --attempt -y %s" % \
+ (self.smart_cmd, self.smart_opt, ' '.join(pkgs))
+ try:
+ output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to install packages. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
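+    # A small sketch (not part of the original change) of the multilib split
+    # performed in install() above, with hypothetical variants and packages:
+    #
+    #   >>> variants = 'lib32'.split()
+    #   >>> pkgs = ['lib32-glibc', 'bash']
+    #   >>> ml = [p for p in pkgs for m in variants if p.startswith(m + '-')]
+    #   >>> non_ml = [p for p in pkgs if p not in ml]
+    #   >>> (ml, non_ml)
+    #   (['lib32-glibc'], ['bash'])
+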
+ '''
+    Remove pkgs with smart; the package names are in smart/RPM format.
+ '''
+ def remove(self, pkgs, with_dependencies=True):
+ bb.note('to be removed: ' + ' '.join(pkgs))
+
+ if not with_dependencies:
+ cmd = "%s -e --nodeps " % self.rpm_cmd
+ cmd += "--root=%s " % self.target_rootfs
+ cmd += "--dbpath=/var/lib/rpm "
+ cmd += "--define='_cross_scriptlet_wrapper %s' " % \
+ self.scriptlet_wrapper
+ cmd += "--define='_tmppath /%s/tmp' %s" % (self.install_dir_name, ' '.join(pkgs))
+ else:
+ # for pkg in pkgs:
+ # bb.note('Debug: What required: %s' % pkg)
+ # bb.note(self._invoke_smart('query %s --show-requiredby' % pkg))
+
+ cmd = "%s %s remove -y %s" % (self.smart_cmd,
+ self.smart_opt,
+ ' '.join(pkgs))
+
+ try:
+ bb.note(cmd)
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.note("Unable to remove packages. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+ def upgrade(self):
+ bb.note('smart upgrade')
+ self._invoke_smart('upgrade')
+
+ def write_index(self):
+ result = self.indexer.write_index()
+
+ if result is not None:
+ bb.fatal(result)
+
+ def remove_packaging_data(self):
+ bb.utils.remove(self.image_rpmlib, True)
+ bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
+ True)
+ bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/opkg'), True)
+
+ # remove temp directory
+ bb.utils.remove(self.install_dir_path, True)
+
+ def backup_packaging_data(self):
+        # Save the rpmlib for incremental rpm image generation
+ if os.path.exists(self.saved_rpmlib):
+ bb.utils.remove(self.saved_rpmlib, True)
+ shutil.copytree(self.image_rpmlib,
+ self.saved_rpmlib,
+ symlinks=True)
+
+ def recovery_packaging_data(self):
+ # Move the rpmlib back
+ if os.path.exists(self.saved_rpmlib):
+ if os.path.exists(self.image_rpmlib):
+ bb.utils.remove(self.image_rpmlib, True)
+
+            bb.note('Recovering packaging data')
+ shutil.copytree(self.saved_rpmlib,
+ self.image_rpmlib,
+ symlinks=True)
+
+ def list_installed(self):
+ return self.pkgs_list.list_pkgs()
+
+ '''
+    For an incremental install we need to determine what we've got,
+    what we need to add, and what to remove.
+    dump_install_solution dumps and saves the new install solution.
+ '''
+ def dump_install_solution(self, pkgs):
+ bb.note('creating new install solution for incremental install')
+ if len(pkgs) == 0:
+ return
+
+ pkgs = self._pkg_translate_oe_to_smart(pkgs, False)
+ install_pkgs = list()
+
+ cmd = "%s %s install -y --dump %s 2>%s" % \
+ (self.smart_cmd,
+ self.smart_opt,
+ ' '.join(pkgs),
+ self.solution_manifest)
+ try:
+ # Disable rpmsys channel for the fake install
+ self._invoke_smart('channel --disable rpmsys')
+
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ with open(self.solution_manifest, 'r') as manifest:
+ for pkg in manifest.read().split('\n'):
+ if '@' in pkg:
+ install_pkgs.append(pkg)
+ except subprocess.CalledProcessError as e:
+ bb.note("Unable to dump install packages. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output))
+        # Re-enable the rpmsys channel
+ self._invoke_smart('channel --enable rpmsys')
+ return install_pkgs
+
+ '''
+    For an incremental install we need to determine what we've got,
+    what we need to add, and what to remove.
+    load_old_install_solution loads the previous install solution.
+ '''
+ def load_old_install_solution(self):
+        bb.note('loading old install solution for incremental install')
+ installed_pkgs = list()
+ if not os.path.exists(self.solution_manifest):
+            bb.note('old install solution does not exist')
+ return installed_pkgs
+
+ with open(self.solution_manifest, 'r') as manifest:
+ for pkg in manifest.read().split('\n'):
+ if '@' in pkg:
+ installed_pkgs.append(pkg.strip())
+
+ return installed_pkgs
+
+ '''
+    Dump all available packages in the feeds; it should be invoked after
+    the newest rpm index has been created.
+ '''
+ def dump_all_available_pkgs(self):
+ available_manifest = self.d.expand('${T}/saved/available_pkgs.txt')
+ available_pkgs = list()
+ cmd = "%s %s query --output %s" % \
+ (self.smart_cmd, self.smart_opt, available_manifest)
+ try:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ with open(available_manifest, 'r') as manifest:
+ for pkg in manifest.read().split('\n'):
+ if '@' in pkg:
+ available_pkgs.append(pkg.strip())
+ except subprocess.CalledProcessError as e:
+ bb.note("Unable to list all available packages. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+ self.fullpkglist = available_pkgs
+
+ return
+
+ def save_rpmpostinst(self, pkg):
+ mlibs = (self.d.getVar('MULTILIB_GLOBAL_VARIANTS', False) or "").split()
+
+ new_pkg = pkg
+ # Remove any multilib prefix from the package name
+ for mlib in mlibs:
+ if mlib in pkg:
+ new_pkg = pkg.replace(mlib + '-', '')
+ break
+
+ bb.note(' * postponing %s' % new_pkg)
+ saved_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + new_pkg
+
+ cmd = self.rpm_cmd + ' -q --scripts --root ' + self.target_rootfs
+ cmd += ' --dbpath=/var/lib/rpm ' + new_pkg
+ cmd += ' | sed -n -e "/^postinstall scriptlet (using .*):$/,/^.* scriptlet (using .*):$/ {/.*/p}"'
+ cmd += ' | sed -e "/postinstall scriptlet (using \(.*\)):$/d"'
+ cmd += ' -e "/^.* scriptlet (using .*):$/d" > %s' % saved_dir
+
+ try:
+ bb.note(cmd)
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
+ bb.note(output)
+ os.chmod(saved_dir, 0755)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Invoke save_rpmpostinst failed. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+ '''Write common configuration for target usage'''
+ def rpm_setup_smart_target_config(self):
+ bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
+ True)
+
+ self._invoke_smart('config --set rpm-nolinktos=1')
+ self._invoke_smart('config --set rpm-noparentdirs=1')
+ for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split():
+ self._invoke_smart('flag --set ignore-recommends %s' % i)
+ self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
+
+ '''
+    The rpm db lock files are produced after invoking rpm to query the
+    build system, and they prevent rpm on the target from working, so we
+    need to unlock the rpm db by removing the lock files.
+ '''
+ def unlock_rpm_db(self):
+ # Remove rpm db lock files
+ rpm_db_locks = glob.glob('%s/var/lib/rpm/__db.*' % self.target_rootfs)
+ for f in rpm_db_locks:
+ bb.utils.remove(f, True)
+
+
+class OpkgPM(PackageManager):
+ def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
+ super(OpkgPM, self).__init__(d)
+
+ self.target_rootfs = target_rootfs
+ self.config_file = config_file
+ self.pkg_archs = archs
+ self.task_name = task_name
+
+ self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK", True)
+ self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
+ self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
+ self.opkg_args = "--volatile-cache -f %s -o %s " % (self.config_file, target_rootfs)
+ self.opkg_args += self.d.getVar("OPKG_ARGS", True)
+
+ opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True)
+ if opkg_lib_dir[0] == "/":
+ opkg_lib_dir = opkg_lib_dir[1:]
+
+ self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")
+
+ bb.utils.mkdirhier(self.opkg_dir)
+
+ self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
+ if not os.path.exists(self.d.expand('${T}/saved')):
+ bb.utils.mkdirhier(self.d.expand('${T}/saved'))
+
+ self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") == "1"
+ if self.from_feeds:
+ self._create_custom_config()
+ else:
+ self._create_config()
+
+ self.indexer = OpkgIndexer(self.d, self.deploy_dir)
+
+ """
+    This function changes a package's status in the /var/lib/opkg/status
+    file. If 'packages' is None then status_tag is applied to all
+    packages.
+ """
+ def mark_packages(self, status_tag, packages=None):
+ status_file = os.path.join(self.opkg_dir, "status")
+
+ with open(status_file, "r") as sf:
+ with open(status_file + ".tmp", "w+") as tmp_sf:
+ if packages is None:
+ tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
+ r"Package: \1\n\2Status: \3%s" % status_tag,
+ sf.read()))
+ else:
+ if type(packages).__name__ != "list":
+ raise TypeError("'packages' should be a list object")
+
+ status = sf.read()
+ for pkg in packages:
+ status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
+ r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
+ status)
+
+ tmp_sf.write(status)
+
+ os.rename(status_file + ".tmp", status_file)
+
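+    # A runnable sketch (not part of the original change) of the status
+    # rewrite above, with a hypothetical status file entry and tag:
+    #
+    #   >>> import re
+    #   >>> status = "Package: foo\nVersion: 1.0\nStatus: install ok installed"
+    #   >>> re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
+    #   ...        r"Package: \1\n\2Status: \3unpacked", status)
+    #   'Package: foo\nVersion: 1.0\nStatus: install ok unpacked'
+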
+ def _create_custom_config(self):
+ bb.note("Building from feeds activated!")
+
+ with open(self.config_file, "w+") as config_file:
+ priority = 1
+ for arch in self.pkg_archs.split():
+ config_file.write("arch %s %d\n" % (arch, priority))
+ priority += 5
+
+ for line in (self.d.getVar('IPK_FEED_URIS', True) or "").split():
+ feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
+
+ if feed_match is not None:
+ feed_name = feed_match.group(1)
+ feed_uri = feed_match.group(2)
+
+ bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))
+
+ config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))
+
+ """
+        Allow using the package deploy directory contents as a quick
+        devel-testing feed. This creates individual feed configs for each
+        arch subdir of those specified as compatible for the current machine.
+ NOTE: Development-helper feature, NOT a full-fledged feed.
+ """
+ if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True) or "") != "":
+ for arch in self.pkg_archs.split():
+ cfg_file_name = os.path.join(self.target_rootfs,
+ self.d.getVar("sysconfdir", True),
+ "opkg",
+ "local-%s-feed.conf" % arch)
+
+ with open(cfg_file_name, "w+") as cfg_file:
+ cfg_file.write("src/gz local-%s %s/%s" %
+ (arch,
+ self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True),
+ arch))
+
+ if self.opkg_dir != '/var/lib/opkg':
+ # There is no command line option for this anymore, we need to add
+ # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
+ # the default value of "/var/lib" as defined in opkg:
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR "/var/lib/opkg/info"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE "/var/lib/opkg/status"
+ cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'info'))
+ cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'status'))
+
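+    # How the IPK_FEED_URIS lines are parsed in _create_custom_config above;
+    # a hedged, standalone example (the feed name and URL are hypothetical):
+    #
+    #   >>> import re
+    #   >>> m = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$",
+    #   ...              "devel##http://feeds.example.com/ipk")
+    #   >>> (m.group(1), m.group(2))
+    #   ('devel', 'http://feeds.example.com/ipk')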
+
+ def _create_config(self):
+ with open(self.config_file, "w+") as config_file:
+ priority = 1
+ for arch in self.pkg_archs.split():
+ config_file.write("arch %s %d\n" % (arch, priority))
+ priority += 5
+
+ config_file.write("src oe file:%s\n" % self.deploy_dir)
+
+ for arch in self.pkg_archs.split():
+ pkgs_dir = os.path.join(self.deploy_dir, arch)
+ if os.path.isdir(pkgs_dir):
+ config_file.write("src oe-%s file:%s\n" %
+ (arch, pkgs_dir))
+
+ if self.opkg_dir != '/var/lib/opkg':
+ # There is no command line option for this anymore, we need to add
+ # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
+ # the default value of "/var/lib" as defined in opkg:
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR "/var/lib/opkg/info"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE "/var/lib/opkg/status"
+ config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'info'))
+ config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'status'))
+
+ def insert_feeds_uris(self):
+ if self.feed_uris == "":
+ return
+
+ rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
+ % self.target_rootfs)
+
+ feed_uris = self.construct_uris(self.feed_uris.split(), self.feed_base_paths.split())
+ archs = self.pkg_archs.split() if self.feed_archs is None else self.feed_archs.split()
+
+ with open(rootfs_config, "w+") as config_file:
+ uri_iterator = 0
+ for uri in feed_uris:
+ if archs:
+ for arch in archs:
+ if (self.feed_archs is None) and (not os.path.exists(os.path.join(self.deploy_dir, arch))):
+ continue
+ bb.note('Note: adding opkg feed url-%s-%d (%s)' %
+ (arch, uri_iterator, uri))
+ config_file.write("src/gz uri-%s-%d %s/%s\n" %
+ (arch, uri_iterator, uri, arch))
+ else:
+ bb.note('Note: adding opkg feed url-%d (%s)' %
+ (uri_iterator, uri))
+ config_file.write("src/gz uri-%d %s\n" %
+ (uri_iterator, uri))
+
+ uri_iterator += 1
+
+ def update(self):
+ self.deploy_dir_lock()
+
+ cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ self.deploy_dir_unlock()
+ bb.fatal("Unable to update the package index files. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+ self.deploy_dir_unlock()
+
+ def install(self, pkgs, attempt_only=False):
+ if not pkgs:
+ return
+
+ cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
+ "intercept_scripts")
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)
+
+ try:
+ bb.note("Installing the following packages: %s" % ' '.join(pkgs))
+ bb.note(cmd)
+ output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
+ "Command '%s' returned %d:\n%s" %
+ (cmd, e.returncode, e.output))
+
+ def remove(self, pkgs, with_dependencies=True):
+ if with_dependencies:
+ cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \
+ (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+ else:
+ cmd = "%s %s --force-depends remove %s" % \
+ (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+
+ try:
+ bb.note(cmd)
+ output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to remove packages. Command '%s' "
+ "returned %d:\n%s" % (e.cmd, e.returncode, e.output))
+
+ def write_index(self):
+ self.deploy_dir_lock()
+
+ result = self.indexer.write_index()
+
+ self.deploy_dir_unlock()
+
+ if result is not None:
+ bb.fatal(result)
+
+ def remove_packaging_data(self):
+ bb.utils.remove(self.opkg_dir, True)
+        # Recreate the directory; it's needed by the PM lock
+ bb.utils.mkdirhier(self.opkg_dir)
+
+ def remove_lists(self):
+ if not self.from_feeds:
+ bb.utils.remove(os.path.join(self.opkg_dir, "lists"), True)
+
+ def list_installed(self):
+ return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list_pkgs()
+
+ def handle_bad_recommendations(self):
+ bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS", True) or ""
+ if bad_recommendations.strip() == "":
+ return
+
+ status_file = os.path.join(self.opkg_dir, "status")
+
+        # If the status file exists, the bad recommendations have already
+        # been handled
+ if os.path.exists(status_file):
+ return
+
+ cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)
+
+ with open(status_file, "w+") as status:
+ for pkg in bad_recommendations.split():
+ pkg_info = cmd + pkg
+
+ try:
+ output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip()
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Cannot get package info. Command '%s' "
+ "returned %d:\n%s" % (pkg_info, e.returncode, e.output))
+
+ if output == "":
+ bb.note("Ignored bad recommendation: '%s' is "
+ "not a package" % pkg)
+ continue
+
+ for line in output.split('\n'):
+ if line.startswith("Status:"):
+ status.write("Status: deinstall hold not-installed\n")
+ else:
+ status.write(line + "\n")
+
+ # Append a blank line after each package entry to ensure that it
+ # is separated from the following entry
+ status.write("\n")
+
+ '''
+    The following function dummy-installs pkgs and returns the output log.
+ '''
+ def dummy_install(self, pkgs):
+ if len(pkgs) == 0:
+ return
+
+        # Create a temp dir as the opkg root for the dummy installation
+ temp_rootfs = self.d.expand('${T}/opkg')
+ temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg')
+ bb.utils.mkdirhier(temp_opkg_dir)
+
+ opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
+ opkg_args += self.d.getVar("OPKG_ARGS", True)
+
+ cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
+ try:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to update. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+ # Dummy installation
+ cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
+ opkg_args,
+ ' '.join(pkgs))
+ try:
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to dummy install packages. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+ bb.utils.remove(temp_rootfs, True)
+
+ return output
+
+ def backup_packaging_data(self):
+        # Save the opkglib for incremental ipk image generation
+ if os.path.exists(self.saved_opkg_dir):
+ bb.utils.remove(self.saved_opkg_dir, True)
+ shutil.copytree(self.opkg_dir,
+ self.saved_opkg_dir,
+ symlinks=True)
+
+ def recover_packaging_data(self):
+ # Move the opkglib back
+ if os.path.exists(self.saved_opkg_dir):
+ if os.path.exists(self.opkg_dir):
+ bb.utils.remove(self.opkg_dir, True)
+
+            bb.note('Recovering packaging data')
+ shutil.copytree(self.saved_opkg_dir,
+ self.opkg_dir,
+ symlinks=True)
+
+
+class DpkgPM(PackageManager):
+ def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
+ super(DpkgPM, self).__init__(d)
+ self.target_rootfs = target_rootfs
+ self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB', True)
+ if apt_conf_dir is None:
+ self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
+ else:
+ self.apt_conf_dir = apt_conf_dir
+ self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
+ self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
+
+ self.apt_args = d.getVar("APT_ARGS", True)
+
+ self.all_arch_list = archs.split()
+ all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").split()
+ self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)
+
+ self._create_configs(archs, base_archs)
+
+ self.indexer = DpkgIndexer(self.d, self.deploy_dir)
+
+ """
+    This function changes a package's status in the /var/lib/dpkg/status
+    file. If 'packages' is None then status_tag is applied to all
+    packages.
+ """
+ def mark_packages(self, status_tag, packages=None):
+ status_file = self.target_rootfs + "/var/lib/dpkg/status"
+
+ with open(status_file, "r") as sf:
+ with open(status_file + ".tmp", "w+") as tmp_sf:
+ if packages is None:
+ tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
+ r"Package: \1\n\2Status: \3%s" % status_tag,
+ sf.read()))
+ else:
+ if type(packages).__name__ != "list":
+ raise TypeError("'packages' should be a list object")
+
+ status = sf.read()
+ for pkg in packages:
+ status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
+ r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
+ status)
+
+ tmp_sf.write(status)
+
+ os.rename(status_file + ".tmp", status_file)
+
+ """
+ Run the pre/post installs for package "package_name". If package_name is
+ None, then run all pre/post install scriptlets.
+ """
+ def run_pre_post_installs(self, package_name=None):
+ info_dir = self.target_rootfs + "/var/lib/dpkg/info"
+ suffixes = [(".preinst", "Preinstall"), (".postinst", "Postinstall")]
+ status_file = self.target_rootfs + "/var/lib/dpkg/status"
+ installed_pkgs = []
+
+ with open(status_file, "r") as status:
+ for line in status.read().split('\n'):
+ m = re.match("^Package: (.*)", line)
+ if m is not None:
+ installed_pkgs.append(m.group(1))
+
+        if package_name is not None and package_name not in installed_pkgs:
+ return
+
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
+ "intercept_scripts")
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)
+
+ failed_pkgs = []
+ for pkg_name in installed_pkgs:
+ for suffix in suffixes:
+ p_full = os.path.join(info_dir, pkg_name + suffix[0])
+ if os.path.exists(p_full):
+ try:
+ bb.note("Executing %s for package: %s ..." %
+ (suffix[1].lower(), pkg_name))
+ subprocess.check_output(p_full, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.note("%s for package %s failed with %d:\n%s" %
+ (suffix[1], pkg_name, e.returncode, e.output))
+ failed_pkgs.append(pkg_name)
+ break
+
+ if len(failed_pkgs):
+ self.mark_packages("unpacked", failed_pkgs)
+
+ def update(self):
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ self.deploy_dir_lock()
+
+ cmd = "%s update" % self.apt_get_cmd
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to update the package index files. Command '%s' "
+ "returned %d:\n%s" % (e.cmd, e.returncode, e.output))
+
+ self.deploy_dir_unlock()
+
+ def install(self, pkgs, attempt_only=False):
+ if attempt_only and len(pkgs) == 0:
+ return
+
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \
+ (self.apt_get_cmd, self.apt_args, ' '.join(pkgs))
+
+ try:
+ bb.note("Installing the following packages: %s" % ' '.join(pkgs))
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
+ "Command '%s' returned %d:\n%s" %
+ (cmd, e.returncode, e.output))
+
+ # rename *.dpkg-new files/dirs
+ for root, dirs, files in os.walk(self.target_rootfs):
+ for dir in dirs:
+ new_dir = re.sub("\.dpkg-new", "", dir)
+ if dir != new_dir:
+ os.rename(os.path.join(root, dir),
+ os.path.join(root, new_dir))
+
+ for file in files:
+ new_file = re.sub("\.dpkg-new", "", file)
+ if file != new_file:
+ os.rename(os.path.join(root, file),
+ os.path.join(root, new_file))
+
+
+ def remove(self, pkgs, with_dependencies=True):
+ if with_dependencies:
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+ cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
+ else:
+ cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
+ " -P --force-depends %s" % \
+ (bb.utils.which(os.getenv('PATH'), "dpkg"),
+ self.target_rootfs, self.target_rootfs, ' '.join(pkgs))
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to remove packages. Command '%s' "
+ "returned %d:\n%s" % (e.cmd, e.returncode, e.output))
+
+ def write_index(self):
+ self.deploy_dir_lock()
+
+ result = self.indexer.write_index()
+
+ self.deploy_dir_unlock()
+
+ if result is not None:
+ bb.fatal(result)
+
+ def insert_feeds_uris(self):
+ if self.feed_uris == "":
+ return
+
+ sources_conf = os.path.join("%s/etc/apt/sources.list"
+ % self.target_rootfs)
+ arch_list = []
+
+ if self.feed_archs is None:
+ for arch in self.all_arch_list:
+ if not os.path.exists(os.path.join(self.deploy_dir, arch)):
+ continue
+ arch_list.append(arch)
+ else:
+ arch_list = self.feed_archs.split()
+
+ feed_uris = self.construct_uris(self.feed_uris.split(), self.feed_base_paths.split())
+
+ with open(sources_conf, "w+") as sources_file:
+ for uri in feed_uris:
+ if arch_list:
+ for arch in arch_list:
+ bb.note('Note: adding dpkg channel at (%s)' % uri)
+ sources_file.write("deb %s/%s ./\n" %
+ (uri, arch))
+ else:
+ bb.note('Note: adding dpkg channel at (%s)' % uri)
+ sources_file.write("deb %s ./\n" % uri)
+
+ def _create_configs(self, archs, base_archs):
+ base_archs = re.sub("_", "-", base_archs)
+
+ if os.path.exists(self.apt_conf_dir):
+ bb.utils.remove(self.apt_conf_dir, True)
+
+ bb.utils.mkdirhier(self.apt_conf_dir)
+ bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
+ bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
+ bb.utils.mkdirhier(self.apt_conf_dir + "/preferences.d/")
+
+ arch_list = []
+ for arch in self.all_arch_list:
+ if not os.path.exists(os.path.join(self.deploy_dir, arch)):
+ continue
+ arch_list.append(arch)
+
+ with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
+ priority = 801
+ for arch in arch_list:
+ prefs_file.write(
+ "Package: *\n"
+ "Pin: release l=%s\n"
+ "Pin-Priority: %d\n\n" % (arch, priority))
+
+ priority += 5
+
+ pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
+ for pkg in pkg_exclude.split():
+ prefs_file.write(
+ "Package: %s\n"
+ "Pin: release *\n"
+ "Pin-Priority: -1\n\n" % pkg)
+
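+        # For illustration only (not from the original change): with a
+        # hypothetical arch_list = ['all', 'armv7a'], the loop above would
+        # emit stanzas such as:
+        #
+        #   Package: *
+        #   Pin: release l=all
+        #   Pin-Priority: 801
+        #
+        #   Package: *
+        #   Pin: release l=armv7a
+        #   Pin-Priority: 806
+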
+ arch_list.reverse()
+
+ with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
+ for arch in arch_list:
+ sources_file.write("deb file:%s/ ./\n" %
+ os.path.join(self.deploy_dir, arch))
+
+ base_arch_list = base_archs.split()
+        multilib_variants = self.d.getVar("MULTILIB_VARIANTS", True)
+ for variant in multilib_variants.split():
+ localdata = bb.data.createCopy(self.d)
+ variant_tune = localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + variant, False)
+ orig_arch = localdata.getVar("DPKG_ARCH", True)
+ localdata.setVar("DEFAULTTUNE", variant_tune)
+ bb.data.update_data(localdata)
+ variant_arch = localdata.getVar("DPKG_ARCH", True)
+ if variant_arch not in base_arch_list:
+ base_arch_list.append(variant_arch)
+
+ with open(self.apt_conf_file, "w+") as apt_conf:
+ with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
+ for line in apt_conf_sample.read().split("\n"):
+ match_arch = re.match(" Architecture \".*\";$", line)
+ architectures = ""
+ if match_arch:
+ for base_arch in base_arch_list:
+ architectures += "\"%s\";" % base_arch
+ apt_conf.write(" Architectures {%s};\n" % architectures);
+ apt_conf.write(" Architecture \"%s\";\n" % base_archs)
+ else:
+ line = re.sub("#ROOTFS#", self.target_rootfs, line)
+ line = re.sub("#APTCONF#", self.apt_conf_dir, line)
+ apt_conf.write(line + "\n")
+
+ target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
+ bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
+
+ bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))
+
+ if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
+ open(os.path.join(target_dpkg_dir, "status"), "w+").close()
+ if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
+ open(os.path.join(target_dpkg_dir, "available"), "w+").close()
+
+ def remove_packaging_data(self):
+ bb.utils.remove(os.path.join(self.target_rootfs,
+ self.d.getVar('opkglibdir', True)), True)
+ bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
+
+ def fix_broken_dependencies(self):
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ cmd = "%s %s -f install" % (self.apt_get_cmd, self.apt_args)
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Cannot fix broken dependencies. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+ def list_installed(self):
+ return DpkgPkgsList(self.d, self.target_rootfs).list_pkgs()
+
+
+def generate_index_files(d):
+ classes = d.getVar('PACKAGE_CLASSES', True).replace("package_", "").split()
+
+ indexer_map = {
+ "rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM', True)),
+ "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK', True)),
+ "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB', True))
+ }
+
+ result = None
+
+ for pkg_class in classes:
+        if pkg_class not in indexer_map:
+ continue
+
+ if os.path.exists(indexer_map[pkg_class][1]):
+ result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index()
+
+ if result is not None:
+ bb.fatal(result)
+
+if __name__ == "__main__":
+ """
+ We should be able to run this as a standalone script, from outside bitbake
+ environment.
+ """
+ """
+ TBD
+ """
diff --git a/import-layers/yocto-poky/meta/lib/oe/packagedata.py b/import-layers/yocto-poky/meta/lib/oe/packagedata.py
new file mode 100644
index 000000000..bc0fd06bc
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/packagedata.py
@@ -0,0 +1,95 @@
+import codecs
+import os
+
+def packaged(pkg, d):
+ return os.access(get_subpkgedata_fn(pkg, d) + '.packaged', os.R_OK)
+
+def read_pkgdatafile(fn):
+ pkgdata = {}
+
+ def decode(str):
+ c = codecs.getdecoder("string_escape")
+ return c(str)[0]
+
+ if os.access(fn, os.R_OK):
+ import re
+ f = open(fn, 'r')
+ lines = f.readlines()
+ f.close()
+ r = re.compile("([^:]+):\s*(.*)")
+ for l in lines:
+ m = r.match(l)
+ if m:
+ pkgdata[m.group(1)] = decode(m.group(2))
+
+ return pkgdata
+
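+# A short doctest-style sketch (not part of the original change) of the
+# "name: value" parsing above, with a hypothetical pkgdata line:
+#
+#   >>> import re
+#   >>> r = re.compile("([^:]+):\s*(.*)")
+#   >>> m = r.match("PN: base-files")
+#   >>> (m.group(1), m.group(2))
+#   ('PN', 'base-files')
+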
+def get_subpkgedata_fn(pkg, d):
+ return d.expand('${PKGDATA_DIR}/runtime/%s' % pkg)
+
+def has_subpkgdata(pkg, d):
+ return os.access(get_subpkgedata_fn(pkg, d), os.R_OK)
+
+def read_subpkgdata(pkg, d):
+ return read_pkgdatafile(get_subpkgedata_fn(pkg, d))
+
+def has_pkgdata(pn, d):
+ fn = d.expand('${PKGDATA_DIR}/%s' % pn)
+ return os.access(fn, os.R_OK)
+
+def read_pkgdata(pn, d):
+ fn = d.expand('${PKGDATA_DIR}/%s' % pn)
+ return read_pkgdatafile(fn)
+
+#
+# Collapse FOO_pkg variables into FOO
+#
+def read_subpkgdata_dict(pkg, d):
+ ret = {}
+ subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
+ for var in subd:
+ newvar = var.replace("_" + pkg, "")
+ if newvar == var and var + "_" + pkg in subd:
+ continue
+ ret[newvar] = subd[var]
+ return ret
+
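+# The collapse above, shown on a hypothetical subpackage dict (a sketch, not
+# part of the original change); 'PKGSIZE_foo' folds into 'PKGSIZE' while the
+# plain 'PN' key is kept as-is:
+#
+#   >>> subd = {'PKGSIZE_foo': '42', 'PN': 'foo'}
+#   >>> sorted((var.replace('_' + 'foo', ''), subd[var]) for var in subd)
+#   [('PKGSIZE', '42'), ('PN', 'foo')]
+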
+def _pkgmap(d):
+ """Return a dictionary mapping package to recipe name."""
+
+ pkgdatadir = d.getVar("PKGDATA_DIR", True)
+
+ pkgmap = {}
+ try:
+ files = os.listdir(pkgdatadir)
+ except OSError:
+ bb.warn("No files in %s?" % pkgdatadir)
+ files = []
+
+ for pn in filter(lambda f: not os.path.isdir(os.path.join(pkgdatadir, f)), files):
+ try:
+ pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn))
+ except OSError:
+ continue
+
+ packages = pkgdata.get("PACKAGES") or ""
+ for pkg in packages.split():
+ pkgmap[pkg] = pn
+
+ return pkgmap
+
+def pkgmap(d):
+ """Return a dictionary mapping package to recipe name.
+ Cache the mapping in the metadata"""
+
+ pkgmap_data = d.getVar("__pkgmap_data", False)
+ if pkgmap_data is None:
+ pkgmap_data = _pkgmap(d)
+ d.setVar("__pkgmap_data", pkgmap_data)
+
+ return pkgmap_data
+
+def recipename(pkg, d):
+ """Return the recipe name for the given binary package name."""
+
+ return pkgmap(d).get(pkg)
diff --git a/import-layers/yocto-poky/meta/lib/oe/packagegroup.py b/import-layers/yocto-poky/meta/lib/oe/packagegroup.py
new file mode 100644
index 000000000..a6fee5f95
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/packagegroup.py
@@ -0,0 +1,36 @@
+import itertools
+
+def is_optional(feature, d):
+ packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
+ if packages:
+ return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional", True))
+ else:
+ return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional", True))
+
+def packages(features, d):
+ for feature in features:
+ packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
+ if not packages:
+ packages = d.getVar("PACKAGE_GROUP_%s" % feature, True)
+ for pkg in (packages or "").split():
+ yield pkg
+
+def required_packages(features, d):
+ req = filter(lambda feature: not is_optional(feature, d), features)
+ return packages(req, d)
+
+def optional_packages(features, d):
+ opt = filter(lambda feature: is_optional(feature, d), features)
+ return packages(opt, d)
+
+def active_packages(features, d):
+ return itertools.chain(required_packages(features, d),
+ optional_packages(features, d))
+
+def active_recipes(features, d):
+ import oe.packagedata
+
+ for pkg in active_packages(features, d):
+ recipe = oe.packagedata.recipename(pkg, d)
+ if recipe:
+ yield recipe
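+
+# A usage sketch (not part of the original change) with a stand-in datastore;
+# FakeData and all variable values below are hypothetical:
+#
+#   >>> class FakeData(object):
+#   ...     def getVar(self, name, expand=True):
+#   ...         return {'FEATURE_PACKAGES_ssh': 'dropbear',
+#   ...                 'FEATURE_PACKAGES_debug': 'gdb strace'}.get(name)
+#   ...     def getVarFlag(self, name, flag, expand=True):
+#   ...         return {('FEATURE_PACKAGES_debug', 'optional'): '1'}.get((name, flag))
+#   >>> d = FakeData()
+#   >>> list(required_packages(['ssh', 'debug'], d))
+#   ['dropbear']
+#   >>> list(optional_packages(['ssh', 'debug'], d))
+#   ['gdb', 'strace']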
diff --git a/import-layers/yocto-poky/meta/lib/oe/patch.py b/import-layers/yocto-poky/meta/lib/oe/patch.py
new file mode 100644
index 000000000..9d3617290
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/patch.py
@@ -0,0 +1,704 @@
+import oe.path
+
+class NotFoundError(bb.BBHandledException):
+ def __init__(self, path):
+ self.path = path
+
+ def __str__(self):
+ return "Error: %s not found." % self.path
+
+class CmdError(bb.BBHandledException):
+ def __init__(self, command, exitstatus, output):
+ self.command = command
+ self.status = exitstatus
+ self.output = output
+
+ def __str__(self):
+ return "Command Error: '%s' exited with %d Output:\n%s" % \
+ (self.command, self.status, self.output)
+
+
+def runcmd(args, dir = None):
+ import pipes
+
+ if dir:
+ olddir = os.path.abspath(os.curdir)
+ if not os.path.exists(dir):
+ raise NotFoundError(dir)
+ os.chdir(dir)
+ # print("cwd: %s -> %s" % (olddir, dir))
+
+ try:
+ args = [ pipes.quote(str(arg)) for arg in args ]
+ cmd = " ".join(args)
+ # print("cmd: %s" % cmd)
+ (exitstatus, output) = oe.utils.getstatusoutput(cmd)
+ if exitstatus != 0:
+ raise CmdError(cmd, exitstatus >> 8, output)
+ return output
+
+ finally:
+ if dir:
+ os.chdir(olddir)
+
+class PatchError(Exception):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return "Patch Error: %s" % self.msg
+
+class PatchSet(object):
+ defaults = {
+ "strippath": 1
+ }
+
+ def __init__(self, dir, d):
+ self.dir = dir
+ self.d = d
+ self.patches = []
+ self._current = None
+
+ def current(self):
+ return self._current
+
+ def Clean(self):
+ """
+ Clean out the patch set. Generally includes unapplying all
+ patches and wiping out all associated metadata.
+ """
+ raise NotImplementedError()
+
+ def Import(self, patch, force):
+ if not patch.get("file"):
+ if not patch.get("remote"):
+ raise PatchError("Patch file must be specified in patch import.")
+ else:
+ patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)
+
+ for param in PatchSet.defaults:
+ if not patch.get(param):
+ patch[param] = PatchSet.defaults[param]
+
+ if patch.get("remote"):
+ patch["file"] = bb.data.expand(bb.fetch2.localpath(patch["remote"], self.d), self.d)
+
+ patch["filemd5"] = bb.utils.md5_file(patch["file"])
+
+ def Push(self, force):
+ raise NotImplementedError()
+
+ def Pop(self, force):
+ raise NotImplementedError()
+
+ def Refresh(self, remote = None, all = None):
+ raise NotImplementedError()
+
+ @staticmethod
+ def getPatchedFiles(patchfile, striplevel, srcdir=None):
+ """
+ Read a patch file and determine which files it will modify.
+ Params:
+ patchfile: the patch file to read
+ striplevel: the strip level at which the patch is going to be applied
+ srcdir: optional path to join onto the patched file paths
+ Returns:
+ A list of tuples of file path and change mode ('A' for add,
+ 'D' for delete or 'M' for modify)
+ """
+
+ def patchedpath(patchline):
+ filepth = patchline.split()[1]
+ if filepth.endswith('/dev/null'):
+ return '/dev/null'
+ filesplit = filepth.split(os.sep)
+ if striplevel > len(filesplit):
+ bb.error('Patch %s has invalid strip level %d' % (patchfile, striplevel))
+ return None
+ return os.sep.join(filesplit[striplevel:])
+
+ copiedmode = False
+ filelist = []
+ with open(patchfile) as f:
+ for line in f:
+ if line.startswith('--- '):
+ patchpth = patchedpath(line)
+ if not patchpth:
+ break
+ if copiedmode:
+ addedfile = patchpth
+ else:
+ removedfile = patchpth
+ elif line.startswith('+++ '):
+ addedfile = patchedpath(line)
+ if not addedfile:
+ break
+ elif line.startswith('*** '):
+ copiedmode = True
+ removedfile = patchedpath(line)
+ if not removedfile:
+ break
+ else:
+ removedfile = None
+ addedfile = None
+
+ if addedfile and removedfile:
+ if removedfile == '/dev/null':
+ mode = 'A'
+ elif addedfile == '/dev/null':
+ mode = 'D'
+ else:
+ mode = 'M'
+ if srcdir:
+ fullpath = os.path.abspath(os.path.join(srcdir, addedfile))
+ else:
+ fullpath = addedfile
+ filelist.append((fullpath, mode))
+
+ return filelist
+
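+# A hedged, standalone example (not part of the original change) of the strip
+# logic used by patchedpath() above, for a hypothetical "--- " header line at
+# strip level 1:
+#
+#   >>> import os
+#   >>> filepth = '--- a/src/main.c'.split()[1]
+#   >>> os.sep.join(filepth.split(os.sep)[1:])
+#   'src/main.c'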
+
+class PatchTree(PatchSet):
+ def __init__(self, dir, d):
+ PatchSet.__init__(self, dir, d)
+ self.patchdir = os.path.join(self.dir, 'patches')
+ self.seriespath = os.path.join(self.dir, 'patches', 'series')
+ bb.utils.mkdirhier(self.patchdir)
+
+ def _appendPatchFile(self, patch, strippath):
+ with open(self.seriespath, 'a') as f:
+ f.write(os.path.basename(patch) + "," + strippath + "\n")
+ shellcmd = ["cat", patch, ">" , self.patchdir + "/" + os.path.basename(patch)]
+ runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+
+ def _removePatch(self, p):
+ patch = {}
+ patch['file'] = p.split(",")[0]
+ patch['strippath'] = p.split(",")[1]
+ self._applypatch(patch, False, True)
+
+ def _removePatchFile(self, all = False):
+ if not os.path.exists(self.seriespath):
+ return
+ with open(self.seriespath, 'r+') as f:
+ patches = f.readlines()
+ if all:
+ for p in reversed(patches):
+ self._removePatch(os.path.join(self.patchdir, p.strip()))
+ patches = []
+ else:
+ self._removePatch(os.path.join(self.patchdir, patches[-1].strip()))
+ patches.pop()
+ with open(self.seriespath, 'w') as f:
+ for p in patches:
+ f.write(p)
+
+ def Import(self, patch, force = None):
+ """"""
+ PatchSet.Import(self, patch, force)
+
+ if self._current is not None:
+ i = self._current + 1
+ else:
+ i = 0
+ self.patches.insert(i, patch)
+
+ def _applypatch(self, patch, force = False, reverse = False, run = True):
+ shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
+ if reverse:
+ shellcmd.append('-R')
+
+ if not run:
+ return "sh" + "-c" + " ".join(shellcmd)
+
+ if not force:
+ shellcmd.append('--dry-run')
+
+ try:
+ output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+
+ if force:
+ return
+
+ shellcmd.pop(len(shellcmd) - 1)
+ output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ except CmdError as err:
+ raise bb.BBHandledException("Applying '%s' failed:\n%s" %
+ (os.path.basename(patch['file']), err.output))
+
+ if not reverse:
+ self._appendPatchFile(patch['file'], patch['strippath'])
+
+ return output
+
+ def Push(self, force = False, all = False, run = True):
+ bb.note("self._current is %s" % self._current)
+ bb.note("patches is %s" % self.patches)
+ if all:
+ for i in self.patches:
+ bb.note("applying patch %s" % i)
+ self._applypatch(i, force)
+ self._current = i
+ else:
+ if self._current is not None:
+ next = self._current + 1
+ else:
+ next = 0
+
+ bb.note("applying patch %s" % self.patches[next])
+ ret = self._applypatch(self.patches[next], force)
+
+ self._current = next
+ return ret
+
+ def Pop(self, force = None, all = None):
+ if all:
+ self._removePatchFile(True)
+ self._current = None
+ else:
+ self._removePatchFile(False)
+
+ if self._current == 0:
+ self._current = None
+
+ if self._current is not None:
+ self._current = self._current - 1
+
+ def Clean(self):
+ """"""
+ self.Pop(all=True)
+
+class GitApplyTree(PatchTree):
+ patch_line_prefix = '%% original patch'
+ ignore_commit_prefix = '%% ignore'
+
+ def __init__(self, dir, d):
+ PatchTree.__init__(self, dir, d)
+
+ @staticmethod
+ def extractPatchHeader(patchfile):
+ """
+ Extract just the header lines from the top of a patch file
+ """
+ lines = []
+ with open(patchfile, 'r') as f:
+ for line in f.readlines():
+ if line.startswith('Index: ') or line.startswith('diff -') or line.startswith('---'):
+ break
+ lines.append(line)
+ return lines
+
+ @staticmethod
+ def decodeAuthor(line):
+ from email.header import decode_header
+ authorval = line.split(':', 1)[1].strip().replace('"', '')
+ return decode_header(authorval)[0][0]
+
+ @staticmethod
+ def interpretPatchHeader(headerlines):
+ import re
+ author_re = re.compile('[\S ]+ <\S+@\S+\.\S+>')
+ outlines = []
+ author = None
+ date = None
+ subject = None
+ for line in headerlines:
+ if line.startswith('Subject: '):
+ subject = line.split(':', 1)[1]
+ # Remove any [PATCH][oe-core] etc.
+ subject = re.sub(r'\[.+?\]\s*', '', subject)
+ continue
+ elif line.startswith('From: ') or line.startswith('Author: '):
+ authorval = GitApplyTree.decodeAuthor(line)
+ # git is fussy about author formatting i.e. it must be Name <email@domain>
+ if author_re.match(authorval):
+ author = authorval
+ continue
+ elif line.startswith('Date: '):
+ if date is None:
+ dateval = line.split(':', 1)[1].strip()
+ # Very crude check for date format, since git will blow up if it's not in the right
+ # format. Without e.g. a python-dateutils dependency we can't do a whole lot more
+ if len(dateval) > 12:
+ date = dateval
+ continue
+ elif not author and line.lower().startswith('signed-off-by: '):
+ authorval = GitApplyTree.decodeAuthor(line)
+ # git is fussy about author formatting i.e. it must be Name <email@domain>
+ if author_re.match(authorval):
+ author = authorval
+ outlines.append(line)
+ return outlines, author, date, subject
+
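+    # Two of the header transforms above, shown in isolation (a sketch, not
+    # part of the original change; the sample strings are hypothetical):
+    #
+    #   >>> import re
+    #   >>> re.sub(r'\[.+?\]\s*', '', '[PATCH v2] Fix buffer overflow')
+    #   'Fix buffer overflow'
+    #   >>> bool(re.compile('[\S ]+ <\S+@\S+\.\S+>').match('Jane Doe <jane@example.com>'))
+    #   True
+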
+ @staticmethod
+ def prepareCommit(patchfile):
+ """
+ Prepare a git commit command line based on the header from a patch file
+ (typically this is useful for patches that cannot be applied with "git am" due to formatting)
+ """
+ import tempfile
+ # Process patch header and extract useful information
+ lines = GitApplyTree.extractPatchHeader(patchfile)
+ outlines, author, date, subject = GitApplyTree.interpretPatchHeader(lines)
+ if not author or not subject:
+ try:
+ shellcmd = ["git", "log", "--format=email", "--diff-filter=A", "--", patchfile]
+ out = runcmd(["sh", "-c", " ".join(shellcmd)], os.path.dirname(patchfile))
+ except CmdError:
+ out = None
+ if out:
+ _, newauthor, newdate, newsubject = GitApplyTree.interpretPatchHeader(out.splitlines())
+ if not author or not date:
+ # These really need to go together
+ author = newauthor
+ date = newdate
+ if not subject:
+ subject = newsubject
+ if subject:
+ outlines.insert(0, '%s\n\n' % subject.strip())
+
+ # Write out commit message to a file
+ with tempfile.NamedTemporaryFile('w', delete=False) as tf:
+ tmpfile = tf.name
+ for line in outlines:
+ tf.write(line)
+ # Prepare git command
+ cmd = ["git", "commit", "-F", tmpfile]
+ # git doesn't like plain email addresses as authors
+ if author and '<' in author:
+ cmd.append('--author="%s"' % author)
+ if date:
+ cmd.append('--date="%s"' % date)
+ return (tmpfile, cmd)
+
+ @staticmethod
+ def extractPatches(tree, startcommit, outdir, paths=None):
+ import tempfile
+ import shutil
+ tempdir = tempfile.mkdtemp(prefix='oepatch')
+ try:
+ shellcmd = ["git", "format-patch", startcommit, "-o", tempdir]
+ if paths:
+ shellcmd.append('--')
+ shellcmd.extend(paths)
+ out = runcmd(["sh", "-c", " ".join(shellcmd)], tree)
+ if out:
+ for srcfile in out.split():
+ patchlines = []
+ outfile = None
+ with open(srcfile, 'r') as f:
+ for line in f:
+ if line.startswith(GitApplyTree.patch_line_prefix):
+ outfile = line.split()[-1].strip()
+ continue
+ if line.startswith(GitApplyTree.ignore_commit_prefix):
+ continue
+ patchlines.append(line)
+ if not outfile:
+ outfile = os.path.basename(srcfile)
+ with open(os.path.join(outdir, outfile), 'w') as of:
+ for line in patchlines:
+ of.write(line)
+ finally:
+ shutil.rmtree(tempdir)
+
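+    # The pointer line written by the commit hook in _applypatch below and
+    # read back above looks like this (illustrative filename only):
+    #
+    #   >>> line = '%% original patch: 0001-fix.patch\n'
+    #   >>> line.split()[-1].strip()
+    #   '0001-fix.patch'
+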
+ def _applypatch(self, patch, force = False, reverse = False, run = True):
+ import shutil
+
+ def _applypatchhelper(shellcmd, patch, force = False, reverse = False, run = True):
+ if reverse:
+ shellcmd.append('-R')
+
+ shellcmd.append(patch['file'])
+
+ if not run:
+ return "sh" + "-c" + " ".join(shellcmd)
+
+ return runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+
+ # Add hooks which add a pointer to the original patch file name in the commit message
+ reporoot = (runcmd("git rev-parse --show-toplevel".split(), self.dir) or '').strip()
+ if not reporoot:
+ raise Exception("Cannot get repository root for directory %s" % self.dir)
+ hooks_dir = os.path.join(reporoot, '.git', 'hooks')
+ hooks_dir_backup = hooks_dir + '.devtool-orig'
+ if os.path.lexists(hooks_dir_backup):
+ raise Exception("Git hooks backup directory already exists: %s" % hooks_dir_backup)
+ if os.path.lexists(hooks_dir):
+ shutil.move(hooks_dir, hooks_dir_backup)
+ os.mkdir(hooks_dir)
+ commithook = os.path.join(hooks_dir, 'commit-msg')
+ applyhook = os.path.join(hooks_dir, 'applypatch-msg')
+ with open(commithook, 'w') as f:
+ # NOTE: the formatting here is significant; if you change it you'll also need to
+ # change other places which read it back
+ f.write('echo >> $1\n')
+ f.write('echo "%s: $PATCHFILE" >> $1\n' % GitApplyTree.patch_line_prefix)
+ os.chmod(commithook, 0755)
+ shutil.copy2(commithook, applyhook)
+ try:
+ patchfilevar = 'PATCHFILE="%s"' % os.path.basename(patch['file'])
+ try:
+ shellcmd = [patchfilevar, "git", "--work-tree=%s" % reporoot, "am", "-3", "--keep-cr", "-p%s" % patch['strippath']]
+ return _applypatchhelper(shellcmd, patch, force, reverse, run)
+ except CmdError:
+ # Need to abort the git am, or we'll still be within it at the end
+ try:
+ shellcmd = ["git", "--work-tree=%s" % reporoot, "am", "--abort"]
+ runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ except CmdError:
+ pass
+ # git am won't always clean up after itself, sadly, so...
+ shellcmd = ["git", "--work-tree=%s" % reporoot, "reset", "--hard", "HEAD"]
+ runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ # Also need to take care of any stray untracked files
+ shellcmd = ["git", "--work-tree=%s" % reporoot, "clean", "-f"]
+ runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+
+ # Fall back to git apply
+ shellcmd = ["git", "--git-dir=%s" % reporoot, "apply", "-p%s" % patch['strippath']]
+ try:
+ output = _applypatchhelper(shellcmd, patch, force, reverse, run)
+ except CmdError:
+ # Fall back to patch
+ output = PatchTree._applypatch(self, patch, force, reverse, run)
+ # Add all files
+ shellcmd = ["git", "add", "-f", "-A", "."]
+ output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ # Exclude the patches directory
+ shellcmd = ["git", "reset", "HEAD", self.patchdir]
+ output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ # Commit the result
+ (tmpfile, shellcmd) = self.prepareCommit(patch['file'])
+ try:
+ shellcmd.insert(0, patchfilevar)
+ output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ finally:
+ os.remove(tmpfile)
+ return output
+ finally:
+ shutil.rmtree(hooks_dir)
+ if os.path.lexists(hooks_dir_backup):
+ shutil.move(hooks_dir_backup, hooks_dir)
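+
+    # Editor's note (added): _applypatch() above tries three strategies in
+    # order: "git am -3" (preserves authorship and commit message), then
+    # "git apply", then plain patch(1) followed by "git add" and a commit,
+    # so the tree always ends up with the patch recorded as a git commit.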
+
+
+class QuiltTree(PatchSet):
+ def _runcmd(self, args, run = True):
+ quiltrc = self.d.getVar('QUILTRCFILE', True)
+ if not run:
+ return ["quilt"] + ["--quiltrc"] + [quiltrc] + args
+ runcmd(["quilt"] + ["--quiltrc"] + [quiltrc] + args, self.dir)
+
+ def _quiltpatchpath(self, file):
+ return os.path.join(self.dir, "patches", os.path.basename(file))
+
+
+ def __init__(self, dir, d):
+ PatchSet.__init__(self, dir, d)
+ self.initialized = False
+ p = os.path.join(self.dir, 'patches')
+ if not os.path.exists(p):
+ os.makedirs(p)
+
+ def Clean(self):
+ try:
+ self._runcmd(["pop", "-a", "-f"])
+ oe.path.remove(os.path.join(self.dir, "patches","series"))
+ except Exception:
+ pass
+ self.initialized = True
+
+ def InitFromDir(self):
+ # read series -> self.patches
+ seriespath = os.path.join(self.dir, 'patches', 'series')
+ if not os.path.exists(self.dir):
+ raise NotFoundError(self.dir)
+ if os.path.exists(seriespath):
+ with open(seriespath, 'r') as f:
+ for line in f.readlines():
+ patch = {}
+ parts = line.strip().split()
+ patch["quiltfile"] = self._quiltpatchpath(parts[0])
+ patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
+ if len(parts) > 1:
+ patch["strippath"] = parts[1][2:]
+ self.patches.append(patch)
+
+ # determine which patches are applied -> self._current
+ try:
+ output = runcmd(["quilt", "applied"], self.dir)
+ except CmdError:
+ import sys
+ if sys.exc_value.output.strip() == "No patches applied":
+ return
+ else:
+ raise
+ output = [val for val in output.split('\n') if not val.startswith('#')]
+ for patch in self.patches:
+ if os.path.basename(patch["quiltfile"]) == output[-1]:
+ self._current = self.patches.index(patch)
+ self.initialized = True
+
+ def Import(self, patch, force = None):
+ if not self.initialized:
+ self.InitFromDir()
+ PatchSet.Import(self, patch, force)
+ oe.path.symlink(patch["file"], self._quiltpatchpath(patch["file"]), force=True)
+ with open(os.path.join(self.dir, "patches", "series"), "a") as f:
+ f.write(os.path.basename(patch["file"]) + " -p" + patch["strippath"] + "\n")
+ patch["quiltfile"] = self._quiltpatchpath(patch["file"])
+ patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
+
+ # TODO: determine if the file being imported:
+ # 1) is already imported, and is the same
+ # 2) is already imported, but differs
+
+ self.patches.insert(self._current or 0, patch)
+
+
+ def Push(self, force = False, all = False, run = True):
+ # quilt push [-f]
+
+ args = ["push"]
+ if force:
+ args.append("-f")
+ if all:
+ args.append("-a")
+ if not run:
+ return self._runcmd(args, run)
+
+ self._runcmd(args)
+
+ if self._current is not None:
+ self._current = self._current + 1
+ else:
+ self._current = 0
+
+ def Pop(self, force = None, all = None):
+ # quilt pop [-f]
+ args = ["pop"]
+ if force:
+ args.append("-f")
+ if all:
+ args.append("-a")
+
+ self._runcmd(args)
+
+ if self._current == 0:
+ self._current = None
+
+ if self._current is not None:
+ self._current = self._current - 1
+
+ def Refresh(self, **kwargs):
+ if kwargs.get("remote"):
+ patch = self.patches[kwargs["patch"]]
+ if not patch:
+ raise PatchError("No patch found at index %s in patchset." % kwargs["patch"])
+ (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(patch["remote"])
+ if type == "file":
+ import shutil
+ if not patch.get("file") and patch.get("remote"):
+ patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)
+
+ shutil.copyfile(patch["quiltfile"], patch["file"])
+ else:
+ raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), type))
+ else:
+ # quilt refresh
+ args = ["refresh"]
+ if kwargs.get("quiltfile"):
+ args.append(os.path.basename(kwargs["quiltfile"]))
+ elif kwargs.get("patch"):
+ args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"]))
+ self._runcmd(args)
+
+class Resolver(object):
+ def __init__(self, patchset, terminal):
+ raise NotImplementedError()
+
+ def Resolve(self):
+ raise NotImplementedError()
+
+ def Revert(self):
+ raise NotImplementedError()
+
+ def Finalize(self):
+ raise NotImplementedError()
+
+class NOOPResolver(Resolver):
+ def __init__(self, patchset, terminal):
+ self.patchset = patchset
+ self.terminal = terminal
+
+ def Resolve(self):
+ olddir = os.path.abspath(os.curdir)
+ os.chdir(self.patchset.dir)
+ try:
+ self.patchset.Push()
+ except Exception:
+ import sys
+ os.chdir(olddir)
+ raise
+
+# Patch resolver which relies on the user doing all the work involved in the
+# resolution, with the exception of refreshing the remote copy of the patch
+# files (the urls).
+class UserResolver(Resolver):
+ def __init__(self, patchset, terminal):
+ self.patchset = patchset
+ self.terminal = terminal
+
+ # Force a push in the patchset, then drop to a shell for the user to
+ # resolve any rejected hunks
+ def Resolve(self):
+ olddir = os.path.abspath(os.curdir)
+ os.chdir(self.patchset.dir)
+ try:
+ self.patchset.Push(False)
+ except CmdError as v:
+ # Patch application failed
+ patchcmd = self.patchset.Push(True, False, False)
+
+ t = self.patchset.d.getVar('T', True)
+ if not t:
+ bb.msg.fatal("Build", "T not set")
+ bb.utils.mkdirhier(t)
+ import random
+ rcfile = "%s/bashrc.%s.%s" % (t, str(os.getpid()), random.random())
+ with open(rcfile, "w") as f:
+ f.write("echo '*** Manual patch resolution mode ***'\n")
+ f.write("echo 'Dropping to a shell, so patch rejects can be fixed manually.'\n")
+ f.write("echo 'Run \"quilt refresh\" when patch is corrected, press CTRL+D to exit.'\n")
+ f.write("echo ''\n")
+ f.write(" ".join(patchcmd) + "\n")
+ os.chmod(rcfile, 0775)
+
+ self.terminal("bash --rcfile " + rcfile, 'Patch Rejects: Please fix patch rejects manually', self.patchset.d)
+
+ # Construct a new PatchSet after the user's changes, compare the
+ # sets, checking patches for modifications, and doing a remote
+ # refresh on each.
+ oldpatchset = self.patchset
+ self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d)
+
+ for patch in self.patchset.patches:
+ oldpatch = None
+ for opatch in oldpatchset.patches:
+ if opatch["quiltfile"] == patch["quiltfile"]:
+ oldpatch = opatch
+
+ if oldpatch:
+ patch["remote"] = oldpatch["remote"]
+ if patch["quiltfile"] == oldpatch["quiltfile"]:
+ if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]:
+ bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"]))
+ # user change? remote refresh
+ self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch))
+ else:
+ # User did not fix the problem. Abort.
+ raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
+ except Exception:
+ os.chdir(olddir)
+ raise
+ os.chdir(olddir)
diff --git a/import-layers/yocto-poky/meta/lib/oe/path.py b/import-layers/yocto-poky/meta/lib/oe/path.py
new file mode 100644
index 000000000..413ebfb39
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/path.py
@@ -0,0 +1,243 @@
+import errno
+import glob
+import shutil
+import subprocess
+import os.path
+
+def join(*paths):
+ """Like os.path.join but doesn't treat absolute RHS specially"""
+ return os.path.normpath("/".join(paths))
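+
+# Editor's illustrative sketch (added; not part of the original module):
+# unlike os.path.join, an absolute right-hand side does not discard the
+# components to its left:
+#
+#   os.path.join("/usr", "/bin")  # -> '/bin'
+#   join("/usr", "/bin")          # -> '/usr/bin'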
+
+def relative(src, dest):
+ """ Return a relative path from src to dest.
+
+ >>> relative("/usr/bin", "/tmp/foo/bar")
+ ../../tmp/foo/bar
+
+ >>> relative("/usr/bin", "/usr/lib")
+ ../lib
+
+ >>> relative("/tmp", "/tmp/foo/bar")
+ foo/bar
+ """
+
+ return os.path.relpath(dest, src)
+
+def make_relative_symlink(path):
+ """ Convert an absolute symlink to a relative one """
+ if not os.path.islink(path):
+ return
+ link = os.readlink(path)
+ if not os.path.isabs(link):
+ return
+
+ # find the common ancestor directory
+ ancestor = path
+ depth = 0
+ while ancestor and not link.startswith(ancestor):
+ ancestor = ancestor.rpartition('/')[0]
+ depth += 1
+
+ if not ancestor:
+ print("make_relative_symlink() Error: unable to find the common ancestor of %s and its target" % path)
+ return
+
+ base = link.partition(ancestor)[2].strip('/')
+ while depth > 1:
+ base = "../" + base
+ depth -= 1
+
+ os.remove(path)
+ os.symlink(base, path)
+
+def format_display(path, metadata):
+ """ Prepare a path for display to the user. """
+ rel = relative(metadata.getVar("TOPDIR", True), path)
+ if len(rel) > len(path):
+ return path
+ else:
+ return rel
+
+def copytree(src, dst):
+    # We could use something like shutil.copytree here, but it turns out
+    # to be slow: it takes twice as long copying to an empty directory,
+    # and if dst already has contents it can be 15 times slower.
+ # This way we also preserve hardlinks between files in the tree.
+
+ bb.utils.mkdirhier(dst)
+ cmd = 'tar -cf - -C %s -p . | tar -xf - -C %s' % (src, dst)
+ check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
+def copyhardlinktree(src, dst):
+ """ Make the hard link when possible, otherwise copy. """
+ bb.utils.mkdirhier(dst)
+ if os.path.isdir(src) and not len(os.listdir(src)):
+ return
+
+ if (os.stat(src).st_dev == os.stat(dst).st_dev):
+ # Need to copy directories only with tar first since cp will error if two
+ # writers try and create a directory at the same time
+ cmd = 'cd %s; find . -type d -print | tar -cf - -C %s -p --files-from - --no-recursion | tar -xf - -C %s' % (src, src, dst)
+ check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+ cmd = 'cd %s; find . -print0 | cpio --null -pdlu %s' % (src, dst)
+ check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+ else:
+ copytree(src, dst)
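+
+# Editor's note (added): copyhardlinktree() can only hardlink ("cpio -pdlu",
+# where -l links rather than copies) when src and dst live on the same
+# filesystem, hence the st_dev comparison above; otherwise it falls back to
+# the tar-pipe copytree().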
+
+def remove(path, recurse=True):
+ """Equivalent to rm -f or rm -rf"""
+ for name in glob.glob(path):
+ try:
+ os.unlink(name)
+ except OSError as exc:
+ if recurse and exc.errno == errno.EISDIR:
+ shutil.rmtree(name)
+ elif exc.errno != errno.ENOENT:
+ raise
+
+def symlink(source, destination, force=False):
+ """Create a symbolic link"""
+ try:
+ if force:
+ remove(destination)
+ os.symlink(source, destination)
+ except OSError as e:
+ if e.errno != errno.EEXIST or os.readlink(destination) != source:
+ raise
+
+class CalledProcessError(Exception):
+ def __init__(self, retcode, cmd, output = None):
+ self.retcode = retcode
+ self.cmd = cmd
+ self.output = output
+ def __str__(self):
+ return "Command '%s' returned non-zero exit status %d with output %s" % (self.cmd, self.retcode, self.output)
+
+# Not needed when we move to python 2.7
+def check_output(*popenargs, **kwargs):
+ r"""Run command with arguments and return its output as a byte string.
+
+ If the exit code was non-zero it raises a CalledProcessError. The
+ CalledProcessError object will have the return code in the returncode
+ attribute and output in the output attribute.
+
+ The arguments are the same as for the Popen constructor. Example:
+
+ >>> check_output(["ls", "-l", "/dev/null"])
+ 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
+
+ The stdout argument is not allowed as it is used internally.
+ To capture standard error in the result, use stderr=STDOUT.
+
+ >>> check_output(["/bin/sh", "-c",
+ ... "ls -l non_existent_file ; exit 0"],
+ ... stderr=STDOUT)
+ 'ls: non_existent_file: No such file or directory\n'
+ """
+ if 'stdout' in kwargs:
+ raise ValueError('stdout argument not allowed, it will be overridden.')
+ process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
+ output, unused_err = process.communicate()
+ retcode = process.poll()
+ if retcode:
+ cmd = kwargs.get("args")
+ if cmd is None:
+ cmd = popenargs[0]
+ raise CalledProcessError(retcode, cmd, output=output)
+ return output
+
+def find(dir, **walkoptions):
+ """ Given a directory, recurses into that directory,
+ returning all files as absolute paths. """
+
+ for root, dirs, files in os.walk(dir, **walkoptions):
+ for file in files:
+ yield os.path.join(root, file)
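+
+# Editor's illustrative usage sketch (added; assumes an existing directory):
+#   for fn in find("/etc", followlinks=False):
+#       print fn    # one absolute path per file below /etc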
+
+
+## realpath() related functions
+def __is_path_below(file, root):
+ return (file + os.path.sep).startswith(root)
+
+def __realpath_rel(start, rel_path, root, loop_cnt, assume_dir):
+ """Calculates real path of symlink 'start' + 'rel_path' below
+ 'root'; no part of 'start' below 'root' must contain symlinks. """
+ have_dir = True
+
+ for d in rel_path.split(os.path.sep):
+ if not have_dir and not assume_dir:
+ raise OSError(errno.ENOENT, "no such directory %s" % start)
+
+ if d == os.path.pardir: # '..'
+ if len(start) >= len(root):
+ # do not follow '..' before root
+ start = os.path.dirname(start)
+ else:
+ # emit warning?
+ pass
+ else:
+ (start, have_dir) = __realpath(os.path.join(start, d),
+ root, loop_cnt, assume_dir)
+
+ assert(__is_path_below(start, root))
+
+ return start
+
+def __realpath(file, root, loop_cnt, assume_dir):
+ while os.path.islink(file) and len(file) >= len(root):
+ if loop_cnt == 0:
+ raise OSError(errno.ELOOP, file)
+
+ loop_cnt -= 1
+ target = os.path.normpath(os.readlink(file))
+
+ if not os.path.isabs(target):
+ tdir = os.path.dirname(file)
+ assert(__is_path_below(tdir, root))
+ else:
+ tdir = root
+
+ file = __realpath_rel(tdir, target, root, loop_cnt, assume_dir)
+
+ try:
+ is_dir = os.path.isdir(file)
+ except:
+        is_dir = False
+
+ return (file, is_dir)
+
+def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
+ """ Returns the canonical path of 'file' with assuming a
+ toplevel 'root' directory. When 'use_physdir' is set, all
+ preceding path components of 'file' will be resolved first;
+ this flag should be set unless it is guaranteed that there is
+ no symlink in the path. When 'assume_dir' is not set, missing
+ path components will raise an ENOENT error"""
+
+ root = os.path.normpath(root)
+ file = os.path.normpath(file)
+
+ if not root.endswith(os.path.sep):
+ # letting root end with '/' makes some things easier
+ root = root + os.path.sep
+
+ if not __is_path_below(file, root):
+ raise OSError(errno.EINVAL, "file '%s' is not below root" % file)
+
+ try:
+ if use_physdir:
+ file = __realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
+ else:
+ file = __realpath(file, root, loop_cnt, assume_dir)[0]
+ except OSError as e:
+ if e.errno == errno.ELOOP:
+            # make ELOOP more readable; without catching it here, a
+            # backtrace with hundreds of OSError exceptions would be printed
+            raise OSError(errno.ELOOP,
+                          "too many recursions while resolving '%s'; loop in '%s'" %
+                          (file, e.strerror))
+
+ raise
+
+ return file
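+
+# Editor's illustrative sketch (added; the layout below is hypothetical):
+# realpath() resolves symlinks as if 'root' were the filesystem root, which
+# is useful when inspecting a staged image. Given, inside the image,
+#   ${ROOTFS}/usr/bin/sh -> /bin/busybox    (absolute symlink)
+# then
+#   realpath(rootfs + "/usr/bin/sh", rootfs)
+# yields rootfs + "/bin/busybox" instead of the build host's /bin/busybox.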
diff --git a/import-layers/yocto-poky/meta/lib/oe/prservice.py b/import-layers/yocto-poky/meta/lib/oe/prservice.py
new file mode 100644
index 000000000..b0cbcb1fb
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/prservice.py
@@ -0,0 +1,126 @@
+
+def prserv_make_conn(d, check = False):
+ import prserv.serv
+ host_params = filter(None, (d.getVar("PRSERV_HOST", True) or '').split(':'))
+ try:
+ conn = None
+ conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
+ if check:
+ if not conn.ping():
+ raise Exception('service not available')
+ d.setVar("__PRSERV_CONN",conn)
+ except Exception, exc:
+ bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))
+
+ return conn
+
+def prserv_dump_db(d):
+ if not d.getVar('PRSERV_HOST', True):
+ bb.error("Not using network based PR service")
+ return None
+
+ conn = d.getVar("__PRSERV_CONN", True)
+ if conn is None:
+ conn = prserv_make_conn(d)
+ if conn is None:
+ bb.error("Making connection failed to remote PR service")
+ return None
+
+ #dump db
+ opt_version = d.getVar('PRSERV_DUMPOPT_VERSION', True)
+ opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH', True)
+ opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM', True)
+ opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL', True))
+ return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
+
+def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
+ if not d.getVar('PRSERV_HOST', True):
+ bb.error("Not using network based PR service")
+ return None
+
+ conn = d.getVar("__PRSERV_CONN", True)
+ if conn is None:
+ conn = prserv_make_conn(d)
+ if conn is None:
+ bb.error("Making connection failed to remote PR service")
+ return None
+ #get the entry values
+ imported = []
+ prefix = "PRAUTO$"
+ for v in d.keys():
+ if v.startswith(prefix):
+ (remain, sep, checksum) = v.rpartition('$')
+ (remain, sep, pkgarch) = remain.rpartition('$')
+ (remain, sep, version) = remain.rpartition('$')
+ if (remain + '$' != prefix) or \
+ (filter_version and filter_version != version) or \
+ (filter_pkgarch and filter_pkgarch != pkgarch) or \
+ (filter_checksum and filter_checksum != checksum):
+ continue
+ try:
+ value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum, True))
+ except BaseException as exc:
+ bb.debug("Not valid value of %s:%s" % (v,str(exc)))
+ continue
+ ret = conn.importone(version,pkgarch,checksum,value)
+ if ret != value:
+ bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret))
+ else:
+ imported.append((version,pkgarch,checksum,value))
+ return imported
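+
+# Editor's note (added): an illustrative PRAUTO key as parsed above, with
+# hypothetical values:
+#   PRAUTO$2.1$qemux86$deadbeef = "5"
+# The three rpartition('$') calls peel off checksum, pkgarch and version in
+# turn, leaving remain == "PRAUTO" so that remain + '$' matches the prefix.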
+
+def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
+ import bb.utils
+    # initialize the output file
+ bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR', True))
+ df = d.getVar('PRSERV_DUMPFILE', True)
+ #write data
+ lf = bb.utils.lockfile("%s.lock" % df)
+ f = open(df, "a")
+ if metainfo:
+ #dump column info
+ f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver']);
+ f.write("#Table: %s\n" % metainfo['tbl_name'])
+ f.write("#Columns:\n")
+ f.write("#name \t type \t notn \t dflt \t pk\n")
+ f.write("#----------\t --------\t --------\t --------\t ----\n")
+ for i in range(len(metainfo['col_info'])):
+ f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" %
+ (metainfo['col_info'][i]['name'],
+ metainfo['col_info'][i]['type'],
+ metainfo['col_info'][i]['notnull'],
+ metainfo['col_info'][i]['dflt_value'],
+ metainfo['col_info'][i]['pk']))
+ f.write("\n")
+
+ if lockdown:
+ f.write("PRSERV_LOCKDOWN = \"1\"\n\n")
+
+ if datainfo:
+ idx = {}
+ for i in range(len(datainfo)):
+ pkgarch = datainfo[i]['pkgarch']
+ value = datainfo[i]['value']
+ if pkgarch not in idx:
+ idx[pkgarch] = i
+ elif value > datainfo[idx[pkgarch]]['value']:
+ idx[pkgarch] = i
+ f.write("PRAUTO$%s$%s$%s = \"%s\"\n" %
+ (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value)))
+ if not nomax:
+ for i in idx:
+ f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
+ f.close()
+ bb.utils.unlockfile(lf)
+
+def prserv_check_avail(d):
+ host_params = filter(None, (d.getVar("PRSERV_HOST", True) or '').split(':'))
+ try:
+ if len(host_params) != 2:
+ raise TypeError
+ else:
+ int(host_params[1])
+    except (TypeError, ValueError):
+ bb.fatal('Undefined/incorrect PRSERV_HOST value. Format: "host:port"')
+ else:
+ prserv_make_conn(d, True)
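+
+# Editor's note (added): prserv_check_avail() expects PRSERV_HOST in
+# "host:port" form, e.g. (hypothetical) PRSERV_HOST = "localhost:8585";
+# a missing or non-numeric port aborts the build via bb.fatal().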
diff --git a/import-layers/yocto-poky/meta/lib/oe/qa.py b/import-layers/yocto-poky/meta/lib/oe/qa.py
new file mode 100644
index 000000000..3cfeee737
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/qa.py
@@ -0,0 +1,151 @@
+import os, struct
+
+class NotELFFileError(Exception):
+ pass
+
+class ELFFile:
+ EI_NIDENT = 16
+
+ EI_CLASS = 4
+ EI_DATA = 5
+ EI_VERSION = 6
+ EI_OSABI = 7
+ EI_ABIVERSION = 8
+
+ E_MACHINE = 0x12
+
+ # possible values for EI_CLASS
+ ELFCLASSNONE = 0
+ ELFCLASS32 = 1
+ ELFCLASS64 = 2
+
+ # possible value for EI_VERSION
+ EV_CURRENT = 1
+
+ # possible values for EI_DATA
+ ELFDATANONE = 0
+ ELFDATA2LSB = 1
+ ELFDATA2MSB = 2
+
+ PT_INTERP = 3
+
+ def my_assert(self, expectation, result):
+ if not expectation == result:
+ #print "'%x','%x' %s" % (ord(expectation), ord(result), self.name)
+ raise NotELFFileError("%s is not an ELF" % self.name)
+
+ def __init__(self, name, bits = 0):
+ self.name = name
+ self.bits = bits
+ self.objdump_output = {}
+
+ def open(self):
+ if not os.path.isfile(self.name):
+ raise NotELFFileError("%s is not a normal file" % self.name)
+
+ self.file = file(self.name, "r")
+ # Read 4k which should cover most of the headers we're after
+ self.data = self.file.read(4096)
+
+ if len(self.data) < ELFFile.EI_NIDENT + 4:
+ raise NotELFFileError("%s is not an ELF" % self.name)
+
+ self.my_assert(self.data[0], chr(0x7f) )
+ self.my_assert(self.data[1], 'E')
+ self.my_assert(self.data[2], 'L')
+ self.my_assert(self.data[3], 'F')
+ if self.bits == 0:
+ if self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS32):
+ self.bits = 32
+ elif self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS64):
+ self.bits = 64
+ else:
+                # Neither 32-bit nor 64-bit, so raise an error
+ raise NotELFFileError("ELF but not 32 or 64 bit.")
+ elif self.bits == 32:
+ self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS32))
+ elif self.bits == 64:
+ self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS64))
+ else:
+ raise NotELFFileError("Must specify unknown, 32 or 64 bit size.")
+ self.my_assert(self.data[ELFFile.EI_VERSION], chr(ELFFile.EV_CURRENT) )
+
+ self.sex = self.data[ELFFile.EI_DATA]
+ if self.sex == chr(ELFFile.ELFDATANONE):
+ raise NotELFFileError("self.sex == ELFDATANONE")
+ elif self.sex == chr(ELFFile.ELFDATA2LSB):
+ self.sex = "<"
+ elif self.sex == chr(ELFFile.ELFDATA2MSB):
+ self.sex = ">"
+ else:
+ raise NotELFFileError("Unknown self.sex")
+
+ def osAbi(self):
+ return ord(self.data[ELFFile.EI_OSABI])
+
+ def abiVersion(self):
+ return ord(self.data[ELFFile.EI_ABIVERSION])
+
+ def abiSize(self):
+ return self.bits
+
+ def isLittleEndian(self):
+ return self.sex == "<"
+
+ def isBigEndian(self):
+ return self.sex == ">"
+
+ def getShort(self, offset):
+ return struct.unpack_from(self.sex+"H", self.data, offset)[0]
+
+ def getWord(self, offset):
+ return struct.unpack_from(self.sex+"i", self.data, offset)[0]
+
+ def isDynamic(self):
+ """
+ Return True if there is a .interp segment (therefore dynamically
+ linked), otherwise False (statically linked).
+ """
+ offset = self.getWord(self.bits == 32 and 0x1C or 0x20)
+ size = self.getShort(self.bits == 32 and 0x2A or 0x36)
+ count = self.getShort(self.bits == 32 and 0x2C or 0x38)
+
+ for i in range(0, count):
+ p_type = self.getWord(offset + i * size)
+ if p_type == ELFFile.PT_INTERP:
+ return True
+ return False
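+
+    # Editor's note (added): the magic numbers above index the standard ELF
+    # header: e_phoff at 0x1C (ELF32) / 0x20 (ELF64), e_phentsize at
+    # 0x2A / 0x36, e_phnum at 0x2C / 0x38; PT_INTERP marks the program
+    # interpreter segment that only dynamically linked objects carry.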
+
+ def machine(self):
+ """
+ We know the sex stored in self.sex and we
+ know the position
+ """
+ return self.getShort(ELFFile.E_MACHINE)
+
+ def run_objdump(self, cmd, d):
+ import bb.process
+ import sys
+
+ if cmd in self.objdump_output:
+ return self.objdump_output[cmd]
+
+ objdump = d.getVar('OBJDUMP', True)
+
+ env = os.environ.copy()
+ env["LC_ALL"] = "C"
+ env["PATH"] = d.getVar('PATH', True)
+
+ try:
+ bb.note("%s %s %s" % (objdump, cmd, self.name))
+ self.objdump_output[cmd] = bb.process.run([objdump, cmd, self.name], env=env, shell=False)[0]
+ return self.objdump_output[cmd]
+ except Exception as e:
+ bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e))
+ return ""
+
+if __name__ == "__main__":
+ import sys
+ elf = ELFFile(sys.argv[1])
+ elf.open()
+ print elf.isDynamic()
diff --git a/import-layers/yocto-poky/meta/lib/oe/recipeutils.py b/import-layers/yocto-poky/meta/lib/oe/recipeutils.py
new file mode 100644
index 000000000..6c7adb5bd
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/recipeutils.py
@@ -0,0 +1,849 @@
+# Utility functions for reading and modifying recipes
+#
+# Some code borrowed from the OE layer index
+#
+# Copyright (C) 2013-2015 Intel Corporation
+#
+
+import sys
+import os
+import os.path
+import tempfile
+import textwrap
+import difflib
+import utils
+import shutil
+import re
+import fnmatch
+from collections import OrderedDict, defaultdict
+
+
+# Help us to find places to insert values
+recipe_progression = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'do_package()', 'do_deploy()']
+# Variables that sometimes are a bit long but shouldn't be wrapped
+nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', 'SRC_URI[md5sum]', 'SRC_URI[sha256sum]']
+list_vars = ['SRC_URI', 'LIC_FILES_CHKSUM']
+meta_vars = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION']
+
+
+def pn_to_recipe(cooker, pn):
+ """Convert a recipe name (PN) to the path to the recipe file"""
+ import bb.providers
+
+ if pn in cooker.recipecache.pkg_pn:
+ best = bb.providers.findBestProvider(pn, cooker.data, cooker.recipecache, cooker.recipecache.pkg_pn)
+ return best[3]
+ elif pn in cooker.recipecache.providers:
+ filenames = cooker.recipecache.providers[pn]
+ eligible, foundUnique = bb.providers.filterProviders(filenames, pn, cooker.expanded_data, cooker.recipecache)
+ filename = eligible[0]
+ return filename
+ else:
+ return None
+
+
+def get_unavailable_reasons(cooker, pn):
+ """If a recipe could not be found, find out why if possible"""
+ import bb.taskdata
+ taskdata = bb.taskdata.TaskData(None, skiplist=cooker.skiplist)
+ return taskdata.get_reasons(pn)
+
+
+def parse_recipe(fn, appendfiles, d):
+ """
+ Parse an individual recipe file, optionally with a list of
+ bbappend files.
+ """
+ import bb.cache
+ envdata = bb.cache.Cache.loadDataFull(fn, appendfiles, d)
+ return envdata
+
+
+def parse_recipe_simple(cooker, pn, d, appends=True):
+ """
+ Parse a recipe and optionally all bbappends that apply to it
+ in the current configuration.
+ """
+ import bb.providers
+
+ recipefile = pn_to_recipe(cooker, pn)
+ if not recipefile:
+ skipreasons = get_unavailable_reasons(cooker, pn)
+ # We may as well re-use bb.providers.NoProvider here
+ if skipreasons:
+ raise bb.providers.NoProvider(skipreasons)
+ else:
+ raise bb.providers.NoProvider('Unable to find any recipe file matching %s' % pn)
+ if appends:
+ appendfiles = cooker.collection.get_file_appends(recipefile)
+ else:
+ appendfiles = None
+ return parse_recipe(recipefile, appendfiles, d)
+
+
+def get_var_files(fn, varlist, d):
+ """Find the file in which each of a list of variables is set.
+ Note: requires variable history to be enabled when parsing.
+ """
+ varfiles = {}
+ for v in varlist:
+ history = d.varhistory.variable(v)
+ files = []
+ for event in history:
+ if 'file' in event and not 'flag' in event:
+ files.append(event['file'])
+ if files:
+ actualfile = files[-1]
+ else:
+ actualfile = None
+ varfiles[v] = actualfile
+
+ return varfiles
+
+
+def split_var_value(value, assignment=True):
+ """
+ Split a space-separated variable's value into a list of items,
+ taking into account that some of the items might be made up of
+ expressions containing spaces that should not be split.
+ Parameters:
+ value:
+ The string value to split
+ assignment:
+            True to assume that the value represents an assignment
+            statement, False otherwise. If True and an assignment
+            statement is passed in, the first item in the returned list
+            will be the part of the assignment statement up to and
+            including the opening quote character, and the last item
+            will be the closing quote.
+ """
+ inexpr = 0
+ lastchar = None
+ out = []
+ buf = ''
+ for char in value:
+ if char == '{':
+ if lastchar == '$':
+ inexpr += 1
+ elif char == '}':
+ inexpr -= 1
+ elif assignment and char in '"\'' and inexpr == 0:
+ if buf:
+ out.append(buf)
+ out.append(char)
+ char = ''
+ buf = ''
+ elif char.isspace() and inexpr == 0:
+ char = ''
+ if buf:
+ out.append(buf)
+ buf = ''
+ buf += char
+ lastchar = char
+ if buf:
+ out.append(buf)
+
+ # Join together assignment statement and opening quote
+ outlist = out
+ if assignment:
+ assigfound = False
+ for idx, item in enumerate(out):
+ if '=' in item:
+ assigfound = True
+ if assigfound:
+ if '"' in item or "'" in item:
+ outlist = [' '.join(out[:idx+1])]
+ outlist.extend(out[idx+1:])
+ break
+ return outlist
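+
+# Editor's illustrative example (added; hypothetical input) of the
+# splitting above:
+#   split_var_value('SRC_URI = "a.patch ${@foo("x y")}"')
+# keeps the ${@...} expression intact despite its embedded space and returns
+#   ['SRC_URI = "', 'a.patch', '${@foo("x y")}', '"']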
+
+
+def patch_recipe_file(fn, values, patch=False, relpath=''):
+ """Update or insert variable values into a recipe file (assuming you
+ have already identified the exact file you want to update.)
+ Note that some manual inspection/intervention may be required
+ since this cannot handle all situations.
+ """
+
+ import bb.utils
+
+ recipe_progression_res = []
+ recipe_progression_restrs = []
+ for item in recipe_progression:
+ if item.endswith('()'):
+ key = item[:-2]
+ else:
+ key = item
+ restr = '%s(_[a-zA-Z0-9-_$(){}]+|\[[^\]]*\])?' % key
+ if item.endswith('()'):
+ recipe_progression_restrs.append(restr + '()')
+ else:
+ recipe_progression_restrs.append(restr)
+ recipe_progression_res.append(re.compile('^%s$' % restr))
+
+ def get_recipe_pos(variable):
+ for i, p in enumerate(recipe_progression_res):
+ if p.match(variable):
+ return i
+ return -1
+
+ remainingnames = {}
+ for k in values.keys():
+ remainingnames[k] = get_recipe_pos(k)
+ remainingnames = OrderedDict(sorted(remainingnames.iteritems(), key=lambda x: x[1]))
+
+ modifying = False
+
+ def outputvalue(name, lines, rewindcomments=False):
+ if values[name] is None:
+ return
+ rawtext = '%s = "%s"\n' % (name, values[name])
+ addlines = []
+ if name in nowrap_vars:
+ addlines.append(rawtext)
+ elif name in list_vars:
+ splitvalue = split_var_value(values[name], assignment=False)
+ if len(splitvalue) > 1:
+ linesplit = ' \\\n' + (' ' * (len(name) + 4))
+ addlines.append('%s = "%s%s"\n' % (name, linesplit.join(splitvalue), linesplit))
+ else:
+ addlines.append(rawtext)
+ else:
+ wrapped = textwrap.wrap(rawtext)
+ for wrapline in wrapped[:-1]:
+ addlines.append('%s \\\n' % wrapline)
+ addlines.append('%s\n' % wrapped[-1])
+ if rewindcomments:
+ # Ensure we insert the lines before any leading comments
+ # (that we'd want to ensure remain leading the next value)
+ for i, ln in reversed(list(enumerate(lines))):
+ if ln[0] != '#':
+ lines[i+1:i+1] = addlines
+ break
+ else:
+ lines.extend(addlines)
+ else:
+ lines.extend(addlines)
+
+ existingnames = []
+ def patch_recipe_varfunc(varname, origvalue, op, newlines):
+ if modifying:
+ # Insert anything that should come before this variable
+ pos = get_recipe_pos(varname)
+ for k in remainingnames.keys()[:]:
+ if remainingnames[k] > -1 and pos >= remainingnames[k] and not k in existingnames:
+ outputvalue(k, newlines, rewindcomments=True)
+ del remainingnames[k]
+ # Now change this variable, if it needs to be changed
+ if varname in existingnames and op in ['+=', '=', '=+']:
+ if varname in remainingnames:
+ outputvalue(varname, newlines)
+ del remainingnames[varname]
+ return None, None, 0, True
+ else:
+ if varname in values:
+ existingnames.append(varname)
+ return origvalue, None, 0, True
+
+ # First run - establish which values we want to set are already in the file
+ varlist = [re.escape(item) for item in values.keys()]
+ with open(fn, 'r') as f:
+ changed, fromlines = bb.utils.edit_metadata(f, varlist, patch_recipe_varfunc)
+ # Second run - actually set everything
+ modifying = True
+ varlist.extend(recipe_progression_restrs)
+ changed, tolines = bb.utils.edit_metadata(fromlines, varlist, patch_recipe_varfunc, match_overrides=True)
+
+ if remainingnames:
+ if tolines[-1].strip() != '':
+ tolines.append('\n')
+ for k in remainingnames.keys():
+ outputvalue(k, tolines)
+
+ if patch:
+ relfn = os.path.relpath(fn, relpath)
+ diff = difflib.unified_diff(fromlines, tolines, 'a/%s' % relfn, 'b/%s' % relfn)
+ return diff
+ else:
+ with open(fn, 'w') as f:
+ f.writelines(tolines)
+ return None
+
+
+def localise_file_vars(fn, varfiles, varlist):
+ """Given a list of variables and variable history (fetched with get_var_files())
+ find where each variable should be set/changed. This handles for example where a
+ recipe includes an inc file where variables might be changed - in most cases
+ we want to update the inc file when changing the variable value rather than adding
+ it to the recipe itself.
+ """
+ fndir = os.path.dirname(fn) + os.sep
+
+ first_meta_file = None
+ for v in meta_vars:
+ f = varfiles.get(v, None)
+ if f:
+ actualdir = os.path.dirname(f) + os.sep
+ if actualdir.startswith(fndir):
+ first_meta_file = f
+ break
+
+ filevars = defaultdict(list)
+ for v in varlist:
+ f = varfiles[v]
+ # Only return files that are in the same directory as the recipe or in some directory below there
+ # (this excludes bbclass files and common inc files that wouldn't be appropriate to set the variable
+ # in if we were going to set a value specific to this recipe)
+ if f:
+ actualfile = f
+ else:
+ # Variable isn't in a file, if it's one of the "meta" vars, use the first file with a meta var in it
+ if first_meta_file:
+ actualfile = first_meta_file
+ else:
+ actualfile = fn
+
+ actualdir = os.path.dirname(actualfile) + os.sep
+ if not actualdir.startswith(fndir):
+ actualfile = fn
+ filevars[actualfile].append(v)
+
+ return filevars
+
+def patch_recipe(d, fn, varvalues, patch=False, relpath=''):
+ """Modify a list of variable values in the specified recipe. Handles inc files if
+ used by the recipe.
+ """
+ varlist = varvalues.keys()
+ varfiles = get_var_files(fn, varlist, d)
+ locs = localise_file_vars(fn, varfiles, varlist)
+ patches = []
+ for f,v in locs.iteritems():
+ vals = {k: varvalues[k] for k in v}
+ patchdata = patch_recipe_file(f, vals, patch, relpath)
+ if patch:
+ patches.append(patchdata)
+
+ if patch:
+ return patches
+ else:
+ return None
+
+
+
+def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True):
+ """Copy (local) recipe files, including both files included via include/require,
+ and files referred to in the SRC_URI variable."""
+ import bb.fetch2
+ import oe.path
+
+ # FIXME need a warning if the unexpanded SRC_URI value contains variable references
+
+ uris = (d.getVar('SRC_URI', True) or "").split()
+ fetch = bb.fetch2.Fetch(uris, d)
+ if download:
+ fetch.download()
+
+ # Copy local files to target directory and gather any remote files
+ bb_dir = os.path.dirname(d.getVar('FILE', True)) + os.sep
+ remotes = []
+ includes = [path for path in d.getVar('BBINCLUDED', True).split() if
+ path.startswith(bb_dir) and os.path.exists(path)]
+ for path in fetch.localpaths() + includes:
+ # Only import files that are under the meta directory
+ if path.startswith(bb_dir):
+ if not whole_dir:
+ relpath = os.path.relpath(path, bb_dir)
+ subdir = os.path.join(tgt_dir, os.path.dirname(relpath))
+ if not os.path.exists(subdir):
+ os.makedirs(subdir)
+ shutil.copy2(path, os.path.join(tgt_dir, relpath))
+ else:
+ remotes.append(path)
+ # Simply copy whole meta dir, if requested
+ if whole_dir:
+ shutil.copytree(bb_dir, tgt_dir)
+
+ return remotes
+
+
+def get_recipe_local_files(d, patches=False):
+ """Get a list of local files in SRC_URI within a recipe."""
+ uris = (d.getVar('SRC_URI', True) or "").split()
+ fetch = bb.fetch2.Fetch(uris, d)
+ ret = {}
+ for uri in uris:
+ if fetch.ud[uri].type == 'file':
+ if (not patches and
+ bb.utils.exec_flat_python_func('patch_path', uri, fetch, '')):
+ continue
+ # Skip files that are referenced by absolute path
+ if not os.path.isabs(fetch.ud[uri].basepath):
+ ret[fetch.ud[uri].basepath] = fetch.localpath(uri)
+ return ret
+
+
+def get_recipe_patches(d):
+ """Get a list of the patches included in SRC_URI within a recipe."""
+ patchfiles = []
+ # Execute src_patches() defined in patch.bbclass - this works since that class
+ # is inherited globally
+ patches = bb.utils.exec_flat_python_func('src_patches', d)
+ for patch in patches:
+ _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
+ patchfiles.append(local)
+ return patchfiles
+
+
+def get_recipe_patched_files(d):
+ """
+ Get the list of patches for a recipe along with the files each patch modifies.
+ Params:
+ d: the datastore for the recipe
+ Returns:
+ a dict mapping patch file path to a list of tuples of changed files and
+ change mode ('A' for add, 'D' for delete or 'M' for modify)
+ """
+ import oe.patch
+ # Execute src_patches() defined in patch.bbclass - this works since that class
+ # is inherited globally
+ patches = bb.utils.exec_flat_python_func('src_patches', d)
+ patchedfiles = {}
+ for patch in patches:
+ _, _, patchfile, _, _, parm = bb.fetch.decodeurl(patch)
+ striplevel = int(parm['striplevel'])
+ patchedfiles[patchfile] = oe.patch.PatchSet.getPatchedFiles(patchfile, striplevel, os.path.join(d.getVar('S', True), parm.get('patchdir', '')))
+ return patchedfiles
+
+
+def validate_pn(pn):
+ """Perform validation on a recipe name (PN) for a new recipe."""
+ reserved_names = ['forcevariable', 'append', 'prepend', 'remove']
+    if not re.match('[0-9a-z-.]+$', pn):
+ return 'Recipe name "%s" is invalid: only characters 0-9, a-z, - and . are allowed' % pn
+ elif pn in reserved_names:
+ return 'Recipe name "%s" is invalid: is a reserved keyword' % pn
+ elif pn.startswith('pn-'):
+ return 'Recipe name "%s" is invalid: names starting with "pn-" are reserved' % pn
+ elif pn.endswith(('.bb', '.bbappend', '.bbclass', '.inc', '.conf')):
+ return 'Recipe name "%s" is invalid: should be just a name, not a file name' % pn
+ return ''
+
+
+def get_bbappend_path(d, destlayerdir, wildcardver=False):
+ """Determine how a bbappend for a recipe should be named and located within another layer"""
+
+ import bb.cookerdata
+
+ destlayerdir = os.path.abspath(destlayerdir)
+ recipefile = d.getVar('FILE', True)
+ recipefn = os.path.splitext(os.path.basename(recipefile))[0]
+ if wildcardver and '_' in recipefn:
+ recipefn = recipefn.split('_', 1)[0] + '_%'
+ appendfn = recipefn + '.bbappend'
+
+ # Parse the specified layer's layer.conf file directly, in case the layer isn't in bblayers.conf
+ confdata = d.createCopy()
+ confdata.setVar('BBFILES', '')
+ confdata.setVar('LAYERDIR', destlayerdir)
+ destlayerconf = os.path.join(destlayerdir, "conf", "layer.conf")
+ confdata = bb.cookerdata.parse_config_file(destlayerconf, confdata)
+
+ origlayerdir = find_layerdir(recipefile)
+ if not origlayerdir:
+ return (None, False)
+ # Now join this to the path where the bbappend is going and check if it is covered by BBFILES
+ appendpath = os.path.join(destlayerdir, os.path.relpath(os.path.dirname(recipefile), origlayerdir), appendfn)
+ closepath = ''
+ pathok = True
+ for bbfilespec in confdata.getVar('BBFILES', True).split():
+ if fnmatch.fnmatchcase(appendpath, bbfilespec):
+ # Our append path works, we're done
+ break
+ elif bbfilespec.startswith(destlayerdir) and fnmatch.fnmatchcase('test.bbappend', os.path.basename(bbfilespec)):
+ # Try to find the longest matching path
+ if len(bbfilespec) > len(closepath):
+ closepath = bbfilespec
+ else:
+ # Unfortunately the bbappend layer and the original recipe's layer don't have the same structure
+ if closepath:
+ # bbappend layer's layer.conf at least has a spec that picks up .bbappend files
+ # Now we just need to substitute out any wildcards
+ appendsubdir = os.path.relpath(os.path.dirname(closepath), destlayerdir)
+ if 'recipes-*' in appendsubdir:
+ # Try to copy this part from the original recipe path
+ res = re.search('/recipes-[^/]+/', recipefile)
+ if res:
+ appendsubdir = appendsubdir.replace('/recipes-*/', res.group(0))
+ # This is crude, but we have to do something
+ appendsubdir = appendsubdir.replace('*', recipefn.split('_')[0])
+ appendsubdir = appendsubdir.replace('?', 'a')
+ appendpath = os.path.join(destlayerdir, appendsubdir, appendfn)
+ else:
+ pathok = False
+ return (appendpath, pathok)
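+
+# Editor's illustrative example (added): for a recipe file foo_1.2.bb,
+# wildcardver=False yields a bbappend named foo_1.2.bbappend, while
+# wildcardver=True yields foo_%.bbappend, which matches any version.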
+
+
+def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None):
+ """
+ Writes a bbappend file for a recipe
+ Parameters:
+ rd: data dictionary for the recipe
+ destlayerdir: base directory of the layer to place the bbappend in
+ (subdirectory path from there will be determined automatically)
+        srcfiles: dict of source files to add to SRC_URI, where the key
+            is the full path to the file to be added, and the value is the
+ original filename as it would appear in SRC_URI or None if it
+ isn't already present. You may pass None for this parameter if
+ you simply want to specify your own content via the extralines
+ parameter.
+ install: dict mapping entries in srcfiles to a tuple of two elements:
+ install path (*without* ${D} prefix) and permission value (as a
+ string, e.g. '0644').
+ wildcardver: True to use a % wildcard in the bbappend filename, or
+ False to make the bbappend specific to the recipe version.
+ machine:
+ If specified, make the changes in the bbappend specific to this
+ machine. This will also cause PACKAGE_ARCH = "${MACHINE_ARCH}"
+ to be added to the bbappend.
+ extralines:
+ Extra lines to add to the bbappend. This may be a dict of name
+ value pairs, or simply a list of the lines.
+ removevalues:
+ Variable values to remove - a dict of names/values.
+ """
+
+ if not removevalues:
+ removevalues = {}
+
+ # Determine how the bbappend should be named
+ appendpath, pathok = get_bbappend_path(rd, destlayerdir, wildcardver)
+ if not appendpath:
+        bb.error('Unable to determine layer directory containing %s' % rd.getVar('FILE', True))
+ return (None, None)
+ if not pathok:
+ bb.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.' % (os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath)))
+
+ appenddir = os.path.dirname(appendpath)
+ bb.utils.mkdirhier(appenddir)
+
+ # FIXME check if the bbappend doesn't get overridden by a higher priority layer?
+
+ layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS', True).split()]
+ if not os.path.abspath(destlayerdir) in layerdirs:
+ bb.warn('Specified layer is not currently enabled in bblayers.conf, you will need to add it before this bbappend will be active')
+
+ bbappendlines = []
+ if extralines:
+ if isinstance(extralines, dict):
+ for name, value in extralines.iteritems():
+ bbappendlines.append((name, '=', value))
+ else:
+ # Do our best to split it
+ for line in extralines:
+ if line[-1] == '\n':
+ line = line[:-1]
+ splitline = line.split(None, 2)
+ if len(splitline) == 3:
+ bbappendlines.append(tuple(splitline))
+ else:
+ raise Exception('Invalid extralines value passed')
+
+ def popline(varname):
+ for i in xrange(0, len(bbappendlines)):
+ if bbappendlines[i][0] == varname:
+ line = bbappendlines.pop(i)
+ return line
+ return None
+
+ def appendline(varname, op, value):
+ for i in xrange(0, len(bbappendlines)):
+ item = bbappendlines[i]
+ if item[0] == varname:
+ bbappendlines[i] = (item[0], item[1], item[2] + ' ' + value)
+ break
+ else:
+ bbappendlines.append((varname, op, value))
+
+ destsubdir = rd.getVar('PN', True)
+ if srcfiles:
+ bbappendlines.append(('FILESEXTRAPATHS_prepend', ':=', '${THISDIR}/${PN}:'))
+
+ appendoverride = ''
+ if machine:
+ bbappendlines.append(('PACKAGE_ARCH', '=', '${MACHINE_ARCH}'))
+ appendoverride = '_%s' % machine
+ copyfiles = {}
+ if srcfiles:
+ instfunclines = []
+ for newfile, origsrcfile in srcfiles.iteritems():
+ srcfile = origsrcfile
+ srcurientry = None
+ if not srcfile:
+ srcfile = os.path.basename(newfile)
+ srcurientry = 'file://%s' % srcfile
+ # Double-check it's not there already
+ # FIXME do we care if the entry is added by another bbappend that might go away?
+ if not srcurientry in rd.getVar('SRC_URI', True).split():
+ if machine:
+ appendline('SRC_URI_append%s' % appendoverride, '=', ' ' + srcurientry)
+ else:
+ appendline('SRC_URI', '+=', srcurientry)
+ copyfiles[newfile] = srcfile
+ if install:
+ institem = install.pop(newfile, None)
+ if institem:
+ (destpath, perms) = institem
+ instdestpath = replace_dir_vars(destpath, rd)
+ instdirline = 'install -d ${D}%s' % os.path.dirname(instdestpath)
+ if not instdirline in instfunclines:
+ instfunclines.append(instdirline)
+ instfunclines.append('install -m %s ${WORKDIR}/%s ${D}%s' % (perms, os.path.basename(srcfile), instdestpath))
+ if instfunclines:
+ bbappendlines.append(('do_install_append%s()' % appendoverride, '', instfunclines))
+
+ bb.note('Writing append file %s' % appendpath)
+
+ if os.path.exists(appendpath):
+ # Work around lack of nonlocal in python 2
+ extvars = {'destsubdir': destsubdir}
+
+ def appendfile_varfunc(varname, origvalue, op, newlines):
+ if varname == 'FILESEXTRAPATHS_prepend':
+ if origvalue.startswith('${THISDIR}/'):
+ popline('FILESEXTRAPATHS_prepend')
+ extvars['destsubdir'] = rd.expand(origvalue.split('${THISDIR}/', 1)[1].rstrip(':'))
+ elif varname == 'PACKAGE_ARCH':
+ if machine:
+ popline('PACKAGE_ARCH')
+ return (machine, None, 4, False)
+ elif varname.startswith('do_install_append'):
+ func = popline(varname)
+ if func:
+ instfunclines = [line.strip() for line in origvalue.strip('\n').splitlines()]
+ for line in func[2]:
+ if not line in instfunclines:
+ instfunclines.append(line)
+ return (instfunclines, None, 4, False)
+ else:
+ splitval = split_var_value(origvalue, assignment=False)
+ changed = False
+ removevar = varname
+ if varname in ['SRC_URI', 'SRC_URI_append%s' % appendoverride]:
+ removevar = 'SRC_URI'
+ line = popline(varname)
+ if line:
+ if line[2] not in splitval:
+ splitval.append(line[2])
+ changed = True
+ else:
+ line = popline(varname)
+ if line:
+ splitval = [line[2]]
+ changed = True
+
+ if removevar in removevalues:
+ remove = removevalues[removevar]
+ if isinstance(remove, basestring):
+ if remove in splitval:
+ splitval.remove(remove)
+ changed = True
+ else:
+ for removeitem in remove:
+ if removeitem in splitval:
+ splitval.remove(removeitem)
+ changed = True
+
+ if changed:
+ newvalue = splitval
+ if len(newvalue) == 1:
+ # Ensure it's written out as one line
+ if '_append' in varname:
+ newvalue = ' ' + newvalue[0]
+ else:
+ newvalue = newvalue[0]
+ if not newvalue and (op in ['+=', '.='] or '_append' in varname):
+ # There's no point appending nothing
+ newvalue = None
+ if varname.endswith('()'):
+ indent = 4
+ else:
+ indent = -1
+ return (newvalue, None, indent, True)
+ return (origvalue, None, 4, False)
+
+ varnames = [item[0] for item in bbappendlines]
+ if removevalues:
+ varnames.extend(removevalues.keys())
+
+ with open(appendpath, 'r') as f:
+ (updated, newlines) = bb.utils.edit_metadata(f, varnames, appendfile_varfunc)
+
+ destsubdir = extvars['destsubdir']
+ else:
+ updated = False
+ newlines = []
+
+ if bbappendlines:
+ for line in bbappendlines:
+ if line[0].endswith('()'):
+ newlines.append('%s {\n %s\n}\n' % (line[0], '\n '.join(line[2])))
+ else:
+ newlines.append('%s %s "%s"\n\n' % line)
+ updated = True
+
+ if updated:
+ with open(appendpath, 'w') as f:
+ f.writelines(newlines)
+
+ if copyfiles:
+ if machine:
+ destsubdir = os.path.join(destsubdir, machine)
+ for newfile, srcfile in copyfiles.iteritems():
+ filedest = os.path.join(appenddir, destsubdir, os.path.basename(srcfile))
+ if os.path.abspath(newfile) != os.path.abspath(filedest):
+ bb.note('Copying %s to %s' % (newfile, filedest))
+ bb.utils.mkdirhier(os.path.dirname(filedest))
+ shutil.copyfile(newfile, filedest)
+
+ return (appendpath, os.path.join(appenddir, destsubdir))
+
+
+def find_layerdir(fn):
+ """ Figure out relative path to base of layer for a file (e.g. a recipe)"""
+ pth = os.path.dirname(fn)
+ layerdir = ''
+ while pth:
+ if os.path.exists(os.path.join(pth, 'conf', 'layer.conf')):
+ layerdir = pth
+ break
+ pth = os.path.dirname(pth)
+ return layerdir
+
+
+def replace_dir_vars(path, d):
+ """Replace common directory paths with appropriate variable references (e.g. /etc becomes ${sysconfdir})"""
+ dirvars = {}
+ # Sort by length so we get the variables we're interested in first
+ for var in sorted(d.keys(), key=len):
+ if var.endswith('dir') and var.lower() == var:
+ value = d.getVar(var, True)
+ if value.startswith('/') and not '\n' in value and value not in dirvars:
+ dirvars[value] = var
+ for dirpath in sorted(dirvars.keys(), reverse=True):
+ path = path.replace(dirpath, '${%s}' % dirvars[dirpath])
+ return path
+
+def get_recipe_pv_without_srcpv(pv, uri_type):
+ """
+    Get PV without the SRCPV suffix commonly used with SCMs; for now
+    only git is supported.
+
+    Returns a tuple with pv, prefix and suffix.
+ """
+ pfx = ''
+ sfx = ''
+
+ if uri_type == 'git':
+ git_regex = re.compile("(?P<pfx>v?)(?P<ver>[^\+]*)((?P<sfx>\+(git)?r?(AUTOINC\+))(?P<rev>.*))?")
+ m = git_regex.match(pv)
+
+ if m:
+ pv = m.group('ver')
+ pfx = m.group('pfx')
+ sfx = m.group('sfx')
+ else:
+ regex = re.compile("(?P<pfx>(v|r)?)(?P<ver>.*)")
+ m = regex.match(pv)
+ if m:
+ pv = m.group('ver')
+ pfx = m.group('pfx')
+
+ return (pv, pfx, sfx)
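+
+# Editor's illustrative example (added; hypothetical PV):
+#   get_recipe_pv_without_srcpv('v1.2.3+gitAUTOINC+deadbeef', 'git')
+# returns ('1.2.3', 'v', '+gitAUTOINC+').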
+
+def get_recipe_upstream_version(rd):
+ """
+    Get the upstream version of a recipe using bb.fetch2 methods, with
+    support for http, https, ftp and git.
+
+    bb.fetch2 exceptions can be raised:
+        FetchError when there is no network access or the upstream site
+        doesn't respond.
+        NoMethodError when the URI's latest_versionstring method isn't
+        implemented.
+
+    Returns a dictionary with version, type and datetime.
+    Type can be A for Automatic, M for Manual and U for Unknown.
+ """
+ from bb.fetch2 import decodeurl
+ from datetime import datetime
+
+ ru = {}
+ ru['version'] = ''
+ ru['type'] = 'U'
+ ru['datetime'] = ''
+
+ pv = rd.getVar('PV', True)
+
+    # XXX: if there is no SRC_URI, there are no upstream sources, so return
+    # the current recipe version so that the upstream version check
+    # declares a match.
+ src_uris = rd.getVar('SRC_URI', True)
+ if not src_uris:
+ ru['version'] = pv
+ ru['type'] = 'M'
+ ru['datetime'] = datetime.now()
+ return ru
+
+    # XXX: we assume that the first entry points to the upstream sources
+ src_uri = src_uris.split()[0]
+ uri_type, _, _, _, _, _ = decodeurl(src_uri)
+
+ manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION", True)
+ if manual_upstream_version:
+ # manual tracking of upstream version.
+ ru['version'] = manual_upstream_version
+ ru['type'] = 'M'
+
+ manual_upstream_date = rd.getVar("CHECK_DATE", True)
+ if manual_upstream_date:
+ date = datetime.strptime(manual_upstream_date, "%b %d, %Y")
+ else:
+ date = datetime.now()
+ ru['datetime'] = date
+
+ elif uri_type == "file":
+ # files are always up-to-date
+ ru['version'] = pv
+ ru['type'] = 'A'
+ ru['datetime'] = datetime.now()
+ else:
+ ud = bb.fetch2.FetchData(src_uri, rd)
+ pupver = ud.method.latest_versionstring(ud, rd)
+ (upversion, revision) = pupver
+
+        # format the git version as <version>+gitAUTOINC+<hash>
+ if uri_type == 'git':
+ (pv, pfx, sfx) = get_recipe_pv_without_srcpv(pv, uri_type)
+
+        # if we got a revision but no upstream version, use the current PV
+ if upversion == '' and revision:
+ upversion = pv
+
+ if upversion:
+ tmp = upversion
+ upversion = ''
+
+ if pfx:
+ upversion = pfx + tmp
+ else:
+ upversion = tmp
+
+ if sfx:
+ upversion = upversion + sfx + revision[:10]
+
+ if upversion:
+ ru['version'] = upversion
+ ru['type'] = 'A'
+
+ ru['datetime'] = datetime.now()
+
+ return ru
diff --git a/import-layers/yocto-poky/meta/lib/oe/rootfs.py b/import-layers/yocto-poky/meta/lib/oe/rootfs.py
new file mode 100644
index 000000000..a95e1b739
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/rootfs.py
@@ -0,0 +1,986 @@
+from abc import ABCMeta, abstractmethod
+from oe.utils import execute_pre_post_process
+from oe.package_manager import *
+from oe.manifest import *
+import oe.path
+import filecmp
+import shutil
+import os
+import subprocess
+import re
+
+
+class Rootfs(object):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+ __metaclass__ = ABCMeta
+
+ def __init__(self, d):
+ self.d = d
+ self.pm = None
+ self.image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
+ self.deploy_dir_image = self.d.getVar('DEPLOY_DIR_IMAGE', True)
+
+ self.install_order = Manifest.INSTALL_ORDER
+
+ @abstractmethod
+ def _create(self):
+ pass
+
+ @abstractmethod
+ def _get_delayed_postinsts(self):
+ pass
+
+ @abstractmethod
+ def _save_postinsts(self):
+ pass
+
+ @abstractmethod
+ def _log_check(self):
+ pass
+
+ def _log_check_warn(self):
+ r = re.compile('^(warn|Warn|NOTE: warn|NOTE: Warn|WARNING:)')
+ log_path = self.d.expand("${T}/log.do_rootfs")
+ with open(log_path, 'r') as log:
+ for line in log:
+ if 'log_check' in line or 'NOTE:' in line:
+ continue
+
+ m = r.search(line)
+ if m:
+ bb.warn('[log_check] %s: found a warning message in the logfile (keyword \'%s\'):\n[log_check] %s'
+ % (self.d.getVar('PN', True), m.group(), line))
+
+ def _log_check_error(self):
+ r = re.compile(self.log_check_regex)
+ log_path = self.d.expand("${T}/log.do_rootfs")
+ with open(log_path, 'r') as log:
+ found_error = 0
+ message = "\n"
+ for line in log:
+ if 'log_check' in line:
+ continue
+
+ if hasattr(self, 'log_check_expected_errors_regexes'):
+ m = None
+ for ee in self.log_check_expected_errors_regexes:
+ m = re.search(ee, line)
+ if m:
+ break
+ if m:
+ continue
+
+ m = r.search(line)
+ if m:
+ found_error = 1
+ bb.warn('[log_check] In line: [%s]' % line)
+ bb.warn('[log_check] %s: found an error message in the logfile (keyword \'%s\'):\n[log_check] %s'
+ % (self.d.getVar('PN', True), m.group(), line))
+
+ if found_error >= 1 and found_error <= 5:
+ message += line + '\n'
+ found_error += 1
+
+ if found_error == 6:
+ bb.fatal(message)
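+
+    # Editor's note (added): once an error line matches, _log_check_error()
+    # collects that line and the next few log lines (up to five in total)
+    # into 'message' and aborts the build with bb.fatal().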
+
+ def _insert_feed_uris(self):
+ if bb.utils.contains("IMAGE_FEATURES", "package-management",
+ True, False, self.d):
+ self.pm.insert_feeds_uris()
+
+ @abstractmethod
+ def _handle_intercept_failure(self, failed_script):
+ pass
+
+ """
+ The _cleanup() method should be used to clean-up stuff that we don't really
+ want to end up on target. For example, in the case of RPM, the DB locks.
+ The method is called, once, at the end of create() method.
+ """
+ @abstractmethod
+ def _cleanup(self):
+ pass
+
+ def _setup_dbg_rootfs(self, dirs):
+ gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS', True) or '0'
+ if gen_debugfs != '1':
+ return
+
+ bb.note(" Renaming the original rootfs...")
+ try:
+ shutil.rmtree(self.image_rootfs + '-orig')
+ except:
+ pass
+ os.rename(self.image_rootfs, self.image_rootfs + '-orig')
+
+ bb.note(" Creating debug rootfs...")
+ bb.utils.mkdirhier(self.image_rootfs)
+
+ bb.note(" Copying back package database...")
+ for dir in dirs:
+ bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(dir))
+ shutil.copytree(self.image_rootfs + '-orig' + dir, self.image_rootfs + dir)
+
+ cpath = oe.cachedpath.CachedPath()
+ # Copy files located in /usr/lib/debug or /usr/src/debug
+ for dir in ["/usr/lib/debug", "/usr/src/debug"]:
+ src = self.image_rootfs + '-orig' + dir
+ if cpath.exists(src):
+ dst = self.image_rootfs + dir
+ bb.utils.mkdirhier(os.path.dirname(dst))
+ shutil.copytree(src, dst)
+
+ # Copy files with suffix '.debug' or located in '.debug' dir.
+ for root, dirs, files in cpath.walk(self.image_rootfs + '-orig'):
+ relative_dir = root[len(self.image_rootfs + '-orig'):]
+ for f in files:
+ if f.endswith('.debug') or '/.debug' in relative_dir:
+ bb.utils.mkdirhier(self.image_rootfs + relative_dir)
+ shutil.copy(os.path.join(root, f),
+ self.image_rootfs + relative_dir)
+
+ bb.note(" Install complementary '*-dbg' packages...")
+ self.pm.install_complementary('*-dbg')
+
+ bb.note(" Rename debug rootfs...")
+ try:
+ shutil.rmtree(self.image_rootfs + '-dbg')
+ except:
+ pass
+ os.rename(self.image_rootfs, self.image_rootfs + '-dbg')
+
+ bb.note(" Restoreing original rootfs...")
+ os.rename(self.image_rootfs + '-orig', self.image_rootfs)
+
+ def _exec_shell_cmd(self, cmd):
+ fakerootcmd = self.d.getVar('FAKEROOT', True)
+ if fakerootcmd is not None:
+ exec_cmd = [fakerootcmd, cmd]
+ else:
+ exec_cmd = cmd
+
+ try:
+ subprocess.check_output(exec_cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ return("Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output))
+
+ return None
+
+ def create(self):
+ bb.note("###### Generate rootfs #######")
+ pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND", True)
+ post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND", True)
+ rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND', True)
+
+ postinst_intercepts_dir = self.d.getVar("POSTINST_INTERCEPTS_DIR", True)
+ if not postinst_intercepts_dir:
+ postinst_intercepts_dir = self.d.expand("${COREBASE}/scripts/postinst-intercepts")
+ intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
+ "intercept_scripts")
+
+ bb.utils.remove(intercepts_dir, True)
+
+ bb.utils.mkdirhier(self.image_rootfs)
+
+ bb.utils.mkdirhier(self.deploy_dir_image)
+
+ shutil.copytree(postinst_intercepts_dir, intercepts_dir)
+
+ shutil.copy(self.d.expand("${COREBASE}/meta/files/deploydir_readme.txt"),
+ self.deploy_dir_image +
+ "/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt")
+
+ execute_pre_post_process(self.d, pre_process_cmds)
+
+ # call the package manager dependent create method
+ self._create()
+
+ sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir', True)
+ bb.utils.mkdirhier(sysconfdir)
+ with open(sysconfdir + "/version", "w+") as ver:
+ ver.write(self.d.getVar('BUILDNAME', True) + "\n")
+
+ execute_pre_post_process(self.d, rootfs_post_install_cmds)
+
+ self._run_intercepts()
+
+ execute_pre_post_process(self.d, post_process_cmds)
+
+ if bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
+ True, False, self.d):
+ delayed_postinsts = self._get_delayed_postinsts()
+ if delayed_postinsts is not None:
+ bb.fatal("The following packages could not be configured "
+ "offline and rootfs is read-only: %s" %
+ delayed_postinsts)
+
+ if self.d.getVar('USE_DEVFS', True) != "1":
+ self._create_devfs()
+
+ self._uninstall_unneeded()
+
+ self._insert_feed_uris()
+
+ self._run_ldconfig()
+
+ if self.d.getVar('USE_DEPMOD', True) != "0":
+ self._generate_kernel_module_deps()
+
+ self._cleanup()
+ self._log_check()
+
+ def _uninstall_unneeded(self):
+ # Remove unneeded init script symlinks
+ delayed_postinsts = self._get_delayed_postinsts()
+ if delayed_postinsts is None:
+ if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")):
+ self._exec_shell_cmd(["update-rc.d", "-f", "-r",
+ self.d.getVar('IMAGE_ROOTFS', True),
+ "run-postinsts", "remove"])
+
+ image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
+ True, False, self.d)
+ if image_rorfs:
+ # Remove components that we don't need if it's a read-only rootfs
+ unneeded_pkgs = self.d.getVar("ROOTFS_RO_UNNEEDED", True).split()
+ pkgs_installed = image_list_installed_packages(self.d)
+ pkgs_to_remove = [pkg for pkg in pkgs_installed if pkg in unneeded_pkgs]
+
+ if len(pkgs_to_remove) > 0:
+ self.pm.remove(pkgs_to_remove, False)
+
+ if delayed_postinsts:
+ self._save_postinsts()
+ if image_rorfs:
+ bb.warn("There are post install scripts "
+ "in a read-only rootfs")
+
+ post_uninstall_cmds = self.d.getVar("ROOTFS_POSTUNINSTALL_COMMAND", True)
+ execute_pre_post_process(self.d, post_uninstall_cmds)
+
+ runtime_pkgmanage = bb.utils.contains("IMAGE_FEATURES", "package-management",
+ True, False, self.d)
+ if not runtime_pkgmanage:
+ # Remove the package manager data files
+ self.pm.remove_packaging_data()
+
+ def _run_intercepts(self):
+ intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
+ "intercept_scripts")
+
+ bb.note("Running intercept scripts:")
+ os.environ['D'] = self.image_rootfs
+ os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE', True)
+ for script in os.listdir(intercepts_dir):
+ script_full = os.path.join(intercepts_dir, script)
+
+ if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
+ continue
+
+ bb.note("> Executing %s intercept ..." % script)
+
+ try:
+ subprocess.check_call(script_full)
+ except subprocess.CalledProcessError as e:
+ bb.warn("The postinstall intercept hook '%s' failed (exit code: %d)! See log for details!" %
+ (script, e.returncode))
+
+ with open(script_full) as intercept:
+ registered_pkgs = None
+ for line in intercept.read().split("\n"):
+ m = re.match("^##PKGS:(.*)", line)
+ if m is not None:
+ registered_pkgs = m.group(1).strip()
+ break
+
+ if registered_pkgs is not None:
+ bb.warn("The postinstalls for the following packages "
+ "will be postponed for first boot: %s" %
+ registered_pkgs)
+
+ # call the backend dependent handler
+ self._handle_intercept_failure(registered_pkgs)
+
+ def _run_ldconfig(self):
+ if self.d.getVar('LDCONFIGDEPEND', True):
+ bb.note("Executing: ldconfig -r" + self.image_rootfs + "-c new -v")
+ self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
+ 'new', '-v'])
+
+ def _check_for_kernel_modules(self, modules_dir):
+ for root, dirs, files in os.walk(modules_dir, topdown=True):
+ for name in files:
+ found_ko = name.endswith(".ko")
+ if found_ko:
+ return found_ko
+ return False
+
+ def _generate_kernel_module_deps(self):
+ modules_dir = os.path.join(self.image_rootfs, 'lib', 'modules')
+ # if we don't have any modules, don't bother running depmod
+ if not self._check_for_kernel_modules(modules_dir):
+ bb.note("No Kernel Modules found, not running depmod")
+ return
+
+ kernel_abi_ver_file = oe.path.join(self.d.getVar('PKGDATA_DIR', True), "kernel-depmod",
+ 'kernel-abiversion')
+ if not os.path.exists(kernel_abi_ver_file):
+ bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
+
+ with open(kernel_abi_ver_file) as f:
+ kernel_ver = f.read().strip(' \n')
+ versioned_modules_dir = os.path.join(self.image_rootfs, modules_dir, kernel_ver)
+
+ bb.utils.mkdirhier(versioned_modules_dir)
+
+ self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, kernel_ver])
+
+ """
+ Create devfs:
+ * IMAGE_DEVICE_TABLE is the old name to an absolute path to a device table file
+ * IMAGE_DEVICE_TABLES is a new name for a file, or list of files, seached
+ for in the BBPATH
+ If neither are specified then the default name of files/device_table-minimal.txt
+ is searched for in the BBPATH (same as the old version.)
+ """
+ def _create_devfs(self):
+ devtable_list = []
+ devtable = self.d.getVar('IMAGE_DEVICE_TABLE', True)
+ if devtable is not None:
+ devtable_list.append(devtable)
+ else:
+ devtables = self.d.getVar('IMAGE_DEVICE_TABLES', True)
+ if devtables is None:
+ devtables = 'files/device_table-minimal.txt'
+ for devtable in devtables.split():
+ devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH', True), devtable))
+
+ for devtable in devtable_list:
+ self._exec_shell_cmd(["makedevs", "-r",
+ self.image_rootfs, "-D", devtable])
+
+
+class RpmRootfs(Rootfs):
+ def __init__(self, d, manifest_dir):
+ super(RpmRootfs, self).__init__(d)
+ self.log_check_regex = '(unpacking of archive failed|Cannot find package'\
+ '|exit 1|ERROR: |Error: |Error |ERROR '\
+ '|Failed |Failed: |Failed$|Failed\(\d+\):)'
+ self.manifest = RpmManifest(d, manifest_dir)
+
+ self.pm = RpmPM(d,
+ d.getVar('IMAGE_ROOTFS', True),
+ self.d.getVar('TARGET_VENDOR', True)
+ )
+
+ self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN', True)
+ if self.inc_rpm_image_gen != "1":
+ bb.utils.remove(self.image_rootfs, True)
+ else:
+ self.pm.recovery_packaging_data()
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
+
+ self.pm.create_configs()
+
+ '''
+ When rpm incremental image generation is enabled, this removes the
+ unneeded packages by comparing the new install solution manifest with
+ the old installed manifest.
+ '''
+ def _create_incremental(self, pkgs_initial_install):
+ if self.inc_rpm_image_gen == "1":
+
+ pkgs_to_install = list()
+ for pkg_type in pkgs_initial_install:
+ pkgs_to_install += pkgs_initial_install[pkg_type]
+
+ installed_manifest = self.pm.load_old_install_solution()
+ solution_manifest = self.pm.dump_install_solution(pkgs_to_install)
+
+ pkg_to_remove = list()
+ for pkg in installed_manifest:
+ if pkg not in solution_manifest:
+ pkg_to_remove.append(pkg)
+
+ self.pm.update()
+
+ bb.note('incremental update -- upgrade packages in place')
+ self.pm.upgrade()
+ if pkg_to_remove != []:
+ bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
+ self.pm.remove(pkg_to_remove)
+
+ def _create(self):
+ pkgs_to_install = self.manifest.parse_initial_manifest()
+ rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS', True)
+ rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS', True)
+
+ # update PM index files
+ self.pm.write_index()
+
+ execute_pre_post_process(self.d, rpm_pre_process_cmds)
+
+ self.pm.dump_all_available_pkgs()
+
+ if self.inc_rpm_image_gen == "1":
+ self._create_incremental(pkgs_to_install)
+
+ self.pm.update()
+
+ pkgs = []
+ pkgs_attempt = []
+ for pkg_type in pkgs_to_install:
+ if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
+ pkgs_attempt += pkgs_to_install[pkg_type]
+ else:
+ pkgs += pkgs_to_install[pkg_type]
+
+ self.pm.install(pkgs)
+
+ self.pm.install(pkgs_attempt, True)
+
+ self.pm.install_complementary()
+
+ self._setup_dbg_rootfs(['/etc/rpm', '/var/lib/rpm', '/var/lib/smart'])
+
+ execute_pre_post_process(self.d, rpm_post_process_cmds)
+
+ self._log_check()
+
+ if self.inc_rpm_image_gen == "1":
+ self.pm.backup_packaging_data()
+
+ self.pm.rpm_setup_smart_target_config()
+
+ @staticmethod
+ def _depends_list():
+ return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS',
+ 'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH']
+
+ def _get_delayed_postinsts(self):
+ postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
+ if os.path.isdir(postinst_dir):
+ files = os.listdir(postinst_dir)
+ for f in files:
+ bb.note('Delayed package scriptlet: %s' % f)
+ return files
+
+ return None
+
+ def _save_postinsts(self):
+ # This is just a stub. For RPM, the failed postinstalls are
+ # already saved in /etc/rpm-postinsts.
+ pass
+
+ def _log_check_error(self):
+ r = re.compile('(unpacking of archive failed|Cannot find package|exit 1|ERR|Fail)')
+ log_path = self.d.expand("${T}/log.do_rootfs")
+ with open(log_path, 'r') as log:
+ found_error = 0
+ message = "\n"
+ for line in log.read().split('\n'):
+ if 'log_check' in line:
+ continue
+ # sh -x may emit code which isn't actually executed
+ if line.startswith('+'):
+ continue
+
+ m = r.search(line)
+ if m:
+ found_error = 1
+ bb.warn('log_check: There were error messages in the logfile')
+ bb.warn('log_check: Matched keyword: [%s]\n\n' % m.group())
+
+ if 1 <= found_error <= 5:
+ message += line + '\n'
+ found_error += 1
+
+ if found_error == 6:
+ bb.fatal(message)
+
+ def _log_check(self):
+ self._log_check_warn()
+ self._log_check_error()
+
+ def _handle_intercept_failure(self, registered_pkgs):
+ rpm_postinsts_dir = self.image_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
+ bb.utils.mkdirhier(rpm_postinsts_dir)
+
+ # Save the package postinstalls in /etc/rpm-postinsts
+ for pkg in registered_pkgs.split():
+ self.pm.save_rpmpostinst(pkg)
+
+ def _cleanup(self):
+ # during the execution of postprocess commands, rpm is called several
+ # times to get the files installed, dependencies, etc. This creates the
+ # __db.00* (Berkeley DB files that hold locks, rpm specific environment
+ # settings, etc.), that should not get into the final rootfs
+ self.pm.unlock_rpm_db()
+ if os.path.isdir(self.pm.install_dir_path + "/tmp") and not os.listdir(self.pm.install_dir_path + "/tmp"):
+ bb.utils.remove(self.pm.install_dir_path + "/tmp", True)
+ if os.path.isdir(self.pm.install_dir_path) and not os.listdir(self.pm.install_dir_path):
+ bb.utils.remove(self.pm.install_dir_path, True)
+
+class DpkgOpkgRootfs(Rootfs):
+ def __init__(self, d):
+ super(DpkgOpkgRootfs, self).__init__(d)
+
+ def _get_pkgs_postinsts(self, status_file):
+ def _get_pkg_depends_list(pkg_depends):
+ pkg_depends_list = []
+ # filter version requirements like libc (>= 1.1)
+ for dep in pkg_depends.split(', '):
+ m_dep = re.match("^(.*) \(.*\)$", dep)
+ if m_dep:
+ dep = m_dep.group(1)
+ pkg_depends_list.append(dep)
+
+ return pkg_depends_list
+
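+ # Illustrative dpkg status stanza (hypothetical package names) that the
+ # loop below matches:
+ # Package: foo
+ # Status: install ok unpacked
+ # Depends: libc6 (>= 2.19), bar
+ # yielding pkgs['foo'] = ['libc6', 'bar'] before the cross-filter at the
+ # end of this method.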
+ pkgs = {}
+ pkg_name = ""
+ pkg_status_match = False
+ pkg_depends = ""
+
+ with open(status_file) as status:
+ data = status.read()
+ for line in data.split('\n'):
+ m_pkg = re.match("^Package: (.*)", line)
+ m_status = re.match("^Status:.*unpacked", line)
+ m_depends = re.match("^Depends: (.*)", line)
+
+ if m_pkg is not None:
+ if pkg_name and pkg_status_match:
+ pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
+
+ pkg_name = m_pkg.group(1)
+ pkg_status_match = False
+ pkg_depends = ""
+ elif m_status is not None:
+ pkg_status_match = True
+ elif m_depends is not None:
+ pkg_depends = m_depends.group(1)
+
+ # remove package dependencies not in postinsts
+ pkg_names = pkgs.keys()
+ for pkg_name in pkg_names:
+ deps = pkgs[pkg_name][:]
+
+ for d in deps:
+ if d not in pkg_names:
+ pkgs[pkg_name].remove(d)
+
+ return pkgs
+
+ def _get_delayed_postinsts_common(self, status_file):
+ def _dep_resolve(graph, node, resolved, seen):
+ seen.append(node)
+
+ for edge in graph[node]:
+ if edge not in resolved:
+ if edge in seen:
+ raise RuntimeError("Packages %s and %s have " \
+ "a circular dependency in postinsts scripts." \
+ % (node, edge))
+ _dep_resolve(graph, edge, resolved, seen)
+
+ resolved.append(node)
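+ # Illustrative walk (hypothetical graph): for
+ # {'root': ['a', 'b'], 'a': ['b'], 'b': []}
+ # _dep_resolve(graph, 'root', resolved=[], seen=[]) fills 'resolved'
+ # in dependency-first order: ['b', 'a', 'root'].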
+
+ pkg_list = []
+
+ pkgs = None
+ if not self.d.getVar('PACKAGE_INSTALL', True).strip():
+ bb.note("Building empty image")
+ else:
+ pkgs = self._get_pkgs_postinsts(status_file)
+ if pkgs:
+ root = "__packagegroup_postinst__"
+ pkgs[root] = pkgs.keys()
+ _dep_resolve(pkgs, root, pkg_list, [])
+ pkg_list.remove(root)
+
+ if len(pkg_list) == 0:
+ return None
+
+ return pkg_list
+
+ def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
+ num = 0
+ for p in self._get_delayed_postinsts():
+ bb.utils.mkdirhier(dst_postinst_dir)
+
+ if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
+ shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
+ os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
+
+ num += 1
+
+class DpkgRootfs(DpkgOpkgRootfs):
+ def __init__(self, d, manifest_dir):
+ super(DpkgRootfs, self).__init__(d)
+ self.log_check_regex = '^E:'
+ self.log_check_expected_errors_regexes = \
+ [
+ "^E: Unmet dependencies."
+ ]
+
+ bb.utils.remove(self.image_rootfs, True)
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
+ self.manifest = DpkgManifest(d, manifest_dir)
+ self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS', True),
+ d.getVar('PACKAGE_ARCHS', True),
+ d.getVar('DPKG_ARCH', True))
+
+
+ def _create(self):
+ pkgs_to_install = self.manifest.parse_initial_manifest()
+ deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS', True)
+ deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS', True)
+
+ alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
+ bb.utils.mkdirhier(alt_dir)
+
+ # update PM index files
+ self.pm.write_index()
+
+ execute_pre_post_process(self.d, deb_pre_process_cmds)
+
+ self.pm.update()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ self.pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ self.pm.install_complementary()
+
+ self._setup_dbg_rootfs(['/var/lib/dpkg'])
+
+ self.pm.fix_broken_dependencies()
+
+ self.pm.mark_packages("installed")
+
+ self.pm.run_pre_post_installs()
+
+ execute_pre_post_process(self.d, deb_post_process_cmds)
+
+ @staticmethod
+ def _depends_list():
+ return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMANDS']
+
+ def _get_delayed_postinsts(self):
+ status_file = self.image_rootfs + "/var/lib/dpkg/status"
+ return self._get_delayed_postinsts_common(status_file)
+
+ def _save_postinsts(self):
+ dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
+ src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
+ return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
+
+ def _handle_intercept_failure(self, registered_pkgs):
+ self.pm.mark_packages("unpacked", registered_pkgs.split())
+
+ def _log_check(self):
+ self._log_check_warn()
+ self._log_check_error()
+
+ def _cleanup(self):
+ pass
+
+
+class OpkgRootfs(DpkgOpkgRootfs):
+ def __init__(self, d, manifest_dir):
+ super(OpkgRootfs, self).__init__(d)
+ self.log_check_regex = '(exit 1|Collected errors)'
+
+ self.manifest = OpkgManifest(d, manifest_dir)
+ self.opkg_conf = self.d.getVar("IPKGCONF_TARGET", True)
+ self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True)
+
+ self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN', True) or ""
+ if self._remove_old_rootfs():
+ bb.utils.remove(self.image_rootfs, True)
+ self.pm = OpkgPM(d,
+ self.image_rootfs,
+ self.opkg_conf,
+ self.pkg_archs)
+ else:
+ self.pm = OpkgPM(d,
+ self.image_rootfs,
+ self.opkg_conf,
+ self.pkg_archs)
+ self.pm.recover_packaging_data()
+
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
+
+ def _prelink_file(self, root_dir, filename):
+ bb.note('prelink %s in %s' % (filename, root_dir))
+ prelink_cfg = oe.path.join(root_dir,
+ self.d.expand('${sysconfdir}/prelink.conf'))
+ if not os.path.exists(prelink_cfg):
+ shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'),
+ prelink_cfg)
+
+ cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink')
+ self._exec_shell_cmd([cmd_prelink,
+ '--root',
+ root_dir,
+ '-amR',
+ '-N',
+ '-c',
+ self.d.expand('${sysconfdir}/prelink.conf')])
+
+ '''
+ Compare two files with the same key twice to see if they are equal.
+ If they are not equal, it means they are duplicated and come from
+ different packages.
+ 1st: Compare them directly;
+ 2nd: When incremental image creation is enabled, one of the
+ files may have been prelinked during a previous image
+ creation and thus changed, so we need to prelink the
+ other one and compare them again.
+ '''
+ def _file_equal(self, key, f1, f2):
+
+ # Both of them are not prelinked
+ if filecmp.cmp(f1, f2):
+ return True
+
+ if self.image_rootfs not in f1:
+ self._prelink_file(f1.replace(key, ''), f1)
+
+ if self.image_rootfs not in f2:
+ self._prelink_file(f2.replace(key, ''), f2)
+
+ # Both of them are prelinked
+ if filecmp.cmp(f1, f2):
+ return True
+
+ # Not equal
+ return False
+
+ """
+ This function was reused from the old implementation.
+ See commit: "image.bbclass: Added variables for multilib support." by
+ Lianhao Lu.
+ """
+ def _multilib_sanity_test(self, dirs):
+
+ allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP", True)
+ if allow_replace is None:
+ allow_replace = ""
+
+ allow_rep = re.compile(re.sub("\|$", "", allow_replace))
+ error_prompt = "Multilib check error:"
+
+ files = {}
+ for dir in dirs:
+ for root, subfolders, subfiles in os.walk(dir):
+ for file in subfiles:
+ item = os.path.join(root, file)
+ key = str(os.path.join("/", os.path.relpath(item, dir)))
+
+ valid = True
+ if key in files:
+ # check whether the file is allowed to be replaced
+ if allow_rep.match(key):
+ valid = True
+ else:
+ if os.path.exists(files[key]) and \
+ os.path.exists(item) and \
+ not self._file_equal(key, files[key], item):
+ valid = False
+ bb.fatal("%s duplicate files %s %s is not the same\n" %
+ (error_prompt, item, files[key]))
+
+ # passed the check, add to the list
+ if valid:
+ files[key] = item
+
+ def _multilib_test_install(self, pkgs):
+ ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS", True)
+ bb.utils.mkdirhier(ml_temp)
+
+ dirs = [self.image_rootfs]
+
+ for variant in self.d.getVar("MULTILIB_VARIANTS", True).split():
+ ml_target_rootfs = os.path.join(ml_temp, variant)
+
+ bb.utils.remove(ml_target_rootfs, True)
+
+ ml_opkg_conf = os.path.join(ml_temp,
+ variant + "-" + os.path.basename(self.opkg_conf))
+
+ ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs)
+
+ ml_pm.update()
+ ml_pm.install(pkgs)
+
+ dirs.append(ml_target_rootfs)
+
+ self._multilib_sanity_test(dirs)
+
+ '''
+ When ipk incremental image generation is enabled, this removes the
+ unneeded packages by comparing the old full manifest from the previous
+ image with the new full manifest for the current image.
+ '''
+ def _remove_extra_packages(self, pkgs_initial_install):
+ if self.inc_opkg_image_gen == "1":
+ # Parse full manifest in previous existing image creation session
+ old_full_manifest = self.manifest.parse_full_manifest()
+
+ # Create full manifest for the current image session, the old one
+ # will be replaced by the new one.
+ self.manifest.create_full(self.pm)
+
+ # Parse full manifest in current image creation session
+ new_full_manifest = self.manifest.parse_full_manifest()
+
+ pkg_to_remove = list()
+ for pkg in old_full_manifest:
+ if pkg not in new_full_manifest:
+ pkg_to_remove.append(pkg)
+
+ if pkg_to_remove != []:
+ bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
+ self.pm.remove(pkg_to_remove)
+
+ '''
+ Compare with the previous image creation session; if certain conditions
+ are triggered, the old image will be removed.
+ The conditions: any of PACKAGE_EXCLUDE, NO_RECOMMENDATIONS
+ or BAD_RECOMMENDATIONS has been changed.
+ '''
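+ # The vars_list file written below holds one line of the form (values
+ # hypothetical): "pkg-a pkg-b:1:pkg-c\n", i.e.
+ # BAD_RECOMMENDATIONS:NO_RECOMMENDATIONS:PACKAGE_EXCLUDE.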
+ def _remove_old_rootfs(self):
+ if self.inc_opkg_image_gen != "1":
+ return True
+
+ vars_list_file = self.d.expand('${T}/vars_list')
+
+ old_vars_list = ""
+ if os.path.exists(vars_list_file):
+ with open(vars_list_file, 'r') as f:
+ old_vars_list = f.read()
+
+ new_vars_list = '%s:%s:%s\n' % \
+ ((self.d.getVar('BAD_RECOMMENDATIONS', True) or '').strip(),
+ (self.d.getVar('NO_RECOMMENDATIONS', True) or '').strip(),
+ (self.d.getVar('PACKAGE_EXCLUDE', True) or '').strip())
+ with open(vars_list_file, 'w') as f:
+ f.write(new_vars_list)
+
+ if old_vars_list != new_vars_list:
+ return True
+
+ return False
+
+ def _create(self):
+ pkgs_to_install = self.manifest.parse_initial_manifest()
+ opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS', True)
+ opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS', True)
+
+ # update PM index files, unless users provide their own feeds
+ if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
+ self.pm.write_index()
+
+ execute_pre_post_process(self.d, opkg_pre_process_cmds)
+
+ self.pm.update()
+
+ self.pm.handle_bad_recommendations()
+
+ if self.inc_opkg_image_gen == "1":
+ self._remove_extra_packages(pkgs_to_install)
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ # For multilib, we perform a sanity test before the final install.
+ # If the sanity test fails, it calls bb.fatal() automatically
+ # and the installation stops.
+ if pkg_type == Manifest.PKG_TYPE_MULTILIB:
+ self._multilib_test_install(pkgs_to_install[pkg_type])
+
+ self.pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ self.pm.install_complementary()
+
+ self._setup_dbg_rootfs(['/var/lib/opkg'])
+
+ execute_pre_post_process(self.d, opkg_post_process_cmds)
+
+ if self.inc_opkg_image_gen == "1":
+ self.pm.backup_packaging_data()
+
+ @staticmethod
+ def _depends_list():
+ return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS']
+
+ def _get_delayed_postinsts(self):
+ status_file = os.path.join(self.image_rootfs,
+ self.d.getVar('OPKGLIBDIR', True).strip('/'),
+ "opkg", "status")
+ return self._get_delayed_postinsts_common(status_file)
+
+ def _save_postinsts(self):
+ dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
+ src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
+ return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
+
+ def _handle_intercept_failure(self, registered_pkgs):
+ self.pm.mark_packages("unpacked", registered_pkgs.split())
+
+ def _log_check(self):
+ self._log_check_warn()
+ self._log_check_error()
+
+ def _cleanup(self):
+ self.pm.remove_lists()
+
+def get_class_for_type(imgtype):
+ return {"rpm": RpmRootfs,
+ "ipk": OpkgRootfs,
+ "deb": DpkgRootfs}[imgtype]
+
+def variable_depends(d, manifest_dir=None):
+ img_type = d.getVar('IMAGE_PKGTYPE', True)
+ cls = get_class_for_type(img_type)
+ return cls._depends_list()
+
+def create_rootfs(d, manifest_dir=None):
+ env_bkp = os.environ.copy()
+
+ img_type = d.getVar('IMAGE_PKGTYPE', True)
+ if img_type == "rpm":
+ RpmRootfs(d, manifest_dir).create()
+ elif img_type == "ipk":
+ OpkgRootfs(d, manifest_dir).create()
+ elif img_type == "deb":
+ DpkgRootfs(d, manifest_dir).create()
+
+ os.environ.clear()
+ os.environ.update(env_bkp)
+
+
+def image_list_installed_packages(d, rootfs_dir=None):
+ if not rootfs_dir:
+ rootfs_dir = d.getVar('IMAGE_ROOTFS', True)
+
+ img_type = d.getVar('IMAGE_PKGTYPE', True)
+ if img_type == "rpm":
+ return RpmPkgsList(d, rootfs_dir).list_pkgs()
+ elif img_type == "ipk":
+ return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET", True)).list_pkgs()
+ elif img_type == "deb":
+ return DpkgPkgsList(d, rootfs_dir).list_pkgs()
+
+if __name__ == "__main__":
+ # We should be able to run this as a standalone script, from outside
+ # the bitbake environment.
+ # TBD
+ pass
diff --git a/import-layers/yocto-poky/meta/lib/oe/sdk.py b/import-layers/yocto-poky/meta/lib/oe/sdk.py
new file mode 100644
index 000000000..f15fbdb36
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/sdk.py
@@ -0,0 +1,367 @@
+from abc import ABCMeta, abstractmethod
+from oe.utils import execute_pre_post_process
+from oe.manifest import *
+from oe.package_manager import *
+import os
+import shutil
+import glob
+import traceback
+
+
+class Sdk(object):
+ __metaclass__ = ABCMeta
+
+ def __init__(self, d, manifest_dir):
+ self.d = d
+ self.sdk_output = self.d.getVar('SDK_OUTPUT', True)
+ self.sdk_native_path = self.d.getVar('SDKPATHNATIVE', True).strip('/')
+ self.target_path = self.d.getVar('SDKTARGETSYSROOT', True).strip('/')
+ self.sysconfdir = self.d.getVar('sysconfdir', True).strip('/')
+
+ self.sdk_target_sysroot = os.path.join(self.sdk_output, self.target_path)
+ self.sdk_host_sysroot = self.sdk_output
+
+ if manifest_dir is None:
+ self.manifest_dir = self.d.getVar("SDK_DIR", True)
+ else:
+ self.manifest_dir = manifest_dir
+
+ self.remove(self.sdk_output, True)
+
+ self.install_order = Manifest.INSTALL_ORDER
+
+ @abstractmethod
+ def _populate(self):
+ pass
+
+ def populate(self):
+ self.mkdirhier(self.sdk_output)
+
+ # call backend dependent implementation
+ self._populate()
+
+ # Don't ship any libGL in the SDK
+ self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
+ self.d.getVar('libdir_nativesdk', True).strip('/'),
+ "libGL*"))
+
+ # Fix or remove broken .la files
+ self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
+ self.d.getVar('libdir_nativesdk', True).strip('/'),
+ "*.la"))
+
+ # Link the ld.so.cache file into the host's filesystem
+ link_name = os.path.join(self.sdk_output, self.sdk_native_path,
+ self.sysconfdir, "ld.so.cache")
+ self.mkdirhier(os.path.dirname(link_name))
+ os.symlink("/etc/ld.so.cache", link_name)
+
+ execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND', True))
+
+ def movefile(self, sourcefile, destdir):
+ try:
+ # FIXME: this check of movefile's return code to None should be
+ # fixed within the function to use only exceptions to signal when
+ # something goes wrong
+ if bb.utils.movefile(sourcefile, destdir) is None:
+ raise OSError("moving %s to %s failed"
+ %(sourcefile, destdir))
+ #FIXME: using umbrella exc catching because bb.utils method raises it
+ except Exception as e:
+ bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
+ bb.error("unable to place %s in final SDK location" % sourcefile)
+
+ def mkdirhier(self, dirpath):
+ try:
+ bb.utils.mkdirhier(dirpath)
+ except OSError as e:
+ bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
+ bb.fatal("cannot make dir for SDK: %s" % dirpath)
+
+ def remove(self, path, recurse=False):
+ try:
+ bb.utils.remove(path, recurse)
+ #FIXME: using umbrella exc catching because bb.utils method raises it
+ except Exception as e:
+ bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
+ bb.warn("cannot remove SDK dir: %s" % path)
+
+class RpmSdk(Sdk):
+ def __init__(self, d, manifest_dir=None):
+ super(RpmSdk, self).__init__(d, manifest_dir)
+
+ self.target_manifest = RpmManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_TARGET)
+ self.host_manifest = RpmManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_HOST)
+
+ target_providename = ['/bin/sh',
+ '/bin/bash',
+ '/usr/bin/env',
+ '/usr/bin/perl',
+ 'pkgconfig'
+ ]
+
+ self.target_pm = RpmPM(d,
+ self.sdk_target_sysroot,
+ self.d.getVar('TARGET_VENDOR', True),
+ 'target',
+ target_providename
+ )
+
+ sdk_providename = ['/bin/sh',
+ '/bin/bash',
+ '/usr/bin/env',
+ '/usr/bin/perl',
+ 'pkgconfig',
+ 'libGL.so()(64bit)',
+ 'libGL.so'
+ ]
+
+ self.host_pm = RpmPM(d,
+ self.sdk_host_sysroot,
+ self.d.getVar('SDK_VENDOR', True),
+ 'host',
+ sdk_providename,
+ "SDK_PACKAGE_ARCHS",
+ "SDK_OS"
+ )
+
+ def _populate_sysroot(self, pm, manifest):
+ pkgs_to_install = manifest.parse_initial_manifest()
+
+ pm.create_configs()
+ pm.write_index()
+ pm.dump_all_available_pkgs()
+ pm.update()
+
+ pkgs = []
+ pkgs_attempt = []
+ for pkg_type in pkgs_to_install:
+ if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
+ pkgs_attempt += pkgs_to_install[pkg_type]
+ else:
+ pkgs += pkgs_to_install[pkg_type]
+
+ pm.install(pkgs)
+
+ pm.install(pkgs_attempt, True)
+
+ def _populate(self):
+ bb.note("Installing TARGET packages")
+ self._populate_sysroot(self.target_pm, self.target_manifest)
+
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
+
+ self.target_pm.remove_packaging_data()
+
+ bb.note("Installing NATIVESDK packages")
+ self._populate_sysroot(self.host_pm, self.host_manifest)
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
+
+ self.host_pm.remove_packaging_data()
+
+ # Move host RPM library data
+ native_rpm_state_dir = os.path.join(self.sdk_output,
+ self.sdk_native_path,
+ self.d.getVar('localstatedir_nativesdk', True).strip('/'),
+ "lib",
+ "rpm"
+ )
+ self.mkdirhier(native_rpm_state_dir)
+ for f in glob.glob(os.path.join(self.sdk_output,
+ "var",
+ "lib",
+ "rpm",
+ "*")):
+ self.movefile(f, native_rpm_state_dir)
+
+ self.remove(os.path.join(self.sdk_output, "var"), True)
+
+ # Move host sysconfig data
+ native_sysconf_dir = os.path.join(self.sdk_output,
+ self.sdk_native_path,
+ self.d.getVar('sysconfdir',
+ True).strip('/'),
+ )
+ self.mkdirhier(native_sysconf_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "etc", "*")):
+ self.movefile(f, native_sysconf_dir)
+ self.remove(os.path.join(self.sdk_output, "etc"), True)
+
+
+class OpkgSdk(Sdk):
+ def __init__(self, d, manifest_dir=None):
+ super(OpkgSdk, self).__init__(d, manifest_dir)
+
+ self.target_conf = self.d.getVar("IPKGCONF_TARGET", True)
+ self.host_conf = self.d.getVar("IPKGCONF_SDK", True)
+
+ self.target_manifest = OpkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_TARGET)
+ self.host_manifest = OpkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_HOST)
+
+ self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
+ self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))
+
+ self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
+ self.d.getVar("SDK_PACKAGE_ARCHS", True))
+
+ def _populate_sysroot(self, pm, manifest):
+ pkgs_to_install = manifest.parse_initial_manifest()
+
+ if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
+ pm.write_index()
+
+ pm.update()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ def _populate(self):
+ bb.note("Installing TARGET packages")
+ self._populate_sysroot(self.target_pm, self.target_manifest)
+
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
+
+ self.target_pm.remove_packaging_data()
+
+ bb.note("Installing NATIVESDK packages")
+ self._populate_sysroot(self.host_pm, self.host_manifest)
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
+
+ self.host_pm.remove_packaging_data()
+
+ target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
+ host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)
+
+ self.mkdirhier(target_sysconfdir)
+ shutil.copy(self.target_conf, target_sysconfdir)
+ os.chmod(os.path.join(target_sysconfdir,
+ os.path.basename(self.target_conf)), 0644)
+
+ self.mkdirhier(host_sysconfdir)
+ shutil.copy(self.host_conf, host_sysconfdir)
+ os.chmod(os.path.join(host_sysconfdir,
+ os.path.basename(self.host_conf)), 0644)
+
+ native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
+ self.d.getVar('localstatedir_nativesdk', True).strip('/'),
+ "lib", "opkg")
+ self.mkdirhier(native_opkg_state_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
+ self.movefile(f, native_opkg_state_dir)
+
+ self.remove(os.path.join(self.sdk_output, "var"), True)
+
+
+class DpkgSdk(Sdk):
+ def __init__(self, d, manifest_dir=None):
+ super(DpkgSdk, self).__init__(d, manifest_dir)
+
+ self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt")
+ self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt-sdk")
+
+ self.target_manifest = DpkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_TARGET)
+ self.host_manifest = DpkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_HOST)
+
+ self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
+ self.d.getVar("PACKAGE_ARCHS", True),
+ self.d.getVar("DPKG_ARCH", True),
+ self.target_conf_dir)
+
+ self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
+ self.d.getVar("SDK_PACKAGE_ARCHS", True),
+ self.d.getVar("DEB_SDK_ARCH", True),
+ self.host_conf_dir)
+
+ def _copy_apt_dir_to(self, dst_dir):
+ staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE", True)
+
+ self.remove(dst_dir, True)
+
+ shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)
+
+ def _populate_sysroot(self, pm, manifest):
+ pkgs_to_install = manifest.parse_initial_manifest()
+
+ pm.write_index()
+ pm.update()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ def _populate(self):
+ bb.note("Installing TARGET packages")
+ self._populate_sysroot(self.target_pm, self.target_manifest)
+
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
+
+ self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
+
+ bb.note("Installing NATIVESDK packages")
+ self._populate_sysroot(self.host_pm, self.host_manifest)
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
+
+ self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
+ "etc", "apt"))
+
+ native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
+ "var", "lib", "dpkg")
+ self.mkdirhier(native_dpkg_state_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
+ self.movefile(f, native_dpkg_state_dir)
+ self.remove(os.path.join(self.sdk_output, "var"), True)
+
+
+
+def sdk_list_installed_packages(d, target, rootfs_dir=None):
+ if rootfs_dir is None:
+ sdk_output = d.getVar('SDK_OUTPUT', True)
+ target_path = d.getVar('SDKTARGETSYSROOT', True).strip('/')
+
+ rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True]
+
+ img_type = d.getVar('IMAGE_PKGTYPE', True)
+ if img_type == "rpm":
+ arch_var = ["SDK_PACKAGE_ARCHS", None][target is True]
+ os_var = ["SDK_OS", None][target is True]
+ return RpmPkgsList(d, rootfs_dir, arch_var, os_var).list_pkgs()
+ elif img_type == "ipk":
+ conf_file_var = ["IPKGCONF_SDK", "IPKGCONF_TARGET"][target is True]
+ return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var, True)).list_pkgs()
+ elif img_type == "deb":
+ return DpkgPkgsList(d, rootfs_dir).list_pkgs()
+
+def populate_sdk(d, manifest_dir=None):
+ env_bkp = os.environ.copy()
+
+ img_type = d.getVar('IMAGE_PKGTYPE', True)
+ if img_type == "rpm":
+ RpmSdk(d, manifest_dir).populate()
+ elif img_type == "ipk":
+ OpkgSdk(d, manifest_dir).populate()
+ elif img_type == "deb":
+ DpkgSdk(d, manifest_dir).populate()
+
+ os.environ.clear()
+ os.environ.update(env_bkp)
+
+if __name__ == "__main__":
+ pass
diff --git a/import-layers/yocto-poky/meta/lib/oe/sstatesig.py b/import-layers/yocto-poky/meta/lib/oe/sstatesig.py
new file mode 100644
index 000000000..01dce660c
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/sstatesig.py
@@ -0,0 +1,356 @@
+import os
+import bb.siggen
+
+def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
+ # Return True if we should keep the dependency, False to drop it
+ def isNative(x):
+ return x.endswith("-native")
+ def isCross(x):
+ return "-cross-" in x
+ def isNativeSDK(x):
+ return x.startswith("nativesdk-")
+ def isKernel(fn):
+ inherits = " ".join(dataCache.inherits[fn])
+ return inherits.find("/module-base.bbclass") != -1 or inherits.find("/linux-kernel-base.bbclass") != -1
+ def isPackageGroup(fn):
+ inherits = " ".join(dataCache.inherits[fn])
+ return "/packagegroup.bbclass" in inherits
+ def isAllArch(fn):
+ inherits = " ".join(dataCache.inherits[fn])
+ return "/allarch.bbclass" in inherits
+ def isImage(fn):
+ return "/image.bbclass" in " ".join(dataCache.inherits[fn])
+
+ # Always include our own inter-task dependencies
+ if recipename == depname:
+ return True
+
+ # Quilt (patch application) changing isn't likely to affect anything
+ excludelist = ['quilt-native', 'subversion-native', 'git-native']
+ if depname in excludelist and recipename != depname:
+ return False
+
+ # Exclude well defined recipe->dependency
+ if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
+ return False
+
+ # Don't change native/cross/nativesdk recipe dependencies any further
+ if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename):
+ return True
+
+ # Only target packages beyond here
+
+ # allarch packagegroups are assumed to have well behaved names which don't change between architectures/tunes
+ if isPackageGroup(fn) and isAllArch(fn) and not isNative(depname):
+ return False
+
+ # Exclude well defined machine specific configurations which don't change ABI
+ if depname in siggen.abisaferecipes and not isImage(fn):
+ return False
+
+ # Kernel modules are well namespaced. We don't want to depend on the kernel's checksum
+ # if we're just doing an RRECOMMENDS_xxx = "kernel-module-*", not least because the checksum
+ # is machine specific.
+ # Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
+ # and we recommend a kernel-module, we exclude the dependency.
+ depfn = dep.rsplit(".", 1)[0]
+ if dataCache and isKernel(depfn) and not isKernel(fn):
+ for pkg in dataCache.runrecs[fn]:
+ if " ".join(dataCache.runrecs[fn][pkg]).find("kernel-module-") != -1:
+ return False
+
+ # Default to keep dependencies
+ return True
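+# Worked example (hypothetical recipe): a target recipe "foo" that depends
+# on quilt-native hits the excludelist above and returns False, so
+# quilt-native upgrades do not invalidate foo's task signatures.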
+
+def sstate_lockedsigs(d):
+ sigs = {}
+ types = (d.getVar("SIGGEN_LOCKEDSIGS_TYPES", True) or "").split()
+ for t in types:
+ siggen_lockedsigs_var = "SIGGEN_LOCKEDSIGS_%s" % t
+ lockedsigs = (d.getVar(siggen_lockedsigs_var, True) or "").split()
+ for ls in lockedsigs:
+ pn, task, h = ls.split(":", 2)
+ if pn not in sigs:
+ sigs[pn] = {}
+ sigs[pn][task] = [h, siggen_lockedsigs_var]
+ return sigs
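+# Illustrative input/output (hypothetical values): with
+#   SIGGEN_LOCKEDSIGS_TYPES = "t-x86"
+#   SIGGEN_LOCKEDSIGS_t-x86 = "zlib:do_compile:abc123"
+# this returns {'zlib': {'do_compile': ['abc123', 'SIGGEN_LOCKEDSIGS_t-x86']}}.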
+
+class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
+ name = "OEBasic"
+ def init_rundepcheck(self, data):
+ self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
+ self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
+ def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
+ return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
+
+class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
+ name = "OEBasicHash"
+ def init_rundepcheck(self, data):
+ self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
+ self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
+ self.lockedsigs = sstate_lockedsigs(data)
+ self.lockedhashes = {}
+ self.lockedpnmap = {}
+ self.lockedhashfn = {}
+ self.machine = data.getVar("MACHINE", True)
+ self.mismatch_msgs = []
+ self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES", True) or
+ "").split()
+ self.unlockedrecipes = { k: "" for k in self.unlockedrecipes }
+
+ def tasks_resolved(self, virtmap, virtpnmap, dataCache):
+ # Translate virtual/xxx entries to PN values
+ newabisafe = []
+ for a in self.abisaferecipes:
+ if a in virtpnmap:
+ newabisafe.append(virtpnmap[a])
+ else:
+ newabisafe.append(a)
+ self.abisaferecipes = newabisafe
+ newsafedeps = []
+ for a in self.saferecipedeps:
+ a1, a2 = a.split("->")
+ if a1 in virtpnmap:
+ a1 = virtpnmap[a1]
+ if a2 in virtpnmap:
+ a2 = virtpnmap[a2]
+ newsafedeps.append(a1 + "->" + a2)
+ self.saferecipedeps = newsafedeps
+
+ def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
+ return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
+
+ def get_taskdata(self):
+ data = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskdata()
+ return (data, self.lockedpnmap, self.lockedhashfn)
+
+ def set_taskdata(self, data):
+ coredata, self.lockedpnmap, self.lockedhashfn = data
+ super(bb.siggen.SignatureGeneratorBasicHash, self).set_taskdata(coredata)
+
+ def dump_sigs(self, dataCache, options):
+ self.dump_lockedsigs()
+ return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)
+
+ def get_taskhash(self, fn, task, deps, dataCache):
+ h = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskhash(fn, task, deps, dataCache)
+
+ recipename = dataCache.pkg_fn[fn]
+ self.lockedpnmap[fn] = recipename
+ self.lockedhashfn[fn] = dataCache.hashfn[fn]
+
+ unlocked = False
+ if recipename in self.unlockedrecipes:
+ unlocked = True
+ else:
+ def recipename_from_dep(dep):
+ # The dep entry will look something like
+ # /path/path/recipename.bb.task, virtual:native:/p/foo.bb.task,
+ # ...
+ fn = dep.rsplit('.', 1)[0]
+ return dataCache.pkg_fn[fn]
+
+ # If any unlocked recipe is in the direct dependencies then the
+ # current recipe should be unlocked as well.
+ depnames = [ recipename_from_dep(x) for x in deps ]
+ if any(x in y for y in depnames for x in self.unlockedrecipes):
+ self.unlockedrecipes[recipename] = ''
+ unlocked = True
+
+ if not unlocked and recipename in self.lockedsigs:
+ if task in self.lockedsigs[recipename]:
+ k = fn + "." + task
+ h_locked = self.lockedsigs[recipename][task][0]
+ var = self.lockedsigs[recipename][task][1]
+ self.lockedhashes[k] = h_locked
+ self.taskhash[k] = h_locked
+ #bb.warn("Using %s %s %s" % (recipename, task, h))
+
+ if h != h_locked:
+ self.mismatch_msgs.append('The %s:%s sig is computed to be %s, but the sig is locked to %s in %s'
+ % (recipename, task, h, h_locked, var))
+
+ return h_locked
+ #bb.warn("%s %s %s" % (recipename, task, h))
+ return h
+
+ def dump_sigtask(self, fn, task, stampbase, runtime):
+ k = fn + "." + task
+ if k in self.lockedhashes:
+ return
+ super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigtask(fn, task, stampbase, runtime)
+
+ def dump_lockedsigs(self, sigfile=None, taskfilter=None):
+ if not sigfile:
+ sigfile = os.getcwd() + "/locked-sigs.inc"
+
+ bb.plain("Writing locked sigs to %s" % sigfile)
+ types = {}
+ for k in self.runtaskdeps:
+ if taskfilter:
+ if k not in taskfilter:
+ continue
+ fn = k.rsplit(".",1)[0]
+ t = self.lockedhashfn[fn].split(" ")[1].split(":")[5]
+ t = 't-' + t.replace('_', '-')
+ if t not in types:
+ types[t] = []
+ types[t].append(k)
+
+ with open(sigfile, "w") as f:
+ for t in types:
+ f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % t)
+ types[t].sort()
+ sortedk = sorted(types[t], key=lambda k: self.lockedpnmap[k.rsplit(".",1)[0]])
+ for k in sortedk:
+ fn = k.rsplit(".",1)[0]
+ task = k.rsplit(".",1)[1]
+ if k not in self.taskhash:
+ continue
+ f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.taskhash[k] + " \\\n")
+ f.write(' "\n')
+ f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(types.keys())))
+
+ def checkhashes(self, missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d):
+ warn_msgs = []
+ error_msgs = []
+ sstate_missing_msgs = []
+
+ for task in range(len(sq_fn)):
+ if task not in ret:
+ for pn in self.lockedsigs:
+ if sq_hash[task] in self.lockedsigs[pn].itervalues():
+ if sq_task[task] == 'do_shared_workdir':
+ continue
+ sstate_missing_msgs.append("Locked sig is set for %s:%s (%s) yet not in sstate cache?"
+ % (pn, sq_task[task], sq_hash[task]))
+
+ checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK", True)
+ if checklevel == 'warn':
+ warn_msgs += self.mismatch_msgs
+ elif checklevel == 'error':
+ error_msgs += self.mismatch_msgs
+
+ checklevel = d.getVar("SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK", True)
+ if checklevel == 'warn':
+ warn_msgs += sstate_missing_msgs
+ elif checklevel == 'error':
+ error_msgs += sstate_missing_msgs
+
+ if warn_msgs:
+ bb.warn("\n".join(warn_msgs))
+ if error_msgs:
+ bb.fatal("\n".join(error_msgs))
+
+
+# Insert these classes into siggen's namespace so it can see and select them
+bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic
+bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
+
+
+def find_siginfo(pn, taskname, taskhashlist, d):
+ """ Find signature data files for comparison purposes """
+
+ import fnmatch
+ import glob
+
+ if taskhashlist:
+ hashfiles = {}
+
+ if not taskname:
+ # We have to derive pn and taskname
+ key = pn
+ splitit = key.split('.bb.')
+ taskname = splitit[1]
+ pn = os.path.basename(splitit[0]).split('_')[0]
+ if key.startswith('virtual:native:'):
+ pn = pn + '-native'
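+ # e.g. (hypothetical key) "virtual:native:/p/foo_1.0.bb.do_install"
+ # yields taskname "do_install" and pn "foo-native".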
+
+ filedates = {}
+
+ # First search in stamps dir
+ localdata = d.createCopy()
+ localdata.setVar('MULTIMACH_TARGET_SYS', '*')
+ localdata.setVar('PN', pn)
+ localdata.setVar('PV', '*')
+ localdata.setVar('PR', '*')
+ localdata.setVar('EXTENDPE', '')
+ stamp = localdata.getVar('STAMP', True)
+ if pn.startswith("gcc-source"):
+ # gcc-source shared workdir is a special case :(
+ stamp = localdata.expand("${STAMPS_DIR}/work-shared/gcc-${PV}-${PR}")
+
+ filespec = '%s.%s.sigdata.*' % (stamp, taskname)
+ foundall = False
+ for fullpath in glob.glob(filespec):
+ match = False
+ if taskhashlist:
+ for taskhash in taskhashlist:
+ if fullpath.endswith('.%s' % taskhash):
+ hashfiles[taskhash] = fullpath
+ if len(hashfiles) == len(taskhashlist):
+ foundall = True
+ break
+ else:
+ try:
+ filedates[fullpath] = os.stat(fullpath).st_mtime
+ except OSError:
+ continue
+
+ if not taskhashlist or (len(filedates) < 2 and not foundall):
+ # That didn't work, look in sstate-cache
+ hashes = taskhashlist or ['*']
+ localdata = bb.data.createCopy(d)
+ for hashval in hashes:
+ localdata.setVar('PACKAGE_ARCH', '*')
+ localdata.setVar('TARGET_VENDOR', '*')
+ localdata.setVar('TARGET_OS', '*')
+ localdata.setVar('PN', pn)
+ localdata.setVar('PV', '*')
+ localdata.setVar('PR', '*')
+ localdata.setVar('BB_TASKHASH', hashval)
+ swspec = localdata.getVar('SSTATE_SWSPEC', True)
+ if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic', 'do_preconfigure'] and swspec:
+ localdata.setVar('SSTATE_PKGSPEC', '${SSTATE_SWSPEC}')
+ elif pn.endswith('-native') or "-cross-" in pn or "-crosssdk-" in pn:
+ localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
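+ # taskname[3:] strips the "do_" prefix (e.g. "do_unpack" -> "unpack"),
+ # matching the task suffix used in sstate .siginfo file names.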
+ sstatename = taskname[3:]
+ filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG', True), sstatename)
+
+ if hashval != '*':
+ sstatedir = "%s/%s" % (d.getVar('SSTATE_DIR', True), hashval[:2])
+ else:
+ sstatedir = d.getVar('SSTATE_DIR', True)
+
+ for root, dirs, files in os.walk(sstatedir):
+ for fn in files:
+ fullpath = os.path.join(root, fn)
+ if fnmatch.fnmatch(fullpath, filespec):
+ if taskhashlist:
+ hashfiles[hashval] = fullpath
+ else:
+ try:
+ filedates[fullpath] = os.stat(fullpath).st_mtime
+ except OSError:
+ continue
+
+ if taskhashlist:
+ return hashfiles
+ else:
+ return filedates
+
+bb.siggen.find_siginfo = find_siginfo
+
+
+def sstate_get_manifest_filename(task, d):
+ """
+ Return the sstate manifest file path for a particular task.
+ Also returns the datastore that can be used to query related variables.
+ """
+ d2 = d.createCopy()
+ extrainf = d.getVarFlag("do_" + task, 'stamp-extra-info', True)
+ if extrainf:
+ d2.setVar("SSTATE_MANMACH", extrainf)
+ return (d2.expand("${SSTATE_MANFILEPREFIX}.%s" % task), d2)
diff --git a/import-layers/yocto-poky/meta/lib/oe/terminal.py b/import-layers/yocto-poky/meta/lib/oe/terminal.py
new file mode 100644
index 000000000..634daa903
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/terminal.py
@@ -0,0 +1,275 @@
+import logging
+import os
+import shlex
+import oe.classutils
+from bb.process import Popen, ExecutionError
+from distutils.version import LooseVersion
+
+logger = logging.getLogger('BitBake.OE.Terminal')
+
+
+class UnsupportedTerminal(Exception):
+ pass
+
+class NoSupportedTerminals(Exception):
+ pass
+
+
+class Registry(oe.classutils.ClassRegistry):
+ command = None
+
+ def __init__(cls, name, bases, attrs):
+ super(Registry, cls).__init__(name.lower(), bases, attrs)
+
+ @property
+ def implemented(cls):
+ return bool(cls.command)
+
+
+class Terminal(Popen):
+ __metaclass__ = Registry
+
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ fmt_sh_cmd = self.format_command(sh_cmd, title)
+ try:
+ Popen.__init__(self, fmt_sh_cmd, env=env)
+ except OSError as exc:
+ import errno
+ if exc.errno == errno.ENOENT:
+ raise UnsupportedTerminal(self.name)
+ else:
+ raise
+
+ def format_command(self, sh_cmd, title):
+ fmt = {'title': title or 'Terminal', 'command': sh_cmd}
+ if isinstance(self.command, basestring):
+ return shlex.split(self.command.format(**fmt))
+ else:
+ return [element.format(**fmt) for element in self.command]
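+ # For example (illustrative), XTerm's template
+ # 'xterm -T "{title}" -e {command}'
+ # with title "devshell" and sh_cmd "bash" formats and splits to
+ # ['xterm', '-T', 'devshell', '-e', 'bash'].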
+
+class XTerminal(Terminal):
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ Terminal.__init__(self, sh_cmd, title, env, d)
+ if not os.environ.get('DISPLAY'):
+ raise UnsupportedTerminal(self.name)
+
+class Gnome(XTerminal):
+ command = 'gnome-terminal -t "{title}" --disable-factory -x {command}'
+ priority = 2
+
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ # Recent versions of gnome-terminal do not support non-UTF8 charsets:
+ # https://bugzilla.gnome.org/show_bug.cgi?id=732127; as a workaround,
+ # we clear the LC_ALL environment variable so it uses the locale.
+ # Once fixed in the gnome-terminal project, this should be removed.
+ if os.getenv('LC_ALL'): os.putenv('LC_ALL','')
+
+ # Check version
+ vernum = check_terminal_version("gnome-terminal")
+ if vernum and LooseVersion(vernum) >= '3.10':
+ logger.debug(1, 'Gnome-Terminal 3.10 or later does not support --disable-factory')
+ self.command = 'gnome-terminal -t "{title}" -x {command}'
+ XTerminal.__init__(self, sh_cmd, title, env, d)
+
+class Mate(XTerminal):
+ command = 'mate-terminal -t "{title}" -x {command}'
+ priority = 2
+
+class Xfce(XTerminal):
+ command = 'xfce4-terminal -T "{title}" -e "{command}"'
+ priority = 2
+
+class Terminology(XTerminal):
+ command = 'terminology -T="{title}" -e {command}'
+ priority = 2
+
+class Konsole(XTerminal):
+ command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}'
+ priority = 2
+
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ # Check version
+ vernum = check_terminal_version("konsole")
+ if vernum and LooseVersion(vernum) < '2.0.0':
+ # Konsole from KDE 3.x
+ self.command = 'konsole -T "{title}" -e {command}'
+ XTerminal.__init__(self, sh_cmd, title, env, d)
+
+class XTerm(XTerminal):
+ command = 'xterm -T "{title}" -e {command}'
+ priority = 1
+
+class Rxvt(XTerminal):
+ command = 'rxvt -T "{title}" -e {command}'
+ priority = 1
+
+class Screen(Terminal):
+ command = 'screen -D -m -t "{title}" -S devshell {command}'
+
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ s_id = "devshell_%i" % os.getpid()
+ self.command = "screen -D -m -t \"{title}\" -S %s {command}" % s_id
+ Terminal.__init__(self, sh_cmd, title, env, d)
+ msg = 'Screen started. Please connect in another terminal with ' \
+ '"screen -r %s"' % s_id
+ if (d):
+ bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id,
+ 0.5, 10), d)
+ else:
+ logger.warn(msg)
+
+class TmuxRunning(Terminal):
+ """Open a new pane in the current running tmux window"""
+ name = 'tmux-running'
+ command = 'tmux split-window "{command}"'
+ priority = 2.75
+
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ if not bb.utils.which(os.getenv('PATH'), 'tmux'):
+ raise UnsupportedTerminal('tmux is not installed')
+
+ if not os.getenv('TMUX'):
+ raise UnsupportedTerminal('tmux is not running')
+
+ if not check_tmux_pane_size('tmux'):
+ raise UnsupportedTerminal('tmux pane too small or tmux < 1.9 version is being used')
+
+ Terminal.__init__(self, sh_cmd, title, env, d)
+
+class TmuxNewWindow(Terminal):
+ """Open a new window in the current running tmux session"""
+ name = 'tmux-new-window'
+ command = 'tmux new-window -n "{title}" "{command}"'
+ priority = 2.70
+
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ if not bb.utils.which(os.getenv('PATH'), 'tmux'):
+ raise UnsupportedTerminal('tmux is not installed')
+
+ if not os.getenv('TMUX'):
+ raise UnsupportedTerminal('tmux is not running')
+
+ Terminal.__init__(self, sh_cmd, title, env, d)
+
+class Tmux(Terminal):
+ """Start a new tmux session and window"""
+ command = 'tmux new -d -s devshell -n devshell "{command}"'
+ priority = 0.75
+
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ if not bb.utils.which(os.getenv('PATH'), 'tmux'):
+ raise UnsupportedTerminal('tmux is not installed')
+
+ # TODO: consider using a 'devshell' session shared amongst all
+ # devshells, if it's already there, add a new window to it.
+ window_name = 'devshell-%i' % os.getpid()
+
+ self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'.format(window_name)
+ Terminal.__init__(self, sh_cmd, title, env, d)
+
+ attach_cmd = 'tmux att -t {0}'.format(window_name)
+ msg = 'Tmux started. Please connect in another terminal with `tmux att -t {0}`'.format(window_name)
+ if d:
+ bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
+ else:
+ logger.warn(msg)
+
+class Custom(Terminal):
+ command = 'false' # This is a placeholder
+ priority = 3
+
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD', True)
+ if self.command:
+ if '{command}' not in self.command:
+ self.command += ' {command}'
+ Terminal.__init__(self, sh_cmd, title, env, d)
+ logger.warn('Custom terminal was started.')
+ else:
+ logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
+ raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
+
+
+def prioritized():
+ return Registry.prioritized()
+
+def spawn_preferred(sh_cmd, title=None, env=None, d=None):
+ """Spawn the first supported terminal, by priority"""
+ for terminal in prioritized():
+ try:
+ spawn(terminal.name, sh_cmd, title, env, d)
+ break
+ except UnsupportedTerminal:
+ continue
+ else:
+ raise NoSupportedTerminals()
+
+def spawn(name, sh_cmd, title=None, env=None, d=None):
+ """Spawn the specified terminal, by name"""
+ logger.debug(1, 'Attempting to spawn terminal "%s"', name)
+ try:
+ terminal = Registry.registry[name]
+ except KeyError:
+ raise UnsupportedTerminal(name)
+
+ pipe = terminal(sh_cmd, title, env, d)
+ output = pipe.communicate()[0]
+ if pipe.returncode != 0:
+ raise ExecutionError(sh_cmd, pipe.returncode, output)
+
+def check_tmux_pane_size(tmux):
+ import subprocess as sub
+ # On older tmux versions (<1.9), return False. The reason
+ # is that there is no easy way to get the height of the active pane
+ # in the current window without nested formats (available from version 1.9).
+ vernum = check_terminal_version("tmux")
+ if vernum and LooseVersion(vernum) < '1.9':
+ return False
+ try:
+ p = sub.Popen('%s list-panes -F "#{?pane_active,#{pane_height},}"' % tmux,
+ shell=True,stdout=sub.PIPE,stderr=sub.PIPE)
+ out, err = p.communicate()
+ size = int(out.strip())
+ except OSError as exc:
+ import errno
+ if exc.errno == errno.ENOENT:
+ return None
+ else:
+ raise
+
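+ # a split roughly halves the current pane's height, so require at least
+ # 19 rows for the resulting pane (heuristic)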
+ return size/2 >= 19
+
+def check_terminal_version(terminalName):
+ import subprocess as sub
+ try:
+ cmdversion = '%s --version' % terminalName
+ if terminalName.startswith('tmux'):
+ cmdversion = '%s -V' % terminalName
+ newenv = os.environ.copy()
+ newenv["LANG"] = "C"
+ p = sub.Popen(['sh', '-c', cmdversion], stdout=sub.PIPE, stderr=sub.PIPE, env=newenv)
+ out, err = p.communicate()
+ ver_info = out.rstrip().split('\n')
+ except OSError as exc:
+ import errno
+ if exc.errno == errno.ENOENT:
+ return None
+ else:
+ raise
+ vernum = None
+ for ver in ver_info:
+ if ver.startswith('Konsole'):
+ vernum = ver.split(' ')[-1]
+ if ver.startswith('GNOME Terminal'):
+ vernum = ver.split(' ')[-1]
+ if ver.startswith('tmux'):
+ vernum = ver.split()[-1]
+ return vernum
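+
+# Usage sketch: check_terminal_version('tmux') returns the version
+# string reported by 'tmux -V' (e.g. '2.1'), or None when the terminal
+# is not installed.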
+
+def distro_name():
+    import subprocess as sub
+    try:
+        p = sub.Popen(['lsb_release', '-i'], stdout=sub.PIPE, stderr=sub.PIPE)
+        out, err = p.communicate()
+ distro = out.split(':')[1].strip().lower()
+ except:
+ distro = "unknown"
+ return distro
diff --git a/import-layers/yocto-poky/meta/lib/oe/tests/__init__.py b/import-layers/yocto-poky/meta/lib/oe/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/tests/__init__.py
diff --git a/import-layers/yocto-poky/meta/lib/oe/tests/test_license.py b/import-layers/yocto-poky/meta/lib/oe/tests/test_license.py
new file mode 100644
index 000000000..c38888618
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/tests/test_license.py
@@ -0,0 +1,68 @@
+import unittest
+import oe.license
+
+class SeenVisitor(oe.license.LicenseVisitor):
+ def __init__(self):
+ self.seen = []
+ oe.license.LicenseVisitor.__init__(self)
+
+ def visit_Str(self, node):
+ self.seen.append(node.s)
+
+class TestSingleLicense(unittest.TestCase):
+ licenses = [
+ "GPLv2",
+ "LGPL-2.0",
+ "Artistic",
+ "MIT",
+ "GPLv3+",
+ "FOO_BAR",
+ ]
+ invalid_licenses = ["GPL/BSD"]
+
+ @staticmethod
+ def parse(licensestr):
+ visitor = SeenVisitor()
+ visitor.visit_string(licensestr)
+ return visitor.seen
+
+ def test_single_licenses(self):
+ for license in self.licenses:
+ licenses = self.parse(license)
+ self.assertListEqual(licenses, [license])
+
+ def test_invalid_licenses(self):
+ for license in self.invalid_licenses:
+ with self.assertRaises(oe.license.InvalidLicense) as cm:
+ self.parse(license)
+ self.assertEqual(cm.exception.license, license)
+
+class TestSimpleCombinations(unittest.TestCase):
+ tests = {
+ "FOO&BAR": ["FOO", "BAR"],
+ "BAZ & MOO": ["BAZ", "MOO"],
+ "ALPHA|BETA": ["ALPHA"],
+ "BAZ&MOO|FOO": ["FOO"],
+ "FOO&BAR|BAZ": ["FOO", "BAR"],
+ }
+ preferred = ["ALPHA", "FOO", "BAR"]
+
+ def test_tests(self):
+ def choose(a, b):
+ if all(lic in self.preferred for lic in b):
+ return b
+ else:
+ return a
+
+ for license, expected in self.tests.items():
+ licenses = oe.license.flattened_licenses(license, choose)
+ self.assertListEqual(licenses, expected)
+
+class TestComplexCombinations(TestSimpleCombinations):
+ tests = {
+ "FOO & (BAR | BAZ)&MOO": ["FOO", "BAR", "MOO"],
+ "(ALPHA|(BETA&THETA)|OMEGA)&DELTA": ["OMEGA", "DELTA"],
+ "((ALPHA|BETA)&FOO)|BAZ": ["BETA", "FOO"],
+ "(GPL-2.0|Proprietary)&BSD-4-clause&MIT": ["GPL-2.0", "BSD-4-clause", "MIT"],
+ }
+ preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0"]
diff --git a/import-layers/yocto-poky/meta/lib/oe/tests/test_path.py b/import-layers/yocto-poky/meta/lib/oe/tests/test_path.py
new file mode 100644
index 000000000..3d41ce157
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/tests/test_path.py
@@ -0,0 +1,89 @@
+import unittest
+import oe, oe.path
+import tempfile
+import os
+import errno
+import shutil
+
+class TestRealPath(unittest.TestCase):
+ DIRS = [ "a", "b", "etc", "sbin", "usr", "usr/bin", "usr/binX", "usr/sbin", "usr/include", "usr/include/gdbm" ]
+ FILES = [ "etc/passwd", "b/file" ]
+ LINKS = [
+ ( "bin", "/usr/bin", "/usr/bin" ),
+ ( "binX", "usr/binX", "/usr/binX" ),
+ ( "c", "broken", "/broken" ),
+ ( "etc/passwd-1", "passwd", "/etc/passwd" ),
+ ( "etc/passwd-2", "passwd-1", "/etc/passwd" ),
+ ( "etc/passwd-3", "/etc/passwd-1", "/etc/passwd" ),
+ ( "etc/shadow-1", "/etc/shadow", "/etc/shadow" ),
+ ( "etc/shadow-2", "/etc/shadow-1", "/etc/shadow" ),
+ ( "prog-A", "bin/prog-A", "/usr/bin/prog-A" ),
+ ( "prog-B", "/bin/prog-B", "/usr/bin/prog-B" ),
+ ( "usr/bin/prog-C", "../../sbin/prog-C", "/sbin/prog-C" ),
+ ( "usr/bin/prog-D", "/sbin/prog-D", "/sbin/prog-D" ),
+ ( "usr/binX/prog-E", "../sbin/prog-E", None ),
+ ( "usr/bin/prog-F", "../../../sbin/prog-F", "/sbin/prog-F" ),
+ ( "loop", "a/loop", None ),
+ ( "a/loop", "../loop", None ),
+ ( "b/test", "file/foo", "/b/file/foo" ),
+ ]
+
+ LINKS_PHYS = [
+ ( "./", "/", "" ),
+ ( "binX/prog-E", "/usr/sbin/prog-E", "/sbin/prog-E" ),
+ ]
+
+ EXCEPTIONS = [
+ ( "loop", errno.ELOOP ),
+ ( "b/test", errno.ENOENT ),
+ ]
+
+ def __del__(self):
+ try:
+ #os.system("tree -F %s" % self.tmpdir)
+ shutil.rmtree(self.tmpdir)
+ except:
+ pass
+
+ def setUp(self):
+ self.tmpdir = tempfile.mkdtemp(prefix = "oe-test_path")
+ self.root = os.path.join(self.tmpdir, "R")
+
+ os.mkdir(os.path.join(self.tmpdir, "_real"))
+ os.symlink("_real", self.root)
+
+ for d in self.DIRS:
+ os.mkdir(os.path.join(self.root, d))
+ for f in self.FILES:
+            open(os.path.join(self.root, f), "w").close()
+ for l in self.LINKS:
+ os.symlink(l[1], os.path.join(self.root, l[0]))
+
+ def __realpath(self, file, use_physdir, assume_dir = True):
+ return oe.path.realpath(os.path.join(self.root, file), self.root,
+ use_physdir, assume_dir = assume_dir)
+
+ def test_norm(self):
+ for l in self.LINKS:
+            if l[2] is None:
+ continue
+
+ target_p = self.__realpath(l[0], True)
+ target_l = self.__realpath(l[0], False)
+
+ if l[2] != False:
+ self.assertEqual(target_p, target_l)
+ self.assertEqual(l[2], target_p[len(self.root):])
+
+ def test_phys(self):
+ for l in self.LINKS_PHYS:
+ target_p = self.__realpath(l[0], True)
+ target_l = self.__realpath(l[0], False)
+
+ self.assertEqual(l[1], target_p[len(self.root):])
+ self.assertEqual(l[2], target_l[len(self.root):])
+
+ def test_loop(self):
+ for e in self.EXCEPTIONS:
+ self.assertRaisesRegexp(OSError, r'\[Errno %u\]' % e[1],
+ self.__realpath, e[0], False, False)
diff --git a/import-layers/yocto-poky/meta/lib/oe/tests/test_types.py b/import-layers/yocto-poky/meta/lib/oe/tests/test_types.py
new file mode 100644
index 000000000..367cc30e4
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/tests/test_types.py
@@ -0,0 +1,62 @@
+import unittest
+from oe.maketype import create, factory
+
+class TestTypes(unittest.TestCase):
+ def assertIsInstance(self, obj, cls):
+ return self.assertTrue(isinstance(obj, cls))
+
+ def assertIsNot(self, obj, other):
+ return self.assertFalse(obj is other)
+
+ def assertFactoryCreated(self, value, type, **flags):
+ cls = factory(type)
+ self.assertIsNot(cls, None)
+ self.assertIsInstance(create(value, type, **flags), cls)
+
+class TestBooleanType(TestTypes):
+ def test_invalid(self):
+ self.assertRaises(ValueError, create, '', 'boolean')
+ self.assertRaises(ValueError, create, 'foo', 'boolean')
+ self.assertRaises(TypeError, create, object(), 'boolean')
+
+ def test_true(self):
+ self.assertTrue(create('y', 'boolean'))
+ self.assertTrue(create('yes', 'boolean'))
+ self.assertTrue(create('1', 'boolean'))
+ self.assertTrue(create('t', 'boolean'))
+ self.assertTrue(create('true', 'boolean'))
+ self.assertTrue(create('TRUE', 'boolean'))
+ self.assertTrue(create('truE', 'boolean'))
+
+ def test_false(self):
+ self.assertFalse(create('n', 'boolean'))
+ self.assertFalse(create('no', 'boolean'))
+ self.assertFalse(create('0', 'boolean'))
+ self.assertFalse(create('f', 'boolean'))
+ self.assertFalse(create('false', 'boolean'))
+ self.assertFalse(create('FALSE', 'boolean'))
+ self.assertFalse(create('faLse', 'boolean'))
+
+ def test_bool_equality(self):
+ self.assertEqual(create('n', 'boolean'), False)
+ self.assertNotEqual(create('n', 'boolean'), True)
+ self.assertEqual(create('y', 'boolean'), True)
+ self.assertNotEqual(create('y', 'boolean'), False)
+
+class TestList(TestTypes):
+ def assertListEqual(self, value, valid, sep=None):
+ obj = create(value, 'list', separator=sep)
+ self.assertEqual(obj, valid)
+ if sep is not None:
+ self.assertEqual(obj.separator, sep)
+ self.assertEqual(str(obj), obj.separator.join(obj))
+
+ def test_list_nosep(self):
+ testlist = ['alpha', 'beta', 'theta']
+ self.assertListEqual('alpha beta theta', testlist)
+ self.assertListEqual('alpha beta\ttheta', testlist)
+ self.assertListEqual('alpha', ['alpha'])
+
+ def test_list_usersep(self):
+ self.assertListEqual('foo:bar', ['foo', 'bar'], ':')
+ self.assertListEqual('foo:bar:baz', ['foo', 'bar', 'baz'], ':')
diff --git a/import-layers/yocto-poky/meta/lib/oe/tests/test_utils.py b/import-layers/yocto-poky/meta/lib/oe/tests/test_utils.py
new file mode 100644
index 000000000..5d9ac52e7
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/tests/test_utils.py
@@ -0,0 +1,51 @@
+import unittest
+from oe.utils import packages_filter_out_system, trim_version
+
+class TestPackagesFilterOutSystem(unittest.TestCase):
+ def test_filter(self):
+ """
+ Test that oe.utils.packages_filter_out_system works.
+ """
+ try:
+ import bb
+ except ImportError:
+ self.skipTest("Cannot import bb")
+
+ d = bb.data_smart.DataSmart()
+ d.setVar("PN", "foo")
+
+ d.setVar("PACKAGES", "foo foo-doc foo-dev")
+ pkgs = packages_filter_out_system(d)
+ self.assertEqual(pkgs, [])
+
+ d.setVar("PACKAGES", "foo foo-doc foo-data foo-dev")
+ pkgs = packages_filter_out_system(d)
+ self.assertEqual(pkgs, ["foo-data"])
+
+ d.setVar("PACKAGES", "foo foo-locale-en-gb")
+ pkgs = packages_filter_out_system(d)
+ self.assertEqual(pkgs, [])
+
+ d.setVar("PACKAGES", "foo foo-data foo-locale-en-gb")
+ pkgs = packages_filter_out_system(d)
+ self.assertEqual(pkgs, ["foo-data"])
+
+
+class TestTrimVersion(unittest.TestCase):
+ def test_version_exception(self):
+ with self.assertRaises(TypeError):
+ trim_version(None, 2)
+ with self.assertRaises(TypeError):
+ trim_version((1, 2, 3), 2)
+
+ def test_num_exception(self):
+ with self.assertRaises(ValueError):
+ trim_version("1.2.3", 0)
+ with self.assertRaises(ValueError):
+ trim_version("1.2.3", -1)
+
+ def test_valid(self):
+ self.assertEqual(trim_version("1.2.3", 1), "1")
+ self.assertEqual(trim_version("1.2.3", 2), "1.2")
+ self.assertEqual(trim_version("1.2.3", 3), "1.2.3")
+ self.assertEqual(trim_version("1.2.3", 4), "1.2.3")
diff --git a/import-layers/yocto-poky/meta/lib/oe/types.py b/import-layers/yocto-poky/meta/lib/oe/types.py
new file mode 100644
index 000000000..7f47c17d0
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/types.py
@@ -0,0 +1,153 @@
+import errno
+import re
+import os
+
+
+class OEList(list):
+ """OpenEmbedded 'list' type
+
+ Acts as an ordinary list, but is constructed from a string value and a
+ separator (optional), and re-joins itself when converted to a string with
+ str(). Set the variable type flag to 'list' to use this type, and the
+ 'separator' flag may be specified (defaulting to whitespace)."""
+
+ name = "list"
+
+ def __init__(self, value, separator = None):
+ if value is not None:
+ list.__init__(self, value.split(separator))
+ else:
+ list.__init__(self)
+
+ if separator is None:
+ self.separator = " "
+ else:
+ self.separator = separator
+
+ def __str__(self):
+ return self.separator.join(self)
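+
+# Usage sketch: OEList("a:b:c", ":") behaves as the list
+# ['a', 'b', 'c'], and str() re-joins it as 'a:b:c'.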
+
+def choice(value, choices):
+ """OpenEmbedded 'choice' type
+
+ Acts as a multiple choice for the user. To use this, set the variable
+ type flag to 'choice', and set the 'choices' flag to a space separated
+ list of valid values."""
+ if not isinstance(value, basestring):
+ raise TypeError("choice accepts a string, not '%s'" % type(value))
+
+ value = value.lower()
+ choices = choices.lower()
+ if value not in choices.split():
+ raise ValueError("Invalid choice '%s'. Valid choices: %s" %
+ (value, choices))
+ return value
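+
+# Usage sketch: choice("GZIP", "gzip bzip2 xz") returns 'gzip'; a value
+# outside the list raises ValueError.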
+
+class NoMatch(object):
+ """Stub python regex pattern object which never matches anything"""
+ def findall(self, string, flags=0):
+ return None
+
+ def finditer(self, string, flags=0):
+ return None
+
+ def match(self, flags=0):
+ return None
+
+ def search(self, string, flags=0):
+ return None
+
+ def split(self, string, maxsplit=0):
+ return None
+
+    def sub(self, repl, string, count=0):
+        return None
+
+    def subn(self, repl, string, count=0):
+        return None
+
+NoMatch = NoMatch()
+
+def regex(value, regexflags=None):
+ """OpenEmbedded 'regex' type
+
+ Acts as a regular expression, returning the pre-compiled regular
+ expression pattern object. To use this type, set the variable type flag
+ to 'regex', and optionally, set the 'regexflags' type to a space separated
+ list of the flags to control the regular expression matching (e.g.
+ FOO[regexflags] += 'ignorecase'). See the python documentation on the
+ 're' module for a list of valid flags."""
+
+ flagval = 0
+ if regexflags:
+ for flag in regexflags.split():
+ flag = flag.upper()
+ try:
+ flagval |= getattr(re, flag)
+ except AttributeError:
+ raise ValueError("Invalid regex flag '%s'" % flag)
+
+ if not value:
+ # Let's ensure that the default behavior for an undefined or empty
+ # variable is to match nothing. If the user explicitly wants to match
+ # anything, they can match '.*' instead.
+ return NoMatch
+
+ try:
+ return re.compile(value, flagval)
+ except re.error as exc:
+ raise ValueError("Invalid regex value '%s': %s" %
+ (value, exc.args[0]))
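+
+# Usage sketch: regex("^foo", "ignorecase") returns a pattern compiled
+# with re.IGNORECASE, while regex("") returns the NoMatch stub, which
+# matches nothing.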
+
+def boolean(value):
+ """OpenEmbedded 'boolean' type
+
+ Valid values for true: 'yes', 'y', 'true', 't', '1'
+ Valid values for false: 'no', 'n', 'false', 'f', '0'
+ """
+
+ if not isinstance(value, basestring):
+ raise TypeError("boolean accepts a string, not '%s'" % type(value))
+
+ value = value.lower()
+ if value in ('yes', 'y', 'true', 't', '1'):
+ return True
+ elif value in ('no', 'n', 'false', 'f', '0'):
+ return False
+ raise ValueError("Invalid boolean value '%s'" % value)
+
+def integer(value, numberbase=10):
+ """OpenEmbedded 'integer' type
+
+ Defaults to base 10, but this can be specified using the optional
+ 'numberbase' flag."""
+
+ return int(value, int(numberbase))
+
+_float = float
+def float(value, fromhex='false'):
+ """OpenEmbedded floating point type
+
+ To use this type, set the type flag to 'float', and optionally set the
+ 'fromhex' flag to a true value (obeying the same rules as for the
+ 'boolean' type) if the value is in base 16 rather than base 10."""
+
+ if boolean(fromhex):
+ return _float.fromhex(value)
+ else:
+ return _float(value)
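+
+# Usage sketch: integer("ff", 16) == 255, and with the 'fromhex' flag
+# enabled, float("0x1.8p1", "true") == 3.0.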
+
+def path(value, relativeto='', normalize='true', mustexist='false'):
+ value = os.path.join(relativeto, value)
+
+ if boolean(normalize):
+ value = os.path.normpath(value)
+
+ if boolean(mustexist):
+ try:
+ open(value, 'r')
+ except IOError as exc:
+ if exc.errno == errno.ENOENT:
+ raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT)))
+
+ return value
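+
+# Usage sketch: path("etc/fstab", relativeto="/") returns '/etc/fstab';
+# with mustexist='true' a missing file raises ValueError.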
diff --git a/import-layers/yocto-poky/meta/lib/oe/utils.py b/import-layers/yocto-poky/meta/lib/oe/utils.py
new file mode 100644
index 000000000..30d30629f
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oe/utils.py
@@ -0,0 +1,306 @@
+import os
+
+try:
+    # Python 2
+    import commands as cmdstatus
+except ImportError:
+    # Python 3
+    import subprocess as cmdstatus
+
+def read_file(filename):
+ try:
+ f = open( filename, "r" )
+ except IOError as reason:
+ return "" # WARNING: can't raise an error now because of the new RDEPENDS handling. This is a bit ugly. :M:
+ else:
+ data = f.read().strip()
+ f.close()
+ return data
+
+def ifelse(condition, iftrue = True, iffalse = False):
+ if condition:
+ return iftrue
+ else:
+ return iffalse
+
+def conditional(variable, checkvalue, truevalue, falsevalue, d):
+ if d.getVar(variable,1) == checkvalue:
+ return truevalue
+ else:
+ return falsevalue
+
+def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
+ if float(d.getVar(variable,1)) <= float(checkvalue):
+ return truevalue
+ else:
+ return falsevalue
+
+def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
+ result = bb.utils.vercmp_string(d.getVar(variable,True), checkvalue)
+ if result <= 0:
+ return truevalue
+ else:
+ return falsevalue
+
+def both_contain(variable1, variable2, checkvalue, d):
+ val1 = d.getVar(variable1, True)
+ val2 = d.getVar(variable2, True)
+ val1 = set(val1.split())
+ val2 = set(val2.split())
+ if isinstance(checkvalue, basestring):
+ checkvalue = set(checkvalue.split())
+ else:
+ checkvalue = set(checkvalue)
+ if checkvalue.issubset(val1) and checkvalue.issubset(val2):
+ return " ".join(checkvalue)
+ else:
+ return ""
+
+def set_intersect(variable1, variable2, d):
+ """
+ Expand both variables, interpret them as lists of strings, and return the
+ intersection as a flattened string.
+
+ For example:
+ s1 = "a b c"
+ s2 = "b c d"
+ s3 = set_intersect(s1, s2)
+ => s3 = "b c"
+ """
+ val1 = set(d.getVar(variable1, True).split())
+ val2 = set(d.getVar(variable2, True).split())
+ return " ".join(val1 & val2)
+
+def prune_suffix(var, suffixes, d):
+ # See if var ends with any of the suffixes listed and
+ # remove it if found
+ for suffix in suffixes:
+ if var.endswith(suffix):
+ var = var.replace(suffix, "")
+
+ prefix = d.getVar("MLPREFIX", True)
+ if prefix and var.startswith(prefix):
+ var = var.replace(prefix, "")
+
+ return var
+
+def str_filter(f, str, d):
+ from re import match
+ return " ".join(filter(lambda x: match(f, x, 0), str.split()))
+
+def str_filter_out(f, str, d):
+ from re import match
+ return " ".join(filter(lambda x: not match(f, x, 0), str.split()))
+
+def param_bool(cfg, field, dflt = None):
+ """Lookup <field> in <cfg> map and convert it to a boolean; take
+ <dflt> when this <field> does not exist"""
+ value = cfg.get(field, dflt)
+ strvalue = str(value).lower()
+ if strvalue in ('yes', 'y', 'true', 't', '1'):
+ return True
+ elif strvalue in ('no', 'n', 'false', 'f', '0'):
+ return False
+ raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value))
+
+def inherits(d, *classes):
+ """Return True if the metadata inherits any of the specified classes"""
+ return any(bb.data.inherits_class(cls, d) for cls in classes)
+
+def features_backfill(var,d):
+    # This construct allows the addition of new features to the variable
+    # named by var (e.g. var = "DISTRO_FEATURES"): features that, if not
+    # present, would disable existing functionality, without disturbing
+    # distributions that have already set the variable.
+    # Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL
+    # should add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED.
+ features = (d.getVar(var, True) or "").split()
+ backfill = (d.getVar(var+"_BACKFILL", True) or "").split()
+ considered = (d.getVar(var+"_BACKFILL_CONSIDERED", True) or "").split()
+
+ addfeatures = []
+ for feature in backfill:
+ if feature not in features and feature not in considered:
+ addfeatures.append(feature)
+
+ if addfeatures:
+ d.appendVar(var, " " + " ".join(addfeatures))
+
+
+def packages_filter_out_system(d):
+ """
+ Return a list of packages from PACKAGES with the "system" packages such as
+    PN-dbg, PN-doc and PN-locale-en-gb removed.
+ """
+ pn = d.getVar('PN', True)
+ blacklist = map(lambda suffix: pn + suffix, ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev'))
+ localepkg = pn + "-locale-"
+ pkgs = []
+
+ for pkg in d.getVar('PACKAGES', True).split():
+ if pkg not in blacklist and localepkg not in pkg:
+ pkgs.append(pkg)
+ return pkgs
+
+def getstatusoutput(cmd):
+ return cmdstatus.getstatusoutput(cmd)
+
+
+def trim_version(version, num_parts=2):
+ """
+ Return just the first <num_parts> of <version>, split by periods. For
+ example, trim_version("1.2.3", 2) will return "1.2".
+ """
+ if type(version) is not str:
+ raise TypeError("Version should be a string")
+ if num_parts < 1:
+ raise ValueError("Cannot split to parts < 1")
+
+ parts = version.split(".")
+ trimmed = ".".join(parts[:num_parts])
+ return trimmed
+
+def cpu_count():
+ import multiprocessing
+ return multiprocessing.cpu_count()
+
+def execute_pre_post_process(d, cmds):
+ if cmds is None:
+ return
+
+ for cmd in cmds.strip().split(';'):
+ cmd = cmd.strip()
+ if cmd != '':
+ bb.note("Executing %s ..." % cmd)
+ bb.build.exec_func(cmd, d)
+
+def multiprocess_exec(commands, function):
+ import signal
+ import multiprocessing
+
+ if not commands:
+ return []
+
+ def init_worker():
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+ nproc = min(multiprocessing.cpu_count(), len(commands))
+ pool = bb.utils.multiprocessingpool(nproc, init_worker)
+ imap = pool.imap(function, commands)
+
+ try:
+ res = list(imap)
+ pool.close()
+ pool.join()
+ results = []
+ for result in res:
+ if result is not None:
+ results.append(result)
+ return results
+
+ except KeyboardInterrupt:
+ pool.terminate()
+ pool.join()
+ raise
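+
+# Usage sketch (requires a BitBake environment, since the pool comes
+# from bb.utils.multiprocessingpool): multiprocess_exec(cmds, fn) maps
+# fn over cmds in parallel and filters out None results.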
+
+def squashspaces(string):
+ import re
+ return re.sub("\s+", " ", string).strip()
+
+def format_pkg_list(pkg_dict, ret_format=None):
+ output = []
+
+ if ret_format == "arch":
+ for pkg in sorted(pkg_dict):
+ output.append("%s %s" % (pkg, pkg_dict[pkg]["arch"]))
+ elif ret_format == "file":
+ for pkg in sorted(pkg_dict):
+ output.append("%s %s %s" % (pkg, pkg_dict[pkg]["filename"], pkg_dict[pkg]["arch"]))
+ elif ret_format == "ver":
+ for pkg in sorted(pkg_dict):
+ output.append("%s %s %s" % (pkg, pkg_dict[pkg]["arch"], pkg_dict[pkg]["ver"]))
+ elif ret_format == "deps":
+ for pkg in sorted(pkg_dict):
+ for dep in pkg_dict[pkg]["deps"]:
+ output.append("%s|%s" % (pkg, dep))
+ else:
+ for pkg in sorted(pkg_dict):
+ output.append(pkg)
+
+ return '\n'.join(output)
+
+#
+# Python 2.7 doesn't have threaded pools (just multiprocessing)
+# so implement a version here
+#
+
+from Queue import Queue
+from threading import Thread
+
+class ThreadedWorker(Thread):
+ """Thread executing tasks from a given tasks queue"""
+ def __init__(self, tasks, worker_init, worker_end):
+ Thread.__init__(self)
+ self.tasks = tasks
+ self.daemon = True
+
+ self.worker_init = worker_init
+ self.worker_end = worker_end
+
+ def run(self):
+ from Queue import Empty
+
+ if self.worker_init is not None:
+ self.worker_init(self)
+
+ while True:
+ try:
+ func, args, kargs = self.tasks.get(block=False)
+ except Empty:
+ if self.worker_end is not None:
+ self.worker_end(self)
+ break
+
+ try:
+ func(self, *args, **kargs)
+ except Exception, e:
+ print e
+ finally:
+ self.tasks.task_done()
+
+class ThreadedPool:
+ """Pool of threads consuming tasks from a queue"""
+ def __init__(self, num_workers, num_tasks, worker_init=None,
+ worker_end=None):
+ self.tasks = Queue(num_tasks)
+ self.workers = []
+
+ for _ in range(num_workers):
+ worker = ThreadedWorker(self.tasks, worker_init, worker_end)
+ self.workers.append(worker)
+
+ def start(self):
+ for worker in self.workers:
+ worker.start()
+
+ def add_task(self, func, *args, **kargs):
+ """Add a task to the queue"""
+ self.tasks.put((func, args, kargs))
+
+ def wait_completion(self):
+ """Wait for completion of all the tasks in the queue"""
+ self.tasks.join()
+ for worker in self.workers:
+ worker.join()
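+
+# Usage sketch: each worker passes itself as the first argument to the
+# task function, so tasks take the worker plus their own arguments:
+#   def task(worker, msg):
+#       print msg
+#   pool = ThreadedPool(2, 10)
+#   pool.start()
+#   pool.add_task(task, "hello")
+#   pool.wait_completion()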
+
+def write_ld_so_conf(d):
+ # Some utils like prelink may not have the correct target library paths
+ # so write an ld.so.conf to help them
+ ldsoconf = d.expand("${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf")
+ if os.path.exists(ldsoconf):
+ bb.utils.remove(ldsoconf)
+ bb.utils.mkdirhier(os.path.dirname(ldsoconf))
+ with open(ldsoconf, "w") as f:
+ f.write(d.getVar("base_libdir", True) + '\n')
+ f.write(d.getVar("libdir", True) + '\n')