Diffstat (limited to 'import-layers/yocto-poky/meta/classes')
-rw-r--r--  import-layers/yocto-poky/meta/classes/allarch.bbclass  47
-rw-r--r--  import-layers/yocto-poky/meta/classes/archiver.bbclass  395
-rw-r--r--  import-layers/yocto-poky/meta/classes/autotools-brokensep.bbclass  5
-rw-r--r--  import-layers/yocto-poky/meta/classes/autotools.bbclass  322
-rw-r--r--  import-layers/yocto-poky/meta/classes/base.bbclass  667
-rw-r--r--  import-layers/yocto-poky/meta/classes/bash-completion.bbclass  5
-rw-r--r--  import-layers/yocto-poky/meta/classes/bin_package.bbclass  36
-rw-r--r--  import-layers/yocto-poky/meta/classes/binconfig-disabled.bbclass  29
-rw-r--r--  import-layers/yocto-poky/meta/classes/binconfig.bbclass  63
-rw-r--r--  import-layers/yocto-poky/meta/classes/blacklist.bbclass  45
-rw-r--r--  import-layers/yocto-poky/meta/classes/bluetooth.bbclass  14
-rw-r--r--  import-layers/yocto-poky/meta/classes/bugzilla.bbclass  187
-rw-r--r--  import-layers/yocto-poky/meta/classes/buildhistory.bbclass  856
-rw-r--r--  import-layers/yocto-poky/meta/classes/buildstats-summary.bbclass  36
-rw-r--r--  import-layers/yocto-poky/meta/classes/buildstats.bbclass  185
-rw-r--r--  import-layers/yocto-poky/meta/classes/ccache.bbclass  8
-rw-r--r--  import-layers/yocto-poky/meta/classes/chrpath.bbclass  117
-rw-r--r--  import-layers/yocto-poky/meta/classes/clutter.bbclass  22
-rw-r--r--  import-layers/yocto-poky/meta/classes/cmake.bbclass  137
-rw-r--r--  import-layers/yocto-poky/meta/classes/cml1.bbclass  76
-rw-r--r--  import-layers/yocto-poky/meta/classes/compress_doc.bbclass  260
-rw-r--r--  import-layers/yocto-poky/meta/classes/copyleft_compliance.bbclass  64
-rw-r--r--  import-layers/yocto-poky/meta/classes/copyleft_filter.bbclass  79
-rw-r--r--  import-layers/yocto-poky/meta/classes/core-image.bbclass  71
-rw-r--r--  import-layers/yocto-poky/meta/classes/cpan-base.bbclass  40
-rw-r--r--  import-layers/yocto-poky/meta/classes/cpan.bbclass  55
-rw-r--r--  import-layers/yocto-poky/meta/classes/cpan_build.bbclass  40
-rw-r--r--  import-layers/yocto-poky/meta/classes/cross-canadian.bbclass  182
-rw-r--r--  import-layers/yocto-poky/meta/classes/cross.bbclass  71
-rw-r--r--  import-layers/yocto-poky/meta/classes/crosssdk.bbclass  36
-rw-r--r--  import-layers/yocto-poky/meta/classes/debian.bbclass  141
-rw-r--r--  import-layers/yocto-poky/meta/classes/deploy.bbclass  11
-rw-r--r--  import-layers/yocto-poky/meta/classes/devshell.bbclass  156
-rw-r--r--  import-layers/yocto-poky/meta/classes/distro_features_check.bbclass  37
-rw-r--r--  import-layers/yocto-poky/meta/classes/distrodata.bbclass  480
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils-base.bbclass  4
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils-common-base.bbclass  17
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils-native-base.bbclass  3
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils-tools.bbclass  77
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils.bbclass  86
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils3-base.bbclass  8
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils3-native-base.bbclass  4
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils3.bbclass  100
-rw-r--r--  import-layers/yocto-poky/meta/classes/externalsrc.bbclass  154
-rw-r--r--  import-layers/yocto-poky/meta/classes/extrausers.bbclass  65
-rw-r--r--  import-layers/yocto-poky/meta/classes/fontcache.bbclass  56
-rw-r--r--  import-layers/yocto-poky/meta/classes/fs-uuid.bbclass  24
-rw-r--r--  import-layers/yocto-poky/meta/classes/gconf.bbclass  70
-rw-r--r--  import-layers/yocto-poky/meta/classes/gettext.bbclass  19
-rw-r--r--  import-layers/yocto-poky/meta/classes/gio-module-cache.bbclass  37
-rw-r--r--  import-layers/yocto-poky/meta/classes/gnome.bbclass  1
-rw-r--r--  import-layers/yocto-poky/meta/classes/gnomebase.bbclass  30
-rw-r--r--  import-layers/yocto-poky/meta/classes/gobject-introspection-data.bbclass  9
-rw-r--r--  import-layers/yocto-poky/meta/classes/gobject-introspection.bbclass  37
-rw-r--r--  import-layers/yocto-poky/meta/classes/grub-efi.bbclass  156
-rw-r--r--  import-layers/yocto-poky/meta/classes/gsettings.bbclass  37
-rw-r--r--  import-layers/yocto-poky/meta/classes/gtk-doc.bbclass  25
-rw-r--r--  import-layers/yocto-poky/meta/classes/gtk-icon-cache.bbclass  64
-rw-r--r--  import-layers/yocto-poky/meta/classes/gtk-immodules-cache.bbclass  87
-rw-r--r--  import-layers/yocto-poky/meta/classes/gummiboot.bbclass  119
-rw-r--r--  import-layers/yocto-poky/meta/classes/gzipnative.bbclass  5
-rw-r--r--  import-layers/yocto-poky/meta/classes/icecc.bbclass  333
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-buildinfo.bbclass  78
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-live.bbclass  284
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-mklibs.bbclass  56
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-prelink.bbclass  54
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-swab.bbclass  94
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-vm.bbclass  175
-rw-r--r--  import-layers/yocto-poky/meta/classes/image.bbclass  547
-rw-r--r--  import-layers/yocto-poky/meta/classes/image_types.bbclass  299
-rw-r--r--  import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass  26
-rw-r--r--  import-layers/yocto-poky/meta/classes/insane.bbclass  1319
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-arch.bbclass  60
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass  235
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-grub.bbclass  91
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass  203
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-uboot.bbclass  20
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-uimage.bbclass  36
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass  376
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel.bbclass  512
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernelsrc.bbclass  10
-rw-r--r--  import-layers/yocto-poky/meta/classes/lib_package.bbclass  7
-rw-r--r--  import-layers/yocto-poky/meta/classes/libc-common.bbclass  43
-rw-r--r--  import-layers/yocto-poky/meta/classes/libc-package.bbclass  391
-rw-r--r--  import-layers/yocto-poky/meta/classes/license.bbclass  664
-rw-r--r--  import-layers/yocto-poky/meta/classes/linux-kernel-base.bbclass  41
-rw-r--r--  import-layers/yocto-poky/meta/classes/linuxloader.bbclass  24
-rw-r--r--  import-layers/yocto-poky/meta/classes/live-vm-common.bbclass  58
-rw-r--r--  import-layers/yocto-poky/meta/classes/logging.bbclass  101
-rw-r--r--  import-layers/yocto-poky/meta/classes/meta.bbclass  4
-rw-r--r--  import-layers/yocto-poky/meta/classes/metadata_scm.bbclass  83
-rw-r--r--  import-layers/yocto-poky/meta/classes/migrate_localcount.bbclass  46
-rw-r--r--  import-layers/yocto-poky/meta/classes/mime.bbclass  56
-rw-r--r--  import-layers/yocto-poky/meta/classes/mirrors.bbclass  70
-rw-r--r--  import-layers/yocto-poky/meta/classes/module-base.bbclass  27
-rw-r--r--  import-layers/yocto-poky/meta/classes/module.bbclass  34
-rw-r--r--  import-layers/yocto-poky/meta/classes/multilib.bbclass  148
-rw-r--r--  import-layers/yocto-poky/meta/classes/multilib_global.bbclass  180
-rw-r--r--  import-layers/yocto-poky/meta/classes/multilib_header.bbclass  54
-rw-r--r--  import-layers/yocto-poky/meta/classes/native.bbclass  179
-rw-r--r--  import-layers/yocto-poky/meta/classes/nativesdk.bbclass  97
-rw-r--r--  import-layers/yocto-poky/meta/classes/nopackages.bbclass  6
-rw-r--r--  import-layers/yocto-poky/meta/classes/npm.bbclass  49
-rw-r--r--  import-layers/yocto-poky/meta/classes/oelint.bbclass  84
-rw-r--r--  import-layers/yocto-poky/meta/classes/own-mirrors.bbclass  13
-rw-r--r--  import-layers/yocto-poky/meta/classes/package.bbclass  2131
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_deb.bbclass  359
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_ipk.bbclass  294
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_rpm.bbclass  774
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_tar.bbclass  69
-rw-r--r--  import-layers/yocto-poky/meta/classes/packagedata.bbclass  34
-rw-r--r--  import-layers/yocto-poky/meta/classes/packagegroup.bbclass  54
-rw-r--r--  import-layers/yocto-poky/meta/classes/patch.bbclass  187
-rw-r--r--  import-layers/yocto-poky/meta/classes/perlnative.bbclass  3
-rw-r--r--  import-layers/yocto-poky/meta/classes/pixbufcache.bbclass  67
-rw-r--r--  import-layers/yocto-poky/meta/classes/pkgconfig.bbclass  2
-rw-r--r--  import-layers/yocto-poky/meta/classes/populate_sdk.bbclass  7
-rw-r--r--  import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass  267
-rw-r--r--  import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass  487
-rw-r--r--  import-layers/yocto-poky/meta/classes/prexport.bbclass  59
-rw-r--r--  import-layers/yocto-poky/meta/classes/primport.bbclass  21
-rw-r--r--  import-layers/yocto-poky/meta/classes/ptest-gnome.bbclass  8
-rw-r--r--  import-layers/yocto-poky/meta/classes/ptest.bbclass  67
-rw-r--r--  import-layers/yocto-poky/meta/classes/python-dir.bbclass  5
-rw-r--r--  import-layers/yocto-poky/meta/classes/python3native.bbclass  7
-rw-r--r--  import-layers/yocto-poky/meta/classes/pythonnative.bbclass  8
-rw-r--r--  import-layers/yocto-poky/meta/classes/qemu.bbclass  59
-rw-r--r--  import-layers/yocto-poky/meta/classes/recipe_sanity.bbclass  166
-rw-r--r--  import-layers/yocto-poky/meta/classes/relocatable.bbclass  7
-rw-r--r--  import-layers/yocto-poky/meta/classes/remove-libtool.bbclass  11
-rw-r--r--  import-layers/yocto-poky/meta/classes/report-error.bbclass  95
-rw-r--r--  import-layers/yocto-poky/meta/classes/rm_work.bbclass  128
-rw-r--r--  import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass  277
-rw-r--r--  import-layers/yocto-poky/meta/classes/rootfs_deb.bbclass  38
-rw-r--r--  import-layers/yocto-poky/meta/classes/rootfs_ipk.bbclass  38
-rw-r--r--  import-layers/yocto-poky/meta/classes/rootfs_rpm.bbclass  46
-rw-r--r--  import-layers/yocto-poky/meta/classes/rootfsdebugfiles.bbclass  36
-rw-r--r--  import-layers/yocto-poky/meta/classes/sanity.bbclass  1015
-rw-r--r--  import-layers/yocto-poky/meta/classes/scons.bbclass  17
-rw-r--r--  import-layers/yocto-poky/meta/classes/sdl.bbclass  6
-rw-r--r--  import-layers/yocto-poky/meta/classes/setuptools.bbclass  8
-rw-r--r--  import-layers/yocto-poky/meta/classes/setuptools3.bbclass  8
-rw-r--r--  import-layers/yocto-poky/meta/classes/sign_ipk.bbclass  52
-rw-r--r--  import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass  43
-rw-r--r--  import-layers/yocto-poky/meta/classes/sign_rpm.bbclass  53
-rw-r--r--  import-layers/yocto-poky/meta/classes/sip.bbclass  61
-rw-r--r--  import-layers/yocto-poky/meta/classes/siteconfig.bbclass  33
-rw-r--r--  import-layers/yocto-poky/meta/classes/siteinfo.bbclass  175
-rw-r--r--  import-layers/yocto-poky/meta/classes/spdx.bbclass  365
-rw-r--r--  import-layers/yocto-poky/meta/classes/sstate.bbclass  993
-rw-r--r--  import-layers/yocto-poky/meta/classes/staging.bbclass  221
-rw-r--r--  import-layers/yocto-poky/meta/classes/syslinux.bbclass  192
-rw-r--r--  import-layers/yocto-poky/meta/classes/systemd.bbclass  207
-rw-r--r--  import-layers/yocto-poky/meta/classes/terminal.bbclass  96
-rw-r--r--  import-layers/yocto-poky/meta/classes/testimage-auto.bbclass  23
-rw-r--r--  import-layers/yocto-poky/meta/classes/testimage.bbclass  263
-rw-r--r--  import-layers/yocto-poky/meta/classes/testsdk.bbclass  142
-rw-r--r--  import-layers/yocto-poky/meta/classes/texinfo.bbclass  15
-rw-r--r--  import-layers/yocto-poky/meta/classes/tinderclient.bbclass  368
-rw-r--r--  import-layers/yocto-poky/meta/classes/toaster.bbclass  397
-rw-r--r--  import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass  157
-rw-r--r--  import-layers/yocto-poky/meta/classes/typecheck.bbclass  12
-rw-r--r--  import-layers/yocto-poky/meta/classes/uboot-config.bbclass  49
-rw-r--r--  import-layers/yocto-poky/meta/classes/uninative.bbclass  140
-rw-r--r--  import-layers/yocto-poky/meta/classes/update-alternatives.bbclass  267
-rw-r--r--  import-layers/yocto-poky/meta/classes/update-rc.d.bbclass  133
-rw-r--r--  import-layers/yocto-poky/meta/classes/upstream-version-is-even.bbclass  5
-rw-r--r--  import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass  302
-rw-r--r--  import-layers/yocto-poky/meta/classes/useradd.bbclass  252
-rw-r--r--  import-layers/yocto-poky/meta/classes/useradd_base.bbclass  151
-rw-r--r--  import-layers/yocto-poky/meta/classes/utility-tasks.bbclass  66
-rw-r--r--  import-layers/yocto-poky/meta/classes/utils.bbclass  380
-rw-r--r--  import-layers/yocto-poky/meta/classes/vala.bbclass  24
-rw-r--r--  import-layers/yocto-poky/meta/classes/waf.bbclass  38
174 files changed, 26076 insertions, 0 deletions
diff --git a/import-layers/yocto-poky/meta/classes/allarch.bbclass b/import-layers/yocto-poky/meta/classes/allarch.bbclass
new file mode 100644
index 000000000..208cde6e5
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/allarch.bbclass
@@ -0,0 +1,47 @@
+#
+# This class is used for architecture-independent recipes/data files (usually scripts)
+#
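+#
+# A minimal usage sketch (hypothetical recipe shipping only scripts):
+#   inherit allarch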
+
+# Expand STAGING_DIR_HOST since for cross-canadian/native/nativesdk, this will
+# point elsewhere after these changes.
+STAGING_DIR_HOST := "${STAGING_DIR_HOST}"
+
+PACKAGE_ARCH = "all"
+
+python () {
+ # Allow this class to be included but overridden - only set
+ # the values if we're still "all" package arch.
+ if d.getVar("PACKAGE_ARCH", True) == "all":
+ # No need for virtual/libc or a cross compiler
+ d.setVar("INHIBIT_DEFAULT_DEPS","1")
+
+ # Set these to a common set of values; we shouldn't be using them other than for WORKDIR directory
+ # naming anyway
+ d.setVar("TARGET_ARCH", "allarch")
+ d.setVar("TARGET_OS", "linux")
+ d.setVar("TARGET_CC_ARCH", "none")
+ d.setVar("TARGET_LD_ARCH", "none")
+ d.setVar("TARGET_AS_ARCH", "none")
+ d.setVar("TARGET_FPU", "")
+ d.setVar("TARGET_PREFIX", "")
+ d.setVar("PACKAGE_EXTRA_ARCHS", "")
+ d.setVar("SDK_ARCH", "none")
+ d.setVar("SDK_CC_ARCH", "none")
+ d.setVar("TARGET_CPPFLAGS", "none")
+ d.setVar("TARGET_CFLAGS", "none")
+ d.setVar("TARGET_CXXFLAGS", "none")
+ d.setVar("TARGET_LDFLAGS", "none")
+
+ # Avoid this being unnecessarily different due to nuances of
+ # the target machine that aren't important for "all" arch
+ # packages.
+ d.setVar("LDFLAGS", "")
+
+ # No need to do shared library processing or debug symbol handling
+ d.setVar("EXCLUDE_FROM_SHLIBS", "1")
+ d.setVar("INHIBIT_PACKAGE_DEBUG_SPLIT", "1")
+ d.setVar("INHIBIT_PACKAGE_STRIP", "1")
+ elif bb.data.inherits_class('packagegroup', d) and not bb.data.inherits_class('nativesdk', d):
+ bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE", True))
+}
+
diff --git a/import-layers/yocto-poky/meta/classes/archiver.bbclass b/import-layers/yocto-poky/meta/classes/archiver.bbclass
new file mode 100644
index 000000000..2f3b278fb
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/archiver.bbclass
@@ -0,0 +1,395 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# This bbclass is used for creating archives of:
+# 1) original (or unpacked) source: ARCHIVER_MODE[src] = "original"
+# 2) patched source: ARCHIVER_MODE[src] = "patched" (default)
+# 3) configured source: ARCHIVER_MODE[src] = "configured"
+# 4) The patches between do_unpack and do_patch:
+# ARCHIVER_MODE[diff] = "1"
+# And you can set the files that you'd like to exclude from the diff:
+# ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
+# 5) The environment data, similar to 'bitbake -e recipe':
+# ARCHIVER_MODE[dumpdata] = "1"
+# 6) The recipe (.bb and .inc): ARCHIVER_MODE[recipe] = "1"
+# 7) Whether to output the .src.rpm package:
+# ARCHIVER_MODE[srpm] = "1"
+# 8) Filter by license: recipes whose license is in
+# COPYLEFT_LICENSE_INCLUDE will be included, and those in
+# COPYLEFT_LICENSE_EXCLUDE will be excluded.
+# COPYLEFT_LICENSE_INCLUDE = 'GPL* LGPL*'
+# COPYLEFT_LICENSE_EXCLUDE = 'CLOSED Proprietary'
+# 9) The recipe type that will be archived:
+# COPYLEFT_RECIPE_TYPES = 'target'
+#
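+# A sketch of enabling this class from local.conf, using only the modes
+# documented above:
+#   INHERIT += "archiver"
+#   ARCHIVER_MODE[src] = "original"
+#   ARCHIVER_MODE[diff] = "1"
+#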
+
+# Don't filter the license by default
+COPYLEFT_LICENSE_INCLUDE ?= ''
+COPYLEFT_LICENSE_EXCLUDE ?= ''
+# Create archive for all the recipe types
+COPYLEFT_RECIPE_TYPES ?= 'target native nativesdk cross crosssdk cross-canadian'
+inherit copyleft_filter
+
+ARCHIVER_MODE[srpm] ?= "0"
+ARCHIVER_MODE[src] ?= "patched"
+ARCHIVER_MODE[diff] ?= "0"
+ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
+ARCHIVER_MODE[dumpdata] ?= "0"
+ARCHIVER_MODE[recipe] ?= "0"
+
+DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
+ARCHIVER_TOPDIR ?= "${WORKDIR}/deploy-sources"
+ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${TARGET_SYS}/${PF}/"
+ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"
+
+do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}"
+do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}"
+do_ar_original[dirs] = "${ARCHIVER_OUTDIR} ${ARCHIVER_WORKDIR}"
+do_deploy_archives[dirs] = "${WORKDIR}"
+do_deploy_all_archives[dirs] = "${WORKDIR}"
+
+# This is a convenience for the shell scripts to use
+
+
+python () {
+ pn = d.getVar('PN', True)
+ assume_provided = (d.getVar("ASSUME_PROVIDED", True) or "").split()
+ if pn in assume_provided:
+ for p in d.getVar("PROVIDES", True).split():
+ if p != pn:
+ pn = p
+ break
+
+ included, reason = copyleft_should_include(d)
+ if not included:
+ bb.debug(1, 'archiver: %s is excluded: %s' % (pn, reason))
+ return
+ else:
+ bb.debug(1, 'archiver: %s is included: %s' % (pn, reason))
+
+ # We just archive gcc-source for all the gcc-related recipes
+ if d.getVar('BPN', True) in ['gcc', 'libgcc'] \
+ and not pn.startswith('gcc-source'):
+ bb.debug(1, 'archiver: %s is excluded, covered by gcc-source' % pn)
+ return
+
+ ar_src = d.getVarFlag('ARCHIVER_MODE', 'src', True)
+ ar_dumpdata = d.getVarFlag('ARCHIVER_MODE', 'dumpdata', True)
+ ar_recipe = d.getVarFlag('ARCHIVER_MODE', 'recipe', True)
+
+ if ar_src == "original":
+ d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_original' % pn)
+ elif ar_src == "patched":
+ d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_patched' % pn)
+ elif ar_src == "configured":
+ # We can't use "addtask do_ar_configured after do_configure" since it
+ # will cause the deptask of do_populate_sysroot to run no matter which
+ # archives we need, so we add the depends here.
+
+ # There is a corner case with "gcc-source-${PV}" recipes, they don't have
+ # the "do_configure" task, so we need to use "do_preconfigure"
+ if pn.startswith("gcc-source-"):
+ d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_preconfigure' % pn)
+ else:
+ d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_configure' % pn)
+ d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_configured' % pn)
+
+ elif ar_src:
+ bb.fatal("Invalid ARCHIVER_MODE[src]: %s" % ar_src)
+
+ if ar_dumpdata == "1":
+ d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_dumpdata' % pn)
+
+ if ar_recipe == "1":
+ d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_recipe' % pn)
+
+ # Output the srpm package
+ ar_srpm = d.getVarFlag('ARCHIVER_MODE', 'srpm', True)
+ if ar_srpm == "1":
+ if d.getVar('PACKAGES', True) != '' and d.getVar('IMAGE_PKGTYPE', True) == 'rpm':
+ d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_package_write_rpm' % pn)
+ if ar_dumpdata == "1":
+ d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_dumpdata' % pn)
+ if ar_recipe == "1":
+ d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_recipe' % pn)
+ if ar_src == "original":
+ d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_original' % pn)
+ elif ar_src == "patched":
+ d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_patched' % pn)
+ elif ar_src == "configured":
+ d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_configured' % pn)
+}
+
+# Takes all the sources for a recipe and puts them in WORKDIR/archiver-work/.
+# Files in SRC_URI are copied directly, anything that's a directory
+# (e.g. git repositories) is "unpacked" and then put into a tarball.
+python do_ar_original() {
+
+ import shutil, tarfile, tempfile
+
+ if d.getVarFlag('ARCHIVER_MODE', 'src', True) != "original":
+ return
+
+ ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
+ bb.note('Archiving the original source...')
+ fetch = bb.fetch2.Fetch([], d)
+ for url in fetch.urls:
+ local = fetch.localpath(url).rstrip("/")
+ if os.path.isfile(local):
+ shutil.copy(local, ar_outdir)
+ elif os.path.isdir(local):
+ tmpdir = tempfile.mkdtemp(dir=d.getVar('ARCHIVER_WORKDIR', True))
+ fetch.unpack(tmpdir, (url,))
+ create_tarball(d, tmpdir + '/.', '', ar_outdir)
+
+ # Emit patch series files for 'original'
+ bb.note('Writing patch series files...')
+ for patch in src_patches(d):
+ _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
+ patchdir = parm.get('patchdir')
+ if patchdir:
+ series = os.path.join(ar_outdir, 'series.subdir.%s' % patchdir.replace('/', '_'))
+ else:
+ series = os.path.join(ar_outdir, 'series')
+
+ with open(series, 'a') as s:
+ s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel']))
+}
+
+python do_ar_patched() {
+
+ if d.getVarFlag('ARCHIVER_MODE', 'src', True) != 'patched':
+ return
+
+ # Get the ARCHIVER_OUTDIR before we reset the WORKDIR
+ ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
+ ar_workdir = d.getVar('ARCHIVER_WORKDIR', True)
+ bb.note('Archiving the patched source...')
+ d.setVar('WORKDIR', ar_workdir)
+ create_tarball(d, d.getVar('S', True), 'patched', ar_outdir)
+}
+
+python do_ar_configured() {
+ import shutil
+
+ ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
+ if d.getVarFlag('ARCHIVER_MODE', 'src', True) == 'configured':
+ bb.note('Archiving the configured source...')
+ pn = d.getVar('PN', True)
+ # "gcc-source-${PV}" recipes don't have "do_configure"
+ # task, so we need to run "do_preconfigure" instead
+ if pn.startswith("gcc-source-"):
+ d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
+ bb.build.exec_func('do_preconfigure', d)
+
+ # libtool-native's do_configure will remove
+ # ${STAGING_DATADIR}/aclocal/libtool.m4, so we can't re-run
+ # do_configure; instead we archive the already-configured ${S}.
+ elif pn != 'libtool-native':
+ # Change the WORKDIR to make do_configure run in another dir.
+ d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
+ if bb.data.inherits_class('kernel-yocto', d):
+ bb.build.exec_func('do_kernel_configme', d)
+ if bb.data.inherits_class('cmake', d):
+ bb.build.exec_func('do_generate_toolchain_file', d)
+ prefuncs = d.getVarFlag('do_configure', 'prefuncs', True)
+ for func in (prefuncs or '').split():
+ if func != "sysroot_cleansstate":
+ bb.build.exec_func(func, d)
+ bb.build.exec_func('do_configure', d)
+ postfuncs = d.getVarFlag('do_configure', 'postfuncs', True)
+ for func in (postfuncs or '').split():
+ if func != "do_qa_configure":
+ bb.build.exec_func(func, d)
+ srcdir = d.getVar('S', True)
+ builddir = d.getVar('B', True)
+ if srcdir != builddir:
+ if os.path.exists(builddir):
+ oe.path.copytree(builddir, os.path.join(srcdir, \
+ 'build.%s.ar_configured' % d.getVar('PF', True)))
+ create_tarball(d, srcdir, 'configured', ar_outdir)
+}
+
+def create_tarball(d, srcdir, suffix, ar_outdir):
+ """
+ create the tarball from srcdir
+ """
+ import tarfile
+
+ # Make sure we are only creating a single tarball for gcc sources
+ if (d.getVar('SRC_URI', True) == ""):
+ return
+
+ bb.utils.mkdirhier(ar_outdir)
+ if suffix:
+ filename = '%s-%s.tar.gz' % (d.getVar('PF', True), suffix)
+ else:
+ filename = '%s.tar.gz' % d.getVar('PF', True)
+ tarname = os.path.join(ar_outdir, filename)
+
+ srcdir = srcdir.rstrip('/')
+ dirname = os.path.dirname(srcdir)
+ basename = os.path.basename(srcdir)
+ os.chdir(dirname)
+ bb.note('Creating %s' % tarname)
+ tar = tarfile.open(tarname, 'w:gz')
+ tar.add(basename)
+ tar.close()
+
+# creating .diff.gz between source.orig and source
+def create_diff_gz(d, src_orig, src, ar_outdir):
+
+ import subprocess
+
+ if not os.path.isdir(src) or not os.path.isdir(src_orig):
+ return
+
+ # diff's --exclude option can't exclude files by path, so we copy
+ # the patched source and remove the files that we'd like to
+ # exclude.
+ src_patched = src + '.patched'
+ oe.path.copyhardlinktree(src, src_patched)
+ for i in d.getVarFlag('ARCHIVER_MODE', 'diff-exclude', True).split():
+ bb.utils.remove(os.path.join(src_orig, i), recurse=True)
+ bb.utils.remove(os.path.join(src_patched, i), recurse=True)
+
+ dirname = os.path.dirname(src)
+ basename = os.path.basename(src)
+ os.chdir(dirname)
+ out_file = os.path.join(ar_outdir, '%s-diff.gz' % d.getVar('PF', True))
+ diff_cmd = 'diff -Naur %s.orig %s.patched | gzip -c > %s' % (basename, basename, out_file)
+ subprocess.call(diff_cmd, shell=True)
+ bb.utils.remove(src_patched, recurse=True)
+
+# Run do_unpack and do_patch
+python do_unpack_and_patch() {
+ if d.getVarFlag('ARCHIVER_MODE', 'src', True) not in \
+ [ 'patched', 'configured'] and \
+ d.getVarFlag('ARCHIVER_MODE', 'diff', True) != '1':
+ return
+ ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
+ ar_workdir = d.getVar('ARCHIVER_WORKDIR', True)
+
+ # The kernel class functions require it to be on work-shared, so we don't change WORKDIR
+ if not bb.data.inherits_class('kernel-yocto', d):
+ # Change the WORKDIR to make do_unpack and do_patch run in another dir.
+ d.setVar('WORKDIR', ar_workdir)
+
+ # Changing 'WORKDIR' also changes 'B'; create the 'B' directory since
+ # later tasks may require it (for example, some recipes' do_patch
+ # requires 'B' to exist).
+ bb.utils.mkdirhier(d.getVar('B', True))
+
+ bb.build.exec_func('do_unpack', d)
+
+ # Save the original source for creating the patches
+ if d.getVarFlag('ARCHIVER_MODE', 'diff', True) == '1':
+ src = d.getVar('S', True).rstrip('/')
+ src_orig = '%s.orig' % src
+ oe.path.copytree(src, src_orig)
+
+ # Make sure gcc and kernel sources are patched only once
+ if not ((d.getVar('SRC_URI', True) == "" or bb.data.inherits_class('kernel-yocto', d))):
+ bb.build.exec_func('do_patch', d)
+
+ # Create the patches
+ if d.getVarFlag('ARCHIVER_MODE', 'diff', True) == '1':
+ bb.note('Creating diff gz...')
+ create_diff_gz(d, src_orig, src, ar_outdir)
+ bb.utils.remove(src_orig, recurse=True)
+}
+
+python do_ar_recipe () {
+ """
+ archive the recipe, including .bb and .inc.
+ """
+ import re
+ import shutil
+
+ require_re = re.compile( r"require\s+(.+)" )
+ include_re = re.compile( r"include\s+(.+)" )
+ bbfile = d.getVar('FILE', True)
+ outdir = os.path.join(d.getVar('WORKDIR', True), \
+ '%s-recipe' % d.getVar('PF', True))
+ bb.utils.mkdirhier(outdir)
+ shutil.copy(bbfile, outdir)
+
+ pn = d.getVar('PN', True)
+ bbappend_files = d.getVar('BBINCLUDED', True).split()
+ # If the recipe name is aa, we need to match files like aa.bbappend and aa_1.1.bbappend
+ # Files like aa1.bbappend or aa1_1.1.bbappend must be excluded.
+ bbappend_re = re.compile( r".*/%s_[^/]*\.bbappend$" %pn)
+ bbappend_re1 = re.compile( r".*/%s\.bbappend$" %pn)
+ for file in bbappend_files:
+ if bbappend_re.match(file) or bbappend_re1.match(file):
+ shutil.copy(file, outdir)
+
+ dirname = os.path.dirname(bbfile)
+ bbpath = '%s:%s' % (dirname, d.getVar('BBPATH', True))
+ f = open(bbfile, 'r')
+ for line in f.readlines():
+ incfile = None
+ if require_re.match(line):
+ incfile = require_re.match(line).group(1)
+ elif include_re.match(line):
+ incfile = include_re.match(line).group(1)
+ if incfile:
+ incfile = bb.data.expand(incfile, d)
+ incfile = bb.utils.which(bbpath, incfile)
+ if incfile:
+ shutil.copy(incfile, outdir)
+
+ create_tarball(d, outdir, 'recipe', d.getVar('ARCHIVER_OUTDIR', True))
+ bb.utils.remove(outdir, recurse=True)
+}
+
+python do_dumpdata () {
+ """
+ dump environment data to ${PF}-showdata.dump
+ """
+
+ dumpfile = os.path.join(d.getVar('ARCHIVER_OUTDIR', True), \
+ '%s-showdata.dump' % d.getVar('PF', True))
+ bb.note('Dumping metadata into %s' % dumpfile)
+ with open(dumpfile, "w") as f:
+ # emit variables and shell functions
+ bb.data.emit_env(f, d, True)
+ # emit the metadata which isn't valid shell
+ for e in d.keys():
+ if d.getVarFlag(e, "python", False):
+ f.write("\npython %s () {\n%s}\n" % (e, d.getVar(e, False)))
+}
+
+SSTATETASKS += "do_deploy_archives"
+do_deploy_archives () {
+ echo "Deploying source archive files from ${ARCHIVER_TOPDIR} to ${DEPLOY_DIR_SRC}."
+}
+python do_deploy_archives_setscene () {
+ sstate_setscene(d)
+}
+do_deploy_archives[dirs] = "${ARCHIVER_TOPDIR}"
+do_deploy_archives[sstate-inputdirs] = "${ARCHIVER_TOPDIR}"
+do_deploy_archives[sstate-outputdirs] = "${DEPLOY_DIR_SRC}"
+addtask do_deploy_archives_setscene
+
+addtask do_ar_original after do_unpack
+addtask do_unpack_and_patch after do_patch
+addtask do_ar_patched after do_unpack_and_patch
+addtask do_ar_configured after do_unpack_and_patch
+addtask do_dumpdata
+addtask do_ar_recipe
+addtask do_deploy_archives before do_build
+
+addtask do_deploy_all_archives after do_deploy_archives
+do_deploy_all_archives[recrdeptask] = "do_deploy_archives"
+do_deploy_all_archives[recideptask] = "do_${BB_DEFAULT_TASK}"
+do_deploy_all_archives() {
+ :
+}
+
+python () {
+ # Add tasks in the correct order, specifically for linux-yocto to avoid a race condition
+ if bb.data.inherits_class('kernel-yocto', d):
+ bb.build.addtask('do_kernel_configme', 'do_configure', 'do_unpack_and_patch', d)
+}
diff --git a/import-layers/yocto-poky/meta/classes/autotools-brokensep.bbclass b/import-layers/yocto-poky/meta/classes/autotools-brokensep.bbclass
new file mode 100644
index 000000000..71cf97a39
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/autotools-brokensep.bbclass
@@ -0,0 +1,5 @@
+# Autotools class for recipes where a separate build dir doesn't work.
+# Ideally we should fix the software so it does work; standard autotools
+# supports this.
+inherit autotools
+B = "${S}"
diff --git a/import-layers/yocto-poky/meta/classes/autotools.bbclass b/import-layers/yocto-poky/meta/classes/autotools.bbclass
new file mode 100644
index 000000000..6649f5df7
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/autotools.bbclass
@@ -0,0 +1,322 @@
+def autotools_dep_prepend(d):
+ if d.getVar('INHIBIT_AUTOTOOLS_DEPS', True):
+ return ''
+
+ pn = d.getVar('PN', True)
+ deps = ''
+
+ if pn in ['autoconf-native', 'automake-native', 'help2man-native']:
+ return deps
+ deps += 'autoconf-native automake-native '
+
+ if pn not in ['libtool', 'libtool-native'] and not pn.endswith("libtool-cross"):
+ deps += 'libtool-native '
+ if not bb.data.inherits_class('native', d) \
+ and not bb.data.inherits_class('nativesdk', d) \
+ and not bb.data.inherits_class('cross', d) \
+ and not d.getVar('INHIBIT_DEFAULT_DEPS', True):
+ deps += 'libtool-cross '
+
+ return deps + 'gnu-config-native '
+
+EXTRA_OEMAKE = ""
+
+DEPENDS_prepend = "${@autotools_dep_prepend(d)} "
+
+inherit siteinfo
+
+# Space-separated list of shell scripts with variables defined to supply test
+# results for autoconf tests we cannot run at build time.
+export CONFIG_SITE = "${@siteinfo_get_files(d, False)}"
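+# A site file is just a shell fragment of cached autoconf results, e.g. a
+# line such as (illustrative): ac_cv_func_malloc_0_nonnull=yes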
+
+acpaths = "default"
+EXTRA_AUTORECONF = "--exclude=autopoint"
+
+export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
+
+# When building tools for use at build time, it's recommended that the build
+# system use these variables when cross-compiling.
+# (http://sources.redhat.com/autobook/autobook/autobook_270.html)
+export CPP_FOR_BUILD = "${BUILD_CPP}"
+export CPPFLAGS_FOR_BUILD = "${BUILD_CPPFLAGS}"
+
+export CC_FOR_BUILD = "${BUILD_CC}"
+export CFLAGS_FOR_BUILD = "${BUILD_CFLAGS}"
+
+export CXX_FOR_BUILD = "${BUILD_CXX}"
+export CXXFLAGS_FOR_BUILD="${BUILD_CXXFLAGS}"
+
+export LD_FOR_BUILD = "${BUILD_LD}"
+export LDFLAGS_FOR_BUILD = "${BUILD_LDFLAGS}"
+
+def append_libtool_sysroot(d):
+ # Only supply libtool sysroot option for non-native packages
+ if not bb.data.inherits_class('native', d):
+ return '--with-libtool-sysroot=${STAGING_DIR_HOST}'
+ return ""
+
+CONFIGUREOPTS = " --build=${BUILD_SYS} \
+ --host=${HOST_SYS} \
+ --target=${TARGET_SYS} \
+ --prefix=${prefix} \
+ --exec_prefix=${exec_prefix} \
+ --bindir=${bindir} \
+ --sbindir=${sbindir} \
+ --libexecdir=${libexecdir} \
+ --datadir=${datadir} \
+ --sysconfdir=${sysconfdir} \
+ --sharedstatedir=${sharedstatedir} \
+ --localstatedir=${localstatedir} \
+ --libdir=${libdir} \
+ --includedir=${includedir} \
+ --oldincludedir=${oldincludedir} \
+ --infodir=${infodir} \
+ --mandir=${mandir} \
+ --disable-silent-rules \
+ ${CONFIGUREOPT_DEPTRACK} \
+ ${@append_libtool_sysroot(d)}"
+CONFIGUREOPT_DEPTRACK ?= "--disable-dependency-tracking"
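+# Recipes add their own configure arguments via EXTRA_OECONF, which
+# oe_runconf below passes after CONFIGUREOPTS, e.g. (hypothetical):
+#   EXTRA_OECONF += "--disable-nls"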
+
+AUTOTOOLS_SCRIPT_PATH ?= "${S}"
+CONFIGURE_SCRIPT ?= "${AUTOTOOLS_SCRIPT_PATH}/configure"
+
+AUTOTOOLS_AUXDIR ?= "${AUTOTOOLS_SCRIPT_PATH}"
+
+oe_runconf () {
+ # Use relative path to avoid buildpaths in files
+ cfgscript_name="`basename ${CONFIGURE_SCRIPT}`"
+ cfgscript=`python -c "import os; print os.path.relpath(os.path.dirname('${CONFIGURE_SCRIPT}'), '.')"`/$cfgscript_name
+ if [ -x "$cfgscript" ] ; then
+ bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
+ if ! ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"; then
+ bbnote "The following config.log files may provide further information."
+ bbnote `find ${B} -ignore_readdir_race -type f -name config.log`
+ bbfatal_log "configure failed"
+ fi
+ else
+ bbfatal "no configure script found at $cfgscript"
+ fi
+}
+
+CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
+
+autotools_preconfigure() {
+ if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
+ if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
+ if [ "${S}" != "${B}" ]; then
+ echo "Previously configured separate build directory detected, cleaning ${B}"
+ rm -rf ${B}
+ mkdir -p ${B}
+ else
+ # At least remove the .la files since automake won't automatically
+ # regenerate them even if CFLAGS/LDFLAGS are different
+ cd ${S}
+ if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
+ oe_runmake clean
+ fi
+ find ${S} -ignore_readdir_race -name \*.la -delete
+ fi
+ fi
+ fi
+}
+
+autotools_postconfigure(){
+ if [ -n "${CONFIGURESTAMPFILE}" ]; then
+ mkdir -p `dirname ${CONFIGURESTAMPFILE}`
+ echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
+ fi
+}
+
+EXTRACONFFUNCS ??= ""
+
+do_configure[prefuncs] += "autotools_preconfigure autotools_copy_aclocals ${EXTRACONFFUNCS}"
+do_configure[postfuncs] += "autotools_postconfigure"
+
+ACLOCALDIR = "${B}/aclocal-copy"
+
+python autotools_copy_aclocals () {
+ s = d.getVar("AUTOTOOLS_SCRIPT_PATH", True)
+ if not os.path.exists(s + "/configure.in") and not os.path.exists(s + "/configure.ac"):
+ if not d.getVar("AUTOTOOLS_COPYACLOCAL", False):
+ return
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ #bb.warn(str(taskdepdata))
+ pn = d.getVar("PN", True)
+ aclocaldir = d.getVar("ACLOCALDIR", True)
+ oe.path.remove(aclocaldir)
+ bb.utils.mkdirhier(aclocaldir)
+ start = None
+ configuredeps = []
+ # Detect bitbake -b usage
+ # Everything but quilt-native would have dependencies
+ nodeps = (pn != "quilt-native")
+
+ for dep in taskdepdata:
+ data = taskdepdata[dep]
+ if data[1] == "do_configure" and data[0] == pn:
+ start = dep
+ if not nodeps and start:
+ break
+ if nodeps and data[0] != pn:
+ nodeps = False
+ if start is None:
+ bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
+
+ # We need to find configure tasks which are either from <target> -> <target>
+ # or <native> -> <native> but not <target> -> <native> unless they're direct
+ # dependencies. This mirrors what would get restored from sstate.
+ done = [start]
+ next = [start]
+ while next:
+ new = []
+ for dep in next:
+ data = taskdepdata[dep]
+ for datadep in data[3]:
+ if datadep in done:
+ continue
+ if (not data[0].endswith("-native")) and taskdepdata[datadep][0].endswith("-native") and dep != start:
+ continue
+ done.append(datadep)
+ new.append(datadep)
+ if taskdepdata[datadep][1] == "do_configure":
+ configuredeps.append(taskdepdata[datadep][0])
+ next = new
+
+ #configuredeps2 = []
+ #for dep in taskdepdata:
+ # data = taskdepdata[dep]
+ # if data[1] == "do_configure" and data[0] != pn:
+ # configuredeps2.append(data[0])
+ #configuredeps.sort()
+ #configuredeps2.sort()
+ #bb.warn(str(configuredeps))
+ #bb.warn(str(configuredeps2))
+
+ cp = []
+ if nodeps:
+ bb.warn("autotools: Unable to find task dependencies, -b being used? Pulling in all m4 files")
+ for l in [d.expand("${STAGING_DATADIR_NATIVE}/aclocal/"), d.expand("${STAGING_DATADIR}/aclocal/")]:
+ cp.extend(os.path.join(l, f) for f in os.listdir(l))
+
+ for c in configuredeps:
+ if c.endswith("-native"):
+ manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${BUILD_ARCH}-%s.populate_sysroot" % c)
+ elif c.startswith("nativesdk-"):
+ manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${SDK_ARCH}_${SDK_OS}-%s.populate_sysroot" % c)
+ elif "-cross-" in c or "-crosssdk" in c:
+ continue
+ else:
+ manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${MACHINE}-%s.populate_sysroot" % c)
+ try:
+ f = open(manifest, "r")
+ for l in f:
+ if "/aclocal/" in l and l.strip().endswith(".m4"):
+ cp.append(l.strip())
+ elif "config_site.d/" in l:
+ cp.append(l.strip())
+ except:
+ bb.warn("%s not found" % manifest)
+
+ for c in cp:
+ t = os.path.join(aclocaldir, os.path.basename(c))
+ if not os.path.exists(t):
+ os.symlink(c, t)
+
+ d.setVar("CONFIG_SITE", siteinfo_get_files(d, False))
+}
+autotools_copy_aclocals[vardepsexclude] += "MACHINE SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
+
+autotools_do_configure() {
+ # WARNING: gross hack follows:
+ # An autotools built package generally needs these scripts, however only
+ # automake or libtoolize actually install the current versions of them.
+ # This is a problem in builds that do not use libtool or automake, in the case
+ # where we -need- the latest version of these scripts. e.g. running a build
+ # for a package whose autotools are old, on an x86_64 machine, which the old
+ # config.sub does not support. Work around this by installing them manually
+ # regardless.
+ for ac in `find ${S} -ignore_readdir_race -name configure.in -o -name configure.ac`; do
+ rm -f `dirname $ac`/configure
+ done
+ if [ -e ${AUTOTOOLS_SCRIPT_PATH}/configure.in -o -e ${AUTOTOOLS_SCRIPT_PATH}/configure.ac ]; then
+ olddir=`pwd`
+ cd ${AUTOTOOLS_SCRIPT_PATH}
+ ACLOCAL="aclocal --system-acdir=${ACLOCALDIR}/"
+ if [ x"${acpaths}" = xdefault ]; then
+ acpaths=
+ for i in `find ${AUTOTOOLS_SCRIPT_PATH} -ignore_readdir_race -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
+ grep -v 'acinclude.m4' | grep -v 'aclocal-copy' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
+ acpaths="$acpaths -I $i"
+ done
+ else
+ acpaths="${acpaths}"
+ fi
+ AUTOV=`automake --version | sed -e '1{s/.* //;s/\.[0-9]\+$//};q'`
+ automake --version
+ echo "AUTOV is $AUTOV"
+ if [ -d ${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV ]; then
+ ACLOCAL="$ACLOCAL --automake-acdir=${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV"
+ fi
+ # autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
+ # like it was auto-generated. Work around this by blowing it away
+ # by hand, unless the package specifically asked not to run aclocal.
+ if ! echo ${EXTRA_AUTORECONF} | grep -q "aclocal"; then
+ rm -f aclocal.m4
+ fi
+ if [ -e configure.in ]; then
+ CONFIGURE_AC=configure.in
+ else
+ CONFIGURE_AC=configure.ac
+ fi
+ if grep "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
+ if grep "sed.*POTFILES" $CONFIGURE_AC >/dev/null; then
+ : do nothing -- we still have an old unmodified configure.ac
+ else
+ bbnote Executing glib-gettextize --force --copy
+ echo "no" | glib-gettextize --force --copy
+ fi
+ elif grep "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
+ # We'd call gettextize here if it wasn't so broken...
+ cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
+ if [ -d ${S}/po/ ]; then
+ cp -f ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${S}/po/
+ if [ ! -e ${S}/po/remove-potcdate.sin ]; then
+ cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sin ${S}/po/
+ fi
+ fi
+ for i in gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4; do
+ for j in `find ${S} -ignore_readdir_race -name $i | grep -v aclocal-copy`; do
+ rm $j
+ done
+ done
+ fi
+ mkdir -p m4
+ if grep "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC >/dev/null; then
+ bbnote Executing intltoolize --copy --force --automake
+ intltoolize --copy --force --automake
+ fi
+ bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
+ ACLOCAL="$ACLOCAL" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed."
+ cd $olddir
+ fi
+ if [ -e ${CONFIGURE_SCRIPT} ]; then
+ oe_runconf
+ else
+ bbnote "nothing to configure"
+ fi
+}
+
+autotools_do_install() {
+ oe_runmake 'DESTDIR=${D}' install
+ # Info dir listing isn't interesting at this point so remove it if it exists.
+ if [ -e "${D}${infodir}/dir" ]; then
+ rm -f ${D}${infodir}/dir
+ fi
+}
+
+inherit siteconfig
+
+EXPORT_FUNCTIONS do_configure do_install
+
+B = "${WORKDIR}/build"
diff --git a/import-layers/yocto-poky/meta/classes/base.bbclass b/import-layers/yocto-poky/meta/classes/base.bbclass
new file mode 100644
index 000000000..a7ca3a667
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/base.bbclass
@@ -0,0 +1,667 @@
+BB_DEFAULT_TASK ?= "build"
+CLASSOVERRIDE ?= "class-target"
+
+inherit patch
+inherit staging
+
+inherit mirrors
+inherit utils
+inherit utility-tasks
+inherit metadata_scm
+inherit logging
+
+OE_IMPORTS += "os sys time oe.path oe.utils oe.data oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath"
+OE_IMPORTS[type] = "list"
+
+def oe_import(d):
+ import sys
+
+ bbpath = d.getVar("BBPATH", True).split(":")
+ sys.path[0:0] = [os.path.join(dir, "lib") for dir in bbpath]
+
+ def inject(name, value):
+ """Make a python object accessible from the metadata"""
+ if hasattr(bb.utils, "_context"):
+ bb.utils._context[name] = value
+ else:
+ __builtins__[name] = value
+
+ import oe.data
+ for toimport in oe.data.typed_value("OE_IMPORTS", d):
+ imported = __import__(toimport)
+ inject(toimport.split(".", 1)[0], imported)
+
+ return ""
+
+# We need the oe module name space early (before INHERITs get added)
+OE_IMPORTED := "${@oe_import(d)}"
+
+def lsb_distro_identifier(d):
+ adjust = d.getVar('LSB_DISTRO_ADJUST', True)
+ adjust_func = None
+ if adjust:
+ try:
+ adjust_func = globals()[adjust]
+ except KeyError:
+ pass
+ return oe.lsb.distro_identifier(adjust_func)
+
+die() {
+ bbfatal_log "$*"
+}
+
+oe_runmake_call() {
+ bbnote ${MAKE} ${EXTRA_OEMAKE} "$@"
+ ${MAKE} ${EXTRA_OEMAKE} "$@"
+}
+
+oe_runmake() {
+ oe_runmake_call "$@" || die "oe_runmake failed"
+}
+
+
+def base_dep_prepend(d):
+ #
+ # Ideally this will check a flag so we will operate properly in
+ # the case where host == build == target; for now we don't handle
+ # that case though.
+ #
+
+ deps = ""
+ # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
+ # we need that built is the responsibility of the patch function / class, not
+ # the application.
+ if not d.getVar('INHIBIT_DEFAULT_DEPS', False):
+ if (d.getVar('HOST_SYS', True) != d.getVar('BUILD_SYS', True)):
+ deps += " virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc "
+ return deps
+
+BASEDEPENDS = "${@base_dep_prepend(d)}"
+
+DEPENDS_prepend="${BASEDEPENDS} "
+
+FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
+# THISDIR only works properly with immediate expansion as it has to run
+# in the context of the location it's used in (:=)
+THISDIR = "${@os.path.dirname(d.getVar('FILE', True))}"
+
+def extra_path_elements(d):
+ path = ""
+ elements = (d.getVar('EXTRANATIVEPATH', True) or "").split()
+ for e in elements:
+ path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
+ return path
+
+PATH_prepend = "${@extra_path_elements(d)}"
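+# e.g. a recipe needing an extra native tool on its PATH might set
+# (hypothetical): EXTRANATIVEPATH += "chrpath-native"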
+
+def get_lic_checksum_file_list(d):
+ filelist = []
+ lic_files = d.getVar("LIC_FILES_CHKSUM", True) or ''
+ tmpdir = d.getVar("TMPDIR", True)
+
+ urls = lic_files.split()
+ for url in urls:
+ # We only care about items that are absolute paths since
+ # any others should be covered by SRC_URI.
+ try:
+ path = bb.fetch.decodeurl(url)[2]
+ if path[0] == '/':
+ if path.startswith(tmpdir):
+ continue
+ filelist.append(path + ":" + str(os.path.exists(path)))
+ except bb.fetch.MalformedUrl:
+ raise bb.build.FuncFailed(d.getVar('PN', True) + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
+ return " ".join(filelist)
+
+addtask fetch
+do_fetch[dirs] = "${DL_DIR}"
+do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
+do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
+do_fetch[vardeps] += "SRCREV"
+python base_do_fetch() {
+
+ src_uri = (d.getVar('SRC_URI', True) or "").split()
+ if len(src_uri) == 0:
+ return
+
+ try:
+ fetcher = bb.fetch2.Fetch(src_uri, d)
+ fetcher.download()
+ except bb.fetch2.BBFetchException as e:
+ raise bb.build.FuncFailed(e)
+}
+
+addtask unpack after do_fetch
+do_unpack[dirs] = "${WORKDIR}"
+python base_do_unpack() {
+ src_uri = (d.getVar('SRC_URI', True) or "").split()
+ if len(src_uri) == 0:
+ return
+
+ rootdir = d.getVar('WORKDIR', True)
+
+ # Ensure that we clean up ${S}/patches
+ # TODO: Investigate if we can remove
+ # the entire ${S} in this case.
+ s_dir = d.getVar('S', True)
+ p_dir = os.path.join(s_dir, 'patches')
+ bb.utils.remove(p_dir, True)
+
+ try:
+ fetcher = bb.fetch2.Fetch(src_uri, d)
+ fetcher.unpack(rootdir)
+ except bb.fetch2.BBFetchException as e:
+ raise bb.build.FuncFailed(e)
+}
+
+def pkgarch_mapping(d):
+ # Compatibility mappings of TUNE_PKGARCH (opt in)
+ if d.getVar("PKGARCHCOMPAT_ARMV7A", True):
+ if d.getVar("TUNE_PKGARCH", True) == "armv7a-vfp-neon":
+ d.setVar("TUNE_PKGARCH", "armv7a")
+
+def get_layers_branch_rev(d):
+ layers = (d.getVar("BBLAYERS", True) or "").split()
+ layers_branch_rev = ["%-17s = \"%s:%s\"" % (os.path.basename(i), \
+ base_get_metadata_git_branch(i, None).strip(), \
+ base_get_metadata_git_revision(i, None)) \
+ for i in layers]
+ i = len(layers_branch_rev)-1
+ p1 = layers_branch_rev[i].find("=")
+ s1 = layers_branch_rev[i][p1:]
+ while i > 0:
+ p2 = layers_branch_rev[i-1].find("=")
+ s2= layers_branch_rev[i-1][p2:]
+ if s1 == s2:
+ layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2]
+ i -= 1
+ else:
+ i -= 1
+ p1 = layers_branch_rev[i].find("=")
+ s1= layers_branch_rev[i][p1:]
+ return layers_branch_rev
+
+
+BUILDCFG_FUNCS ??= "buildcfg_vars get_layers_branch_rev buildcfg_neededvars"
+BUILDCFG_FUNCS[type] = "list"
+
+def buildcfg_vars(d):
+ statusvars = oe.data.typed_value('BUILDCFG_VARS', d)
+ for var in statusvars:
+ value = d.getVar(var, True)
+ if value is not None:
+ yield '%-17s = "%s"' % (var, value)
+
+def buildcfg_neededvars(d):
+ needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d)
+ pesteruser = []
+ for v in needed_vars:
+ val = d.getVar(v, True)
+ if not val or val == 'INVALID':
+ pesteruser.append(v)
+
+ if pesteruser:
+ bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))
+
+addhandler base_eventhandler
+base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.runqueue.sceneQueueComplete bb.event.RecipeParsed"
+python base_eventhandler() {
+ import bb.runqueue
+
+ if isinstance(e, bb.event.ConfigParsed):
+ if not e.data.getVar("NATIVELSBSTRING", False):
+ e.data.setVar("NATIVELSBSTRING", lsb_distro_identifier(e.data))
+ e.data.setVar('BB_VERSION', bb.__version__)
+ pkgarch_mapping(e.data)
+ oe.utils.features_backfill("DISTRO_FEATURES", e.data)
+ oe.utils.features_backfill("MACHINE_FEATURES", e.data)
+
+ if isinstance(e, bb.event.BuildStarted):
+ localdata = bb.data.createCopy(e.data)
+ bb.data.update_data(localdata)
+ statuslines = []
+ for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
+ g = globals()
+ if func not in g:
+ bb.warn("Build configuration function '%s' does not exist" % func)
+ else:
+ flines = g[func](localdata)
+ if flines:
+ statuslines.extend(flines)
+
+ statusheader = e.data.getVar('BUILDCFG_HEADER', True)
+ if statusheader:
+ bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
+
+ # This code is to silence warnings where the SDK variables overwrite the
+ # target ones and we'd see duplicate key names overwriting each other
+ # for various PREFERRED_PROVIDERS
+ if isinstance(e, bb.event.RecipePreFinalise):
+ if e.data.getVar("TARGET_PREFIX", True) == e.data.getVar("SDK_PREFIX", True):
+ e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils")
+ e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc-initial")
+ e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc")
+ e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++")
+ e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs")
+
+ if isinstance(e, bb.runqueue.sceneQueueComplete):
+ completions = e.data.expand("${STAGING_DIR}/sstatecompletions")
+ if os.path.exists(completions):
+ cmds = set()
+ with open(completions, "r") as f:
+ cmds = set(f)
+ e.data.setVar("completion_function", "\n".join(cmds))
+ e.data.setVarFlag("completion_function", "func", "1")
+ bb.debug(1, "Executing SceneQueue Completion commands: %s" % "\n".join(cmds))
+ bb.build.exec_func("completion_function", e.data)
+ os.remove(completions)
+
+ if isinstance(e, bb.event.RecipeParsed):
+ #
+ # If we have multiple providers of virtual/X and a PREFERRED_PROVIDER_virtual/X is set,
+ # skip parsing for all the other providers which will mean they get uninstalled from the
+ # sysroot since they're now "unreachable". This makes switching virtual/kernel work in
+ # particular.
+ #
+ pn = d.getVar('PN', True)
+ source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
+ if not source_mirror_fetch:
+ provs = (d.getVar("PROVIDES", True) or "").split()
+ multiwhitelist = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
+ for p in provs:
+ if p.startswith("virtual/") and p not in multiwhitelist:
+ profprov = d.getVar("PREFERRED_PROVIDER_" + p, True)
+ if profprov and pn != profprov:
+ raise bb.parse.SkipPackage("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
+}
+
+CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
+CLEANBROKEN = "0"
+
+addtask configure after do_patch
+do_configure[dirs] = "${B}"
+do_configure[deptask] = "do_populate_sysroot"
+base_do_configure() {
+ if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
+ if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
+ cd ${B}
+ if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
+ oe_runmake clean
+ fi
+ find ${B} -ignore_readdir_race -name \*.la -delete
+ fi
+ fi
+ if [ -n "${CONFIGURESTAMPFILE}" ]; then
+ mkdir -p `dirname ${CONFIGURESTAMPFILE}`
+ echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
+ fi
+}
+
+addtask compile after do_configure
+do_compile[dirs] = "${B}"
+base_do_compile() {
+ if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
+ oe_runmake || die "make failed"
+ else
+ bbnote "nothing to compile"
+ fi
+}
+
+addtask install after do_compile
+do_install[dirs] = "${D} ${B}"
+# Remove and re-create ${D} so that it is guaranteed to be empty
+do_install[cleandirs] = "${D}"
+
+base_do_install() {
+ :
+}
+
+base_do_package() {
+ :
+}
+
+addtask build after do_populate_sysroot
+do_build[noexec] = "1"
+do_build[recrdeptask] += "do_deploy"
+do_build () {
+ :
+}
+
+def set_packagetriplet(d):
+ archs = []
+ tos = []
+ tvs = []
+
+ archs.append(d.getVar("PACKAGE_ARCHS", True).split())
+ tos.append(d.getVar("TARGET_OS", True))
+ tvs.append(d.getVar("TARGET_VENDOR", True))
+
+ def settriplet(d, varname, archs, tos, tvs):
+ triplets = []
+ for i in range(len(archs)):
+ for arch in archs[i]:
+ triplets.append(arch + tvs[i] + "-" + tos[i])
+ triplets.reverse()
+ d.setVar(varname, " ".join(triplets))
+
+ settriplet(d, "PKGTRIPLETS", archs, tos, tvs)
+
+ variants = d.getVar("MULTILIB_VARIANTS", True) or ""
+ for item in variants.split():
+ localdata = bb.data.createCopy(d)
+ overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
+ localdata.setVar("OVERRIDES", overrides)
+ bb.data.update_data(localdata)
+
+ archs.append(localdata.getVar("PACKAGE_ARCHS", True).split())
+ tos.append(localdata.getVar("TARGET_OS", True))
+ tvs.append(localdata.getVar("TARGET_VENDOR", True))
+
+ settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs)
+
+python () {
+ import string, re
+
+ # Handle PACKAGECONFIG
+ #
+ # These take the form:
+ #
+ # PACKAGECONFIG ??= "<default options>"
+ # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends"
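+ #
+ # For instance, a recipe might declare (hypothetical values):
+ #
+ # PACKAGECONFIG ??= "gtk"
+ # PACKAGECONFIG[gtk] = "--enable-gtk,--disable-gtk,gtk+"
+ #
+ # so that enabling "gtk" adds --enable-gtk to the configure arguments,
+ # and gtk+ to DEPENDS.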
+ pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
+ if pkgconfigflags:
+ pkgconfig = (d.getVar('PACKAGECONFIG', True) or "").split()
+ pn = d.getVar("PN", True)
+
+ mlprefix = d.getVar("MLPREFIX", True)
+
+ def expandFilter(appends, extension, prefix):
+ appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
+ newappends = []
+ for a in appends:
+ if a.endswith("-native") or ("-cross-" in a):
+ newappends.append(a)
+ elif a.startswith("virtual/"):
+ subs = a.split("/", 1)[1]
+ if subs.startswith(prefix):
+ newappends.append(a + extension)
+ else:
+ newappends.append("virtual/" + prefix + subs + extension)
+ else:
+ if a.startswith(prefix):
+ newappends.append(a + extension)
+ else:
+ newappends.append(prefix + a + extension)
+ return newappends
+
+ def appendVar(varname, appends):
+ if not appends:
+ return
+ if varname.find("DEPENDS") != -1:
+ if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d) :
+ appends = expandFilter(appends, "", "nativesdk-")
+ elif bb.data.inherits_class('native', d):
+ appends = expandFilter(appends, "-native", "")
+ elif mlprefix:
+ appends = expandFilter(appends, "", mlprefix)
+ varname = d.expand(varname)
+ d.appendVar(varname, " " + " ".join(appends))
+
+ extradeps = []
+ extrardeps = []
+ extraconf = []
+ for flag, flagval in sorted(pkgconfigflags.items()):
+ items = flagval.split(",")
+ num = len(items)
+ if num > 4:
+ bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend can be specified!"
+ % (d.getVar('PN', True), flag))
+
+ if flag in pkgconfig:
+ if num >= 3 and items[2]:
+ extradeps.append(items[2])
+ if num >= 4 and items[3]:
+ extrardeps.append(items[3])
+ if num >= 1 and items[0]:
+ extraconf.append(items[0])
+ elif num >= 2 and items[1]:
+ extraconf.append(items[1])
+ appendVar('DEPENDS', extradeps)
+ appendVar('RDEPENDS_${PN}', extrardeps)
+ appendVar('PACKAGECONFIG_CONFARGS', extraconf)
+
+ # TODO: once all recipes/classes abusing EXTRA_OECONF
+ # to get PACKAGECONFIG options are fixed to use PACKAGECONFIG_CONFARGS,
+ # move this appendVar to autotools.bbclass.
+ if not bb.data.inherits_class('cmake', d):
+ appendVar('EXTRA_OECONF', extraconf)
+
+ pn = d.getVar('PN', True)
+ license = d.getVar('LICENSE', True)
+ if license == "INVALID":
+ bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)
+
+ if bb.data.inherits_class('license', d):
+ check_license_format(d)
+ unmatched_license_flag = check_license_flags(d)
+ if unmatched_license_flag:
+ bb.debug(1, "Skipping %s because it has a restricted license not"
+ " whitelisted in LICENSE_FLAGS_WHITELIST" % pn)
+ raise bb.parse.SkipPackage("because it has a restricted license not"
+ " whitelisted in LICENSE_FLAGS_WHITELIST")
+
+ # If we're building a target package we need to use fakeroot (pseudo)
+ # in order to capture permissions, owners, groups and special files
+ if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
+ d.setVarFlag('do_unpack', 'umask', '022')
+ d.setVarFlag('do_configure', 'umask', '022')
+ d.setVarFlag('do_compile', 'umask', '022')
+ d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
+ d.setVarFlag('do_install', 'fakeroot', '1')
+ d.setVarFlag('do_install', 'umask', '022')
+ d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
+ d.setVarFlag('do_package', 'fakeroot', '1')
+ d.setVarFlag('do_package', 'umask', '022')
+ d.setVarFlag('do_package_setscene', 'fakeroot', '1')
+ d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
+ d.setVarFlag('do_devshell', 'fakeroot', '1')
+ d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
+
+ need_machine = d.getVar('COMPATIBLE_MACHINE', True)
+ if need_machine:
+ import re
+ compat_machines = (d.getVar('MACHINEOVERRIDES', True) or "").split(":")
+ for m in compat_machines:
+ if re.match(need_machine, m):
+ break
+ else:
+ raise bb.parse.SkipPackage("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE', True))
+
+ source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', 0)
+ if not source_mirror_fetch:
+ need_host = d.getVar('COMPATIBLE_HOST', True)
+ if need_host:
+ import re
+ this_host = d.getVar('HOST_SYS', True)
+ if not re.match(need_host, this_host):
+ raise bb.parse.SkipPackage("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)
+
+ bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE', True) or "").split()
+
+ check_license = False if pn.startswith("nativesdk-") else True
+ for t in ["-native", "-cross-${TARGET_ARCH}", "-cross-initial-${TARGET_ARCH}",
+ "-crosssdk-${SDK_ARCH}", "-crosssdk-initial-${SDK_ARCH}",
+ "-cross-canadian-${TRANSLATED_TARGET_ARCH}"]:
+ if pn.endswith(d.expand(t)):
+ check_license = False
+ if pn.startswith("gcc-source-"):
+ check_license = False
+
+ if check_license and bad_licenses:
+ bad_licenses = expand_wildcard_licenses(d, bad_licenses)
+
+ whitelist = []
+ incompatwl = []
+ for lic in bad_licenses:
+ spdx_license = return_spdx(d, lic)
+ for w in ["LGPLv2_WHITELIST_", "WHITELIST_"]:
+ whitelist.extend((d.getVar(w + lic, True) or "").split())
+ if spdx_license:
+ whitelist.extend((d.getVar(w + spdx_license, True) or "").split())
+ '''
+ We need to track what we are whitelisting and why. If pn is
+ incompatible, we need to be able to note that the image that
+ is created may in fact contain incompatible licenses despite
+ INCOMPATIBLE_LICENSE being set.
+ '''
+ incompatwl.extend((d.getVar(w + lic, True) or "").split())
+ if spdx_license:
+ incompatwl.extend((d.getVar(w + spdx_license, True) or "").split())
+
+ if pn not in whitelist:
+ pkgs = d.getVar('PACKAGES', True).split()
+ skipped_pkgs = []
+ unskipped_pkgs = []
+ for pkg in pkgs:
+ if incompatible_license(d, bad_licenses, pkg):
+ skipped_pkgs.append(pkg)
+ else:
+ unskipped_pkgs.append(pkg)
+ all_skipped = skipped_pkgs and not unskipped_pkgs
+ if unskipped_pkgs:
+ for pkg in skipped_pkgs:
+ bb.debug(1, "SKIPPING the package " + pkg + " at do_rootfs because it's " + license)
+ mlprefix = d.getVar('MLPREFIX', True)
+ d.setVar('LICENSE_EXCLUSION-' + mlprefix + pkg, 1)
+ for pkg in unskipped_pkgs:
+ bb.debug(1, "INCLUDING the package " + pkg)
+ elif all_skipped or incompatible_license(d, bad_licenses):
+ bb.debug(1, "SKIPPING recipe %s because it's %s" % (pn, license))
+ raise bb.parse.SkipPackage("incompatible with license %s" % license)
+ elif pn in whitelist:
+ if pn in incompatwl:
+ bb.note("INCLUDING " + pn + " as buildable despite INCOMPATIBLE_LICENSE because it has been whitelisted")
+
+ needsrcrev = False
+ srcuri = d.getVar('SRC_URI', True)
+ for uri in srcuri.split():
+ (scheme, _ , path) = bb.fetch.decodeurl(uri)[:3]
+
+ # HTTP/FTP use the wget fetcher
+ if scheme in ("http", "https", "ftp"):
+ d.appendVarFlag('do_fetch', 'depends', ' wget-native:do_populate_sysroot')
+
+ # Svn packages should DEPEND on subversion-native
+ if scheme == "svn":
+ needsrcrev = True
+ d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')
+
+ # Git packages should DEPEND on git-native
+ elif scheme in ("git", "gitsm"):
+ needsrcrev = True
+ d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')
+
+ # Mercurial packages should DEPEND on mercurial-native
+ elif scheme == "hg":
+ needsrcrev = True
+ d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')
+
+ # OSC packages should DEPEND on osc-native
+ elif scheme == "osc":
+ d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')
+
+ elif scheme == "npm":
+ d.appendVarFlag('do_fetch', 'depends', ' nodejs-native:do_populate_sysroot')
+
+ # *.lz4 should DEPEND on lz4-native for unpacking
+ if path.endswith('.lz4'):
+ d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')
+
+ # *.lz should DEPEND on lzip-native for unpacking
+ elif path.endswith('.lz'):
+ d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')
+
+ # *.xz should DEPEND on xz-native for unpacking
+ elif path.endswith('.xz'):
+ d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
+
+ # .zip should DEPEND on unzip-native for unpacking
+ elif path.endswith('.zip'):
+ d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')
+
+ # file is needed by rpm2cpio.sh
+ elif path.endswith('.src.rpm'):
+ d.appendVarFlag('do_unpack', 'depends', ' file-native:do_populate_sysroot')
+
+ if needsrcrev:
+ d.setVar("SRCPV", "${@bb.fetch2.get_srcrev(d)}")
+
+ set_packagetriplet(d)
+
+ # 'multimachine' handling
+ mach_arch = d.getVar('MACHINE_ARCH', True)
+ pkg_arch = d.getVar('PACKAGE_ARCH', True)
+
+ if (pkg_arch == mach_arch):
+ # Already machine specific - nothing further to do
+ return
+
+ #
+ # We always try to scan SRC_URI for urls with machine overrides
+ # unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
+ #
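+ # For example (hypothetical), a recipe that ships
+ #   files/${MACHINE}/defconfig
+ # and fetches it with SRC_URI += "file://defconfig" is caught by the
+ # scan below and has PACKAGE_ARCH promoted to MACHINE_ARCH.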
+ override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH', True)
+ if override != '0':
+ paths = []
+ fpaths = (d.getVar('FILESPATH', True) or '').split(':')
+ machine = d.getVar('MACHINE', True)
+ for p in fpaths:
+ if os.path.basename(p) == machine and os.path.isdir(p):
+ paths.append(p)
+
+ if len(paths) != 0:
+ for s in srcuri.split():
+ if not s.startswith("file://"):
+ continue
+ fetcher = bb.fetch2.Fetch([s], d)
+ local = fetcher.localpath(s)
+ for mp in paths:
+ if local.startswith(mp):
+ #bb.note("overriding PACKAGE_ARCH from %s to %s for %s" % (pkg_arch, mach_arch, pn))
+ d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
+ return
+
+ packages = d.getVar('PACKAGES', True).split()
+ for pkg in packages:
+ pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg, True)
+
+ # We could look for != PACKAGE_ARCH here but how to choose
+ # if multiple differences are present?
+ # Look through PACKAGE_ARCHS for the priority order?
+ if pkgarch and pkgarch == mach_arch:
+ d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
+ bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN", True))
+}
+
+addtask cleansstate after do_clean
+python do_cleansstate() {
+ sstate_clean_cachefiles(d)
+}
+addtask cleanall after do_cleansstate
+do_cleansstate[nostamp] = "1"
+
+python do_cleanall() {
+ src_uri = (d.getVar('SRC_URI', True) or "").split()
+ if len(src_uri) == 0:
+ return
+
+ try:
+ fetcher = bb.fetch2.Fetch(src_uri, d)
+ fetcher.clean()
+ except bb.fetch2.BBFetchException, e:
+ raise bb.build.FuncFailed(e)
+}
+do_cleanall[nostamp] = "1"
+
+
+EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install do_package
diff --git a/import-layers/yocto-poky/meta/classes/bash-completion.bbclass b/import-layers/yocto-poky/meta/classes/bash-completion.bbclass
new file mode 100644
index 000000000..74a878edf
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/bash-completion.bbclass
@@ -0,0 +1,5 @@
+PACKAGES += "${PN}-bash-completion"
+
+FILES_${PN}-bash-completion = "${datadir}/bash-completion ${sysconfdir}/bash_completion.d"
+
+RDEPENDS_${PN}-bash-completion = "bash-completion"
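+
+# Usage note: a recipe that installs completion files into the paths above
+# only needs to add (hypothetical recipe snippet):
+#   inherit bash-completion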
diff --git a/import-layers/yocto-poky/meta/classes/bin_package.bbclass b/import-layers/yocto-poky/meta/classes/bin_package.bbclass
new file mode 100644
index 000000000..a52b75be5
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/bin_package.bbclass
@@ -0,0 +1,36 @@
+#
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Common variables and tasks for binary package recipes.
+# Basic principle:
+# * The files have been unpacked to ${S} by base.bbclass
+# * Skip do_configure and do_compile
+# * Use do_install to install the files to ${D}
+#
+# Note:
+# The "subdir" parameter in the SRC_URI is useful when the input package
+# is rpm, ipk, deb and so on, for example:
+#
+# SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
+#
+# Then the files would be unpacked to ${WORKDIR}/foo-1.0, otherwise
+# they would be in ${WORKDIR}.
+#
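+# A minimal recipe using this class might look like (hypothetical):
+#
+#   SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
+#   inherit bin_package
+#
+# do_install then copies the unpacked tree from ${S} into ${D}.
+#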
+
+# Skip the unwanted steps
+do_configure[noexec] = "1"
+do_compile[noexec] = "1"
+
+# Install the files to ${D}
+bin_package_do_install () {
+ # Do it carefully
+ [ -d "${S}" ] || exit 1
+ cd ${S} || exit 1
+ tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
+ | tar --no-same-owner -xpf - -C ${D}
+}
+
+FILES_${PN} = "/"
+
+EXPORT_FUNCTIONS do_install
diff --git a/import-layers/yocto-poky/meta/classes/binconfig-disabled.bbclass b/import-layers/yocto-poky/meta/classes/binconfig-disabled.bbclass
new file mode 100644
index 000000000..602a669aa
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/binconfig-disabled.bbclass
@@ -0,0 +1,29 @@
+#
+# Class to disable binconfig files instead of installing them
+#
+
+# The list of scripts which should be disabled.
+BINCONFIG ?= ""
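+
+# Example usage (hypothetical recipe):
+#   inherit binconfig-disabled
+#   BINCONFIG = "${bindir}/foo-config"
+# The named scripts are replaced below with stubs that fail loudly,
+# steering consumers towards pkg-config instead.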
+
+FILES_${PN}-dev += "${bindir}/*-config"
+
+do_install_append () {
+ for x in ${BINCONFIG}; do
+ # Make the disabled script emit invalid parameters for those configure
+ # scripts which call it without checking the return code.
+ echo "#!/bin/sh" > ${D}$x
+ echo "echo 'ERROR: $x should not be used, use an alternative such as pkg-config' >&2" >> ${D}$x
+ echo "echo '--should-not-have-used-$x'" >> ${D}$x
+ echo "exit 1" >> ${D}$x
+ done
+}
+
+SYSROOT_PREPROCESS_FUNCS += "binconfig_disabled_sysroot_preprocess"
+
+binconfig_disabled_sysroot_preprocess () {
+ for x in ${BINCONFIG}; do
+ configname=`basename $x`
+ install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
+ install ${D}$x ${SYSROOT_DESTDIR}${bindir_crossscripts}
+ done
+}
diff --git a/import-layers/yocto-poky/meta/classes/binconfig.bbclass b/import-layers/yocto-poky/meta/classes/binconfig.bbclass
new file mode 100644
index 000000000..cbc417360
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/binconfig.bbclass
@@ -0,0 +1,63 @@
+FILES_${PN}-dev += "${bindir}/*-config"
+
+# The namespaces can clash here, hence the two-step replace
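+# (e.g. "-L${libdir}" is first rewritten to "-LOELIBDIR" and only then to
+# "-L${STAGING_LIBDIR}", so text introduced by an earlier substitution can
+# never be matched again by a later expression)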
+def get_binconfig_mangle(d):
+ s = "-e ''"
+ if not bb.data.inherits_class('native', d):
+ optional_quote = r"\(\"\?\)"
+ s += " -e 's:=%s${base_libdir}:=\\1OEBASELIBDIR:;'" % optional_quote
+ s += " -e 's:=%s${libdir}:=\\1OELIBDIR:;'" % optional_quote
+ s += " -e 's:=%s${includedir}:=\\1OEINCDIR:;'" % optional_quote
+ s += " -e 's:=%s${datadir}:=\\1OEDATADIR:'" % optional_quote
+ s += " -e 's:=%s${prefix}/:=\\1OEPREFIX/:'" % optional_quote
+ s += " -e 's:=%s${exec_prefix}/:=\\1OEEXECPREFIX/:'" % optional_quote
+ s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
+ s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
+ s += " -e 's:OEBASELIBDIR:${STAGING_BASELIBDIR}:;'"
+ s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
+ s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
+ s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
+ s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${prefix}:'"
+ s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${exec_prefix}:'"
+ s += " -e 's:-I${WORKDIR}:-I${STAGING_INCDIR}:'"
+ s += " -e 's:-L${WORKDIR}:-L${STAGING_LIBDIR}:'"
+ if bb.data.getVar("OE_BINCONFIG_EXTRA_MANGLE", d):
+ s += bb.data.getVar("OE_BINCONFIG_EXTRA_MANGLE", d)
+
+ return s
+
+BINCONFIG_GLOB ?= "*-config"
+
+PACKAGE_PREPROCESS_FUNCS += "binconfig_package_preprocess"
+
+binconfig_package_preprocess () {
+ for config in `find ${PKGD} -name '${BINCONFIG_GLOB}'`; do
+ sed -i \
+ -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
+ -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
+ -e 's:${STAGING_INCDIR}:${includedir}:g;' \
+ -e 's:${STAGING_DATADIR}:${datadir}:' \
+ -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
+ $config
+ done
+ for lafile in `find ${PKGD} -name "*.la"` ; do
+ sed -i \
+ -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
+ -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
+ -e 's:${STAGING_INCDIR}:${includedir}:g;' \
+ -e 's:${STAGING_DATADIR}:${datadir}:' \
+ -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
+ $lafile
+ done
+}
+
+SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
+
+binconfig_sysroot_preprocess () {
+ for config in `find ${S} -name '${BINCONFIG_GLOB}'` `find ${B} -name '${BINCONFIG_GLOB}'`; do
+ configname=`basename $config`
+ install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
+ sed ${@get_binconfig_mangle(d)} $config > ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
+ chmod u+x ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
+ done
+}
diff --git a/import-layers/yocto-poky/meta/classes/blacklist.bbclass b/import-layers/yocto-poky/meta/classes/blacklist.bbclass
new file mode 100644
index 000000000..a0141a82c
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/blacklist.bbclass
@@ -0,0 +1,45 @@
+# Anonymous support class, originally from Angstrom
+#
+# To use the blacklist, a distribution should include this
+# class in the INHERIT_DISTRO
+#
+# ANGSTROM_BLACKLIST is no longer used; instead, use a table of
+# recipes in PNBLACKLIST
+#
+# Features:
+#
+# * To add a package to the blacklist, set:
+# PNBLACKLIST[pn] = "message"
+#
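+# For example (hypothetical):
+# PNBLACKLIST[foo] = "Fails to build with this toolchain"
+#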
+
+# Cope with PNBLACKLIST flags for multilib case
+addhandler blacklist_multilib_eventhandler
+blacklist_multilib_eventhandler[eventmask] = "bb.event.ConfigParsed"
+python blacklist_multilib_eventhandler() {
+ multilibs = e.data.getVar('MULTILIBS', True)
+ if not multilibs:
+ return
+
+ # This block has been copied from base.bbclass, so keep it in sync
+ prefixes = []
+ for ext in multilibs.split():
+ eext = ext.split(':')
+ if len(eext) > 1 and eext[0] == 'multilib':
+ prefixes.append(eext[1])
+
+ blacklists = e.data.getVarFlags('PNBLACKLIST') or {}
+ for pkg, reason in blacklists.items():
+ if pkg.endswith(("-native", "-crosssdk")) or pkg.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in pkg:
+ continue
+ for p in prefixes:
+ newpkg = p + "-" + pkg
+ if not e.data.getVarFlag('PNBLACKLIST', newpkg, True):
+ e.data.setVarFlag('PNBLACKLIST', newpkg, reason)
+}
+
+python () {
+ blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN', True), True)
+
+ if blacklist:
+ raise bb.parse.SkipPackage("Recipe is blacklisted: %s" % (blacklist))
+}
diff --git a/import-layers/yocto-poky/meta/classes/bluetooth.bbclass b/import-layers/yocto-poky/meta/classes/bluetooth.bbclass
new file mode 100644
index 000000000..f88b4ae5b
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/bluetooth.bbclass
@@ -0,0 +1,14 @@
+# Avoid code duplication in bluetooth-dependent recipes.
+
+# Define a variable that expands to the recipe (package) providing core
+# bluetooth support on the platform:
+# "" if bluetooth is not in DISTRO_FEATURES
+# else "bluez5" if bluez5 is in DISTRO_FEATURES
+# else "bluez4"
+
+# Use this with:
+# inherit bluetooth
+# PACKAGECONFIG ??= "${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', '${BLUEZ}', '', d)}"
+# PACKAGECONFIG[bluez4] = "--enable-bluez4,--disable-bluez4,bluez4"
+
+BLUEZ ?= "${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', bb.utils.contains('DISTRO_FEATURES', 'bluez5', 'bluez5', 'bluez4', d), '', d)}"
diff --git a/import-layers/yocto-poky/meta/classes/bugzilla.bbclass b/import-layers/yocto-poky/meta/classes/bugzilla.bbclass
new file mode 100644
index 000000000..3fc895642
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/bugzilla.bbclass
@@ -0,0 +1,187 @@
+#
+# Small event handler to automatically open URLs and file
+# bug reports at a bugzilla of your choice.
+# It uses the XML-RPC interface, so you must have it enabled.
+#
+# Before using it you must define the BUGZILLA_USER and BUGZILLA_PASS credentials,
+# BUGZILLA_XMLRPC - URI of xmlrpc.cgi,
+# BUGZILLA_PRODUCT, BUGZILLA_COMPONENT - a place in BTS for build bugs
+# BUGZILLA_VERSION - version against which to report new bugs
+#
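+# A minimal configuration might look like (hypothetical values):
+# BUGZILLA_USER = "autobuilder@example.com"
+# BUGZILLA_PASS = "secret"
+# BUGZILLA_XMLRPC = "http://bugzilla.example.com/xmlrpc.cgi"
+# BUGZILLA_PRODUCT = "Builds"
+# BUGZILLA_COMPONENT = "general"
+# BUGZILLA_VERSION = "1.0"
+#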
+
+def bugzilla_find_bug_report(debug_file, server, args, bugname):
+ args['summary'] = bugname
+ bugs = server.Bug.search(args)
+ if len(bugs['bugs']) == 0:
+ print >> debug_file, "Bugs not found"
+ return (False,None)
+ else: # silently pick the first result
+ print >> debug_file, "Result of bug search is "
+ print >> debug_file, bugs
+ status = bugs['bugs'][0]['status']
+ id = bugs['bugs'][0]['id']
+ return (status not in ["CLOSED", "RESOLVED", "VERIFIED"], id)
+
+def bugzilla_file_bug(debug_file, server, args, name, text, version):
+ args['summary'] = name
+ args['comment'] = text
+ args['version'] = version
+ args['op_sys'] = 'Linux'
+ args['platform'] = 'Other'
+ args['severity'] = 'normal'
+ args['priority'] = 'Normal'
+ try:
+ return server.Bug.create(args)['id']
+ except Exception, e:
+ print >> debug_file, repr(e)
+ return None
+
+def bugzilla_reopen_bug(debug_file, server, args, bug_number):
+ args['ids'] = [bug_number]
+ args['status'] = "CONFIRMED"
+ try:
+ server.Bug.update(args)
+ return True
+ except Exception, e:
+ print >> debug_file, repr(e)
+ return False
+
+def bugzilla_create_attachment(debug_file, server, args, bug_number, text, file_name, log, logdescription):
+ args['ids'] = [bug_number]
+ args['file_name'] = file_name
+ args['summary'] = logdescription
+ args['content_type'] = "text/plain"
+ args['data'] = log
+ args['comment'] = text
+ try:
+ server.Bug.add_attachment(args)
+ return True
+ except Exception, e:
+ print >> debug_file, repr(e)
+ return False
+
+def bugzilla_add_comment(debug_file, server, args, bug_number, text):
+ args['id'] = bug_number
+ args['comment'] = text
+ try:
+ server.Bug.add_comment(args)
+ return True
+ except Exception, e:
+ print >> debug_file, repr(e)
+ return False
+
+addhandler bugzilla_eventhandler
+bugzilla_eventhandler[eventmask] = "bb.event.MsgNote bb.build.TaskFailed"
+python bugzilla_eventhandler() {
+ import glob
+ import base64    # used below to build the proxy authorization header
+ import xmlrpclib, httplib
+
+ class ProxiedTransport(xmlrpclib.Transport):
+ def __init__(self, proxy, use_datetime = 0):
+ xmlrpclib.Transport.__init__(self, use_datetime)
+ self.proxy = proxy
+ self.user = None
+ self.password = None
+
+ def set_user(self, user):
+ self.user = user
+
+ def set_password(self, password):
+ self.password = password
+
+ def make_connection(self, host):
+ self.realhost = host
+ return httplib.HTTP(self.proxy)
+
+ def send_request(self, connection, handler, request_body):
+ connection.putrequest("POST", 'http://%s%s' % (self.realhost, handler))
+ if self.user != None:
+ if self.password != None:
+ auth = "%s:%s" % (self.user, self.password)
+ else:
+ auth = self.user
+ connection.putheader("Proxy-authorization", "Basic " + base64.encodestring(auth))
+
+ event = e
+ data = e.data
+ name = bb.event.getName(event)
+ if name == "MsgNote":
+ # avoid recursion
+ return
+
+ if name == "TaskFailed":
+ xmlrpc = data.getVar("BUGZILLA_XMLRPC", True)
+ user = data.getVar("BUGZILLA_USER", True)
+ passw = data.getVar("BUGZILLA_PASS", True)
+ product = data.getVar("BUGZILLA_PRODUCT", True)
+ compon = data.getVar("BUGZILLA_COMPONENT", True)
+ version = data.getVar("BUGZILLA_VERSION", True)
+
+ proxy = data.getVar('http_proxy', True )
+ if (proxy):
+ import urllib2
+ s, u, p, hostport = urllib2._parse_proxy(proxy)
+ transport = ProxiedTransport(hostport)
+ else:
+ transport = None
+
+ server = xmlrpclib.ServerProxy(xmlrpc, transport=transport, verbose=0)
+ args = {
+ 'Bugzilla_login': user,
+ 'Bugzilla_password': passw,
+ 'product': product,
+ 'component': compon}
+
+ # evil hack to figure out what is going on
+ debug_file = open(os.path.join(data.getVar("TMPDIR", True),"..","bugzilla-log"),"a")
+
+ file = None
+ bugname = "%(package)s-%(pv)s-autobuild" % { "package" : data.getVar("PN", True),
+ "pv" : data.getVar("PV", True),
+ }
+ log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task))
+ text = "The %s step in %s failed at %s for machine %s" % (e.task, data.getVar("PN", True), data.getVar('DATETIME', True), data.getVar( 'MACHINE', True ) )
+ if len(log_file) != 0:
+ print >> debug_file, "Adding log file %s" % log_file[0]
+ file = open(log_file[0], 'r')
+ log = file.read()
+ file.close()
+ else:
+ print >> debug_file, "No log file found for the glob"
+ log = None
+
+ (bug_open, bug_number) = bugzilla_find_bug_report(debug_file, server, args.copy(), bugname)
+ print >> debug_file, "Bug is open: %s and bug number: %s" % (bug_open, bug_number)
+
+ # The bug is present and still open, attach an error log
+ if not bug_number:
+ bug_number = bugzilla_file_bug(debug_file, server, args.copy(), bugname, text, version)
+ if not bug_number:
+ print >> debug_file, "Couldn't acquire a new bug_numer, filing a bugreport failed"
+ else:
+ print >> debug_file, "The new bug_number: '%s'" % bug_number
+ elif not bug_open:
+ if not bugzilla_reopen_bug(debug_file, server, args.copy(), bug_number):
+ print >> debug_file, "Failed to reopen the bug #%s" % bug_number
+ else:
+ print >> debug_file, "Reopened the bug #%s" % bug_number
+
+ if bug_number and log:
+ print >> debug_file, "The bug is known as '%s'" % bug_number
+ desc = "Build log for machine %s" % (data.getVar('MACHINE', True))
+ if not bugzilla_create_attachment(debug_file, server, args.copy(), bug_number, text, log_file[0], log, desc):
+ print >> debug_file, "Failed to attach the build log for bug #%s" % bug_number
+ else:
+ print >> debug_file, "Created an attachment for '%s' '%s' '%s'" % (product, compon, bug_number)
+ else:
+ print >> debug_file, "Not trying to create an attachment for bug #%s" % bug_number
+ if not bugzilla_add_comment(debug_file, server, args.copy(), bug_number, text):
+ print >> debug_file, "Failed to add a comment with the build log for bug #%s" % bug_number
+ else:
+ print >> debug_file, "Added a comment for '%s' '%s' '%s'" % (product, compon, bug_number)
+
+ # store bug number for oestats-client
+ if bug_number:
+ data.setVar('OESTATS_BUG_NUMBER', bug_number)
+}
+
diff --git a/import-layers/yocto-poky/meta/classes/buildhistory.bbclass b/import-layers/yocto-poky/meta/classes/buildhistory.bbclass
new file mode 100644
index 000000000..581d53269
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/buildhistory.bbclass
@@ -0,0 +1,856 @@
+#
+# Records history of build output in order to detect regressions
+#
+# Based in part on testlab.bbclass and packagehistory.bbclass
+#
+# Copyright (C) 2011-2016 Intel Corporation
+# Copyright (C) 2007-2011 Koen Kooi <koen@openembedded.org>
+#
+
+BUILDHISTORY_FEATURES ?= "image package sdk"
+BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory"
+BUILDHISTORY_DIR_IMAGE = "${BUILDHISTORY_DIR}/images/${MACHINE_ARCH}/${TCLIBC}/${IMAGE_BASENAME}"
+BUILDHISTORY_DIR_PACKAGE = "${BUILDHISTORY_DIR}/packages/${MULTIMACH_TARGET_SYS}/${PN}"
+
+# Setting this to non-empty will remove the old content of the buildhistory as part of
+# the current bitbake invocation and replace it with information about what was built
+# during the build.
+#
+# This is meant to be used in continuous integration (CI) systems when invoking bitbake
+# for full world builds. The effect in that case is that information about packages
+# that no longer get built also gets removed from the buildhistory, which is not
+# the case otherwise.
+#
+# The advantage over manually cleaning the buildhistory outside of bitbake is that
+# the "version-going-backwards" check still works. When relying on that, be careful
+# about failed world builds: they will lead to incomplete information in the
+# buildhistory because information about packages that could not be built will
+# also get removed. A CI system should handle that by discarding the buildhistory
+# of failed builds.
+#
+# The expected usage is via auto.conf, but passing via the command line also works
+# with: BB_ENV_EXTRAWHITE=BUILDHISTORY_RESET BUILDHISTORY_RESET=1
+BUILDHISTORY_RESET ?= ""
+
+BUILDHISTORY_OLD_DIR = "${BUILDHISTORY_DIR}/${@ "old" if "${BUILDHISTORY_RESET}" else ""}"
+BUILDHISTORY_OLD_DIR_PACKAGE = "${BUILDHISTORY_OLD_DIR}/packages/${MULTIMACH_TARGET_SYS}/${PN}"
+BUILDHISTORY_DIR_SDK = "${BUILDHISTORY_DIR}/sdk/${SDK_NAME}${SDK_EXT}/${IMAGE_BASENAME}"
+BUILDHISTORY_IMAGE_FILES ?= "/etc/passwd /etc/group"
+BUILDHISTORY_SDK_FILES ?= "conf/local.conf conf/bblayers.conf conf/auto.conf conf/locked-sigs.inc conf/devtool.conf"
+BUILDHISTORY_COMMIT ?= "0"
+BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
+BUILDHISTORY_PUSH_REPO ?= ""
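+
+# Typical enablement in local.conf (illustrative):
+#   INHERIT += "buildhistory"
+#   BUILDHISTORY_COMMIT = "1"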
+
+SSTATEPOSTINSTFUNCS_append = " buildhistory_emit_pkghistory"
+# We want to avoid influencing the signatures of sstate tasks - first the function itself:
+sstate_install[vardepsexclude] += "buildhistory_emit_pkghistory"
+# then the value added to SSTATEPOSTINSTFUNCS:
+SSTATEPOSTINSTFUNCS[vardepvalueexclude] .= "| buildhistory_emit_pkghistory"
+
+# All items except those listed here will be removed from a recipe's
+# build history directory by buildhistory_emit_pkghistory(). This is
+# necessary because some of these items (package directories, files that
+# we no longer emit) might be obsolete.
+#
+# When extending build history, derive your class from buildhistory.bbclass
+# and extend this list here with the additional files created by the derived
+# class.
+BUILDHISTORY_PRESERVE = "latest latest_srcrev"
+
+#
+# Write out metadata about this package for comparison when writing future packages
+#
+python buildhistory_emit_pkghistory() {
+ if d.getVar('BB_CURRENTTASK', True) not in ['packagedata', 'packagedata_setscene']:
+ return 0
+
+ if not "package" in (d.getVar('BUILDHISTORY_FEATURES', True) or "").split():
+ return 0
+
+ import re
+ import json
+ import errno
+
+ pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
+ oldpkghistdir = d.getVar('BUILDHISTORY_OLD_DIR_PACKAGE', True)
+
+ class RecipeInfo:
+ def __init__(self, name):
+ self.name = name
+ self.pe = "0"
+ self.pv = "0"
+ self.pr = "r0"
+ self.depends = ""
+ self.packages = ""
+ self.srcrev = ""
+
+
+ class PackageInfo:
+ def __init__(self, name):
+ self.name = name
+ self.pe = "0"
+ self.pv = "0"
+ self.pr = "r0"
+ # pkg/pkge/pkgv/pkgr should be empty because we want to be able to default them
+ self.pkg = ""
+ self.pkge = ""
+ self.pkgv = ""
+ self.pkgr = ""
+ self.size = 0
+ self.depends = ""
+ self.rprovides = ""
+ self.rdepends = ""
+ self.rrecommends = ""
+ self.rsuggests = ""
+ self.rreplaces = ""
+ self.rconflicts = ""
+ self.files = ""
+ self.filelist = ""
+ # Variables that need to be written to their own separate file
+ self.filevars = dict.fromkeys(['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'])
+
+ # Should check PACKAGES here to see if anything was removed
+
+ def readPackageInfo(pkg, histfile):
+ pkginfo = PackageInfo(pkg)
+ with open(histfile, "r") as f:
+ for line in f:
+ lns = line.split('=', 1)
+ name = lns[0].strip()
+ value = lns[1].strip(" \t\r\n").strip('"')
+ if name == "PE":
+ pkginfo.pe = value
+ elif name == "PV":
+ pkginfo.pv = value
+ elif name == "PR":
+ pkginfo.pr = value
+ elif name == "PKG":
+ pkginfo.pkg = value
+ elif name == "PKGE":
+ pkginfo.pkge = value
+ elif name == "PKGV":
+ pkginfo.pkgv = value
+ elif name == "PKGR":
+ pkginfo.pkgr = value
+ elif name == "RPROVIDES":
+ pkginfo.rprovides = value
+ elif name == "RDEPENDS":
+ pkginfo.rdepends = value
+ elif name == "RRECOMMENDS":
+ pkginfo.rrecommends = value
+ elif name == "RSUGGESTS":
+ pkginfo.rsuggests = value
+ elif name == "RREPLACES":
+ pkginfo.rreplaces = value
+ elif name == "RCONFLICTS":
+ pkginfo.rconflicts = value
+ elif name == "PKGSIZE":
+ pkginfo.size = long(value)
+ elif name == "FILES":
+ pkginfo.files = value
+ elif name == "FILELIST":
+ pkginfo.filelist = value
+ # Apply defaults
+ if not pkginfo.pkg:
+ pkginfo.pkg = pkginfo.name
+ if not pkginfo.pkge:
+ pkginfo.pkge = pkginfo.pe
+ if not pkginfo.pkgv:
+ pkginfo.pkgv = pkginfo.pv
+ if not pkginfo.pkgr:
+ pkginfo.pkgr = pkginfo.pr
+ return pkginfo
+
+ def getlastpkgversion(pkg):
+ try:
+ histfile = os.path.join(oldpkghistdir, pkg, "latest")
+ return readPackageInfo(pkg, histfile)
+ except EnvironmentError:
+ return None
+
+ def sortpkglist(string):
+ pkgiter = re.finditer(r'[a-zA-Z0-9.+-]+( \([><=]+[^)]+\))?', string, 0)
+ pkglist = [p.group(0) for p in pkgiter]
+ pkglist.sort()
+ return ' '.join(pkglist)
+
+ def sortlist(string):
+ items = string.split(' ')
+ items.sort()
+ return ' '.join(items)
+
+ pn = d.getVar('PN', True)
+ pe = d.getVar('PE', True) or "0"
+ pv = d.getVar('PV', True)
+ pr = d.getVar('PR', True)
+
+ pkgdata_dir = d.getVar('PKGDATA_DIR', True)
+ packages = ""
+ try:
+ with open(os.path.join(pkgdata_dir, pn)) as f:
+ for line in f.readlines():
+ if line.startswith('PACKAGES: '):
+ packages = oe.utils.squashspaces(line.split(': ', 1)[1])
+ break
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ # Probably a -cross recipe, just ignore
+ return 0
+ else:
+ raise
+
+ packagelist = packages.split()
+ preserve = d.getVar('BUILDHISTORY_PRESERVE', True).split()
+ if not os.path.exists(pkghistdir):
+ bb.utils.mkdirhier(pkghistdir)
+ else:
+ # Remove files for packages that no longer exist
+ for item in os.listdir(pkghistdir):
+ if item not in preserve:
+ if item not in packagelist:
+ itempath = os.path.join(pkghistdir, item)
+ if os.path.isdir(itempath):
+ for subfile in os.listdir(itempath):
+ os.unlink(os.path.join(itempath, subfile))
+ os.rmdir(itempath)
+ else:
+ os.unlink(itempath)
+
+ rcpinfo = RecipeInfo(pn)
+ rcpinfo.pe = pe
+ rcpinfo.pv = pv
+ rcpinfo.pr = pr
+ rcpinfo.depends = sortlist(oe.utils.squashspaces(d.getVar('DEPENDS', True) or ""))
+ rcpinfo.packages = packages
+ write_recipehistory(rcpinfo, d)
+
+ pkgdest = d.getVar('PKGDEST', True)
+ for pkg in packagelist:
+ pkgdata = {}
+ with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
+ for line in f.readlines():
+ item = line.rstrip('\n').split(': ', 1)
+ key = item[0]
+ if key.endswith('_' + pkg):
+ key = key[:-len(pkg)-1]
+ pkgdata[key] = item[1].decode('utf-8').decode('string_escape')
+
+ pkge = pkgdata.get('PKGE', '0')
+ pkgv = pkgdata['PKGV']
+ pkgr = pkgdata['PKGR']
+ #
+ # Find out what the last version was
+ # Make sure the version did not decrease
+ #
+ lastversion = getlastpkgversion(pkg)
+ if lastversion:
+ last_pkge = lastversion.pkge
+ last_pkgv = lastversion.pkgv
+ last_pkgr = lastversion.pkgr
+ r = bb.utils.vercmp((pkge, pkgv, pkgr), (last_pkge, last_pkgv, last_pkgr))
+ if r < 0:
+ msg = "Package version for package %s went backwards which would break package feeds from (%s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr)
+ package_qa_handle_error("version-going-backwards", msg, d)
+
+ pkginfo = PackageInfo(pkg)
+ # Apparently the version can be different on a per-package basis (see Python)
+ pkginfo.pe = pkgdata.get('PE', '0')
+ pkginfo.pv = pkgdata['PV']
+ pkginfo.pr = pkgdata['PR']
+ pkginfo.pkg = pkgdata['PKG']
+ pkginfo.pkge = pkge
+ pkginfo.pkgv = pkgv
+ pkginfo.pkgr = pkgr
+ pkginfo.rprovides = sortpkglist(oe.utils.squashspaces(pkgdata.get('RPROVIDES', "")))
+ pkginfo.rdepends = sortpkglist(oe.utils.squashspaces(pkgdata.get('RDEPENDS', "")))
+ pkginfo.rrecommends = sortpkglist(oe.utils.squashspaces(pkgdata.get('RRECOMMENDS', "")))
+ pkginfo.rsuggests = sortpkglist(oe.utils.squashspaces(pkgdata.get('RSUGGESTS', "")))
+ pkginfo.rreplaces = sortpkglist(oe.utils.squashspaces(pkgdata.get('RREPLACES', "")))
+ pkginfo.rconflicts = sortpkglist(oe.utils.squashspaces(pkgdata.get('RCONFLICTS', "")))
+ pkginfo.files = oe.utils.squashspaces(pkgdata.get('FILES', ""))
+ for filevar in pkginfo.filevars:
+ pkginfo.filevars[filevar] = pkgdata.get(filevar, "")
+
+ # Gather information about packaged files
+ val = pkgdata.get('FILES_INFO', '')
+ dictval = json.loads(val)
+ filelist = dictval.keys()
+ filelist.sort()
+ pkginfo.filelist = " ".join(filelist)
+
+ pkginfo.size = int(pkgdata['PKGSIZE'])
+
+ write_pkghistory(pkginfo, d)
+
+ # Create files-in-<package-name>.txt files containing a list of files of each recipe's package
+ bb.build.exec_func("buildhistory_list_pkg_files", d)
+}
+
+
+def write_recipehistory(rcpinfo, d):
+ import codecs
+
+ bb.debug(2, "Writing recipe history")
+
+ pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
+
+ infofile = os.path.join(pkghistdir, "latest")
+ with codecs.open(infofile, "w", encoding='utf8') as f:
+ if rcpinfo.pe != "0":
+ f.write(u"PE = %s\n" % rcpinfo.pe)
+ f.write(u"PV = %s\n" % rcpinfo.pv)
+ f.write(u"PR = %s\n" % rcpinfo.pr)
+ f.write(u"DEPENDS = %s\n" % rcpinfo.depends)
+ f.write(u"PACKAGES = %s\n" % rcpinfo.packages)
+
+
+def write_pkghistory(pkginfo, d):
+ import codecs
+
+ bb.debug(2, "Writing package history for package %s" % pkginfo.name)
+
+ pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
+
+ pkgpath = os.path.join(pkghistdir, pkginfo.name)
+ if not os.path.exists(pkgpath):
+ bb.utils.mkdirhier(pkgpath)
+
+ infofile = os.path.join(pkgpath, "latest")
+ with codecs.open(infofile, "w", encoding='utf8') as f:
+ if pkginfo.pe != "0":
+ f.write(u"PE = %s\n" % pkginfo.pe)
+ f.write(u"PV = %s\n" % pkginfo.pv)
+ f.write(u"PR = %s\n" % pkginfo.pr)
+
+ pkgvars = {}
+ pkgvars['PKG'] = pkginfo.pkg if pkginfo.pkg != pkginfo.name else ''
+ pkgvars['PKGE'] = pkginfo.pkge if pkginfo.pkge != pkginfo.pe else ''
+ pkgvars['PKGV'] = pkginfo.pkgv if pkginfo.pkgv != pkginfo.pv else ''
+ pkgvars['PKGR'] = pkginfo.pkgr if pkginfo.pkgr != pkginfo.pr else ''
+ for pkgvar in pkgvars:
+ val = pkgvars[pkgvar]
+ if val:
+ f.write(u"%s = %s\n" % (pkgvar, val))
+
+ f.write(u"RPROVIDES = %s\n" % pkginfo.rprovides)
+ f.write(u"RDEPENDS = %s\n" % pkginfo.rdepends)
+ f.write(u"RRECOMMENDS = %s\n" % pkginfo.rrecommends)
+ if pkginfo.rsuggests:
+ f.write(u"RSUGGESTS = %s\n" % pkginfo.rsuggests)
+ if pkginfo.rreplaces:
+ f.write(u"RREPLACES = %s\n" % pkginfo.rreplaces)
+ if pkginfo.rconflicts:
+ f.write(u"RCONFLICTS = %s\n" % pkginfo.rconflicts)
+ f.write(u"PKGSIZE = %d\n" % pkginfo.size)
+ f.write(u"FILES = %s\n" % pkginfo.files)
+ f.write(u"FILELIST = %s\n" % pkginfo.filelist)
+
+ for filevar in pkginfo.filevars:
+ filevarpath = os.path.join(pkgpath, "latest.%s" % filevar)
+ val = pkginfo.filevars[filevar]
+ if val:
+ with codecs.open(filevarpath, "w", encoding='utf8') as f:
+ f.write(val)
+ else:
+ if os.path.exists(filevarpath):
+ os.unlink(filevarpath)
+
+#
+# rootfs_type can be: image, sdk_target, sdk_host
+#
+def buildhistory_list_installed(d, rootfs_type="image"):
+ from oe.rootfs import image_list_installed_packages
+ from oe.sdk import sdk_list_installed_packages
+ from oe.utils import format_pkg_list
+
+ process_list = [('file', 'bh_installed_pkgs.txt'),\
+ ('deps', 'bh_installed_pkgs_deps.txt')]
+
+ if rootfs_type == "image":
+ pkgs = image_list_installed_packages(d)
+ else:
+ pkgs = sdk_list_installed_packages(d, rootfs_type == "sdk_target")
+
+ for output_type, output_file in process_list:
+ output_file_full = os.path.join(d.getVar('WORKDIR', True), output_file)
+
+ with open(output_file_full, 'w') as output:
+ output.write(format_pkg_list(pkgs, output_type))
+
+python buildhistory_list_installed_image() {
+ buildhistory_list_installed(d)
+}
+
+python buildhistory_list_installed_sdk_target() {
+ buildhistory_list_installed(d, "sdk_target")
+}
+
+python buildhistory_list_installed_sdk_host() {
+ buildhistory_list_installed(d, "sdk_host")
+}
+
+buildhistory_get_installed() {
+ mkdir -p $1
+
+ # Get list of installed packages
+ pkgcache="$1/installed-packages.tmp"
+ cat ${WORKDIR}/bh_installed_pkgs.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs.txt
+
+ cat $pkgcache | awk '{ print $1 }' > $1/installed-package-names.txt
+ if [ -s $pkgcache ] ; then
+ cat $pkgcache | awk '{ print $2 }' | xargs -n1 basename > $1/installed-packages.txt
+ else
+ printf "" > $1/installed-packages.txt
+ fi
+
+ # Produce dependency graph
+ # First, quote each name to handle characters that cause issues for dot
+ sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps.txt > $1/depends.tmp && \
+ rm ${WORKDIR}/bh_installed_pkgs_deps.txt
+ # Change delimiter from pipe to -> and set style for recommend lines
+ sed -i -e 's:|: -> :' -e 's:"\[REC\]":[style=dotted]:' -e 's:$:;:' $1/depends.tmp
+ # Add header, sorted and de-duped contents and footer and then delete the temp file
+ printf "digraph depends {\n node [shape=plaintext]\n" > $1/depends.dot
+ cat $1/depends.tmp | sort | uniq >> $1/depends.dot
+ echo "}" >> $1/depends.dot
+ rm $1/depends.tmp
+
+ # Produce installed package sizes list
+ oe-pkgdata-util -p ${PKGDATA_DIR} read-value "PKGSIZE" -n -f $pkgcache > $1/installed-package-sizes.tmp
+ cat $1/installed-package-sizes.tmp | awk '{print $2 "\tKiB " $1}' | sort -n -r > $1/installed-package-sizes.txt
+ rm $1/installed-package-sizes.tmp
+
+ # We're now done with the cache, delete it
+ rm $pkgcache
+
+ if [ "$2" != "sdk" ] ; then
+ # Produce some cut-down graphs (for readability)
+ grep -v kernel-image $1/depends.dot | grep -v kernel-3 | grep -v kernel-4 > $1/depends-nokernel.dot
+ grep -v libc6 $1/depends-nokernel.dot | grep -v libgcc > $1/depends-nokernel-nolibc.dot
+ grep -v update- $1/depends-nokernel-nolibc.dot > $1/depends-nokernel-nolibc-noupdate.dot
+ grep -v kernel-module $1/depends-nokernel-nolibc-noupdate.dot > $1/depends-nokernel-nolibc-noupdate-nomodules.dot
+ fi
+
+ # add complementary package information
+ if [ -e ${WORKDIR}/complementary_pkgs.txt ]; then
+ cp ${WORKDIR}/complementary_pkgs.txt $1
+ fi
+}
+
+buildhistory_get_image_installed() {
+ # Anything requiring the use of the packaging system should be done in here
+ # in case the packaging files are going to be removed for this image
+
+ if [ "${@bb.utils.contains('BUILDHISTORY_FEATURES', 'image', '1', '0', d)}" = "0" ] ; then
+ return
+ fi
+
+ buildhistory_get_installed ${BUILDHISTORY_DIR_IMAGE}
+}
+
+buildhistory_get_sdk_installed() {
+ # Anything requiring the use of the packaging system should be done in here
+ # in case the packaging files are going to be removed for this SDK
+
+ if [ "${@bb.utils.contains('BUILDHISTORY_FEATURES', 'sdk', '1', '0', d)}" = "0" ] ; then
+ return
+ fi
+
+ buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk
+}
+
+buildhistory_get_sdk_installed_host() {
+ buildhistory_get_sdk_installed host
+}
+
+buildhistory_get_sdk_installed_target() {
+ buildhistory_get_sdk_installed target
+}
+
+buildhistory_list_files() {
+ # List the files in the specified directory, but exclude date/time etc.
+ # The find/sed pipeline below is somewhat messy, but handles the case where the size is not printed for device files under pseudo
+ if [ "$3" = "fakeroot" ] ; then
+ ( cd $1 && ${FAKEROOTENV} ${FAKEROOTCMD} find . ! -path . -printf "%M %-10u %-10g %10s %p -> %l\n" | sort -k5 | sed 's/ * -> $//' > $2 )
+ else
+ ( cd $1 && find . ! -path . -printf "%M %-10u %-10g %10s %p -> %l\n" | sort -k5 | sed 's/ * -> $//' > $2 )
+ fi
+}
+
+buildhistory_list_pkg_files() {
+ # Create individual files-in-package for each recipe's package
+ for pkgdir in $(find ${PKGDEST}/* -maxdepth 0 -type d); do
+ pkgname=$(basename $pkgdir)
+ outfolder="${BUILDHISTORY_DIR_PACKAGE}/$pkgname"
+ outfile="$outfolder/files-in-package.txt"
+ # Make sure the output folder exists so we can create the file
+ if [ ! -d $outfolder ] ; then
+ bbdebug 2 "Folder $outfolder does not exist, file $outfile not created"
+ continue
+ fi
+ buildhistory_list_files $pkgdir $outfile fakeroot
+ done
+}
+
+buildhistory_get_imageinfo() {
+ if [ "${@bb.utils.contains('BUILDHISTORY_FEATURES', 'image', '1', '0', d)}" = "0" ] ; then
+ return
+ fi
+
+ mkdir -p ${BUILDHISTORY_DIR_IMAGE}
+ buildhistory_list_files ${IMAGE_ROOTFS} ${BUILDHISTORY_DIR_IMAGE}/files-in-image.txt
+
+ # Collect files requested in BUILDHISTORY_IMAGE_FILES
+ rm -rf ${BUILDHISTORY_DIR_IMAGE}/image-files
+ for f in ${BUILDHISTORY_IMAGE_FILES}; do
+ if [ -f ${IMAGE_ROOTFS}/$f ] ; then
+ mkdir -p ${BUILDHISTORY_DIR_IMAGE}/image-files/`dirname $f`
+ cp ${IMAGE_ROOTFS}/$f ${BUILDHISTORY_DIR_IMAGE}/image-files/$f
+ fi
+ done
+
+ # Record some machine-readable meta-information about the image
+ printf "" > ${BUILDHISTORY_DIR_IMAGE}/image-info.txt
+ cat >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt <<END
+${@buildhistory_get_imagevars(d)}
+END
+ imagesize=`du -ks ${IMAGE_ROOTFS} | awk '{ print $1 }'`
+ echo "IMAGESIZE = $imagesize" >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt
+
+ # Add some configuration information
+ echo "${MACHINE}: ${IMAGE_BASENAME} configured for ${DISTRO} ${DISTRO_VERSION}" > ${BUILDHISTORY_DIR_IMAGE}/build-id.txt
+
+ cat >> ${BUILDHISTORY_DIR_IMAGE}/build-id.txt <<END
+${@buildhistory_get_build_id(d)}
+END
+}
+
+buildhistory_get_sdkinfo() {
+ if [ "${@bb.utils.contains('BUILDHISTORY_FEATURES', 'sdk', '1', '0', d)}" = "0" ] ; then
+ return
+ fi
+
+ buildhistory_list_files ${SDK_OUTPUT} ${BUILDHISTORY_DIR_SDK}/files-in-sdk.txt
+
+ # Collect files requested in BUILDHISTORY_SDK_FILES
+ rm -rf ${BUILDHISTORY_DIR_SDK}/sdk-files
+ for f in ${BUILDHISTORY_SDK_FILES}; do
+ if [ -f ${SDK_OUTPUT}/${SDKPATH}/$f ] ; then
+ mkdir -p ${BUILDHISTORY_DIR_SDK}/sdk-files/`dirname $f`
+ cp ${SDK_OUTPUT}/${SDKPATH}/$f ${BUILDHISTORY_DIR_SDK}/sdk-files/$f
+ fi
+ done
+
+ # Record some machine-readable meta-information about the SDK
+ printf "" > ${BUILDHISTORY_DIR_SDK}/sdk-info.txt
+ cat >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt <<END
+${@buildhistory_get_sdkvars(d)}
+END
+ sdksize=`du -ks ${SDK_OUTPUT} | awk '{ print $1 }'`
+ echo "SDKSIZE = $sdksize" >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt
+}
+
+python buildhistory_get_extra_sdkinfo() {
+ import operator
+ import math
+ if d.getVar('BB_CURRENTTASK', True) == 'populate_sdk_ext':
+ tasksizes = {}
+ filesizes = {}
+ for root, _, files in os.walk(d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')):
+ for fn in files:
+ if fn.endswith('.tgz'):
+ fsize = int(math.ceil(float(os.path.getsize(os.path.join(root, fn))) / 1024))
+ task = fn.rsplit(':', 1)[1].split('_', 1)[1].split('.')[0]
+ origtotal = tasksizes.get(task, 0)
+ tasksizes[task] = origtotal + fsize
+ filesizes[fn] = fsize
+ with open(d.expand('${BUILDHISTORY_DIR_SDK}/sstate-package-sizes.txt'), 'w') as f:
+ filesizes_sorted = sorted(filesizes.items(), key=operator.itemgetter(1), reverse=True)
+ for fn, size in filesizes_sorted:
+ f.write('%10d KiB %s\n' % (size, fn))
+ with open(d.expand('${BUILDHISTORY_DIR_SDK}/sstate-task-sizes.txt'), 'w') as f:
+ tasksizes_sorted = sorted(tasksizes.items(), key=operator.itemgetter(1), reverse=True)
+ for task, size in tasksizes_sorted:
+ f.write('%10d KiB %s\n' % (size, task))
+}
+
+# By using ROOTFS_POSTUNINSTALL_COMMAND we hook in after the uninstallation of
+# unneeded packages but before the removal of packaging files
+ROOTFS_POSTUNINSTALL_COMMAND += " buildhistory_list_installed_image ;\
+ buildhistory_get_image_installed ; "
+
+IMAGE_POSTPROCESS_COMMAND += " buildhistory_get_imageinfo ; "
+
+# We want these to be the last run so that we get called after complementary package installation
+POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_target;"
+POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_get_sdk_installed_target;"
+POPULATE_SDK_POST_TARGET_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_target;| buildhistory_get_sdk_installed_target;"
+
+POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_list_installed_sdk_host;"
+POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_get_sdk_installed_host;"
+POPULATE_SDK_POST_HOST_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_host;| buildhistory_get_sdk_installed_host;"
+
+SDK_POSTPROCESS_COMMAND_append = " buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
+SDK_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
+
+def buildhistory_get_build_id(d):
+ if d.getVar('BB_WORKERCONTEXT', True) != '1':
+ return ""
+ localdata = bb.data.createCopy(d)
+ bb.data.update_data(localdata)
+ statuslines = []
+ for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
+ g = globals()
+ if func not in g:
+ bb.warn("Build configuration function '%s' does not exist" % func)
+ else:
+ flines = g[func](localdata)
+ if flines:
+ statuslines.extend(flines)
+
+ statusheader = d.getVar('BUILDCFG_HEADER', True)
+ return('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
+
+def buildhistory_get_metadata_revs(d):
+ # We want an easily machine-readable format here, so get_layers_branch_rev isn't quite what we want
+ layers = (d.getVar("BBLAYERS", True) or "").split()
+ metadata_revs = ["%-17s = %s:%s" % (os.path.basename(i), \
+ base_get_metadata_git_branch(i, None).strip(), \
+ base_get_metadata_git_revision(i, None)) \
+ for i in layers]
+ return '\n'.join(metadata_revs)
+
+def outputvars(vars, listvars, d):
+ vars = vars.split()
+ listvars = listvars.split()
+ ret = ""
+ for var in vars:
+ value = d.getVar(var, True) or ""
+ if var in listvars:
+ # Squash out spaces
+ value = oe.utils.squashspaces(value)
+ ret += "%s = %s\n" % (var, value)
+ return ret.rstrip('\n')
+
+def buildhistory_get_imagevars(d):
+ if d.getVar('BB_WORKERCONTEXT', True) != '1':
+ return ""
+ imagevars = "DISTRO DISTRO_VERSION USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE ROOTFS_POSTPROCESS_COMMAND IMAGE_POSTPROCESS_COMMAND"
+ listvars = "USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS PACKAGE_EXCLUDE"
+ return outputvars(imagevars, listvars, d)
+
+def buildhistory_get_sdkvars(d):
+ if d.getVar('BB_WORKERCONTEXT', True) != '1':
+ return ""
+ sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
+ if d.getVar('BB_CURRENTTASK', True) == 'populate_sdk_ext':
+ # Extensible SDK uses some additional variables
+ sdkvars += " SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS"
+ listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST"
+ return outputvars(sdkvars, listvars, d)
+
+
+def buildhistory_get_cmdline(d):
+ if sys.argv[0].endswith('bin/bitbake'):
+ bincmd = 'bitbake'
+ else:
+ bincmd = sys.argv[0]
+ return '%s %s' % (bincmd, ' '.join(sys.argv[1:]))
+
+
+buildhistory_single_commit() {
+ if [ "$3" = "" ] ; then
+ commitopts="${BUILDHISTORY_DIR}/ --allow-empty"
+ item="No changes"
+ else
+ commitopts="$3 metadata-revs"
+ item="$3"
+ fi
+ if [ "${BUILDHISTORY_BUILD_FAILURES}" = "0" ] ; then
+ result="succeeded"
+ else
+ result="failed"
+ fi
+ case ${BUILDHISTORY_BUILD_INTERRUPTED} in
+ 1)
+ result="$result (interrupted)"
+ ;;
+ 2)
+ result="$result (force interrupted)"
+ ;;
+ esac
+ commitmsgfile=`mktemp`
+ cat > $commitmsgfile << END
+$item: Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $2
+
+cmd: $1
+
+result: $result
+
+metadata revisions:
+END
+ cat ${BUILDHISTORY_DIR}/metadata-revs >> $commitmsgfile
+ git commit $commitopts -F $commitmsgfile --author "${BUILDHISTORY_COMMIT_AUTHOR}" > /dev/null
+ rm $commitmsgfile
+}
+
+buildhistory_commit() {
+ if [ ! -d ${BUILDHISTORY_DIR} ] ; then
+ # The code above that creates this dir never executed, so there can't be anything to commit
+ return
+ fi
+
+ # Create a machine-readable list of metadata revisions for each layer
+ cat > ${BUILDHISTORY_DIR}/metadata-revs <<END
+${@buildhistory_get_metadata_revs(d)}
+END
+
+ ( cd ${BUILDHISTORY_DIR}/
+ # Initialise the repo if necessary
+ if [ ! -e .git ] ; then
+ git init -q
+ else
+ git tag -f build-minus-3 build-minus-2 > /dev/null 2>&1 || true
+ git tag -f build-minus-2 build-minus-1 > /dev/null 2>&1 || true
+ git tag -f build-minus-1 > /dev/null 2>&1 || true
+ fi
+ # If the user hasn't set up their name/email, set some defaults
+ # just for this repo (otherwise the commit will fail with older
+ # versions of git)
+ if ! git config user.email > /dev/null ; then
+ git config --local user.email "buildhistory@${DISTRO}"
+ fi
+ if ! git config user.name > /dev/null ; then
+ git config --local user.name "buildhistory"
+ fi
+ # Check if there are new/changed files to commit (other than metadata-revs)
+ repostatus=`git status --porcelain | grep -v " metadata-revs$"`
+ HOSTNAME=`hostname 2>/dev/null || echo unknown`
+ CMDLINE="${@buildhistory_get_cmdline(d)}"
+ if [ "$repostatus" != "" ] ; then
+ git add -A .
+ # porcelain output looks like "?? packages/foo/bar"
+ # Ensure we commit metadata-revs with the first commit
+ for entry in `echo "$repostatus" | awk '{print $2}' | awk -F/ '{print $1}' | sort | uniq` ; do
+ buildhistory_single_commit "$CMDLINE" "$HOSTNAME" "$entry"
+ done
+ git gc --auto --quiet
+ else
+ buildhistory_single_commit "$CMDLINE" "$HOSTNAME"
+ fi
+ if [ "${BUILDHISTORY_PUSH_REPO}" != "" ] ; then
+ git push -q ${BUILDHISTORY_PUSH_REPO}
+ fi) || true
+}
+
+python buildhistory_eventhandler() {
+ if e.data.getVar('BUILDHISTORY_FEATURES', True).strip():
+ reset = e.data.getVar("BUILDHISTORY_RESET", True)
+ olddir = e.data.getVar("BUILDHISTORY_OLD_DIR", True)
+ if isinstance(e, bb.event.BuildStarted):
+ if reset:
+ import shutil
+ # Clean up after potentially interrupted build.
+ if os.path.isdir(olddir):
+ shutil.rmtree(olddir)
+ rootdir = e.data.getVar("BUILDHISTORY_DIR", True)
+ entries = [ x for x in os.listdir(rootdir) if not x.startswith('.') ]
+ bb.utils.mkdirhier(olddir)
+ for entry in entries:
+ os.rename(os.path.join(rootdir, entry),
+ os.path.join(olddir, entry))
+ elif isinstance(e, bb.event.BuildCompleted):
+ if reset:
+ import shutil
+ shutil.rmtree(olddir)
+ if e.data.getVar("BUILDHISTORY_COMMIT", True) == "1":
+ bb.note("Writing buildhistory")
+ localdata = bb.data.createCopy(e.data)
+ localdata.setVar('BUILDHISTORY_BUILD_FAILURES', str(e._failures))
+ interrupted = getattr(e, '_interrupted', 0)
+ localdata.setVar('BUILDHISTORY_BUILD_INTERRUPTED', str(interrupted))
+ bb.build.exec_func("buildhistory_commit", localdata)
+}
+
+addhandler buildhistory_eventhandler
+buildhistory_eventhandler[eventmask] = "bb.event.BuildCompleted bb.event.BuildStarted"
+
+
+# FIXME this ought to be moved into the fetcher
+def _get_srcrev_values(d):
+ """
+ Return the version strings for the current recipe
+ """
+
+ scms = []
+ fetcher = bb.fetch.Fetch(d.getVar('SRC_URI', True).split(), d)
+ urldata = fetcher.ud
+ for u in urldata:
+ if urldata[u].method.supports_srcrev():
+ scms.append(u)
+
+ autoinc_templ = 'AUTOINC+'
+ dict_srcrevs = {}
+ dict_tag_srcrevs = {}
+ for scm in scms:
+ ud = urldata[scm]
+ for name in ud.names:
+ try:
+ rev = ud.method.sortable_revision(ud, d, name)
+ except TypeError:
+ # support old bitbake versions
+ rev = ud.method.sortable_revision(scm, ud, d, name)
+ # Clean this up when we next bump bitbake version
+ if type(rev) != str:
+ autoinc, rev = rev
+ elif rev.startswith(autoinc_templ):
+ rev = rev[len(autoinc_templ):]
+ dict_srcrevs[name] = rev
+ if 'tag' in ud.parm:
+ tag = ud.parm['tag']
+ key = name+'_'+tag
+ dict_tag_srcrevs[key] = rev
+ return (dict_srcrevs, dict_tag_srcrevs)
+
+do_fetch[postfuncs] += "write_srcrev"
+do_fetch[vardepsexclude] += "write_srcrev"
+python write_srcrev() {
+ pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
+ srcrevfile = os.path.join(pkghistdir, 'latest_srcrev')
+
+ srcrevs, tag_srcrevs = _get_srcrev_values(d)
+ if srcrevs:
+ if not os.path.exists(pkghistdir):
+ bb.utils.mkdirhier(pkghistdir)
+ old_tag_srcrevs = {}
+ if os.path.exists(srcrevfile):
+ with open(srcrevfile) as f:
+ for line in f:
+ if line.startswith('# tag_'):
+ key, value = line.split("=", 1)
+ key = key.replace('# tag_', '').strip()
+ value = value.replace('"', '').strip()
+ old_tag_srcrevs[key] = value
+ with open(srcrevfile, 'w') as f:
+ orig_srcrev = d.getVar('SRCREV', False) or 'INVALID'
+ if orig_srcrev != 'INVALID':
+ f.write('# SRCREV = "%s"\n' % orig_srcrev)
+ if len(srcrevs) > 1:
+ for name, srcrev in srcrevs.items():
+ orig_srcrev = d.getVar('SRCREV_%s' % name, False)
+ if orig_srcrev:
+ f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev))
+ f.write('SRCREV_%s = "%s"\n' % (name, srcrev))
+ else:
+ f.write('SRCREV = "%s"\n' % srcrevs.itervalues().next())
+ if len(tag_srcrevs) > 0:
+ for name, srcrev in tag_srcrevs.items():
+ f.write('# tag_%s = "%s"\n' % (name, srcrev))
+ if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
+ pkg = d.getVar('PN', True)
+ bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
+
+ else:
+ if os.path.exists(srcrevfile):
+ os.remove(srcrevfile)
+}
diff --git a/import-layers/yocto-poky/meta/classes/buildstats-summary.bbclass b/import-layers/yocto-poky/meta/classes/buildstats-summary.bbclass
new file mode 100644
index 000000000..d73350b94
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/buildstats-summary.bbclass
@@ -0,0 +1,36 @@
+# Summarize sstate usage at the end of the build
+python buildstats_summary () {
+ import collections
+ import os.path
+
+ bsdir = e.data.expand("${BUILDSTATS_BASE}/${BUILDNAME}")
+ if not os.path.exists(bsdir):
+ return
+
+ sstatetasks = (e.data.getVar('SSTATETASKS', True) or '').split()
+ built = collections.defaultdict(lambda: [set(), set()])
+ for pf in os.listdir(bsdir):
+ taskdir = os.path.join(bsdir, pf)
+ if not os.path.isdir(taskdir):
+ continue
+
+ tasks = os.listdir(taskdir)
+ for t in sstatetasks:
+ no_sstate, sstate = built[t]
+ if t in tasks:
+ no_sstate.add(pf)
+ elif t + '_setscene' in tasks:
+ sstate.add(pf)
+
+ header_printed = False
+ for t in sstatetasks:
+ no_sstate, sstate = built[t]
+ if no_sstate | sstate:
+ if not header_printed:
+ header_printed = True
+ bb.note("Build completion summary:")
+
+ bb.note(" {0}: {1}% sstate reuse ({2} setscene, {3} scratch)".format(t, 100*len(sstate)/(len(sstate)+len(no_sstate)), len(sstate), len(no_sstate)))
+}
+addhandler buildstats_summary
+buildstats_summary[eventmask] = "bb.event.BuildCompleted"
diff --git a/import-layers/yocto-poky/meta/classes/buildstats.bbclass b/import-layers/yocto-poky/meta/classes/buildstats.bbclass
new file mode 100644
index 000000000..34ecb0386
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/buildstats.bbclass
@@ -0,0 +1,185 @@
+BUILDSTATS_BASE = "${TMPDIR}/buildstats/"
+
+################################################################################
+# Build statistics gathering.
+#
+# The CPU and Time gathering/tracking functions and bbevent inspiration
+# were written by Christopher Larson.
+#
+################################################################################
+
+def get_buildprocess_cputime(pid):
+ with open("/proc/%d/stat" % pid, "r") as f:
+ fields = f.readline().rstrip().split()
+ # 13: utime, 14: stime, 15: cutime, 16: cstime
+ return sum(int(field) for field in fields[13:16])
+
+def get_process_cputime(pid):
+ import resource
+ with open("/proc/%d/stat" % pid, "r") as f:
+ fields = f.readline().rstrip().split()
+ stats = {
+ 'utime' : fields[13],
+ 'stime' : fields[14],
+ 'cutime' : fields[15],
+ 'cstime' : fields[16],
+ }
+ iostats = {}
+ if os.path.isfile("/proc/%d/io" % pid):
+ with open("/proc/%d/io" % pid, "r") as f:
+ while True:
+ i = f.readline().strip()
+ if not i:
+ break
+ i = i.split(": ")
+ iostats[i[0]] = i[1]
+ resources = resource.getrusage(resource.RUSAGE_SELF)
+ childres = resource.getrusage(resource.RUSAGE_CHILDREN)
+ return stats, iostats, resources, childres
+
+def get_cputime():
+ with open("/proc/stat", "r") as f:
+ fields = f.readline().rstrip().split()[1:]
+ return sum(int(field) for field in fields)
+
+def set_timedata(var, d, server_time):
+ d.setVar(var, server_time)
+
+def get_timedata(var, d, end_time):
+ oldtime = d.getVar(var, False)
+ if oldtime is None:
+ return
+ return end_time - oldtime
+
+def set_buildtimedata(var, d):
+ import time
+ now = time.time()    # avoid shadowing the time module with the timestamp
+ cputime = get_cputime()
+ proctime = get_buildprocess_cputime(os.getpid())
+ d.setVar(var, (now, cputime, proctime))
+
+def get_buildtimedata(var, d):
+ import time
+ timedata = d.getVar(var, False)
+ if timedata is None:
+ return
+ oldtime, oldcpu, oldproc = timedata
+ procdiff = get_buildprocess_cputime(os.getpid()) - oldproc
+ cpudiff = get_cputime() - oldcpu
+ end_time = time.time()
+ timediff = end_time - oldtime
+ if cpudiff > 0:
+ cpuperc = float(procdiff) * 100 / cpudiff
+ else:
+ cpuperc = None
+ return timediff, cpuperc
+
+def write_task_data(status, logfile, e, d):
+ bn = d.getVar('BUILDNAME', True)
+ bsdir = os.path.join(d.getVar('BUILDSTATS_BASE', True), bn)
+ with open(logfile, "a") as f:
+ elapsedtime = get_timedata("__timedata_task", d, e.time)
+ if elapsedtime:
+ f.write(d.expand("${PF}: %s: Elapsed time: %0.2f seconds \n" %
+ (e.task, elapsedtime)))
+ cpu, iostats, resources, childres = get_process_cputime(os.getpid())
+ if cpu:
+ f.write("utime: %s\n" % cpu['utime'])
+ f.write("stime: %s\n" % cpu['stime'])
+ f.write("cutime: %s\n" % cpu['cutime'])
+ f.write("cstime: %s\n" % cpu['cstime'])
+ for i in iostats:
+ f.write("IO %s: %s\n" % (i, iostats[i]))
+ rusages = ["ru_utime", "ru_stime", "ru_maxrss", "ru_minflt", "ru_majflt", "ru_inblock", "ru_oublock", "ru_nvcsw", "ru_nivcsw"]
+ for i in rusages:
+ f.write("rusage %s: %s\n" % (i, getattr(resources, i)))
+ for i in rusages:
+ f.write("Child rusage %s: %s\n" % (i, getattr(childres, i)))
+ if status is "passed":
+ f.write("Status: PASSED \n")
+ else:
+ f.write("Status: FAILED \n")
+ f.write("Ended: %0.2f \n" % e.time)
+
+python run_buildstats () {
+ import bb.build
+ import bb.event
+ import time, subprocess, platform
+
+ bn = d.getVar('BUILDNAME', True)
+ bsdir = os.path.join(d.getVar('BUILDSTATS_BASE', True), bn)
+ taskdir = os.path.join(bsdir, d.getVar('PF', True))
+
+ if isinstance(e, bb.event.BuildStarted):
+ ########################################################################
+        # If the kernel was not configured to provide I/O statistics, issue
+        # a one-time warning.
+ ########################################################################
+ if not os.path.isfile("/proc/%d/io" % os.getpid()):
+ bb.warn("The Linux kernel on your build host was not configured to provide process I/O statistics. (CONFIG_TASK_IO_ACCOUNTING is not set)")
+
+ ########################################################################
+        # On the first pass, create the buildstats hierarchy and then
+        # record the host info and build start time
+ ########################################################################
+ bb.utils.mkdirhier(bsdir)
+ set_buildtimedata("__timedata_build", d)
+ build_time = os.path.join(bsdir, "build_stats")
+ # write start of build into build_time
+ with open(build_time, "a") as f:
+ host_info = platform.uname()
+ f.write("Host Info: ")
+ for x in host_info:
+ if x:
+ f.write(x + " ")
+ f.write("\n")
+ f.write("Build Started: %0.2f \n" % time.time())
+
+ elif isinstance(e, bb.event.BuildCompleted):
+ build_time = os.path.join(bsdir, "build_stats")
+ with open(build_time, "a") as f:
+ ########################################################################
+ # Write build statistics for the build
+ ########################################################################
+ timedata = get_buildtimedata("__timedata_build", d)
+            if timedata:
+                elapsed, cpu = timedata
+                # write end of build and cpu used into build_time
+                f.write("Elapsed time: %0.2f seconds \n" % elapsed)
+ if cpu:
+ f.write("CPU usage: %0.1f%% \n" % cpu)
+
+ if isinstance(e, bb.build.TaskStarted):
+ set_timedata("__timedata_task", d, e.time)
+ bb.utils.mkdirhier(taskdir)
+ # write into the task event file the name and start time
+ with open(os.path.join(taskdir, e.task), "a") as f:
+ f.write("Event: %s \n" % bb.event.getName(e))
+ f.write("Started: %0.2f \n" % e.time)
+
+ elif isinstance(e, bb.build.TaskSucceeded):
+ write_task_data("passed", os.path.join(taskdir, e.task), e, d)
+ if e.task == "do_rootfs":
+ bs = os.path.join(bsdir, "build_stats")
+ with open(bs, "a") as f:
+ rootfs = d.getVar('IMAGE_ROOTFS', True)
+                rootfs_size = subprocess.check_output(["du", "-sh", rootfs]).decode("utf-8")
+ f.write("Uncompressed Rootfs size: %s" % rootfs_size)
+
+ elif isinstance(e, bb.build.TaskFailed):
+ # Can have a failure before TaskStarted so need to mkdir here too
+ bb.utils.mkdirhier(taskdir)
+ write_task_data("failed", os.path.join(taskdir, e.task), e, d)
+ ########################################################################
+        # Let's make things easier and tell people where the build failed in
+        # build_stats. We do this here because BuildCompleted triggers no
+        # matter what the actual status of the build is.
+ ########################################################################
+ build_status = os.path.join(bsdir, "build_stats")
+ with open(build_status, "a") as f:
+ f.write(d.expand("Failed at: ${PF} at task: %s \n" % e.task))
+}
+
+addhandler run_buildstats
+run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
+
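The class above is driven entirely by the event handler registered on the preceding lines, so enabling it is a one-line configuration change. A minimal local.conf sketch, assuming the usual USER_CLASSES hook (poky's local.conf.sample already lists buildstats there):

    USER_CLASSES += "buildstats"

Per-task logs then appear under ${BUILDSTATS_BASE}/<BUILDNAME>/<PF>/.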
diff --git a/import-layers/yocto-poky/meta/classes/ccache.bbclass b/import-layers/yocto-poky/meta/classes/ccache.bbclass
new file mode 100644
index 000000000..2cdce4693
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/ccache.bbclass
@@ -0,0 +1,8 @@
+CCACHE = "${@bb.utils.which(d.getVar('PATH', True), 'ccache') and 'ccache '}"
+export CCACHE_DIR ?= "${TMPDIR}/ccache/${MULTIMACH_HOST_SYS}/${PN}"
+CCACHE_DISABLE[unexport] = "1"
+
+do_configure[dirs] =+ "${CCACHE_DIR}"
+do_kernel_configme[dirs] =+ "${CCACHE_DIR}"
+
+do_clean[cleandirs] += "${CCACHE_DIR}"
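Since CCACHE expands to "ccache " only when the binary is found on PATH, an individual recipe can opt out by clearing the variable, exactly as cmake.bbclass does further down. A hypothetical recipe fragment:

    # Disable ccache for this recipe only
    CCACHE = ""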
diff --git a/import-layers/yocto-poky/meta/classes/chrpath.bbclass b/import-layers/yocto-poky/meta/classes/chrpath.bbclass
new file mode 100644
index 000000000..9c68855ab
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/chrpath.bbclass
@@ -0,0 +1,117 @@
+CHRPATH_BIN ?= "chrpath"
+PREPROCESS_RELOCATE_DIRS ?= ""
+
+def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d):
+ import subprocess as sub
+
+    p = sub.Popen([cmd, '-l', fpath], stdout=sub.PIPE, stderr=sub.PIPE)
+    out, err = p.communicate()
+    # If the command failed, there is no rpath information to process
+    if p.returncode != 0:
+        return
+
+    # chrpath prints the rpath information on stdout; decode the bytes
+    # communicate() returns before doing any string processing
+    out = out.decode("utf-8")
+    # Handle RUNPATH as well as RPATH
+    out = out.replace("RUNPATH=","RPATH=")
+    # Throw away everything other than the rpath list
+    curr_rpath = out.partition("RPATH=")[2]
+ #bb.note("Current rpath for %s is %s" % (fpath, curr_rpath.strip()))
+ rpaths = curr_rpath.split(":")
+ new_rpaths = []
+ modified = False
+ for rpath in rpaths:
+ # If rpath is already dynamic copy it to new_rpath and continue
+ if rpath.find("$ORIGIN") != -1:
+ new_rpaths.append(rpath.strip())
+ continue
+ rpath = os.path.normpath(rpath)
+ if baseprefix not in rpath and tmpdir not in rpath:
+ new_rpaths.append(rpath.strip())
+ continue
+ new_rpaths.append("$ORIGIN/" + os.path.relpath(rpath.strip(), os.path.dirname(fpath.replace(rootdir, "/"))))
+ modified = True
+
+ # if we have modified some rpaths call chrpath to update the binary
+ if modified:
+ args = ":".join(new_rpaths)
+ #bb.note("Setting rpath for %s to %s" %(fpath, args))
+ p = sub.Popen([cmd, '-r', args, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
+ out, err = p.communicate()
+ if p.returncode != 0:
+ bb.error("%s: chrpath command failed with exit code %d:\n%s%s" % (d.getVar('PN', True), p.returncode, out, err))
+ raise bb.build.FuncFailed
+
+def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d):
+ import subprocess as sub
+
+    p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath], stdout=sub.PIPE, stderr=sub.PIPE)
+    out, err = p.communicate()
+    # If the command failed, there is no load-command output to process
+    if p.returncode != 0:
+        return
+    for l in out.decode("utf-8").split("\n"):
+ if "(compatibility" not in l:
+ continue
+ rpath = l.partition("(compatibility")[0].strip()
+ if baseprefix not in rpath:
+ continue
+
+ newpath = "@loader_path/" + os.path.relpath(rpath, os.path.dirname(fpath.replace(rootdir, "/")))
+ p = sub.Popen([d.expand("${HOST_PREFIX}install_name_tool"), '-change', rpath, newpath, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
+        out, err = p.communicate()
+
+def process_dir (rootdir, directory, d):
+ import stat
+
+ rootdir = os.path.normpath(rootdir)
+ cmd = d.expand('${CHRPATH_BIN}')
+ tmpdir = os.path.normpath(d.getVar('TMPDIR', False))
+ baseprefix = os.path.normpath(d.expand('${base_prefix}'))
+ hostos = d.getVar("HOST_OS", True)
+
+ #bb.debug("Checking %s for binaries to process" % directory)
+ if not os.path.exists(directory):
+ return
+
+ if "linux" in hostos:
+ process_file = process_file_linux
+ elif "darwin" in hostos:
+ process_file = process_file_darwin
+ else:
+ # Relocations not supported
+ return
+
+ dirs = os.listdir(directory)
+ for file in dirs:
+ fpath = directory + "/" + file
+ fpath = os.path.normpath(fpath)
+ if os.path.islink(fpath):
+ # Skip symlinks
+ continue
+
+ if os.path.isdir(fpath):
+ process_dir(rootdir, fpath, d)
+ else:
+ #bb.note("Testing %s for relocatability" % fpath)
+
+ # We need read and write permissions for chrpath, if we don't have
+ # them then set them temporarily. Take a copy of the files
+ # permissions so that we can restore them afterwards.
+ perms = os.stat(fpath)[stat.ST_MODE]
+ if os.access(fpath, os.W_OK|os.R_OK):
+ perms = None
+ else:
+ # Temporarily make the file writeable so we can chrpath it
+ os.chmod(fpath, perms|stat.S_IRWXU)
+ process_file(cmd, fpath, rootdir, baseprefix, tmpdir, d)
+
+ if perms:
+ os.chmod(fpath, perms)
+
+def rpath_replace (path, d):
+ bindirs = d.expand("${bindir} ${sbindir} ${base_sbindir} ${base_bindir} ${libdir} ${base_libdir} ${libexecdir} ${PREPROCESS_RELOCATE_DIRS}").split()
+
+ for bindir in bindirs:
+ #bb.note ("Processing directory " + bindir)
+ directory = path + "/" + bindir
+ process_dir (path, directory, d)
+
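The $ORIGIN rewrite in process_file_linux() above boils down to a single os.path.relpath() call between the directory the rpath points at and the directory holding the binary. A standalone sketch with hypothetical paths:

    import os

    # A binary in <sysroot>/usr/bin whose rpath points at <sysroot>/usr/lib
    # ends up with a relative, relocatable rpath.
    print("$ORIGIN/" + os.path.relpath("/usr/lib", "/usr/bin"))  # $ORIGIN/../lib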
diff --git a/import-layers/yocto-poky/meta/classes/clutter.bbclass b/import-layers/yocto-poky/meta/classes/clutter.bbclass
new file mode 100644
index 000000000..167407dfd
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/clutter.bbclass
@@ -0,0 +1,22 @@
+
+def get_minor_dir(v):
+ import re
+ m = re.match("^([0-9]+)\.([0-9]+)", v)
+ return "%s.%s" % (m.group(1), m.group(2))
+
+def get_real_name(n):
+ import re
+ m = re.match("^([a-z]+(-[a-z]+)?)(-[0-9]+\.[0-9]+)?", n)
+ return "%s" % (m.group(1))
+
+VERMINOR = "${@get_minor_dir("${PV}")}"
+REALNAME = "${@get_real_name("${BPN}")}"
+
+CLUTTER_SRC_FTP = "${GNOME_MIRROR}/${REALNAME}/${VERMINOR}/${REALNAME}-${PV}.tar.xz;name=archive"
+
+CLUTTER_SRC_GIT = "git://git.gnome.org/${REALNAME}"
+
+SRC_URI = "${CLUTTER_SRC_FTP}"
+S = "${WORKDIR}/${REALNAME}-${PV}"
+
+inherit autotools pkgconfig gtk-doc gettext
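A quick sketch of what the two helpers above produce, using hypothetical inputs from the clutter family:

    import re

    def get_minor_dir(v):
        m = re.match(r"^([0-9]+)\.([0-9]+)", v)
        return "%s.%s" % (m.group(1), m.group(2))

    def get_real_name(n):
        m = re.match(r"^([a-z]+(-[a-z]+)?)(-[0-9]+\.[0-9]+)?", n)
        return m.group(1)

    print(get_minor_dir("1.26.2"))           # 1.26
    print(get_real_name("clutter-gst-3.0"))  # clutter-gst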
diff --git a/import-layers/yocto-poky/meta/classes/cmake.bbclass b/import-layers/yocto-poky/meta/classes/cmake.bbclass
new file mode 100644
index 000000000..02f313a86
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/cmake.bbclass
@@ -0,0 +1,137 @@
+# Path to the CMake file to process.
+OECMAKE_SOURCEPATH ?= "${S}"
+
+DEPENDS_prepend = "cmake-native "
+B = "${WORKDIR}/build"
+
+# We need to unset CCACHE otherwise cmake gets too confused
+CCACHE = ""
+
+# C/C++ Compiler (without cpu arch/tune arguments)
+OECMAKE_C_COMPILER ?= "`echo ${CC} | sed 's/^\([^ ]*\).*/\1/'`"
+OECMAKE_CXX_COMPILER ?= "`echo ${CXX} | sed 's/^\([^ ]*\).*/\1/'`"
+OECMAKE_AR ?= "${AR}"
+
+# Compiler flags
+OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CFLAGS}"
+OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS}"
+OECMAKE_C_FLAGS_RELEASE ?= "-DNDEBUG"
+OECMAKE_CXX_FLAGS_RELEASE ?= "-DNDEBUG"
+OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}"
+OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}"
+
+OECMAKE_RPATH ?= ""
+OECMAKE_PERLNATIVE_DIR ??= ""
+OECMAKE_EXTRA_ROOT_PATH ?= ""
+
+OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM = "ONLY"
+OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM_class-native = "BOTH"
+
+EXTRA_OECMAKE_append = " ${PACKAGECONFIG_CONFARGS}"
+
+# CMake expects target architectures in the format of uname(2),
+# which do not always match TARGET_ARCH, so all the necessary
+# conversions should happen here.
+def map_target_arch_to_uname_arch(target_arch):
+ if target_arch == "powerpc":
+ return "ppc"
+ if target_arch == "powerpc64":
+ return "ppc64"
+ return target_arch
+
+cmake_do_generate_toolchain_file() {
+ cat > ${WORKDIR}/toolchain.cmake <<EOF
+# CMake system name must be something like "Linux".
+# This is important for cross-compiling.
+set( CMAKE_SYSTEM_NAME `echo ${TARGET_OS} | sed -e 's/^./\u&/' -e 's/^\(Linux\).*/\1/'` )
+set( CMAKE_SYSTEM_PROCESSOR ${@map_target_arch_to_uname_arch(d.getVar('TARGET_ARCH', True))} )
+set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
+set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
+set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} )
+set( CMAKE_AR ${OECMAKE_AR} CACHE FILEPATH "Archiver" )
+set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
+set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
+set( CMAKE_ASM_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "ASM FLAGS" )
+set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "Additional CFLAGS for release" )
+set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "Additional CXXFLAGS for release" )
+set( CMAKE_ASM_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "Additional ASM FLAGS for release" )
+set( CMAKE_C_LINK_FLAGS "${OECMAKE_C_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
+set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
+
+# only search in the paths provided so cmake doesn't pick
+# up libraries and tools from the native build machine
+set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN})
+set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
+set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ${OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM} )
+set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
+set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
+
+# Use qt.conf settings
+set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
+
+# We need to set the rpath to the correct directory as cmake does not provide any
+# directory as rpath by default
+set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} )
+
+# Use native cmake modules
+list(APPEND CMAKE_MODULE_PATH "${STAGING_DATADIR}/cmake/Modules/")
+
+# add for non /usr/lib libdir, e.g. /usr/lib64
+set( CMAKE_LIBRARY_PATH ${libdir} ${base_libdir})
+
+EOF
+}
+
+addtask generate_toolchain_file after do_patch before do_configure
+
+cmake_do_configure() {
+ if [ "${OECMAKE_BUILDPATH}" ]; then
+ bbnote "cmake.bbclass no longer uses OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
+ fi
+
+ if [ "${S}" != "${B}" ]; then
+ rm -rf ${B}
+ mkdir -p ${B}
+ cd ${B}
+ else
+ find ${B} -name CMakeFiles -or -name Makefile -or -name cmake_install.cmake -or -name CMakeCache.txt -delete
+ fi
+
+	# Just like autotools, cmake can use a site file to cache results that
+	# would otherwise need generated binaries to run
+ if [ -e ${WORKDIR}/site-file.cmake ] ; then
+ OECMAKE_SITEFILE=" -C ${WORKDIR}/site-file.cmake"
+ else
+ OECMAKE_SITEFILE=""
+ fi
+
+ cmake \
+ ${OECMAKE_SITEFILE} \
+ ${OECMAKE_SOURCEPATH} \
+ -DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
+ -DCMAKE_INSTALL_BINDIR:PATH=${bindir} \
+ -DCMAKE_INSTALL_SBINDIR:PATH=${sbindir} \
+ -DCMAKE_INSTALL_LIBEXECDIR:PATH=${libexecdir} \
+ -DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
+ -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${sharedstatedir} \
+ -DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
+ -DCMAKE_INSTALL_LIBDIR:PATH=${libdir} \
+ -DCMAKE_INSTALL_INCLUDEDIR:PATH=${includedir} \
+ -DCMAKE_INSTALL_DATAROOTDIR:PATH=${datadir} \
+ -DCMAKE_INSTALL_SO_NO_EXE=0 \
+ -DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
+ -DCMAKE_VERBOSE_MAKEFILE=1 \
+ ${EXTRA_OECMAKE} \
+ -Wno-dev
+}
+
+cmake_do_compile() {
+ cd ${B}
+ base_do_compile
+}
+
+cmake_do_install() {
+ cd ${B}
+ oe_runmake 'DESTDIR=${D}' install
+}
+
+EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
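A hypothetical recipe fragment using the class: inherit it and pass extra cache entries through EXTRA_OECMAKE (the option names below are made up):

    inherit cmake

    EXTRA_OECMAKE = "-DENABLE_TESTS=OFF -DBUILD_SHARED_LIBS=ON"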
diff --git a/import-layers/yocto-poky/meta/classes/cml1.bbclass b/import-layers/yocto-poky/meta/classes/cml1.bbclass
new file mode 100644
index 000000000..b5dc028a2
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/cml1.bbclass
@@ -0,0 +1,76 @@
+cml1_do_configure() {
+ set -e
+ unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
+ oe_runmake oldconfig
+}
+
+EXPORT_FUNCTIONS do_configure
+addtask configure after do_unpack do_patch before do_compile
+
+inherit terminal
+
+OE_TERMINAL_EXPORTS += "HOST_EXTRACFLAGS HOSTLDFLAGS TERMINFO CROSS_CURSES_LIB CROSS_CURSES_INC"
+HOST_EXTRACFLAGS = "${BUILD_CFLAGS} ${BUILD_LDFLAGS}"
+HOSTLDFLAGS = "${BUILD_LDFLAGS}"
+CROSS_CURSES_LIB = "-lncurses -ltinfo"
+CROSS_CURSES_INC = '-DCURSES_LOC="<curses.h>"'
+TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo"
+
+KCONFIG_CONFIG_COMMAND ??= "menuconfig"
+python do_menuconfig() {
+ import shutil
+
+ try:
+ mtime = os.path.getmtime(".config")
+ shutil.copy(".config", ".config.orig")
+ except OSError:
+ mtime = 0
+
+ oe_terminal("${SHELL} -c \"make %s; if [ \$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND', True),
+ d.getVar('PN', True ) + ' Configuration', d)
+
+ # FIXME this check can be removed when the minimum bitbake version has been bumped
+ if hasattr(bb.build, 'write_taint'):
+ try:
+ newmtime = os.path.getmtime(".config")
+ except OSError:
+ newmtime = 0
+
+ if newmtime > mtime:
+ bb.note("Configuration changed, recompile will be forced")
+ bb.build.write_taint('do_compile', d)
+}
+do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
+do_menuconfig[nostamp] = "1"
+addtask menuconfig after do_configure
+
+python do_diffconfig() {
+ import shutil
+ import subprocess
+
+ workdir = d.getVar('WORKDIR', True)
+ fragment = workdir + '/fragment.cfg'
+ configorig = '.config.orig'
+ config = '.config'
+
+ try:
+ md5newconfig = bb.utils.md5_file(configorig)
+ md5config = bb.utils.md5_file(config)
+ isdiff = md5newconfig != md5config
+ except IOError as e:
+ bb.fatal("No config files found. Did you do menuconfig ?\n%s" % e)
+
+ if isdiff:
+ statement = 'diff --unchanged-line-format= --old-line-format= --new-line-format="%L" ' + configorig + ' ' + config + '>' + fragment
+ subprocess.call(statement, shell=True)
+
+ shutil.copy(configorig, config)
+
+ bb.plain("Config fragment has been dumped into:\n %s" % fragment)
+ else:
+ if os.path.exists(fragment):
+ os.unlink(fragment)
+}
+
+do_diffconfig[nostamp] = "1"
+addtask diffconfig
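Both tasks are marked nostamp, so they can be rerun at will; for a kernel recipe the typical workflow is:

    $ bitbake virtual/kernel -c menuconfig   # interactive configuration
    $ bitbake virtual/kernel -c diffconfig   # dump the changes to fragment.cfg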
diff --git a/import-layers/yocto-poky/meta/classes/compress_doc.bbclass b/import-layers/yocto-poky/meta/classes/compress_doc.bbclass
new file mode 100644
index 000000000..8073c173e
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/compress_doc.bbclass
@@ -0,0 +1,260 @@
+# Compress man pages in ${mandir} and info pages in ${infodir}
+#
+# 1. Docs are compressed to the gz format by default.
+#
+# 2. Docs already compressed in a format that is listed in
+#    ${DOC_COMPRESS_LIST} but differs from ${DOC_COMPRESS} are
+#    automatically recompressed to the ${DOC_COMPRESS} format.
+#
+# 3. A new compression type can be added by editing local.conf,
+#    for example:
+#    DOC_COMPRESS_LIST_append = ' abc'
+#    DOC_COMPRESS = 'abc'
+#    DOC_COMPRESS_CMD[abc] = 'abc compress cmd ***'
+#    DOC_DECOMPRESS_CMD[abc] = 'abc decompress cmd ***'
+
+# All supported compression policy
+DOC_COMPRESS_LIST ?= "gz xz bz2"
+
+# Compression policy, must be one of ${DOC_COMPRESS_LIST}
+DOC_COMPRESS ?= "gz"
+
+# Compression shell command
+DOC_COMPRESS_CMD[gz] ?= 'gzip -v -9 -n'
+DOC_COMPRESS_CMD[bz2] ?= "bzip2 -v -9"
+DOC_COMPRESS_CMD[xz] ?= "xz -v"
+
+# Decompression shell command
+DOC_DECOMPRESS_CMD[gz] ?= 'gunzip -v'
+DOC_DECOMPRESS_CMD[bz2] ?= "bunzip2 -v"
+DOC_DECOMPRESS_CMD[xz] ?= "unxz -v"
+
+PACKAGE_PREPROCESS_FUNCS += "package_do_compress_doc compress_doc_updatealternatives"
+python package_do_compress_doc() {
+ compress_mode = d.getVar('DOC_COMPRESS', True)
+ compress_list = (d.getVar('DOC_COMPRESS_LIST', True) or '').split()
+ if compress_mode not in compress_list:
+ bb.fatal('Compression policy %s not supported (not listed in %s)\n' % (compress_mode, compress_list))
+
+ dvar = d.getVar('PKGD', True)
+ compress_cmds = {}
+ decompress_cmds = {}
+ for mode in compress_list:
+ compress_cmds[mode] = d.getVarFlag('DOC_COMPRESS_CMD', mode, True)
+ decompress_cmds[mode] = d.getVarFlag('DOC_DECOMPRESS_CMD', mode, True)
+
+ mandir = os.path.abspath(dvar + os.sep + d.getVar("mandir", True))
+ if os.path.exists(mandir):
+        # Decompress doc files whose format is not compress_mode
+ decompress_doc(mandir, compress_mode, decompress_cmds)
+ compress_doc(mandir, compress_mode, compress_cmds)
+
+ infodir = os.path.abspath(dvar + os.sep + d.getVar("infodir", True))
+ if os.path.exists(infodir):
+        # Decompress doc files whose format is not compress_mode
+ decompress_doc(infodir, compress_mode, decompress_cmds)
+ compress_doc(infodir, compress_mode, compress_cmds)
+}
+
+def _get_compress_format(file, compress_format_list):
+ for compress_format in compress_format_list:
+ compress_suffix = '.' + compress_format
+ if file.endswith(compress_suffix):
+ return compress_format
+
+ return ''
+
+# Collect hardlinks into a dict; each entry lists the hardlinks
+# which point to the same doc file, e.g.
+# {hardlink10: [hardlink11, hardlink12], ...}
+# where hardlink10, hardlink11 and hardlink12 are the same file.
+def _collect_hardlink(hardlink_dict, file):
+ for hardlink in hardlink_dict:
+        # Add to an existing hardlink entry
+ if os.path.samefile(hardlink, file):
+ hardlink_dict[hardlink].append(file)
+ return hardlink_dict
+
+ hardlink_dict[file] = []
+ return hardlink_dict
+
+def _process_hardlink(hardlink_dict, compress_mode, shell_cmds, decompress=False):
+ for target in hardlink_dict:
+ if decompress:
+ compress_format = _get_compress_format(target, shell_cmds.keys())
+ cmd = "%s -f %s" % (shell_cmds[compress_format], target)
+ bb.note('decompress hardlink %s' % target)
+ else:
+ cmd = "%s -f %s" % (shell_cmds[compress_mode], target)
+ bb.note('compress hardlink %s' % target)
+ (retval, output) = oe.utils.getstatusoutput(cmd)
+ if retval:
+ bb.warn("de/compress file failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+ return
+
+ for hardlink_dup in hardlink_dict[target]:
+ if decompress:
+ # Remove compress suffix
+ compress_suffix = '.' + compress_format
+ new_hardlink = hardlink_dup[:-len(compress_suffix)]
+ new_target = target[:-len(compress_suffix)]
+ else:
+ # Append compress suffix
+ compress_suffix = '.' + compress_mode
+ new_hardlink = hardlink_dup + compress_suffix
+ new_target = target + compress_suffix
+
+ bb.note('hardlink %s-->%s' % (new_hardlink, new_target))
+ if not os.path.exists(new_hardlink):
+ os.link(new_target, new_hardlink)
+ if os.path.exists(hardlink_dup):
+ os.unlink(hardlink_dup)
+
+def _process_symlink(file, compress_format, decompress=False):
+ compress_suffix = '.' + compress_format
+ if decompress:
+ # Remove compress suffix
+ new_linkname = file[:-len(compress_suffix)]
+ new_source = os.readlink(file)[:-len(compress_suffix)]
+ else:
+ # Append compress suffix
+ new_linkname = file + compress_suffix
+ new_source = os.readlink(file) + compress_suffix
+
+ bb.note('symlink %s-->%s' % (new_linkname, new_source))
+ if not os.path.exists(new_linkname):
+ os.symlink(new_source, new_linkname)
+
+ os.unlink(file)
+
+def _is_info(file):
+ flags = '.info .info-'.split()
+ for flag in flags:
+ if flag in os.path.basename(file):
+ return True
+
+ return False
+
+def _is_man(file):
+ import re
+
+    # This refers to the MANSECT variable in man(1.6g)'s man.config:
+    # ".1:.1p:.8:.2:.3:.3p:.4:.5:.6:.7:.9:.0p:.tcl:.n:.l:.p:.o"
+    # The name must not start with '.' and must contain one of the
+    # colon-separated suffixes above.
+ p = re.compile(r'[^\.]+\.([1-9lnop]|0p|tcl)')
+ if p.search(file):
+ return True
+
+ return False
+
+def _is_compress_doc(file, compress_format_list):
+ compress_format = _get_compress_format(file, compress_format_list)
+ compress_suffix = '.' + compress_format
+ if file.endswith(compress_suffix):
+ # Remove the compress suffix
+ uncompress_file = file[:-len(compress_suffix)]
+ if _is_info(uncompress_file) or _is_man(uncompress_file):
+ return True, compress_format
+
+ return False, ''
+
+def compress_doc(topdir, compress_mode, compress_cmds):
+ hardlink_dict = {}
+ for root, dirs, files in os.walk(topdir):
+ for f in files:
+ file = os.path.join(root, f)
+ if os.path.isdir(file):
+ continue
+
+ if _is_info(file) or _is_man(file):
+ # Symlink
+ if os.path.islink(file):
+ _process_symlink(file, compress_mode)
+ # Hardlink
+ elif os.lstat(file).st_nlink > 1:
+ _collect_hardlink(hardlink_dict, file)
+ # Normal file
+ elif os.path.isfile(file):
+ cmd = "%s %s" % (compress_cmds[compress_mode], file)
+ (retval, output) = oe.utils.getstatusoutput(cmd)
+ if retval:
+ bb.warn("compress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+ continue
+ bb.note('compress file %s' % file)
+
+ _process_hardlink(hardlink_dict, compress_mode, compress_cmds)
+
+# Decompress doc files whose format is not compress_mode
+def decompress_doc(topdir, compress_mode, decompress_cmds):
+ hardlink_dict = {}
+ decompress = True
+ for root, dirs, files in os.walk(topdir):
+ for f in files:
+ file = os.path.join(root, f)
+ if os.path.isdir(file):
+ continue
+
+ res, compress_format = _is_compress_doc(file, decompress_cmds.keys())
+            # Decompress files whose format is not compress_mode
+            if res and compress_mode != compress_format:
+ # Symlink
+ if os.path.islink(file):
+ _process_symlink(file, compress_format, decompress)
+ # Hardlink
+ elif os.lstat(file).st_nlink > 1:
+ _collect_hardlink(hardlink_dict, file)
+ # Normal file
+ elif os.path.isfile(file):
+ cmd = "%s %s" % (decompress_cmds[compress_format], file)
+ (retval, output) = oe.utils.getstatusoutput(cmd)
+ if retval:
+ bb.warn("decompress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+ continue
+ bb.note('decompress file %s' % file)
+
+ _process_hardlink(hardlink_dict, compress_mode, decompress_cmds, decompress)
+
+python compress_doc_updatealternatives () {
+ if not bb.data.inherits_class('update-alternatives', d):
+ return
+
+ mandir = d.getVar("mandir", True)
+ infodir = d.getVar("infodir", True)
+ compress_mode = d.getVar('DOC_COMPRESS', True)
+ for pkg in (d.getVar('PACKAGES', True) or "").split():
+ old_names = (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split()
+ new_names = []
+ for old_name in old_names:
+ old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name, True)
+ old_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name, True) or \
+ d.getVarFlag('ALTERNATIVE_TARGET', old_name, True) or \
+ d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or \
+ d.getVar('ALTERNATIVE_TARGET', True) or \
+ old_link
+ # Sometimes old_target is specified as relative to the link name.
+ old_target = os.path.join(os.path.dirname(old_link), old_target)
+
+            # Only rename alternatives whose target is a man or info page
+ if mandir in old_target or infodir in old_target:
+ new_name = old_name + '.' + compress_mode
+ new_link = old_link + '.' + compress_mode
+ new_target = old_target + '.' + compress_mode
+ d.delVarFlag('ALTERNATIVE_LINK_NAME', old_name)
+ d.setVarFlag('ALTERNATIVE_LINK_NAME', new_name, new_link)
+ if d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name, True):
+ d.delVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name)
+ d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, new_name, new_target)
+ elif d.getVarFlag('ALTERNATIVE_TARGET', old_name, True):
+ d.delVarFlag('ALTERNATIVE_TARGET', old_name)
+ d.setVarFlag('ALTERNATIVE_TARGET', new_name, new_target)
+ elif d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True):
+ d.setVar('ALTERNATIVE_TARGET_%s' % pkg, new_target)
+            elif d.getVar('ALTERNATIVE_TARGET', True):
+ d.setVar('ALTERNATIVE_TARGET', new_target)
+
+ new_names.append(new_name)
+
+ if new_names:
+ d.setVar('ALTERNATIVE_%s' % pkg, ' '.join(new_names))
+}
+
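A minimal local.conf sketch enabling the class and switching the policy described above to xz:

    INHERIT += "compress_doc"
    DOC_COMPRESS = "xz"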
diff --git a/import-layers/yocto-poky/meta/classes/copyleft_compliance.bbclass b/import-layers/yocto-poky/meta/classes/copyleft_compliance.bbclass
new file mode 100644
index 000000000..907c1836b
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/copyleft_compliance.bbclass
@@ -0,0 +1,64 @@
+# Deploy sources for recipes for compliance with copyleft-style licenses
+# Defaults to using symlinks, as it's a quick operation, and one can easily
+# follow the links when making use of the files (e.g. tar with the -h arg).
+#
+# vi:sts=4:sw=4:et
+
+inherit copyleft_filter
+
+COPYLEFT_SOURCES_DIR ?= '${DEPLOY_DIR}/copyleft_sources'
+
+python do_prepare_copyleft_sources () {
+ """Populate a tree of the recipe sources and emit patch series files"""
+ import os.path
+ import shutil
+
+ p = d.getVar('P', True)
+ included, reason = copyleft_should_include(d)
+ if not included:
+ bb.debug(1, 'copyleft: %s is excluded: %s' % (p, reason))
+ return
+ else:
+ bb.debug(1, 'copyleft: %s is included: %s' % (p, reason))
+
+ sources_dir = d.getVar('COPYLEFT_SOURCES_DIR', True)
+ dl_dir = d.getVar('DL_DIR', True)
+ src_uri = d.getVar('SRC_URI', True).split()
+ fetch = bb.fetch2.Fetch(src_uri, d)
+ ud = fetch.ud
+
+ pf = d.getVar('PF', True)
+ dest = os.path.join(sources_dir, pf)
+ shutil.rmtree(dest, ignore_errors=True)
+ bb.utils.mkdirhier(dest)
+
+ for u in ud.values():
+ local = os.path.normpath(fetch.localpath(u.url))
+ if local.endswith('.bb'):
+ continue
+ elif local.endswith('/'):
+ local = local[:-1]
+
+ if u.mirrortarball:
+ tarball_path = os.path.join(dl_dir, u.mirrortarball)
+ if os.path.exists(tarball_path):
+ local = tarball_path
+
+ oe.path.symlink(local, os.path.join(dest, os.path.basename(local)), force=True)
+
+ patches = src_patches(d)
+ for patch in patches:
+ _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
+ patchdir = parm.get('patchdir')
+ if patchdir:
+ series = os.path.join(dest, 'series.subdir.%s' % patchdir.replace('/', '_'))
+ else:
+ series = os.path.join(dest, 'series')
+
+ with open(series, 'a') as s:
+ s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel']))
+}
+
+addtask prepare_copyleft_sources after do_fetch before do_build
+do_prepare_copyleft_sources[dirs] = "${WORKDIR}"
+do_build[recrdeptask] += 'do_prepare_copyleft_sources'
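A local.conf sketch enabling source deployment, with the destination overridden (the path is hypothetical):

    INHERIT += "copyleft_compliance"
    COPYLEFT_SOURCES_DIR = "/srv/releases/copyleft_sources"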
diff --git a/import-layers/yocto-poky/meta/classes/copyleft_filter.bbclass b/import-layers/yocto-poky/meta/classes/copyleft_filter.bbclass
new file mode 100644
index 000000000..46be7f7d2
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/copyleft_filter.bbclass
@@ -0,0 +1,79 @@
+# Filter recipes by license: copyleft_should_include returns True for
+# recipes whose licenses match COPYLEFT_LICENSE_INCLUDE and False for
+# those matching COPYLEFT_LICENSE_EXCLUDE.
+#
+# By default, includes all GPL and LGPL, and excludes CLOSED and Proprietary.
+#
+# vi:sts=4:sw=4:et
+
+COPYLEFT_LICENSE_INCLUDE ?= 'GPL* LGPL*'
+COPYLEFT_LICENSE_INCLUDE[type] = 'list'
+COPYLEFT_LICENSE_INCLUDE[doc] = 'Space separated list of globs which include licenses'
+
+COPYLEFT_LICENSE_EXCLUDE ?= 'CLOSED Proprietary'
+COPYLEFT_LICENSE_EXCLUDE[type] = 'list'
+COPYLEFT_LICENSE_EXCLUDE[doc] = 'Space separated list of globs which exclude licenses'
+
+COPYLEFT_RECIPE_TYPE ?= '${@copyleft_recipe_type(d)}'
+COPYLEFT_RECIPE_TYPE[doc] = 'The "type" of the current recipe (e.g. target, native, cross)'
+
+COPYLEFT_RECIPE_TYPES ?= 'target'
+COPYLEFT_RECIPE_TYPES[type] = 'list'
+COPYLEFT_RECIPE_TYPES[doc] = 'Space separated list of recipe types to include'
+
+COPYLEFT_AVAILABLE_RECIPE_TYPES = 'target native nativesdk cross crosssdk cross-canadian'
+COPYLEFT_AVAILABLE_RECIPE_TYPES[type] = 'list'
+COPYLEFT_AVAILABLE_RECIPE_TYPES[doc] = 'Space separated list of available recipe types'
+
+COPYLEFT_PN_INCLUDE ?= ''
+COPYLEFT_PN_INCLUDE[type] = 'list'
+COPYLEFT_PN_INCLUDE[doc] = 'Space separated list of recipe names to include'
+
+COPYLEFT_PN_EXCLUDE ?= ''
+COPYLEFT_PN_EXCLUDE[type] = 'list'
+COPYLEFT_PN_EXCLUDE[doc] = 'Space separated list of recipe names to exclude'
+
+def copyleft_recipe_type(d):
+ for recipe_type in oe.data.typed_value('COPYLEFT_AVAILABLE_RECIPE_TYPES', d):
+ if oe.utils.inherits(d, recipe_type):
+ return recipe_type
+ return 'target'
+
+def copyleft_should_include(d):
+ """
+ Determine if this recipe's sources should be deployed for compliance
+ """
+ import ast
+ import oe.license
+ from fnmatch import fnmatchcase as fnmatch
+
+ included, motive = False, 'recipe did not match anything'
+
+ recipe_type = d.getVar('COPYLEFT_RECIPE_TYPE', True)
+ if recipe_type not in oe.data.typed_value('COPYLEFT_RECIPE_TYPES', d):
+        # Note: the license and name checks below may still override this
+        included, motive = False, 'recipe type "%s" is excluded' % recipe_type
+
+ include = oe.data.typed_value('COPYLEFT_LICENSE_INCLUDE', d)
+ exclude = oe.data.typed_value('COPYLEFT_LICENSE_EXCLUDE', d)
+
+ try:
+ is_included, reason = oe.license.is_included(d.getVar('LICENSE', True), include, exclude)
+ except oe.license.LicenseError as exc:
+ bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
+ else:
+ if is_included:
+ if reason:
+ included, motive = True, 'recipe has included licenses: %s' % ', '.join(reason)
+ else:
+ included, motive = False, 'recipe does not include a copyleft license'
+ else:
+ included, motive = False, 'recipe has excluded licenses: %s' % ', '.join(reason)
+
+ if any(fnmatch(d.getVar('PN', True), name) \
+ for name in oe.data.typed_value('COPYLEFT_PN_INCLUDE', d)):
+ included, motive = True, 'recipe included by name'
+ if any(fnmatch(d.getVar('PN', True), name) \
+ for name in oe.data.typed_value('COPYLEFT_PN_EXCLUDE', d)):
+ included, motive = False, 'recipe excluded by name'
+
+ return included, motive
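A hypothetical local.conf sketch tightening the filter via the variables defined above:

    COPYLEFT_LICENSE_INCLUDE = 'GPL* LGPL* AGPL*'
    COPYLEFT_PN_EXCLUDE = 'linux-yocto'
    COPYLEFT_RECIPE_TYPES = 'target native'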
diff --git a/import-layers/yocto-poky/meta/classes/core-image.bbclass b/import-layers/yocto-poky/meta/classes/core-image.bbclass
new file mode 100644
index 000000000..705cad8d9
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/core-image.bbclass
@@ -0,0 +1,71 @@
+# Common code for generating core reference images
+#
+# Copyright (C) 2007-2011 Linux Foundation
+
+LIC_FILES_CHKSUM = "file://${COREBASE}/LICENSE;md5=4d92cd373abda3937c2bc47fbc49d690 \
+ file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
+
+# IMAGE_FEATURES control content of the core reference images
+#
+# By default we install packagegroup-core-boot and packagegroup-base-extended packages;
+# this gives us a working (console-only) rootfs.
+#
+# Available IMAGE_FEATURES:
+#
+# - x11 - X server
+# - x11-base - X server with minimal environment
+# - x11-sato - OpenedHand Sato environment
+# - tools-debug - debugging tools
+# - eclipse-debug - Eclipse remote debugging support
+# - tools-profile - profiling tools
+# - tools-testapps - tools usable to make some device tests
+# - tools-sdk - SDK (C/C++ compiler, autotools, etc.)
+# - nfs-server - NFS server
+# - nfs-client - NFS client
+# - ssh-server-dropbear - SSH server (dropbear)
+# - ssh-server-openssh - SSH server (openssh)
+# - hwcodecs - Install hardware acceleration codecs
+# - package-management - installs package management tools and preserves the package manager database
+# - debug-tweaks - makes an image suitable for development, e.g. allowing passwordless root logins
+# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
+# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
+# - doc-pkgs - documentation packages for all installed packages in the rootfs
+# - ptest-pkgs - ptest packages for all ptest-enabled recipes
+# - read-only-rootfs - tweaks an image to support read-only rootfs
+#
+FEATURE_PACKAGES_x11 = "packagegroup-core-x11"
+FEATURE_PACKAGES_x11-base = "packagegroup-core-x11-base"
+FEATURE_PACKAGES_x11-sato = "packagegroup-core-x11-sato"
+FEATURE_PACKAGES_tools-debug = "packagegroup-core-tools-debug"
+FEATURE_PACKAGES_eclipse-debug = "packagegroup-core-eclipse-debug"
+FEATURE_PACKAGES_tools-profile = "packagegroup-core-tools-profile"
+FEATURE_PACKAGES_tools-testapps = "packagegroup-core-tools-testapps"
+FEATURE_PACKAGES_tools-sdk = "packagegroup-core-sdk packagegroup-core-standalone-sdk-target"
+FEATURE_PACKAGES_nfs-server = "packagegroup-core-nfs-server"
+FEATURE_PACKAGES_nfs-client = "packagegroup-core-nfs-client"
+FEATURE_PACKAGES_ssh-server-dropbear = "packagegroup-core-ssh-dropbear"
+FEATURE_PACKAGES_ssh-server-openssh = "packagegroup-core-ssh-openssh"
+FEATURE_PACKAGES_hwcodecs = "${MACHINE_HWCODECS}"
+
+
+# IMAGE_FEATURES_REPLACES_foo = 'bar1 bar2'
+# Including image feature foo would replace the image features bar1 and bar2
+IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear"
+
+# IMAGE_FEATURES_CONFLICTS_foo = 'bar1 bar2'
+# An error is raised if image feature foo is included together with bar1 (or bar2)
+
+MACHINE_HWCODECS ??= ""
+
+CORE_IMAGE_BASE_INSTALL = '\
+ packagegroup-core-boot \
+ packagegroup-base-extended \
+ \
+ ${CORE_IMAGE_EXTRA_INSTALL} \
+ '
+
+CORE_IMAGE_EXTRA_INSTALL ?= ""
+
+IMAGE_INSTALL ?= "${CORE_IMAGE_BASE_INSTALL}"
+
+inherit image
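A hypothetical minimal image recipe built on this class:

    SUMMARY = "Console-only image with SSH access"
    IMAGE_FEATURES += "ssh-server-dropbear package-management"
    inherit core-image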
diff --git a/import-layers/yocto-poky/meta/classes/cpan-base.bbclass b/import-layers/yocto-poky/meta/classes/cpan-base.bbclass
new file mode 100644
index 000000000..55ac05269
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/cpan-base.bbclass
@@ -0,0 +1,40 @@
+#
+# cpan-base provides various Perl-related information needed for building
+# CPAN modules
+#
+FILES_${PN} += "${libdir}/perl ${datadir}/perl"
+
+DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
+RDEPENDS_${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
+
+PERL_OWN_DIR = "${@["", "/perl-native"][(bb.data.inherits_class('native', d))]}"
+
+# Determine the staged version of perl from the perl configuration file
+# Assign vardepvalue, because otherwise signature is changed before and after
+# perl is built (from None to real version in config.sh).
+get_perl_version[vardepvalue] = "${PERL_OWN_DIR}"
+def get_perl_version(d):
+ import re
+ cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh')
+    try:
+        with open(cfg, 'r') as f:
+            lines = f.readlines()
+    except IOError:
+        return None
+    r = re.compile(r"^version='(\d*\.\d*\.\d*)'")
+    for s in lines:
+        m = r.match(s)
+        if m:
+            return m.group(1)
+    return None
+
+def is_target(d):
+ if not bb.data.inherits_class('native', d):
+ return "yes"
+ return "no"
+
+PERLLIBDIRS = "${libdir}/perl"
+PERLLIBDIRS_class-native = "${libdir}/perl-native"
+PERLVERSION := "${@get_perl_version(d)}"
+PERLVERSION[vardepvalue] = ""
diff --git a/import-layers/yocto-poky/meta/classes/cpan.bbclass b/import-layers/yocto-poky/meta/classes/cpan.bbclass
new file mode 100644
index 000000000..8e079e0d5
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/cpan.bbclass
@@ -0,0 +1,55 @@
+#
+# This is for perl modules that use the old Makefile.PL build system
+#
+inherit cpan-base perlnative
+
+EXTRA_CPANFLAGS ?= ""
+EXTRA_PERLFLAGS ?= ""
+
+# Env var which tells perl if it should use host (no) or target (yes) settings
+export PERLCONFIGTARGET = "${@is_target(d)}"
+
+# Env var which tells perl where the perl include files are
+export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}/CORE"
+export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
+export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
+export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl-native/perl/${@get_perl_version(d)}/"
+
+cpan_do_configure () {
+ export PERL5LIB="${PERL_ARCHLIB}"
+ yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL INSTALLDIRS=vendor ${EXTRA_CPANFLAGS}
+
+ # Makefile.PLs can exit with success without generating a
+ # Makefile, e.g. in cases of missing configure time
+ # dependencies. This is considered a best practice by
+ # cpantesters.org. See:
+ # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes
+ # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html
+ [ -e Makefile ] || bbfatal "No Makefile was generated by Makefile.PL"
+
+ if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
+ . ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh
+ # Use find since there can be a Makefile generated for each Makefile.PL
+ for f in `find -name Makefile.PL`; do
+ f2=`echo $f | sed -e 's/.PL//'`
+ test -f $f2 || continue
+ sed -i -e "s:\(PERL_ARCHLIB = \).*:\1${PERL_ARCHLIB}:" \
+ -e 's/perl.real/perl/' \
+ -e "s|^\(CCFLAGS =.*\)|\1 ${CFLAGS}|" \
+ $f2
+ done
+ fi
+}
+
+cpan_do_compile () {
+ oe_runmake PASTHRU_INC="${CFLAGS}" LD="${CCLD}"
+}
+
+cpan_do_install () {
+ oe_runmake DESTDIR="${D}" install_vendor
+ for PERLSCRIPT in `grep -rIEl '#! *${bindir}/perl-native.*/perl' ${D}`; do
+ sed -i -e 's|${bindir}/perl-native.*/perl|/usr/bin/env nativeperl|' $PERLSCRIPT
+ done
+}
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
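A hypothetical recipe sketch for a Makefile.PL-based module; the module name and upstream path are made up, and CPAN_MIRROR is assumed to be defined as in poky's bitbake.conf:

    SUMMARY = "Example Perl module"
    LICENSE = "Artistic-1.0 | GPL-1.0+"
    SRC_URI = "${CPAN_MIRROR}/authors/id/E/EX/EXAMPLE/Example-Module-${PV}.tar.gz"
    S = "${WORKDIR}/Example-Module-${PV}"

    inherit cpan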
diff --git a/import-layers/yocto-poky/meta/classes/cpan_build.bbclass b/import-layers/yocto-poky/meta/classes/cpan_build.bbclass
new file mode 100644
index 000000000..fac074d61
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/cpan_build.bbclass
@@ -0,0 +1,40 @@
+#
+# This is for perl modules that use the new Build.PL build system
+#
+inherit cpan-base perlnative
+
+EXTRA_CPAN_BUILD_FLAGS ?= ""
+
+# Env var which tells perl if it should use host (no) or target (yes) settings
+export PERLCONFIGTARGET = "${@is_target(d)}"
+export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
+export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl-native/perl/${@get_perl_version(d)}/"
+export LD = "${CCLD}"
+
+cpan_build_do_configure () {
+ if [ "${@is_target(d)}" = "yes" ]; then
+ # build for target
+ . ${STAGING_LIBDIR}/perl/config.sh
+ fi
+
+ perl Build.PL --installdirs vendor --destdir ${D} \
+ ${EXTRA_CPAN_BUILD_FLAGS}
+
+ # Build.PLs can exit with success without generating a
+ # Build, e.g. in cases of missing configure time
+ # dependencies. This is considered a best practice by
+ # cpantesters.org. See:
+ # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes
+ # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html
+ [ -e Build ] || bbfatal "No Build was generated by Build.PL"
+}
+
+cpan_build_do_compile () {
+ perl Build verbose=1
+}
+
+cpan_build_do_install () {
+ perl Build install --destdir ${D}
+}
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass b/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass
new file mode 100644
index 000000000..e07b1bdb6
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass
@@ -0,0 +1,182 @@
+#
+# NOTE - When using this class the user is responsible for ensuring that
+# TRANSLATED_TARGET_ARCH is added into PN. This ensures that if the TARGET_ARCH
+# is changed, another nativesdk xxx-canadian-cross can be installed
+#
+
+
+# SDK packages are built either explicitly by the user,
+# or indirectly via dependency. No need to be in 'world'.
+EXCLUDE_FROM_WORLD = "1"
+CLASSOVERRIDE = "class-cross-canadian"
+STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
+
+#
+# Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS
+#
+PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
+CANADIANEXTRAOS = "linux-uclibc linux-musl"
+CANADIANEXTRAVENDOR = ""
+MODIFYTOS ??= "1"
+python () {
+ archs = d.getVar('PACKAGE_ARCHS', True).split()
+ sdkarchs = []
+ for arch in archs:
+ sdkarchs.append(arch + '-${SDKPKGSUFFIX}')
+ d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs))
+
+    # Allow the following code segment to be disabled, e.g. by meta-environment
+ if d.getVar("MODIFYTOS", True) != "1":
+ return
+
+ if d.getVar("TCLIBC", True) == "baremetal":
+ return
+
+ tos = d.getVar("TARGET_OS", True)
+ whitelist = []
+ for variant in ["", "spe", "x32", "eabi", "n32"]:
+ for libc in ["", "uclibc", "musl"]:
+ entry = "linux"
+ if variant and libc:
+ entry = entry + "-" + libc + variant
+ elif variant:
+ entry = entry + "-gnu" + variant
+ elif libc:
+ entry = entry + "-" + libc
+ whitelist.append(entry)
+ if tos not in whitelist:
+ bb.fatal("Building cross-candian for an unknown TARGET_SYS (%s), please update cross-canadian.bbclass" % d.getVar("TARGET_SYS", True))
+
+ for n in ["PROVIDES", "DEPENDS"]:
+ d.setVar(n, d.getVar(n, True))
+ d.setVar("STAGING_BINDIR_TOOLCHAIN", d.getVar("STAGING_BINDIR_TOOLCHAIN", True))
+ for prefix in ["AR", "AS", "DLLTOOL", "CC", "CXX", "GCC", "LD", "LIPO", "NM", "OBJDUMP", "RANLIB", "STRIP", "WINDRES"]:
+ n = prefix + "_FOR_TARGET"
+ d.setVar(n, d.getVar(n, True))
+    # This is a bit ugly. We need to zero the LIBC/ABI extension which will change TARGET_OS;
+    # however, we need the old value in some variables, so we expand those here first.
+ tarch = d.getVar("TARGET_ARCH", True)
+ if tarch == "x86_64":
+ d.setVar("LIBCEXTENSION", "")
+ d.setVar("ABIEXTENSION", "")
+ d.appendVar("CANADIANEXTRAOS", " linux-gnux32 linux-uclibcx32 linux-muslx32")
+ elif tarch == "powerpc":
+ # PowerPC can build "linux" and "linux-gnuspe"
+ d.setVar("LIBCEXTENSION", "")
+ d.setVar("ABIEXTENSION", "")
+ d.appendVar("CANADIANEXTRAOS", " linux-gnuspe linux-uclibcspe linux-muslspe")
+ elif tarch == "mips64":
+ d.appendVar("CANADIANEXTRAOS", " linux-gnun32 linux-uclibcn32 linux-musln32")
+ if tarch == "arm" or tarch == "armeb":
+ d.setVar("TARGET_OS", "linux-gnueabi")
+ else:
+ d.setVar("TARGET_OS", "linux")
+
+ # Also need to handle multilib target vendors
+ vendors = d.getVar("CANADIANEXTRAVENDOR", True)
+ if not vendors:
+ vendors = all_multilib_tune_values(d, 'TARGET_VENDOR')
+ origvendor = d.getVar("TARGET_VENDOR_MULTILIB_ORIGINAL", True)
+ if origvendor:
+ d.setVar("TARGET_VENDOR", origvendor)
+ if origvendor not in vendors.split():
+ vendors = origvendor + " " + vendors
+ d.setVar("CANADIANEXTRAVENDOR", vendors)
+}
+MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${HOST_VENDOR}-${HOST_OS}"
+
+INHIBIT_DEFAULT_DEPS = "1"
+
+STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}"
+
+TOOLCHAIN_OPTIONS = " --sysroot=${STAGING_DIR}/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}"
+
+PATH_append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
+PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/"
+
+HOST_ARCH = "${SDK_ARCH}"
+HOST_VENDOR = "${SDK_VENDOR}"
+HOST_OS = "${SDK_OS}"
+HOST_PREFIX = "${SDK_PREFIX}"
+HOST_CC_ARCH = "${SDK_CC_ARCH}"
+HOST_LD_ARCH = "${SDK_LD_ARCH}"
+HOST_AS_ARCH = "${SDK_AS_ARCH}"
+
+# Assign DPKG_ARCH
+DPKG_ARCH = "${@debian_arch_map(d.getVar('SDK_ARCH', True), '')}"
+
+CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
+CFLAGS = "${BUILDSDK_CFLAGS}"
+CXXFLAGS = "${BUILDSDK_CFLAGS}"
+LDFLAGS = "${BUILDSDK_LDFLAGS} \
+ -Wl,-rpath-link,${STAGING_LIBDIR}/.. \
+ -Wl,-rpath,${libdir}/.. "
+
+DEPENDS_GETTEXT = "gettext-native nativesdk-gettext"
+
+#
+# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
+# binaries
+#
+DEPENDS_append = " chrpath-replacement-native"
+EXTRANATIVEPATH += "chrpath-native"
+
+# Path mangling needed by the cross packaging
+# Note that we use := here to ensure that libdir and includedir are
+# target paths.
+target_base_prefix := "${base_prefix}"
+target_prefix := "${prefix}"
+target_exec_prefix := "${exec_prefix}"
+target_base_libdir = "${target_base_prefix}/${baselib}"
+target_libdir = "${target_exec_prefix}/${baselib}"
+target_includedir := "${includedir}"
+
+# Change to place files in SDKPATH
+base_prefix = "${SDKPATHNATIVE}"
+prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
+exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
+bindir = "${exec_prefix}/bin/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
+sbindir = "${bindir}"
+base_bindir = "${bindir}"
+base_sbindir = "${bindir}"
+libdir = "${exec_prefix}/lib/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
+libexecdir = "${exec_prefix}/libexec/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
+
+FILES_${PN} = "${prefix}"
+
+export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${layout_libdir}/pkgconfig"
+export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
+
+do_populate_sysroot[stamp-extra-info] = ""
+do_packagedata[stamp-extra-info] = ""
+
+USE_NLS = "${SDKUSE_NLS}"
+
+# We have to use TARGET_ARCH, but we care about the absolute value
+# and not any particular tune that is enabled.
+TARGET_ARCH[vardepsexclude] = "TUNE_ARCH"
+
+# If MLPREFIX is set by multilib code, shlibs
+# points to the wrong place so force it
+SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs2"
+SHLIBSWORKDIR = "${PKGDATA_DIR}/nativesdk-shlibs2"
+
+cross_canadian_bindirlinks () {
+ for i in linux ${CANADIANEXTRAOS}
+ do
+ for v in ${CANADIANEXTRAVENDOR}
+ do
+ d=${D}${bindir}/../${TARGET_ARCH}$v-$i
+ if [ -d $d ];
+ then
+ continue
+ fi
+ install -d $d
+ for j in `ls ${D}${bindir}`
+ do
+ p=${TARGET_ARCH}$v-$i-`echo $j | sed -e s,${TARGET_PREFIX},,`
+ ln -s ../${TARGET_SYS}/$j $d/$p
+ done
+ done
+ done
+}
diff --git a/import-layers/yocto-poky/meta/classes/cross.bbclass b/import-layers/yocto-poky/meta/classes/cross.bbclass
new file mode 100644
index 000000000..81d1c9d85
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/cross.bbclass
@@ -0,0 +1,71 @@
+inherit relocatable
+
+# Cross packages are built indirectly via dependency,
+# no need for them to be a direct target of 'world'
+EXCLUDE_FROM_WORLD = "1"
+
+CLASSOVERRIDE = "class-cross"
+PACKAGES = ""
+PACKAGES_DYNAMIC = ""
+PACKAGES_DYNAMIC_class-native = ""
+
+HOST_ARCH = "${BUILD_ARCH}"
+HOST_VENDOR = "${BUILD_VENDOR}"
+HOST_OS = "${BUILD_OS}"
+HOST_PREFIX = "${BUILD_PREFIX}"
+HOST_CC_ARCH = "${BUILD_CC_ARCH}"
+HOST_LD_ARCH = "${BUILD_LD_ARCH}"
+HOST_AS_ARCH = "${BUILD_AS_ARCH}"
+
+STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}${HOST_VENDOR}-${HOST_OS}"
+
+PACKAGE_ARCH = "${BUILD_ARCH}"
+
+MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${BUILD_VENDOR}-${BUILD_OS}"
+
+export PKG_CONFIG_DIR = "${exec_prefix}/lib/pkgconfig"
+export PKG_CONFIG_SYSROOT_DIR = ""
+
+CPPFLAGS = "${BUILD_CPPFLAGS}"
+CFLAGS = "${BUILD_CFLAGS}"
+CXXFLAGS = "${BUILD_CFLAGS}"
+LDFLAGS = "${BUILD_LDFLAGS}"
+LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE}"
+
+TOOLCHAIN_OPTIONS = ""
+
+DEPENDS_GETTEXT = "gettext-native"
+
+# Path mangling needed by the cross packaging
+# Note that we use := here to ensure that libdir and includedir are
+# target paths.
+target_base_prefix := "${base_prefix}"
+target_prefix := "${prefix}"
+target_exec_prefix := "${exec_prefix}"
+target_base_libdir = "${target_base_prefix}/${baselib}"
+target_libdir = "${target_exec_prefix}/${baselib}"
+target_includedir := "${includedir}"
+
+# Overrides for paths
+CROSS_TARGET_SYS_DIR = "${TARGET_SYS}"
+prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
+base_prefix = "${STAGING_DIR_NATIVE}"
+exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
+bindir = "${exec_prefix}/bin/${CROSS_TARGET_SYS_DIR}"
+sbindir = "${bindir}"
+base_bindir = "${bindir}"
+base_sbindir = "${bindir}"
+libdir = "${exec_prefix}/lib/${CROSS_TARGET_SYS_DIR}"
+libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}"
+
+do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
+do_populate_sysroot[stamp-extra-info] = ""
+do_packagedata[stamp-extra-info] = ""
+
+do_install () {
+ oe_runmake 'DESTDIR=${D}' install
+}
+
+USE_NLS = "no"
+
+inherit nopackages
diff --git a/import-layers/yocto-poky/meta/classes/crosssdk.bbclass b/import-layers/yocto-poky/meta/classes/crosssdk.bbclass
new file mode 100644
index 000000000..7315c38f1
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/crosssdk.bbclass
@@ -0,0 +1,36 @@
+inherit cross
+
+CLASSOVERRIDE = "class-crosssdk"
+MACHINEOVERRIDES = ""
+PACKAGE_ARCH = "${SDK_ARCH}"
+python () {
+ # set TUNE_PKGARCH to SDK_ARCH
+ d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH', True))
+}
+
+STAGING_DIR_TARGET = "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}"
+STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
+
+TARGET_ARCH = "${SDK_ARCH}"
+TARGET_VENDOR = "${SDK_VENDOR}"
+TARGET_OS = "${SDK_OS}"
+TARGET_PREFIX = "${SDK_PREFIX}"
+TARGET_CC_ARCH = "${SDK_CC_ARCH}"
+TARGET_LD_ARCH = "${SDK_LD_ARCH}"
+TARGET_AS_ARCH = "${SDK_AS_ARCH}"
+TARGET_FPU = ""
+
+target_libdir = "${SDKPATHNATIVE}${libdir_nativesdk}"
+target_includedir = "${SDKPATHNATIVE}${includedir_nativesdk}"
+target_base_libdir = "${SDKPATHNATIVE}${base_libdir_nativesdk}"
+target_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
+target_exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
+baselib = "lib"
+
+do_populate_sysroot[stamp-extra-info] = ""
+do_packagedata[stamp-extra-info] = ""
+
+# Need to force this to ensure consistency across architectures
+EXTRA_OECONF_GCC_FLOAT = ""
+
+USE_NLS = "no"
diff --git a/import-layers/yocto-poky/meta/classes/debian.bbclass b/import-layers/yocto-poky/meta/classes/debian.bbclass
new file mode 100644
index 000000000..be7cacca9
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/debian.bbclass
@@ -0,0 +1,141 @@
+# Debian package renaming only occurs when a package is built.
+# We therefore have to make sure we build all runtime packages
+# before building the current package, so that the package's
+# runtime dependencies are correct.
+#
+# Custom library package names can be defined by setting
+# DEBIANNAME_ + pkgname to the desired name.
+#
+# Put another way: ensure all RDEPENDS are packaged before we package.
+# This means we can't have circular RDEPENDS/RRECOMMENDS.
+
+AUTO_LIBNAME_PKGS = "${PACKAGES}"
+
+inherit package
+
+DEBIANRDEP = "do_packagedata"
+do_package_write_ipk[rdeptask] = "${DEBIANRDEP}"
+do_package_write_deb[rdeptask] = "${DEBIANRDEP}"
+do_package_write_tar[rdeptask] = "${DEBIANRDEP}"
+do_package_write_rpm[rdeptask] = "${DEBIANRDEP}"
+
+python () {
+ if not d.getVar("PACKAGES", True):
+ d.setVar("DEBIANRDEP", "")
+}
+
+python debian_package_name_hook () {
+ import glob, copy, stat, errno, re
+
+ pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar('PACKAGES', True)
+ bin_re = re.compile(".*/s?" + os.path.basename(d.getVar("bindir", True)) + "$")
+ lib_re = re.compile(".*/" + os.path.basename(d.getVar("libdir", True)) + "$")
+ so_re = re.compile("lib.*\.so")
+
+ def socrunch(s):
+ s = s.lower().replace('_', '-')
+ m = re.match("^(.*)(.)\.so\.(.*)$", s)
+ if m is None:
+ return None
+ if m.group(2) in '0123456789':
+ bin = '%s%s-%s' % (m.group(1), m.group(2), m.group(3))
+ else:
+ bin = m.group(1) + m.group(2) + m.group(3)
+ dev = m.group(1) + m.group(2)
+ return (bin, dev)
+
+ def isexec(path):
+ try:
+ s = os.stat(path)
+ except (os.error, AttributeError):
+ return 0
+ return (s[stat.ST_MODE] & stat.S_IEXEC)
+
+ def add_rprovides(pkg, d):
+ newpkg = d.getVar('PKG_' + pkg, True)
+ if newpkg and newpkg != pkg:
+ provs = (d.getVar('RPROVIDES_' + pkg, True) or "").split()
+ if pkg not in provs:
+ d.appendVar('RPROVIDES_' + pkg, " " + pkg + " (=" + d.getVar("PKGV", True) + ")")
+
+ def auto_libname(packages, orig_pkg):
+ sonames = []
+ has_bins = 0
+ has_libs = 0
+ for file in pkgfiles[orig_pkg]:
+ root = os.path.dirname(file)
+ if bin_re.match(root):
+ has_bins = 1
+ if lib_re.match(root):
+ has_libs = 1
+ if so_re.match(os.path.basename(file)):
+ cmd = (d.getVar('TARGET_PREFIX', True) or "") + "objdump -p " + file + " 2>/dev/null"
+ fd = os.popen(cmd)
+ lines = fd.readlines()
+ fd.close()
+ for l in lines:
+ m = re.match("\s+SONAME\s+([^\s]*)", l)
+ if m and not m.group(1) in sonames:
+ sonames.append(m.group(1))
+
+ bb.debug(1, 'LIBNAMES: pkg %s libs %d bins %d sonames %s' % (orig_pkg, has_libs, has_bins, sonames))
+ soname = None
+ if len(sonames) == 1:
+ soname = sonames[0]
+ elif len(sonames) > 1:
+ lead = d.getVar('LEAD_SONAME', True)
+ if lead:
+ r = re.compile(lead)
+ filtered = []
+ for s in sonames:
+ if r.match(s):
+ filtered.append(s)
+ if len(filtered) == 1:
+ soname = filtered[0]
+ elif len(filtered) > 1:
+ bb.note("Multiple matches (%s) for LEAD_SONAME '%s'" % (", ".join(filtered), lead))
+ else:
+ bb.note("Multiple libraries (%s) found, but LEAD_SONAME '%s' doesn't match any of them" % (", ".join(sonames), lead))
+ else:
+ bb.note("Multiple libraries (%s) found and LEAD_SONAME not defined" % ", ".join(sonames))
+
+ if has_libs and not has_bins and soname:
+ soname_result = socrunch(soname)
+ if soname_result:
+ (pkgname, devname) = soname_result
+ for pkg in packages.split():
+ if (d.getVar('PKG_' + pkg, False) or d.getVar('DEBIAN_NOAUTONAME_' + pkg, False)):
+ add_rprovides(pkg, d)
+ continue
+ debian_pn = d.getVar('DEBIANNAME_' + pkg, False)
+ if debian_pn:
+ newpkg = debian_pn
+ elif pkg == orig_pkg:
+ newpkg = pkgname
+ else:
+ newpkg = pkg.replace(orig_pkg, devname, 1)
+ mlpre=d.getVar('MLPREFIX', True)
+ if mlpre:
+ if not newpkg.find(mlpre) == 0:
+ newpkg = mlpre + newpkg
+ if newpkg != pkg:
+ d.setVar('PKG_' + pkg, newpkg)
+ add_rprovides(pkg, d)
+ else:
+ add_rprovides(orig_pkg, d)
+
+    # A reversed sort is needed when one package name is a substring of
+    # another, i.e. in ncurses, without the reverse sort we get:
+ # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libtic orig_pkg ncurses-libtic debian_pn None newpkg libtic5
+ # and later
+ # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libticw orig_pkg ncurses-libtic debian_pn None newpkg libticw
+ # so we need to handle ncurses-libticw->libticw5 before ncurses-libtic->libtic5
+ for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS', True) or "").split(), reverse=True):
+ auto_libname(packages, pkg)
+}
+
+EXPORT_FUNCTIONS package_name_hook
+
+DEBIAN_NAMES = "1"
+
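A hypothetical recipe fragment exercising the override hooks read by debian_package_name_hook above (package names are made up):

    DEBIANNAME_${PN} = "libexample2"
    DEBIAN_NOAUTONAME_${PN}-tools = "1"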
diff --git a/import-layers/yocto-poky/meta/classes/deploy.bbclass b/import-layers/yocto-poky/meta/classes/deploy.bbclass
new file mode 100644
index 000000000..8ad07da01
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/deploy.bbclass
@@ -0,0 +1,11 @@
+DEPLOYDIR = "${WORKDIR}/deploy-${PN}"
+SSTATETASKS += "do_deploy"
+do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"
+do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
+
+python do_deploy_setscene () {
+ sstate_setscene(d)
+}
+addtask do_deploy_setscene
+do_deploy[dirs] = "${DEPLOYDIR} ${B}"
+do_deploy[stamp-extra-info] = "${MACHINE}"
diff --git a/import-layers/yocto-poky/meta/classes/devshell.bbclass b/import-layers/yocto-poky/meta/classes/devshell.bbclass
new file mode 100644
index 000000000..341d9c000
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/devshell.bbclass
@@ -0,0 +1,156 @@
+inherit terminal
+
+DEVSHELL = "${SHELL}"
+
+python do_devshell () {
+ if d.getVarFlag("do_devshell", "manualfakeroot", True):
+ d.prependVar("DEVSHELL", "pseudo ")
+ fakeenv = d.getVar("FAKEROOTENV", True).split()
+ for f in fakeenv:
+ k = f.split("=")
+ d.setVar(k[0], k[1])
+ d.appendVar("OE_TERMINAL_EXPORTS", " " + k[0])
+ d.delVarFlag("do_devshell", "fakeroot")
+
+ oe_terminal(d.getVar('DEVSHELL', True), 'OpenEmbedded Developer Shell', d)
+}
+
+addtask devshell after do_patch
+
+# The directory that the terminal starts in
+DEVSHELL_STARTDIR ?= "${S}"
+do_devshell[dirs] = "${DEVSHELL_STARTDIR}"
+do_devshell[nostamp] = "1"
+
+# devshell and fakeroot/pseudo need careful handling, since only the final
+# command should run under fakeroot emulation; any X connection should be
+# made as the normal user. We therefore construct the environment manually.
+python () {
+ if d.getVarFlag("do_devshell", "fakeroot", True):
+ # We need to signal our code that we want fakeroot however we
+ # can't manipulate the environment and variables here yet (see YOCTO #4795)
+ d.setVarFlag("do_devshell", "manualfakeroot", "1")
+ d.delVarFlag("do_devshell", "fakeroot")
+}
+
+def devpyshell(d):
+
+ import code
+ import select
+ import signal
+ import sys # used for the pty redirections below
+ import termios
+ import time # exposed to the interactive interpreter via _context
+
+ m, s = os.openpty()
+ sname = os.ttyname(s)
+
+ def noechoicanon(fd):
+ old = termios.tcgetattr(fd)
+ old[3] = old[3] &~ termios.ECHO &~ termios.ICANON
+ # &~ termios.ISIG
+ termios.tcsetattr(fd, termios.TCSADRAIN, old)
+
+ # No echo or buffering over the pty
+ noechoicanon(s)
+
+ pid = os.fork()
+ if pid:
+ os.close(m)
+ oe_terminal("oepydevshell-internal.py %s %d" % (sname, pid), 'OpenEmbedded Developer PyShell', d)
+ os._exit(0)
+ else:
+ os.close(s)
+
+ os.dup2(m, sys.stdin.fileno())
+ os.dup2(m, sys.stdout.fileno())
+ os.dup2(m, sys.stderr.fileno())
+
+ sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
+ sys.stdin = os.fdopen(sys.stdin.fileno(), 'r', 0)
+
+ bb.utils.nonblockingfd(sys.stdout)
+ bb.utils.nonblockingfd(sys.stderr)
+ bb.utils.nonblockingfd(sys.stdin)
+
+ _context = {
+ "os": os,
+ "bb": bb,
+ "time": time,
+ "d": d,
+ }
+
+ ps1 = "pydevshell> "
+ ps2 = "... "
+ buf = []
+ more = False
+
+ i = code.InteractiveInterpreter(locals=_context)
+ print("OE PyShell (PN = %s)\n" % d.getVar("PN", True))
+
+ def prompt(more):
+ if more:
+ prompt = ps2
+ else:
+ prompt = ps1
+ sys.stdout.write(prompt)
+
+ # Restore Ctrl+C since bitbake masks this
+ def signal_handler(signal, frame):
+ raise KeyboardInterrupt
+ signal.signal(signal.SIGINT, signal_handler)
+
+ child = None
+
+ prompt(more)
+ while True:
+ try:
+ try:
+ (r, _, _) = select.select([sys.stdin], [], [], 1)
+ if not r:
+ continue
+ line = sys.stdin.readline().strip()
+ if not line:
+ prompt(more)
+ continue
+ except EOFError as e:
+ sys.stdout.write("\n")
+ except (OSError, IOError) as e:
+ if e.errno == 11:
+ continue
+ if e.errno == 5:
+ return
+ raise
+ else:
+ if not child:
+ child = int(line)
+ continue
+ buf.append(line)
+ source = "\n".join(buf)
+ more = i.runsource(source, "<pyshell>")
+ if not more:
+ buf = []
+ prompt(more)
+ except KeyboardInterrupt:
+ i.write("\nKeyboardInterrupt\n")
+ buf = []
+ more = False
+ prompt(more)
+ except SystemExit:
+ # Easiest way to ensure everything exits
+ os.kill(child, signal.SIGTERM)
+ break
+
+python do_devpyshell() {
+ import signal
+
+ try:
+ devpyshell(d)
+ except SystemExit:
+ # Stop the SIGTERM above causing an error exit code
+ return
+ finally:
+ return
+}
+addtask devpyshell after do_patch
+
+do_devpyshell[nostamp] = "1"
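Both tasks are invoked on demand, e.g. (busybox is just an example recipe name):

    $ bitbake busybox -c devshell     # terminal in ${S} with the task environment
    $ bitbake busybox -c devpyshell   # interactive Python shell with the datastore as 'd'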
diff --git a/import-layers/yocto-poky/meta/classes/distro_features_check.bbclass b/import-layers/yocto-poky/meta/classes/distro_features_check.bbclass
new file mode 100644
index 000000000..7e91dbcf4
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/distro_features_check.bbclass
@@ -0,0 +1,37 @@
+# Allow checking of required and conflicting DISTRO_FEATURES
+#
+# ANY_OF_DISTRO_FEATURES: ensure at least one item on this list is included
+# in DISTRO_FEATURES.
+# REQUIRED_DISTRO_FEATURES: ensure every item on this list is included
+# in DISTRO_FEATURES.
+# CONFLICT_DISTRO_FEATURES: ensure no item in this list is included in
+# DISTRO_FEATURES.
+#
+# Copyright 2013 (C) O.S. Systems Software LTDA.
+
+python () {
+ # Assume at least one var is set.
+ distro_features = (d.getVar('DISTRO_FEATURES', True) or "").split()
+
+ any_of_distro_features = d.getVar('ANY_OF_DISTRO_FEATURES', True)
+ if any_of_distro_features:
+ any_of_distro_features = any_of_distro_features.split()
+ if set(any_of_distro_features).isdisjoint(distro_features):
+ raise bb.parse.SkipPackage("one of '%s' needs to be in DISTRO_FEATURES" % any_of_distro_features)
+
+ required_distro_features = d.getVar('REQUIRED_DISTRO_FEATURES', True)
+ if required_distro_features:
+ required_distro_features = required_distro_features.split()
+ for f in required_distro_features:
+ if f not in distro_features:
+ raise bb.parse.SkipPackage("missing required distro feature '%s' (not in DISTRO_FEATURES)" % f)
+
+ conflict_distro_features = d.getVar('CONFLICT_DISTRO_FEATURES', True)
+ if conflict_distro_features:
+ conflict_distro_features = conflict_distro_features.split()
+ for f in conflict_distro_features:
+ if f in distro_features:
+ raise bb.parse.SkipPackage("conflicting distro feature '%s' (in DISTRO_FEATURES)" % f)
+}
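An inheriting recipe only declares its constraints and is skipped at parse time when they are not met; an illustrative fragment:

    inherit distro_features_check
    REQUIRED_DISTRO_FEATURES = "x11"
    CONFLICT_DISTRO_FEATURES = "wayland"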
diff --git a/import-layers/yocto-poky/meta/classes/distrodata.bbclass b/import-layers/yocto-poky/meta/classes/distrodata.bbclass
new file mode 100644
index 000000000..51bfc1e54
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/distrodata.bbclass
@@ -0,0 +1,480 @@
+include conf/distro/include/package_regex.inc
+include conf/distro/include/upstream_tracking.inc
+include conf/distro/include/distro_alias.inc
+include conf/distro/include/maintainers.inc
+
+addhandler distro_eventhandler
+distro_eventhandler[eventmask] = "bb.event.BuildStarted"
+python distro_eventhandler() {
+ import oe.distro_check as dc
+ import csv
+ logfile = dc.create_log_file(e.data, "distrodata.csv")
+
+ lf = bb.utils.lockfile("%s.lock" % logfile)
+ with open(logfile, "a") as f:
+ writer = csv.writer(f)
+ writer.writerow(['Package', 'Description', 'Owner', 'License',
+ 'VerMatch', 'Version', 'Upstream', 'Reason', 'Recipe Status',
+ 'Distro 1', 'Distro 2', 'Distro 3'])
+ bb.utils.unlockfile(lf)
+
+ return
+}
+
+addtask distrodata_np
+do_distrodata_np[nostamp] = "1"
+python do_distrodata_np() {
+ localdata = bb.data.createCopy(d)
+ pn = d.getVar("PN", True)
+ bb.note("Package Name: %s" % pn)
+
+ import oe.distro_check as dist_check
+ tmpdir = d.getVar('TMPDIR', True)
+ distro_check_dir = os.path.join(tmpdir, "distro_check")
+ datetime = localdata.getVar('DATETIME', True)
+ dist_check.update_distro_data(distro_check_dir, datetime, localdata)
+
+ if pn.find("-native") != -1:
+ pnstripped = pn.split("-native")
+ bb.note("Native Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pn.find("-cross") != -1:
+ pnstripped = pn.split("-cross")
+ bb.note("cross Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pn.find("-crosssdk") != -1:
+ pnstripped = pn.split("-crosssdk")
+ bb.note("cross Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pn.startswith("nativesdk-"):
+ pnstripped = pn.replace("nativesdk-", "")
+ bb.note("NativeSDK Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+
+ if pn.find("-initial") != -1:
+ pnstripped = pn.split("-initial")
+ bb.note("initial Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ """generate package information from .bb file"""
+ pname = localdata.getVar('PN', True)
+ pcurver = localdata.getVar('PV', True)
+ pdesc = localdata.getVar('DESCRIPTION', True)
+ if pdesc is not None:
+ pdesc = pdesc.replace(',','')
+ pdesc = pdesc.replace('\n','')
+
+ pgrp = localdata.getVar('SECTION', True)
+ plicense = localdata.getVar('LICENSE', True).replace(',','_')
+
+ rstatus = localdata.getVar('RECIPE_COLOR', True)
+ if rstatus is not None:
+ rstatus = rstatus.replace(',','')
+
+ pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION', True)
+ if pcurver == pupver:
+ vermatch="1"
+ else:
+ vermatch="0"
+ noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True)
+ if noupdate_reason is None:
+ noupdate="0"
+ else:
+ noupdate="1"
+ noupdate_reason = noupdate_reason.replace(',','')
+
+ maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
+ rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE', True)
+ result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
+
+ bb.note("DISTRO: %s,%s,%s,%s,%s,%s,%s,%s,%s\n" % \
+ (pname, pdesc, maintainer, plicense, vermatch, pcurver, pupver, noupdate_reason, rstatus))
+ line = pn
+ for i in result:
+ line = line + "," + i
+ bb.note("%s\n" % line)
+}
+do_distrodata_np[vardepsexclude] = "DATETIME"
+
+addtask distrodata
+do_distrodata[nostamp] = "1"
+python do_distrodata() {
+ import csv
+ logpath = d.getVar('LOG_DIR', True)
+ bb.utils.mkdirhier(logpath)
+ logfile = os.path.join(logpath, "distrodata.csv")
+
+ import oe.distro_check as dist_check
+ localdata = bb.data.createCopy(d)
+ tmpdir = d.getVar('TMPDIR', True)
+ distro_check_dir = os.path.join(tmpdir, "distro_check")
+ datetime = localdata.getVar('DATETIME', True)
+ dist_check.update_distro_data(distro_check_dir, datetime, localdata)
+
+ pn = d.getVar("PN", True)
+ bb.note("Package Name: %s" % pn)
+
+ if pn.find("-native") != -1:
+ pnstripped = pn.split("-native")
+ bb.note("Native Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pn.startswith("nativesdk-"):
+ pnstripped = pn.replace("nativesdk-", "")
+ bb.note("NativeSDK Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pn.find("-cross") != -1:
+ pnstripped = pn.split("-cross")
+ bb.note("cross Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pn.find("-crosssdk") != -1:
+ pnstripped = pn.split("-crosssdk")
+ bb.note("cross Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pn.find("-initial") != -1:
+ pnstripped = pn.split("-initial")
+ bb.note("initial Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ """generate package information from .bb file"""
+ pname = localdata.getVar('PN', True)
+ pcurver = localdata.getVar('PV', True)
+ pdesc = localdata.getVar('DESCRIPTION', True)
+ if pdesc is not None:
+ pdesc = pdesc.replace(',','')
+ pdesc = pdesc.replace('\n','')
+
+ pgrp = localdata.getVar('SECTION', True)
+ plicense = localdata.getVar('LICENSE', True).replace(',','_')
+
+ rstatus = localdata.getVar('RECIPE_COLOR', True)
+ if rstatus is not None:
+ rstatus = rstatus.replace(',','')
+
+ pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION', True)
+ if pcurver == pupver:
+ vermatch="1"
+ else:
+ vermatch="0"
+
+ noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True)
+ if noupdate_reason is None:
+ noupdate="0"
+ else:
+ noupdate="1"
+ noupdate_reason = noupdate_reason.replace(',','')
+
+ maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
+ rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE', True)
+ # do the comparison
+ result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
+
+ lf = bb.utils.lockfile("%s.lock" % logfile)
+ with open(logfile, "a") as f:
+ row = [pname, pdesc, maintainer, plicense, vermatch, pcurver, pupver, noupdate_reason, rstatus]
+ row.extend(result)
+
+ writer = csv.writer(f)
+ writer.writerow(row)
+ bb.utils.unlockfile(lf)
+}
+do_distrodata[vardepsexclude] = "DATETIME"
+
+addtask distrodataall after do_distrodata
+do_distrodataall[recrdeptask] = "do_distrodataall do_distrodata"
+do_distrodataall[recideptask] = "do_${BB_DEFAULT_TASK}"
+do_distrodataall[nostamp] = "1"
+do_distrodataall() {
+ :
+}
+
+addhandler checkpkg_eventhandler
+checkpkg_eventhandler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted"
+python checkpkg_eventhandler() {
+ import csv
+
+ def parse_csv_file(filename):
+ package_dict = {}
+
+ with open(filename, "r") as f:
+ reader = csv.reader(f, delimiter='\t')
+ for row in reader:
+ if reader.line_num == 1:
+ header = row
+ continue
+
+ pn = row[0]
+ if pn not in package_dict:
+ package_dict[pn] = row
+
+ with open(filename, "w") as f:
+ writer = csv.writer(f, delimiter='\t')
+ writer.writerow(header)
+ for pn in package_dict.keys():
+ writer.writerow(package_dict[pn])
+
+ del package_dict
+
+ if bb.event.getName(e) == "BuildStarted":
+ import oe.distro_check as dc
+ logfile = dc.create_log_file(e.data, "checkpkg.csv")
+
+ lf = bb.utils.lockfile("%s.lock" % logfile)
+ with open(logfile, "a") as f:
+ writer = csv.writer(f, delimiter='\t')
+ headers = ['Package', 'Version', 'Upver', 'License', 'Section',
+ 'Home', 'Release', 'Depends', 'BugTracker', 'PE', 'Description',
+ 'Status', 'Tracking', 'URI', 'MAINTAINER', 'NoUpReason']
+ writer.writerow(headers)
+ bb.utils.unlockfile(lf)
+ elif bb.event.getName(e) == "BuildCompleted":
+ import os
+ filename = "tmp/log/checkpkg.csv"
+ if os.path.isfile(filename):
+ lf = bb.utils.lockfile("%s.lock"%filename)
+ parse_csv_file(filename)
+ bb.utils.unlockfile(lf)
+ return
+}
+
+addtask checkpkg
+do_checkpkg[nostamp] = "1"
+python do_checkpkg() {
+ localdata = bb.data.createCopy(d)
+ import csv
+ import re
+ import tempfile
+ import subprocess
+ import oe.recipeutils
+ from bb.utils import vercmp_string
+ from bb.fetch2 import FetchError, NoMethodError, decodeurl
+
+ """first check whether a uri is provided"""
+ src_uri = (d.getVar('SRC_URI', True) or '').split()
+ if src_uri:
+ uri_type, _, _, _, _, _ = decodeurl(src_uri[0])
+ else:
+ uri_type = "none"
+
+ """initialize log files."""
+ logpath = d.getVar('LOG_DIR', True)
+ bb.utils.mkdirhier(logpath)
+ logfile = os.path.join(logpath, "checkpkg.csv")
+
+ """generate package information from .bb file"""
+ pname = d.getVar('PN', True)
+
+ if pname.find("-native") != -1:
+ if d.getVar('BBCLASSEXTEND', True):
+ return
+ pnstripped = pname.split("-native")
+ bb.note("Native Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pname.startswith("nativesdk-"):
+ if d.getVar('BBCLASSEXTEND', True):
+ return
+ pnstripped = pname.replace("nativesdk-", "")
+ bb.note("NativeSDK Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pname.find("-cross") != -1:
+ pnstripped = pname.split("-cross")
+ bb.note("cross Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pname.find("-initial") != -1:
+ pnstripped = pname.split("-initial")
+ bb.note("initial Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ pdesc = localdata.getVar('DESCRIPTION', True)
+ pgrp = localdata.getVar('SECTION', True)
+ pversion = localdata.getVar('PV', True)
+ plicense = localdata.getVar('LICENSE', True)
+ psection = localdata.getVar('SECTION', True)
+ phome = localdata.getVar('HOMEPAGE', True)
+ prelease = localdata.getVar('PR', True)
+ pdepends = localdata.getVar('DEPENDS', True)
+ pbugtracker = localdata.getVar('BUGTRACKER', True)
+ ppe = localdata.getVar('PE', True)
+ psrcuri = localdata.getVar('SRC_URI', True)
+ maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
+
+ """ Get upstream version """
+ pupver = ""
+ pstatus = ""
+
+ try:
+ uv = oe.recipeutils.get_recipe_upstream_version(localdata)
+
+ pupver = uv['version']
+ except Exception as e:
+ # 'e is FetchError' compares the instance to the class and is always
+ # False; isinstance() is what's needed here
+ if isinstance(e, FetchError):
+ pstatus = "ErrAccess"
+ elif isinstance(e, NoMethodError):
+ pstatus = "ErrUnsupportedProto"
+ else:
+ pstatus = "ErrUnknown"
+
+ """Set upstream version status"""
+ if not pupver:
+ pupver = "N/A"
+ else:
+ pv, _, _ = oe.recipeutils.get_recipe_pv_without_srcpv(pversion, uri_type)
+ upv, _, _ = oe.recipeutils.get_recipe_pv_without_srcpv(pupver, uri_type)
+
+ cmp = vercmp_string(pv, upv)
+ if cmp == -1:
+ pstatus = "UPDATE"
+ elif cmp == 0:
+ pstatus = "MATCH"
+
+ if psrcuri:
+ psrcuri = psrcuri.split()[0]
+ else:
+ psrcuri = "none"
+ pdepends = "".join(pdepends.split("\t"))
+ pdesc = "".join(pdesc.split("\t"))
+ no_upgr_reason = d.getVar('RECIPE_NO_UPDATE_REASON', True)
+ lf = bb.utils.lockfile("%s.lock" % logfile)
+ with open(logfile, "a") as f:
+ writer = csv.writer(f, delimiter='\t')
+ writer.writerow([pname, pversion, pupver, plicense, psection, phome,
+ prelease, pdepends, pbugtracker, ppe, pdesc, pstatus, pupver,
+ psrcuri, maintainer, no_upgr_reason])
+ bb.utils.unlockfile(lf)
+}
+
+addtask checkpkgall after do_checkpkg
+do_checkpkgall[recrdeptask] = "do_checkpkgall do_checkpkg"
+do_checkpkgall[recideptask] = "do_${BB_DEFAULT_TASK}"
+do_checkpkgall[nostamp] = "1"
+do_checkpkgall() {
+ :
+}
+
+addhandler distro_check_eventhandler
+distro_check_eventhandler[eventmask] = "bb.event.BuildStarted"
+python distro_check_eventhandler() {
+ """initialize log files."""
+ import oe.distro_check as dc
+ result_file = dc.create_log_file(e.data, "distrocheck.csv")
+ return
+}
+
+addtask distro_check
+do_distro_check[nostamp] = "1"
+python do_distro_check() {
+ """checks if the package is present in other public Linux distros"""
+ import oe.distro_check as dc
+ import shutil
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk',d):
+ return
+
+ localdata = bb.data.createCopy(d)
+ bb.data.update_data(localdata)
+ tmpdir = d.getVar('TMPDIR', True)
+ distro_check_dir = os.path.join(tmpdir, "distro_check")
+ logpath = d.getVar('LOG_DIR', True)
+ bb.utils.mkdirhier(logpath)
+ result_file = os.path.join(logpath, "distrocheck.csv")
+ datetime = localdata.getVar('DATETIME', True)
+ dc.update_distro_data(distro_check_dir, datetime, localdata)
+
+ # do the comparison
+ result = dc.compare_in_distro_packages_list(distro_check_dir, d)
+
+ # save the results
+ dc.save_distro_check_result(result, datetime, result_file, d)
+}
+
+addtask distro_checkall after do_distro_check
+do_distro_checkall[recrdeptask] = "do_distro_checkall do_distro_check"
+do_distro_checkall[recideptask] = "do_${BB_DEFAULT_TASK}"
+do_distro_checkall[nostamp] = "1"
+do_distro_checkall() {
+ :
+}
+#
+# Check missing license text.
+# Use this task to generate missing-license-text data for the pkg-report
+# system, so that we can find recipes whose license text does not exist in
+# the common-licenses directory.
+#
+addhandler checklicense_eventhandler
+checklicense_eventhandler[eventmask] = "bb.event.BuildStarted"
+python checklicense_eventhandler() {
+ """initialize log files."""
+ import csv
+ import oe.distro_check as dc
+ logfile = dc.create_log_file(e.data, "missinglicense.csv")
+ lf = bb.utils.lockfile("%s.lock" % logfile)
+ with open(logfile, "a") as f:
+ writer = csv.writer(f, delimiter='\t')
+ writer.writerow(['Package', 'License', 'MissingLicense'])
+ bb.utils.unlockfile(lf)
+ return
+}
+
+addtask checklicense
+do_checklicense[nostamp] = "1"
+python do_checklicense() {
+ import csv
+ import shutil
+ logpath = d.getVar('LOG_DIR', True)
+ bb.utils.mkdirhier(logpath)
+ pn = d.getVar('PN', True)
+ logfile = os.path.join(logpath, "missinglicense.csv")
+ generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
+ license_types = d.getVar('LICENSE', True)
+ for license_type in ((license_types.replace('+', '').replace('|', '&')
+ .replace('(', '').replace(')', '').replace(';', '')
+ .replace(',', '').replace(" ", "").split("&"))):
+ if not os.path.isfile(os.path.join(generic_directory, license_type)):
+ lf = bb.utils.lockfile("%s.lock" % logfile)
+ with open(logfile, "a") as f:
+ writer = csv.writer(f, delimiter='\t')
+ writer.writerow([pn, license_types, license_type])
+ bb.utils.unlockfile(lf)
+ return
+}
+
+addtask checklicenseall after do_checklicense
+do_checklicenseall[recrdeptask] = "do_checklicenseall do_checklicense"
+do_checklicenseall[recideptask] = "do_${BB_DEFAULT_TASK}"
+do_checklicenseall[nostamp] = "1"
+do_checklicenseall() {
+ :
+}
+
+
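These tasks are run explicitly rather than during a normal build; typical invocations (illustrative):

    $ bitbake <recipe> -c checkpkg    # upstream version check for one recipe
    $ bitbake world -c checkpkgall    # whole-world report in ${LOG_DIR}/checkpkg.csv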
diff --git a/import-layers/yocto-poky/meta/classes/distutils-base.bbclass b/import-layers/yocto-poky/meta/classes/distutils-base.bbclass
new file mode 100644
index 000000000..aa18e8b29
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/distutils-base.bbclass
@@ -0,0 +1,4 @@
+DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES', True) == '')]}"
+RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
+
+inherit distutils-common-base pythonnative
diff --git a/import-layers/yocto-poky/meta/classes/distutils-common-base.bbclass b/import-layers/yocto-poky/meta/classes/distutils-common-base.bbclass
new file mode 100644
index 000000000..08511f59c
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/distutils-common-base.bbclass
@@ -0,0 +1,17 @@
+inherit python-dir
+
+EXTRA_OEMAKE = ""
+
+export STAGING_INCDIR
+export STAGING_LIBDIR
+
+FILES_${PN} += "${libdir}/* ${libdir}/${PYTHON_DIR}/*"
+
+FILES_${PN}-staticdev += "\
+ ${PYTHON_SITEPACKAGES_DIR}/*.a \
+"
+FILES_${PN}-dev += "\
+ ${datadir}/pkgconfig \
+ ${libdir}/pkgconfig \
+ ${PYTHON_SITEPACKAGES_DIR}/*.la \
+"
diff --git a/import-layers/yocto-poky/meta/classes/distutils-native-base.bbclass b/import-layers/yocto-poky/meta/classes/distutils-native-base.bbclass
new file mode 100644
index 000000000..509cb9551
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/distutils-native-base.bbclass
@@ -0,0 +1,3 @@
+inherit distutils-common-base
+
+DEPENDS += "${@["${PYTHON_PN}-native", ""][(d.getVar('PACKAGES', True) == '')]}"
diff --git a/import-layers/yocto-poky/meta/classes/distutils-tools.bbclass b/import-layers/yocto-poky/meta/classes/distutils-tools.bbclass
new file mode 100644
index 000000000..8d9b3f78f
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/distutils-tools.bbclass
@@ -0,0 +1,77 @@
+DISTUTILS_BUILD_ARGS ?= ""
+DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
+DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
+ --install-data=${STAGING_DATADIR}"
+DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
+ --install-data=${D}/${datadir}"
+
+distutils_do_compile() {
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
+ bbfatal_log "${PYTHON_PN} setup.py build execution failed."
+}
+
+distutils_stage_headers() {
+ install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
+ BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
+ bbfatal_log "${PYTHON_PN} setup.py install_headers execution failed."
+}
+
+distutils_stage_all() {
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
+ PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
+ BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
+ bbfatal_log "${PYTHON_PN} setup.py install (stage) execution failed."
+}
+
+distutils_do_install() {
+ echo "Beginning ${PN} Install ..."
+ install -d ${D}${PYTHON_SITEPACKAGES_DIR}
+ echo "Step 2 of ${PN} Install ..."
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ PYTHONPATH=${D}/${PYTHON_SITEPACKAGES_DIR} \
+ BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
+ bbfatal_log "${PYTHON_PN} setup.py install execution failed."
+
+ echo "Step 3 of ${PN} Install ..."
+ # support filenames with *spaces*
+ find ${D} -name "*.py" -print0 | while read -d $'\0' i ; do \
+ sed -i -e s:${D}::g $i
+ done
+
+ echo "Step 4 of ${PN} Install ..."
+ if test -e ${D}${bindir} ; then
+ for i in ${D}${bindir}/* ; do \
+ sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
+ done
+ fi
+
+ echo "Step 5 of ${PN} Install ..."
+ if test -e ${D}${sbindir}; then
+ for i in ${D}${sbindir}/* ; do \
+ sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
+ done
+ fi
+
+ echo "Step 6 of ${PN} Install ..."
+ rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
+
+ #
+ # FIXME: Bandaid against wrong datadir computation
+ #
+ if test -e ${D}${datadir}/share; then
+ mv -f ${D}${datadir}/share/* ${D}${datadir}/
+ fi
+}
+
+#EXPORT_FUNCTIONS do_compile do_install
+
+export LDSHARED="${CCLD} -shared"
diff --git a/import-layers/yocto-poky/meta/classes/distutils.bbclass b/import-layers/yocto-poky/meta/classes/distutils.bbclass
new file mode 100644
index 000000000..da48a2ed5
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/distutils.bbclass
@@ -0,0 +1,86 @@
+inherit distutils-base
+
+DISTUTILS_BUILD_ARGS ?= ""
+DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
+DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
+ --install-data=${STAGING_DATADIR}"
+DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
+ --install-data=${D}/${datadir}"
+
+distutils_do_compile() {
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
+ bbfatal_log "${PYTHON_PN} setup.py build execution failed."
+}
+
+distutils_stage_headers() {
+ install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
+ BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
+ bbfatal_log "${PYTHON_PN} setup.py install_headers execution failed."
+}
+
+distutils_stage_all() {
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
+ PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
+ BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
+ bbfatal_log "${PYTHON_PN} setup.py install (stage) execution failed."
+}
+
+distutils_do_install() {
+ install -d ${D}${PYTHON_SITEPACKAGES_DIR}
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
+ BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
+ bbfatal_log "${PYTHON_PN} setup.py install execution failed."
+
+ # support filenames with *spaces*
+ # only modify file if it contains path and recompile it
+ find ${D} -name "*.py" -exec grep -q ${D} {} \; -exec sed -i -e s:${D}::g {} \; -exec ${STAGING_BINDIR_NATIVE}/python-native/python -mcompileall {} \;
+
+ if test -e ${D}${bindir} ; then
+ for i in ${D}${bindir}/* ; do \
+ if [ ${PN} != "${BPN}-native" ]; then
+ sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${bindir}/env\ python:g $i
+ fi
+ sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
+ done
+ fi
+
+ if test -e ${D}${sbindir}; then
+ for i in ${D}${sbindir}/* ; do \
+ if [ ${PN} != "${BPN}-native" ]; then
+ sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${bindir}/env\ python:g $i
+ fi
+ sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
+ done
+ fi
+
+ rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
+ rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/site.py*
+
+ #
+ # FIXME: Bandaid against wrong datadir computation
+ #
+ if test -e ${D}${datadir}/share; then
+ mv -f ${D}${datadir}/share/* ${D}${datadir}/
+ rmdir ${D}${datadir}/share
+ fi
+
+ # Fix backport modules
+ if test -e ${STAGING_LIBDIR}/${PYTHON_DIR}/site-packages/backports/__init__.py && test -e ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.py; then
+ rm ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.py;
+ rm ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.pyc;
+ fi
+}
+
+EXPORT_FUNCTIONS do_compile do_install
+
+export LDSHARED="${CCLD} -shared"
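A recipe for a setup.py-based module then needs little beyond the inherit; a hypothetical fragment:

    inherit distutils
    # passed straight through to 'setup.py build' (hypothetical flag):
    DISTUTILS_BUILD_ARGS += "--with-bar"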
diff --git a/import-layers/yocto-poky/meta/classes/distutils3-base.bbclass b/import-layers/yocto-poky/meta/classes/distutils3-base.bbclass
new file mode 100644
index 000000000..2a093d3a8
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/distutils3-base.bbclass
@@ -0,0 +1,8 @@
+DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES', True) == '')]}"
+RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
+
+PYTHON_BASEVERSION = "3.5"
+PYTHON_ABI = "m"
+
+inherit distutils-common-base python3native
+
diff --git a/import-layers/yocto-poky/meta/classes/distutils3-native-base.bbclass b/import-layers/yocto-poky/meta/classes/distutils3-native-base.bbclass
new file mode 100644
index 000000000..db9a1a73c
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/distutils3-native-base.bbclass
@@ -0,0 +1,4 @@
+PYTHON_BASEVERSION = "3.5"
+PYTHON_ABI = "m"
+
+inherit distutils-native-base
diff --git a/import-layers/yocto-poky/meta/classes/distutils3.bbclass b/import-layers/yocto-poky/meta/classes/distutils3.bbclass
new file mode 100644
index 000000000..4f6ca4482
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/distutils3.bbclass
@@ -0,0 +1,100 @@
+inherit distutils3-base
+
+DISTUTILS_BUILD_ARGS ?= ""
+DISTUTILS_BUILD_EXT_ARGS ?= ""
+DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
+DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
+ --install-data=${STAGING_DATADIR}"
+DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
+ --install-data=${D}/${datadir}"
+
+distutils3_do_compile() {
+ if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
+ SYS=${MACHINE}
+ else
+ SYS=${HOST_SYS}
+ fi
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
+ build ${DISTUTILS_BUILD_ARGS} || \
+ bbfatal_log "${PYTHON_PN} setup.py build execution failed."
+}
+distutils3_do_compile[vardepsexclude] = "MACHINE"
+
+distutils3_stage_headers() {
+ install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
+ if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
+ SYS=${MACHINE}
+ else
+ SYS=${HOST_SYS}
+ fi
+ BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
+ bbfatal_log "${PYTHON_PN} setup.py install_headers execution failed."
+}
+distutils3_stage_headers[vardepsexclude] = "MACHINE"
+
+distutils3_stage_all() {
+ if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
+ SYS=${MACHINE}
+ else
+ SYS=${HOST_SYS}
+ fi
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
+ PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
+ BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
+ bbfatal_log "${PYTHON_PN} setup.py install (stage) execution failed."
+}
+distutils3_stage_all[vardepsexclude] = "MACHINE"
+
+distutils3_do_install() {
+ install -d ${D}${PYTHON_SITEPACKAGES_DIR}
+ if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
+ SYS=${MACHINE}
+ else
+ SYS=${HOST_SYS}
+ fi
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
+ BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
+ bbfatal_log "${PYTHON_PN} setup.py install execution failed."
+
+ # support filenames with *spaces*
+ find ${D} -name "*.py" -exec grep -q ${D} {} \; -exec sed -i -e s:${D}::g {} \;
+
+ if test -e ${D}${bindir} ; then
+ for i in ${D}${bindir}/* ; do \
+ sed -i -e s:${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}:${bindir}/env\ ${PYTHON_PN}:g $i
+ sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
+ done
+ fi
+
+ if test -e ${D}${sbindir}; then
+ for i in ${D}${sbindir}/* ; do \
+ sed -i -e s:${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}:${bindir}/env\ ${PYTHON_PN}:g $i
+ sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
+ done
+ fi
+
+ rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
+
+ #
+ # FIXME: Bandaid against wrong datadir computation
+ #
+ if test -e ${D}${datadir}/share; then
+ mv -f ${D}${datadir}/share/* ${D}${datadir}/
+ rmdir ${D}${datadir}/share
+ fi
+}
+distutils3_do_install[vardepsexclude] = "MACHINE"
+
+EXPORT_FUNCTIONS do_compile do_install
+
+export LDSHARED="${CCLD} -shared"
diff --git a/import-layers/yocto-poky/meta/classes/externalsrc.bbclass b/import-layers/yocto-poky/meta/classes/externalsrc.bbclass
new file mode 100644
index 000000000..da7eb4781
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/externalsrc.bbclass
@@ -0,0 +1,154 @@
+# Copyright (C) 2012 Linux Foundation
+# Author: Richard Purdie
+# Some code and influence taken from srctree.bbclass:
+# Copyright (C) 2009 Chris Larson <clarson@kergoth.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+#
+# externalsrc.bbclass enables use of an existing source tree, usually external
+# to the build system, to build a piece of software rather than using the
+# usual fetch/unpack/patch process.
+#
+# To use, add externalsrc to the global inherit and set EXTERNALSRC to point at the
+# directory you want to use containing the sources e.g. from local.conf for a recipe
+# called "myrecipe" you would do:
+#
+# INHERIT += "externalsrc"
+# EXTERNALSRC_pn-myrecipe = "/path/to/my/source/tree"
+#
+# In order to make this class work for both target and native versions (or with
+# multilibs/cross or other BBCLASSEXTEND variants), B is set to point to a separate
+# directory under the work directory (split source and build directories). This is
+# the default, but the build directory can be set to the source directory if
+# circumstances dictate by setting EXTERNALSRC_BUILD to the same value, e.g.:
+#
+# EXTERNALSRC_BUILD_pn-myrecipe = "/path/to/my/source/tree"
+#
+
+SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
+EXTERNALSRC_SYMLINKS ?= "oe-workdir:${WORKDIR} oe-logs:${T}"
+
+python () {
+ externalsrc = d.getVar('EXTERNALSRC', True)
+ if externalsrc:
+ d.setVar('S', externalsrc)
+ externalsrcbuild = d.getVar('EXTERNALSRC_BUILD', True)
+ if externalsrcbuild:
+ d.setVar('B', externalsrcbuild)
+ else:
+ d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
+
+ local_srcuri = []
+ fetch = bb.fetch2.Fetch((d.getVar('SRC_URI', True) or '').split(), d)
+ for url in fetch.urls:
+ url_data = fetch.ud[url]
+ parm = url_data.parm
+ if (url_data.type == 'file' or
+ 'type' in parm and parm['type'] == 'kmeta'):
+ local_srcuri.append(url)
+
+ d.setVar('SRC_URI', ' '.join(local_srcuri))
+
+ if '{SRCPV}' in d.getVar('PV', False):
+ # Dummy value because the default function can't be called with blank SRC_URI
+ d.setVar('SRCPV', '999')
+
+ tasks = filter(lambda k: d.getVarFlag(k, "task", True), d.keys())
+
+ for task in tasks:
+ if task.endswith("_setscene"):
+ # sstate is never going to work for external source trees, disable it
+ bb.build.deltask(task, d)
+ else:
+ # Since configure will likely touch ${S}, take a lock so that only one task has access at a time
+ d.appendVarFlag(task, "lockfiles", " ${S}/singletask.lock")
+
+ # We do not want our source to be wiped out, ever (kernel.bbclass does this for do_clean)
+ cleandirs = (d.getVarFlag(task, 'cleandirs', False) or '').split()
+ setvalue = False
+ for cleandir in cleandirs[:]:
+ if d.expand(cleandir) == externalsrc:
+ cleandirs.remove(cleandir)
+ setvalue = True
+ if setvalue:
+ d.setVarFlag(task, 'cleandirs', ' '.join(cleandirs))
+
+ fetch_tasks = ['do_fetch', 'do_unpack']
+ # If we deltask do_patch, there's no dependency to ensure do_unpack gets run, so add one
+ # Note that we cannot use d.appendVarFlag() here because deps is expected to be a list object, not a string
+ d.setVarFlag('do_configure', 'deps', (d.getVarFlag('do_configure', 'deps', False) or []) + ['do_unpack'])
+
+ for task in d.getVar("SRCTREECOVEREDTASKS", True).split():
+ if local_srcuri and task in fetch_tasks:
+ continue
+ bb.build.deltask(task, d)
+
+ d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
+ d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ")
+
+ # Force the recipe to be always re-parsed so that the file_checksums
+ # function is run every time
+ d.setVar('BB_DONT_CACHE', '1')
+ d.setVarFlag('do_compile', 'file-checksums', '${@srctree_hash_files(d)}')
+
+ # We don't want the workdir to go away
+ d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN', True))
+
+ # If B=S the same builddir is used even for different architectures.
+ # Thus, use a shared CONFIGURESTAMPFILE and STAMP directory so that
+ # change of do_configure task hash is correctly detected and stamps are
+ # invalidated if e.g. MACHINE changes.
+ if d.getVar('S', True) == d.getVar('B', True):
+ configstamp = '${TMPDIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}/configure.sstate'
+ d.setVar('CONFIGURESTAMPFILE', configstamp)
+ d.setVar('STAMP', '${STAMPS_DIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}')
+}
+
+python externalsrc_configure_prefunc() {
+ # Create desired symlinks
+ symlinks = (d.getVar('EXTERNALSRC_SYMLINKS', True) or '').split()
+ for symlink in symlinks:
+ symsplit = symlink.split(':', 1)
+ lnkfile = os.path.join(d.getVar('S', True), symsplit[0])
+ if len(symsplit) > 1:
+ target = d.expand(symsplit[1])
+ if os.path.islink(lnkfile):
+ # Link already exists, leave it if it points to the right location already
+ if os.readlink(lnkfile) == target:
+ continue
+ os.unlink(lnkfile)
+ elif os.path.exists(lnkfile):
+ # File/dir exists with same name as link, just leave it alone
+ continue
+ os.symlink(target, lnkfile)
+}
+
+python externalsrc_compile_prefunc() {
+ # Make it obvious that this is happening, since forgetting about it could lead to much confusion
+ bb.plain('NOTE: %s: compiling from external source tree %s' % (d.getVar('PN', True), d.getVar('EXTERNALSRC', True)))
+}
+
+def srctree_hash_files(d):
+ import shutil
+ import subprocess
+ import tempfile
+
+ s_dir = d.getVar('EXTERNALSRC', True)
+ git_dir = os.path.join(s_dir, '.git')
+ oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1')
+
+ ret = " "
+ if os.path.exists(git_dir):
+ with tempfile.NamedTemporaryFile(dir=git_dir, prefix='oe-devtool-index') as tmp_index:
+ # Clone index
+ shutil.copy2(os.path.join(git_dir, 'index'), tmp_index.name)
+ # Update our custom index
+ env = os.environ.copy()
+ env['GIT_INDEX_FILE'] = tmp_index.name
+ subprocess.check_output(['git', 'add', '.'], cwd=s_dir, env=env)
+ sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env)
+ with open(oe_hash_file, 'w') as fobj:
+ fobj.write(sha1)
+ ret = oe_hash_file + ':True'
+ else:
+ ret = d.getVar('EXTERNALSRC', True) + '/*:True'
+ return ret
diff --git a/import-layers/yocto-poky/meta/classes/extrausers.bbclass b/import-layers/yocto-poky/meta/classes/extrausers.bbclass
new file mode 100644
index 000000000..43900f359
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/extrausers.bbclass
@@ -0,0 +1,65 @@
+# This bbclass is mainly used for image level user/group configuration.
+# Inherit this class if you want to make EXTRA_USERS_PARAMS effective.
+
+# Below is an example showing how to use this functionality.
+# INHERIT += "extrausers"
+# EXTRA_USERS_PARAMS = "\
+# useradd -p '' tester; \
+# groupadd developers; \
+# userdel nobody; \
+# groupdel -g video; \
+# groupmod -g 1020 developers; \
+# usermod -s /bin/sh tester; \
+# "
+
+
+inherit useradd_base
+
+IMAGE_INSTALL_append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS', True))]}"
+
+# Image level user / group settings
+ROOTFS_POSTPROCESS_COMMAND_append = " set_user_group;"
+
+# Image level user / group settings
+set_user_group () {
+ user_group_settings="${EXTRA_USERS_PARAMS}"
+ export PSEUDO="${FAKEROOTENV} ${STAGING_DIR_NATIVE}${bindir}/pseudo"
+ setting=`echo $user_group_settings | cut -d ';' -f1`
+ remaining=`echo $user_group_settings | cut -d ';' -f2-`
+ while test "x$setting" != "x"; do
+ cmd=`echo $setting | cut -d ' ' -f1`
+ opts=`echo $setting | cut -d ' ' -f2-`
+ # Unlike useradd.bbclass, there is no file-locking issue here, as these
+ # settings are applied serially, so we only retry once.
+ case $cmd in
+ useradd)
+ perform_useradd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
+ ;;
+ groupadd)
+ perform_groupadd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
+ ;;
+ userdel)
+ perform_userdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
+ ;;
+ groupdel)
+ perform_groupdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
+ ;;
+ usermod)
+ perform_usermod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
+ ;;
+ groupmod)
+ perform_groupmod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
+ ;;
+ *)
+ bbfatal "Invalid command in EXTRA_USERS_PARAMS: $cmd"
+ ;;
+ esac
+ # Avoid infinite loop if the last parameter doesn't end with ';'
+ if [ "$setting" = "$remaining" ]; then
+ break
+ fi
+ # iterate to the next setting
+ setting=`echo $remaining | cut -d ';' -f1`
+ remaining=`echo $remaining | cut -d ';' -f2-`
+ done
+}
diff --git a/import-layers/yocto-poky/meta/classes/fontcache.bbclass b/import-layers/yocto-poky/meta/classes/fontcache.bbclass
new file mode 100644
index 000000000..8ebdfc4f5
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/fontcache.bbclass
@@ -0,0 +1,56 @@
+#
+# This class will generate the proper postinst/postrm scriptlets for font
+# packages.
+#
+
+DEPENDS += "qemu-native"
+inherit qemu
+
+FONT_PACKAGES ??= "${PN}"
+FONT_EXTRA_RDEPENDS ?= "fontconfig-utils"
+FONTCONFIG_CACHE_DIR ?= "${localstatedir}/cache/fontconfig"
+FONTCONFIG_CACHE_PARAMS ?= "-v"
+# You can change this to e.g. FC_DEBUG=16 to debug fc-cache issues.
+# Something has to be set, because qemuwrapper uses this variable after -E.
+# Multiple variables aren't allowed, because for qemu they are separated by
+# commas, while in the -n "$D" case they would have to be separated by spaces.
+FONTCONFIG_CACHE_ENV ?= "FC_DEBUG=1"
+fontcache_common() {
+if [ -n "$D" ] ; then
+ $INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} \
+ 'bindir="${bindir}"' \
+ 'libdir="${libdir}"' \
+ 'base_libdir="${base_libdir}"' \
+ 'fontconfigcachedir="${FONTCONFIG_CACHE_DIR}"' \
+ 'fontconfigcacheparams="${FONTCONFIG_CACHE_PARAMS}"' \
+ 'fontconfigcacheenv="${FONTCONFIG_CACHE_ENV}"'
+else
+ ${FONTCONFIG_CACHE_ENV} fc-cache ${FONTCONFIG_CACHE_PARAMS}
+fi
+}
+
+python () {
+ font_pkgs = d.getVar('FONT_PACKAGES', True).split()
+ deps = d.getVar("FONT_EXTRA_RDEPENDS", True)
+
+ for pkg in font_pkgs:
+ if deps: d.appendVar('RDEPENDS_' + pkg, ' '+deps)
+}
+
+python add_fontcache_postinsts() {
+ for pkg in d.getVar('FONT_PACKAGES', True).split():
+ bb.note("adding fonts postinst and postrm scripts to %s" % pkg)
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('fontcache_common', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+
+ postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
+ if not postrm:
+ postrm = '#!/bin/sh\n'
+ postrm += d.getVar('fontcache_common', True)
+ d.setVar('pkg_postrm_%s' % pkg, postrm)
+}
+
+PACKAGEFUNCS =+ "add_fontcache_postinsts"
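An illustrative font recipe fragment (the packaging layout is hypothetical):

    inherit fontcache
    FONT_PACKAGES = "${PN}"
    FILES_${PN} += "${datadir}/fonts"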
diff --git a/import-layers/yocto-poky/meta/classes/fs-uuid.bbclass b/import-layers/yocto-poky/meta/classes/fs-uuid.bbclass
new file mode 100644
index 000000000..bd2613cf1
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/fs-uuid.bbclass
@@ -0,0 +1,24 @@
+# Extract UUID from ${ROOTFS}, which must have been built
+# by the time that this function gets called. Only works
+# on ext file systems and depends on tune2fs.
+def get_rootfs_uuid(d):
+ import subprocess
+ rootfs = d.getVar('ROOTFS', True)
+ output = subprocess.check_output(['tune2fs', '-l', rootfs])
+ for line in output.split('\n'):
+ if line.startswith('Filesystem UUID:'):
+ uuid = line.split()[-1]
+ bb.note('UUID of %s: %s' % (rootfs, uuid))
+ return uuid
+ bb.fatal('Could not determine filesystem UUID of %s' % rootfs)
+
+# Replace the special <<uuid-of-rootfs>> inside a string (like the
+# root= APPEND string in a syslinux.cfg or gummiboot entry) with the
+# actual UUID of the rootfs. Does nothing if the special string
+# is not used.
+def replace_rootfs_uuid(d, string):
+ UUID_PLACEHOLDER = '<<uuid-of-rootfs>>'
+ if UUID_PLACEHOLDER in string:
+ uuid = get_rootfs_uuid(d)
+ string = string.replace(UUID_PLACEHOLDER, uuid)
+ return string
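Callers, such as the grub-efi class further below, use the placeholder in append strings and expand it at image-build time; an illustrative sketch:

    # inside a python config-writing function:
    append = 'root=UUID=<<uuid-of-rootfs>> rw'
    cfgfile.write(replace_rootfs_uuid(d, append))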
diff --git a/import-layers/yocto-poky/meta/classes/gconf.bbclass b/import-layers/yocto-poky/meta/classes/gconf.bbclass
new file mode 100644
index 000000000..d7afa7282
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/gconf.bbclass
@@ -0,0 +1,70 @@
+DEPENDS += "gconf gconf-native"
+
+# These are for when gconftool is used natively and the prefix isn't necessarily
+# the sysroot. TODO: replicate the postinst logic for -native packages going
+# into the sysroot, as they won't run their own install-time schema
+# registration (disabled below) nor the postinst script (postinsts do not run for them).
+export GCONF_SCHEMA_INSTALL_SOURCE = "xml:merged:${STAGING_DIR_NATIVE}${sysconfdir}/gconf/gconf.xml.defaults"
+export GCONF_BACKEND_DIR = "${STAGING_LIBDIR_NATIVE}/GConf/2"
+
+# Disable install-time schema registration as we're a packaging system so this
+# happens in the postinst script, not at install time. Set both the configure
+# script option and the traditional environment variable just to make sure.
+EXTRA_OECONF += "--disable-schemas-install"
+export GCONF_DISABLE_MAKEFILE_SCHEMA_INSTALL = "1"
+
+gconf_postinst() {
+if [ "x$D" != "x" ]; then
+ export GCONF_CONFIG_SOURCE="xml::$D${sysconfdir}/gconf/gconf.xml.defaults"
+else
+ export GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source`
+fi
+
+SCHEMA_LOCATION=$D/etc/gconf/schemas
+for SCHEMA in ${SCHEMA_FILES}; do
+ if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
+ HOME=$D/root gconftool-2 \
+ --makefile-install-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
+ fi
+done
+}
+
+gconf_prerm() {
+SCHEMA_LOCATION=/etc/gconf/schemas
+for SCHEMA in ${SCHEMA_FILES}; do
+ if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
+ HOME=/root GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` \
+ gconftool-2 \
+ --makefile-uninstall-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
+ fi
+done
+}
+
+python populate_packages_append () {
+ import re
+ packages = d.getVar('PACKAGES', True).split()
+ pkgdest = d.getVar('PKGDEST', True)
+
+ for pkg in packages:
+ schema_dir = '%s/%s/etc/gconf/schemas' % (pkgdest, pkg)
+ schemas = []
+ schema_re = re.compile(r".*\.schemas$")
+ if os.path.exists(schema_dir):
+ for f in os.listdir(schema_dir):
+ if schema_re.match(f):
+ schemas.append(f)
+ if schemas != []:
+ bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
+ d.setVar('SCHEMA_FILES', " ".join(schemas))
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('gconf_postinst', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+ prerm = d.getVar('pkg_prerm_%s' % pkg, True)
+ if not prerm:
+ prerm = '#!/bin/sh\n'
+ prerm += d.getVar('gconf_prerm', True)
+ d.setVar('pkg_prerm_%s' % pkg, prerm)
+ d.appendVar("RDEPENDS_%s" % pkg, ' ' + d.getVar('MLPREFIX', False) + 'gconf')
+}
diff --git a/import-layers/yocto-poky/meta/classes/gettext.bbclass b/import-layers/yocto-poky/meta/classes/gettext.bbclass
new file mode 100644
index 000000000..03b89b245
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/gettext.bbclass
@@ -0,0 +1,19 @@
+def gettext_dependencies(d):
+ if d.getVar('INHIBIT_DEFAULT_DEPS', True) and not oe.utils.inherits(d, 'cross-canadian'):
+ return ""
+ if d.getVar('USE_NLS', True) == 'no':
+ return "gettext-minimal-native"
+ return d.getVar('DEPENDS_GETTEXT', False)
+
+def gettext_oeconf(d):
+ if d.getVar('USE_NLS', True) == 'no':
+ return '--disable-nls'
+ # Remove the NLS bits if USE_NLS is no or INHIBIT_DEFAULT_DEPS is set
+ if d.getVar('INHIBIT_DEFAULT_DEPS', True) and not oe.utils.inherits(d, 'cross-canadian'):
+ return '--disable-nls'
+ return "--enable-nls"
+
+DEPENDS_GETTEXT ??= "virtual/gettext gettext-native"
+
+BASEDEPENDS =+ "${@gettext_dependencies(d)}"
+EXTRA_OECONF_append = " ${@gettext_oeconf(d)}"
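The effect is driven purely by configuration; for instance (illustrative), a distro setting

    USE_NLS = "no"

makes every recipe inheriting this class build-depend on gettext-minimal-native and configure with --disable-nls.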
diff --git a/import-layers/yocto-poky/meta/classes/gio-module-cache.bbclass b/import-layers/yocto-poky/meta/classes/gio-module-cache.bbclass
new file mode 100644
index 000000000..91461b11e
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/gio-module-cache.bbclass
@@ -0,0 +1,37 @@
+DEPENDS += "qemu-native"
+inherit qemu
+
+GIO_MODULE_PACKAGES ??= "${PN}"
+
+gio_module_cache_common() {
+if [ "x$D" != "x" ]; then
+ $INTERCEPT_DIR/postinst_intercept update_gio_module_cache ${PKG} \
+ mlprefix=${MLPREFIX} \
+ binprefix=${MLPREFIX} \
+ libdir=${libdir} \
+ base_libdir=${base_libdir} \
+ bindir=${bindir}
+else
+ ${libexecdir}/${MLPREFIX}gio-querymodules ${libdir}/gio/modules/
+fi
+}
+
+python populate_packages_append () {
+ packages = d.getVar('GIO_MODULE_PACKAGES', True).split()
+
+ for pkg in packages:
+ bb.note("adding gio-module-cache postinst and postrm scripts to %s" % pkg)
+
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('gio_module_cache_common', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+
+ postrm = d.getVar('pkg_postrm_%s' % pkg, True)
+ if not postrm:
+ postrm = '#!/bin/sh\n'
+ postrm += d.getVar('gio_module_cache_common', True)
+ d.setVar('pkg_postrm_%s' % pkg, postrm)
+}
+
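A recipe shipping GIO modules in a non-default package would set (package name hypothetical):

    inherit gio-module-cache
    GIO_MODULE_PACKAGES = "${PN}-modules"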
diff --git a/import-layers/yocto-poky/meta/classes/gnome.bbclass b/import-layers/yocto-poky/meta/classes/gnome.bbclass
new file mode 100644
index 000000000..c6202bbb7
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/gnome.bbclass
@@ -0,0 +1 @@
+inherit gnomebase gtk-icon-cache gconf mime
diff --git a/import-layers/yocto-poky/meta/classes/gnomebase.bbclass b/import-layers/yocto-poky/meta/classes/gnomebase.bbclass
new file mode 100644
index 000000000..e5c67760c
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/gnomebase.bbclass
@@ -0,0 +1,30 @@
+def gnome_verdir(v):
+ return oe.utils.trim_version(v, 2)
+
+GNOME_COMPRESS_TYPE ?= "xz"
+SECTION ?= "x11/gnome"
+GNOMEBN ?= "${BPN}"
+SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive"
+
+DEPENDS += "gnome-common-native"
+
+FILES_${PN} += "${datadir}/application-registry \
+ ${datadir}/mime-info \
+ ${datadir}/mime/packages \
+ ${datadir}/mime/application \
+ ${datadir}/gnome-2.0 \
+ ${datadir}/polkit* \
+ ${datadir}/GConf \
+ ${datadir}/glib-2.0/schemas \
+"
+
+FILES_${PN}-doc += "${datadir}/devhelp"
+
+inherit autotools pkgconfig
+
+do_install_append() {
+ rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
+ rm -rf ${D}${localstatedir}/scrollkeeper/*
+ rm -f ${D}${datadir}/applications/*.cache
+}
+
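Because the SRC_URI entry above is tagged name=archive, inheriting recipes supply their checksums under that name; an illustrative fragment (values elided):

    inherit gnomebase
    SRC_URI[archive.md5sum] = "..."
    SRC_URI[archive.sha256sum] = "..."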
diff --git a/import-layers/yocto-poky/meta/classes/gobject-introspection-data.bbclass b/import-layers/yocto-poky/meta/classes/gobject-introspection-data.bbclass
new file mode 100644
index 000000000..b1bdd268e
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/gobject-introspection-data.bbclass
@@ -0,0 +1,9 @@
+# This variable is set to True if gobject-introspection-data is in
+# DISTRO_FEATURES and qemu-usermode is in MACHINE_FEATURES, and False otherwise.
+#
+# It should be used in recipes to determine whether introspection data should be built,
+# so that qemu use can be avoided when necessary.
+GI_DATA_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'gobject-introspection-data', \
+ bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
+
+
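Recipes can branch on the variable directly, e.g. (illustrative):

    EXTRA_OECONF += "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)}"

which is the same pattern the gobject-introspection class below prepends to EXTRA_OECONF.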
diff --git a/import-layers/yocto-poky/meta/classes/gobject-introspection.bbclass b/import-layers/yocto-poky/meta/classes/gobject-introspection.bbclass
new file mode 100644
index 000000000..2d73e402c
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/gobject-introspection.bbclass
@@ -0,0 +1,37 @@
+# Inherit this class in recipes to enable building their introspection files
+
+# This sets up autoconf-based recipes to build introspection data (or not),
+# depending on distro and machine features (see gobject-introspection-data class).
+inherit gobject-introspection-data
+EXTRA_OECONF_prepend = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
+
+UNKNOWN_CONFIGURE_WHITELIST_append = " --enable-introspection --disable-introspection"
+
+# Generating introspection data depends on a combination of native and target
+# introspection tools, and qemu to run the target tools.
+DEPENDS_append = " gobject-introspection gobject-introspection-native qemu-native"
+
+# This is necessary for python scripts to succeed - distutils fails if these
+# are not set
+export BUILD_SYS
+export HOST_SYS
+export STAGING_INCDIR
+export STAGING_LIBDIR
+
+# This is used by introspection tools to find .gir includes
+export XDG_DATA_DIRS = "${STAGING_DATADIR}"
+
+do_configure_prepend_class-target () {
+ # introspection.m4 pre-packaged with upstream tarballs does not yet
+ # have our fixes
+ mkdir -p ${S}/m4
+ cp ${STAGING_DIR_TARGET}/${datadir}/aclocal/introspection.m4 ${S}/m4
+}
+
+# .typelib files are needed at runtime and so they go to the main package (so
+# they'll be together with libraries they support).
+FILES_${PN}_append = " ${libdir}/girepository-*/*.typelib"
+
+# .gir files go to dev package, as they're needed for developing (but not for
+# running) things that depends on introspection.
+FILES_${PN}-dev_append = " ${datadir}/gir-*/*.gir"
diff --git a/import-layers/yocto-poky/meta/classes/grub-efi.bbclass b/import-layers/yocto-poky/meta/classes/grub-efi.bbclass
new file mode 100644
index 000000000..4ce3d2844
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/grub-efi.bbclass
@@ -0,0 +1,156 @@
+# grub-efi.bbclass
+# Copyright (c) 2011, Intel Corporation.
+# All rights reserved.
+#
+# Released under the MIT license (see packages/COPYING)
+
+# Provide grub-efi specific functions for building bootable images.
+
+# External variables
+# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
+# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
+# ${GRUB_GFXSERIAL} - set this to 1 to have graphics and serial in the boot menu
+# ${LABELS} - a list of targets for the automatic config
+# ${APPEND} - an override list of append strings for each label
+# ${GRUB_OPTS} - additional options to add to the config, ';'-delimited (optional)
+# ${GRUB_TIMEOUT} - timeout before executing the default label (optional)
+# ${GRUB_ROOT} - grub's root device.
+
+do_bootimg[depends] += "${MLPREFIX}grub-efi:do_deploy"
+do_bootdirectdisk[depends] += "${MLPREFIX}grub-efi:do_deploy"
+
+GRUB_SERIAL ?= "console=ttyS0,115200"
+GRUB_CFG_VM = "${S}/grub_vm.cfg"
+GRUB_CFG_LIVE = "${S}/grub_live.cfg"
+GRUB_TIMEOUT ?= "10"
+#FIXME: build this from the machine config
+GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
+
+EFIDIR = "/EFI/BOOT"
+GRUB_ROOT ?= "${ROOT}"
+APPEND ?= ""
+
+# Need UUID utility code.
+inherit fs-uuid
+
+efi_populate() {
+ # DEST must be the root of the image so that EFIDIR is not
+ # nested under a top level directory.
+ DEST=$1
+
+ install -d ${DEST}${EFIDIR}
+
+ GRUB_IMAGE="bootia32.efi"
+ if [ "${TARGET_ARCH}" = "x86_64" ]; then
+ GRUB_IMAGE="bootx64.efi"
+ fi
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/${GRUB_IMAGE} ${DEST}${EFIDIR}
+
+ install -m 0644 ${GRUB_CFG} ${DEST}${EFIDIR}/grub.cfg
+}
+
+efi_iso_populate() {
+ iso_dir=$1
+ efi_populate $iso_dir
+ # Build an EFI directory to create efi.img
+ mkdir -p ${EFIIMGDIR}/${EFIDIR}
+ cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
+ cp $iso_dir/vmlinuz ${EFIIMGDIR}
+ EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
+ echo "fs0:${EFIPATH}\\${GRUB_IMAGE}" > ${EFIIMGDIR}/startup.nsh
+ if [ -f "$iso_dir/initrd" ] ; then
+ cp $iso_dir/initrd ${EFIIMGDIR}
+ fi
+}
+
+efi_hddimg_populate() {
+ efi_populate $1
+}
+
+python build_efi_cfg() {
+ import sys
+
+ workdir = d.getVar('WORKDIR', True)
+ if not workdir:
+ bb.error("WORKDIR not defined, unable to package")
+ return
+
+ gfxserial = d.getVar('GRUB_GFXSERIAL', True) or ""
+
+ labels = d.getVar('LABELS', True)
+ if not labels:
+ bb.debug(1, "LABELS not defined, nothing to do")
+ return
+
+ if labels == []:
+ bb.debug(1, "No labels, nothing to do")
+ return
+
+ cfile = d.getVar('GRUB_CFG', True)
+ if not cfile:
+ raise bb.build.FuncFailed('Unable to read GRUB_CFG')
+
+ try:
+ cfgfile = open(cfile, 'w')
+ except OSError:
+ raise bb.build.FuncFailed('Unable to open %s' % (cfile))
+
+ cfgfile.write('# Automatically created by OE\n')
+
+ opts = d.getVar('GRUB_OPTS', True)
+ if opts:
+ for opt in opts.split(';'):
+ cfgfile.write('%s\n' % opt)
+
+ cfgfile.write('default=%s\n' % (labels.split()[0]))
+
+ timeout = d.getVar('GRUB_TIMEOUT', True)
+ if timeout:
+ cfgfile.write('timeout=%s\n' % timeout)
+ else:
+ cfgfile.write('timeout=50\n')
+
+ root = d.getVar('GRUB_ROOT', True)
+ if not root:
+ raise bb.build.FuncFailed('GRUB_ROOT not defined')
+
+ if gfxserial == "1":
+ btypes = [ [ " graphics console", "" ],
+ [ " serial console", d.getVar('GRUB_SERIAL', True) or "" ] ]
+ else:
+ btypes = [ [ "", "" ] ]
+
+ for label in labels.split():
+ localdata = d.createCopy()
+
+ overrides = localdata.getVar('OVERRIDES', True)
+ if not overrides:
+ raise bb.build.FuncFailed('OVERRIDES not defined')
+
+ for btype in btypes:
+ localdata.setVar('OVERRIDES', label + ':' + overrides)
+ bb.data.update_data(localdata)
+
+ cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0]))
+ lb = label
+ if label == "install":
+ lb = "install-efi"
+ cfgfile.write('linux /vmlinuz LABEL=%s' % (lb))
+
+ cfgfile.write(' %s' % replace_rootfs_uuid(d, root))
+
+ append = localdata.getVar('APPEND', True)
+ initrd = localdata.getVar('INITRD', True)
+
+ if append:
+ append = replace_rootfs_uuid(d, append)
+ cfgfile.write('%s' % (append))
+ cfgfile.write(' %s' % btype[1])
+ cfgfile.write('\n')
+
+ if initrd:
+ cfgfile.write('initrd /initrd')
+ cfgfile.write('\n}\n')
+
+ cfgfile.close()
+}
diff --git a/import-layers/yocto-poky/meta/classes/gsettings.bbclass b/import-layers/yocto-poky/meta/classes/gsettings.bbclass
new file mode 100644
index 000000000..dec5abc02
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/gsettings.bbclass
@@ -0,0 +1,37 @@
+# A bbclass to handle installed GSettings (glib) schemas, updating the compiled
+# form on package install and remove.
+#
+# The compiled schemas are platform-agnostic, so we can depend on
+# glib-2.0-native for the native tool and run the postinst script when the
+# rootfs builds to save a little time on first boot.
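+#
+# Typical usage (illustrative): a recipe that installs schemas just adds
+#   inherit gsettings
+# and the postinst/postrm scripts below are generated for its main package.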
+
+# TODO use a trigger so that this runs once per package operation run
+
+DEPENDS += "glib-2.0-native"
+
+RDEPENDS_${PN} += "glib-2.0-utils"
+
+FILES_${PN} += "${datadir}/glib-2.0/schemas"
+
+gsettings_postinstrm () {
+ glib-compile-schemas $D${datadir}/glib-2.0/schemas
+}
+
+python populate_packages_append () {
+ pkg = d.getVar('PN', True)
+ bb.note("adding gsettings postinst scripts to %s" % pkg)
+
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('gsettings_postinstrm', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+
+ bb.note("adding gsettings postrm scripts to %s" % pkg)
+
+ postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
+ if not postrm:
+ postrm = '#!/bin/sh\n'
+ postrm += d.getVar('gsettings_postinstrm', True)
+ d.setVar('pkg_postrm_%s' % pkg, postrm)
+}
diff --git a/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass b/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass
new file mode 100644
index 000000000..e32f98dcf
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass
@@ -0,0 +1,25 @@
+# Helper class to pull in the right gtk-doc dependencies and disable
+# gtk-doc.
+#
+# Long-term it would be great if this class could be toggled between
+# gtk-doc-stub-native and the real gtk-doc-native, which would enable
+# re-generation of documentation. For now, we'll make do with this, which
+# packages up any existing documentation (e.g. from tarball builds).
+
+# The documentation directory, where the infrastructure will be copied.
+# gtkdocize has a default of "." so to handle out-of-tree builds set this to $S.
+GTKDOC_DOCDIR ?= "${S}"
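+# A recipe with its documentation in a subdirectory could override this,
+# e.g. (illustrative): GTKDOC_DOCDIR = "${S}/docs"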
+
+DEPENDS_append = " gtk-doc-stub-native"
+
+EXTRA_OECONF_append = "\
+ --disable-gtk-doc \
+ --disable-gtk-doc-html \
+ --disable-gtk-doc-pdf \
+"
+
+do_configure_prepend () {
+ ( cd ${S}; gtkdocize --docdir ${GTKDOC_DOCDIR} )
+}
+
+inherit pkgconfig
diff --git a/import-layers/yocto-poky/meta/classes/gtk-icon-cache.bbclass b/import-layers/yocto-poky/meta/classes/gtk-icon-cache.bbclass
new file mode 100644
index 000000000..0f1052b08
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/gtk-icon-cache.bbclass
@@ -0,0 +1,64 @@
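+# Update the GTK+ icon cache for packages installing icons into
+# ${datadir}/icons/hicolor. Typical usage (illustrative):
+#   inherit gtk-icon-cache
+#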
+FILES_${PN} += "${datadir}/icons/hicolor"
+
+DEPENDS += "${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} gtk-icon-utils-native"
+
+gtk_icon_cache_postinst() {
+if [ "x$D" != "x" ]; then
+ $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} \
+ mlprefix=${MLPREFIX} \
+ libdir_native=${libdir_native}
+else
+
+ # Update the pixbuf loaders in case they haven't been registered yet
+ ${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
+
+ for icondir in /usr/share/icons/* ; do
+ if [ -d $icondir ] ; then
+ gtk-update-icon-cache -fqt $icondir
+ fi
+ done
+fi
+}
+
+gtk_icon_cache_postrm() {
+if [ "x$D" != "x" ]; then
+ $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} \
+ mlprefix=${MLPREFIX} \
+ libdir=${libdir}
+else
+ for icondir in /usr/share/icons/* ; do
+ if [ -d $icondir ] ; then
+ gtk-update-icon-cache -qt $icondir
+ fi
+ done
+fi
+}
+
+python populate_packages_append () {
+ packages = d.getVar('PACKAGES', True).split()
+ pkgdest = d.getVar('PKGDEST', True)
+
+ for pkg in packages:
+ icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir', True))
+ if not os.path.exists(icon_dir):
+ continue
+
+ bb.note("adding hicolor-icon-theme dependency to %s" % pkg)
+ rdepends = ' ' + d.getVar('MLPREFIX', False) + "hicolor-icon-theme"
+ d.appendVar('RDEPENDS_%s' % pkg, rdepends)
+
+ bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
+
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('gtk_icon_cache_postinst', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+
+ postrm = d.getVar('pkg_postrm_%s' % pkg, True)
+ if not postrm:
+ postrm = '#!/bin/sh\n'
+ postrm += d.getVar('gtk_icon_cache_postrm', True)
+ d.setVar('pkg_postrm_%s' % pkg, postrm)
+}
+
diff --git a/import-layers/yocto-poky/meta/classes/gtk-immodules-cache.bbclass b/import-layers/yocto-poky/meta/classes/gtk-immodules-cache.bbclass
new file mode 100644
index 000000000..c099cd38e
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/gtk-immodules-cache.bbclass
@@ -0,0 +1,87 @@
+# This class will update the inputmethod module cache for virtual keyboards
+#
+# Usage: Set GTKIMMODULES_PACKAGES to the packages that need to update the inputmethod modules
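+#
+# e.g. (illustrative):
+#   GTKIMMODULES_PACKAGES = "${PN}-immodules"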
+
+DEPENDS =+ "qemu-native"
+
+inherit qemu
+
+GTKIMMODULES_PACKAGES ?= "${PN}"
+
+gtk_immodule_cache_postinst() {
+if [ "x$D" != "x" ]; then
+ if [ -x $D${bindir}/gtk-query-immodules-2.0 ]; then
+ IMFILES=$(ls $D${libdir}/gtk-2.0/*/immodules/*.so)
+ ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-2.0')} \
+ $IMFILES > $D${libdir}/gtk-2.0/2.10.0/immodules.cache 2>/dev/null &&
+ sed -i -e "s:$D::" $D${libdir}/gtk-2.0/2.10.0/immodules.cache
+ elif [ -x $D${bindir}/gtk-query-immodules-3.0 ]; then
+ IMFILES=$(ls $D${libdir}/gtk-3.0/*/immodules/*.so)
+ ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-3.0')} \
+ $IMFILES > $D${libdir}/gtk-3.0/3.0.0/immodules.cache 2>/dev/null &&
+ sed -i -e "s:$D::" $D${libdir}/gtk-3.0/3.0.0/immodules.cache
+ fi
+
+ [ $? -ne 0 ] && exit 1
+ exit 0
+fi
+if [ ! -z `which gtk-query-immodules-2.0` ]; then
+ gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
+fi
+if [ ! -z `which gtk-query-immodules-3.0` ]; then
+ gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
+fi
+}
+
+gtk_immodule_cache_postrm() {
+if [ "x$D" != "x" ]; then
+ if [ -x $D${bindir}/gtk-query-immodules-2.0 ]; then
+ IMFILES=$(ls $D${libdir}/gtk-2.0/*/immodules/*.so)
+ ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-2.0')} \
+ $IMFILES > $D${libdir}/gtk-2.0/2.10.0/immodules.cache 2>/dev/null &&
+ sed -i -e "s:$D::" $D${libdir}/gtk-2.0/2.10.0/immodules.cache
+ elif [ -x $D${bindir}/gtk-query-immodules-3.0 ]; then
+ IMFILES=$(ls $D${libdir}/gtk-3.0/*/immodules/*.so)
+ ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-3.0')} \
+ $IMFILES > $D${libdir}/gtk-3.0/3.0.0/immodules.cache 2>/dev/null &&
+ sed -i -e "s:$D::" $D${libdir}/gtk-3.0/3.0.0/immodules.cache
+ fi
+
+ [ $? -ne 0 ] && exit 1
+ exit 0
+fi
+if [ ! -z `which gtk-query-immodules-2.0` ]; then
+ gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
+fi
+if [ ! -z `which gtk-query-immodules-3.0` ]; then
+ gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
+fi
+}
+
+python populate_packages_append () {
+ gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES', True).split()
+
+ for pkg in gtkimmodules_pkgs:
+ bb.note("adding gtk-immodule-cache postinst and postrm scripts to %s" % pkg)
+
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('gtk_immodule_cache_postinst', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+
+ postrm = d.getVar('pkg_postrm_%s' % pkg, True)
+ if not postrm:
+ postrm = '#!/bin/sh\n'
+ postrm += d.getVar('gtk_immodule_cache_postrm', True)
+ d.setVar('pkg_postrm_%s' % pkg, postrm)
+}
+
+python __anonymous() {
+ if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
+ gtkimmodules_check = d.getVar('GTKIMMODULES_PACKAGES', False)
+ if not gtkimmodules_check:
+ bb_filename = d.getVar('FILE', False)
+ raise bb.build.FuncFailed("ERROR: %s inherits gtk-immodules-cache but doesn't set GTKIMMODULES_PACKAGES" % bb_filename)
+}
+
diff --git a/import-layers/yocto-poky/meta/classes/gummiboot.bbclass b/import-layers/yocto-poky/meta/classes/gummiboot.bbclass
new file mode 100644
index 000000000..1ebb9462d
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/gummiboot.bbclass
@@ -0,0 +1,119 @@
+# Copyright (C) 2014 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# gummiboot.bbclass - equivalent of grub-efi.bbclass
+# Set EFI_PROVIDER = "gummiboot" to use gummiboot on your live images instead of grub-efi
+# (images built by image-live.bbclass or image-vm.bbclass)
+
+do_bootimg[depends] += "${MLPREFIX}gummiboot:do_deploy"
+do_bootdirectdisk[depends] += "${MLPREFIX}gummiboot:do_deploy"
+
+EFIDIR = "/EFI/BOOT"
+
+GUMMIBOOT_CFG ?= "${S}/loader.conf"
+GUMMIBOOT_ENTRIES ?= ""
+GUMMIBOOT_TIMEOUT ?= "10"
+
+# Need UUID utility code.
+inherit fs-uuid
+
+efi_populate() {
+ DEST=$1
+
+ EFI_IMAGE="gummibootia32.efi"
+ DEST_EFI_IMAGE="bootia32.efi"
+ if [ "${TARGET_ARCH}" = "x86_64" ]; then
+ EFI_IMAGE="gummibootx64.efi"
+ DEST_EFI_IMAGE="bootx64.efi"
+ fi
+
+ install -d ${DEST}${EFIDIR}
+ # gummiboot requires these paths for configuration files;
+ # they are not customizable, so there is no point in adding new variables
+ install -d ${DEST}/loader
+ install -d ${DEST}/loader/entries
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/${EFI_IMAGE} ${DEST}${EFIDIR}/${DEST_EFI_IMAGE}
+ install -m 0644 ${GUMMIBOOT_CFG} ${DEST}/loader/loader.conf
+ for i in ${GUMMIBOOT_ENTRIES}; do
+ install -m 0644 ${i} ${DEST}/loader/entries
+ done
+}
+
+efi_iso_populate() {
+ iso_dir=$1
+ efi_populate $iso_dir
+ mkdir -p ${EFIIMGDIR}/${EFIDIR}
+ cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
+ cp $iso_dir/vmlinuz ${EFIIMGDIR}
+ EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
+ echo "fs0:${EFIPATH}\\${DEST_EFI_IMAGE}" > ${EFIIMGDIR}/startup.nsh
+ if [ -f "$iso_dir/initrd" ] ; then
+ cp $iso_dir/initrd ${EFIIMGDIR}
+ fi
+}
+
+efi_hddimg_populate() {
+ efi_populate $1
+}
+
+python build_efi_cfg() {
+ s = d.getVar("S", True)
+ labels = d.getVar('LABELS', True)
+ if not labels:
+ bb.debug(1, "LABELS not defined, nothing to do")
+ return
+
+ if labels == []:
+ bb.debug(1, "No labels, nothing to do")
+ return
+
+ cfile = d.getVar('GUMMIBOOT_CFG', True)
+ try:
+ cfgfile = open(cfile, 'w')
+ except OSError:
+ raise bb.build.FuncFailed('Unable to open %s' % (cfile))
+
+ cfgfile.write('# Automatically created by OE\n')
+ cfgfile.write('default %s\n' % (labels.split()[0]))
+ timeout = d.getVar('GUMMIBOOT_TIMEOUT', True)
+ if timeout:
+ cfgfile.write('timeout %s\n' % timeout)
+ else:
+ cfgfile.write('timeout 10\n')
+ cfgfile.close()
+
+ for label in labels.split():
+ localdata = d.createCopy()
+
+ overrides = localdata.getVar('OVERRIDES', True)
+ if not overrides:
+ raise bb.build.FuncFailed('OVERRIDES not defined')
+
+ entryfile = "%s/%s.conf" % (s, label)
+ d.appendVar("GUMMIBOOT_ENTRIES", " " + entryfile)
+ try:
+ entrycfg = open(entryfile, "w")
+ except OSError:
+ raise bb.build.FuncFailed('Unable to open %s' % (entryfile))
+ localdata.setVar('OVERRIDES', label + ':' + overrides)
+ bb.data.update_data(localdata)
+
+ entrycfg.write('title %s\n' % label)
+ entrycfg.write('linux /vmlinuz\n')
+
+ append = localdata.getVar('APPEND', True)
+ initrd = localdata.getVar('INITRD', True)
+
+ if initrd:
+ entrycfg.write('initrd /initrd\n')
+ lb = label
+ if label == "install":
+ lb = "install-efi"
+ entrycfg.write('options LABEL=%s ' % lb)
+ if append:
+ append = replace_rootfs_uuid(d, append)
+ entrycfg.write('%s' % append)
+ entrycfg.write('\n')
+ entrycfg.close()
+}
diff --git a/import-layers/yocto-poky/meta/classes/gzipnative.bbclass b/import-layers/yocto-poky/meta/classes/gzipnative.bbclass
new file mode 100644
index 000000000..326cbbb6f
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/gzipnative.bbclass
@@ -0,0 +1,5 @@
+EXTRANATIVEPATH += "pigz-native gzip-native"
+DEPENDS += "gzip-native"
+
+# tar may get run by do_unpack or do_populate_lic which could call gzip
+do_unpack[depends] += "gzip-native:do_populate_sysroot"
diff --git a/import-layers/yocto-poky/meta/classes/icecc.bbclass b/import-layers/yocto-poky/meta/classes/icecc.bbclass
new file mode 100644
index 000000000..e1c06c49c
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/icecc.bbclass
@@ -0,0 +1,333 @@
+# IceCream distributed compiling support
+#
+# Stages directories with symlinks from gcc/g++ to icecc, for both
+# native and cross compilers. Depending on each configure or compile,
+# the directories are added at the head of the PATH list and ICECC_CXX
+# and ICECC_CC are set.
+#
+# For the cross compiler, creates a tar.gz of our toolchain and sets
+# ICECC_VERSION accordingly.
+#
+# The class now handles all 3 different compile 'stages' (i.e. native, cross-kernel and target), creating the
+# necessary environment tar.gz file to be used by the remote machines.
+# It also supports meta-toolchain generation
+#
+# If ICECC_PATH is not set in local.conf then the class will try to locate it using 'bb.utils.which',
+# but it is not guaranteed to find it ;)
+#
+# If ICECC_ENV_EXEC is set in local.conf, it should point to the icecc-create-env script provided by the user;
+# otherwise the default one provided by icecc-create-env.bb will be used
+# (NOTE that this is a modified version of the script, *not* the one that comes with icecc)
+#
+# Users can specify that specific packages, or packages belonging to a class, should not use icecc to distribute
+# compile jobs to remote machines but be handled locally, by defining ICECC_USER_CLASS_BL and ICECC_USER_PACKAGE_BL
+# with the appropriate values in local.conf. In addition, users can force icecc to be enabled for packages
+# which set an empty PARALLEL_MAKE variable by defining ICECC_USER_PACKAGE_WL.
+#
+#########################################################################################
+# Error checking is kept to a minimum, so double-check any parameters you pass to the class
+#########################################################################################
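+
+# Example local.conf fragment (illustrative values):
+#   INHERIT += "icecc"
+#   ICECC_PARALLEL_MAKE = "-j 24"
+#   ICECC_USER_PACKAGE_BL = "linux-yocto"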
+
+BB_HASHBASE_WHITELIST += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_USER_PACKAGE_BL ICECC_USER_CLASS_BL ICECC_USER_PACKAGE_WL ICECC_PATH ICECC_ENV_EXEC"
+
+ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env"
+
+def icecc_dep_prepend(d):
+ # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
+ # we need that built is the responsibility of the patch function / class, not
+ # the application.
+ if not d.getVar('INHIBIT_DEFAULT_DEPS', False):
+ return "icecc-create-env-native"
+ return ""
+
+DEPENDS_prepend += "${@icecc_dep_prepend(d)} "
+
+def get_cross_kernel_cc(bb,d):
+ kernel_cc = d.getVar('KERNEL_CC', False)
+
+ # evaluate the expression by the shell if necessary
+ if '`' in kernel_cc or '$(' in kernel_cc:
+ kernel_cc = os.popen("echo %s" % kernel_cc).read()[:-1]
+
+ kernel_cc = d.expand(kernel_cc)
+ kernel_cc = kernel_cc.replace('ccache', '').strip()
+ kernel_cc = kernel_cc.split(' ')[0]
+ kernel_cc = kernel_cc.strip()
+ return kernel_cc
+
+def get_icecc(d):
+ return d.getVar('ICECC_PATH', False) or bb.utils.which(os.getenv("PATH"), "icecc")
+
+def create_path(compilers, bb, d):
+ """
+ Create Symlinks for the icecc in the staging directory
+ """
+ staging = os.path.join(d.expand('${STAGING_BINDIR}'), "ice")
+ if icecc_is_kernel(bb, d):
+ staging += "-kernel"
+
+ #check if the icecc path is set by the user
+ icecc = get_icecc(d)
+
+ # Create the dir if necessary
+ try:
+ os.stat(staging)
+ except:
+ try:
+ os.makedirs(staging)
+ except:
+ pass
+
+ for compiler in compilers:
+ gcc_path = os.path.join(staging, compiler)
+ try:
+ os.stat(gcc_path)
+ except:
+ try:
+ os.symlink(icecc, gcc_path)
+ except:
+ pass
+
+ return staging
+
+def use_icecc(bb,d):
+ if d.getVar('ICECC_DISABLED', False) == "1":
+ # don't even try it, when explicitly disabled
+ return "no"
+
+ # allarch recipes don't use a compiler
+ if icecc_is_allarch(bb, d):
+ return "no"
+
+ pn = d.getVar('PN', True)
+
+ system_class_blacklist = []
+ user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL', False) or "none").split()
+ package_class_blacklist = system_class_blacklist + user_class_blacklist
+
+ for black in package_class_blacklist:
+ if bb.data.inherits_class(black, d):
+ bb.debug(1, "%s: class %s found in blacklist, disable icecc" % (pn, black))
+ return "no"
+
+ # "system" recipe blacklist contains a list of packages that can not distribute compile tasks
+ # for one reason or the other
+ # this is the old list (which doesn't seem to be valid anymore, because I was able to build
+ # all these with icecc enabled)
+ # system_package_blacklist = [ "uclibc", "glibc", "gcc", "bind", "u-boot", "dhcp-forwarder", "enchant", "connman", "orbit2" ]
+ # when adding new entry, please document why (how it failed) so that we can re-evaluate it later
+ # e.g. when there is new version
+ # building libgcc-initial with icecc fails with CPP sanity check error if host sysroot contains cross gcc built for another target tune/variant
+ system_package_blacklist = ["libgcc-initial"]
+ user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL', False) or "").split()
+ user_package_whitelist = (d.getVar('ICECC_USER_PACKAGE_WL', False) or "").split()
+ package_blacklist = system_package_blacklist + user_package_blacklist
+
+ if pn in package_blacklist:
+ bb.debug(1, "%s: found in blacklist, disable icecc" % pn)
+ return "no"
+
+ if pn in user_package_whitelist:
+ bb.debug(1, "%s: found in whitelist, enable icecc" % pn)
+ return "yes"
+
+ if d.getVar('PARALLEL_MAKE', False) == "":
+ bb.debug(1, "%s: has empty PARALLEL_MAKE, disable icecc" % pn)
+ return "no"
+
+ return "yes"
+
+def icecc_is_allarch(bb, d):
+ return d.getVar("PACKAGE_ARCH", True) == "all" or bb.data.inherits_class('allarch', d)
+
+def icecc_is_kernel(bb, d):
+ return bb.data.inherits_class("kernel", d)
+
+def icecc_is_native(bb, d):
+ return \
+ bb.data.inherits_class("cross", d) or \
+ bb.data.inherits_class("native", d)
+
+# Don't pollute allarch signatures with TARGET_FPU
+icecc_version[vardepsexclude] += "TARGET_FPU"
+def icecc_version(bb, d):
+ if use_icecc(bb, d) == "no":
+ return ""
+
+ parallel = d.getVar('ICECC_PARALLEL_MAKE', False) or ""
+ if not d.getVar('PARALLEL_MAKE', False) == "" and parallel:
+ d.setVar("PARALLEL_MAKE", parallel)
+
+ if icecc_is_native(bb, d):
+ archive_name = "local-host-env"
+ elif d.expand('${HOST_PREFIX}') == "":
+ bb.fatal(d.expand("${PN}"), " NULL prefix")
+ else:
+ prefix = d.expand('${HOST_PREFIX}' )
+ distro = d.expand('${DISTRO}')
+ target_sys = d.expand('${TARGET_SYS}')
+ float = d.getVar('TARGET_FPU', False) or "hard"
+ archive_name = prefix + distro + "-" + target_sys + "-" + float
+ if icecc_is_kernel(bb, d):
+ archive_name += "-kernel"
+
+ import socket
+ ice_dir = d.expand('${STAGING_DIR_NATIVE}${prefix_native}')
+ tar_file = os.path.join(ice_dir, 'ice', archive_name + "-@VERSION@-" + socket.gethostname() + '.tar.gz')
+
+ return tar_file
+
+def icecc_path(bb,d):
+ if use_icecc(bb, d) == "no":
+ # don't create unnecessary directories when icecc is disabled
+ return
+
+ if icecc_is_kernel(bb, d):
+ return create_path( [get_cross_kernel_cc(bb,d), ], bb, d)
+
+ else:
+ prefix = d.expand('${HOST_PREFIX}')
+ return create_path( [prefix+"gcc", prefix+"g++"], bb, d)
+
+def icecc_get_external_tool(bb, d, tool):
+ external_toolchain_bindir = d.expand('${EXTERNAL_TOOLCHAIN}${bindir_cross}')
+ target_prefix = d.expand('${TARGET_PREFIX}')
+ return os.path.join(external_toolchain_bindir, '%s%s' % (target_prefix, tool))
+
+# Don't pollute native signatures with target TUNE_PKGARCH through STAGING_BINDIR_TOOLCHAIN
+icecc_get_tool[vardepsexclude] += "STAGING_BINDIR_TOOLCHAIN"
+def icecc_get_tool(bb, d, tool):
+ if icecc_is_native(bb, d):
+ return bb.utils.which(os.getenv("PATH"), tool)
+ elif icecc_is_kernel(bb, d):
+ return bb.utils.which(os.getenv("PATH"), get_cross_kernel_cc(bb, d))
+ else:
+ ice_dir = d.expand('${STAGING_BINDIR_TOOLCHAIN}')
+ target_sys = d.expand('${TARGET_SYS}')
+ tool_bin = os.path.join(ice_dir, "%s-%s" % (target_sys, tool))
+ if os.path.isfile(tool_bin):
+ return tool_bin
+ else:
+ external_tool_bin = icecc_get_external_tool(bb, d, tool)
+ if os.path.isfile(external_tool_bin):
+ return external_tool_bin
+ else:
+ return ""
+
+def icecc_get_and_check_tool(bb, d, tool):
+ # Check that g++ or gcc is not a symbolic link to icecc binary in
+ # PATH or icecc-create-env script will silently create an invalid
+ # compiler environment package.
+ t = icecc_get_tool(bb, d, tool)
+ if t and os.popen("readlink -f %s" % t).read()[:-1] == get_icecc(d):
+ bb.error("%s is a symlink to %s in PATH and this prevents icecc from working" % (t, get_icecc(d)))
+ return ""
+ else:
+ return t
+
+wait_for_file() {
+ local TIME_ELAPSED=0
+ local FILE_TO_TEST=$1
+ local TIMEOUT=$2
+ until [ -f "$FILE_TO_TEST" ]
+ do
+ TIME_ELAPSED=`expr $TIME_ELAPSED + 1`
+ if [ $TIME_ELAPSED -gt $TIMEOUT ]
+ then
+ return 1
+ fi
+ sleep 1
+ done
+}
+
+def set_icecc_env():
+ # dummy python version of set_icecc_env
+ return
+
+set_icecc_env() {
+ if [ "${@use_icecc(bb, d)}" = "no" ]
+ then
+ return
+ fi
+ ICECC_VERSION="${@icecc_version(bb, d)}"
+ if [ "x${ICECC_VERSION}" = "x" ]
+ then
+ bbwarn "Cannot use icecc: could not get ICECC_VERSION"
+ return
+ fi
+
+ ICE_PATH="${@icecc_path(bb, d)}"
+ if [ "x${ICE_PATH}" = "x" ]
+ then
+ bbwarn "Cannot use icecc: could not get ICE_PATH"
+ return
+ fi
+
+ ICECC_CC="${@icecc_get_and_check_tool(bb, d, "gcc")}"
+ ICECC_CXX="${@icecc_get_and_check_tool(bb, d, "g++")}"
+ # cannot use icecc_get_and_check_tool here because it assumes 'as' has no target_sys prefix
+ ICECC_WHICH_AS="${@bb.utils.which(os.getenv('PATH'), 'as')}"
+ if [ ! -x "${ICECC_CC}" -o ! -x "${ICECC_CXX}" ]
+ then
+ bbwarn "Cannot use icecc: could not get ICECC_CC or ICECC_CXX"
+ return
+ fi
+
+ ICE_VERSION=`$ICECC_CC -dumpversion`
+ ICECC_VERSION=`echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g"`
+ if [ ! -x "${ICECC_ENV_EXEC}" ]
+ then
+ bbwarn "Cannot use icecc: invalid ICECC_ENV_EXEC"
+ return
+ fi
+
+ ICECC_AS="`${ICECC_CC} -print-prog-name=as`"
+ # for target recipes this should return something like:
+ # /OE/tmp-eglibc/sysroots/x86_64-linux/usr/libexec/arm920tt-oe-linux-gnueabi/gcc/arm-oe-linux-gnueabi/4.8.2/as
+ # and just "as" for native; if it returns "as" in the current directory (for whatever reason), use "as" from PATH
+ if [ "`dirname "${ICECC_AS}"`" = "." ]
+ then
+ ICECC_AS="${ICECC_WHICH_AS}"
+ fi
+
+ if [ ! -f "${ICECC_VERSION}.done" ]
+ then
+ mkdir -p "`dirname "${ICECC_VERSION}"`"
+
+ # the ICECC_VERSION generation step must be locked by a mutex
+ # in order to prevent race conditions
+ if flock -n "${ICECC_VERSION}.lock" \
+ ${ICECC_ENV_EXEC} "${ICECC_CC}" "${ICECC_CXX}" "${ICECC_AS}" "${ICECC_VERSION}"
+ then
+ touch "${ICECC_VERSION}.done"
+ # locking failed, so wait for ${ICECC_VERSION}.done to appear instead
+ elif ! wait_for_file "${ICECC_VERSION}.done" 30
+ then
+ bbwarn "Timeout waiting for ${ICECC_VERSION}.done"
+ return
+ fi
+ fi
+
+ export ICECC_VERSION ICECC_CC ICECC_CXX
+ export PATH="$ICE_PATH:$PATH"
+ export CCACHE_PATH="$PATH"
+
+ bbnote "Using icecc"
+}
+
+do_configure_prepend() {
+ set_icecc_env
+}
+
+do_compile_prepend() {
+ set_icecc_env
+}
+
+do_compile_kernelmodules_prepend() {
+ set_icecc_env
+}
+
+do_install_prepend() {
+ set_icecc_env
+}
diff --git a/import-layers/yocto-poky/meta/classes/image-buildinfo.bbclass b/import-layers/yocto-poky/meta/classes/image-buildinfo.bbclass
new file mode 100644
index 000000000..197b24235
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/image-buildinfo.bbclass
@@ -0,0 +1,78 @@
+#
+# Writes build information to target filesystem on /etc/build
+#
+# Copyright (C) 2014 Intel Corporation
+# Author: Alejandro Enedino Hernandez Samaniego <alejandro.hernandez@intel.com>
+#
+# Licensed under the MIT license, see COPYING.MIT for details
+#
+# Usage: add INHERIT += "image-buildinfo" to your conf file
+#
+
+# Desired variables to display
+IMAGE_BUILDINFO_VARS ?= "DISTRO DISTRO_VERSION"
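+# e.g. record additional variables (illustrative):
+#   IMAGE_BUILDINFO_VARS_append = " MACHINE TUNE_FEATURES"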
+
+# From buildhistory.bbclass
+def image_buildinfo_outputvars(vars, listvars, d):
+ vars = vars.split()
+ listvars = listvars.split()
+ ret = ""
+ for var in vars:
+ value = d.getVar(var, True) or ""
+ if (d.getVarFlag(var, 'type', True) == "list"):
+ value = oe.utils.squashspaces(value)
+ ret += "%s = %s\n" % (var, value)
+ return ret.rstrip('\n')
+
+# Gets git branch's status (clean or dirty)
+def get_layer_git_status(path):
+ import subprocess
+ try:
+ subprocess.check_output("cd %s; PSEUDO_UNLOAD=1 git diff --quiet --no-ext-diff" % path,
+ shell=True,
+ stderr=subprocess.STDOUT)
+ return ""
+ except subprocess.CalledProcessError as ex:
+ # Silently treat errors as "modified", without checking for the
+ # (expected) return code 1 in a modified git repo. For example, we get
+ # output and a 129 return code when a layer isn't a git repo at all.
+ return "-- modified"
+
+# Returns layer revisions along with their respective status
+def get_layer_revs(d):
+ layers = (d.getVar("BBLAYERS", True) or "").split()
+ metadata_revs = ["%-17s = %s:%s %s" % (os.path.basename(i), \
+ base_get_metadata_git_branch(i, None).strip(), \
+ base_get_metadata_git_revision(i, None), \
+ get_layer_git_status(i)) \
+ for i in layers]
+ return '\n'.join(metadata_revs)
+
+def buildinfo_target(d):
+ # Get context
+ if d.getVar('BB_WORKERCONTEXT', True) != '1':
+ return ""
+ # Single and list variables to be read
+ vars = (d.getVar("IMAGE_BUILDINFO_VARS", True) or "")
+ listvars = (d.getVar("IMAGE_BUILDINFO_LVARS", True) or "")
+ return image_buildinfo_outputvars(vars, listvars, d)
+
+# Write build information to target filesystem
+python buildinfo () {
+ with open(d.expand('${IMAGE_ROOTFS}${sysconfdir}/build'), 'w') as build:
+ build.writelines((
+ '''-----------------------
+Build Configuration: |
+-----------------------
+''',
+ buildinfo_target(d),
+ '''
+-----------------------
+Layer Revisions: |
+-----------------------
+''',
+ get_layer_revs(d)
+ ))
+}
+
+IMAGE_PREPROCESS_COMMAND += "buildinfo;"
diff --git a/import-layers/yocto-poky/meta/classes/image-live.bbclass b/import-layers/yocto-poky/meta/classes/image-live.bbclass
new file mode 100644
index 000000000..c8a861060
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/image-live.bbclass
@@ -0,0 +1,284 @@
+# Copyright (C) 2004, Advanced Micro Devices, Inc. All Rights Reserved
+# Released under the MIT license (see packages/COPYING)
+
+# Creates a bootable image using syslinux, your kernel and an optional
+# initrd
+
+#
+# End result is two things:
+#
+# 1. A .hddimg file which is an msdos filesystem containing syslinux, a kernel,
+# an initrd and a rootfs image. These can be written to hard disks directly and
+# also booted from USB flash disks (write them there with dd).
+#
+# 2. A CD .iso image
+
+# The boot process is that the initrd boots and processes the label that was
+# selected in syslinux. Actions based on the label are then performed (e.g.
+# installing to an hdd)
+
+# External variables (also used by syslinux.bbclass)
+# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
+# ${COMPRESSISO} - Transparently compress the ISO, reducing its size by ~40% if set to 1
+# ${NOISO} - skip building the ISO image if set to 1
+# ${NOHDD} - skip building the HDD image if set to 1
+# ${HDDIMG_ID} - FAT image volume-id
+# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
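+#
+# Example (illustrative): skip the ISO and give the FAT image a fixed volume-id:
+#   NOISO = "1"
+#   HDDIMG_ID = "deadbeef"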
+
+inherit live-vm-common
+
+do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
+ mtools-native:do_populate_sysroot \
+ cdrtools-native:do_populate_sysroot \
+ virtual/kernel:do_deploy \
+ ${MLPREFIX}syslinux:do_populate_sysroot \
+ syslinux-native:do_populate_sysroot \
+ ${@oe.utils.ifelse(d.getVar('COMPRESSISO', False),'zisofs-tools-native:do_populate_sysroot','')} \
+ ${PN}:do_image_ext4 \
+ "
+
+
+LABELS_LIVE ?= "boot install"
+ROOT_LIVE ?= "root=/dev/ram0"
+INITRD_IMAGE_LIVE ?= "core-image-minimal-initramfs"
+INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.cpio.gz"
+
+ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.ext4"
+
+IMAGE_TYPEDEP_live = "ext4"
+IMAGE_TYPEDEP_iso = "ext4"
+IMAGE_TYPEDEP_hddimg = "ext4"
+IMAGE_TYPES_MASKED += "live hddimg iso"
+
+python() {
+ image_b = d.getVar('IMAGE_BASENAME', True)
+ initrd_i = d.getVar('INITRD_IMAGE_LIVE', True)
+ if image_b == initrd_i:
+ bb.error('INITRD_IMAGE_LIVE %s cannot use image live, hddimg or iso.' % initrd_i)
+ bb.fatal('Check IMAGE_FSTYPES and INITRAMFS_FSTYPES settings.')
+ else:
+ d.appendVarFlag('do_bootimg', 'depends', ' %s:do_image_complete' % initrd_i)
+}
+
+HDDDIR = "${S}/hddimg"
+ISODIR = "${S}/iso"
+EFIIMGDIR = "${S}/efi_img"
+COMPACT_ISODIR = "${S}/iso.z"
+COMPRESSISO ?= "0"
+
+ISOLINUXDIR ?= "/isolinux"
+ISO_BOOTIMG = "isolinux/isolinux.bin"
+ISO_BOOTCAT = "isolinux/boot.cat"
+MKISOFS_OPTIONS = "-no-emul-boot -boot-load-size 4 -boot-info-table"
+
+BOOTIMG_VOLUME_ID ?= "boot"
+BOOTIMG_EXTRA_SPACE ?= "512"
+
+populate_live() {
+ populate_kernel $1
+ if [ -s "${ROOTFS}" ]; then
+ install -m 0644 ${ROOTFS} $1/rootfs.img
+ fi
+}
+
+build_iso() {
+ # Only create an ISO if we have an INITRD and NOISO was not set
+ if [ -z "${INITRD}" ] || [ "${NOISO}" = "1" ]; then
+ bbnote "ISO image will not be created."
+ return
+ fi
+ # ${INITRD} is a list of multiple filesystem images
+ for fs in ${INITRD}
+ do
+ if [ ! -s "$fs" ]; then
+ bbnote "ISO image will not be created. $fs is invalid."
+ return
+ fi
+ done
+
+ populate_live ${ISODIR}
+
+ if [ "${PCBIOS}" = "1" ]; then
+ syslinux_iso_populate ${ISODIR}
+ fi
+ if [ "${EFI}" = "1" ]; then
+ efi_iso_populate ${ISODIR}
+ build_fat_img ${EFIIMGDIR} ${ISODIR}/efi.img
+ fi
+
+ # EFI only
+ if [ "${PCBIOS}" != "1" ] && [ "${EFI}" = "1" ] ; then
+ # Work around a bug in isohybrid where it requires isolinux.bin
+ # in the boot catalog, even though it is not used
+ mkdir -p ${ISODIR}/${ISOLINUXDIR}
+ install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR}
+ fi
+
+ if [ "${COMPRESSISO}" = "1" ] ; then
+ # create compact directory, compress iso
+ mkdir -p ${COMPACT_ISODIR}
+ mkzftree -z 9 -p 4 -F ${ISODIR}/rootfs.img ${COMPACT_ISODIR}/rootfs.img
+
+ # move compact iso to iso, then remove compact directory
+ mv ${COMPACT_ISODIR}/rootfs.img ${ISODIR}/rootfs.img
+ rm -Rf ${COMPACT_ISODIR}
+ mkisofs_compress_opts="-R -z -D -l"
+ else
+ mkisofs_compress_opts="-r"
+ fi
+
+ # Check the size of ${ISODIR}/rootfs.img and use mkisofs -iso-level 3
+ # when it exceeds 3.8GB: the specification limit is 4GB - 1 byte, and we
+ # need to leave some space for other files.
+ mkisofs_iso_level=""
+
+ if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
+ rootfs_img_size=`stat -c '%s' ${ISODIR}/rootfs.img`
+ # 4080218931 = 3.8 * 1024 * 1024 * 1024
+ if [ $rootfs_img_size -gt 4080218931 ]; then
+ bbnote "${ISODIR}/rootfs.img execeeds 3.8GB, using '-iso-level 3' for mkisofs"
+ mkisofs_iso_level="-iso-level 3"
+ fi
+ fi
+
+ if [ "${PCBIOS}" = "1" ] && [ "${EFI}" != "1" ] ; then
+ # PCBIOS only media
+ mkisofs -V ${BOOTIMG_VOLUME_ID} \
+ -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
+ -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
+ $mkisofs_compress_opts \
+ ${MKISOFS_OPTIONS} $mkisofs_iso_level ${ISODIR}
+ else
+ # EFI only OR EFI+PCBIOS
+ mkisofs -A ${BOOTIMG_VOLUME_ID} -V ${BOOTIMG_VOLUME_ID} \
+ -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
+ -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
+ $mkisofs_compress_opts ${MKISOFS_OPTIONS} $mkisofs_iso_level \
+ -eltorito-alt-boot -eltorito-platform efi \
+ -b efi.img -no-emul-boot \
+ ${ISODIR}
+ isohybrid_args="-u"
+ fi
+
+ isohybrid $isohybrid_args ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso
+}
+
+build_fat_img() {
+ FATSOURCEDIR=$1
+ FATIMG=$2
+
+ # Calculate the size required for the final image including the
+ # data and filesystem overhead.
+ # Sectors: 512 bytes
+ # Blocks: 1024 bytes
+
+ # Determine the sector count just for the data
+ SECTORS=$(expr $(du --apparent-size -ks ${FATSOURCEDIR} | cut -f 1) \* 2)
+
+ # Account for the filesystem overhead. This includes directory
+ # entries in the clusters as well as the FAT itself.
+ # Assumptions:
+ # FAT32 (12 or 16 may be selected by mkdosfs, but the extra
+ # padding will be minimal on those smaller images and not
+ # worth the logic here to calculate the smaller FAT sizes)
+ # < 16 entries per directory
+ # 8.3 filenames only
+
+ # 32 bytes per dir entry
+ DIR_BYTES=$(expr $(find ${FATSOURCEDIR} | tail -n +2 | wc -l) \* 32)
+ # 32 bytes for every end-of-directory dir entry
+ DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 32))
+ # 4 bytes per FAT entry per sector of data
+ FAT_BYTES=$(expr $SECTORS \* 4)
+ # 4 bytes per FAT entry per end-of-cluster list
+ FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 4))
+
+ # Use a ceiling function to determine FS overhead in sectors
+ DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512)
+ # There are two FATs on the image
+ FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2)
+ SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS))
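+
+ # Worked example (illustrative): 1000 data sectors holding ~50 files gives
+ # DIR_BYTES ~= 50*32 = 1600 -> 4 directory sectors, and FAT_BYTES ~= 1000*4
+ # = 4000 -> 8 sectors per FAT, 16 for the two FAT copies.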
+
+ # Determine the final size in blocks accounting for some padding
+ BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})
+
+ # Ensure total sectors is an integral number of sectors per
+ # track or mcopy will complain. Sectors are 512 bytes, and we
+ # generate images with 32 sectors per track. This calculation is
+ # done in blocks, thus the mod by 16 instead of 32.
+ BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
+
+ # mkdosfs will sometimes use FAT16 when it is not appropriate,
+ # resulting in a boot failure from SYSLINUX. Use FAT32 for
+ # images larger than 512MB, otherwise let mkdosfs decide.
+ if [ $(expr $BLOCKS / 1024) -gt 512 ]; then
+ FATSIZE="-F 32"
+ fi
+
+ # mkdosfs will fail if ${FATIMG} exists. Since we are creating a
+ # new image, it is safe to delete any previous image.
+ if [ -e ${FATIMG} ]; then
+ rm ${FATIMG}
+ fi
+
+ if [ -z "${HDDIMG_ID}" ]; then
+ mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} \
+ ${BLOCKS}
+ else
+ mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} \
+ ${BLOCKS} -i ${HDDIMG_ID}
+ fi
+
+ # Copy FATSOURCEDIR recursively into the image file directly
+ mcopy -i ${FATIMG} -s ${FATSOURCEDIR}/* ::/
+}
+
+build_hddimg() {
+ # Create an HDD image
+ if [ "${NOHDD}" != "1" ] ; then
+ populate_live ${HDDDIR}
+
+ if [ "${PCBIOS}" = "1" ]; then
+ syslinux_hddimg_populate ${HDDDIR}
+ fi
+ if [ "${EFI}" = "1" ]; then
+ efi_hddimg_populate ${HDDDIR}
+ fi
+
+ # Check the size of ${HDDDIR}/rootfs.img, error out if it
+ # exceeds 4GB, it is the single file's max size of FAT fs.
+ if [ -f ${HDDDIR}/rootfs.img ]; then
+ rootfs_img_size=`stat -c '%s' ${HDDDIR}/rootfs.img`
+ max_size=`expr 4 \* 1024 \* 1024 \* 1024`
+ if [ $rootfs_img_size -gt $max_size ]; then
+ bberror "${HDDDIR}/rootfs.img execeeds 4GB,"
+ bberror "this doesn't work on FAT filesystem, you can try either of:"
+ bberror "1) Reduce the size of rootfs.img"
+ bbfatal "2) Use iso, vmdk or vdi to instead of hddimg\n"
+ fi
+ fi
+
+ build_fat_img ${HDDDIR} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
+
+ if [ "${PCBIOS}" = "1" ]; then
+ syslinux_hddimg_install
+ fi
+
+ chmod 644 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
+ fi
+}
+
+python do_bootimg() {
+ set_live_vm_vars(d, 'LIVE')
+ if d.getVar("PCBIOS", True) == "1":
+ bb.build.exec_func('build_syslinux_cfg', d)
+ if d.getVar("EFI", True) == "1":
+ bb.build.exec_func('build_efi_cfg', d)
+ bb.build.exec_func('build_hddimg', d)
+ bb.build.exec_func('build_iso', d)
+ bb.build.exec_func('create_symlinks', d)
+}
+do_bootimg[subimages] = "hddimg iso"
+do_bootimg[imgsuffix] = "."
+
+addtask bootimg before do_image_complete
diff --git a/import-layers/yocto-poky/meta/classes/image-mklibs.bbclass b/import-layers/yocto-poky/meta/classes/image-mklibs.bbclass
new file mode 100644
index 000000000..5f6df1b17
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/image-mklibs.bbclass
@@ -0,0 +1,56 @@
+do_rootfs[depends] += "mklibs-native:do_populate_sysroot"
+
+IMAGE_PREPROCESS_COMMAND += "mklibs_optimize_image; "
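+
+# Select which images to optimize, e.g. in local.conf (illustrative; see
+# mklibs_optimize_image below, which also accepts specific image names):
+#   MKLIBS_OPTIMIZED_IMAGES = "all"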
+
+inherit linuxloader
+
+mklibs_optimize_image_doit() {
+ rm -rf ${WORKDIR}/mklibs
+ mkdir -p ${WORKDIR}/mklibs/dest
+ cd ${IMAGE_ROOTFS}
+ du -bs > ${WORKDIR}/mklibs/du.before.mklibs.txt
+
+ # Build a list of dynamically linked executable ELF files.
+ # Omit libc/libpthread as a special case because it has an interpreter
+ # but is primarily what we intend to strip down.
+ for i in `find . -type f -executable ! -name 'libc-*' ! -name 'libpthread-*'`; do
+ file $i | grep -q ELF || continue
+ ${HOST_PREFIX}readelf -l $i | grep -q INTERP || continue
+ echo $i
+ done > ${WORKDIR}/mklibs/executables.list
+
+ dynamic_loader=$(linuxloader)
+
+ mklibs -v \
+ --ldlib ${dynamic_loader} \
+ --libdir ${baselib} \
+ --sysroot ${PKG_CONFIG_SYSROOT_DIR} \
+ --gcc-options "--sysroot=${PKG_CONFIG_SYSROOT_DIR}" \
+ --root ${IMAGE_ROOTFS} \
+ --target `echo ${TARGET_PREFIX} | sed 's/-$//' ` \
+ -d ${WORKDIR}/mklibs/dest \
+ `cat ${WORKDIR}/mklibs/executables.list`
+
+ cd ${WORKDIR}/mklibs/dest
+ for i in *
+ do
+ cp $i `find ${IMAGE_ROOTFS} -name $i`
+ done
+
+ cd ${IMAGE_ROOTFS}
+ du -bs > ${WORKDIR}/mklibs/du.after.mklibs.txt
+
+ echo rootfs size before mklibs optimization: `cat ${WORKDIR}/mklibs/du.before.mklibs.txt`
+ echo rootfs size after mklibs optimization: `cat ${WORKDIR}/mklibs/du.after.mklibs.txt`
+}
+
+mklibs_optimize_image() {
+ for img in ${MKLIBS_OPTIMIZED_IMAGES}
+ do
+ if [ "${img}" = "${PN}" ] || [ "${img}" = "all" ]
+ then
+ mklibs_optimize_image_doit
+ break
+ fi
+ done
+}
diff --git a/import-layers/yocto-poky/meta/classes/image-prelink.bbclass b/import-layers/yocto-poky/meta/classes/image-prelink.bbclass
new file mode 100644
index 000000000..4157df021
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/image-prelink.bbclass
@@ -0,0 +1,54 @@
+do_rootfs[depends] += "prelink-native:do_populate_sysroot"
+
+IMAGE_PREPROCESS_COMMAND += "prelink_setup; prelink_image; "
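+
+# Typically enabled from local.conf (illustrative):
+#   USER_CLASSES += "image-prelink"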
+
+python prelink_setup () {
+ oe.utils.write_ld_so_conf(d)
+}
+
+inherit linuxloader
+
+prelink_image () {
+# export PSEUDO_DEBUG=4
+# /bin/env | /bin/grep PSEUDO
+# echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
+# echo "LD_PRELOAD=$LD_PRELOAD"
+
+ pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
+ echo "Size before prelinking $pre_prelink_size."
+
+ # We need a prelink conf on the filesystem, add one if it's missing
+ if [ ! -e ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf ]; then
+ cp ${STAGING_ETCDIR_NATIVE}/prelink.conf \
+ ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
+ dummy_prelink_conf=true;
+ else
+ dummy_prelink_conf=false;
+ fi
+
+ # We need an ld.so.conf with pathnames in it on the filesystem, add one if it's missing
+ ldsoconf=${IMAGE_ROOTFS}${sysconfdir}/ld.so.conf
+ if [ -e $ldsoconf ]; then
+ cp $ldsoconf $ldsoconf.prelink
+ fi
+ cat ${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf >> $ldsoconf
+
+ dynamic_loader=$(linuxloader)
+
+ # prelink!
+ ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
+
+ # Remove the prelink.conf if we had to add it.
+ if [ "$dummy_prelink_conf" = "true" ]; then
+ rm -f ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
+ fi
+
+ if [ -e $ldsoconf.prelink ]; then
+ mv $ldsoconf.prelink $ldsoconf
+ else
+ rm $ldsoconf
+ fi
+
+ post_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
+ echo "Size after prelinking $post_prelink_size."
+}
diff --git a/import-layers/yocto-poky/meta/classes/image-swab.bbclass b/import-layers/yocto-poky/meta/classes/image-swab.bbclass
new file mode 100644
index 000000000..6b02cadaf
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/image-swab.bbclass
@@ -0,0 +1,94 @@
+HOST_DATA ?= "${TMPDIR}/host-contamination-data/"
+SWABBER_REPORT ?= "${LOG_DIR}/swabber/"
+SWABBER_LOGS ?= "${LOG_DIR}/contamination-logs"
+TRACE_LOGDIR ?= "${SWABBER_LOGS}/${PACKAGE_ARCH}"
+TRACE_LOGFILE = "${TRACE_LOGDIR}/${PN}-${PV}"
+
+SWAB_ORIG_TASK := "${BB_DEFAULT_TASK}"
+BB_DEFAULT_TASK = "generate_swabber_report"
+
+# Several recipes don't build with parallel make when run under strace.
+# Ideally these should be fixed, but as a temporary measure parallel
+# builds are disabled for troublesome recipes.
+PARALLEL_MAKE_pn-openssl = ""
+PARALLEL_MAKE_pn-glibc = ""
+PARALLEL_MAKE_pn-glib-2.0 = ""
+PARALLEL_MAKE_pn-libxml2 = ""
+PARALLEL_MAKE_pn-readline = ""
+PARALLEL_MAKE_pn-util-linux = ""
+PARALLEL_MAKE_pn-binutils = ""
+PARALLEL_MAKE_pn-bison = ""
+PARALLEL_MAKE_pn-cmake = ""
+PARALLEL_MAKE_pn-elfutils = ""
+PARALLEL_MAKE_pn-gcc = ""
+PARALLEL_MAKE_pn-gcc-runtime = ""
+PARALLEL_MAKE_pn-m4 = ""
+PARALLEL_MAKE_pn-opkg = ""
+PARALLEL_MAKE_pn-pkgconfig = ""
+PARALLEL_MAKE_pn-prelink = ""
+PARALLEL_MAKE_pn-rpm = ""
+PARALLEL_MAKE_pn-tcl = ""
+PARALLEL_MAKE_pn-beecrypt = ""
+PARALLEL_MAKE_pn-curl = ""
+PARALLEL_MAKE_pn-gmp = ""
+PARALLEL_MAKE_pn-libmpc = ""
+PARALLEL_MAKE_pn-libxslt = ""
+PARALLEL_MAKE_pn-lzo = ""
+PARALLEL_MAKE_pn-popt = ""
+PARALLEL_MAKE_pn-linux-wrs = ""
+PARALLEL_MAKE_pn-libgcrypt = ""
+PARALLEL_MAKE_pn-gpgme = ""
+PARALLEL_MAKE_pn-udev = ""
+PARALLEL_MAKE_pn-gnutls = ""
+
+python() {
+ # NOTE: It might be useful to detect host contamination in native and cross
+ # packages too, but as it turns out to be pretty hard to do this for all native
+ # and cross packages which aren't swabber-native or one of its dependencies,
+ # I have ignored them for now...
+ if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('nativesdk', d) and not bb.data.inherits_class('cross', d):
+ deps = (d.getVarFlag('do_setscene', 'depends', True) or "").split()
+ deps.append('strace-native:do_populate_sysroot')
+ d.setVarFlag('do_setscene', 'depends', " ".join(deps))
+ logdir = d.expand("${TRACE_LOGDIR}")
+ bb.utils.mkdirhier(logdir)
+ else:
+ d.setVar('STRACEFUNC', '')
+}
+
+STRACEPID = "${@os.getpid()}"
+STRACEFUNC = "imageswab_attachstrace"
+
+do_configure[prefuncs] += "${STRACEFUNC}"
+do_compile[prefuncs] += "${STRACEFUNC}"
+
+imageswab_attachstrace () {
+ STRACE=`which strace`
+
+ if [ -x "$STRACE" ]; then
+ swabber-strace-attach "$STRACE -f -o ${TRACE_LOGFILE}-${BB_CURRENTTASK}.log -e trace=open,execve -p ${STRACEPID}" "${TRACE_LOGFILE}-traceattach-${BB_CURRENTTASK}.log"
+ fi
+}
+
+do_generate_swabber_report () {
+
+ update_distro ${HOST_DATA}
+
+ # Swabber can't create the directory for us
+ mkdir -p ${SWABBER_REPORT}
+
+ REPORTSTAMP=${SWAB_ORIG_TASK}-`date +%2m%2d%2H%2M%Y`
+
+ if [ `which ccache` ] ; then
+ CCACHE_DIR=`( ccache -s | grep "cache directory" | grep -o '[^ ]*$' 2> /dev/null )`
+ fi
+
+ if [ "$(ls -A ${HOST_DATA})" ]; then
+ echo "Generating swabber report"
+ swabber -d ${HOST_DATA} -l ${SWABBER_LOGS} -o ${SWABBER_REPORT}/report-${REPORTSTAMP}.txt -r ${SWABBER_REPORT}/extra_report-${REPORTSTAMP}.txt -c all -p ${TOPDIR} -f ${OEROOT}/meta/conf/swabber ${TOPDIR} ${OEROOT} ${CCACHE_DIR}
+ else
+ echo "No host data, cannot generate swabber report."
+ fi
+}
+addtask generate_swabber_report after do_${SWAB_ORIG_TASK}
+do_generate_swabber_report[depends] = "swabber-native:do_populate_sysroot"
diff --git a/import-layers/yocto-poky/meta/classes/image-vm.bbclass b/import-layers/yocto-poky/meta/classes/image-vm.bbclass
new file mode 100644
index 000000000..47f73261f
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/image-vm.bbclass
@@ -0,0 +1,175 @@
+# image-vm.bbclass
+# (loosely based on image-live.bbclass Copyright (C) 2004, Advanced Micro Devices, Inc.)
+#
+# Create an image which can be placed directly onto a harddisk using dd and then
+# booted.
+#
+# This uses syslinux. extlinux would have been nice but required the ext2/3
+# partition to be mounted. grub requires running itself as part of the install
+# process.
+#
+# The end result is a 512-byte boot sector populated with an MBR and partition table
+# followed by an msdos FAT16 partition containing syslinux and a linux kernel
+# completed by the ext2/3 rootfs.
+#
+# We have to push the msdos partition table size above 16MB so that FAT16 is used, as parted
+# won't touch FAT12 partitions.
+
+inherit live-vm-common
+
+do_bootdirectdisk[depends] += "dosfstools-native:do_populate_sysroot \
+ virtual/kernel:do_deploy \
+ syslinux:do_populate_sysroot \
+ syslinux-native:do_populate_sysroot \
+ parted-native:do_populate_sysroot \
+ mtools-native:do_populate_sysroot \
+ ${PN}:do_image_ext4 \
+ "
+
+IMAGE_TYPEDEP_vmdk = "ext4"
+IMAGE_TYPEDEP_vdi = "ext4"
+IMAGE_TYPEDEP_qcow2 = "ext4"
+IMAGE_TYPEDEP_hdddirect = "ext4"
+IMAGE_TYPES_MASKED += "vmdk vdi qcow2 hdddirect"
+
+ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.ext4"
+
+# Used by bootloader
+LABELS_VM ?= "boot"
+ROOT_VM ?= "root=/dev/sda2"
+# Using an initramfs is optional. Enable it by setting INITRD_IMAGE_VM.
+INITRD_IMAGE_VM ?= ""
+INITRD_VM ?= "${@'${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_VM}-${MACHINE}.cpio.gz' if '${INITRD_IMAGE_VM}' else ''}"
+do_bootdirectdisk[depends] += "${@'${INITRD_IMAGE_VM}:do_image_complete' if '${INITRD_IMAGE_VM}' else ''}"
+
+BOOTDD_VOLUME_ID ?= "boot"
+BOOTDD_EXTRA_SPACE ?= "16384"
+
+DISK_SIGNATURE ?= "${DISK_SIGNATURE_GENERATED}"
+DISK_SIGNATURE[vardepsexclude] = "DISK_SIGNATURE_GENERATED"
+
+build_boot_dd() {
+ HDDDIR="${S}/hdd/boot"
+ HDDIMG="${S}/hdd.image"
+ IMAGE=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect
+
+ populate_kernel $HDDDIR
+
+ if [ "${PCBIOS}" = "1" ]; then
+ syslinux_hddimg_populate $HDDDIR
+ fi
+ if [ "${EFI}" = "1" ]; then
+ efi_hddimg_populate $HDDDIR
+ fi
+
+ BLOCKS=`du -bks $HDDDIR | cut -f 1`
+ BLOCKS=`expr $BLOCKS + ${BOOTDD_EXTRA_SPACE}`
+
+ # Ensure total sectors is an integral number of sectors per
+ # track or mcopy will complain. Sectors are 512 bytes, and we
+ # generate images with 32 sectors per track. This calculation is
+ # done in blocks, thus the mod by 16 instead of 32.
+ BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
+
+ # Remove it since mkdosfs would fail when it exists
+ rm -f $HDDIMG
+ mkdosfs -n ${BOOTDD_VOLUME_ID} -S 512 -C $HDDIMG $BLOCKS
+ mcopy -i $HDDIMG -s $HDDDIR/* ::/
+
+ if [ "${PCBIOS}" = "1" ]; then
+ syslinux_hdddirect_install $HDDIMG
+ fi
+ chmod 644 $HDDIMG
+
+ ROOTFSBLOCKS=`du -Lbks ${ROOTFS} | cut -f 1`
+ TOTALSIZE=`expr $BLOCKS + $ROOTFSBLOCKS`
+ END1=`expr $BLOCKS \* 1024`
+ END2=`expr $END1 + 512`
+ END3=`expr \( $ROOTFSBLOCKS \* 1024 \) + $END1`
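+ # e.g. (illustrative): BLOCKS=16384 and ROOTFSBLOCKS=262144 give
+ # END1=16777216, END2=16777728 and END3=285212672 bytes.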
+
+ echo $ROOTFSBLOCKS $TOTALSIZE $END1 $END2 $END3
+ rm -rf $IMAGE
+ dd if=/dev/zero of=$IMAGE bs=1024 seek=$TOTALSIZE count=1
+
+ parted $IMAGE mklabel msdos
+ parted $IMAGE mkpart primary fat16 0 ${END1}B
+ parted $IMAGE unit B mkpart primary ext2 ${END2}B ${END3}B
+ parted $IMAGE set 1 boot on
+
+ parted $IMAGE print
+
+ awk "BEGIN { printf \"$(echo ${DISK_SIGNATURE} | fold -w 2 | tac | paste -sd '' | sed 's/\(..\)/\\x&/g')\" }" | \
+ dd of=$IMAGE bs=1 seek=440 conv=notrunc
+
+ OFFSET=`expr $END2 / 512`
+ if [ "${PCBIOS}" = "1" ]; then
+ dd if=${STAGING_DATADIR}/syslinux/mbr.bin of=$IMAGE conv=notrunc
+ fi
+
+ dd if=$HDDIMG of=$IMAGE conv=notrunc seek=1 bs=512
+ dd if=${ROOTFS} of=$IMAGE conv=notrunc seek=$OFFSET bs=512
+
+ cd ${DEPLOY_DIR_IMAGE}
+ rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
+ ln -s ${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
+}
+
+python do_bootdirectdisk() {
+ validate_disk_signature(d)
+ set_live_vm_vars(d, 'VM')
+ if d.getVar("PCBIOS", True) == "1":
+ bb.build.exec_func('build_syslinux_cfg', d)
+ if d.getVar("EFI", True) == "1":
+ bb.build.exec_func('build_efi_cfg', d)
+ bb.build.exec_func('build_boot_dd', d)
+}
+
+def generate_disk_signature():
+ import uuid
+
+ signature = str(uuid.uuid4())[:8]
+
+ if signature != '00000000':
+ return signature
+ else:
+ return 'ffffffff'
+
+def validate_disk_signature(d):
+ import re
+
+ disk_signature = d.getVar("DISK_SIGNATURE", True)
+
+ if not re.match(r'^[0-9a-fA-F]{8}$', disk_signature):
+ bb.fatal("DISK_SIGNATURE '%s' must be an 8 digit hex string" % disk_signature)
+
+DISK_SIGNATURE_GENERATED := "${@generate_disk_signature()}"
+
+run_qemu_img (){
+ type="$1"
+ qemu-img convert -O $type ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.$type
+ ln -sf ${IMAGE_NAME}.$type ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.$type
+}
+create_vmdk_image () {
+ run_qemu_img vmdk
+}
+
+create_vdi_image () {
+ run_qemu_img vdi
+}
+
+create_qcow2_image () {
+ run_qemu_img qcow2
+}
+
+python do_vmimg() {
+ if 'vmdk' in d.getVar('IMAGE_FSTYPES', True):
+ bb.build.exec_func('create_vmdk_image', d)
+ if 'vdi' in d.getVar('IMAGE_FSTYPES', True):
+ bb.build.exec_func('create_vdi_image', d)
+ if 'qcow2' in d.getVar('IMAGE_FSTYPES', True):
+ bb.build.exec_func('create_qcow2_image', d)
+}
+
+addtask bootdirectdisk before do_vmimg
+addtask vmimg after do_bootdirectdisk before do_image_complete
+do_vmimg[depends] += "qemu-native:do_populate_sysroot"
diff --git a/import-layers/yocto-poky/meta/classes/image.bbclass b/import-layers/yocto-poky/meta/classes/image.bbclass
new file mode 100644
index 000000000..8bfd24193
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/image.bbclass
@@ -0,0 +1,547 @@
+inherit rootfs_${IMAGE_PKGTYPE}
+
+# Only Linux SDKs support populate_sdk_ext; fall back to populate_sdk
+# in the non-Linux SDK_OS case, such as mingw32
+SDKEXTCLASS ?= "${@['populate_sdk', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS", True)]}"
+inherit ${SDKEXTCLASS}
+
+TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
+TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
+POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks; "
+
+inherit gzipnative
+
+LICENSE = "MIT"
+PACKAGES = ""
+DEPENDS += "${MLPREFIX}qemuwrapper-cross ${MLPREFIX}depmodwrapper-cross"
+RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL}"
+RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
+
+INHIBIT_DEFAULT_DEPS = "1"
+
+TESTIMAGECLASS = "${@base_conditional('TEST_IMAGE', '1', 'testimage-auto', '', d)}"
+inherit ${TESTIMAGECLASS}
+
+# IMAGE_FEATURES may contain any available package group
+IMAGE_FEATURES ?= ""
+IMAGE_FEATURES[type] = "list"
+IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs empty-root-password allow-empty-password post-install-logging"
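+# e.g. (illustrative): IMAGE_FEATURES += "splash post-install-logging"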
+
+# Generate companion debugfs?
+IMAGE_GEN_DEBUGFS ?= "0"
+
+# rootfs bootstrap install
+ROOTFS_BOOTSTRAP_INSTALL = "${@bb.utils.contains("IMAGE_FEATURES", "package-management", "", "${ROOTFS_PKGMANAGE_BOOTSTRAP}",d)}"
+
+# These packages will be removed from a read-only rootfs after all other
+# packages have been installed
+ROOTFS_RO_UNNEEDED = "update-rc.d base-passwd shadow ${VIRTUAL-RUNTIME_update-alternatives} ${ROOTFS_BOOTSTRAP_INSTALL}"
+
+# packages to install from features
+FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
+FEATURE_INSTALL[vardepvalue] = "${FEATURE_INSTALL}"
+FEATURE_INSTALL_OPTIONAL = "${@' '.join(oe.packagegroup.optional_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
+FEATURE_INSTALL_OPTIONAL[vardepvalue] = "${FEATURE_INSTALL_OPTIONAL}"
+
+# Define some very basic feature package groups
+FEATURE_PACKAGES_package-management = "${ROOTFS_PKGMANAGE}"
+SPLASH ?= "psplash"
+FEATURE_PACKAGES_splash = "${SPLASH}"
+
+IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}'
+
+def check_image_features(d):
+ valid_features = (d.getVarFlag('IMAGE_FEATURES', 'validitems', True) or "").split()
+ valid_features += d.getVarFlags('COMPLEMENTARY_GLOB').keys()
+ for var in d:
+ if var.startswith("PACKAGE_GROUP_"):
+ bb.warn("PACKAGE_GROUP is deprecated, please use FEATURE_PACKAGES instead")
+ valid_features.append(var[14:])
+ elif var.startswith("FEATURE_PACKAGES_"):
+ valid_features.append(var[17:])
+ valid_features.sort()
+
+ features = set(oe.data.typed_value('IMAGE_FEATURES', d))
+ for feature in features:
+ if feature not in valid_features:
+ if bb.utils.contains('EXTRA_IMAGE_FEATURES', feature, True, False, d):
+ raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES (added via EXTRA_IMAGE_FEATURES) is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
+ else:
+ raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
+
+IMAGE_INSTALL ?= ""
+IMAGE_INSTALL[type] = "list"
+export PACKAGE_INSTALL ?= "${IMAGE_INSTALL} ${ROOTFS_BOOTSTRAP_INSTALL} ${FEATURE_INSTALL}"
+PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}"
+
+# Images are generally built explicitly, do not need to be part of world.
+EXCLUDE_FROM_WORLD = "1"
+
+USE_DEVFS ?= "1"
+USE_DEPMOD ?= "1"
+
+PID = "${@os.getpid()}"
+
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+
+LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
+LDCONFIGDEPEND_libc-uclibc = ""
+LDCONFIGDEPEND_libc-musl = ""
+
+# This is needed to have depmod data in PKGDATA_DIR,
+# but if you're building a small initramfs image,
+# e.g. to include it in your kernel, you probably
+# don't want this dependency, as it causes a dependency loop
+
+do_rootfs[depends] += " \
+ makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND} \
+ virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot \
+ ${KERNELDEPMODDEPEND} \
+"
+do_rootfs[recrdeptask] += "do_packagedata"
+
+def rootfs_command_variables(d):
+ return ['ROOTFS_POSTPROCESS_COMMAND','ROOTFS_PREPROCESS_COMMAND','ROOTFS_POSTINSTALL_COMMAND','ROOTFS_POSTUNINSTALL_COMMAND','OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','IMAGE_POSTPROCESS_COMMAND',
+ 'IMAGE_PREPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS','DEB_PREPROCESS_COMMANDS','DEB_POSTPROCESS_COMMANDS']
+
+python () {
+ variables = rootfs_command_variables(d) + sdk_command_variables(d)
+ for var in variables:
+ if d.getVar(var, False):
+ d.setVarFlag(var, 'func', '1')
+}
+
+def rootfs_variables(d):
+ from oe.rootfs import variable_depends
+ variables = ['IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
+ 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','RM_OLD_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS',
+ 'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
+ 'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
+ 'COMPRESSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED']
+ variables.extend(rootfs_command_variables(d))
+ variables.extend(variable_depends(d))
+ return " ".join(variables)
+
+do_rootfs[vardeps] += "${@rootfs_variables(d)}"
+
+do_build[depends] += "virtual/kernel:do_deploy"
+
+def build_live(d):
+ if bb.utils.contains("IMAGE_FSTYPES", "live", "live", "0", d) == "0": # live is not set but hob might set iso or hddimg
+ d.setVar('NOISO', bb.utils.contains('IMAGE_FSTYPES', "iso", "0", "1", d))
+ d.setVar('NOHDD', bb.utils.contains('IMAGE_FSTYPES', "hddimg", "0", "1", d))
+ if d.getVar('NOISO', True) == "0" or d.getVar('NOHDD', True) == "0":
+ return "image-live"
+ return ""
+ return "image-live"
+
+IMAGE_TYPE_live = "${@build_live(d)}"
+inherit ${IMAGE_TYPE_live}
+
+IMAGE_TYPE_vm = '${@bb.utils.contains_any("IMAGE_FSTYPES", ["vmdk", "vdi", "qcow2", "hdddirect"], "image-vm", "", d)}'
+inherit ${IMAGE_TYPE_vm}
+
+python () {
+ deps = " " + imagetypes_getdepends(d)
+ d.appendVarFlag('do_rootfs', 'depends', deps)
+
+ deps = ""
+ for dep in (d.getVar('EXTRA_IMAGEDEPENDS', True) or "").split():
+ deps += " %s:do_populate_sysroot" % dep
+ d.appendVarFlag('do_build', 'depends', deps)
+
+    # Process IMAGE_FEATURES; we must do this before runtime_mapping_rename
+    # Check for replaced image features
+ features = set(oe.data.typed_value('IMAGE_FEATURES', d))
+ remain_features = features.copy()
+ for feature in features:
+ replaces = set((d.getVar("IMAGE_FEATURES_REPLACES_%s" % feature, True) or "").split())
+ remain_features -= replaces
+
+    # Check for conflicting image features
+ for feature in remain_features:
+ conflicts = set((d.getVar("IMAGE_FEATURES_CONFLICTS_%s" % feature, True) or "").split())
+ temp = conflicts & remain_features
+ if temp:
+ bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN', True), feature, ' '.join(list(temp))))
+
+ d.setVar('IMAGE_FEATURES', ' '.join(list(remain_features)))
+
+ check_image_features(d)
+ initramfs_image = d.getVar('INITRAMFS_IMAGE', True) or ""
+ if initramfs_image != "":
+ d.appendVarFlag('do_build', 'depends', " %s:do_bundle_initramfs" % d.getVar('PN', True))
+ d.appendVarFlag('do_bundle_initramfs', 'depends', " %s:do_image_complete" % initramfs_image)
+}
+
+IMAGE_CLASSES += "image_types"
+inherit ${IMAGE_CLASSES}
+
+IMAGE_POSTPROCESS_COMMAND ?= ""
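+# These hooks are lists of shell functions, each followed by "; ". For
+# example (my_rootfs_tweak is a hypothetical function defined in the
+# image recipe):
+#   ROOTFS_POSTPROCESS_COMMAND += "my_rootfs_tweak ; "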
+
+# some default locales
+IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
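+# e.g. to ship only US English locale support: IMAGE_LINGUAS = "en-us"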
+
+LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS', True).split()))}"
+
+# Prefer image, but use the fallback files for lookups if the image ones
+# aren't yet available.
+PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
+
+inherit rootfs-postcommands
+
+PACKAGE_EXCLUDE ??= ""
+PACKAGE_EXCLUDE[type] = "list"
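+# e.g. to keep udev's hardware database out of an image:
+#   PACKAGE_EXCLUDE = "udev-hwdb"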
+
+fakeroot python do_rootfs () {
+ from oe.rootfs import create_rootfs
+ from oe.manifest import create_manifest
+
+ # Handle package exclusions
+ excl_pkgs = d.getVar("PACKAGE_EXCLUDE", True).split()
+ inst_pkgs = d.getVar("PACKAGE_INSTALL", True).split()
+ inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY", True).split()
+
+ d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
+ d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))
+
+ for pkg in excl_pkgs:
+ if pkg in inst_pkgs:
+ bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN', True), inst_pkgs))
+ inst_pkgs.remove(pkg)
+
+ if pkg in inst_attempt_pkgs:
+ bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN', True), inst_pkgs))
+ inst_attempt_pkgs.remove(pkg)
+
+ d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
+ d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))
+
+ # Ensure we handle package name remapping
+ # We have to delay the runtime_mapping_rename until just before rootfs runs
+ # otherwise, the multilib renaming could step in and squash any fixups that
+ # may have occurred.
+ pn = d.getVar('PN', True)
+ runtime_mapping_rename("PACKAGE_INSTALL", pn, d)
+ runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d)
+ runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d)
+
+ # Generate the initial manifest
+ create_manifest(d)
+
+ # Generate rootfs
+ create_rootfs(d)
+}
+do_rootfs[dirs] = "${TOPDIR}"
+do_rootfs[cleandirs] += "${S}"
+do_rootfs[umask] = "022"
+addtask rootfs before do_build
+
+fakeroot python do_image () {
+ from oe.utils import execute_pre_post_process
+
+ pre_process_cmds = d.getVar("IMAGE_PREPROCESS_COMMAND", True)
+
+ execute_pre_post_process(d, pre_process_cmds)
+}
+do_image[dirs] = "${TOPDIR}"
+do_image[umask] = "022"
+addtask do_image after do_rootfs before do_build
+
+fakeroot python do_image_complete () {
+ from oe.utils import execute_pre_post_process
+
+ post_process_cmds = d.getVar("IMAGE_POSTPROCESS_COMMAND", True)
+
+ execute_pre_post_process(d, post_process_cmds)
+}
+do_image_complete[dirs] = "${TOPDIR}"
+do_image_complete[umask] = "022"
+addtask do_image_complete after do_image before do_build
+
+#
+# Write environment variables used by wic
+# to tmp/sysroots/<machine>/imgdata/<image>.env
+#
+python do_rootfs_wicenv () {
+ wicvars = d.getVar('WICVARS', True)
+ if not wicvars:
+ return
+
+ stdir = d.getVar('STAGING_DIR_TARGET', True)
+ outdir = os.path.join(stdir, 'imgdata')
+ bb.utils.mkdirhier(outdir)
+ basename = d.getVar('IMAGE_BASENAME', True)
+ with open(os.path.join(outdir, basename) + '.env', 'w') as envf:
+ for var in wicvars.split():
+ value = d.getVar(var, True)
+ if value:
+ envf.write('%s="%s"\n' % (var, value.strip()))
+}
+addtask do_rootfs_wicenv after do_image before do_image_wic
+do_rootfs_wicenv[vardeps] += "${WICVARS}"
+do_rootfs_wicenv[prefuncs] = 'set_image_size'
+
+def setup_debugfs_variables(d):
+ d.appendVar('IMAGE_ROOTFS', '-dbg')
+ d.appendVar('IMAGE_LINK_NAME', '-dbg')
+ d.appendVar('IMAGE_NAME','-dbg')
+ debugfs_image_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS', True)
+ if debugfs_image_fstypes:
+ d.setVar('IMAGE_FSTYPES', debugfs_image_fstypes)
+
+python setup_debugfs () {
+ setup_debugfs_variables(d)
+}
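+
+# The anonymous python below turns each IMAGE_FSTYPES entry into a
+# do_image_<type> task. For example, "ext4.gz" becomes do_image_ext4,
+# running IMAGE_CMD_ext4 followed by COMPRESS_CMD_gz; the intermediate
+# .ext4 file is removed unless plain "ext4" is also in IMAGE_FSTYPES.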
+
+python () {
+ vardeps = set()
+ ctypes = d.getVar('COMPRESSIONTYPES', True).split()
+ old_overrides = d.getVar('OVERRIDES', 0)
+
+ def _image_base_type(type):
+ basetype = type
+ for ctype in ctypes:
+ if type.endswith("." + ctype):
+ basetype = type[:-len("." + ctype)]
+ break
+
+ if basetype != type:
+ # New base type itself might be generated by a conversion command.
+ basetype = _image_base_type(basetype)
+
+ return basetype
+
+ basetypes = {}
+ alltypes = d.getVar('IMAGE_FSTYPES', True).split()
+ typedeps = {}
+
+ if d.getVar('IMAGE_GEN_DEBUGFS', True) == "1":
+ debugfs_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS', True).split()
+ for t in debugfs_fstypes:
+ alltypes.append("debugfs_" + t)
+
+ def _add_type(t):
+ baset = _image_base_type(t)
+ input_t = t
+ if baset not in basetypes:
+ basetypes[baset]= []
+ if t not in basetypes[baset]:
+ basetypes[baset].append(t)
+ debug = ""
+ if t.startswith("debugfs_"):
+ t = t[8:]
+ debug = "debugfs_"
+ deps = (d.getVar('IMAGE_TYPEDEP_' + t, True) or "").split()
+ vardeps.add('IMAGE_TYPEDEP_' + t)
+ if baset not in typedeps:
+ typedeps[baset] = set()
+ deps = [debug + dep for dep in deps]
+ for dep in deps:
+ if dep not in alltypes:
+ alltypes.append(dep)
+ _add_type(dep)
+ basedep = _image_base_type(dep)
+ typedeps[baset].add(basedep)
+
+ if baset != input_t:
+ _add_type(baset)
+
+ for t in alltypes[:]:
+ _add_type(t)
+
+ d.appendVarFlag('do_image', 'vardeps', ' '.join(vardeps))
+
+ maskedtypes = (d.getVar('IMAGE_TYPES_MASKED', True) or "").split()
+
+ for t in basetypes:
+ vardeps = set()
+ cmds = []
+ subimages = []
+ realt = t
+
+ if t in maskedtypes:
+ continue
+
+ localdata = bb.data.createCopy(d)
+ debug = ""
+ if t.startswith("debugfs_"):
+ setup_debugfs_variables(localdata)
+ debug = "setup_debugfs "
+ realt = t[8:]
+ localdata.setVar('OVERRIDES', '%s:%s' % (realt, old_overrides))
+ bb.data.update_data(localdata)
+ localdata.setVar('type', realt)
+ # Delete DATETIME so we don't expand any references to it now
+ # This means the task's hash can be stable rather than having hardcoded
+ # date/time values. It will get expanded at execution time.
+        # Similarly TMPDIR since otherwise we see QA stamp comparison problems
+ localdata.delVar('DATETIME')
+ localdata.delVar('TMPDIR')
+
+ image_cmd = localdata.getVar("IMAGE_CMD", True)
+ vardeps.add('IMAGE_CMD_' + realt)
+ if image_cmd:
+ cmds.append("\t" + image_cmd)
+ else:
+ bb.fatal("No IMAGE_CMD defined for IMAGE_FSTYPES entry '%s' - possibly invalid type name or missing support class" % t)
+ cmds.append(localdata.expand("\tcd ${DEPLOY_DIR_IMAGE}"))
+
+ rm_tmp_images = set()
+ def gen_conversion_cmds(bt):
+ for ctype in ctypes:
+ if bt.endswith("." + ctype):
+ type = bt[0:-len(ctype) - 1]
+ if type.startswith("debugfs_"):
+ type = type[8:]
+ # Create input image first.
+ gen_conversion_cmds(type)
+ localdata.setVar('type', type)
+ cmds.append("\t" + localdata.getVar("COMPRESS_CMD_" + ctype, True))
+ vardeps.add('COMPRESS_CMD_' + ctype)
+ subimages.append(type + "." + ctype)
+ if type not in alltypes:
+ rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))
+
+ for bt in basetypes[t]:
+ gen_conversion_cmds(bt)
+
+ localdata.setVar('type', realt)
+ if t not in alltypes:
+ rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))
+ else:
+ subimages.append(realt)
+
+ # Clean up after applying all conversion commands. Some of them might
+ # use the same input, therefore we cannot delete sooner without applying
+ # some complex dependency analysis.
+ for image in rm_tmp_images:
+ cmds.append("\trm " + image)
+
+ after = 'do_image'
+ for dep in typedeps[t]:
+ after += ' do_image_%s' % dep.replace("-", "_").replace(".", "_")
+
+ t = t.replace("-", "_").replace(".", "_")
+
+ d.setVar('do_image_%s' % t, '\n'.join(cmds))
+ d.setVarFlag('do_image_%s' % t, 'func', '1')
+ d.setVarFlag('do_image_%s' % t, 'fakeroot', '1')
+ d.setVarFlag('do_image_%s' % t, 'prefuncs', debug + 'set_image_size')
+ d.setVarFlag('do_image_%s' % t, 'postfuncs', 'create_symlinks')
+ d.setVarFlag('do_image_%s' % t, 'subimages', ' '.join(subimages))
+ d.appendVarFlag('do_image_%s' % t, 'vardeps', ' '.join(vardeps))
+ d.appendVarFlag('do_image_%s' % t, 'vardepsexclude', 'DATETIME')
+
+ bb.debug(2, "Adding type %s before %s, after %s" % (t, 'do_image_complete', after))
+ bb.build.addtask('do_image_%s' % t, 'do_image_complete', after, d)
+}
+
+#
+# Compute the rootfs size
+#
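+# For example (illustrative numbers): a rootfs measuring 100000K with
+# IMAGE_OVERHEAD_FACTOR = "1.3", IMAGE_ROOTFS_SIZE = "65536" and
+# IMAGE_ROOTFS_EXTRA_SPACE = "0" yields max(100000 * 1.3, 65536) + 0 =
+# 130000K, which is then rounded up to the IMAGE_ROOTFS_ALIGNMENT boundary.
+#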
+def get_rootfs_size(d):
+ import subprocess
+
+ rootfs_alignment = int(d.getVar('IMAGE_ROOTFS_ALIGNMENT', True))
+ overhead_factor = float(d.getVar('IMAGE_OVERHEAD_FACTOR', True))
+ rootfs_req_size = int(d.getVar('IMAGE_ROOTFS_SIZE', True))
+ rootfs_extra_space = eval(d.getVar('IMAGE_ROOTFS_EXTRA_SPACE', True))
+ rootfs_maxsize = d.getVar('IMAGE_ROOTFS_MAXSIZE', True)
+ image_fstypes = d.getVar('IMAGE_FSTYPES', True) or ''
+ initramfs_fstypes = d.getVar('INITRAMFS_FSTYPES', True) or ''
+ initramfs_maxsize = d.getVar('INITRAMFS_MAXSIZE', True)
+
+ output = subprocess.check_output(['du', '-ks',
+ d.getVar('IMAGE_ROOTFS', True)])
+ size_kb = int(output.split()[0])
+ base_size = size_kb * overhead_factor
+ base_size = max(base_size, rootfs_req_size) + rootfs_extra_space
+
+ if base_size != int(base_size):
+ base_size = int(base_size + 1)
+ else:
+ base_size = int(base_size)
+
+ base_size += rootfs_alignment - 1
+ base_size -= base_size % rootfs_alignment
+
+ # Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set)
+ if rootfs_maxsize:
+ rootfs_maxsize_int = int(rootfs_maxsize)
+ if base_size > rootfs_maxsize_int:
+ bb.fatal("The rootfs size %d(K) overrides IMAGE_ROOTFS_MAXSIZE: %d(K)" % \
+ (base_size, rootfs_maxsize_int))
+
+ # Check the initramfs size against INITRAMFS_MAXSIZE (if set)
+ if image_fstypes == initramfs_fstypes != '' and initramfs_maxsize:
+ initramfs_maxsize_int = int(initramfs_maxsize)
+ if base_size > initramfs_maxsize_int:
+ bb.error("The initramfs size %d(K) overrides INITRAMFS_MAXSIZE: %d(K)" % \
+ (base_size, initramfs_maxsize_int))
+ bb.error("You can set INITRAMFS_MAXSIZE a larger value. Usually, it should")
+ bb.fatal("be less than 1/2 of ram size, or you may fail to boot it.\n")
+ return base_size
+
+python set_image_size () {
+ rootfs_size = get_rootfs_size(d)
+ d.setVar('ROOTFS_SIZE', str(rootfs_size))
+ d.setVarFlag('ROOTFS_SIZE', 'export', '1')
+}
+
+#
+# Create symlinks to the newly created image
+#
+python create_symlinks() {
+
+ deploy_dir = d.getVar('DEPLOY_DIR_IMAGE', True)
+ img_name = d.getVar('IMAGE_NAME', True)
+ link_name = d.getVar('IMAGE_LINK_NAME', True)
+ manifest_name = d.getVar('IMAGE_MANIFEST', True)
+ taskname = d.getVar("BB_CURRENTTASK", True)
+ subimages = (d.getVarFlag("do_" + taskname, 'subimages', False) or "").split()
+ imgsuffix = d.getVarFlag("do_" + taskname, 'imgsuffix', True) or d.expand("${IMAGE_NAME_SUFFIX}.")
+ os.chdir(deploy_dir)
+
+ if not link_name:
+ return
+ for type in subimages:
+ if os.path.exists(img_name + imgsuffix + type):
+ dst = deploy_dir + "/" + link_name + "." + type
+ src = img_name + imgsuffix + type
+ bb.note("Creating symlink: %s -> %s" % (dst, src))
+ if os.path.islink(dst):
+ if d.getVar('RM_OLD_IMAGE', True) == "1" and \
+ os.path.exists(os.path.realpath(dst)):
+ os.remove(os.path.realpath(dst))
+ os.remove(dst)
+ os.symlink(src, dst)
+}
+
+MULTILIBRE_ALLOW_REP =. "${base_bindir}|${base_sbindir}|${bindir}|${sbindir}|${libexecdir}|${sysconfdir}|${nonarch_base_libdir}/udev|/lib/modules/[^/]*/modules.*|"
+MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py"
+MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"
+
+do_fetch[noexec] = "1"
+do_unpack[noexec] = "1"
+do_patch[noexec] = "1"
+do_configure[noexec] = "1"
+do_compile[noexec] = "1"
+do_install[noexec] = "1"
+do_populate_sysroot[noexec] = "1"
+do_package[noexec] = "1"
+do_package_qa[noexec] = "1"
+do_packagedata[noexec] = "1"
+do_package_write_ipk[noexec] = "1"
+do_package_write_deb[noexec] = "1"
+do_package_write_rpm[noexec] = "1"
+
+# Allow the kernel to be repacked with the initramfs and boot image file as a single file
+do_bundle_initramfs[depends] += "virtual/kernel:do_bundle_initramfs"
+do_bundle_initramfs[nostamp] = "1"
+do_bundle_initramfs[noexec] = "1"
+do_bundle_initramfs () {
+ :
+}
+addtask bundle_initramfs after do_image_complete
diff --git a/import-layers/yocto-poky/meta/classes/image_types.bbclass b/import-layers/yocto-poky/meta/classes/image_types.bbclass
new file mode 100644
index 000000000..53af7ca8d
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/image_types.bbclass
@@ -0,0 +1,299 @@
+# IMAGE_NAME is the base name for everything produced when building images.
+# The actual image that contains the rootfs has an additional suffix (.rootfs
+# by default) followed by additional suffixes which describe the format (.ext4,
+# .ext4.xz, etc.).
+IMAGE_NAME_SUFFIX ??= ".rootfs"
+
+# The default alignment of the size of the rootfs is set to 1KiB. In case
+# you're using the SD card emulation of a QEMU system simulator you may
+# set this value to 2048 (2MiB alignment).
+IMAGE_ROOTFS_ALIGNMENT ?= "1"
+
+def imagetypes_getdepends(d):
+ def adddep(depstr, deps):
+ for i in (depstr or "").split():
+ if i not in deps:
+ deps.append(i)
+
+ deps = []
+ ctypes = d.getVar('COMPRESSIONTYPES', True).split()
+ fstypes = set((d.getVar('IMAGE_FSTYPES', True) or "").split())
+ fstypes |= set((d.getVar('IMAGE_FSTYPES_DEBUGFS', True) or "").split())
+ for type in fstypes:
+ if type in ["vmdk", "vdi", "qcow2", "hdddirect", "live", "iso", "hddimg"]:
+ type = "ext4"
+ basetype = type
+ for ctype in ctypes:
+ if type.endswith("." + ctype):
+ basetype = type[:-len("." + ctype)]
+ adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype, True), deps)
+ break
+ for typedepends in (d.getVar("IMAGE_TYPEDEP_%s" % basetype, True) or "").split():
+            adddep(d.getVar('IMAGE_DEPENDS_%s' % typedepends, True), deps)
+        adddep(d.getVar('IMAGE_DEPENDS_%s' % basetype, True), deps)
+
+ depstr = ""
+ for dep in deps:
+ depstr += " " + dep + ":do_populate_sysroot"
+ return depstr
+
+
+XZ_COMPRESSION_LEVEL ?= "-e -6"
+XZ_INTEGRITY_CHECK ?= "crc32"
+XZ_THREADS ?= "-T 0"
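+# These can be tuned per build, e.g. for a smaller but slower-to-create
+# image: XZ_COMPRESSION_LEVEL = "-9"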
+
+JFFS2_SUM_EXTRA_ARGS ?= ""
+IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
+
+IMAGE_CMD_cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cramfs ${EXTRA_IMAGECMD}"
+
+oe_mkext234fs () {
+ fstype=$1
+ extra_imagecmd=""
+
+ if [ $# -gt 1 ]; then
+ shift
+ extra_imagecmd=$@
+ fi
+
+ # If generating an empty image the size of the sparse block should be large
+ # enough to allocate an ext4 filesystem using 4096 bytes per inode, this is
+ # about 60K, so dd needs a minimum count of 60, with bs=1024 (bytes per IO)
+ eval local COUNT=\"0\"
+ eval local MIN_COUNT=\"60\"
+ if [ $ROOTFS_SIZE -lt $MIN_COUNT ]; then
+ eval COUNT=\"$MIN_COUNT\"
+ fi
+ # Create a sparse image block
+ dd if=/dev/zero of=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
+ mkfs.$fstype -F $extra_imagecmd ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}
+}
+
+IMAGE_CMD_ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
+IMAGE_CMD_ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
+IMAGE_CMD_ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
+
+MIN_BTRFS_SIZE ?= "16384"
+IMAGE_CMD_btrfs () {
+ if [ ${ROOTFS_SIZE} -gt ${MIN_BTRFS_SIZE} ]; then
+ dd if=/dev/zero of=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs count=${ROOTFS_SIZE} bs=1024
+ mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
+ else
+ bbfatal "Rootfs is too small for BTRFS (Rootfs Actual Size: ${ROOTFS_SIZE}, BTRFS Minimum Size: ${MIN_BTRFS_SIZE})"
+ fi
+}
+
+IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
+IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
+IMAGE_CMD_squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
+
+# By default, tar from the host is used, which can be quite old. If
+# you need special parameters (like --xattrs) which are only supported
+# by GNU tar upstream >= 1.27, then override that default:
+# IMAGE_CMD_TAR = "tar --xattrs --xattrs-include=*"
+# IMAGE_DEPENDS_tar_append = " tar-replacement-native"
+# EXTRANATIVEPATH += "tar-native"
+#
+# The GNU documentation does not specify whether --xattrs-include is necessary.
+# In practice, it turned out not to be needed when creating archives and
+# required when extracting, but it seems prudent to use it in both cases.
+IMAGE_CMD_TAR ?= "tar"
+IMAGE_CMD_tar = "${IMAGE_CMD_TAR} -cvf ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} ."
+
+do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
+IMAGE_CMD_cpio () {
+ (cd ${IMAGE_ROOTFS} && find . | cpio -o -H newc >${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
+ if [ ! -L ${IMAGE_ROOTFS}/init -a ! -e ${IMAGE_ROOTFS}/init ]; then
+ if [ -L ${IMAGE_ROOTFS}/sbin/init -o -e ${IMAGE_ROOTFS}/sbin/init ]; then
+ ln -sf /sbin/init ${WORKDIR}/cpio_append/init
+ else
+ touch ${WORKDIR}/cpio_append/init
+ fi
+ (cd ${WORKDIR}/cpio_append && echo ./init | cpio -oA -H newc -F ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
+ fi
+}
+
+ELF_KERNEL ?= "${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE}"
+ELF_APPEND ?= "ramdisk_size=32768 root=/dev/ram0 rw console="
+
+IMAGE_CMD_elf () {
+ test -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf && rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf
+ mkelfImage --kernel=${ELF_KERNEL} --initrd=${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.cpio.gz --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf --append='${ELF_APPEND}' ${EXTRA_IMAGECMD}
+}
+
+IMAGE_TYPEDEP_elf = "cpio.gz"
+
+UBI_VOLNAME ?= "${MACHINE}-rootfs"
+
+multiubi_mkfs() {
+ local mkubifs_args="$1"
+ local ubinize_args="$2"
+ if [ -z "$3" ]; then
+ local vname=""
+ else
+ local vname="_$3"
+ fi
+
+ echo \[ubifs\] > ubinize${vname}-${IMAGE_NAME}.cfg
+ echo mode=ubi >> ubinize${vname}-${IMAGE_NAME}.cfg
+ echo image=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs >> ubinize${vname}-${IMAGE_NAME}.cfg
+ echo vol_id=0 >> ubinize${vname}-${IMAGE_NAME}.cfg
+ echo vol_type=dynamic >> ubinize${vname}-${IMAGE_NAME}.cfg
+ echo vol_name=${UBI_VOLNAME} >> ubinize${vname}-${IMAGE_NAME}.cfg
+ echo vol_flags=autoresize >> ubinize${vname}-${IMAGE_NAME}.cfg
+ mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ${mkubifs_args}
+ ubinize -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ${ubinize_args} ubinize${vname}-${IMAGE_NAME}.cfg
+
+ # Cleanup cfg file
+ mv ubinize${vname}-${IMAGE_NAME}.cfg ${DEPLOY_DIR_IMAGE}/
+
+ # Create own symlinks for 'named' volumes
+ if [ -n "$vname" ]; then
+ cd ${DEPLOY_DIR_IMAGE}
+ if [ -e ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ]; then
+ ln -sf ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs \
+ ${IMAGE_LINK_NAME}${vname}.ubifs
+ fi
+ if [ -e ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ]; then
+ ln -sf ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi \
+ ${IMAGE_LINK_NAME}${vname}.ubi
+ fi
+ cd -
+ fi
+}
+
+IMAGE_CMD_multiubi () {
+ # Split MKUBIFS_ARGS_<name> and UBINIZE_ARGS_<name>
+ for name in ${MULTIUBI_BUILD}; do
+ eval local mkubifs_args=\"\$MKUBIFS_ARGS_${name}\"
+ eval local ubinize_args=\"\$UBINIZE_ARGS_${name}\"
+
+ multiubi_mkfs "${mkubifs_args}" "${ubinize_args}" "${name}"
+ done
+}
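+
+# For example, to build one extra named UBI volume (all values are
+# illustrative; use the geometry of your flash):
+#   MULTIUBI_BUILD = "data"
+#   MKUBIFS_ARGS_data = "-m 2048 -e 126976 -c 2047"
+#   UBINIZE_ARGS_data = "-m 2048 -p 128KiB -s 512"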
+
+IMAGE_CMD_ubi () {
+ multiubi_mkfs "${MKUBIFS_ARGS}" "${UBINIZE_ARGS}"
+}
+
+IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
+
+WKS_FILE ?= "${IMAGE_BASENAME}.${MACHINE}.wks"
+WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks"
+WKS_SEARCH_PATH ?= "${THISDIR}:${@':'.join('%s/scripts/lib/wic/canned-wks' % l for l in '${BBPATH}:${COREBASE}'.split(':'))}"
+WKS_FULL_PATH = "${@wks_search('${WKS_FILES}'.split(), '${WKS_SEARCH_PATH}') or ''}"
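+# e.g. to use one of the canned kickstart files shipped under
+# scripts/lib/wic/canned-wks: WKS_FILE = "mkefidisk.wks"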
+
+def wks_search(files, search_path):
+ for f in files:
+ if os.path.isabs(f):
+ if os.path.exists(f):
+ return f
+ else:
+ searched = bb.utils.which(search_path, f)
+ if searched:
+ return searched
+
+IMAGE_CMD_wic () {
+ out="${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}"
+ wks="${WKS_FULL_PATH}"
+ if [ -z "$wks" ]; then
+ bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
+ fi
+
+ BUILDDIR="${TOPDIR}" wic create "$wks" --vars "${STAGING_DIR_TARGET}/imgdata/" -e "${IMAGE_BASENAME}" -o "$out/"
+ mv "$out/build/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
+ rm -rf "$out/"
+}
+IMAGE_CMD_wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES"
+
+# Rebuild when the wks file or vars in WICVARS change
+USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${COMPRESSIONTYPES}'.split()), '1', '', d)}"
+do_image_wic[file-checksums] += "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}"
+
+EXTRA_IMAGECMD = ""
+
+inherit siteinfo
+JFFS2_ENDIANNESS ?= "${@base_conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
+JFFS2_ERASEBLOCK ?= "0x40000"
+EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
+
+# Change these if you want the default mkfs behavior (i.e. a minimal number of inodes)
+EXTRA_IMAGECMD_ext2 ?= "-i 4096"
+EXTRA_IMAGECMD_ext3 ?= "-i 4096"
+EXTRA_IMAGECMD_ext4 ?= "-i 4096"
+EXTRA_IMAGECMD_btrfs ?= "-n 4096"
+EXTRA_IMAGECMD_elf ?= ""
+
+IMAGE_DEPENDS = ""
+IMAGE_DEPENDS_jffs2 = "mtd-utils-native"
+IMAGE_DEPENDS_cramfs = "util-linux-native"
+IMAGE_DEPENDS_ext2 = "e2fsprogs-native"
+IMAGE_DEPENDS_ext3 = "e2fsprogs-native"
+IMAGE_DEPENDS_ext4 = "e2fsprogs-native"
+IMAGE_DEPENDS_btrfs = "btrfs-tools-native"
+IMAGE_DEPENDS_squashfs = "squashfs-tools-native"
+IMAGE_DEPENDS_squashfs-xz = "squashfs-tools-native"
+IMAGE_DEPENDS_squashfs-lzo = "squashfs-tools-native"
+IMAGE_DEPENDS_elf = "virtual/kernel mkelfimage-native"
+IMAGE_DEPENDS_ubi = "mtd-utils-native"
+IMAGE_DEPENDS_ubifs = "mtd-utils-native"
+IMAGE_DEPENDS_multiubi = "mtd-utils-native"
+IMAGE_DEPENDS_wic = "parted-native"
+
+# This variable lists the values that are suitable for use in IMAGE_FSTYPES
+IMAGE_TYPES = " \
+ jffs2 jffs2.sum \
+ cramfs \
+ ext2 ext2.gz ext2.bz2 ext2.lzma \
+ ext3 ext3.gz \
+ ext4 ext4.gz \
+ btrfs \
+ iso \
+ hddimg \
+ squashfs squashfs-xz squashfs-lzo \
+ ubi ubifs multiubi \
+ tar tar.gz tar.bz2 tar.xz tar.lz4 \
+ cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 \
+ vmdk \
+ vdi \
+ qcow2 \
+ hdddirect \
+ elf \
+ wic wic.gz wic.bz2 wic.lzma \
+"
+
+COMPRESSIONTYPES = "gz bz2 lzma xz lz4 sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum"
+COMPRESS_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+COMPRESS_CMD_gz = "gzip -f -9 -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
+COMPRESS_CMD_bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+COMPRESS_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_THREADS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
+COMPRESS_CMD_lz4 = "lz4c -9 -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
+COMPRESS_CMD_sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
+COMPRESS_CMD_md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
+COMPRESS_CMD_sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
+COMPRESS_CMD_sha224sum = "sha224sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha224sum"
+COMPRESS_CMD_sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha256sum"
+COMPRESS_CMD_sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
+COMPRESS_CMD_sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
+COMPRESS_DEPENDS_lzma = "xz-native"
+COMPRESS_DEPENDS_gz = ""
+COMPRESS_DEPENDS_bz2 = "pbzip2-native"
+COMPRESS_DEPENDS_xz = "xz-native"
+COMPRESS_DEPENDS_lz4 = "lz4-native"
+COMPRESS_DEPENDS_sum = "mtd-utils-native"
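+
+# Layers can plug in further conversion types by following the same
+# pattern; a sketch ("zip" chosen purely as an illustration):
+#   COMPRESSIONTYPES += "zip"
+#   COMPRESS_CMD_zip = "zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+#   COMPRESS_DEPENDS_zip = "zip-native"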
+
+RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4"
+RUNNABLE_MACHINE_PATTERNS ?= "qemu"
+
+DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
+
+# Use IMAGE_EXTENSION_xxx to map image type 'xxx' to its real image file extension name(s) for Hob
+IMAGE_EXTENSION_live = "hddimg iso"
+
+# The IMAGE_TYPES_MASKED variable is used to mask out, from IMAGE_FSTYPES,
+# images that will not be built at do_rootfs time: vmdk, vdi, qcow2, hdddirect, hddimg, iso, etc.
+IMAGE_TYPES_MASKED ?= ""
+
+# The WICVARS variable is used to define the list of bitbake variables used in wic code;
+# variables from this list are written to the <image>.env file
+WICVARS ?= "BBLAYERS DEPLOY_DIR_IMAGE HDDDIR IMAGE_BASENAME IMAGE_BOOT_FILES IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD ISODIR MACHINE_ARCH ROOTFS_SIZE STAGING_DATADIR STAGING_DIR_NATIVE STAGING_LIBDIR TARGET_SYS"
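+# Layers that need extra variables in the .env file can append to this
+# list, e.g.: WICVARS_append = " KERNEL_IMAGETYPE"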
diff --git a/import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass b/import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass
new file mode 100644
index 000000000..19e4aa2e4
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass
@@ -0,0 +1,26 @@
+inherit image_types kernel-arch
+
+oe_mkimage () {
+ mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C $2 -n ${IMAGE_NAME} \
+ -d ${DEPLOY_DIR_IMAGE}/$1 ${DEPLOY_DIR_IMAGE}/$1.u-boot
+ if [ x$3 = x"clean" ]; then
+ rm $1
+ fi
+}
+
+COMPRESSIONTYPES += "gz.u-boot bz2.u-boot lzma.u-boot u-boot"
+
+COMPRESS_DEPENDS_u-boot = "u-boot-mkimage-native"
+COMPRESS_CMD_u-boot = "oe_mkimage ${IMAGE_NAME}.rootfs.${type} none"
+
+COMPRESS_DEPENDS_gz.u-boot = "u-boot-mkimage-native"
+COMPRESS_CMD_gz.u-boot = "${COMPRESS_CMD_gz}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.gz gzip clean"
+
+COMPRESS_DEPENDS_bz2.u-boot = "u-boot-mkimage-native"
+COMPRESS_CMD_bz2.u-boot = "${COMPRESS_CMD_bz2}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.bz2 bzip2 clean"
+
+COMPRESS_DEPENDS_lzma.u-boot = "u-boot-mkimage-native"
+COMPRESS_CMD_lzma.u-boot = "${COMPRESS_CMD_lzma}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.lzma lzma clean"
+
+IMAGE_TYPES += "ext2.u-boot ext2.gz.u-boot ext2.bz2.u-boot ext2.lzma.u-boot ext3.gz.u-boot ext4.gz.u-boot cpio.gz.u-boot"
+
diff --git a/import-layers/yocto-poky/meta/classes/insane.bbclass b/import-layers/yocto-poky/meta/classes/insane.bbclass
new file mode 100644
index 000000000..c57b21735
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/insane.bbclass
@@ -0,0 +1,1319 @@
+# BB Class inspired by ebuild.sh
+#
+# This class will test files after installation for certain
+# security issues and other kinds of issues.
+#
+# Checks we do:
+# -Check the ownership and permissions
+# -Check the RUNTIME path for the $TMPDIR
+# -Check if .la files wrongly point to workdir
+# -Check if .pc files wrongly point to workdir
+# -Check if packages contain .debug directories or .so files
+# where they should be in -dev or -dbg
+# -Check if config.log contains traces to broken autoconf tests
+# -Check for invalid characters (non-utf8) in some package metadata
+# -Ensure that binaries in base_[bindir|sbindir|libdir] do not link
+# into exec_prefix
+# -Check that scripts in base_[bindir|sbindir|libdir] do not reference
+# files under exec_prefix
+
+
+# unsafe-references-in-binaries requires prelink-rtld from
+# prelink-native, but we don't want this DEPENDS for -native builds
+QADEPENDS = "prelink-native"
+QADEPENDS_class-native = ""
+QADEPENDS_class-nativesdk = ""
+QA_SANE = "True"
+
+# Select whether a given type of error is a warning or an error; they may
+# have been set by other files.
+WARN_QA ?= "ldflags useless-rpaths rpaths staticdev libdir xorg-driver-abi \
+ textrel already-stripped incompatible-license files-invalid \
+ installed-vs-shipped compile-host-path install-host-path \
+ pn-overrides infodir build-deps file-rdeps \
+ unknown-configure-option symlink-to-sysroot multilib \
+ invalid-packageconfig host-user-contaminated \
+ "
+ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
+ perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
+ split-strip packages-list pkgv-undefined var-undefined \
+ version-going-backwards expanded-d invalid-chars \
+ license-checksum dev-elf \
+ "
+FAKEROOT_QA = "host-user-contaminated"
+FAKEROOT_QA[doc] = "QA tests which need to run under fakeroot. If any \
+enabled tests are listed here, the do_package_qa task will run under fakeroot."
+
+ALL_QA = "${WARN_QA} ${ERROR_QA}"
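+
+# Individual recipes can opt out of specific tests with INSANE_SKIP, e.g.:
+#   INSANE_SKIP_${PN} += "ldflags"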
+
+UNKNOWN_CONFIGURE_WHITELIST ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot --disable-static"
+
+#
+# dictionary for elf headers
+#
+# feel free to add and correct.
+#
+# TARGET_OS TARGET_ARCH MACHINE, OSABI, ABIVERSION, Little Endian, 32bit?
+def package_qa_get_machine_dict():
+ return {
+ "darwin9" : {
+ "arm" : (40, 0, 0, True, 32),
+ },
+ "eabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ },
+ "elf" : {
+ "i586" : (3, 0, 0, True, 32),
+ "x86_64": (62, 0, 0, True, 64),
+ "epiphany": (4643, 0, 0, True, 32),
+ },
+ "linux" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "aarch64_be" :(183, 0, 0, False, 64),
+ "arm" : (40, 97, 0, True, 32),
+ "armeb": (40, 97, 0, False, 32),
+ "powerpc": (20, 0, 0, False, 32),
+ "powerpc64": (21, 0, 0, False, 64),
+ "i386": ( 3, 0, 0, True, 32),
+ "i486": ( 3, 0, 0, True, 32),
+ "i586": ( 3, 0, 0, True, 32),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": (62, 0, 0, True, 64),
+ "ia64": (50, 0, 0, True, 64),
+ "alpha": (36902, 0, 0, True, 64),
+ "hppa": (15, 3, 0, False, 32),
+ "m68k": ( 4, 0, 0, False, 32),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "mips64": ( 8, 0, 0, False, 64),
+ "mips64el": ( 8, 0, 0, True, 64),
+ "nios2": (113, 0, 0, True, 32),
+ "s390": (22, 0, 0, False, 32),
+ "sh4": (42, 0, 0, True, 32),
+ "sparc": ( 2, 0, 0, False, 32),
+ "microblaze": (189, 0, 0, False, 32),
+ "microblazeeb":(189, 0, 0, False, 32),
+ "microblazeel":(189, 0, 0, True, 32),
+ },
+ "linux-uclibc" : {
+ "arm" : ( 40, 97, 0, True, 32),
+ "armeb": ( 40, 97, 0, False, 32),
+ "powerpc": ( 20, 0, 0, False, 32),
+ "i386": ( 3, 0, 0, True, 32),
+ "i486": ( 3, 0, 0, True, 32),
+ "i586": ( 3, 0, 0, True, 32),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": ( 62, 0, 0, True, 64),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "mips64": ( 8, 0, 0, False, 64),
+ "mips64el": ( 8, 0, 0, True, 64),
+ "avr32": (6317, 0, 0, False, 32),
+ "sh4": (42, 0, 0, True, 32),
+
+ },
+ "linux-musl" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "aarch64_be" :(183, 0, 0, False, 64),
+ "arm" : ( 40, 97, 0, True, 32),
+ "armeb": ( 40, 97, 0, False, 32),
+ "powerpc": ( 20, 0, 0, False, 32),
+ "i386": ( 3, 0, 0, True, 32),
+ "i486": ( 3, 0, 0, True, 32),
+ "i586": ( 3, 0, 0, True, 32),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": ( 62, 0, 0, True, 64),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "mips64": ( 8, 0, 0, False, 64),
+ "mips64el": ( 8, 0, 0, True, 64),
+ "microblaze": (189, 0, 0, False, 32),
+ "microblazeeb":(189, 0, 0, False, 32),
+ "microblazeel":(189, 0, 0, True, 32),
+ },
+ "uclinux-uclibc" : {
+ "bfin": ( 106, 0, 0, True, 32),
+ },
+ "linux-gnueabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ "armeb" : (40, 0, 0, False, 32),
+ },
+ "linux-musleabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ "armeb" : (40, 0, 0, False, 32),
+ },
+ "linux-uclibceabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ "armeb" : (40, 0, 0, False, 32),
+ },
+ "linux-gnuspe" : {
+ "powerpc": (20, 0, 0, False, 32),
+ },
+ "linux-muslspe" : {
+ "powerpc": (20, 0, 0, False, 32),
+ },
+ "linux-uclibcspe" : {
+ "powerpc": (20, 0, 0, False, 32),
+ },
+ "linux-gnu" : {
+ "powerpc": (20, 0, 0, False, 32),
+ "sh4": (42, 0, 0, True, 32),
+ },
+ "linux-gnux32" : {
+ "x86_64": (62, 0, 0, True, 32),
+ },
+ "linux-gnun32" : {
+ "mips64": ( 8, 0, 0, False, 32),
+ "mips64el": ( 8, 0, 0, True, 32),
+ },
+ }
+
+
+def package_qa_clean_path(path,d):
+ """ Remove the common prefix from the path. In this case it is the TMPDIR"""
+ return path.replace(d.getVar("TMPDIR", True) + "/", "")
+
+def package_qa_write_error(type, error, d):
+ logfile = d.getVar('QA_LOGFILE', True)
+ if logfile:
+ p = d.getVar('P', True)
+        with open(logfile, "a+") as f:
+            f.write("%s: %s [%s]\n" % (p, error, type))
+
+def package_qa_handle_error(error_class, error_msg, d):
+ package_qa_write_error(error_class, error_msg, d)
+ if error_class in (d.getVar("ERROR_QA", True) or "").split():
+ bb.error("QA Issue: %s [%s]" % (error_msg, error_class))
+ d.setVar("QA_SANE", False)
+ return False
+ elif error_class in (d.getVar("WARN_QA", True) or "").split():
+ bb.warn("QA Issue: %s [%s]" % (error_msg, error_class))
+ else:
+ bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
+ return True
+
+def package_qa_add_message(messages, section, new_msg):
+ if section not in messages:
+ messages[section] = new_msg
+ else:
+ messages[section] = messages[section] + "\n" + new_msg
+
+QAPATHTEST[libexec] = "package_qa_check_libexec"
+def package_qa_check_libexec(path,name, d, elf, messages):
+
+ # Skip the case where the default is explicitly /usr/libexec
+ libexec = d.getVar('libexecdir', True)
+ if libexec == "/usr/libexec":
+ return True
+
+ if 'libexec' in path.split(os.path.sep):
+ package_qa_add_message(messages, "libexec", "%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec))
+ return False
+
+ return True
+
+QAPATHTEST[rpaths] = "package_qa_check_rpath"
+def package_qa_check_rpath(file,name, d, elf, messages):
+ """
+ Check for dangerous RPATHs
+ """
+ if not elf:
+ return
+
+ if os.path.islink(file):
+ return
+
+ bad_dirs = [d.getVar('BASE_WORKDIR', True), d.getVar('STAGING_DIR_TARGET', True)]
+
+ phdrs = elf.run_objdump("-p", d)
+
+ import re
+ rpath_re = re.compile("\s+RPATH\s+(.*)")
+ for line in phdrs.split("\n"):
+ m = rpath_re.match(line)
+ if m:
+ rpath = m.group(1)
+ for dir in bad_dirs:
+ if dir in rpath:
+ package_qa_add_message(messages, "rpaths", "package %s contains bad RPATH %s in file %s" % (name, rpath, file))
+
+QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
+def package_qa_check_useless_rpaths(file, name, d, elf, messages):
+ """
+ Check for RPATHs that are useless but not dangerous
+ """
+ def rpath_eq(a, b):
+ return os.path.normpath(a) == os.path.normpath(b)
+
+ if not elf:
+ return
+
+ if os.path.islink(file):
+ return
+
+ libdir = d.getVar("libdir", True)
+ base_libdir = d.getVar("base_libdir", True)
+
+ phdrs = elf.run_objdump("-p", d)
+
+ import re
+ rpath_re = re.compile("\s+RPATH\s+(.*)")
+ for line in phdrs.split("\n"):
+ m = rpath_re.match(line)
+ if m:
+ rpath = m.group(1)
+ if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir):
+ # The dynamic linker searches both these places anyway. There is no point in
+ # looking there again.
+ package_qa_add_message(messages, "useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath))
+
+QAPATHTEST[dev-so] = "package_qa_check_dev"
+def package_qa_check_dev(path, name, d, elf, messages):
+ """
+ Check for ".so" library symlinks in non-dev packages
+ """
+
+ if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path):
+ package_qa_add_message(messages, "dev-so", "non -dev/-dbg/nativesdk- package contains symlink .so: %s path '%s'" % \
+ (name, package_qa_clean_path(path,d)))
+
+QAPATHTEST[dev-elf] = "package_qa_check_dev_elf"
+def package_qa_check_dev_elf(path, name, d, elf, messages):
+ """
+ Check that -dev doesn't contain real shared libraries. The test has to
+ check that the file is not a link and is an ELF object as some recipes
+ install link-time .so files that are linker scripts.
+ """
+ if name.endswith("-dev") and path.endswith(".so") and not os.path.islink(path) and elf:
+ package_qa_add_message(messages, "dev-elf", "-dev package contains non-symlink .so: %s path '%s'" % \
+ (name, package_qa_clean_path(path,d)))
+
+QAPATHTEST[staticdev] = "package_qa_check_staticdev"
+def package_qa_check_staticdev(path, name, d, elf, messages):
+ """
+ Check for ".a" library in non-staticdev packages
+    There are a number of exceptions to this rule: -pic packages can contain
+    static libraries, the _nonshared.a files belong with their -dev packages,
+    and libgcc.a and libgcov.a will be skipped in their packages
+ """
+
+ if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a"):
+ package_qa_add_message(messages, "staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \
+ (name, package_qa_clean_path(path,d)))
+
+def package_qa_check_libdir(d):
+ """
+ Check for wrong library installation paths. For instance, catch
+ recipes installing /lib/bar.so when ${base_libdir}="lib32" or
+ installing in /usr/lib64 when ${libdir}="/usr/lib"
+ """
+ import re
+
+ pkgdest = d.getVar('PKGDEST', True)
+ base_libdir = d.getVar("base_libdir",True) + os.sep
+ libdir = d.getVar("libdir", True) + os.sep
+ libexecdir = d.getVar("libexecdir", True) + os.sep
+ exec_prefix = d.getVar("exec_prefix", True) + os.sep
+
+ messages = []
+
+    # The re's are purposely fuzzy, as there are some .so.x.y.z files
+    # that don't follow the standard naming convention. It is checked later
+    # that they are actual ELF files
+ lib_re = re.compile("^/lib.+\.so(\..+)?$")
+ exec_re = re.compile("^%s.*/lib.+\.so(\..+)?$" % exec_prefix)
+
+ for root, dirs, files in os.walk(pkgdest):
+ if root == pkgdest:
+ # Skip subdirectories for any packages with libdir in INSANE_SKIP
+ skippackages = []
+ for package in dirs:
+ if 'libdir' in (d.getVar('INSANE_SKIP_' + package, True) or "").split():
+ bb.note("Package %s skipping libdir QA test" % (package))
+ skippackages.append(package)
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-file-directory' and package.endswith("-dbg"):
+ bb.note("Package %s skipping libdir QA test for PACKAGE_DEBUG_SPLIT_STYLE equals debug-file-directory" % (package))
+ skippackages.append(package)
+ for package in skippackages:
+ dirs.remove(package)
+ for file in files:
+ full_path = os.path.join(root, file)
+ rel_path = os.path.relpath(full_path, pkgdest)
+ if os.sep in rel_path:
+ package, rel_path = rel_path.split(os.sep, 1)
+ rel_path = os.sep + rel_path
+ if lib_re.match(rel_path):
+ if base_libdir not in rel_path:
+ # make sure it's an actual ELF file
+ elf = oe.qa.ELFFile(full_path)
+ try:
+ elf.open()
+ messages.append("%s: found library in wrong location: %s" % (package, rel_path))
+ except (oe.qa.NotELFFileError):
+ pass
+ if exec_re.match(rel_path):
+ if libdir not in rel_path and libexecdir not in rel_path:
+ # make sure it's an actual ELF file
+ elf = oe.qa.ELFFile(full_path)
+ try:
+ elf.open()
+ messages.append("%s: found library in wrong location: %s" % (package, rel_path))
+ except (oe.qa.NotELFFileError):
+ pass
+
+ if messages:
+ package_qa_handle_error("libdir", "\n".join(messages), d)
+
+QAPATHTEST[debug-files] = "package_qa_check_dbg"
+def package_qa_check_dbg(path, name, d, elf, messages):
+ """
+ Check for ".debug" files or directories outside of the dbg package
+ """
+
+ if not "-dbg" in name and not "-ptest" in name:
+ if '.debug' in path.split(os.path.sep):
+ messages("debug-files", "non debug package contains .debug directory: %s path %s" % \
+ (name, package_qa_clean_path(path,d)))
+
+QAPATHTEST[perms] = "package_qa_check_perm"
+def package_qa_check_perm(path,name,d, elf, messages):
+ """
+ Check the permission of files
+ """
+ return
+
+QAPATHTEST[unsafe-references-in-binaries] = "package_qa_check_unsafe_references_in_binaries"
+def package_qa_check_unsafe_references_in_binaries(path, name, d, elf, messages):
+ """
+ Ensure binaries in base_[bindir|sbindir|libdir] do not link to files under exec_prefix
+ """
+ if unsafe_references_skippable(path, name, d):
+ return
+
+ if elf:
+ import subprocess as sub
+ pn = d.getVar('PN', True)
+
+ exec_prefix = d.getVar('exec_prefix', True)
+ sysroot_path = d.getVar('STAGING_DIR_TARGET', True)
+ sysroot_path_usr = sysroot_path + exec_prefix
+
+ try:
+ ldd_output = bb.process.Popen(["prelink-rtld", "--root", sysroot_path, path], stdout=sub.PIPE).stdout.read()
+ except bb.process.CmdError:
+ error_msg = pn + ": prelink-rtld aborted when processing %s" % path
+ package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
+ return False
+
+ if sysroot_path_usr in ldd_output:
+ ldd_output = ldd_output.replace(sysroot_path, "")
+
+ pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar('PACKAGES', True)
+
+ for package in packages.split():
+ short_path = path.replace('%s/%s' % (pkgdest, package), "", 1)
+ if (short_path != path):
+ break
+
+ base_err = pn + ": %s, installed in the base_prefix, requires a shared library under exec_prefix (%s)" % (short_path, exec_prefix)
+ for line in ldd_output.split('\n'):
+ if exec_prefix in line:
+ error_msg = "%s: %s" % (base_err, line.strip())
+ package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
+
+ return False
+
+QAPATHTEST[unsafe-references-in-scripts] = "package_qa_check_unsafe_references_in_scripts"
+def package_qa_check_unsafe_references_in_scripts(path, name, d, elf, messages):
+ """
+ Warn if scripts in base_[bindir|sbindir|libdir] reference files under exec_prefix
+ """
+ if unsafe_references_skippable(path, name, d):
+ return
+
+ if not elf:
+ import stat
+ import subprocess
+ pn = d.getVar('PN', True)
+
+ # Ensure we're checking an executable script
+ statinfo = os.stat(path)
+ if bool(statinfo.st_mode & stat.S_IXUSR):
+ # grep shell scripts for possible references to /exec_prefix/
+ exec_prefix = d.getVar('exec_prefix', True)
+ statement = "grep -e '%s/[^ :]\{1,\}/[^ :]\{1,\}' %s > /dev/null" % (exec_prefix, path)
+ if subprocess.call(statement, shell=True) == 0:
+ error_msg = pn + ": Found a reference to %s/ in %s" % (exec_prefix, path)
+ package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
+ error_msg = "Shell scripts in base_bindir and base_sbindir should not reference anything in exec_prefix"
+ package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
+
+def unsafe_references_skippable(path, name, d):
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d):
+ return True
+
+ if "-dbg" in name or "-dev" in name:
+ return True
+
+ # Other package names to skip:
+ if name.startswith("kernel-module-"):
+ return True
+
+ # Skip symlinks
+ if os.path.islink(path):
+ return True
+
+ # Skip unusual rootfs layouts which make these tests irrelevant
+ exec_prefix = d.getVar('exec_prefix', True)
+ if exec_prefix == "":
+ return True
+
+ pkgdest = d.getVar('PKGDEST', True)
+ pkgdest = pkgdest + "/" + name
+ pkgdest = os.path.abspath(pkgdest)
+ base_bindir = pkgdest + d.getVar('base_bindir', True)
+ base_sbindir = pkgdest + d.getVar('base_sbindir', True)
+ base_libdir = pkgdest + d.getVar('base_libdir', True)
+ bindir = pkgdest + d.getVar('bindir', True)
+ sbindir = pkgdest + d.getVar('sbindir', True)
+ libdir = pkgdest + d.getVar('libdir', True)
+
+ if base_bindir == bindir and base_sbindir == sbindir and base_libdir == libdir:
+ return True
+
+ # Skip files not in base_[bindir|sbindir|libdir]
+ path = os.path.abspath(path)
+ if not (base_bindir in path or base_sbindir in path or base_libdir in path):
+ return True
+
+ return False
+
+QAPATHTEST[arch] = "package_qa_check_arch"
+def package_qa_check_arch(path,name,d, elf, messages):
+ """
+ Check if archs are compatible
+ """
+ if not elf:
+ return
+
+ target_os = d.getVar('TARGET_OS', True)
+ target_arch = d.getVar('TARGET_ARCH', True)
+ provides = d.getVar('PROVIDES', True)
+ bpn = d.getVar('BPN', True)
+
+ if target_arch == "allarch":
+ pn = d.getVar('PN', True)
+ package_qa_add_message(messages, "arch", pn + ": Recipe inherits the allarch class, but has packaged architecture-specific binaries")
+ return
+
+    # FIXME: Cross packages confuse this check, so just skip them
+ for s in ['cross', 'nativesdk', 'cross-canadian']:
+ if bb.data.inherits_class(s, d):
+ return
+
+ # avoid following links to /usr/bin (e.g. on udev builds)
+ # we will check the files pointed to anyway...
+ if os.path.islink(path):
+ return
+
+    # if this throws an exception, then fix the dict above
+ (machine, osabi, abiversion, littleendian, bits) \
+ = package_qa_get_machine_dict()[target_os][target_arch]
+
+    # Check the architecture and endianness of the binary
+ if not ((machine == elf.machine()) or \
+ ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32" or target_os == "linux-gnun32"))):
+ package_qa_add_message(messages, "arch", "Architecture did not match (%d to %d) on %s" % \
+ (machine, elf.machine(), package_qa_clean_path(path,d)))
+ elif not ((bits == elf.abiSize()) or \
+ ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32" or target_os == "linux-gnun32"))):
+ package_qa_add_message(messages, "arch", "Bit size did not match (%d to %d) %s on %s" % \
+ (bits, elf.abiSize(), bpn, package_qa_clean_path(path,d)))
+ elif not littleendian == elf.isLittleEndian():
+ package_qa_add_message(messages, "arch", "Endiannes did not match (%d to %d) on %s" % \
+ (littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d)))
+
+QAPATHTEST[desktop] = "package_qa_check_desktop"
+def package_qa_check_desktop(path, name, d, elf, messages):
+ """
+ Run all desktop files through desktop-file-validate.
+ """
+ if path.endswith(".desktop"):
+ desktop_file_validate = os.path.join(d.getVar('STAGING_BINDIR_NATIVE',True),'desktop-file-validate')
+ output = os.popen("%s %s" % (desktop_file_validate, path))
+ # This only produces output on errors
+ for l in output:
+ package_qa_add_message(messages, "desktop", "Desktop file issue: " + l.strip())
+
+QAPATHTEST[textrel] = "package_qa_textrel"
+def package_qa_textrel(path, name, d, elf, messages):
+ """
+ Check if the binary contains relocations in .text
+ """
+
+ if not elf:
+ return
+
+ if os.path.islink(path):
+ return
+
+ phdrs = elf.run_objdump("-p", d)
+ sane = True
+
+ import re
+ textrel_re = re.compile("\s+TEXTREL\s+")
+ for line in phdrs.split("\n"):
+ if textrel_re.match(line):
+ sane = False
+
+ if not sane:
+ package_qa_add_message(messages, "textrel", "ELF binary '%s' has relocations in .text" % path)
+
+QAPATHTEST[ldflags] = "package_qa_hash_style"
+def package_qa_hash_style(path, name, d, elf, messages):
+ """
+ Check if the binary has the right hash style...
+ """
+
+ if not elf:
+ return
+
+ if os.path.islink(path):
+ return
+
+ gnu_hash = "--hash-style=gnu" in d.getVar('LDFLAGS', True)
+ if not gnu_hash:
+ gnu_hash = "--hash-style=both" in d.getVar('LDFLAGS', True)
+ if not gnu_hash:
+ return
+
+ sane = False
+ has_syms = False
+
+ phdrs = elf.run_objdump("-p", d)
+
+ # If this binary has symbols, we expect it to have GNU_HASH too.
+ for line in phdrs.split("\n"):
+ if "SYMTAB" in line:
+ has_syms = True
+ if "GNU_HASH" in line:
+ sane = True
+ if "[mips32]" in line or "[mips64]" in line:
+ sane = True
+
+ if has_syms and not sane:
+ package_qa_add_message(messages, "ldflags", "No GNU_HASH in the elf binary: '%s'" % path)
+
+
+QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
+def package_qa_check_buildpaths(path, name, d, elf, messages):
+ """
+ Check for build paths inside target files and error if not found in the whitelist
+ """
+ # Ignore .debug files, not interesting
+ if path.find(".debug") != -1:
+ return
+
+ # Ignore symlinks
+ if os.path.islink(path):
+ return
+
+ # Ignore ipk and deb's CONTROL dir
+ if path.find(name + "/CONTROL/") != -1 or path.find(name + "/DEBIAN/") != -1:
+ return
+
+ tmpdir = d.getVar('TMPDIR', True)
+ with open(path) as f:
+ file_content = f.read()
+ if tmpdir in file_content:
+ package_qa_add_message(messages, "buildpaths", "File %s in package contained reference to tmpdir" % package_qa_clean_path(path,d))
+
+
+QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi"
+def package_qa_check_xorg_driver_abi(path, name, d, elf, messages):
+ """
+ Check that all packages containing Xorg drivers have ABI dependencies
+ """
+
+ # Skip dev, dbg or nativesdk packages
+ if name.endswith("-dev") or name.endswith("-dbg") or name.startswith("nativesdk-"):
+ return
+
+ driverdir = d.expand("${libdir}/xorg/modules/drivers/")
+ if driverdir in path and path.endswith(".so"):
+ mlprefix = d.getVar('MLPREFIX', True) or ''
+ for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + name, True) or ""):
+ if rdep.startswith("%sxorg-abi-" % mlprefix):
+ return
+ package_qa_add_message(messages, "xorg-driver-abi", "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
+
+QAPATHTEST[infodir] = "package_qa_check_infodir"
+def package_qa_check_infodir(path, name, d, elf, messages):
+ """
+ Check that /usr/share/info/dir isn't shipped in a particular package
+ """
+ infodir = d.expand("${infodir}/dir")
+
+ if infodir in path:
+ package_qa_add_message(messages, "infodir", "The /usr/share/info/dir file is not meant to be shipped in a particular package.")
+
+QAPATHTEST[symlink-to-sysroot] = "package_qa_check_symlink_to_sysroot"
+def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
+ """
+ Check that the package doesn't contain any absolute symlinks to the sysroot.
+ """
+ if os.path.islink(path):
+ target = os.readlink(path)
+ if os.path.isabs(target):
+ tmpdir = d.getVar('TMPDIR', True)
+ if target.startswith(tmpdir):
+ trimmed = path.replace(os.path.join (d.getVar("PKGDEST", True), name), "")
+ package_qa_add_message(messages, "symlink-to-sysroot", "Symlink %s in %s points to TMPDIR" % (trimmed, name))
+
+def package_qa_check_license(workdir, d):
+ """
+ Check for changes in the license files
+ """
+ import tempfile
+ sane = True
+
+ lic_files = d.getVar('LIC_FILES_CHKSUM', True)
+ lic = d.getVar('LICENSE', True)
+ pn = d.getVar('PN', True)
+
+ if lic == "CLOSED":
+ return
+
+ if not lic_files:
+ package_qa_handle_error("license-checksum", pn + ": Recipe file does not have license file information (LIC_FILES_CHKSUM)", d)
+ return
+
+ srcdir = d.getVar('S', True)
+
+ for url in lic_files.split():
+ try:
+ (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
+ except bb.fetch.MalformedUrl:
+ package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
+ continue
+ srclicfile = os.path.join(srcdir, path)
+ if not os.path.isfile(srclicfile):
+ package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
+ continue
+
+ recipemd5 = parm.get('md5', '')
+ beginline, endline = 0, 0
+ if 'beginline' in parm:
+ beginline = int(parm['beginline'])
+ if 'endline' in parm:
+ endline = int(parm['endline'])
+
+ if (not beginline) and (not endline):
+ md5chksum = bb.utils.md5_file(srclicfile)
+ else:
+ fi = open(srclicfile, 'rb')
+ fo = tempfile.NamedTemporaryFile(mode='wb', prefix='poky.', suffix='.tmp', delete=False)
+            tmplicfile = fo.name
+ lineno = 0
+ linesout = 0
+ for line in fi:
+ lineno += 1
+ if (lineno >= beginline):
+ if ((lineno <= endline) or not endline):
+ fo.write(line)
+ linesout += 1
+ else:
+ break
+ fo.flush()
+ fo.close()
+ fi.close()
+ md5chksum = bb.utils.md5_file(tmplicfile)
+ os.unlink(tmplicfile)
+
+ if recipemd5 == md5chksum:
+            bb.note(pn + ": md5 checksum matched for " + url)
+ else:
+ if recipemd5:
+ msg = pn + ": The LIC_FILES_CHKSUM does not match for " + url
+ msg = msg + "\n" + pn + ": The new md5 checksum is " + md5chksum
+ if beginline:
+ if endline:
+ srcfiledesc = "%s (lines %d through to %d)" % (srclicfile, beginline, endline)
+ else:
+ srcfiledesc = "%s (beginning on line %d)" % (srclicfile, beginline)
+ elif endline:
+ srcfiledesc = "%s (ending on line %d)" % (srclicfile, endline)
+ else:
+ srcfiledesc = srclicfile
+ msg = msg + "\n" + pn + ": Check if the license information has changed in %s to verify that the LICENSE value \"%s\" remains valid" % (srcfiledesc, lic)
+
+ else:
+ msg = pn + ": LIC_FILES_CHKSUM is not specified for " + url
+ msg = msg + "\n" + pn + ": The md5 checksum is " + md5chksum
+ package_qa_handle_error("license-checksum", msg, d)
+
+def package_qa_check_staged(path,d):
+ """
+ Check staged la and pc files for common problems like references to the work
+ directory.
+
+ As this is run after every stage we should be able to find the one
+ responsible for the errors easily even if we look at every .pc and .la file.
+ """
+
+ sane = True
+ tmpdir = d.getVar('TMPDIR', True)
+ workdir = os.path.join(tmpdir, "work")
+
+ if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
+ pkgconfigcheck = workdir
+ else:
+ pkgconfigcheck = tmpdir
+
+ # find all .la and .pc files
+ # read the content
+ # and check for stuff that looks wrong
+ for root, dirs, files in os.walk(path):
+ for file in files:
+ path = os.path.join(root,file)
+ if file.endswith(".la"):
+ with open(path) as f:
+ file_content = f.read()
+ if workdir in file_content:
+ error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
+ sane = package_qa_handle_error("la", error_msg, d)
+ elif file.endswith(".pc"):
+ with open(filepath) as f:
+ file_content = f.read()
+ if pkgconfigcheck in file_content:
+ error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
+ sane = package_qa_handle_error("pkgconfig", error_msg, d)
+
+ return sane
+
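As an illustration of what the "la" test catches, a hypothetical staged libfoo.la might still carry a build-tree path (the path below is invented; it trips the check whenever TMPDIR's work directory appears in the file):

    # libfoo.la fragment that would fail the sanity test, assuming
    # TMPDIR=/build/tmp:
    libdir='/build/tmp/work/armv7a-poky-linux-gnueabi/libfoo/1.0-r0/image/usr/lib'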
+# Walk over all files in a directory and call func
+def package_qa_walk(warnfuncs, errorfuncs, skip, package, d):
+ import oe.qa
+
+ warnings = {}
+ errors = {}
+ for path in pkgfiles[package]:
+ elf = oe.qa.ELFFile(path)
+ try:
+ elf.open()
+ except (IOError, oe.qa.NotELFFileError):
+ # IOError can happen if the packaging control files disappear; either way, treat the file as not being ELF.
+ elf = None
+ for func in warnfuncs:
+ func(path, package, d, elf, warnings)
+ for func in errorfuncs:
+ func(path, package, d, elf, errors)
+
+ for w in warnings:
+ package_qa_handle_error(w, warnings[w], d)
+ for e in errors:
+ package_qa_handle_error(e, errors[e], d)
+
+def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
+ # Don't do this check for kernel/module recipes: there aren't many
+ # debug/development packages, and it produces false positives,
+ # e.g. on kernel-module-lirc-dev
+ if bb.data.inherits_class("kernel", d) or bb.data.inherits_class("module-base", d):
+ return
+
+ if not "-dbg" in pkg and not "packagegroup-" in pkg and not "-image" in pkg:
+ localdata = bb.data.createCopy(d)
+ localdata.setVar('OVERRIDES', pkg)
+ bb.data.update_data(localdata)
+
+ # Now check the RDEPENDS
+ rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS', True) or "")
+
+ # Now do the sanity check
+ if "build-deps" not in skip:
+ for rdepend in rdepends:
+ if "-dbg" in rdepend and "debug-deps" not in skip:
+ error_msg = "%s rdepends on %s" % (pkg,rdepend)
+ package_qa_handle_error("debug-deps", error_msg, d)
+ if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
+ error_msg = "%s rdepends on %s" % (pkg, rdepend)
+ package_qa_handle_error("dev-deps", error_msg, d)
+ if rdepend not in packages:
+ rdep_data = oe.packagedata.read_subpkgdata(rdepend, d)
+ if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
+ continue
+ if not rdep_data or not 'PN' in rdep_data:
+ pkgdata_dir = d.getVar("PKGDATA_DIR", True)
+ try:
+ possibles = os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdepend))
+ except OSError:
+ possibles = []
+ for p in possibles:
+ rdep_data = oe.packagedata.read_subpkgdata(p, d)
+ if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
+ break
+ if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
+ continue
+ if rdep_data and 'PN' in rdep_data:
+ error_msg = "%s rdepends on %s, but it isn't a build dependency, missing %s in DEPENDS or PACKAGECONFIG?" % (pkg, rdepend, rdep_data['PN'])
+ else:
+ error_msg = "%s rdepends on %s, but it isn't a build dependency?" % (pkg, rdepend)
+ package_qa_handle_error("build-deps", error_msg, d)
+
+ if "file-rdeps" not in skip:
+ ignored_file_rdeps = set(['/bin/sh', '/usr/bin/env', 'rtld(GNU_HASH)'])
+ if bb.data.inherits_class('nativesdk', d):
+ ignored_file_rdeps |= set(['/bin/bash', '/usr/bin/perl'])
+ # Save the FILERDEPENDS
+ filerdepends = {}
+ rdep_data = oe.packagedata.read_subpkgdata(pkg, d)
+ for key in rdep_data:
+ if key.startswith("FILERDEPENDS_"):
+ for subkey in rdep_data[key].split():
+ if subkey not in ignored_file_rdeps:
+ # We already know it starts with FILERDEPENDS_
+ filerdepends[subkey] = key[13:]
+
+ if filerdepends:
+ next = rdepends
+ done = rdepends[:]
+ # Find all the rdepends on the dependency chain
+ while next:
+ new = []
+ for rdep in next:
+ rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
+ sub_rdeps = rdep_data.get("RDEPENDS_" + rdep)
+ if not sub_rdeps:
+ continue
+ for sub_rdep in sub_rdeps.split():
+ if sub_rdep in done:
+ continue
+ if not sub_rdep.startswith('(') and \
+ oe.packagedata.has_subpkgdata(sub_rdep, d):
+ # It's a new rdep
+ done.append(sub_rdep)
+ new.append(sub_rdep)
+ next = new
+
+ # Add the rprovides of itself
+ if pkg not in done:
+ done.insert(0, pkg)
+
+ # python itself is not a package, but python-core provides it, so
+ # skip checking /usr/bin/python if python is in the rdeps, in case
+ # the recipe contains RDEPENDS_pkg = "python".
+ for py in [ d.getVar('MLPREFIX', True) + "python", "python" ]:
+ if py in done:
+ filerdepends.pop("/usr/bin/python",None)
+ done.remove(py)
+ for rdep in done:
+ # Check the FILERPROVIDES, RPROVIDES and FILES_INFO of each rdep
+ rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
+ for key in rdep_data:
+ if key.startswith("FILERPROVIDES_") or key.startswith("RPROVIDES_"):
+ for subkey in rdep_data[key].split():
+ filerdepends.pop(subkey,None)
+ # Add the files list to the rprovides
+ if key == "FILES_INFO":
+ # Use eval() to turn the string into a dict
+ for subkey in eval(rdep_data[key]):
+ filerdepends.pop(subkey,None)
+ if not filerdepends:
+ # Break if all the file rdepends are met
+ break
+ if filerdepends:
+ for key in filerdepends:
+ error_msg = "%s contained in package %s requires %s, but no providers found in RDEPENDS_%s?" % \
+ (filerdepends[key].replace("_%s" % pkg, "").replace("@underscore@", "_"), pkg, key, pkg)
+ package_qa_handle_error("file-rdeps", error_msg, d)
+
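The pkgdata entries driving this walk look roughly like the following (package and file names are hypothetical):

    # FILERDEPENDS_<file>_<pkg> records per-file runtime requirements:
    #   FILERDEPENDS_/usr/bin/foo_foo: libc.so.6(GLIBC_2.4) /bin/sh
    # /bin/sh is in the ignore set; for libc.so.6(GLIBC_2.4) the check walks
    # RDEPENDS_foo transitively, looking for a provider in each package's
    # FILERPROVIDES_*/RPROVIDES_*/FILES_INFO before raising file-rdeps.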
+def package_qa_check_deps(pkg, pkgdest, skip, d):
+
+ localdata = bb.data.createCopy(d)
+ localdata.setVar('OVERRIDES', pkg)
+ bb.data.update_data(localdata)
+
+ def check_valid_deps(var):
+ try:
+ rvar = bb.utils.explode_dep_versions2(localdata.getVar(var, True) or "")
+ except ValueError as e:
+ bb.fatal("%s_%s: %s" % (var, pkg, e))
+ for dep in rvar:
+ for v in rvar[dep]:
+ if v and not v.startswith(('< ', '= ', '> ', '<= ', '>= ')):
+ error_msg = "%s_%s is invalid: %s (%s) - only the comparisons <, =, >, <= and >= are allowed" % (var, pkg, dep, v)
+ package_qa_handle_error("dep-cmp", error_msg, d)
+
+ check_valid_deps('RDEPENDS')
+ check_valid_deps('RRECOMMENDS')
+ check_valid_deps('RSUGGESTS')
+ check_valid_deps('RPROVIDES')
+ check_valid_deps('RREPLACES')
+ check_valid_deps('RCONFLICTS')
+
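For reference, a sketch of what check_valid_deps sees for a hypothetical value (names invented):

    # RDEPENDS_foo = "bar (>= 1.0) baz (== 2.0)"
    # bb.utils.explode_dep_versions2 yields roughly:
    #   {'bar': ['>= 1.0'], 'baz': ['== 2.0']}
    # 'bar' passes, while 'baz' fails the startswith check ('==' is not an
    # allowed comparison) and triggers a dep-cmp QA error.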
+QAPATHTEST[expanded-d] = "package_qa_check_expanded_d"
+def package_qa_check_expanded_d(path,name,d,elf,messages):
+ """
+ Check for the expanded D (${D}) value in pkg_* and FILES
+ variables and warn the user to use it correctly.
+ """
+
+ sane = True
+ expanded_d = d.getVar('D',True)
+
+ # Get packages for current recipe and iterate
+ packages = d.getVar('PACKAGES', True).split(" ")
+ for pak in packages:
+ # Go through all variables and check if the expanded D is found; warn the user accordingly
+ for var in 'FILES','pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm':
+ bbvar = d.getVar(var + "_" + pak, False)
+ if bbvar:
+ # Bitbake expands ${D} within bbvar during the previous step, so we check for its expanded value
+ if expanded_d in bbvar:
+ if var == 'FILES':
+ package_qa_add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % pak)
+ sane = False
+ else:
+ package_qa_add_message(messages, "expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, pak))
+ sane = False
+ return sane
+
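A hypothetical recipe fragment showing the difference this test enforces:

    # Wrong: ${D} is expanded at package-build time to a host build path,
    # which then leaks into the scriptlet run on the target.
    pkg_postinst_foo() { ln -s /usr/bin/foo ${D}${bindir}/foo-link ; }

    # Right: $D is evaluated when the scriptlet runs (set during image
    # construction, empty on the target).
    pkg_postinst_foo() { ln -s /usr/bin/foo $D${bindir}/foo-link ; }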
+def package_qa_check_encoding(keys, encode, d):
+ def check_encoding(key,enc):
+ sane = True
+ value = d.getVar(key, True)
+ if value:
+ try:
+ s = unicode(value, enc)
+ except UnicodeDecodeError as e:
+ error_msg = "%s has non %s characters" % (key,enc)
+ sane = False
+ package_qa_handle_error("invalid-chars", error_msg, d)
+ return sane
+
+ for key in keys:
+ sane = check_encoding(key, encode)
+ if not sane:
+ break
+
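+# ":=" forces immediate expansion at parse time, so these capture the uid/gid
+# of the real user running bitbake; the QA test below can then compare file
+# ownership against them even when do_package_qa runs under (pseudo) fakeroot,
+# where os.getuid() would report 0.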
+HOST_USER_UID := "${@os.getuid()}"
+HOST_USER_GID := "${@os.getgid()}"
+
+QAPATHTEST[host-user-contaminated] = "package_qa_check_host_user"
+def package_qa_check_host_user(path, name, d, elf, messages):
+ """Check for paths outside of /home which are owned by the user running bitbake."""
+
+ if not os.path.lexists(path):
+ return
+
+ dest = d.getVar('PKGDEST', True)
+ pn = d.getVar('PN', True)
+ home = os.path.join(dest, 'home')
+ if path == home or path.startswith(home + os.sep):
+ return
+
+ try:
+ stat = os.lstat(path)
+ except OSError as exc:
+ import errno
+ if exc.errno != errno.ENOENT:
+ raise
+ else:
+ rootfs_path = path[len(dest):]
+ check_uid = int(d.getVar('HOST_USER_UID', True))
+ if stat.st_uid == check_uid:
+ package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, rootfs_path, check_uid))
+ return False
+
+ check_gid = int(d.getVar('HOST_USER_GID', True))
+ if stat.st_gid == check_gid:
+ package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, rootfs_path, check_gid))
+ return False
+ return True
+
+# The PACKAGE FUNC to scan each package
+python do_package_qa () {
+ import subprocess
+ import oe.packagedata
+
+ bb.note("DO PACKAGE QA")
+
+ bb.build.exec_func("read_subpackage_metadata", d)
+
+ # Check non UTF-8 characters on recipe's metadata
+ package_qa_check_encoding(['DESCRIPTION', 'SUMMARY', 'LICENSE', 'SECTION'], 'utf-8', d)
+
+ logdir = d.getVar('T', True)
+ pkg = d.getVar('PN', True)
+
+ # Check the compile log for host contamination
+ compilelog = os.path.join(logdir,"log.do_compile")
+
+ if os.path.exists(compilelog):
+ statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % compilelog
+ if subprocess.call(statement, shell=True) == 0:
+ msg = "%s: The compile log indicates that host include and/or library paths were used.\n \
+ Please check the log '%s' for more information." % (pkg, compilelog)
+ package_qa_handle_error("compile-host-path", msg, d)
+
+ # Check the install log for host contamination
+ installlog = os.path.join(logdir,"log.do_install")
+
+ if os.path.exists(installlog):
+ statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % installlog
+ if subprocess.call(statement, shell=True) == 0:
+ msg = "%s: The install log indicates that host include and/or library paths were used.\n \
+ Please check the log '%s' for more information." % (pkg, installlog)
+ package_qa_handle_error("install-host-path", msg, d)
+
+ # Scan the packages...
+ pkgdest = d.getVar('PKGDEST', True)
+ packages = set((d.getVar('PACKAGES', True) or '').split())
+
+ cpath = oe.cachedpath.CachedPath()
+ global pkgfiles
+ pkgfiles = {}
+ for pkg in packages:
+ pkgfiles[pkg] = []
+ for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
+ for file in files:
+ pkgfiles[pkg].append(walkroot + os.sep + file)
+
+ # if there are no packages, there is nothing to scan
+ if not packages:
+ return
+
+ testmatrix = d.getVarFlags("QAPATHTEST")
+ import re
+ # Package names must match the [a-z0-9.+-]+ regular expression
+ pkgname_pattern = re.compile("^[a-z0-9.+-]+$")
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ taskdeps = set()
+ for dep in taskdepdata:
+ taskdeps.add(taskdepdata[dep][0])
+
+ g = globals()
+ for package in packages:
+ skip = (d.getVar('INSANE_SKIP_' + package, True) or "").split()
+ if skip:
+ bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
+ warnchecks = []
+ for w in (d.getVar("WARN_QA", True) or "").split():
+ if w in skip:
+ continue
+ if w in testmatrix and testmatrix[w] in g:
+ warnchecks.append(g[testmatrix[w]])
+ if w == 'unsafe-references-in-binaries':
+ oe.utils.write_ld_so_conf(d)
+
+ errorchecks = []
+ for e in (d.getVar("ERROR_QA", True) or "").split():
+ if e in skip:
+ continue
+ if e in testmatrix and testmatrix[e] in g:
+ errorchecks.append(g[testmatrix[e]])
+ if e == 'unsafe-references-in-binaries':
+ oe.utils.write_ld_so_conf(d)
+
+ bb.note("Checking Package: %s" % package)
+ # Check package name
+ if not pkgname_pattern.match(package):
+ package_qa_handle_error("pkgname",
+ "%s doesn't match the [a-z0-9.+-]+ regex" % package, d)
+
+ path = "%s/%s" % (pkgdest, package)
+ package_qa_walk(warnchecks, errorchecks, skip, package, d)
+
+ package_qa_check_rdepends(package, pkgdest, skip, taskdeps, packages, d)
+ package_qa_check_deps(package, pkgdest, skip, d)
+
+ if 'libdir' in d.getVar("ALL_QA", True).split():
+ package_qa_check_libdir(d)
+
+ qa_sane = d.getVar("QA_SANE", True)
+ if not qa_sane:
+ bb.fatal("QA run found fatal errors. Please consider fixing them.")
+ bb.note("DONE with PACKAGE QA")
+}
+
+do_package_qa[vardepsexclude] = "BB_TASKDEPDATA"
+do_package_qa[rdeptask] = "do_packagedata"
+addtask do_package_qa after do_packagedata do_package before do_build
+
+SSTATETASKS += "do_package_qa"
+do_package_qa[sstate-inputdirs] = ""
+do_package_qa[sstate-outputdirs] = ""
+python do_package_qa_setscene () {
+ sstate_setscene(d)
+}
+addtask do_package_qa_setscene
+
+python do_qa_staging() {
+ bb.note("QA checking staging")
+
+ if not package_qa_check_staged(d.expand('${SYSROOT_DESTDIR}${libdir}'), d):
+ bb.fatal("QA staging was broken by the package built above")
+}
+
+python do_qa_configure() {
+ import subprocess
+
+ ###########################################################################
+ # Check config.log for cross compile issues
+ ###########################################################################
+
+ configs = []
+ workdir = d.getVar('WORKDIR', True)
+
+ if bb.data.inherits_class('autotools', d):
+ bb.note("Checking autotools environment for common misconfiguration")
+ for root, dirs, files in os.walk(workdir):
+ statement = "grep -q -F -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s" % \
+ os.path.join(root,"config.log")
+ if "config.log" in files:
+ if subprocess.call(statement, shell=True) == 0:
+ bb.fatal("""This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
+Rerun configure task after fixing this.""")
+
+ if "configure.ac" in files:
+ configs.append(os.path.join(root,"configure.ac"))
+ if "configure.in" in files:
+ configs.append(os.path.join(root, "configure.in"))
+
+ ###########################################################################
+ # Check gettext configuration and dependencies are correct
+ ###########################################################################
+
+ cnf = d.getVar('EXTRA_OECONF', True) or ""
+ if "gettext" not in d.getVar('P', True) and "gcc-runtime" not in d.getVar('P', True) and "--disable-nls" not in cnf:
+ ml = d.getVar("MLPREFIX", True) or ""
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk', d):
+ gt = "gettext-native"
+ elif bb.data.inherits_class('cross-canadian', d):
+ gt = "nativesdk-gettext"
+ else:
+ gt = "virtual/" + ml + "gettext"
+ deps = bb.utils.explode_deps(d.getVar('DEPENDS', True) or "")
+ if gt not in deps:
+ for config in configs:
+ gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
+ if subprocess.call(gnu, shell=True) == 0:
+ bb.fatal("""%s required but not in DEPENDS for file %s.
+Missing inherit gettext?""" % (gt, config))
+
+ ###########################################################################
+ # Check license variables
+ ###########################################################################
+
+ package_qa_check_license(workdir, d)
+
+ ###########################################################################
+ # Check unrecognised configure options (with a whitelist)
+ ###########################################################################
+ if bb.data.inherits_class("autotools", d):
+ bb.note("Checking configure output for unrecognised options")
+ try:
+ flag = "WARNING: unrecognized options:"
+ log = os.path.join(d.getVar('B', True), 'config.log')
+ output = subprocess.check_output(['grep', '-F', flag, log]).replace(', ', ' ')
+ options = set()
+ for line in output.splitlines():
+ options |= set(line.partition(flag)[2].split())
+ whitelist = set(d.getVar("UNKNOWN_CONFIGURE_WHITELIST", True).split())
+ options -= whitelist
+ if options:
+ pn = d.getVar('PN', True)
+ error_msg = pn + ": configure was passed unrecognised options: " + " ".join(options)
+ package_qa_handle_error("unknown-configure-option", error_msg, d)
+ except subprocess.CalledProcessError:
+ pass
+
+ # Check invalid PACKAGECONFIG
+ pkgconfig = (d.getVar("PACKAGECONFIG", True) or "").split()
+ if pkgconfig:
+ pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
+ for pconfig in pkgconfig:
+ if pconfig not in pkgconfigflags:
+ pn = d.getVar('PN', True)
+ error_msg = "%s: invalid PACKAGECONFIG: %s" % (pn, pconfig)
+ package_qa_handle_error("invalid-packageconfig", error_msg, d)
+
+ qa_sane = d.getVar("QA_SANE", True)
+ if not qa_sane:
+ bb.fatal("Fatal QA errors found, failing task.")
+}
+
+python do_qa_unpack() {
+ bb.note("Checking has ${S} been created")
+
+ s_dir = d.getVar('S', True)
+ if not os.path.exists(s_dir):
+ bb.warn('%s: the directory %s (%s) pointed to by the S variable doesn\'t exist - please set S within the recipe to point to where the source has been unpacked' % (d.getVar('PN', True), d.getVar('S', False), s_dir))
+}
+
+# The staging function, to check all staging
+#addtask qa_staging after do_populate_sysroot before do_build
+do_populate_sysroot[postfuncs] += "do_qa_staging "
+
+# Check for broken config.log files, for packages requiring gettext that
+# don't list it in DEPENDS, and for a correct LIC_FILES_CHKSUM
+#addtask qa_configure after do_configure before do_compile
+do_configure[postfuncs] += "do_qa_configure "
+
+# Check that S exists.
+do_unpack[postfuncs] += "do_qa_unpack"
+
+python () {
+ tests = d.getVar('ALL_QA', True).split()
+ if "desktop" in tests:
+ d.appendVar("PACKAGE_DEPENDS", " desktop-file-utils-native")
+
+ ###########################################################################
+ # Check various variables
+ ###########################################################################
+
+ # Checking ${FILESEXTRAPATHS}
+ extrapaths = (d.getVar("FILESEXTRAPATHS", True) or "")
+ if '__default' not in extrapaths.split(":"):
+ msg = "FILESEXTRAPATHS-variable, must always use _prepend (or _append)\n"
+ msg += "type of assignment, and don't forget the colon.\n"
+ msg += "Please assign it with the format of:\n"
+ msg += " FILESEXTRAPATHS_append := \":${THISDIR}/Your_Files_Path\" or\n"
+ msg += " FILESEXTRAPATHS_prepend := \"${THISDIR}/Your_Files_Path:\"\n"
+ msg += "in your bbappend file\n\n"
+ msg += "Your incorrect assignment is:\n"
+ msg += "%s\n" % extrapaths
+ bb.warn(msg)
+
+ overrides = d.getVar('OVERRIDES', True).split(':')
+ pn = d.getVar('PN', True)
+ if pn in overrides:
+ msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE", True), pn)
+ package_qa_handle_error("pn-overrides", msg, d)
+
+ issues = []
+ if (d.getVar('PACKAGES', True) or "").split():
+ for dep in (d.getVar('QADEPENDS', True) or "").split():
+ d.appendVarFlag('do_package_qa', 'depends', " %s:do_populate_sysroot" % dep)
+ for var in 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RCONFLICTS', 'RPROVIDES', 'RREPLACES', 'FILES', 'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm', 'ALLOW_EMPTY':
+ if d.getVar(var, False):
+ issues.append(var)
+
+ fakeroot_tests = d.getVar('FAKEROOT_QA', True).split()
+ if set(tests) & set(fakeroot_tests):
+ d.setVarFlag('do_package_qa', 'fakeroot', '1')
+ d.appendVarFlag('do_package_qa', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
+ else:
+ d.setVarFlag('do_package_qa', 'rdeptask', '')
+ for i in issues:
+ package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE", True), i), d)
+ qa_sane = d.getVar("QA_SANE", True)
+ if not qa_sane:
+ bb.fatal("Fatal QA errors found, failing task.")
+}
diff --git a/import-layers/yocto-poky/meta/classes/kernel-arch.bbclass b/import-layers/yocto-poky/meta/classes/kernel-arch.bbclass
new file mode 100644
index 000000000..3ed5986a5
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/kernel-arch.bbclass
@@ -0,0 +1,60 @@
+#
+# Set the ARCH environment variable for kernel compilation (including
+# modules). The return value must match one of the architecture directories
+# in the kernel source "arch" directory.
+#
+
+valid_archs = "alpha cris ia64 \
+ i386 x86 \
+ m68knommu m68k ppc powerpc powerpc64 ppc64 \
+ sparc sparc64 \
+ arm aarch64 \
+ m32r mips \
+ sh sh64 um h8300 \
+ parisc s390 v850 \
+ avr32 blackfin \
+ microblaze \
+ nios2"
+
+def map_kernel_arch(a, d):
+ import re
+
+ valid_archs = d.getVar('valid_archs', True).split()
+
+ if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
+ elif re.match('armeb$', a): return 'arm'
+ elif re.match('aarch64$', a): return 'arm64'
+ elif re.match('aarch64_be$', a): return 'arm64'
+ elif re.match('mips(el|64|64el)$', a): return 'mips'
+ elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
+ elif re.match('sh(3|4)$', a): return 'sh'
+ elif re.match('bfin', a): return 'blackfin'
+ elif re.match('microblazee[bl]', a): return 'microblaze'
+ elif a in valid_archs: return a
+ else:
+ bb.error("cannot map '%s' to a linux kernel architecture" % a)
+
+export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH', True), d)}"
+
+def map_uboot_arch(a, d):
+ import re
+
+ if re.match('p(pc|owerpc)(|64)', a): return 'ppc'
+ elif re.match('i.86$', a): return 'x86'
+ return a
+
+export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH', True), d)}"
+
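A standalone sketch of the mapping logic above, runnable outside bitbake (the asserted pairs are illustrative; the valid_archs fallback is omitted):

    import re

    def map_kernel_arch_sketch(a):
        # Same regexes as the class above, reduced to three cases.
        if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
        if re.match('aarch64(_be)?$', a): return 'arm64'
        if re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
        return a

    assert map_kernel_arch_sketch('x86_64') == 'x86'
    assert map_kernel_arch_sketch('aarch64_be') == 'arm64'
    assert map_kernel_arch_sketch('ppc64') == 'powerpc'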
+# Set TARGET_??_KERNEL_ARCH in the machine .conf to set architecture
+# specific options necessary for building the kernel and modules.
+TARGET_CC_KERNEL_ARCH ?= ""
+HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}"
+TARGET_LD_KERNEL_ARCH ?= ""
+HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
+TARGET_AR_KERNEL_ARCH ?= ""
+HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
+
+KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd"
+KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
+KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
+
diff --git a/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass b/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass
new file mode 100644
index 000000000..e5b75edf5
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass
@@ -0,0 +1,235 @@
+inherit kernel-uboot
+
+python __anonymous () {
+ kerneltype = d.getVar('KERNEL_IMAGETYPE', True)
+ if kerneltype == 'fitImage':
+ depends = d.getVar("DEPENDS", True)
+ depends = "%s u-boot-mkimage-native dtc-native" % depends
+ d.setVar("DEPENDS", depends)
+
+ # Override the KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
+ # to kernel.bbclass. We have to override it, since we pack zImage
+ # (at least for now) into the fitImage.
+ d.setVar("KERNEL_IMAGETYPE_FOR_MAKE", "zImage")
+
+ image = d.getVar('INITRAMFS_IMAGE', True)
+ if image:
+ d.appendVarFlag('do_assemble_fitimage', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+}
+
+#
+# Emit the fitImage ITS header
+#
+fitimage_emit_fit_header() {
+ cat << EOF >> fit-image.its
+/dts-v1/;
+
+/ {
+ description = "U-Boot fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}";
+ #address-cells = <1>;
+EOF
+}
+
+#
+# Emit the fitImage section bits
+#
+# $1 ... Section bit type: imagestart - image section start
+# confstart - configuration section start
+# sectend - section end
+# fitend - fitimage end
+#
+fitimage_emit_section_maint() {
+ case $1 in
+ imagestart)
+ cat << EOF >> fit-image.its
+
+ images {
+EOF
+ ;;
+ confstart)
+ cat << EOF >> fit-image.its
+
+ configurations {
+EOF
+ ;;
+ sectend)
+ cat << EOF >> fit-image.its
+ };
+EOF
+ ;;
+ fitend)
+ cat << EOF >> fit-image.its
+};
+EOF
+ ;;
+ esac
+}
+
+#
+# Emit the fitImage ITS kernel section
+#
+# $1 ... Image counter
+# $2 ... Path to kernel image
+# $3 ... Compression type
+fitimage_emit_section_kernel() {
+
+ kernel_csum="sha1"
+
+ ENTRYPOINT=${UBOOT_ENTRYPOINT}
+ if test -n "${UBOOT_ENTRYSYMBOL}"; then
+ ENTRYPOINT=`${HOST_PREFIX}nm ${S}/vmlinux | \
+ awk '$3=="${UBOOT_ENTRYSYMBOL}" {print $1}'`
+ fi
+
+ cat << EOF >> fit-image.its
+ kernel@${1} {
+ description = "Linux kernel";
+ data = /incbin/("${2}");
+ type = "kernel";
+ arch = "${UBOOT_ARCH}";
+ os = "linux";
+ compression = "${3}";
+ load = <${UBOOT_LOADADDRESS}>;
+ entry = <${ENTRYPOINT}>;
+ hash@1 {
+ algo = "${kernel_csum}";
+ };
+ };
+EOF
+}
+
+#
+# Emit the fitImage ITS DTB section
+#
+# $1 ... Image counter
+# $2 ... Path to DTB image
+fitimage_emit_section_dtb() {
+
+ dtb_csum="sha1"
+
+ cat << EOF >> fit-image.its
+ fdt@${1} {
+ description = "Flattened Device Tree blob";
+ data = /incbin/("${2}");
+ type = "flat_dt";
+ arch = "${UBOOT_ARCH}";
+ compression = "none";
+ hash@1 {
+ algo = "${dtb_csum}";
+ };
+ };
+EOF
+}
+
+#
+# Emit the fitImage ITS configuration section
+#
+# $1 ... Linux kernel ID
+# $2 ... DTB image ID
+fitimage_emit_section_config() {
+
+ conf_csum="sha1"
+
+ # Test if we have any DTBs at all
+ if [ -z "${2}" ] ; then
+ conf_desc="Boot Linux kernel"
+ fdt_line=""
+ else
+ conf_desc="Boot Linux kernel with FDT blob"
+ fdt_line="fdt = \"fdt@${2}\";"
+ fi
+ kernel_line="kernel = \"kernel@${1}\";"
+
+ cat << EOF >> fit-image.its
+ default = "conf@1";
+ conf@1 {
+ description = "${conf_desc}";
+ ${kernel_line}
+ ${fdt_line}
+ hash@1 {
+ algo = "${conf_csum}";
+ };
+ };
+EOF
+}
+
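Pieced together from the helpers above, the generated fit-image.its is roughly the following skeleton (one kernel, one DTB; data, hashes and addresses elided):

    /dts-v1/;
    / {
            description = "U-Boot fitImage for ...";
            #address-cells = <1>;
            images {
                    kernel@1 { ... };
                    fdt@1 { ... };
            };
            configurations {
                    default = "conf@1";
                    conf@1 {
                            kernel = "kernel@1";
                            fdt = "fdt@1";
                            ...
                    };
            };
    };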
+do_assemble_fitimage() {
+ if test "x${KERNEL_IMAGETYPE}" = "xfitImage" ; then
+ kernelcount=1
+ dtbcount=""
+ rm -f fit-image.its
+
+ fitimage_emit_fit_header
+
+ #
+ # Step 1: Prepare a kernel image section.
+ #
+ fitimage_emit_section_maint imagestart
+
+ uboot_prep_kimage
+ fitimage_emit_section_kernel "${kernelcount}" linux.bin "${linux_comp}"
+
+ #
+ # Step 2: Prepare a DTB image section
+ #
+ if test -n "${KERNEL_DEVICETREE}"; then
+ dtbcount=1
+ for DTB in ${KERNEL_DEVICETREE}; do
+ if echo ${DTB} | grep -q '/dts/'; then
+ bbwarn "${DTB} contains the full path to the the dts file, but only the dtb name should be used."
+ DTB=`basename ${DTB} | sed 's,\.dts$,.dtb,g'`
+ fi
+ DTB_PATH="arch/${ARCH}/boot/dts/${DTB}"
+ if [ ! -e "${DTB_PATH}" ]; then
+ DTB_PATH="arch/${ARCH}/boot/${DTB}"
+ fi
+
+ fitimage_emit_section_dtb ${dtbcount} ${DTB_PATH}
+ dtbcount=`expr ${dtbcount} + 1`
+ done
+ fi
+
+ fitimage_emit_section_maint sectend
+
+ # Force the first Kernel and DTB in the default config
+ kernelcount=1
+ dtbcount=1
+
+ #
+ # Step 3: Prepare a configurations section
+ #
+ fitimage_emit_section_maint confstart
+
+ fitimage_emit_section_config ${kernelcount} ${dtbcount}
+
+ fitimage_emit_section_maint sectend
+
+ fitimage_emit_section_maint fitend
+
+ #
+ # Step 4: Assemble the image
+ #
+ uboot-mkimage -f fit-image.its arch/${ARCH}/boot/fitImage
+ fi
+}
+
+addtask assemble_fitimage before do_install after do_compile
+
+kernel_do_deploy[vardepsexclude] = "DATETIME"
+kernel_do_deploy_append() {
+ # Update deploy directory
+ if test "x${KERNEL_IMAGETYPE}" = "xfitImage" ; then
+ cd ${B}
+ echo "Copying fit-image.its source file..."
+ its_base_name="${KERNEL_IMAGETYPE}-its-${PV}-${PR}-${MACHINE}-${DATETIME}"
+ its_symlink_name=${KERNEL_IMAGETYPE}-its-${MACHINE}
+ install -m 0644 fit-image.its ${DEPLOYDIR}/${its_base_name}.its
+ linux_bin_base_name="${KERNEL_IMAGETYPE}-linux.bin-${PV}-${PR}-${MACHINE}-${DATETIME}"
+ linux_bin_symlink_name=${KERNEL_IMAGETYPE}-linux.bin-${MACHINE}
+ install -m 0644 linux.bin ${DEPLOYDIR}/${linux_bin_base_name}.bin
+
+ cd ${DEPLOYDIR}
+ ln -sf ${its_base_name}.its ${its_symlink_name}.its
+ ln -sf ${linux_bin_base_name}.bin ${linux_bin_symlink_name}.bin
+ fi
+}
diff --git a/import-layers/yocto-poky/meta/classes/kernel-grub.bbclass b/import-layers/yocto-poky/meta/classes/kernel-grub.bbclass
new file mode 100644
index 000000000..a63f482a9
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/kernel-grub.bbclass
@@ -0,0 +1,91 @@
+#
+# While installing an RPM to update the kernel on a deployed target, this class
+# updates the boot area and the boot menu to make the new kernel the priority,
+# while still allowing you to fall back to the original kernel.
+#
+# - In kernel-image's preinstall scriptlet, it backs up the original kernel to
+# avoid a possible conflict with the new one.
+#
+# - In kernel-image's postinstall scriptlet, it modifies grub's config file to
+# set the new kernel as the boot priority.
+#
+
+pkg_preinst_kernel-image_append () {
+ # Determine which grub config file is present
+ [ -f "$D/boot/grub/menu.list" ] && grubcfg="$D/boot/grub/menu.list"
+ [ -f "$D/boot/grub/grub.cfg" ] && grubcfg="$D/boot/grub/grub.cfg"
+ if [ -n "$grubcfg" ]; then
+ # Dereference the symlink to avoid a conflict with the new kernel name.
+ if grep -q "/${KERNEL_IMAGETYPE} \+root=" $grubcfg; then
+ if [ -L "$D/boot/${KERNEL_IMAGETYPE}" ]; then
+ kimage=`realpath $D/boot/${KERNEL_IMAGETYPE} 2>/dev/null`
+ if [ -f "$D$kimage" ]; then
+ sed -i "s:${KERNEL_IMAGETYPE} \+root=:${kimage##*/} root=:" $grubcfg
+ fi
+ fi
+ fi
+
+ # Rename old kernel if it conflicts with new kernel name.
+ if grep -q "/${KERNEL_IMAGETYPE}-${KERNEL_VERSION} \+root=" $grubcfg; then
+ if [ -f "$D/boot/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}" ]; then
+ timestamp=`date +%s`
+ kimage="$D/boot/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}-$timestamp-back"
+ sed -i "s:${KERNEL_IMAGETYPE}-${KERNEL_VERSION} \+root=:${kimage##*/} root=:" $grubcfg
+ mv "$D/boot/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}" "$kimage"
+ fi
+ fi
+ fi
+}
+
+pkg_postinst_kernel-image_prepend () {
+ get_new_grub_cfg() {
+ grubcfg="$1"
+ old_image="$2"
+ title="Update ${KERNEL_IMAGETYPE}-${KERNEL_VERSION}-${PV}"
+ if [ "${grubcfg##*/}" = "grub.cfg" ]; then
+ rootfs=`grep " *linux \+[^ ]\+ \+root=" $grubcfg -m 1 | \
+ sed "s#${old_image}#${old_image%/*}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}#"`
+
+ echo "menuentry \"$title\" {"
+ echo " set root=(hd0,1)"
+ echo "$rootfs"
+ echo "}"
+ elif [ "${grubcfg##*/}" = "menu.list" ]; then
+ rootfs=`grep "kernel \+[^ ]\+ \+root=" $grubcfg -m 1 | \
+ sed "s#${old_image}#${old_image%/*}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}#"`
+
+ echo "default 0"
+ echo "timeout 30"
+ echo "title $title"
+ echo "root (hd0,0)"
+ echo "$rootfs"
+ fi
+ }
+
+ get_old_grub_cfg() {
+ grubcfg="$1"
+ if [ "${grubcfg##*/}" = "grub.cfg" ]; then
+ cat "$grubcfg"
+ elif [ "${grubcfg##*/}" = "menu.list" ]; then
+ sed -e '/^default/d' -e '/^timeout/d' "$grubcfg"
+ fi
+ }
+
+ if [ -f "$D/boot/grub/grub.cfg" ]; then
+ grubcfg="$D/boot/grub/grub.cfg"
+ old_image=`grep ' *linux \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'`
+ elif [ -f "$D/boot/grub/menu.list" ]; then
+ grubcfg="$D/boot/grub/menu.list"
+ old_image=`grep '^kernel \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'`
+ fi
+
+ # Don't update grubcfg on first install, when the old kernel image doesn't exist yet.
+ if [ -f "$D/boot/${old_image##*/}" ]; then
+ grubcfgtmp="$grubcfg.tmp"
+ get_new_grub_cfg "$grubcfg" "$old_image" > $grubcfgtmp
+ get_old_grub_cfg "$grubcfg" >> $grubcfgtmp
+ mv $grubcfgtmp $grubcfg
+ echo "Caution! Update kernel may affect kernel-module!"
+ fi
+}
+
diff --git a/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass b/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass
new file mode 100644
index 000000000..e1a70e621
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass
@@ -0,0 +1,203 @@
+pkg_postinst_modules () {
+if [ -z "$D" ]; then
+ depmod -a ${KERNEL_VERSION}
+else
+ # image.bbclass will call depmodwrapper after everything is installed,
+ # no need to do it here as well
+ :
+fi
+}
+
+pkg_postrm_modules () {
+if [ -z "$D" ]; then
+ depmod -a ${KERNEL_VERSION}
+else
+ depmodwrapper -a -b $D ${KERNEL_VERSION}
+fi
+}
+
+autoload_postinst_fragment() {
+if [ x"$D" = "x" ]; then
+ modprobe %s || true
+fi
+}
+
+do_install_append() {
+ install -d ${D}${sysconfdir}/modules-load.d/ ${D}${sysconfdir}/modprobe.d/
+}
+
+PACKAGESPLITFUNCS_prepend = "split_kernel_module_packages "
+
+KERNEL_MODULES_META_PACKAGE ?= "kernel-modules"
+
+python split_kernel_module_packages () {
+ import re
+
+ modinfoexp = re.compile("([^=]+)=(.*)")
+ kerverrexp = re.compile('^(.*-hh.*)[\.\+].*$')
+ depmodpat0 = re.compile("^(.*\.k?o):..*$")
+ depmodpat1 = re.compile("^(.*\.k?o):\s*(.*\.k?o)\s*$")
+ depmodpat2 = re.compile("^(.*\.k?o):\s*(.*\.k?o)\s*\\\$")
+ depmodpat3 = re.compile("^\t(.*\.k?o)\s*\\\$")
+ depmodpat4 = re.compile("^\t(.*\.k?o)\s*$")
+
+ def extract_modinfo(file):
+ import tempfile, subprocess
+ tempfile.tempdir = d.getVar("WORKDIR", True)
+ tf = tempfile.mkstemp()
+ tmpfile = tf[1]
+ cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX", True) or "", file, tmpfile)
+ subprocess.call(cmd, shell=True)
+ f = open(tmpfile)
+ l = f.read().split("\000")
+ f.close()
+ os.close(tf[0])
+ os.unlink(tmpfile)
+ vals = {}
+ for i in l:
+ m = modinfoexp.match(i)
+ if not m:
+ continue
+ vals[m.group(1)] = m.group(2)
+ return vals
+
+ def parse_depmod():
+
+ dvar = d.getVar('PKGD', True)
+
+ kernelver = d.getVar('KERNEL_VERSION', True)
+ kernelver_stripped = kernelver
+ m = kerverrexp.match(kernelver)
+ if m:
+ kernelver_stripped = m.group(1)
+ staging_kernel_dir = d.getVar("STAGING_KERNEL_BUILDDIR", True)
+ system_map_file = "%s/boot/System.map-%s" % (dvar, kernelver)
+ if not os.path.exists(system_map_file):
+ system_map_file = "%s/System.map-%s" % (staging_kernel_dir, kernelver)
+ if not os.path.exists(system_map_file):
+ bb.fatal("System.map-%s does not exist in '%s/boot' nor STAGING_KERNEL_BUILDDIR '%s'" % (kernelver, dvar, staging_kernel_dir))
+
+ cmd = "depmod -n -a -b %s -F %s %s" % (dvar, system_map_file, kernelver_stripped)
+ f = os.popen(cmd, 'r')
+
+ deps = {}
+ line = f.readline()
+ while line:
+ if not depmodpat0.match(line):
+ line = f.readline()
+ continue
+ m1 = depmodpat1.match(line)
+ if m1:
+ deps[m1.group(1)] = m1.group(2).split()
+ else:
+ m2 = depmodpat2.match(line)
+ if m2:
+ deps[m2.group(1)] = m2.group(2).split()
+ line = f.readline()
+ m3 = depmodpat3.match(line)
+ while m3:
+ deps[m2.group(1)].extend(m3.group(1).split())
+ line = f.readline()
+ m3 = depmodpat3.match(line)
+ m4 = depmodpat4.match(line)
+ deps[m2.group(1)].extend(m4.group(1).split())
+ line = f.readline()
+ f.close()
+ return deps
+
+ def get_dependencies(file, pattern, format):
+ # file no longer includes PKGD
+ file = file.replace(d.getVar('PKGD', True) or '', '', 1)
+ # instead it is prefixed with /lib/modules/${KERNEL_VERSION}
+ file = file.replace("/lib/modules/%s/" % d.getVar('KERNEL_VERSION', True) or '', '', 1)
+
+ if file in module_deps:
+ dependencies = []
+ for i in module_deps[file]:
+ m = re.match(pattern, os.path.basename(i))
+ if not m:
+ continue
+ on = legitimize_package_name(m.group(1))
+ dependency_pkg = format % on
+ dependencies.append(dependency_pkg)
+ return dependencies
+ return []
+
+ def frob_metadata(file, pkg, pattern, format, basename):
+ vals = extract_modinfo(file)
+
+ dvar = d.getVar('PKGD', True)
+
+ # If autoloading is requested, output /etc/modules-load.d/<name>.conf and append
+ # appropriate modprobe commands to the postinst
+ autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD", True) or "").split()
+ autoload = d.getVar('module_autoload_%s' % basename, True)
+ if autoload and autoload == basename:
+ bb.warn("module_autoload_%s was replaced by KERNEL_MODULE_AUTOLOAD for cases where basename == module name, please drop it" % basename)
+ if autoload and basename not in autoloadlist:
+ bb.warn("module_autoload_%s is defined but '%s' isn't included in KERNEL_MODULE_AUTOLOAD, please add it there" % (basename, basename))
+ if basename in autoloadlist:
+ name = '%s/etc/modules-load.d/%s.conf' % (dvar, basename)
+ f = open(name, 'w')
+ if autoload:
+ for m in autoload.split():
+ f.write('%s\n' % m)
+ else:
+ f.write('%s\n' % basename)
+ f.close()
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ if not postinst:
+ bb.fatal("pkg_postinst_%s not defined" % pkg)
+ postinst += d.getVar('autoload_postinst_fragment', True) % (autoload or basename)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+
+ # Write out any modconf fragment
+ modconflist = (d.getVar("KERNEL_MODULE_PROBECONF", True) or "").split()
+ modconf = d.getVar('module_conf_%s' % basename, True)
+ if modconf and basename in modconflist:
+ name = '%s/etc/modprobe.d/%s.conf' % (dvar, basename)
+ f = open(name, 'w')
+ f.write("%s\n" % modconf)
+ f.close()
+ elif modconf:
+ bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename))
+
+ files = d.getVar('FILES_%s' % pkg, True)
+ files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename)
+ d.setVar('FILES_%s' % pkg, files)
+
+ if "description" in vals:
+ old_desc = d.getVar('DESCRIPTION_' + pkg, True) or ""
+ d.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"])
+
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
+ for dep in get_dependencies(file, pattern, format):
+ if not dep in rdepends:
+ rdepends[dep] = []
+ d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
+
+ # Avoid automatic -dev recommendations for modules ending with -dev.
+ d.setVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs', 1)
+
+ module_deps = parse_depmod()
+ module_regex = '^(.*)\.k?o$'
+ module_pattern = 'kernel-module-%s'
+
+ postinst = d.getVar('pkg_postinst_modules', True)
+ postrm = d.getVar('pkg_postrm_modules', True)
+
+ modules = do_split_packages(d, root='/lib/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='kernel-%s' % (d.getVar("KERNEL_VERSION", True)))
+ if modules:
+ metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE', True)
+ d.appendVar('RDEPENDS_' + metapkg, ' '+' '.join(modules))
+
+ # If modules-load.d and modprobe.d are empty at this point, remove them to
+ # avoid warnings. os.rmdir raises OSError if a directory is not empty, so
+ # check with os.listdir first.
+ dvar = d.getVar('PKGD', True)
+ for dir in ["%s/etc/modprobe.d" % (dvar), "%s/etc/modules-load.d" % (dvar), "%s/etc" % (dvar)]:
+ if len(os.listdir(dir)) == 0:
+ os.rmdir(dir)
+}
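For reference, parse_depmod above consumes `depmod -n` output of roughly this shape (module names hypothetical; the exact format varies between depmod versions, hence the continuation-line patterns):

    kernel/drivers/video/foo.ko: kernel/drivers/i2c/i2c-core.ko
    kernel/drivers/video/bar.ko: kernel/drivers/video/foo.ko \
    	kernel/drivers/i2c/i2c-core.ko
    kernel/drivers/misc/standalone.ko:

The first line is handled by depmodpat1, the backslash-continued pair by depmodpat2/depmodpat3/depmodpat4, and the dependency-less last line never passes the depmodpat0 gate, so it is skipped.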
+
+do_package[vardeps] += '${@" ".join(map(lambda s: "module_conf_" + s, (d.getVar("KERNEL_MODULE_PROBECONF", True) or "").split()))}'
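A hypothetical kernel recipe (or bbappend) fragment driving the autoload/modconf hooks above (module name and option are invented):

    KERNEL_MODULE_AUTOLOAD += "i2c-dev"
    KERNEL_MODULE_PROBECONF += "i2c-dev"
    module_conf_i2c-dev = "options i2c-dev some_param=1"

This would produce /etc/modules-load.d/i2c-dev.conf, append a modprobe line to the kernel-module-i2c-dev postinst, and write the options line to /etc/modprobe.d/i2c-dev.conf.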
diff --git a/import-layers/yocto-poky/meta/classes/kernel-uboot.bbclass b/import-layers/yocto-poky/meta/classes/kernel-uboot.bbclass
new file mode 100644
index 000000000..345e7f5f3
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/kernel-uboot.bbclass
@@ -0,0 +1,20 @@
+uboot_prep_kimage() {
+ if test -e arch/${ARCH}/boot/compressed/vmlinux ; then
+ vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux"
+ linux_suffix=""
+ linux_comp="none"
+ else
+ vmlinux_path="vmlinux"
+ linux_suffix=".gz"
+ linux_comp="gzip"
+ fi
+
+ ${OBJCOPY} -O binary -R .note -R .comment -S "${vmlinux_path}" linux.bin
+
+ if [ "${linux_comp}" != "none" ] ; then
+ gzip -9 linux.bin
+ mv -f "linux.bin${linux_suffix}" linux.bin
+ fi
+
+ echo "${linux_comp}"
+}
diff --git a/import-layers/yocto-poky/meta/classes/kernel-uimage.bbclass b/import-layers/yocto-poky/meta/classes/kernel-uimage.bbclass
new file mode 100644
index 000000000..f73965b13
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/kernel-uimage.bbclass
@@ -0,0 +1,36 @@
+inherit kernel-uboot
+
+python __anonymous () {
+ kerneltype = d.getVar('KERNEL_IMAGETYPE', True)
+ if kerneltype == 'uImage':
+ depends = d.getVar("DEPENDS", True)
+ depends = "%s u-boot-mkimage-native" % depends
+ d.setVar("DEPENDS", depends)
+
+ # Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
+ # to kernel.bbclass . We override the variable here, since we need
+ # to build uImage using the kernel build system if and only if
+ # KEEPUIMAGE == yes. Otherwise, we pack compressed vmlinux into
+ # the uImage .
+ if d.getVar("KEEPUIMAGE", True) != 'yes':
+ d.setVar("KERNEL_IMAGETYPE_FOR_MAKE", "zImage")
+}
+
+do_uboot_mkimage() {
+ if test "x${KERNEL_IMAGETYPE}" = "xuImage" ; then
+ if test "x${KEEPUIMAGE}" != "xyes" ; then
+ uboot_prep_kimage
+
+ ENTRYPOINT=${UBOOT_ENTRYPOINT}
+ if test -n "${UBOOT_ENTRYSYMBOL}"; then
+ ENTRYPOINT=`${HOST_PREFIX}nm ${S}/vmlinux | \
+ awk '$3=="${UBOOT_ENTRYSYMBOL}" {print $1}'`
+ fi
+
+ uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C "${linux_comp}" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin arch/${ARCH}/boot/uImage
+ rm -f linux.bin
+ fi
+ fi
+}
+
+addtask uboot_mkimage before do_install after do_compile
diff --git a/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass b/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass
new file mode 100644
index 000000000..f86b3ef01
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass
@@ -0,0 +1,376 @@
+# remove tasks that modify the source tree in case externalsrc is inherited
+SRCTREECOVEREDTASKS += "do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_shared_workdir do_fetch do_unpack do_patch"
+
+# returns local (absolute) path names for all valid patches in the
+# src_uri
+def find_patches(d):
+ patches = src_patches(d)
+ patch_list=[]
+ for p in patches:
+ _, _, local, _, _, _ = bb.fetch.decodeurl(p)
+ patch_list.append(local)
+
+ return patch_list
+
+# returns all the elements from the src uri that are .scc files
+def find_sccs(d):
+ sources=src_patches(d, True)
+ sources_list=[]
+ for s in sources:
+ base, ext = os.path.splitext(os.path.basename(s))
+ if ext and ext in [".scc", ".cfg"]:
+ sources_list.append(s)
+ elif base and base in 'defconfig':
+ sources_list.append(s)
+
+ return sources_list
+
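A hypothetical SRC_URI illustrating what the two helpers above pick out (the URL and file names are invented):

    SRC_URI = "git://git.example.com/linux-custom.git;branch=standard/base;name=machine \
               file://0001-fix-something.patch \
               file://extra-feature.scc \
               file://enable-widget.cfg \
               file://defconfig"

find_patches() returns the local path of the .patch file, while find_sccs() returns the .scc, .cfg and defconfig entries.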
+# check the SRC_URI for "kmeta" type'd git repositories. Return the name of
+# the repository as it will be found in WORKDIR
+def find_kernel_feature_dirs(d):
+ feature_dirs=[]
+ fetch = bb.fetch2.Fetch([], d)
+ for url in fetch.urls:
+ urldata = fetch.ud[url]
+ parm = urldata.parm
+ type=""
+ if "type" in parm:
+ type = parm["type"]
+ if "destsuffix" in parm:
+ destdir = parm["destsuffix"]
+ if type == "kmeta":
+ feature_dirs.append(destdir)
+
+ return feature_dirs
+
+# find the master/machine source branch. In the same way that the fetcher processes
+# git repositories in the SRC_URI, we take the first repo found, first branch.
+def get_machine_branch(d, default):
+ fetch = bb.fetch2.Fetch([], d)
+ for url in fetch.urls:
+ urldata = fetch.ud[url]
+ parm = urldata.parm
+ if "branch" in parm:
+ branches = urldata.parm.get("branch").split(',')
+ btype = urldata.parm.get("type")
+ if btype != "kmeta":
+ return branches[0]
+
+ return default
+
+do_kernel_metadata() {
+ set +e
+ cd ${S}
+ export KMETA=${KMETA}
+
+ # If kernel tools are available in-tree, they are preferred and are
+ # placed on the path before any external tools, unless the external
+ # tools flag is set, in which case we do nothing.
+ if [ -f "${S}/scripts/util/configme" ]; then
+ if [ -z "${EXTERNAL_KERNEL_TOOLS}" ]; then
+ PATH=${S}/scripts/util:${PATH}
+ fi
+ fi
+
+ machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
+ machine_srcrev="${SRCREV_machine}"
+ if [ -z "${machine_srcrev}" ]; then
+ # fall back to SRCREV if a non-machine_meta tree is being built
+ machine_srcrev="${SRCREV}"
+ fi
+
+ # In a similar manner to the kernel itself:
+ #
+ # defconfig: $(obj)/conf
+ # ifeq ($(KBUILD_DEFCONFIG),)
+ # $< --defconfig $(Kconfig)
+ # else
+ # @echo "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
+ # $(Q)$< --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
+ # endif
+ #
+ # If a defconfig is specified via the KBUILD_DEFCONFIG variable, we copy it
+ # from the source tree, into a common location and normalized "defconfig" name,
+ # where the rest of the process will include and incorporate it into the build.
+ #
+ # If the fetcher has already placed a defconfig in WORKDIR (from the SRC_URI),
+ # we don't overwrite it, but instead warn the user that SRC_URI defconfigs take
+ # precedence.
+ #
+ if [ -n "${KBUILD_DEFCONFIG}" ]; then
+ if [ -f "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" ]; then
+ if [ -f "${WORKDIR}/defconfig" ]; then
+ # If the two defconfigs differ, warn that we didn't overwrite the
+ # one already placed in WORKDIR by the fetcher.
+ cmp "${WORKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}"
+ if [ $? -ne 0 ]; then
+ bbwarn "defconfig detected in WORKDIR. ${KBUILD_DEFCONFIG} skipped"
+ fi
+ else
+ cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
+ sccs="${WORKDIR}/defconfig"
+ fi
+ else
+ bbfatal "A KBUILD_DECONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree"
+ fi
+ fi
+
+ sccs="$sccs ${@" ".join(find_sccs(d))}"
+ patches="${@" ".join(find_patches(d))}"
+ feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}"
+
+ # add any explicitly referenced features onto the end of the feature
+ # list that is passed to the kernel build scripts.
+ if [ -n "${KERNEL_FEATURES}" ]; then
+ for feat in ${KERNEL_FEATURES}; do
+ addon_features="$addon_features --feature $feat"
+ done
+ fi
+
+ # check for feature directories/repos/branches that were part of the
+ # SRC_URI. If they were supplied, we convert them into include directives
+ # for the update part of the process
+ if [ -n "${feat_dirs}" ]; then
+ for f in ${feat_dirs}; do
+ if [ -d "${WORKDIR}/$f/meta" ]; then
+ includes="$includes -I${WORKDIR}/$f/meta"
+ elif [ -d "${WORKDIR}/$f" ]; then
+ includes="$includes -I${WORKDIR}/$f"
+ fi
+ done
+ fi
+
+ # updates or generates the target description
+ updateme ${updateme_flags} -DKDESC=${KMACHINE}:${LINUX_KERNEL_TYPE} \
+ ${includes} ${addon_features} ${ARCH} ${KMACHINE} ${sccs} ${patches}
+ if [ $? -ne 0 ]; then
+ bbfatal_log "Could not update ${machine_branch}"
+ fi
+}
+
+do_patch() {
+ cd ${S}
+
+ # executes and modifies the source tree as required
+ patchme ${KMACHINE}
+ if [ $? -ne 0 ]; then
+ bberror "Could not apply patches for ${KMACHINE}."
+ bbfatal_log "Patch failures can be resolved in the linux source directory ${S})"
+ fi
+
+ # check to see if the specified SRCREV is reachable from the final branch.
+ # if it isn't, something has gone wrong and we should error out.
+ machine_srcrev="${SRCREV_machine}"
+ if [ -z "${machine_srcrev}" ]; then
+ # fall back to SRCREV if a non-machine_meta tree is being built
+ machine_srcrev="${SRCREV}"
+ # if SRCREV cannot be reached, something is wrong.
+ if [ -z "${machine_srcrev}" ]; then
+ bbfatal "Neither SRCREV_machine or SRCREV was specified!"
+ fi
+ fi
+
+ current_branch=`git rev-parse --abbrev-ref HEAD`
+ machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
+ if [ "${current_branch}" != "${machine_branch}" ]; then
+ bbwarn "After meta data application, the kernel tree branch is ${current_branch}. The"
+ bbwarn "SRC_URI specified branch ${machine_branch}. The branch will be forced to ${machine_branch},"
+ bbwarn "but this means the board meta data (.scc files) do not match the SRC_URI specification."
+ bbwarn "The meta data and branch ${machine_branch} should be inspected to ensure the proper"
+ bbwarn "kernel is being built."
+ git checkout -f ${machine_branch}
+ fi
+
+ if [ "${machine_srcrev}" != "AUTOINC" ]; then
+ if ! [ "$(git rev-parse --verify ${machine_srcrev}~0)" = "$(git merge-base ${machine_srcrev} HEAD)" ]; then
+ bberror "SRCREV ${machine_srcrev} was specified, but is not reachable"
+ bbfatal "Check the BSP description for incorrect branch selection, or other errors."
+ fi
+ fi
+}
+
+do_kernel_checkout() {
+ set +e
+
+ source_dir=`echo ${S} | sed 's%/$%%'`
+ source_workdir="${WORKDIR}/git"
+ if [ -d "${WORKDIR}/git/" ]; then
+ # case: git repository
+ # if S is WORKDIR/git, then we shouldn't be moving or deleting the tree.
+ if [ "${source_dir}" != "${source_workdir}" ]; then
+ if [ -d "${source_workdir}/.git" ]; then
+ # regular git repository with .git
+ rm -rf ${S}
+ mv ${WORKDIR}/git ${S}
+ else
+ # create source for bare cloned git repository
+ git clone ${WORKDIR}/git ${S}
+ rm -rf ${WORKDIR}/git
+ fi
+ fi
+ cd ${S}
+ else
+ # case: we have no git repository at all.
+ # To support low bandwidth options for building the kernel, we'll just
+ # convert the tree to a git repo and let the rest of the process work unchanged
+
+ # if ${S} hasn't been set to the proper subdirectory a default of "linux" is
+ # used, but we can't initialize that empty directory. So check it and throw a
+ # clear error
+
+ cd ${S}
+ if [ ! -f "Makefile" ]; then
+ bberror "S is not set to the linux source directory. Check "
+ bbfatal "the recipe and set S to the proper extracted subdirectory"
+ fi
+ rm -f .gitignore
+ git init
+ git add .
+ git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
+ git clean -d -f
+ fi
+
+ # convert any remote branches to local tracking ones
+ for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
+ b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
+ git show-ref --quiet --verify -- "refs/heads/$b"
+ if [ $? -ne 0 ]; then
+ git branch $b $i > /dev/null
+ fi
+ done
+
+ # Create a working tree copy of the kernel by checking out a branch
+ machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
+
+ # checkout and clobber any unimportant files
+ git checkout -f ${machine_branch}
+}
+do_kernel_checkout[dirs] = "${S}"
+
+addtask kernel_checkout before do_kernel_metadata after do_unpack
+addtask kernel_metadata after do_validate_branches do_unpack before do_patch
+do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot"
+
+do_kernel_configme[dirs] += "${S} ${B}"
+do_kernel_configme() {
+ bbnote "kernel configme"
+ export KMETA=${KMETA}
+
+ if [ -n "${KCONFIG_MODE}" ]; then
+ configmeflags=${KCONFIG_MODE}
+ else
+ # If a defconfig was passed, use =n as the baseline, which is achieved
+ # via --allnoconfig
+ if [ -f ${WORKDIR}/defconfig ]; then
+ configmeflags="--allnoconfig"
+ fi
+ fi
+
+ cd ${S}
+ PATH=${PATH}:${S}/scripts/util
+ configme ${configmeflags} --reconfig --output ${B} ${LINUX_KERNEL_TYPE} ${KMACHINE}
+ if [ $? -ne 0 ]; then
+ bbfatal_log "Could not configure ${KMACHINE}-${LINUX_KERNEL_TYPE}"
+ fi
+
+ echo "# Global settings from linux recipe" >> ${B}/.config
+ echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
+}
+
+addtask kernel_configme before do_configure after do_patch
+
+python do_kernel_configcheck() {
+ import re, string, sys
+
+ # if KMETA isn't set globally by a recipe using this routine, we need to
+ # set the default to 'meta'. Otherwise, kconf_check is not passed a valid
+ # meta-series for processing
+ kmeta = d.getVar( "KMETA", True ) or "meta"
+ if not os.path.exists(kmeta):
+ kmeta = "." + kmeta
+
+ pathprefix = "export PATH=%s:%s; " % (d.getVar('PATH', True), "${S}/scripts/util/")
+ cmd = d.expand("cd ${S}; kconf_check -config %s/meta-series ${S} ${B}" % kmeta)
+ ret, result = oe.utils.getstatusoutput("%s%s" % (pathprefix, cmd))
+
+ config_check_visibility = int(d.getVar( "KCONF_AUDIT_LEVEL", True ) or 0)
+ bsp_check_visibility = int(d.getVar( "KCONF_BSP_AUDIT_LEVEL", True ) or 0)
+
+ # if config check visibility is non-zero, report dropped configuration values
+ mismatch_file = "${S}/" + kmeta + "/" + "mismatch.cfg"
+ if os.path.exists(mismatch_file):
+ if config_check_visibility:
+ with open (mismatch_file, "r") as myfile:
+ results = myfile.read()
+ bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results)
+
+ # if config check visibility is level 2 or higher, report non-hardware options
+ nonhw_file = "${S}/" + kmeta + "/" + "nonhw_report.cfg"
+ if os.path.exists(nonhw_file):
+ if config_check_visibility > 1:
+ with open (nonhw_file, "r") as myfile:
+ results = myfile.read()
+ bb.warn( "[kernel config]: BSP specified non-hw configuration:\n\n%s" % results)
+
+ bsp_desc = "${S}/" + kmeta + "/" + "top_tgt"
+ if os.path.exists(bsp_desc) and bsp_check_visibility > 1:
+ with open (bsp_desc, "r") as myfile:
+ bsp_tgt = myfile.read()
+ m = re.match("^(.*)scratch.obj(.*)$", bsp_tgt)
+ if m is not None:
+ bb.warn( "[kernel]: An auto generated BSP description was used, this normally indicates a misconfiguration.\n" +
+ "Check that your machine (%s) has an associated kernel description." % "${MACHINE}" )
+}
+
+# Ensure that the branches (BSP and meta) are on the locations specified by
+# their SRCREV values. If they are NOT on the right commits, the branches
+# are corrected to the proper commit.
+do_validate_branches() {
+ set +e
+ cd ${S}
+
+ machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
+ machine_srcrev="${SRCREV_machine}"
+
+ # if SRCREV is AUTOREV, it shows up as AUTOINC; there's nothing to
+ # check and we can exit early
+ if [ "${machine_srcrev}" = "AUTOINC" ]; then
+ bbnote "SRCREV validation is not required for AUTOREV"
+ elif [ "${machine_srcrev}" = "" ]; then
+ if [ "${SRCREV}" != "AUTOINC" ] && [ "${SRCREV}" != "INVALID" ]; then
+ # SRCREV_machine_<MACHINE> was not set. This means that a custom recipe
+ # that doesn't use the SRCREV_FORMAT "machine_meta" is being built. In
+ # this case, we need to reset to the give SRCREV before heading to patching
+ bbnote "custom recipe is being built, forcing SRCREV to ${SRCREV}"
+ force_srcrev="${SRCREV}"
+ fi
+ else
+ git cat-file -t ${machine_srcrev} > /dev/null
+ if [ $? -ne 0 ]; then
+ bberror "${machine_srcrev} is not a valid commit ID."
+ bbfatal_log "The kernel source tree may be out of sync"
+ fi
+ force_srcrev=${machine_srcrev}
+ fi
+
+ git checkout -q -f ${machine_branch}
+ if [ -n "${force_srcrev}" ]; then
+ # see if the branch we are about to patch has been properly reset to the
+ # defined SRCREV; if not, we reset it.
+ branch_head=`git rev-parse HEAD`
+ if [ "${force_srcrev}" != "${branch_head}" ]; then
+ current_branch=`git rev-parse --abbrev-ref HEAD`
+ git branch "$current_branch-orig"
+ git reset --hard ${force_srcrev}
+ fi
+ fi
+}
+
+OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
+KBUILD_OUTPUT = "${B}"
+
+python () {
+ # If diffconfig is available, ensure it runs after kernel_configme
+ if 'do_diffconfig' in d:
+ bb.build.addtask('do_diffconfig', None, 'do_kernel_configme', d)
+}
diff --git a/import-layers/yocto-poky/meta/classes/kernel.bbclass b/import-layers/yocto-poky/meta/classes/kernel.bbclass
new file mode 100644
index 000000000..6e3e81e93
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/kernel.bbclass
@@ -0,0 +1,512 @@
+inherit linux-kernel-base kernel-module-split
+
+PROVIDES += "virtual/kernel"
+DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native depmodwrapper-cross bc-native"
+
+S = "${STAGING_KERNEL_DIR}"
+B = "${WORKDIR}/build"
+KBUILD_OUTPUT = "${B}"
+OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
+
+# we include gcc above, so we don't need virtual/libc
+INHIBIT_DEFAULT_DEPS = "1"
+
+KERNEL_IMAGETYPE ?= "zImage"
+INITRAMFS_IMAGE ?= ""
+INITRAMFS_TASK ?= ""
+INITRAMFS_IMAGE_BUNDLE ?= ""
+
+python __anonymous () {
+ import re
+
+ kerneltype = d.getVar('KERNEL_IMAGETYPE', True)
+
+ d.setVar("KERNEL_IMAGETYPE_FOR_MAKE", re.sub(r'\.gz$', '', kerneltype))
+
+ image = d.getVar('INITRAMFS_IMAGE', True)
+ if image:
+ d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+
+ # NOTE: setting INITRAMFS_TASK is for backward compatibility
+ # The preferred method is to set INITRAMFS_IMAGE, because
+ # this INITRAMFS_TASK has circular dependency problems
+ # if the initramfs requires kernel modules
+ image_task = d.getVar('INITRAMFS_TASK', True)
+ if image_task:
+ d.appendVarFlag('do_configure', 'depends', ' ${INITRAMFS_TASK}')
+}
+
+# Here we pull in all various kernel image types which we support.
+#
+# In case you're wondering why kernel.bbclass inherits the other image
+# types instead of the other way around, the reason for that is to
+# maintain compatibility with various currently existing meta-layers.
+# By pulling in the various kernel image types here, we retain the
+# original behavior of kernel.bbclass, so no meta-layers should get
+# broken.
+#
+# KERNEL_CLASSES by default pulls in kernel-uimage.bbclass, since this
+# used to be the default behavior when only uImage was supported. This
+# variable can be appended by users who implement support for new kernel
+# image types.
+
+KERNEL_CLASSES ?= " kernel-uimage "
+inherit ${KERNEL_CLASSES}
+
+# Old style kernels may set ${S} = ${WORKDIR}/git for example
+# We need to move these over to STAGING_KERNEL_DIR. We can't just
+# create the symlink in advance as the git fetcher can't cope with
+# the symlink.
+do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
+do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
+base_do_unpack_append () {
+ s = d.getVar("S", True)
+ if s[-1] == '/':
+ # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
+ s=s[:-1]
+ kernsrc = d.getVar("STAGING_KERNEL_DIR", True)
+ if s != kernsrc:
+ bb.utils.mkdirhier(kernsrc)
+ bb.utils.remove(kernsrc, recurse=True)
+ if d.getVar("EXTERNALSRC", True):
+ # With EXTERNALSRC S will not be wiped so we can symlink to it
+ os.symlink(s, kernsrc)
+ else:
+ import shutil
+ shutil.move(s, kernsrc)
+ os.symlink(kernsrc, s)
+}
+
+inherit kernel-arch deploy
+
+PACKAGES_DYNAMIC += "^kernel-module-.*"
+PACKAGES_DYNAMIC += "^kernel-image-.*"
+PACKAGES_DYNAMIC += "^kernel-firmware-.*"
+
+export OS = "${TARGET_OS}"
+export CROSS_COMPILE = "${TARGET_PREFIX}"
+
+KERNEL_PRIORITY ?= "${@int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[0]) * 10000 + \
+ int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[1]) * 100 + \
+ int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[-1])}"
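+# e.g. a PV of "4.1.8+gitAUTOINC" reduces to "4.1.8", giving a priority of
+# 4 * 10000 + 1 * 100 + 8 = 40108 (an illustrative value, not a default).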
+
+KERNEL_RELEASE ?= "${KERNEL_VERSION}"
+
+# Where the built kernel lives in the kernel tree
+KERNEL_OUTPUT ?= "arch/${ARCH}/boot/${KERNEL_IMAGETYPE}"
+KERNEL_IMAGEDEST = "boot"
+
+#
+# configuration
+#
+export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE",1) or "ttyS0"}"
+
+KERNEL_VERSION = "${@get_kernelversion_headers('${B}')}"
+
+KERNEL_LOCALVERSION ?= ""
+
+# kernels are generally machine specific
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+
+# U-Boot support
+UBOOT_ENTRYPOINT ?= "20008000"
+UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
+
+# Some Linux kernel configurations need additional parameters on the command line
+KERNEL_EXTRA_ARGS ?= ""
+
+# For the kernel, we don't want the '-e MAKEFLAGS=' in EXTRA_OEMAKE.
+# We don't want to override kernel Makefile variables from the environment
+EXTRA_OEMAKE = ""
+
+KERNEL_ALT_IMAGETYPE ??= ""
+
+# Define where the kernel headers are installed on the target as well as where
+# they are staged.
+KERNEL_SRC_PATH = "/usr/src/kernel"
+
+copy_initramfs() {
+ echo "Copying initramfs into ./usr ..."
+ # In case the directory is not created yet from the first pass compile:
+ mkdir -p ${B}/usr
+ # Find and use the first initramfs image archive type we find
+ rm -f ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
+ for img in cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz; do
+ if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img" ]; then
+ cp ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img ${B}/usr/.
+ case $img in
+ *gz)
+ echo "gzip decompressing image"
+ gunzip -f ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
+ break
+ ;;
+ *lz4)
+ echo "lz4 decompressing image"
+ lz4 -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
+ break
+ ;;
+ *lzo)
+ echo "lzo decompressing image"
+ lzop -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
+ break
+ ;;
+ *lzma)
+ echo "lzma decompressing image"
+ lzma -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
+ break
+ ;;
+ *xz)
+ echo "xz decompressing image"
+ xz -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
+ break
+ ;;
+ esac
+ fi
+ done
+ echo "Finished copy of initramfs into ./usr"
+}
+
+INITRAMFS_BASE_NAME = "${KERNEL_IMAGETYPE}-initramfs-${PV}-${PR}-${MACHINE}-${DATETIME}"
+INITRAMFS_BASE_NAME[vardepsexclude] = "DATETIME"
+do_bundle_initramfs () {
+ if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
+ echo "Creating a kernel image with a bundled initramfs..."
+ copy_initramfs
+ if [ -e ${KERNEL_OUTPUT} ] ; then
+ mv -f ${KERNEL_OUTPUT} ${KERNEL_OUTPUT}.bak
+ fi
+ use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
+ kernel_do_compile
+ mv -f ${KERNEL_OUTPUT} ${KERNEL_OUTPUT}.initramfs
+ mv -f ${KERNEL_OUTPUT}.bak ${KERNEL_OUTPUT}
+ # Update install area
+		echo "There is a kernel image bundled with initramfs: ${B}/${KERNEL_OUTPUT}.initramfs"
+ install -m 0644 ${B}/${KERNEL_OUTPUT}.initramfs ${D}/boot/${KERNEL_IMAGETYPE}-initramfs-${MACHINE}.bin
+ echo "${B}/${KERNEL_OUTPUT}.initramfs"
+ fi
+}
+
+python do_devshell_prepend () {
+ os.environ["LDFLAGS"] = ''
+}
+
+addtask bundle_initramfs after do_install before do_deploy
+
+kernel_do_compile() {
+ unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
+ # The $use_alternate_initrd is only set from
+	# do_bundle_initramfs(). This variable is specifically for the
+ # case where we are making a second pass at the kernel
+ # compilation and we want to force the kernel build to use a
+ # different initramfs image. The way to do that in the kernel
+ # is to specify:
+ # make ...args... CONFIG_INITRAMFS_SOURCE=some_other_initramfs.cpio
+ if [ "$use_alternate_initrd" = "" ] && [ "${INITRAMFS_TASK}" != "" ] ; then
+		# The old style way of copying a prebuilt image and building it
+		# is turned on via INITRAMFS_TASK != ""
+ copy_initramfs
+ use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
+ fi
+ oe_runmake ${KERNEL_IMAGETYPE_FOR_MAKE} ${KERNEL_ALT_IMAGETYPE} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
+ if test "${KERNEL_IMAGETYPE_FOR_MAKE}.gz" = "${KERNEL_IMAGETYPE}"; then
+ gzip -9c < "${KERNEL_IMAGETYPE_FOR_MAKE}" > "${KERNEL_OUTPUT}"
+ fi
+}
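+
+# e.g. if KERNEL_IMAGETYPE were "zImage.gz" (an illustrative value), make would
+# be asked for the "zImage" target and the gzip step above would produce
+# KERNEL_OUTPUT from it.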
+
+do_compile_kernelmodules() {
+ unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
+ if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
+ oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
+
+ # Module.symvers gets updated during the
+ # building of the kernel modules. We need to
+ # update this in the shared workdir since some
+		# external kernel modules have a dependency on
+ # other kernel modules and will look at this
+ # file to do symbol lookups
+ cp Module.symvers ${STAGING_KERNEL_BUILDDIR}/
+ else
+ bbnote "no modules to compile"
+ fi
+}
+addtask compile_kernelmodules after do_compile before do_strip
+
+kernel_do_install() {
+ #
+ # First install the modules
+ #
+ unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
+ if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
+ oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" modules_install
+ rm "${D}/lib/modules/${KERNEL_VERSION}/build"
+ rm "${D}/lib/modules/${KERNEL_VERSION}/source"
+ # If the kernel/ directory is empty remove it to prevent QA issues
+ rmdir --ignore-fail-on-non-empty "${D}/lib/modules/${KERNEL_VERSION}/kernel"
+ else
+ bbnote "no modules to install"
+ fi
+
+ #
+ # Install various kernel output (zImage, map file, config, module support files)
+ #
+ install -d ${D}/${KERNEL_IMAGEDEST}
+ install -d ${D}/boot
+ install -m 0644 ${KERNEL_OUTPUT} ${D}/${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}
+ install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
+ install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
+ install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION}
+ [ -e Module.symvers ] && install -m 0644 Module.symvers ${D}/boot/Module.symvers-${KERNEL_VERSION}
+ install -d ${D}${sysconfdir}/modules-load.d
+ install -d ${D}${sysconfdir}/modprobe.d
+}
+do_install[prefuncs] += "package_get_auto_pr"
+
+addtask shared_workdir after do_compile before do_compile_kernelmodules
+addtask shared_workdir_setscene
+
+do_shared_workdir_setscene () {
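+	# Always fail so the real do_shared_workdir task runs instead of a setscene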
+ exit 1
+}
+
+emit_depmod_pkgdata() {
+ # Stash data for depmod
+ install -d ${PKGDESTWORK}/kernel-depmod/
+ echo "${KERNEL_VERSION}" > ${PKGDESTWORK}/kernel-depmod/kernel-abiversion
+ cp ${B}/System.map ${PKGDESTWORK}/kernel-depmod/System.map-${KERNEL_VERSION}
+}
+
+PACKAGEFUNCS += "emit_depmod_pkgdata"
+
+do_shared_workdir () {
+ cd ${B}
+
+ kerneldir=${STAGING_KERNEL_BUILDDIR}
+ install -d $kerneldir
+
+ #
+ # Store the kernel version in sysroots for module-base.bbclass
+ #
+
+ echo "${KERNEL_VERSION}" > $kerneldir/kernel-abiversion
+
+ # Copy files required for module builds
+ cp System.map $kerneldir/System.map-${KERNEL_VERSION}
+ cp Module.symvers $kerneldir/
+ cp .config $kerneldir/
+ mkdir -p $kerneldir/include/config
+ cp include/config/kernel.release $kerneldir/include/config/kernel.release
+
+ # We can also copy over all the generated files and avoid special cases
+ # like version.h, but we've opted to keep this small until file creep starts
+ # to happen
+ if [ -e include/linux/version.h ]; then
+ mkdir -p $kerneldir/include/linux
+ cp include/linux/version.h $kerneldir/include/linux/version.h
+ fi
+
+ # As of Linux kernel version 3.0.1, the clean target removes
+ # arch/powerpc/lib/crtsavres.o which is present in
+ # KBUILD_LDFLAGS_MODULE, making it required to build external modules.
+ if [ ${ARCH} = "powerpc" ]; then
+ mkdir -p $kerneldir/arch/powerpc/lib/
+ cp arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
+ fi
+
+ if [ -d include/generated ]; then
+ mkdir -p $kerneldir/include/generated/
+ cp -fR include/generated/* $kerneldir/include/generated/
+ fi
+
+ if [ -d arch/${ARCH}/include/generated ]; then
+ mkdir -p $kerneldir/arch/${ARCH}/include/generated/
+ cp -fR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
+ fi
+}
+
+# We don't need to stage anything: neither the modules nor the firmware, since those would clash with linux-firmware
+sysroot_stage_all () {
+ :
+}
+
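+# Run the kbuild "oldnoconfig" target (renamed "olddefconfig" in later kernels)
+# so that new config symbols take their default values; if it is unavailable,
+# fall back to piping empty answers into "oldconfig", which also accepts the
+# defaults.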
+KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} O=${B} oldnoconfig || yes '' | oe_runmake -C ${S} O=${B} oldconfig"
+
+kernel_do_configure() {
+ # fixes extra + in /lib/modules/2.6.37+
+ # $ scripts/setlocalversion . => +
+ # $ make kernelversion => 2.6.37
+ # $ make kernelrelease => 2.6.37+
+ touch ${B}/.scmversion ${S}/.scmversion
+
+ if [ "${S}" != "${B}" ] && [ -f "${S}/.config" ] && [ ! -f "${B}/.config" ]; then
+ mv "${S}/.config" "${B}/.config"
+ fi
+
+ # Copy defconfig to .config if .config does not exist. This allows
+ # recipes to manage the .config themselves in do_configure_prepend().
+ if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
+ cp "${WORKDIR}/defconfig" "${B}/.config"
+ fi
+
+ ${KERNEL_CONFIG_COMMAND}
+}
+
+do_savedefconfig() {
+ oe_runmake -C ${B} savedefconfig
+}
+do_savedefconfig[nostamp] = "1"
+addtask savedefconfig after do_configure
+
+inherit cml1
+
+EXPORT_FUNCTIONS do_compile do_install do_configure
+
+# kernel-base becomes kernel-${KERNEL_VERSION}
+# kernel-image becomes kernel-image-${KERNEL_VERSION}
+PACKAGES = "kernel kernel-base kernel-vmlinux kernel-image kernel-dev kernel-modules"
+FILES_${PN} = ""
+FILES_kernel-base = "/lib/modules/${KERNEL_VERSION}/modules.order /lib/modules/${KERNEL_VERSION}/modules.builtin"
+FILES_kernel-image = "/boot/${KERNEL_IMAGETYPE}*"
+FILES_kernel-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} /lib/modules/${KERNEL_VERSION}/build"
+FILES_kernel-vmlinux = "/boot/vmlinux*"
+FILES_kernel-modules = ""
+RDEPENDS_kernel = "kernel-base"
+# Allow machines to override this dependency if kernel image files are
+# not wanted in images as standard
+RDEPENDS_kernel-base ?= "kernel-image"
+PKG_kernel-image = "kernel-image-${@legitimize_package_name('${KERNEL_VERSION}')}"
+RDEPENDS_kernel-image += "${@base_conditional('KERNEL_IMAGETYPE', 'vmlinux', 'kernel-vmlinux', '', d)}"
+PKG_kernel-base = "kernel-${@legitimize_package_name('${KERNEL_VERSION}')}"
+RPROVIDES_kernel-base += "kernel-${KERNEL_VERSION}"
+ALLOW_EMPTY_kernel = "1"
+ALLOW_EMPTY_kernel-base = "1"
+ALLOW_EMPTY_kernel-image = "1"
+ALLOW_EMPTY_kernel-modules = "1"
+DESCRIPTION_kernel-modules = "Kernel modules meta package"
+
+pkg_postinst_kernel-base () {
+ if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then
+ mkdir -p $D/lib/modules/${KERNEL_VERSION}
+ fi
+ if [ -n "$D" ]; then
+ depmodwrapper -a -b $D ${KERNEL_VERSION}
+ else
+ depmod -a ${KERNEL_VERSION}
+ fi
+}
+
+pkg_postinst_kernel-image () {
+ update-alternatives --install /${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE} /${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION} ${KERNEL_PRIORITY} || true
+}
+
+pkg_postrm_kernel-image () {
+ update-alternatives --remove ${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE}-${KERNEL_VERSION} || true
+}
+
+PACKAGESPLITFUNCS_prepend = "split_kernel_packages "
+
+python split_kernel_packages () {
+ do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
+}
+
+# Many scripts want to look in arch/$arch/boot for the bootable
+# image. This poses a problem for vmlinux based booting. This
+# task arranges to have vmlinux appear in the normalized directory
+# location.
+do_kernel_link_vmlinux() {
+ if [ ! -d "${B}/arch/${ARCH}/boot" ]; then
+ mkdir ${B}/arch/${ARCH}/boot
+ fi
+ cd ${B}/arch/${ARCH}/boot
+ ln -sf ../../../vmlinux
+}
+
+do_strip() {
+ if [ -n "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}" ]; then
+ if [ "${KERNEL_IMAGETYPE}" != "vmlinux" ]; then
+ bbwarn "image type will not be stripped (not supported): ${KERNEL_IMAGETYPE}"
+ return
+ fi
+
+ cd ${B}
+ headers=`"$CROSS_COMPILE"readelf -S ${KERNEL_OUTPUT} | \
+ grep "^ \{1,\}\[[0-9 ]\{1,\}\] [^ ]" | \
+ sed "s/^ \{1,\}\[[0-9 ]\{1,\}\] //" | \
+ gawk '{print $1}'`
+
+ for str in ${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}; do {
+ if ! (echo "$headers" | grep -q "^$str$"); then
+ bbwarn "Section not found: $str";
+ fi
+
+ "$CROSS_COMPILE"strip -s -R $str ${KERNEL_OUTPUT}
+ }; done
+
+ bbnote "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections:" \
+ "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}"
+ fi;
+}
+do_strip[dirs] = "${B}"
+
+addtask do_strip before do_sizecheck after do_kernel_link_vmlinux
+
+# Support checking the kernel size since some kernels need to reside in partitions
+# with a fixed length, or there is a limit on transferring the kernel to memory
+do_sizecheck() {
+ if [ ! -z "${KERNEL_IMAGE_MAXSIZE}" ]; then
+ invalid=`echo ${KERNEL_IMAGE_MAXSIZE} | sed 's/[0-9]//g'`
+ if [ -n "$invalid" ]; then
+			die "Invalid KERNEL_IMAGE_MAXSIZE: ${KERNEL_IMAGE_MAXSIZE}, it should be an integer (the unit is Kbytes)"
+ fi
+ size=`du -ks ${B}/${KERNEL_OUTPUT} | awk '{ print $1}'`
+ if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
+ die "This kernel (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device. Please reduce the size of the kernel by making more of it modular."
+ fi
+ fi
+}
+do_sizecheck[dirs] = "${B}"
+
+addtask sizecheck before do_install after do_strip
+
+KERNEL_IMAGE_BASE_NAME ?= "${KERNEL_IMAGETYPE}-${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}"
+# Don't include the DATETIME variable in the sstate package signatures
+KERNEL_IMAGE_BASE_NAME[vardepsexclude] = "DATETIME"
+KERNEL_IMAGE_SYMLINK_NAME ?= "${KERNEL_IMAGETYPE}-${MACHINE}"
+MODULE_IMAGE_BASE_NAME ?= "modules-${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}"
+MODULE_IMAGE_BASE_NAME[vardepsexclude] = "DATETIME"
+MODULE_TARBALL_BASE_NAME ?= "${MODULE_IMAGE_BASE_NAME}.tgz"
+# Don't include the DATETIME variable in the sstate package signatures
+MODULE_TARBALL_SYMLINK_NAME ?= "modules-${MACHINE}.tgz"
+MODULE_TARBALL_DEPLOY ?= "1"
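+# e.g. MODULE_TARBALL_BASE_NAME might expand to something like
+# "modules-1.0-r0-qemux86-20160301120000.tgz" (an illustrative expansion).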
+
+kernel_do_deploy() {
+ install -m 0644 ${KERNEL_OUTPUT} ${DEPLOYDIR}/${KERNEL_IMAGE_BASE_NAME}.bin
+ if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
+ mkdir -p ${D}/lib
+ tar -cvzf ${DEPLOYDIR}/${MODULE_TARBALL_BASE_NAME} -C ${D} lib
+ ln -sf ${MODULE_TARBALL_BASE_NAME} ${DEPLOYDIR}/${MODULE_TARBALL_SYMLINK_NAME}
+ fi
+
+ ln -sf ${KERNEL_IMAGE_BASE_NAME}.bin ${DEPLOYDIR}/${KERNEL_IMAGE_SYMLINK_NAME}.bin
+ ln -sf ${KERNEL_IMAGE_BASE_NAME}.bin ${DEPLOYDIR}/${KERNEL_IMAGETYPE}
+
+ cp ${COREBASE}/meta/files/deploydir_readme.txt ${DEPLOYDIR}/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt
+
+ cd ${B}
+ # Update deploy directory
+ if [ -e "${KERNEL_OUTPUT}.initramfs" ]; then
+ echo "Copying deploy kernel-initramfs image and setting up links..."
+ initramfs_base_name=${INITRAMFS_BASE_NAME}
+ initramfs_symlink_name=${KERNEL_IMAGETYPE}-initramfs-${MACHINE}
+ install -m 0644 ${KERNEL_OUTPUT}.initramfs ${DEPLOYDIR}/${initramfs_base_name}.bin
+ cd ${DEPLOYDIR}
+ ln -sf ${initramfs_base_name}.bin ${initramfs_symlink_name}.bin
+ fi
+}
+do_deploy[cleandirs] = "${DEPLOYDIR}"
+do_deploy[dirs] = "${DEPLOYDIR} ${B}"
+do_deploy[prefuncs] += "package_get_auto_pr"
+
+addtask deploy after do_populate_sysroot
+
+EXPORT_FUNCTIONS do_deploy
+
diff --git a/import-layers/yocto-poky/meta/classes/kernelsrc.bbclass b/import-layers/yocto-poky/meta/classes/kernelsrc.bbclass
new file mode 100644
index 000000000..9efd46a92
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/kernelsrc.bbclass
@@ -0,0 +1,10 @@
+S = "${STAGING_KERNEL_DIR}"
+do_fetch[noexec] = "1"
+do_unpack[depends] += "virtual/kernel:do_patch"
+do_unpack[noexec] = "1"
+do_patch[noexec] = "1"
+do_package[depends] += "virtual/kernel:do_populate_sysroot"
+KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}"
+
+inherit linux-kernel-base
+
diff --git a/import-layers/yocto-poky/meta/classes/lib_package.bbclass b/import-layers/yocto-poky/meta/classes/lib_package.bbclass
new file mode 100644
index 000000000..8849f5904
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/lib_package.bbclass
@@ -0,0 +1,7 @@
+#
+# ${PN}-bin is defined in bitbake.conf
+#
+# We need to allow the other packages to be greedy with what they
+# want out of /usr/bin and /usr/sbin before ${PN}-bin gets greedy.
+#
+PACKAGE_BEFORE_PN = "${PN}-bin"
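+#
+# A usage sketch: a library recipe that also ships helper binaries can
+# "inherit lib_package" so those binaries are split into ${PN}-bin instead
+# of being swept into ${PN}.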
diff --git a/import-layers/yocto-poky/meta/classes/libc-common.bbclass b/import-layers/yocto-poky/meta/classes/libc-common.bbclass
new file mode 100644
index 000000000..bbc80167d
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/libc-common.bbclass
@@ -0,0 +1,43 @@
+do_install() {
+ oe_runmake install_root=${D} install
+ for r in ${rpcsvc}; do
+ h=`echo $r|sed -e's,\.x$,.h,'`
+ install -m 0644 ${S}/sunrpc/rpcsvc/$h ${D}/${includedir}/rpcsvc/
+ done
+ install -d ${D}/${sysconfdir}/
+ install -m 0644 ${WORKDIR}/etc/ld.so.conf ${D}/${sysconfdir}/
+ install -d ${D}${localedir}
+ make -f ${WORKDIR}/generate-supported.mk IN="${S}/localedata/SUPPORTED" OUT="${WORKDIR}/SUPPORTED"
+ # get rid of some broken files...
+ for i in ${GLIBC_BROKEN_LOCALES}; do
+ grep -v $i ${WORKDIR}/SUPPORTED > ${WORKDIR}/SUPPORTED.tmp
+ mv ${WORKDIR}/SUPPORTED.tmp ${WORKDIR}/SUPPORTED
+ done
+ rm -f ${D}${sysconfdir}/rpc
+ rm -rf ${D}${datadir}/zoneinfo
+ rm -rf ${D}${libexecdir}/getconf
+}
+
+def get_libc_fpu_setting(bb, d):
+ if d.getVar('TARGET_FPU', True) in [ 'soft', 'ppc-efd' ]:
+ return "--without-fp"
+ return ""
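+
+# A usage sketch (assumed caller, not defined in this class): a libc recipe
+# could pass the result to configure via
+#   EXTRA_OECONF += "${@get_libc_fpu_setting(bb, d)}"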
+
+python populate_packages_prepend () {
+ if d.getVar('DEBIAN_NAMES', True):
+ pkgs = d.getVar('PACKAGES', True).split()
+ bpn = d.getVar('BPN', True)
+ prefix = d.getVar('MLPREFIX', True) or ""
+ # Set the base package...
+ d.setVar('PKG_' + prefix + bpn, prefix + 'libc6')
+ libcprefix = prefix + bpn + '-'
+ for p in pkgs:
+ # And all the subpackages.
+ if p.startswith(libcprefix):
+ renamed = p.replace(bpn, 'libc6', 1)
+ d.setVar('PKG_' + p, renamed)
+ # For backward compatibility with old -dbg package
+ d.appendVar('RPROVIDES_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg')
+ d.appendVar('RCONFLICTS_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg')
+ d.appendVar('RREPLACES_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg')
+}
diff --git a/import-layers/yocto-poky/meta/classes/libc-package.bbclass b/import-layers/yocto-poky/meta/classes/libc-package.bbclass
new file mode 100644
index 000000000..467d56792
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/libc-package.bbclass
@@ -0,0 +1,391 @@
+#
+# This class knows how to package up [e]glibc. It's shared since prebuilt binary toolchains
+# may need packaging and it's pointless to duplicate this code.
+#
+# Caller should set GLIBC_INTERNAL_USE_BINARY_LOCALE to one of:
+# "compile" - Use QEMU to generate the binary locale files
+# "precompiled" - The binary locale files are pregenerated and already present
+# "ondevice" - The device will build the locale files upon first boot through the postinst
+
+GLIBC_INTERNAL_USE_BINARY_LOCALE ?= "ondevice"
+
+python __anonymous () {
+ enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION", True)
+
+ pn = d.getVar("PN", True)
+ if pn.endswith("-initial"):
+ enabled = False
+
+ if enabled and int(enabled):
+ import re
+
+ target_arch = d.getVar("TARGET_ARCH", True)
+ binary_arches = d.getVar("BINARY_LOCALE_ARCHES", True) or ""
+ use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or ""
+
+ for regexp in binary_arches.split(" "):
+ r = re.compile(regexp)
+
+ if r.match(target_arch):
+ depends = d.getVar("DEPENDS", True)
+ if use_cross_localedef == "1" :
+ depends = "%s cross-localedef-native" % depends
+ else:
+ depends = "%s qemu-native" % depends
+ d.setVar("DEPENDS", depends)
+ d.setVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", "compile")
+ break
+
+    # Try to fix the compile failures seen when charsets/locales/locale-code are disabled
+ if bb.utils.contains('DISTRO_FEATURES', 'libc-charsets', True, False, d) and \
+ bb.utils.contains('DISTRO_FEATURES', 'libc-locales', True, False, d) and \
+ bb.utils.contains('DISTRO_FEATURES', 'libc-locale-code', True, False, d):
+ d.setVar('PACKAGE_NO_GCONV', '0')
+ else:
+ d.setVar('PACKAGE_NO_GCONV', '1')
+}
+
+OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
+
+do_configure_prepend() {
+ if [ -e ${S}/elf/ldd.bash.in ]; then
+ sed -e "s#@BASH@#/bin/sh#" -i ${S}/elf/ldd.bash.in
+ fi
+}
+
+
+
+# indentation removed on purpose
+locale_base_postinst() {
+#!/bin/sh
+
+if [ "x$D" != "x" ]; then
+ exit 1
+fi
+
+rm -rf ${TMP_LOCALE}
+mkdir -p ${TMP_LOCALE}
+if [ -f ${localedir}/locale-archive ]; then
+ cp ${localedir}/locale-archive ${TMP_LOCALE}/
+fi
+localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s --prefix=/tmp/locale %s
+mkdir -p ${localedir}/
+mv ${TMP_LOCALE}/locale-archive ${localedir}/
+rm -rf ${TMP_LOCALE}
+}
+
+# indentation removed on purpose
+locale_base_postrm() {
+#!/bin/sh
+
+rm -rf ${TMP_LOCALE}
+mkdir -p ${TMP_LOCALE}
+if [ -f ${localedir}/locale-archive ]; then
+ cp ${localedir}/locale-archive ${TMP_LOCALE}/
+fi
+localedef --delete-from-archive --inputfile=${datadir}/locales/%s --charmap=%s --prefix=/tmp/locale %s
+mv ${TMP_LOCALE}/locale-archive ${localedir}/
+rm -rf ${TMP_LOCALE}
+}
+
+
+TMP_LOCALE="/tmp/locale${localedir}"
+LOCALETREESRC ?= "${PKGD}"
+
+do_prep_locale_tree() {
+ treedir=${WORKDIR}/locale-tree
+ rm -rf $treedir
+ mkdir -p $treedir/${base_bindir} $treedir/${base_libdir} $treedir/${datadir} $treedir/${localedir}
+ tar -cf - -C ${LOCALETREESRC}${datadir} -p i18n | tar -xf - -C $treedir/${datadir}
+	# gunzip the charmaps to avoid parsing errors
+ for i in $treedir/${datadir}/i18n/charmaps/*gz; do
+ gunzip $i
+ done
+ tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . | tar -xf - -C $treedir/${base_libdir}
+ if [ -f ${STAGING_DIR_NATIVE}${prefix_native}/lib/libgcc_s.* ]; then
+ tar -cf - -C ${STAGING_DIR_NATIVE}/${prefix_native}/${base_libdir} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
+ fi
+ install -m 0755 ${LOCALETREESRC}${bindir}/localedef $treedir/${base_bindir}
+}
+
+do_collect_bins_from_locale_tree() {
+ treedir=${WORKDIR}/locale-tree
+
+ parent=$(dirname ${localedir})
+ mkdir -p ${PKGD}/$parent
+ tar -cf - -C $treedir/$parent -p $(basename ${localedir}) | tar -xf - -C ${PKGD}$parent
+}
+
+inherit qemu
+
+python package_do_split_gconvs () {
+ import re
+ if (d.getVar('PACKAGE_NO_GCONV', True) == '1'):
+ bb.note("package requested not splitting gconvs")
+ return
+
+ if not d.getVar('PACKAGES', True):
+ return
+
+ mlprefix = d.getVar("MLPREFIX", True) or ""
+
+ bpn = d.getVar('BPN', True)
+ libdir = d.getVar('libdir', True)
+ if not libdir:
+ bb.error("libdir not defined")
+ return
+ datadir = d.getVar('datadir', True)
+ if not datadir:
+ bb.error("datadir not defined")
+ return
+
+ gconv_libdir = base_path_join(libdir, "gconv")
+ charmap_dir = base_path_join(datadir, "i18n", "charmaps")
+ locales_dir = base_path_join(datadir, "i18n", "locales")
+ binary_locales_dir = d.getVar('localedir', True)
+
+ def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group):
+ deps = []
+ f = open(fn, "rb")
+ c_re = re.compile('^copy "(.*)"')
+ i_re = re.compile('^include "(\w+)".*')
+ for l in f.readlines():
+ m = c_re.match(l) or i_re.match(l)
+ if m:
+ dp = legitimize_package_name('%s%s-gconv-%s' % (mlprefix, bpn, m.group(1)))
+ if not dp in deps:
+ deps.append(dp)
+ f.close()
+ if deps != []:
+ d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
+ if bpn != 'glibc':
+ d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
+
+ do_split_packages(d, gconv_libdir, file_regex='^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
+ description='gconv module for character set %s', hook=calc_gconv_deps, \
+ extra_depends=bpn+'-gconv')
+
+ def calc_charmap_deps(fn, pkg, file_regex, output_pattern, group):
+ deps = []
+ f = open(fn, "rb")
+ c_re = re.compile('^copy "(.*)"')
+ i_re = re.compile('^include "(\w+)".*')
+ for l in f.readlines():
+ m = c_re.match(l) or i_re.match(l)
+ if m:
+ dp = legitimize_package_name('%s%s-charmap-%s' % (mlprefix, bpn, m.group(1)))
+ if not dp in deps:
+ deps.append(dp)
+ f.close()
+ if deps != []:
+ d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
+ if bpn != 'glibc':
+ d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
+
+ do_split_packages(d, charmap_dir, file_regex='^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
+ description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='')
+
+ def calc_locale_deps(fn, pkg, file_regex, output_pattern, group):
+ deps = []
+ f = open(fn, "rb")
+ c_re = re.compile('^copy "(.*)"')
+ i_re = re.compile('^include "(\w+)".*')
+ for l in f.readlines():
+ m = c_re.match(l) or i_re.match(l)
+ if m:
+ dp = legitimize_package_name(mlprefix+bpn+'-localedata-%s' % m.group(1))
+ if not dp in deps:
+ deps.append(dp)
+ f.close()
+ if deps != []:
+ d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
+ if bpn != 'glibc':
+ d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
+
+ do_split_packages(d, locales_dir, file_regex='(.*)', output_pattern=bpn+'-localedata-%s', \
+ description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
+ d.setVar('PACKAGES', d.getVar('PACKAGES', False) + ' ' + d.getVar('MLPREFIX', False) + bpn + '-gconv')
+
+ use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", True)
+
+ dot_re = re.compile("(.*)\.(.*)")
+
+ # Read in supported locales and associated encodings
+ supported = {}
+ with open(base_path_join(d.getVar('WORKDIR', True), "SUPPORTED")) as f:
+ for line in f.readlines():
+ try:
+ locale, charset = line.rstrip().split()
+ except ValueError:
+ continue
+ supported[locale] = charset
+
+    # The GLIBC_GENERATE_LOCALES variable specifies which locales are to be generated. Empty or "all" means all locales.
+ to_generate = d.getVar('GLIBC_GENERATE_LOCALES', True)
+ if not to_generate or to_generate == 'all':
+ to_generate = supported.keys()
+ else:
+ to_generate = to_generate.split()
+ for locale in to_generate:
+ if locale not in supported:
+ if '.' in locale:
+ charset = locale.split('.')[1]
+ else:
+ charset = 'UTF-8'
+ bb.warn("Unsupported locale '%s', assuming encoding '%s'" % (locale, charset))
+ supported[locale] = charset
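+
+    # e.g. GLIBC_GENERATE_LOCALES = "en_US.UTF-8 de_DE.UTF-8" in a distro or
+    # local config (a usage sketch) limits generation to just those locales.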
+
+ def output_locale_source(name, pkgname, locale, encoding):
+ d.setVar('RDEPENDS_%s' % pkgname, '%slocaledef %s-localedata-%s %s-charmap-%s' % \
+ (mlprefix, mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
+ d.setVar('pkg_postinst_%s' % pkgname, d.getVar('locale_base_postinst', True) \
+ % (locale, encoding, locale))
+ d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm', True) % \
+ (locale, encoding, locale))
+
+ def output_locale_binary_rdepends(name, pkgname, locale, encoding):
+ m = re.match("(.*)\.(.*)", name)
+ if m:
+ libc_name = "%s.%s" % (m.group(1), m.group(2).lower())
+ else:
+ libc_name = name
+ d.setVar('RDEPENDS_%s' % pkgname, legitimize_package_name('%s-binary-localedata-%s' \
+ % (mlprefix+bpn, libc_name)))
+
+ commands = {}
+
+ def output_locale_binary(name, pkgname, locale, encoding):
+ treedir = base_path_join(d.getVar("WORKDIR", True), "locale-tree")
+ ldlibdir = base_path_join(treedir, d.getVar("base_libdir", True))
+ path = d.getVar("PATH", True)
+ i18npath = base_path_join(treedir, datadir, "i18n")
+ gconvpath = base_path_join(treedir, "iconvdata")
+ outputpath = base_path_join(treedir, binary_locales_dir)
+
+ use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or "0"
+ if use_cross_localedef == "1":
+ target_arch = d.getVar('TARGET_ARCH', True)
+ locale_arch_options = { \
+ "arm": " --uint32-align=4 --little-endian ", \
+ "armeb": " --uint32-align=4 --big-endian ", \
+ "aarch64": " --uint32-align=4 --little-endian ", \
+ "aarch64_be": " --uint32-align=4 --big-endian ", \
+ "sh4": " --uint32-align=4 --big-endian ", \
+ "powerpc": " --uint32-align=4 --big-endian ", \
+ "powerpc64": " --uint32-align=4 --big-endian ", \
+ "mips": " --uint32-align=4 --big-endian ", \
+ "mips64": " --uint32-align=4 --big-endian ", \
+ "mipsel": " --uint32-align=4 --little-endian ", \
+ "mips64el":" --uint32-align=4 --little-endian ", \
+ "i586": " --uint32-align=4 --little-endian ", \
+ "i686": " --uint32-align=4 --little-endian ", \
+ "x86_64": " --uint32-align=4 --little-endian " }
+
+ if target_arch in locale_arch_options:
+ localedef_opts = locale_arch_options[target_arch]
+ else:
+ bb.error("locale_arch_options not found for target_arch=" + target_arch)
+ raise bb.build.FuncFailed("unknown arch:" + target_arch + " for locale_arch_options")
+
+ localedef_opts += " --force --old-style --no-archive --prefix=%s \
+ --inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s" \
+ % (treedir, treedir, datadir, locale, encoding, outputpath, name)
+
+ cmd = "PATH=\"%s\" I18NPATH=\"%s\" GCONV_PATH=\"%s\" cross-localedef %s" % \
+ (path, i18npath, gconvpath, localedef_opts)
+ else: # earlier slower qemu way
+ qemu = qemu_target_binary(d)
+ localedef_opts = "--force --old-style --no-archive --prefix=%s \
+ --inputfile=%s/i18n/locales/%s --charmap=%s %s" \
+ % (treedir, datadir, locale, encoding, name)
+
+ qemu_options = d.getVar('QEMU_OPTIONS', True)
+
+ cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
+ -E LD_LIBRARY_PATH=%s %s %s/bin/localedef %s" % \
+ (path, i18npath, qemu, treedir, ldlibdir, qemu_options, treedir, localedef_opts)
+
+ commands["%s/%s" % (outputpath, name)] = cmd
+
+ bb.note("generating locale %s (%s)" % (locale, encoding))
+
+ def output_locale(name, locale, encoding):
+ pkgname = d.getVar('MLPREFIX', False) + 'locale-base-' + legitimize_package_name(name)
+ d.setVar('ALLOW_EMPTY_%s' % pkgname, '1')
+ d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES', True)))
+ rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name))
+ m = re.match("(.*)_(.*)", name)
+ if m:
+ rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1))
+ d.setVar('RPROVIDES_%s' % pkgname, rprovides)
+
+ if use_bin == "compile":
+ output_locale_binary_rdepends(name, pkgname, locale, encoding)
+ output_locale_binary(name, pkgname, locale, encoding)
+ elif use_bin == "precompiled":
+ output_locale_binary_rdepends(name, pkgname, locale, encoding)
+ else:
+ output_locale_source(name, pkgname, locale, encoding)
+
+ if use_bin == "compile":
+ bb.note("preparing tree for binary locale generation")
+ bb.build.exec_func("do_prep_locale_tree", d)
+
+ utf8_only = int(d.getVar('LOCALE_UTF8_ONLY', True) or 0)
+ utf8_is_default = int(d.getVar('LOCALE_UTF8_IS_DEFAULT', True) or 0)
+
+ encodings = {}
+ for locale in to_generate:
+ charset = supported[locale]
+ if utf8_only and charset != 'UTF-8':
+ continue
+
+ m = dot_re.match(locale)
+ if m:
+ base = m.group(1)
+ else:
+ base = locale
+
+ # Non-precompiled locales may be renamed so that the default
+ # (non-suffixed) encoding is always UTF-8, i.e., instead of en_US and
+ # en_US.UTF-8, we have en_US and en_US.ISO-8859-1. This implicitly
+ # contradicts SUPPORTED.
+ if use_bin == "precompiled" or not utf8_is_default:
+ output_locale(locale, base, charset)
+ else:
+ if charset == 'UTF-8':
+ output_locale(base, base, charset)
+ else:
+ output_locale('%s.%s' % (base, charset), base, charset)
+
+ if use_bin == "compile":
+ makefile = base_path_join(d.getVar("WORKDIR", True), "locale-tree", "Makefile")
+ m = open(makefile, "w")
+ m.write("all: %s\n\n" % " ".join(commands.keys()))
+ for cmd in commands:
+ m.write(cmd + ":\n")
+ m.write("\t" + commands[cmd] + "\n\n")
+ m.close()
+ d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile)))
+ bb.note("Executing binary locale generation makefile")
+ bb.build.exec_func("oe_runmake", d)
+ bb.note("collecting binary locales from locale tree")
+ bb.build.exec_func("do_collect_bins_from_locale_tree", d)
+ do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
+ output_pattern=bpn+'-binary-localedata-%s', \
+ description='binary locale definition for %s', extra_depends='', allow_dirs=True)
+ elif use_bin == "precompiled":
+ do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
+ output_pattern=bpn+'-binary-localedata-%s', \
+ description='binary locale definition for %s', extra_depends='', allow_dirs=True)
+ else:
+        bb.note("Generation of binary locales disabled. This may break i18n!")
+
+}
+
+# We want to do this indirection so that we can safely 'return'
+# from the called function even though we're prepending
+python populate_packages_prepend () {
+ bb.build.exec_func('package_do_split_gconvs', d)
+}
+
diff --git a/import-layers/yocto-poky/meta/classes/license.bbclass b/import-layers/yocto-poky/meta/classes/license.bbclass
new file mode 100644
index 000000000..43944e6ee
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/license.bbclass
@@ -0,0 +1,664 @@
+# Populates LICENSE_DIRECTORY as set in distro config with the license files as set by
+# LIC_FILES_CHKSUM.
+# TODO:
+# - There is a real issue revolving around license naming standards.
+
+LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses"
+LICSSTATEDIR = "${WORKDIR}/license-destdir/"
+
+# Create extra package with license texts and add it to RRECOMMENDS_${PN}
+LICENSE_CREATE_PACKAGE[type] = "boolean"
+LICENSE_CREATE_PACKAGE ??= "0"
+LICENSE_PACKAGE_SUFFIX ??= "-lic"
+LICENSE_FILES_DIRECTORY ??= "${datadir}/licenses/"
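+
+# Sketch: with LICENSE_CREATE_PACKAGE = "1" in a distro or local config, a
+# recipe "foo" gains a "foo-lic" package carrying its license texts, which is
+# then added to RRECOMMENDS_foo.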
+
+addtask populate_lic after do_patch before do_build
+do_populate_lic[dirs] = "${LICSSTATEDIR}/${PN}"
+do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
+
+python write_package_manifest() {
+ # Get list of installed packages
+ license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
+ bb.utils.mkdirhier(license_image_dir)
+ from oe.rootfs import image_list_installed_packages
+ from oe.utils import format_pkg_list
+
+ pkgs = image_list_installed_packages(d)
+ output = format_pkg_list(pkgs)
+    with open(os.path.join(license_image_dir, 'package.manifest'),
+              'w+') as manifest_file:
+        manifest_file.write(output)
+}
+
+python write_deploy_manifest() {
+ license_deployed_manifest(d)
+}
+
+python license_create_manifest() {
+ import oe.packagedata
+ from oe.rootfs import image_list_installed_packages
+
+ build_images_from_feeds = d.getVar('BUILD_IMAGES_FROM_FEEDS', True)
+ if build_images_from_feeds == "1":
+ return 0
+
+ pkg_dic = {}
+ for pkg in sorted(image_list_installed_packages(d)):
+ pkg_info = os.path.join(d.getVar('PKGDATA_DIR', True),
+ 'runtime-reverse', pkg)
+ pkg_name = os.path.basename(os.readlink(pkg_info))
+
+ pkg_dic[pkg_name] = oe.packagedata.read_pkgdatafile(pkg_info)
+ if not "LICENSE" in pkg_dic[pkg_name].keys():
+ pkg_lic_name = "LICENSE_" + pkg_name
+ pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name]
+
+ rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
+ d.getVar('IMAGE_NAME', True), 'license.manifest')
+ write_license_files(d, rootfs_license_manifest, pkg_dic)
+}
+
+def write_license_files(d, license_manifest, pkg_dic):
+ import re
+
+ bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE", True) or "").split()
+ bad_licenses = map(lambda l: canonical_license(d, l), bad_licenses)
+ bad_licenses = expand_wildcard_licenses(d, bad_licenses)
+
+ with open(license_manifest, "w") as license_file:
+ for pkg in sorted(pkg_dic):
+ if bad_licenses:
+ try:
+ (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
+ oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
+ bad_licenses, canonical_license, d)
+ except oe.license.LicenseError as exc:
+ bb.fatal('%s: %s' % (d.getVar('P', True), exc))
+ else:
+ pkg_dic[pkg]["LICENSES"] = re.sub('[|&()*]', ' ', pkg_dic[pkg]["LICENSE"])
+ pkg_dic[pkg]["LICENSES"] = re.sub(' *', ' ', pkg_dic[pkg]["LICENSES"])
+ pkg_dic[pkg]["LICENSES"] = pkg_dic[pkg]["LICENSES"].split()
+
+ if not "IMAGE_MANIFEST" in pkg_dic[pkg]:
+ # Rootfs manifest
+ license_file.write("PACKAGE NAME: %s\n" % pkg)
+ license_file.write("PACKAGE VERSION: %s\n" % pkg_dic[pkg]["PV"])
+ license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
+ license_file.write("LICENSE: %s\n\n" % pkg_dic[pkg]["LICENSE"])
+
+ # If the package doesn't contain any file, that is, its size is 0, the license
+                # isn't relevant as far as the final image is concerned. So doing a license
+                # check doesn't make much sense; skip it.
+ if pkg_dic[pkg]["PKGSIZE_%s" % pkg] == "0":
+ continue
+ else:
+ # Image manifest
+ license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
+ license_file.write("VERSION: %s\n" % pkg_dic[pkg]["PV"])
+ license_file.write("LICENSE: %s\n" % pkg_dic[pkg]["LICENSE"])
+ license_file.write("FILES: %s\n\n" % pkg_dic[pkg]["FILES"])
+
+ for lic in pkg_dic[pkg]["LICENSES"]:
+ lic_file = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
+ pkg_dic[pkg]["PN"], "generic_%s" %
+ re.sub('\+', '', lic))
+            # Explicitly skip the CLOSED license because it isn't generic
+ if lic == "CLOSED":
+ continue
+
+ if not os.path.exists(lic_file):
+ bb.warn("The license listed %s was not in the "\
+ "licenses collected for recipe %s"
+ % (lic, pkg_dic[pkg]["PN"]))
+
+ # Two options here:
+ # - Just copy the manifest
+ # - Copy the manifest and the license directories
+    # With both options set we see about a 0.5 MB increase in core-image-minimal
+ copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST', True)
+ copy_lic_dirs = d.getVar('COPY_LIC_DIRS', True)
+ if copy_lic_manifest == "1":
+        rootfs_license_dir = os.path.join(d.getVar('IMAGE_ROOTFS', True),
+ 'usr', 'share', 'common-licenses')
+ bb.utils.mkdirhier(rootfs_license_dir)
+ rootfs_license_manifest = os.path.join(rootfs_license_dir,
+ os.path.split(license_manifest)[1])
+ if not os.path.exists(rootfs_license_manifest):
+ os.link(license_manifest, rootfs_license_manifest)
+
+ if copy_lic_dirs == "1":
+ for pkg in sorted(pkg_dic):
+ pkg_rootfs_license_dir = os.path.join(rootfs_license_dir, pkg)
+ bb.utils.mkdirhier(pkg_rootfs_license_dir)
+ pkg_license_dir = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
+ pkg_dic[pkg]["PN"])
+ licenses = os.listdir(pkg_license_dir)
+ for lic in licenses:
+ rootfs_license = os.path.join(rootfs_license_dir, lic)
+ pkg_license = os.path.join(pkg_license_dir, lic)
+ pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic)
+
+ if re.match("^generic_.*$", lic):
+ generic_lic = re.search("^generic_(.*)$", lic).group(1)
+ if oe.license.license_ok(canonical_license(d,
+ generic_lic), bad_licenses) == False:
+ continue
+
+ if not os.path.exists(rootfs_license):
+ os.link(pkg_license, rootfs_license)
+
+ if not os.path.exists(pkg_rootfs_license):
+ os.symlink(os.path.join('..', lic), pkg_rootfs_license)
+ else:
+ if (oe.license.license_ok(canonical_license(d,
+ lic), bad_licenses) == False or
+ os.path.exists(pkg_rootfs_license)):
+ continue
+
+ os.link(pkg_license, pkg_rootfs_license)
+
+
+def license_deployed_manifest(d):
+ """
+ Write the license manifest for the deployed recipes.
+ The deployed recipes usually includes the bootloader
+ and extra files to boot the target.
+ """
+
+ dep_dic = {}
+ man_dic = {}
+ lic_dir = d.getVar("LICENSE_DIRECTORY", True)
+
+ dep_dic = get_deployed_dependencies(d)
+ for dep in dep_dic.keys():
+ man_dic[dep] = {}
+        # It is necessary to mark that this will be used for the image manifest
+ man_dic[dep]["IMAGE_MANIFEST"] = True
+ man_dic[dep]["PN"] = dep
+ man_dic[dep]["FILES"] = \
+ " ".join(get_deployed_files(dep_dic[dep]))
+ with open(os.path.join(lic_dir, dep, "recipeinfo"), "r") as f:
+ for line in f.readlines():
+ key,val = line.split(": ", 1)
+ man_dic[dep][key] = val[:-1]
+
+ image_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
+ d.getVar('IMAGE_NAME', True), 'image_license.manifest')
+ write_license_files(d, image_license_manifest, man_dic)
+
+def get_deployed_dependencies(d):
+ """
+ Get all the deployed dependencies of an image
+ """
+
+ deploy = {}
+ # Get all the dependencies for the current task (rootfs).
+ # Also get EXTRA_IMAGEDEPENDS because the bootloader is
+ # usually in this var and not listed in rootfs.
+    # Finally, get the dependencies from the boot classes because
+    # they might contain the bootloader.
+ taskdata = d.getVar("BB_TASKDEPDATA", False)
+ depends = list(set([dep[0] for dep
+ in taskdata.itervalues()
+ if not dep[0].endswith("-native")]))
+ extra_depends = d.getVar("EXTRA_IMAGEDEPENDS", True)
+ boot_depends = get_boot_dependencies(d)
+ depends.extend(extra_depends.split())
+ depends.extend(boot_depends)
+ depends = list(set(depends))
+
+    # To verify what was deployed, check the rootfs dependencies against
+    # the SSTATE_MANIFESTS for the "deploy" task.
+ # The manifest file name contains the arch. Because we are not running
+ # in the recipe context it is necessary to check every arch used.
+ sstate_manifest_dir = d.getVar("SSTATE_MANIFESTS", True)
+ sstate_archs = d.getVar("SSTATE_ARCHS", True)
+ extra_archs = d.getVar("PACKAGE_EXTRA_ARCHS", True)
+ archs = list(set(("%s %s" % (sstate_archs, extra_archs)).split()))
+ for dep in depends:
+ # Some recipes have an arch on their own, so we try that first.
+ special_arch = d.getVar("PACKAGE_ARCH_pn-%s" % dep, True)
+ if special_arch:
+ sstate_manifest_file = os.path.join(sstate_manifest_dir,
+ "manifest-%s-%s.deploy" % (special_arch, dep))
+ if os.path.exists(sstate_manifest_file):
+ deploy[dep] = sstate_manifest_file
+ continue
+
+ for arch in archs:
+ sstate_manifest_file = os.path.join(sstate_manifest_dir,
+ "manifest-%s-%s.deploy" % (arch, dep))
+ if os.path.exists(sstate_manifest_file):
+ deploy[dep] = sstate_manifest_file
+ break
+
+ return deploy
+get_deployed_dependencies[vardepsexclude] = "BB_TASKDEPDATA"
+
+def get_boot_dependencies(d):
+ """
+ Return the dependencies from boot tasks
+ """
+
+ depends = []
+ boot_depends_string = ""
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ # Only bootimg and bootdirectdisk include the depends flag
+ boot_tasks = ["do_bootimg", "do_bootdirectdisk",]
+
+ for task in boot_tasks:
+ boot_depends_string = "%s %s" % (boot_depends_string,
+ d.getVarFlag(task, "depends", True) or "")
+ boot_depends = [dep.split(":")[0] for dep
+ in boot_depends_string.split()
+ if not dep.split(":")[0].endswith("-native")]
+ for dep in boot_depends:
+ info_file = os.path.join(d.getVar("LICENSE_DIRECTORY", True),
+ dep, "recipeinfo")
+        # If the recipe and dependency names are the same
+ if os.path.exists(info_file):
+ depends.append(dep)
+ # We need to search for the provider of the dependency
+ else:
+ for taskdep in taskdepdata.itervalues():
+ # The fifth field contains what the task provides
+ if dep in taskdep[4]:
+ info_file = os.path.join(
+ d.getVar("LICENSE_DIRECTORY", True),
+ taskdep[0], "recipeinfo")
+ if os.path.exists(info_file):
+ depends.append(taskdep[0])
+ break
+ return depends
+get_boot_dependencies[vardepsexclude] = "BB_TASKDEPDATA"
+
+def get_deployed_files(man_file):
+ """
+ Get the files deployed from the sstate manifest
+ """
+
+ dep_files = []
+ excluded_files = ["README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt"]
+ with open(man_file, "r") as manifest:
+ all_files = manifest.read()
+ for f in all_files.splitlines():
+ if ((not (os.path.islink(f) or os.path.isdir(f))) and
+ not os.path.basename(f) in excluded_files):
+ dep_files.append(os.path.basename(f))
+ return dep_files
+
+python do_populate_lic() {
+ """
+ Populate LICENSE_DIRECTORY with licenses.
+ """
+ lic_files_paths = find_license_files(d)
+
+ # The base directory we wrangle licenses to
+ destdir = os.path.join(d.getVar('LICSSTATEDIR', True), d.getVar('PN', True))
+ copy_license_files(lic_files_paths, destdir)
+ info = get_recipe_info(d)
+ with open(os.path.join(destdir, "recipeinfo"), "w") as f:
+ for key in sorted(info.keys()):
+ f.write("%s: %s\n" % (key, info[key]))
+}
+
+# It would be better to copy them in do_install_append, but find_license_files is python
+python perform_packagecopy_prepend () {
+ enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
+ if d.getVar('CLASSOVERRIDE', True) == 'class-target' and enabled:
+ lic_files_paths = find_license_files(d)
+
+ # LICENSE_FILES_DIRECTORY starts with '/' so os.path.join cannot be used to join D and LICENSE_FILES_DIRECTORY
+ destdir = d.getVar('D', True) + os.path.join(d.getVar('LICENSE_FILES_DIRECTORY', True), d.getVar('PN', True))
+ copy_license_files(lic_files_paths, destdir)
+ add_package_and_files(d)
+}
+perform_packagecopy[vardeps] += "LICENSE_CREATE_PACKAGE"
+
+def get_recipe_info(d):
+ info = {}
+ info["PV"] = d.getVar("PV", True)
+ info["PR"] = d.getVar("PR", True)
+ info["LICENSE"] = d.getVar("LICENSE", True)
+ return info
+
+def add_package_and_files(d):
+ packages = d.getVar('PACKAGES', True)
+ files = d.getVar('LICENSE_FILES_DIRECTORY', True)
+ pn = d.getVar('PN', True)
+ pn_lic = "%s%s" % (pn, d.getVar('LICENSE_PACKAGE_SUFFIX', False))
+ if pn_lic in packages:
+        bb.warn("%s package already exists in %s." % (pn_lic, pn))
+ else:
+ # first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
+ d.setVar('PACKAGES', "%s %s" % (pn_lic, packages))
+ d.setVar('FILES_' + pn_lic, files)
+ rrecommends_pn = d.getVar('RRECOMMENDS_' + pn, True)
+ if rrecommends_pn:
+ d.setVar('RRECOMMENDS_' + pn, "%s %s" % (pn_lic, rrecommends_pn))
+ else:
+ d.setVar('RRECOMMENDS_' + pn, "%s" % (pn_lic))
+
+def copy_license_files(lic_files_paths, destdir):
+ import shutil
+
+ bb.utils.mkdirhier(destdir)
+ for (basename, path) in lic_files_paths:
+ try:
+ src = path
+ dst = os.path.join(destdir, basename)
+ if os.path.exists(dst):
+ os.remove(dst)
+ if os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev):
+ os.link(src, dst)
+ try:
+ os.chown(dst,0,0)
+ except OSError as err:
+ import errno
+ if err.errno in (errno.EPERM, errno.EINVAL):
+ # Suppress "Operation not permitted" error, as
+ # sometimes this function is not executed under pseudo.
+ # Also ignore "Invalid argument" errors that happen in
+ # some (unprivileged) container environments (no root).
+ pass
+ else:
+ raise
+ else:
+ shutil.copyfile(src, dst)
+ except Exception as e:
+ bb.warn("Could not copy license file %s to %s: %s" % (src, dst, e))
+
+def find_license_files(d):
+ """
+    Creates a list of the files used in LIC_FILES_CHKSUM plus the generic LICENSE files.
+ """
+ import shutil
+ import oe.license
+
+    pn = d.getVar('PN', True)
+    license_types = None
+    # Iterate over the package names, not the characters of the PACKAGES string
+    for package in d.getVar('PACKAGES', True).split():
+        if d.getVar('LICENSE_' + package, True):
+            if license_types is None:
+                license_types = d.getVar('LICENSE_' + package, True)
+            else:
+                license_types = license_types + ' & ' + \
+                                d.getVar('LICENSE_' + package, True)
+
+    # If we get here with no per-package license types, then we have a recipe
+    # level license. If so, we grab only those.
+    if license_types is None:
+        # All the license types at the recipe level
+        license_types = d.getVar('LICENSE', True)
+
+ # All the license files for the package
+ lic_files = d.getVar('LIC_FILES_CHKSUM', True)
+ pn = d.getVar('PN', True)
+    # The license files are located in S/LIC_FILES_CHKSUM.
+ srcdir = d.getVar('S', True)
+ # Directory we store the generic licenses as set in the distro configuration
+ generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
+ # List of basename, path tuples
+ lic_files_paths = []
+ license_source_dirs = []
+ license_source_dirs.append(generic_directory)
+ try:
+ additional_lic_dirs = d.getVar('LICENSE_PATH', True).split()
+ for lic_dir in additional_lic_dirs:
+ license_source_dirs.append(lic_dir)
+ except:
+ pass
+
+ class FindVisitor(oe.license.LicenseVisitor):
+ def visit_Str(self, node):
+ #
+            # Until I figure out what to do with the two modifiers I support
+            # (or-greater = '+' and "with exceptions" = '*'), we'll just strip
+            # out the modifier and use the base license.
+ find_license(node.s.replace("+", "").replace("*", ""))
+ self.generic_visit(node)
+
+ def find_license(license_type):
+ try:
+ bb.utils.mkdirhier(gen_lic_dest)
+ except:
+ pass
+ spdx_generic = None
+ license_source = None
+ # If the generic does not exist we need to check to see if there is an SPDX mapping to it,
+ # unless NO_GENERIC_LICENSE is set.
+
+ for lic_dir in license_source_dirs:
+ if not os.path.isfile(os.path.join(lic_dir, license_type)):
+ if d.getVarFlag('SPDXLICENSEMAP', license_type, True) != None:
+ # Great, there is an SPDXLICENSEMAP. We can copy!
+ bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type))
+ spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type, True)
+ license_source = lic_dir
+ break
+ elif os.path.isfile(os.path.join(lic_dir, license_type)):
+ spdx_generic = license_type
+ license_source = lic_dir
+ break
+
+ if spdx_generic and license_source:
+        # We really should copy to generic_ + spdx_generic; however, that ends up
+        # messing up the manifest audit. This should be fixed in emit_pkgdata
+        # (or we actually go and fix all the recipes).
+
+ lic_files_paths.append(("generic_" + license_type, os.path.join(license_source, spdx_generic)))
+
+ # The user may attempt to use NO_GENERIC_LICENSE for a generic license which doesn't make sense
+            # and should not be allowed; warn the user in this case.
+ if d.getVarFlag('NO_GENERIC_LICENSE', license_type, True):
+ bb.warn("%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type))
+
+ elif d.getVarFlag('NO_GENERIC_LICENSE', license_type, True):
+ # if NO_GENERIC_LICENSE is set, we copy the license files from the fetched source
+ # of the package rather than the license_source_dirs.
+ for (basename, path) in lic_files_paths:
+ if d.getVarFlag('NO_GENERIC_LICENSE', license_type, True) == basename:
+ lic_files_paths.append(("generic_" + license_type, path))
+ break
+ else:
+            # Explicitly skip the CLOSED license because it isn't generic
+ if license_type != 'CLOSED':
+ # And here is where we warn people that their licenses are lousy
+ bb.warn("%s: No generic license file exists for: %s in any provider" % (pn, license_type))
+ pass
+
+ if not generic_directory:
+ raise bb.build.FuncFailed("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
+
+ if not lic_files:
+        # No recipe should have an invalid license file. This is checked elsewhere,
+        # but let's be pedantic
+ bb.note(pn + ": Recipe file does not have license file information.")
+ return lic_files_paths
+
+ for url in lic_files.split():
+ try:
+ (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
+ except bb.fetch.MalformedUrl:
+ raise bb.build.FuncFailed("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF', True), url))
+ # We want the license filename and path
+ srclicfile = os.path.join(srcdir, path)
+ lic_files_paths.append((os.path.basename(path), srclicfile))
+
+ v = FindVisitor()
+ try:
+ v.visit_string(license_types)
+ except oe.license.InvalidLicense as exc:
+ bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
+ except SyntaxError:
+        bb.warn("%s: Failed to parse its LICENSE field." % (d.getVar('PF', True)))
+
+ return lic_files_paths
+
+def return_spdx(d, license):
+ """
+ This function returns the spdx mapping of a license if it exists.
+ """
+ return d.getVarFlag('SPDXLICENSEMAP', license, True)
+
+def canonical_license(d, license):
+ """
+ Return the canonical (SPDX) form of the license if available (so GPLv3
+ becomes GPL-3.0), for the license named 'X+', return canonical form of
+    'X' if available plus the trailing '+' (so GPLv3+ becomes GPL-3.0+),
+ or the passed license if there is no canonical form.
+ """
+ lic = d.getVarFlag('SPDXLICENSEMAP', license, True) or ""
+ if not lic and license.endswith('+'):
+ lic = d.getVarFlag('SPDXLICENSEMAP', license.rstrip('+'), True)
+ if lic:
+ lic += '+'
+ return lic or license
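+
+# e.g. assuming the distro sets SPDXLICENSEMAP[GPLv3] = "GPL-3.0",
+# canonical_license(d, 'GPLv3+') returns 'GPL-3.0+'; a name with no mapping is
+# returned unchanged.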
+
+def expand_wildcard_licenses(d, wildcard_licenses):
+ """
+ Return actual spdx format license names if wildcard used. We expand
+ wildcards from SPDXLICENSEMAP flags and SRC_DISTRIBUTE_LICENSES values.
+ """
+ import fnmatch
+ licenses = []
+ spdxmapkeys = d.getVarFlags('SPDXLICENSEMAP').keys()
+ for wld_lic in wildcard_licenses:
+ spdxflags = fnmatch.filter(spdxmapkeys, wld_lic)
+ licenses += [d.getVarFlag('SPDXLICENSEMAP', flag, True) for flag in spdxflags]
+
+ spdx_lics = (d.getVar('SRC_DISTRIBUTE_LICENSES', False) or '').split()
+ for wld_lic in wildcard_licenses:
+ licenses += fnmatch.filter(spdx_lics, wld_lic)
+
+ licenses = list(set(licenses))
+ return licenses
+
+def incompatible_license_contains(license, truevalue, falsevalue, d):
+ license = canonical_license(d, license)
+ bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE', True) or "").split()
+ bad_licenses = expand_wildcard_licenses(d, bad_licenses)
+ return truevalue if license in bad_licenses else falsevalue
+
+def incompatible_license(d, dont_want_licenses, package=None):
+ """
+    This function checks if a recipe has only incompatible licenses. It also
+    takes the 'or' operator into consideration. dont_want_licenses should be passed
+ as canonical (SPDX) names.
+ """
+ import oe.license
+ license = d.getVar("LICENSE_%s" % package, True) if package else None
+ if not license:
+ license = d.getVar('LICENSE', True)
+
+ # Handles an "or" or two license sets provided by
+ # flattened_licenses(), pick one that works if possible.
+ def choose_lic_set(a, b):
+ return a if all(oe.license.license_ok(canonical_license(d, lic),
+ dont_want_licenses) for lic in a) else b
+
+ try:
+ licenses = oe.license.flattened_licenses(license, choose_lic_set)
+ except oe.license.LicenseError as exc:
+ bb.fatal('%s: %s' % (d.getVar('P', True), exc))
+ return any(not oe.license.license_ok(canonical_license(d, l), \
+ dont_want_licenses) for l in licenses)
+
+def check_license_flags(d):
+ """
+ This function checks if a recipe has any LICENSE_FLAGS that
+ aren't whitelisted.
+
+ If it does, it returns the first LICENSE_FLAGS item missing from the
+ whitelist, or all of the LICENSE_FLAGS if there is no whitelist.
+
+    If everything is properly whitelisted, it returns None.
+ """
+
+ def license_flag_matches(flag, whitelist, pn):
+ """
+ Return True if flag matches something in whitelist, None if not.
+
+ Before we test a flag against the whitelist, we append _${PN}
+ to it. We then try to match that string against the
+ whitelist. This covers the normal case, where we expect
+ LICENSE_FLAGS to be a simple string like 'commercial', which
+ the user typically matches exactly in the whitelist by
+        explicitly appending the package name, e.g. 'commercial_foo'.
+ If we fail the match however, we then split the flag across
+ '_' and append each fragment and test until we either match or
+ run out of fragments.
+ """
+ flag_pn = ("%s_%s" % (flag, pn))
+ for candidate in whitelist:
+ if flag_pn == candidate:
+ return True
+
+ flag_cur = ""
+ flagments = flag_pn.split("_")
+ flagments.pop() # we've already tested the full string
+ for flagment in flagments:
+ if flag_cur:
+ flag_cur += "_"
+ flag_cur += flagment
+ for candidate in whitelist:
+ if flag_cur == candidate:
+ return True
+ return False
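+
+    # e.g. (a sketch) the flag "commercial" for pn "foo" is tested as
+    # "commercial_foo" first and then as "commercial", so a whitelist entry of
+    # either form matches.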
+
+ def all_license_flags_match(license_flags, whitelist):
+ """ Return first unmatched flag, None if all flags match """
+ pn = d.getVar('PN', True)
+ split_whitelist = whitelist.split()
+ for flag in license_flags.split():
+ if not license_flag_matches(flag, split_whitelist, pn):
+ return flag
+ return None
+
+ license_flags = d.getVar('LICENSE_FLAGS', True)
+ if license_flags:
+ whitelist = d.getVar('LICENSE_FLAGS_WHITELIST', True)
+ if not whitelist:
+ return license_flags
+ unmatched_flag = all_license_flags_match(license_flags, whitelist)
+ if unmatched_flag:
+ return unmatched_flag
+ return None
+
+def check_license_format(d):
+ """
+    This function checks that LICENSE is well formed and validates the
+    operators used in it. License names must be separated by valid operators,
+    not by bare spaces.
+ """
+ pn = d.getVar('PN', True)
+ licenses = d.getVar('LICENSE', True)
+ from oe.license import license_operator, license_operator_chars, license_pattern
+
+ elements = filter(lambda x: x.strip(), license_operator.split(licenses))
+ for pos, element in enumerate(elements):
+ if license_pattern.match(element):
+ if pos > 0 and license_pattern.match(elements[pos - 1]):
+ bb.warn('%s: LICENSE value "%s" has an invalid format - license names ' \
+ 'must be separated by the following characters to indicate ' \
+ 'the license selection: %s' %
+ (pn, licenses, license_operator_chars))
+ elif not license_operator.match(element):
+ bb.warn('%s: LICENSE value "%s" has an invalid separator "%s" that is not ' \
+ 'in the valid list of separators (%s)' %
+ (pn, licenses, element, license_operator_chars))
+
+SSTATETASKS += "do_populate_lic"
+do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
+do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
+
+ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; "
+do_rootfs[recrdeptask] += "do_populate_lic"
+
+IMAGE_POSTPROCESS_COMMAND_prepend = "write_deploy_manifest; "
+do_image[recrdeptask] += "do_populate_lic"
+
+do_populate_lic_setscene[dirs] = "${LICSSTATEDIR}/${PN}"
+do_populate_lic_setscene[cleandirs] = "${LICSSTATEDIR}"
+python do_populate_lic_setscene () {
+ sstate_setscene(d)
+}
+addtask do_populate_lic_setscene
diff --git a/import-layers/yocto-poky/meta/classes/linux-kernel-base.bbclass b/import-layers/yocto-poky/meta/classes/linux-kernel-base.bbclass
new file mode 100644
index 000000000..89ce71605
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/linux-kernel-base.bbclass
@@ -0,0 +1,41 @@
+# parse kernel ABI version out of <linux/version.h>
+def get_kernelversion_headers(p):
+ import re
+
+ fn = p + '/include/linux/utsrelease.h'
+ if not os.path.isfile(fn):
+ # after 2.6.33-rc1
+ fn = p + '/include/generated/utsrelease.h'
+ if not os.path.isfile(fn):
+ fn = p + '/include/linux/version.h'
+
+ try:
+ f = open(fn, 'r')
+ except IOError:
+ return None
+
+ l = f.readlines()
+ f.close()
+ r = re.compile("#define UTS_RELEASE \"(.*)\"")
+ for s in l:
+ m = r.match(s)
+ if m:
+ return m.group(1)
+ return None
+
+
+def get_kernelversion_file(p):
+ fn = p + '/kernel-abiversion'
+
+ try:
+ with open(fn, 'r') as f:
+ return f.readlines()[0].strip()
+ except IOError:
+ return None
+
+def linux_module_packages(s, d):
+ suffix = ""
+ return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
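+
+# For example (a sketch): linux_module_packages("foo_bar baz", d) returns
+# "kernel-module-foo-bar kernel-module-baz".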
+
+# that's all
+
diff --git a/import-layers/yocto-poky/meta/classes/linuxloader.bbclass b/import-layers/yocto-poky/meta/classes/linuxloader.bbclass
new file mode 100644
index 000000000..5c4dc5c51
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/linuxloader.bbclass
@@ -0,0 +1,24 @@
+
+linuxloader () {
+ case ${TARGET_ARCH} in
+ powerpc | mips | mipsel | microblaze )
+ dynamic_loader="${base_libdir}/ld.so.1"
+ ;;
+ powerpc64)
+ dynamic_loader="${base_libdir}/ld64.so.1"
+ ;;
+ x86_64)
+ dynamic_loader="${base_libdir}/ld-linux-x86-64.so.2"
+ ;;
+ i*86 )
+ dynamic_loader="${base_libdir}/ld-linux.so.2"
+ ;;
+ arm )
+ dynamic_loader="${base_libdir}/ld-linux.so.3"
+ ;;
+ * )
+ dynamic_loader="/unknown_dynamic_linker"
+ ;;
+ esac
+ echo $dynamic_loader
+}
diff --git a/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass b/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass
new file mode 100644
index 000000000..c751385e7
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass
@@ -0,0 +1,58 @@
+# Some of the variables for the vm and live images conflict; this function
+# resolves those conflicts.
+def set_live_vm_vars(d, suffix):
+ vars = ['GRUB_CFG', 'SYSLINUX_CFG', 'ROOT', 'LABELS', 'INITRD']
+ for var in vars:
+ var_with_suffix = var + '_' + suffix
+ if d.getVar(var, True):
+            bb.warn('Found potentially conflicting variable %s, please use %s rather than %s' % \
+                    (var, var_with_suffix, var))
+ elif d.getVar(var_with_suffix, True):
+ d.setVar(var, d.getVar(var_with_suffix, True))
+
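+# For example (a sketch): set_live_vm_vars(d, 'LIVE') copies SYSLINUX_CFG_LIVE
+# into SYSLINUX_CFG when only the suffixed variable has been set.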
+
+EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
+EFI_PROVIDER ?= "grub-efi"
+EFI_CLASS = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
+
+# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
+# contain "efi". This way legacy is supported by default if neither is
+# specified, maintaining the original behavior.
+def pcbios(d):
+ pcbios = bb.utils.contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
+ if pcbios == "0":
+ pcbios = bb.utils.contains("MACHINE_FEATURES", "efi", "0", "1", d)
+ return pcbios
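+
+# For example (a sketch): MACHINE_FEATURES containing "efi" alone yields
+# PCBIOS = "0", "pcbios efi" yields "1", and neither feature yields "1",
+# i.e. legacy boot remains the default.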
+
+PCBIOS = "${@pcbios(d)}"
+PCBIOS_CLASS = "${@['','syslinux'][d.getVar('PCBIOS', True) == '1']}"
+
+inherit ${EFI_CLASS}
+inherit ${PCBIOS_CLASS}
+
+KERNEL_IMAGETYPE ??= "bzImage"
+
+populate_kernel() {
+ dest=$1
+ install -d $dest
+
+ # Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
+ if [ -e ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} ]; then
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} $dest/vmlinuz
+ fi
+
+	# The initrd is made by concatenating multiple filesystem images
+ if [ -n "${INITRD}" ]; then
+ rm -f $dest/initrd
+ for fs in ${INITRD}
+ do
+ if [ -s "$fs" ]; then
+ cat $fs >> $dest/initrd
+ else
+ bbfatal "$fs is invalid. initrd image creation failed."
+ fi
+ done
+ chmod 0644 $dest/initrd
+ fi
+}
+
diff --git a/import-layers/yocto-poky/meta/classes/logging.bbclass b/import-layers/yocto-poky/meta/classes/logging.bbclass
new file mode 100644
index 000000000..06c7c31c3
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/logging.bbclass
@@ -0,0 +1,101 @@
+# The following logging mechanisms are to be used in shell functions of recipes.
+# They are intended to map one to one, in intent and output format, onto the
+# Python recipe logging functions of similar naming convention: bb.plain(),
+# bb.note(), etc.
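+#
+# Example usage from a recipe shell task (a sketch; the task body is
+# hypothetical):
+#   do_install_append() {
+#       bbnote "installing extra configuration"
+#       bbwarn "no configuration found, using defaults"
+#   }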
+
+LOGFIFO = "${T}/fifo.${@os.getpid()}"
+
+# Print the output exactly as it is passed in. Typically used for output of
+# tasks that should be seen on the console. Use sparingly.
+# Output: logs console
+bbplain() {
+ if [ -p ${LOGFIFO} ] ; then
+ printf "%b\0" "bbplain $*" > ${LOGFIFO}
+ else
+ echo "$*"
+ fi
+}
+
+# Notify the user of a noteworthy condition.
+# Output: logs
+bbnote() {
+ if [ -p ${LOGFIFO} ] ; then
+ printf "%b\0" "bbnote $*" > ${LOGFIFO}
+ else
+ echo "NOTE: $*"
+ fi
+}
+
+# Print a warning to the log. Warnings are non-fatal, and do not
+# indicate a build failure.
+# Output: logs console
+bbwarn() {
+ if [ -p ${LOGFIFO} ] ; then
+ printf "%b\0" "bbwarn $*" > ${LOGFIFO}
+ else
+ echo "WARNING: $*"
+ fi
+}
+
+# Print an error to the log. Errors are non-fatal in that the build can
+# continue, but they do indicate a build failure.
+# Output: logs console
+bberror() {
+ if [ -p ${LOGFIFO} ] ; then
+ printf "%b\0" "bberror $*" > ${LOGFIFO}
+ else
+ echo "ERROR: $*"
+ fi
+}
+
+# Print a fatal error to the log. Fatal errors indicate build failure
+# and halt the build, exiting with an error code.
+# Output: logs console
+bbfatal() {
+ if [ -p ${LOGFIFO} ] ; then
+ printf "%b\0" "bbfatal $*" > ${LOGFIFO}
+ else
+ echo "ERROR: $*"
+ fi
+ exit 1
+}
+
+# Like bbfatal, except prevents the suppression of the error log by
+# bitbake's UI.
+# Output: logs console
+bbfatal_log() {
+ if [ -p ${LOGFIFO} ] ; then
+ printf "%b\0" "bbfatal_log $*" > ${LOGFIFO}
+ else
+ echo "ERROR: $*"
+ fi
+ exit 1
+}
+
+# Print debug messages. These are appropriate for progress checkpoint
+# messages to the logs. Depending on the debug log level, they may also
+# go to the console.
+# Output: logs console
+# Usage: bbdebug 1 "first level debug message"
+# bbdebug 2 "second level debug message"
+bbdebug() {
+ USAGE='Usage: bbdebug [123] "message"'
+ if [ $# -lt 2 ]; then
+ bbfatal "$USAGE"
+ fi
+
+ # Strip off the debug level and ensure it is an integer
+ DBGLVL=$1; shift
+	NONDIGITS=$(echo "$DBGLVL" | tr -d "[:digit:]")
+ if [ "$NONDIGITS" ]; then
+ bbfatal "$USAGE"
+ fi
+
+ # All debug output is printed to the logs
+ if [ -p ${LOGFIFO} ] ; then
+ printf "%b\0" "bbdebug $DBGLVL $*" > ${LOGFIFO}
+ else
+ echo "DEBUG: $*"
+ fi
+}
+
diff --git a/import-layers/yocto-poky/meta/classes/meta.bbclass b/import-layers/yocto-poky/meta/classes/meta.bbclass
new file mode 100644
index 000000000..5e6890238
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/meta.bbclass
@@ -0,0 +1,4 @@
+
+PACKAGES = ""
+
+do_build[recrdeptask] = "do_build"
diff --git a/import-layers/yocto-poky/meta/classes/metadata_scm.bbclass b/import-layers/yocto-poky/meta/classes/metadata_scm.bbclass
new file mode 100644
index 000000000..0f7f4235a
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/metadata_scm.bbclass
@@ -0,0 +1,83 @@
+METADATA_BRANCH ?= "${@base_detect_branch(d)}"
+METADATA_REVISION ?= "${@base_detect_revision(d)}"
+
+def base_detect_revision(d):
+ path = base_get_scmbasepath(d)
+
+ scms = [base_get_metadata_git_revision, \
+ base_get_metadata_svn_revision]
+
+ for scm in scms:
+ rev = scm(path, d)
+ if rev != "<unknown>":
+ return rev
+
+ return "<unknown>"
+
+def base_detect_branch(d):
+ path = base_get_scmbasepath(d)
+
+ scms = [base_get_metadata_git_branch]
+
+ for scm in scms:
+ rev = scm(path, d)
+ if rev != "<unknown>":
+ return rev.strip()
+
+ return "<unknown>"
+
+def base_get_scmbasepath(d):
+ return d.getVar( 'COREBASE', True)
+
+def base_get_metadata_monotone_branch(path, d):
+ monotone_branch = "<unknown>"
+ try:
+ with open("%s/_MTN/options" % path) as f:
+ monotone_branch = f.read().strip()
+            if monotone_branch.startswith("database"):
+                monotone_branch_words = monotone_branch.split()
+                monotone_branch = monotone_branch_words[monotone_branch_words.index("branch") + 1][1:-1]
+    except (IOError, ValueError):
+ pass
+ return monotone_branch
+
+def base_get_metadata_monotone_revision(path, d):
+ monotone_revision = "<unknown>"
+ try:
+ with open("%s/_MTN/revision" % path) as f:
+ monotone_revision = f.read().strip()
+            if monotone_revision.startswith("format_version"):
+                monotone_revision_words = monotone_revision.split()
+                monotone_revision = monotone_revision_words[monotone_revision_words.index("old_revision") + 1][1:-1]
+    except (IOError, ValueError):
+ pass
+ return monotone_revision
+
+def base_get_metadata_svn_revision(path, d):
+    # This only works with older subversion. For newer versions
+    # this function will need to be updated by someone interested.
+ revision = "<unknown>"
+ try:
+ with open("%s/.svn/entries" % path) as f:
+ revision = f.readlines()[3].strip()
+ except (IOError, IndexError):
+ pass
+ return revision
+
+def base_get_metadata_git_branch(path, d):
+ import bb.process
+
+ try:
+ rev, _ = bb.process.run('git rev-parse --abbrev-ref HEAD', cwd=path)
+ except bb.process.ExecutionError:
+ rev = '<unknown>'
+ return rev.strip()
+
+def base_get_metadata_git_revision(path, d):
+ import bb.process
+
+ try:
+ rev, _ = bb.process.run('git rev-parse HEAD', cwd=path)
+ except bb.process.ExecutionError:
+ rev = '<unknown>'
+ return rev.strip()
diff --git a/import-layers/yocto-poky/meta/classes/migrate_localcount.bbclass b/import-layers/yocto-poky/meta/classes/migrate_localcount.bbclass
new file mode 100644
index 000000000..aa0df8bb7
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/migrate_localcount.bbclass
@@ -0,0 +1,46 @@
+PRSERV_DUMPDIR ??= "${LOG_DIR}/db"
+LOCALCOUNT_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv-localcount-exports.inc"
+
+python migrate_localcount_handler () {
+ import bb.event
+ if not e.data:
+ return
+
+ pv = e.data.getVar('PV', True)
+ if not 'AUTOINC' in pv:
+ return
+
+ localcounts = bb.persist_data.persist('BB_URI_LOCALCOUNT', e.data)
+ pn = e.data.getVar('PN', True)
+ revs = localcounts.get_by_pattern('%%-%s_rev' % pn)
+ counts = localcounts.get_by_pattern('%%-%s_count' % pn)
+ if not revs or not counts:
+ return
+
+ if len(revs) != len(counts):
+        bb.warn("The number of revs does not match the number of localcounts in %s" % pn)
+ return
+
+ version = e.data.getVar('PRAUTOINX', True)
+ srcrev = bb.fetch2.get_srcrev(e.data)
+ base_ver = 'AUTOINC-%s' % version[:version.find(srcrev)]
+ pkgarch = e.data.getVar('PACKAGE_ARCH', True)
+ value = max(int(count) for count in counts)
+
+ if len(revs) == 1:
+ if srcrev != ('AUTOINC+%s' % revs[0]):
+ value += 1
+ else:
+ value += 1
+
+ bb.utils.mkdirhier(e.data.getVar('PRSERV_DUMPDIR', True))
+ df = e.data.getVar('LOCALCOUNT_DUMPFILE', True)
+ flock = bb.utils.lockfile("%s.lock" % df)
+ with open(df, 'a') as fd:
+ fd.write('PRAUTO$%s$%s$%s = "%s"\n' %
+ (base_ver, pkgarch, srcrev, str(value)))
+ bb.utils.unlockfile(flock)
+}
+
+addhandler migrate_localcount_handler
+migrate_localcount_handler[eventmask] = "bb.event.RecipeParsed"
diff --git a/import-layers/yocto-poky/meta/classes/mime.bbclass b/import-layers/yocto-poky/meta/classes/mime.bbclass
new file mode 100644
index 000000000..721c73fcf
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/mime.bbclass
@@ -0,0 +1,56 @@
+DEPENDS += "shared-mime-info-native shared-mime-info"
+
+mime_postinst() {
+if [ "$1" = configure ]; then
+ UPDATEMIMEDB=`which update-mime-database`
+ if [ -x "$UPDATEMIMEDB" ] ; then
+ echo "Updating MIME database... this may take a while."
+ $UPDATEMIMEDB $D${datadir}/mime
+ else
+ echo "Missing update-mime-database, update of mime database failed!"
+ exit 1
+ fi
+fi
+}
+
+mime_postrm() {
+if [ "$1" = remove ] || [ "$1" = upgrade ]; then
+ UPDATEMIMEDB=`which update-mime-database`
+ if [ -x "$UPDATEMIMEDB" ] ; then
+ echo "Updating MIME database... this may take a while."
+ $UPDATEMIMEDB $D${datadir}/mime
+ else
+ echo "Missing update-mime-database, update of mime database failed!"
+ exit 1
+ fi
+fi
+}
+
+python populate_packages_append () {
+ import re
+ packages = d.getVar('PACKAGES', True).split()
+ pkgdest = d.getVar('PKGDEST', True)
+
+ for pkg in packages:
+ mime_dir = '%s/%s/usr/share/mime/packages' % (pkgdest, pkg)
+ mimes = []
+        mime_re = re.compile(r".*\.xml$")
+ if os.path.exists(mime_dir):
+ for f in os.listdir(mime_dir):
+ if mime_re.match(f):
+ mimes.append(f)
+ if mimes:
+ bb.note("adding mime postinst and postrm scripts to %s" % pkg)
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('mime_postinst', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+ postrm = d.getVar('pkg_postrm_%s' % pkg, True)
+ if not postrm:
+ postrm = '#!/bin/sh\n'
+ postrm += d.getVar('mime_postrm', True)
+ d.setVar('pkg_postrm_%s' % pkg, postrm)
+ bb.note("adding shared-mime-info-data dependency to %s" % pkg)
+ d.appendVar('RDEPENDS_' + pkg, " shared-mime-info-data")
+}
diff --git a/import-layers/yocto-poky/meta/classes/mirrors.bbclass b/import-layers/yocto-poky/meta/classes/mirrors.bbclass
new file mode 100644
index 000000000..9e6d4836d
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/mirrors.bbclass
@@ -0,0 +1,70 @@
+MIRRORS += "\
+${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \n \
+${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \n \
+${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.de.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.au.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.cl.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.hr.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.fi.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.hk.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.hu.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.ie.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.it.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.jp.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.no.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.pl.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.ro.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.si.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.es.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.se.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.tr.debian.org/debian/pool \n \
+${GNU_MIRROR} ftp://mirrors.kernel.org/gnu \n \
+${KERNELORG_MIRROR} http://www.kernel.org/pub \n \
+ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt/ \n \
+ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.surfnet.nl/pub/security/gnupg/ \n \
+ftp://ftp.gnupg.org/gcrypt/ http://gulus.USherbrooke.ca/pub/appl/GnuPG/ \n \
+ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \
+ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \
+ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
+ftp://ftp.gnutls.org/gcrypt/gnutls ftp://ftp.gnupg.org/gcrypt/gnutls/ \n \
+http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
+http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cerias.purdue.edu/pub/tools/unix/sysutils/lsof/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tau.ac.il/pub/unix/admin/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cert.dfn.de/pub/tools/admin/lsof/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.fu-berlin.de/pub/unix/tools/lsof/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.kaizo.org/pub/lsof/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tu-darmstadt.de/pub/sysadmin/lsof/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tux.org/pub/sites/vic.cc.purdue.edu/tools/unix/lsof/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://gd.tuwien.ac.at/utils/admin-tools/lsof/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://sunsite.ualberta.ca/pub/Mirror/lsof/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://the.wiretapped.net/pub/security/host-security/lsof/ \n \
+${APACHE_MIRROR} http://www.us.apache.org/dist \n \
+${APACHE_MIRROR} http://archive.apache.org/dist \n \
+http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \n \
+${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \n \
+${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \n \
+cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+https?$://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+npm://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+cvs://.*/.* http://sources.openembedded.org/ \n \
+svn://.*/.* http://sources.openembedded.org/ \n \
+git://.*/.* http://sources.openembedded.org/ \n \
+hg://.*/.* http://sources.openembedded.org/ \n \
+bzr://.*/.* http://sources.openembedded.org/ \n \
+p4://.*/.* http://sources.openembedded.org/ \n \
+osc://.*/.* http://sources.openembedded.org/ \n \
+https?$://.*/.* http://sources.openembedded.org/ \n \
+ftp://.*/.* http://sources.openembedded.org/ \n \
+npm://.*/.* http://sources.openembedded.org/ \n \
+${CPAN_MIRROR} http://cpan.metacpan.org/ \n \
+${CPAN_MIRROR} http://search.cpan.org/CPAN/ \n \
+"
diff --git a/import-layers/yocto-poky/meta/classes/module-base.bbclass b/import-layers/yocto-poky/meta/classes/module-base.bbclass
new file mode 100644
index 000000000..6fe77c01b
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/module-base.bbclass
@@ -0,0 +1,27 @@
+inherit kernel-arch
+
+# This is instead of DEPENDS = "virtual/kernel"
+do_configure[depends] += "virtual/kernel:do_compile_kernelmodules"
+
+export OS = "${TARGET_OS}"
+export CROSS_COMPILE = "${TARGET_PREFIX}"
+
+# This points to the build artefacts from the main kernel build
+# such as .config and System.map
+# Confusingly, it is not the module build output (which is ${B}), but
+# we didn't pick the name.
+export KBUILD_OUTPUT = "${STAGING_KERNEL_BUILDDIR}"
+
+export KERNEL_VERSION = "${@base_read_file('${STAGING_KERNEL_BUILDDIR}/kernel-abiversion')}"
+KERNEL_OBJECT_SUFFIX = ".ko"
+
+# kernel modules are generally machine specific
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+
+# Function to ensure the kernel scripts are created. Expected to
+# be called before do_compile. See module.bbclass for an example.
+do_make_scripts() {
+ unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
+ make CC="${KERNEL_CC}" LD="${KERNEL_LD}" AR="${KERNEL_AR}" \
+ -C ${STAGING_KERNEL_DIR} O=${STAGING_KERNEL_BUILDDIR} scripts
+}
diff --git a/import-layers/yocto-poky/meta/classes/module.bbclass b/import-layers/yocto-poky/meta/classes/module.bbclass
new file mode 100644
index 000000000..01c9309eb
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/module.bbclass
@@ -0,0 +1,34 @@
+inherit module-base kernel-module-split
+
+addtask make_scripts after do_patch before do_compile
+do_make_scripts[lockfiles] = "${TMPDIR}/kernel-scripts.lock"
+do_make_scripts[depends] += "virtual/kernel:do_shared_workdir"
+
+EXTRA_OEMAKE += "KERNEL_SRC=${STAGING_KERNEL_DIR}"
+
+MODULES_INSTALL_TARGET ?= "modules_install"
+
+module_do_compile() {
+ unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
+ oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
+ KERNEL_VERSION=${KERNEL_VERSION} \
+ CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
+ AR="${KERNEL_AR}" \
+ O=${STAGING_KERNEL_BUILDDIR} \
+ ${MAKE_TARGETS}
+}
+
+module_do_install() {
+ unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
+ oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" \
+ CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
+ O=${STAGING_KERNEL_BUILDDIR} \
+ ${MODULES_INSTALL_TARGET}
+}
+
+EXPORT_FUNCTIONS do_compile do_install
+
+# Add all split modules to the PN RDEPENDS; PN can be empty now.
+KERNEL_MODULES_META_PACKAGE = "${PN}"
+FILES_${PN} = ""
+ALLOW_EMPTY_${PN} = "1"
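+
+# A minimal out-of-tree module recipe using this class might look like the
+# following sketch (the file names are hypothetical):
+#   inherit module
+#   SRC_URI = "file://Makefile file://hello.c"
+#   S = "${WORKDIR}"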
diff --git a/import-layers/yocto-poky/meta/classes/multilib.bbclass b/import-layers/yocto-poky/meta/classes/multilib.bbclass
new file mode 100644
index 000000000..d5a31287a
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/multilib.bbclass
@@ -0,0 +1,148 @@
+python multilib_virtclass_handler () {
+ cls = e.data.getVar("BBEXTENDCURR", True)
+ variant = e.data.getVar("BBEXTENDVARIANT", True)
+ if cls != "multilib" or not variant:
+ return
+
+ e.data.setVar('STAGING_KERNEL_DIR', e.data.getVar('STAGING_KERNEL_DIR', True))
+
+ # There should only be one kernel in multilib configs
+ # We also skip multilib setup for module packages.
+ provides = (e.data.getVar("PROVIDES", True) or "").split()
+ if "virtual/kernel" in provides or bb.data.inherits_class('module-base', e.data):
+ raise bb.parse.SkipPackage("We shouldn't have multilib variants for the kernel")
+
+ save_var_name=e.data.getVar("MULTILIB_SAVE_VARNAME", True) or ""
+ for name in save_var_name.split():
+ val=e.data.getVar(name, True)
+ if val:
+ e.data.setVar(name + "_MULTILIB_ORIGINAL", val)
+
+ overrides = e.data.getVar("OVERRIDES", False)
+ pn = e.data.getVar("PN", False)
+ overrides = overrides.replace("pn-${PN}", "pn-${PN}:pn-" + pn)
+ e.data.setVar("OVERRIDES", overrides)
+
+ if bb.data.inherits_class('image', e.data):
+ e.data.setVar("MLPREFIX", variant + "-")
+ e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
+ e.data.setVar('SDKTARGETSYSROOT', e.data.getVar('SDKTARGETSYSROOT', True))
+ target_vendor = e.data.getVar("TARGET_VENDOR_" + "virtclass-multilib-" + variant, False)
+ if target_vendor:
+ e.data.setVar("TARGET_VENDOR", target_vendor)
+ return
+
+ if bb.data.inherits_class('cross-canadian', e.data):
+ e.data.setVar("MLPREFIX", variant + "-")
+ override = ":virtclass-multilib-" + variant
+ e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
+ bb.data.update_data(e.data)
+ return
+
+ if bb.data.inherits_class('native', e.data):
+ raise bb.parse.SkipPackage("We can't extend native recipes")
+
+ if bb.data.inherits_class('nativesdk', e.data) or bb.data.inherits_class('crosssdk', e.data):
+ raise bb.parse.SkipPackage("We can't extend nativesdk recipes")
+
+ if bb.data.inherits_class('allarch', e.data) and not bb.data.inherits_class('packagegroup', e.data):
+ raise bb.parse.SkipPackage("Don't extend allarch recipes which are not packagegroups")
+
+
+ # Expand this since this won't work correctly once we set a multilib into place
+ e.data.setVar("ALL_MULTILIB_PACKAGE_ARCHS", e.data.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))
+
+ override = ":virtclass-multilib-" + variant
+
+ e.data.setVar("MLPREFIX", variant + "-")
+ e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
+ e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
+
+ # Expand the WHITELISTs with multilib prefix
+ for whitelist in ["WHITELIST_GPL-3.0", "LGPLv2_WHITELIST_GPL-3.0"]:
+ pkgs = e.data.getVar(whitelist, True)
+ for pkg in pkgs.split():
+ pkgs += " " + variant + "-" + pkg
+ e.data.setVar(whitelist, pkgs)
+
+ # DEFAULTTUNE can change TARGET_ARCH override so expand this now before update_data
+ newtune = e.data.getVar("DEFAULTTUNE_" + "virtclass-multilib-" + variant, False)
+ if newtune:
+ e.data.setVar("DEFAULTTUNE", newtune)
+ e.data.setVar('DEFAULTTUNE_ML_%s' % variant, newtune)
+}
+
+addhandler multilib_virtclass_handler
+multilib_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
+
+STAGINGCC_prepend = "${BBEXTENDVARIANT}-"
+
+python __anonymous () {
+ variant = d.getVar("BBEXTENDVARIANT", True)
+
+ import oe.classextend
+
+ clsextend = oe.classextend.ClassExtender(variant, d)
+
+ if bb.data.inherits_class('image', d):
+ clsextend.map_depends_variable("PACKAGE_INSTALL")
+ clsextend.map_depends_variable("LINGUAS_INSTALL")
+ clsextend.map_depends_variable("RDEPENDS")
+ pinstall = d.getVar("LINGUAS_INSTALL", True) + " " + d.getVar("PACKAGE_INSTALL", True)
+ d.setVar("PACKAGE_INSTALL", pinstall)
+ d.setVar("LINGUAS_INSTALL", "")
+ # FIXME, we need to map this to something, not delete it!
+ d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
+
+ if bb.data.inherits_class('image', d):
+ return
+
+ clsextend.map_depends_variable("DEPENDS")
+ clsextend.map_variable("PROVIDES")
+
+ if bb.data.inherits_class('cross-canadian', d):
+ return
+
+ clsextend.rename_packages()
+ clsextend.rename_package_variables((d.getVar("PACKAGEVARS", True) or "").split())
+
+ clsextend.map_packagevars()
+ clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
+ clsextend.map_variable("PACKAGE_INSTALL")
+ clsextend.map_variable("INITSCRIPT_PACKAGES")
+ clsextend.map_variable("USERADD_PACKAGES")
+ clsextend.map_variable("SYSTEMD_PACKAGES")
+}
+
+PACKAGEFUNCS_append = " do_package_qa_multilib"
+
+python do_package_qa_multilib() {
+
+ def check_mlprefix(pkg, var, mlprefix):
+ values = bb.utils.explode_deps(d.getVar('%s_%s' % (var, pkg), True) or d.getVar(var, True) or "")
+ candidates = []
+ for i in values:
+ if i.startswith('virtual/'):
+ i = i[len('virtual/'):]
+ if (not i.startswith('kernel-module')) and (not i.startswith(mlprefix)) and \
+ (not 'cross-canadian' in i) and (not i.startswith("nativesdk-")) and \
+ (not i.startswith("rtld")) and (not i.startswith('kernel-vmlinux')):
+ candidates.append(i)
+ if len(candidates) > 0:
+ msg = "%s package %s - suspicious values '%s' in %s" \
+ % (d.getVar('PN', True), pkg, ' '.join(candidates), var)
+ package_qa_handle_error("multilib", msg, d)
+
+ ml = d.getVar('MLPREFIX', True)
+ if not ml:
+ return
+
+ packages = d.getVar('PACKAGES', True)
+ for pkg in packages.split():
+ check_mlprefix(pkg, 'RDEPENDS', ml)
+ check_mlprefix(pkg, 'RPROVIDES', ml)
+ check_mlprefix(pkg, 'RRECOMMENDS', ml)
+ check_mlprefix(pkg, 'RSUGGESTS', ml)
+ check_mlprefix(pkg, 'RREPLACES', ml)
+ check_mlprefix(pkg, 'RCONFLICTS', ml)
+}
diff --git a/import-layers/yocto-poky/meta/classes/multilib_global.bbclass b/import-layers/yocto-poky/meta/classes/multilib_global.bbclass
new file mode 100644
index 000000000..67dc72b76
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/multilib_global.bbclass
@@ -0,0 +1,180 @@
+def preferred_ml_updates(d):
+    # If any PREFERRED_PROVIDER or PREFERRED_VERSION variables are set,
+    # we need to mirror these variables in the multilib case.
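+    # For example (a sketch, assuming a "lib32" multilib): setting
+    # PREFERRED_VERSION_foo = "1.0" also produces
+    # PREFERRED_VERSION_lib32-foo = "1.0".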
+ multilibs = d.getVar('MULTILIBS', True) or ""
+ if not multilibs:
+ return
+
+ prefixes = []
+ for ext in multilibs.split():
+ eext = ext.split(':')
+ if len(eext) > 1 and eext[0] == 'multilib':
+ prefixes.append(eext[1])
+
+ versions = []
+ providers = []
+ for v in d.keys():
+ if v.startswith("PREFERRED_VERSION_"):
+ versions.append(v)
+ if v.startswith("PREFERRED_PROVIDER_"):
+ providers.append(v)
+
+ for v in versions:
+ val = d.getVar(v, False)
+ pkg = v.replace("PREFERRED_VERSION_", "")
+ if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
+ continue
+ if '-cross-' in pkg and '${' in pkg:
+ for p in prefixes:
+ localdata = bb.data.createCopy(d)
+ override = ":virtclass-multilib-" + p
+ localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
+ bb.data.update_data(localdata)
+ if "-canadian-" in pkg:
+ newname = localdata.expand(v)
+ else:
+ newname = localdata.expand(v).replace("PREFERRED_VERSION_", "PREFERRED_VERSION_" + p + '-')
+ if newname != v:
+ newval = localdata.expand(val)
+ d.setVar(newname, newval)
+ # Avoid future variable key expansion
+ vexp = d.expand(v)
+ if v != vexp and d.getVar(v, False):
+ d.renameVar(v, vexp)
+ continue
+ for p in prefixes:
+ newname = "PREFERRED_VERSION_" + p + "-" + pkg
+ if not d.getVar(newname, False):
+ d.setVar(newname, val)
+
+ for prov in providers:
+ val = d.getVar(prov, False)
+ pkg = prov.replace("PREFERRED_PROVIDER_", "")
+ if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
+ continue
+ if 'cross-canadian' in pkg:
+ for p in prefixes:
+ localdata = bb.data.createCopy(d)
+ override = ":virtclass-multilib-" + p
+ localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
+ bb.data.update_data(localdata)
+ newname = localdata.expand(prov)
+ if newname != prov:
+ newval = localdata.expand(val)
+ d.setVar(newname, newval)
+ # Avoid future variable key expansion
+ provexp = d.expand(prov)
+ if prov != provexp and d.getVar(prov, False):
+ d.renameVar(prov, provexp)
+ continue
+ virt = ""
+ if pkg.startswith("virtual/"):
+ pkg = pkg.replace("virtual/", "")
+ virt = "virtual/"
+ for p in prefixes:
+ if pkg != "kernel":
+ newval = p + "-" + val
+
+ # implement variable keys
+ localdata = bb.data.createCopy(d)
+ override = ":virtclass-multilib-" + p
+ localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
+ bb.data.update_data(localdata)
+ newname = localdata.expand(prov)
+ if newname != prov and not d.getVar(newname, False):
+ d.setVar(newname, localdata.expand(newval))
+
+ # implement alternative multilib name
+ newname = localdata.expand("PREFERRED_PROVIDER_" + virt + p + "-" + pkg)
+ if not d.getVar(newname, False):
+ d.setVar(newname, localdata.expand(newval))
+ # Avoid future variable key expansion
+ provexp = d.expand(prov)
+ if prov != provexp and d.getVar(prov, False):
+ d.renameVar(prov, provexp)
+
+ def translate_provide(prefix, prov):
+ if not prov.startswith("virtual/"):
+ return prefix + "-" + prov
+ if prov == "virtual/kernel":
+ return prov
+ prov = prov.replace("virtual/", "")
+ return "virtual/" + prefix + "-" + prov
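+    # For example (a sketch): translate_provide("lib32", "virtual/libc")
+    # returns "virtual/lib32-libc", while "virtual/kernel" is deliberately
+    # left unprefixed.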
+
+ mp = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
+ extramp = []
+ for p in mp:
+ if p.endswith("-native") or "-crosssdk-" in p or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
+ continue
+ for pref in prefixes:
+ extramp.append(translate_provide(pref, p))
+ d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))
+
+ abisafe = (d.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
+ extras = []
+ for p in prefixes:
+ for a in abisafe:
+ extras.append(p + "-" + a)
+ d.appendVar("SIGGEN_EXCLUDERECIPES_ABISAFE", " " + " ".join(extras))
+
+ siggen_exclude = (d.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
+ extras = []
+ for p in prefixes:
+ for a in siggen_exclude:
+ a1, a2 = a.split("->")
+ extras.append(translate_provide(p, a1) + "->" + translate_provide(p, a2))
+ d.appendVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", " " + " ".join(extras))
+
+python multilib_virtclass_handler_vendor () {
+ if isinstance(e, bb.event.ConfigParsed):
+ for v in e.data.getVar("MULTILIB_VARIANTS", True).split():
+ if e.data.getVar("TARGET_VENDOR_virtclass-multilib-" + v, False) is None:
+ e.data.setVar("TARGET_VENDOR_virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
+ preferred_ml_updates(e.data)
+}
+addhandler multilib_virtclass_handler_vendor
+multilib_virtclass_handler_vendor[eventmask] = "bb.event.ConfigParsed"
+
+python multilib_virtclass_handler_global () {
+ if not e.data:
+ return
+
+ variant = e.data.getVar("BBEXTENDVARIANT", True)
+
+ if isinstance(e, bb.event.RecipeParsed) and not variant:
+ if bb.data.inherits_class('kernel', e.data) or \
+ bb.data.inherits_class('module-base', e.data) or \
+        (bb.data.inherits_class('allarch', e.data) and \
+ not bb.data.inherits_class('packagegroup', e.data)):
+ variants = (e.data.getVar("MULTILIB_VARIANTS", True) or "").split()
+
+ import oe.classextend
+ clsextends = []
+ for variant in variants:
+ clsextends.append(oe.classextend.ClassExtender(variant, e.data))
+
+ # Process PROVIDES
+ origprovs = provs = e.data.getVar("PROVIDES", True) or ""
+ for clsextend in clsextends:
+ provs = provs + " " + clsextend.map_variable("PROVIDES", setvar=False)
+ e.data.setVar("PROVIDES", provs)
+
+ # Process RPROVIDES
+ origrprovs = rprovs = e.data.getVar("RPROVIDES", True) or ""
+ for clsextend in clsextends:
+ rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES", setvar=False)
+ if rprovs.strip():
+ e.data.setVar("RPROVIDES", rprovs)
+
+ # Process RPROVIDES_${PN}...
+ for pkg in (e.data.getVar("PACKAGES", True) or "").split():
+ origrprovs = rprovs = e.data.getVar("RPROVIDES_%s" % pkg, True) or ""
+ for clsextend in clsextends:
+ rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES_%s" % pkg, setvar=False)
+ rprovs = rprovs + " " + clsextend.extname + "-" + pkg
+ e.data.setVar("RPROVIDES_%s" % pkg, rprovs)
+}
+
+addhandler multilib_virtclass_handler_global
+multilib_virtclass_handler_global[eventmask] = "bb.event.RecipePreFinalise bb.event.RecipeParsed"
+
diff --git a/import-layers/yocto-poky/meta/classes/multilib_header.bbclass b/import-layers/yocto-poky/meta/classes/multilib_header.bbclass
new file mode 100644
index 000000000..5ee0a2d56
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/multilib_header.bbclass
@@ -0,0 +1,54 @@
+inherit siteinfo
+
+# If applicable on the architecture, this routine will rename the header and
+# add a unique identifier to the name for the ABI/bitsize that is being used.
+# A wrapper will be generated for the architecture that knows how to call
+# all of the ABI variants for that given architecture.
+#
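+# Typical usage from a recipe's do_install (a sketch; the header path is
+# hypothetical):
+#   do_install_append() {
+#       oe_multilib_header mylib/config.h
+#   }
+#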
+oe_multilib_header() {
+
+ case ${HOST_OS} in
+ *-musl*)
+ return
+ ;;
+ *)
+ esac
+	# We use the following rules:
+	# For ARM: we don't support multilib builds.
+	# For MIPS: "n32" is a special case, which needs to be
+	# distinct from both 64-bit and 32-bit.
+ case ${TARGET_ARCH} in
+ arm*) return
+ ;;
+ mips*) case "${MIPSPKGSFX_ABI}" in
+ "-n32")
+ ident=n32
+ ;;
+ *)
+ ident=${SITEINFO_BITS}
+ ;;
+ esac
+ ;;
+ *) ident=${SITEINFO_BITS}
+ esac
+ if echo ${TARGET_ARCH} | grep -q arm; then
+ return
+ fi
+ for each_header in "$@" ; do
+ if [ ! -f "${D}/${includedir}/$each_header" ]; then
+ bberror "oe_multilib_header: Unable to find header $each_header."
+ continue
+ fi
+ stem=$(echo $each_header | sed 's#\.h$##')
+ # if mips64/n32 set ident to n32
+ mv ${D}/${includedir}/$each_header ${D}/${includedir}/${stem}-${ident}.h
+
+ sed -e "s#ENTER_HEADER_FILENAME_HERE#${stem}#g" ${COREBASE}/scripts/multilib_header_wrapper.h > ${D}/${includedir}/$each_header
+ done
+}
+
+# Dependencies on arch variables like MIPSPKGSFX_ABI can be problematic.
+# We don't need multilib headers for native builds so brute force things.
+oe_multilib_header_class-native () {
+ return
+}
diff --git a/import-layers/yocto-poky/meta/classes/native.bbclass b/import-layers/yocto-poky/meta/classes/native.bbclass
new file mode 100644
index 000000000..f67ef0014
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/native.bbclass
@@ -0,0 +1,179 @@
+# We want native packages to be relocatable
+inherit relocatable
+
+# Native packages are built indirectly via dependency,
+# no need for them to be a direct target of 'world'
+EXCLUDE_FROM_WORLD = "1"
+
+PACKAGES = ""
+PACKAGES_class-native = ""
+PACKAGES_DYNAMIC = ""
+PACKAGES_DYNAMIC_class-native = ""
+PACKAGE_ARCH = "${BUILD_ARCH}"
+
+# used by cmake class
+OECMAKE_RPATH = "${libdir}"
+OECMAKE_RPATH_class-native = "${libdir}"
+
+# When this class has packaging enabled, setting
+# RPROVIDES becomes unnecessary.
+RPROVIDES = "${PN}"
+
+TARGET_ARCH = "${BUILD_ARCH}"
+TARGET_OS = "${BUILD_OS}"
+TARGET_VENDOR = "${BUILD_VENDOR}"
+TARGET_PREFIX = "${BUILD_PREFIX}"
+TARGET_CC_ARCH = "${BUILD_CC_ARCH}"
+TARGET_LD_ARCH = "${BUILD_LD_ARCH}"
+TARGET_AS_ARCH = "${BUILD_AS_ARCH}"
+TARGET_CPPFLAGS = "${BUILD_CPPFLAGS}"
+TARGET_CFLAGS = "${BUILD_CFLAGS}"
+TARGET_CXXFLAGS = "${BUILD_CXXFLAGS}"
+TARGET_LDFLAGS = "${BUILD_LDFLAGS}"
+TARGET_FPU = ""
+
+HOST_ARCH = "${BUILD_ARCH}"
+HOST_OS = "${BUILD_OS}"
+HOST_VENDOR = "${BUILD_VENDOR}"
+HOST_PREFIX = "${BUILD_PREFIX}"
+HOST_CC_ARCH = "${BUILD_CC_ARCH}"
+HOST_LD_ARCH = "${BUILD_LD_ARCH}"
+HOST_AS_ARCH = "${BUILD_AS_ARCH}"
+
+CPPFLAGS = "${BUILD_CPPFLAGS}"
+CFLAGS = "${BUILD_CFLAGS}"
+CXXFLAGS = "${BUILD_CXXFLAGS}"
+LDFLAGS = "${BUILD_LDFLAGS}"
+LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE} "
+
+STAGING_BINDIR = "${STAGING_BINDIR_NATIVE}"
+STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}"
+
+# native pkg doesn't need the TOOLCHAIN_OPTIONS.
+TOOLCHAIN_OPTIONS = ""
+
+DEPENDS_GETTEXT = "gettext-native"
+
+# Don't build ptest natively
+PTEST_ENABLED = "0"
+
+# Don't use site files for native builds
+export CONFIG_SITE = "${COREBASE}/meta/site/native"
+
+# set the compiler as well. It could have been set to something else
+export CC = "${BUILD_CC}"
+export CXX = "${BUILD_CXX}"
+export FC = "${BUILD_FC}"
+export CPP = "${BUILD_CPP}"
+export LD = "${BUILD_LD}"
+export CCLD = "${BUILD_CCLD}"
+export AR = "${BUILD_AR}"
+export AS = "${BUILD_AS}"
+export RANLIB = "${BUILD_RANLIB}"
+export STRIP = "${BUILD_STRIP}"
+export NM = "${BUILD_NM}"
+
+# Path prefixes
+base_prefix = "${STAGING_DIR_NATIVE}"
+prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
+exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
+
+bindir = "${STAGING_BINDIR_NATIVE}"
+sbindir = "${STAGING_SBINDIR_NATIVE}"
+libdir = "${STAGING_LIBDIR_NATIVE}"
+includedir = "${STAGING_INCDIR_NATIVE}"
+sysconfdir = "${STAGING_ETCDIR_NATIVE}"
+datadir = "${STAGING_DATADIR_NATIVE}"
+
+baselib = "lib"
+
+# Libtool's default paths are correct for the native machine
+lt_cv_sys_lib_dlsearch_path_spec[unexport] = "1"
+
+NATIVE_PACKAGE_PATH_SUFFIX ?= ""
+bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
+libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
+libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
+
+do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
+do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_NATIVE}/"
+
+# Since we actually install these in situ there is no staging prefix
+STAGING_DIR_HOST = ""
+STAGING_DIR_TARGET = ""
+PKG_CONFIG_DIR = "${libdir}/pkgconfig"
+
+EXTRA_NATIVE_PKGCONFIG_PATH ?= ""
+PKG_CONFIG_PATH .= "${EXTRA_NATIVE_PKGCONFIG_PATH}"
+PKG_CONFIG_SYSROOT_DIR = ""
+PKG_CONFIG_SYSTEM_LIBRARY_PATH[unexport] = "1"
+PKG_CONFIG_SYSTEM_INCLUDE_PATH[unexport] = "1"
+
+# we don't want libc-uclibc or libc-glibc to kick in for native recipes
+LIBCOVERRIDE = ""
+CLASSOVERRIDE = "class-native"
+MACHINEOVERRIDES = ""
+
+PATH_prepend = "${COREBASE}/scripts/native-intercept:"
+
+python native_virtclass_handler () {
+ classextend = e.data.getVar('BBCLASSEXTEND', True) or ""
+ if "native" not in classextend:
+ return
+
+ pn = e.data.getVar("PN", True)
+ if not pn.endswith("-native"):
+ return
+
+ def map_dependencies(varname, d, suffix = ""):
+ if suffix:
+ varname = varname + "_" + suffix
+ deps = d.getVar(varname, True)
+ if not deps:
+ return
+ deps = bb.utils.explode_deps(deps)
+ newdeps = []
+ for dep in deps:
+ if dep == pn:
+ continue
+ elif "-cross-" in dep:
+ newdeps.append(dep.replace("-cross", "-native"))
+ elif not dep.endswith("-native"):
+ newdeps.append(dep + "-native")
+ else:
+ newdeps.append(dep)
+ d.setVar(varname, " ".join(newdeps))
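+    # For example (a sketch): map_dependencies("DEPENDS", e.data) turns
+    # DEPENDS = "zlib gettext-native" into "zlib-native gettext-native".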
+
+ e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-native")
+
+ map_dependencies("DEPENDS", e.data)
+ for pkg in [e.data.getVar("PN", True), "", "${PN}"]:
+ map_dependencies("RDEPENDS", e.data, pkg)
+ map_dependencies("RRECOMMENDS", e.data, pkg)
+ map_dependencies("RSUGGESTS", e.data, pkg)
+ map_dependencies("RPROVIDES", e.data, pkg)
+ map_dependencies("RREPLACES", e.data, pkg)
+
+ provides = e.data.getVar("PROVIDES", True)
+ nprovides = []
+ for prov in provides.split():
+ if prov.find(pn) != -1:
+ nprovides.append(prov)
+ elif not prov.endswith("-native"):
+            nprovides.append(prov + "-native")
+ else:
+ nprovides.append(prov)
+ e.data.setVar("PROVIDES", ' '.join(nprovides))
+
+}
+
+addhandler native_virtclass_handler
+native_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
+
+inherit nopackages
+
+do_packagedata[stamp-extra-info] = ""
+do_populate_sysroot[stamp-extra-info] = ""
+
+USE_NLS = "no"
diff --git a/import-layers/yocto-poky/meta/classes/nativesdk.bbclass b/import-layers/yocto-poky/meta/classes/nativesdk.bbclass
new file mode 100644
index 000000000..f74da6267
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/nativesdk.bbclass
@@ -0,0 +1,97 @@
+# SDK packages are built either explicitly by the user,
+# or indirectly via dependency. No need to be in 'world'.
+EXCLUDE_FROM_WORLD = "1"
+
+STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}"
+
+# libc for the SDK can be different to that of the target
+NATIVESDKLIBC ?= "libc-glibc"
+LIBCOVERRIDE = ":${NATIVESDKLIBC}"
+CLASSOVERRIDE = "class-nativesdk"
+MACHINEOVERRIDES = ""
+
+#
+# Update PACKAGE_ARCH and PACKAGE_ARCHS
+#
+PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
+PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}"
+
+#
+# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
+# binaries
+#
+DEPENDS_append = " chrpath-replacement-native"
+EXTRANATIVEPATH += "chrpath-native"
+
+STAGING_DIR_HOST = "${STAGING_DIR}/${MULTIMACH_HOST_SYS}"
+STAGING_DIR_TARGET = "${STAGING_DIR}/${MULTIMACH_TARGET_SYS}"
+PKGDATA_DIR = "${STAGING_DIR_HOST}/pkgdata"
+
+HOST_ARCH = "${SDK_ARCH}"
+HOST_VENDOR = "${SDK_VENDOR}"
+HOST_OS = "${SDK_OS}"
+HOST_PREFIX = "${SDK_PREFIX}"
+HOST_CC_ARCH = "${SDK_CC_ARCH}"
+HOST_LD_ARCH = "${SDK_LD_ARCH}"
+HOST_AS_ARCH = "${SDK_AS_ARCH}"
+#HOST_SYS = "${HOST_ARCH}${TARGET_VENDOR}-${HOST_OS}"
+
+TARGET_ARCH = "${SDK_ARCH}"
+TARGET_VENDOR = "${SDK_VENDOR}"
+TARGET_OS = "${SDK_OS}"
+TARGET_PREFIX = "${SDK_PREFIX}"
+TARGET_CC_ARCH = "${SDK_CC_ARCH}"
+TARGET_LD_ARCH = "${SDK_LD_ARCH}"
+TARGET_AS_ARCH = "${SDK_AS_ARCH}"
+TARGET_FPU = ""
+EXTRA_OECONF_GCC_FLOAT = ""
+
+CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
+CFLAGS = "${BUILDSDK_CFLAGS}"
+CXXFLAGS = "${BUILDSDK_CFLAGS}"
+LDFLAGS = "${BUILDSDK_LDFLAGS}"
+
+# Change to place files in SDKPATH
+base_prefix = "${SDKPATHNATIVE}"
+prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
+exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
+baselib = "lib"
+sbindir = "${bindir}"
+
+export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${libdir}/pkgconfig"
+export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
+
+python nativesdk_virtclass_handler () {
+ pn = e.data.getVar("PN", True)
+ if not (pn.endswith("-nativesdk") or pn.startswith("nativesdk-")):
+ return
+
+ e.data.setVar("MLPREFIX", "nativesdk-")
+ e.data.setVar("PN", "nativesdk-" + e.data.getVar("PN", True).replace("-nativesdk", "").replace("nativesdk-", ""))
+ e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-nativesdk")
+}
+
+python () {
+ pn = d.getVar("PN", True)
+ if not pn.startswith("nativesdk-"):
+ return
+
+ import oe.classextend
+
+ clsextend = oe.classextend.NativesdkClassExtender("nativesdk", d)
+ clsextend.rename_packages()
+ clsextend.rename_package_variables((d.getVar("PACKAGEVARS", True) or "").split())
+
+ clsextend.map_depends_variable("DEPENDS")
+ clsextend.map_packagevars()
+ clsextend.map_variable("PROVIDES")
+ clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
+}
+
+addhandler nativesdk_virtclass_handler
+nativesdk_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
+
+do_populate_sysroot[stamp-extra-info] = ""
+do_packagedata[stamp-extra-info] = ""
+
+USE_NLS = "${SDKUSE_NLS}"
diff --git a/import-layers/yocto-poky/meta/classes/nopackages.bbclass b/import-layers/yocto-poky/meta/classes/nopackages.bbclass
new file mode 100644
index 000000000..0c2761bef
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/nopackages.bbclass
@@ -0,0 +1,6 @@
+deltask do_package
+deltask do_package_write_rpm
+deltask do_package_write_ipk
+deltask do_package_write_deb
+deltask do_package_qa
+deltask do_packagedata
diff --git a/import-layers/yocto-poky/meta/classes/npm.bbclass b/import-layers/yocto-poky/meta/classes/npm.bbclass
new file mode 100644
index 000000000..9843e8735
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/npm.bbclass
@@ -0,0 +1,49 @@
+DEPENDS_prepend = "nodejs-native "
+S = "${WORKDIR}/npmpkg"
+
+NPM_INSTALLDIR = "${D}${libdir}/node_modules/${PN}"
+
+npm_do_compile() {
+	# Change the home directory to the working directory; the .npmrc will
+	# be created in this directory.
+ export HOME=${WORKDIR}
+ npm config set dev false
+ npm set cache ${WORKDIR}/npm_cache
+ # clear cache before every build
+ npm cache clear
+ # Install pkg into ${S} without going to the registry
+ npm --arch=${TARGET_ARCH} --production --no-registry install
+}
+
+npm_do_install() {
+ mkdir -p ${NPM_INSTALLDIR}/
+ cp -a ${S}/* ${NPM_INSTALLDIR}/ --no-preserve=ownership
+}
+
+python populate_packages_prepend () {
+ instdir = d.expand('${D}${libdir}/node_modules/${PN}')
+ extrapackages = oe.package.npm_split_package_dirs(instdir)
+ pkgnames = extrapackages.keys()
+ d.prependVar('PACKAGES', '%s ' % ' '.join(pkgnames))
+ for pkgname in pkgnames:
+ pkgrelpath, pdata = extrapackages[pkgname]
+ pkgpath = '${libdir}/node_modules/${PN}/' + pkgrelpath
+ # package names can't have underscores but npm packages sometimes use them
+ oe_pkg_name = pkgname.replace('_', '-')
+ expanded_pkgname = d.expand(oe_pkg_name)
+ d.setVar('FILES_%s' % expanded_pkgname, pkgpath)
+ if pdata:
+ version = pdata.get('version', None)
+ if version:
+ d.setVar('PKGV_%s' % expanded_pkgname, version.encode("utf8"))
+ description = pdata.get('description', None)
+ if description:
+ d.setVar('SUMMARY_%s' % expanded_pkgname, description.replace(u"\u2018", "'").replace(u"\u2019", "'").encode("utf8"))
+ d.appendVar('RDEPENDS_%s' % d.getVar('PN', True), ' %s' % ' '.join(pkgnames).replace('_', '-'))
+}
+
+FILES_${PN} += " \
+ ${libdir}/node_modules/${PN} \
+"
+
+EXPORT_FUNCTIONS do_compile do_install
diff --git a/import-layers/yocto-poky/meta/classes/oelint.bbclass b/import-layers/yocto-poky/meta/classes/oelint.bbclass
new file mode 100644
index 000000000..1b051ca22
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/oelint.bbclass
@@ -0,0 +1,84 @@
+addtask lint before do_build
+do_lint[nostamp] = "1"
+python do_lint() {
+ pkgname = d.getVar("PN", True)
+
+ ##############################
+ # Test that DESCRIPTION exists
+ #
+ description = d.getVar("DESCRIPTION", False)
+ if description[1:10] == '{SUMMARY}':
+ bb.warn("%s: DESCRIPTION is not set" % pkgname)
+
+
+ ##############################
+ # Test that HOMEPAGE exists
+ #
+ homepage = d.getVar("HOMEPAGE", False)
+ if homepage == '':
+ bb.warn("%s: HOMEPAGE is not set" % pkgname)
+ elif not homepage.startswith("http://") and not homepage.startswith("https://"):
+ bb.warn("%s: HOMEPAGE doesn't start with http:// or https://" % pkgname)
+
+
+ ##############################
+ # Test for valid SECTION
+ #
+ section = d.getVar("SECTION", False)
+ if section == '':
+ bb.warn("%s: SECTION is not set" % pkgname)
+ elif not section.islower():
+ bb.warn("%s: SECTION should only use lower case" % pkgname)
+
+
+ ##############################
+ # Check that all patches have Signed-off-by and Upstream-Status
+ #
+ srcuri = d.getVar("SRC_URI", False).split()
+ fpaths = (d.getVar('FILESPATH', True) or '').split(':')
+
+ def findPatch(patchname):
+ for dir in fpaths:
+ patchpath = dir + patchname
+ if os.path.exists(patchpath):
+ return patchpath
+
+ def findKey(path, key):
+ ret = True
+        f = open(path, 'r')
+ line = f.readline()
+ while line:
+ if line.find(key) != -1:
+ ret = False
+ line = f.readline()
+ f.close()
+ return ret
+
+    def checkPN(pkgname, varname, value):
+        if value.find("{PN}") != -1:
+            bb.warn("%s: should use BPN instead of PN in %s" % (pkgname, varname))
+        if value.find("{P}") != -1:
+            bb.warn("%s: should use BP instead of P in %s" % (pkgname, varname))
+
+ length = len("file://")
+ for item in srcuri:
+ if item.startswith("file://"):
+ item = item[length:]
+ if item.endswith(".patch") or item.endswith(".diff"):
+            path = findPatch(item)
+            if not path:
+                continue
+            if findKey(path, "Signed-off-by"):
+ bb.warn("%s: %s doesn't have Signed-off-by" % (pkgname, item))
+ if findKey(path, "Upstream-Status"):
+ bb.warn("%s: %s doesn't have Upstream-Status" % (pkgname, item))
+
+
+ ##############################
+ # Check for ${PN} or ${P} usage in SRC_URI or S
+ # Should use ${BPN} or ${BP} instead to avoid breaking multilib
+ #
+ for s in srcuri:
+ if not s.startswith("file://"):
+ checkPN(pkgname, 'SRC_URI', s)
+
+ checkPN(pkgname, 'S', d.getVar('S', False))
+}
diff --git a/import-layers/yocto-poky/meta/classes/own-mirrors.bbclass b/import-layers/yocto-poky/meta/classes/own-mirrors.bbclass
new file mode 100644
index 000000000..12b42675b
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/own-mirrors.bbclass
@@ -0,0 +1,13 @@
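+# Usage: inherit this class and point SOURCE_MIRROR_URL at your mirror,
+# e.g. in local.conf (a sketch; the URL is a placeholder):
+#   INHERIT += "own-mirrors"
+#   SOURCE_MIRROR_URL = "http://example.com/source-mirror"
+#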
+PREMIRRORS() {
+cvs://.*/.* ${SOURCE_MIRROR_URL}
+svn://.*/.* ${SOURCE_MIRROR_URL}
+git://.*/.* ${SOURCE_MIRROR_URL}
+gitsm://.*/.* ${SOURCE_MIRROR_URL}
+hg://.*/.* ${SOURCE_MIRROR_URL}
+bzr://.*/.* ${SOURCE_MIRROR_URL}
+p4://.*/.* ${SOURCE_MIRROR_URL}
+osc://.*/.* ${SOURCE_MIRROR_URL}
+https?$://.*/.* ${SOURCE_MIRROR_URL}
+ftp://.*/.* ${SOURCE_MIRROR_URL}
+npm://.*/.* ${SOURCE_MIRROR_URL}
+}
diff --git a/import-layers/yocto-poky/meta/classes/package.bbclass b/import-layers/yocto-poky/meta/classes/package.bbclass
new file mode 100644
index 000000000..76b9f8649
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/package.bbclass
@@ -0,0 +1,2131 @@
+#
+# Packaging process
+#
+# Executive summary: This class iterates over the functions listed in PACKAGEFUNCS,
+# taking D and splitting it up into the packages listed in PACKAGES, placing the
+# resulting output in PKGDEST.
+#
+# There are the following default steps but PACKAGEFUNCS can be extended:
+#
+# a) package_get_auto_pr - get PRAUTO from remote PR service
+#
+# b) perform_packagecopy - Copy D into PKGD
+#
+# c) package_do_split_locales - Split out the locale files, updates FILES and PACKAGES
+#
+# d) split_and_strip_files - split the files into runtime and debug and strip them.
+# Debug files include debug info split, and associated sources that end up in -dbg packages
+#
+# e) fixup_perms - Fix up permissions in the package before we split it.
+#
+# f) populate_packages - Split the files in PKGD into separate packages in PKGDEST/<pkgname>
+# Also triggers the binary stripping code to put files in -dbg packages.
+#
+# g) package_do_filedeps - Collect perfile run-time dependency metadata
+# The data is stored in FILER{PROVIDES,DEPENDS}_file_pkg variables with
+# a list of affected files in FILER{PROVIDES,DEPENDS}FLIST_pkg
+#
+# h) package_do_shlibs - Look at the shared libraries generated and automatically add any
+# dependencies found. Also stores the package name so anyone else using this library
+# knows which package to depend on.
+#
+# i) package_do_pkgconfig - Keep track of which packages need and provide which .pc files
+#
+# j) read_shlibdeps - Reads the stored shlibs information into the metadata
+#
+# k) package_depchains - Adds automatic dependencies to -dbg and -dev packages
+#
+# l) emit_pkgdata - saves the packaging data into PKGDATA_DIR for use in later
+# packaging steps
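+#
+# PACKAGEFUNCS can be extended from a recipe or class, e.g. (a sketch; the
+# function name is hypothetical):
+#   PACKAGEFUNCS += "my_extra_packagefunc"
+#   python my_extra_packagefunc () {
+#       bb.note("running an extra packaging step")
+#   }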
+
+inherit packagedata
+inherit chrpath
+
+# Need the package_qa_handle_error() in insane.bbclass
+inherit insane
+
+PKGD = "${WORKDIR}/package"
+PKGDEST = "${WORKDIR}/packages-split"
+
+LOCALE_SECTION ?= ''
+
+ALL_MULTILIB_PACKAGE_ARCHS = "${@all_multilib_tune_values(d, 'PACKAGE_ARCHS')}"
+
+# rpm is used for the per-file dependency identification
+PACKAGE_DEPENDS += "rpm-native"
+
+def legitimize_package_name(s):
+ """
+ Make sure package names are legitimate strings
+ """
+ import re
+
+ def fixutf(m):
+ cp = m.group(1)
+ if cp:
+ return ('\u%s' % cp).decode('unicode_escape').encode('utf-8')
+
+ # Handle unicode codepoints encoded as <U0123>, as in glibc locale files.
+ s = re.sub('<U([0-9A-Fa-f]{1,4})>', fixutf, s)
+
+ # Remaining package name validity fixes
+ return s.lower().replace('_', '-').replace('@', '+').replace(',', '+').replace('/', '-')
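+
+# For example (a sketch): legitimize_package_name("Foo_Bar@2,x") returns
+# "foo-bar+2+x".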
+
+def do_split_packages(d, root, file_regex, output_pattern, description, postinst=None, recursive=False, hook=None, extra_depends=None, aux_files_pattern=None, postrm=None, allow_dirs=False, prepend=False, match_path=False, aux_files_pattern_verbatim=None, allow_links=False, summary=None):
+ """
+ Used in .bb files to split up dynamically generated subpackages of a
+ given package, usually plugins or modules.
+
+ Arguments:
+ root -- the path in which to search
+ file_regex -- regular expression to match searched files. Use
+ parentheses () to mark the part of this expression
+ that should be used to derive the module name (to be
+ substituted where %s is used in other function
+ arguments as noted below)
+ output_pattern -- pattern to use for the package names. Must include %s.
+ description -- description to set for each package. Must include %s.
+ postinst -- postinstall script to use for all packages (as a
+ string)
+ recursive -- True to perform a recursive search - default False
+ hook -- a hook function to be called for every match. The
+ function will be called with the following arguments
+ (in the order listed):
+ f: full path to the file/directory match
+ pkg: the package name
+ file_regex: as above
+ output_pattern: as above
+ modulename: the module name derived using file_regex
+ extra_depends -- extra runtime dependencies (RDEPENDS) to be set for
+ all packages. The default value of None causes a
+ dependency on the main package (${PN}) - if you do
+ not want this, pass '' for this parameter.
+ aux_files_pattern -- extra item(s) to be added to FILES for each
+ package. Can be a single string item or a list of
+ strings for multiple items. Must include %s.
+ postrm -- postrm script to use for all packages (as a string)
+ allow_dirs -- True allow directories to be matched - default False
+ prepend -- if True, prepend created packages to PACKAGES instead
+ of the default False which appends them
+ match_path -- match file_regex on the whole relative path to the
+ root rather than just the file name
+ aux_files_pattern_verbatim -- extra item(s) to be added to FILES for
+ each package, using the actual derived module name
+ rather than converting it to something legal for a
+ package name. Can be a single string item or a list
+ of strings for multiple items. Must include %s.
+ allow_links -- True to allow symlinks to be matched - default False
+ summary -- Summary to set for each package. Must include %s;
+ defaults to description if not set.
+
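+    Example (a sketch; the 'myapp' plugin tree is hypothetical):
+
+        do_split_packages(d, root='${libdir}/myapp/plugins',
+                          file_regex='^(.*)\.so$',
+                          output_pattern='myapp-plugin-%s',
+                          description='myapp plugin for %s')
+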
+ """
+
+ dvar = d.getVar('PKGD', True)
+ root = d.expand(root)
+ output_pattern = d.expand(output_pattern)
+ extra_depends = d.expand(extra_depends)
+
+ # If the root directory doesn't exist, don't error out later but silently do
+ # no splitting.
+ if not os.path.exists(dvar + root):
+ return []
+
+ ml = d.getVar("MLPREFIX", True)
+ if ml:
+ if not output_pattern.startswith(ml):
+ output_pattern = ml + output_pattern
+
+ newdeps = []
+ for dep in (extra_depends or "").split():
+ if dep.startswith(ml):
+ newdeps.append(dep)
+ else:
+ newdeps.append(ml + dep)
+ if newdeps:
+ extra_depends = " ".join(newdeps)
+
+
+ packages = d.getVar('PACKAGES', True).split()
+ split_packages = []
+
+ if postinst:
+ postinst = '#!/bin/sh\n' + postinst + '\n'
+ if postrm:
+ postrm = '#!/bin/sh\n' + postrm + '\n'
+ if not recursive:
+ objs = os.listdir(dvar + root)
+ else:
+ objs = []
+ for walkroot, dirs, files in os.walk(dvar + root):
+ for file in files:
+ relpath = os.path.join(walkroot, file).replace(dvar + root + '/', '', 1)
+ if relpath:
+ objs.append(relpath)
+
+ if extra_depends is None:
+ extra_depends = d.getVar("PN", True)
+
+ if not summary:
+ summary = description
+
+ import re, stat
+ for o in sorted(objs):
+ if match_path:
+ m = re.match(file_regex, o)
+ else:
+ m = re.match(file_regex, os.path.basename(o))
+
+ if not m:
+ continue
+ f = os.path.join(dvar + root, o)
+ mode = os.lstat(f).st_mode
+ if not (stat.S_ISREG(mode) or (allow_links and stat.S_ISLNK(mode)) or (allow_dirs and stat.S_ISDIR(mode))):
+ continue
+ on = legitimize_package_name(m.group(1))
+ pkg = output_pattern % on
+ split_packages.append(pkg)
+ if pkg not in packages:
+ if prepend:
+ packages = [pkg] + packages
+ else:
+ packages.append(pkg)
+ oldfiles = d.getVar('FILES_' + pkg, True)
+ newfile = os.path.join(root, o)
+ # These names will be passed through glob() so if the filename actually
+ # contains * or ? (rare, but possible) we need to handle that specially
+ newfile = newfile.replace('*', '[*]')
+ newfile = newfile.replace('?', '[?]')
+ if not oldfiles:
+ the_files = [newfile]
+ if aux_files_pattern:
+ if type(aux_files_pattern) is list:
+ for fp in aux_files_pattern:
+ the_files.append(fp % on)
+ else:
+ the_files.append(aux_files_pattern % on)
+ if aux_files_pattern_verbatim:
+ if type(aux_files_pattern_verbatim) is list:
+ for fp in aux_files_pattern_verbatim:
+ the_files.append(fp % m.group(1))
+ else:
+ the_files.append(aux_files_pattern_verbatim % m.group(1))
+ d.setVar('FILES_' + pkg, " ".join(the_files))
+ else:
+ d.setVar('FILES_' + pkg, oldfiles + " " + newfile)
+ if extra_depends != '':
+ d.appendVar('RDEPENDS_' + pkg, ' ' + extra_depends)
+ if not d.getVar('DESCRIPTION_' + pkg, True):
+ d.setVar('DESCRIPTION_' + pkg, description % on)
+ if not d.getVar('SUMMARY_' + pkg, True):
+ d.setVar('SUMMARY_' + pkg, summary % on)
+ if postinst:
+ d.setVar('pkg_postinst_' + pkg, postinst)
+ if postrm:
+ d.setVar('pkg_postrm_' + pkg, postrm)
+ if callable(hook):
+ hook(f, pkg, file_regex, output_pattern, m.group(1))
+
+ d.setVar('PACKAGES', ' '.join(packages))
+ return split_packages
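+
+# A minimal usage sketch (hypothetical recipe; the plugin directory, regex
+# and package names below are illustrative, not defined by this class):
+#
+#   python populate_packages_prepend () {
+#       do_split_packages(d, d.expand('${libdir}/myapp/plugins'),
+#                         '^lib(.*)\.so$', 'myapp-plugin-%s',
+#                         'MyApp plugin for %s', extra_depends='')
+#   }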
+
+PACKAGE_DEPENDS += "file-native"
+
+python () {
+ if d.getVar('PACKAGES', True) != '':
+ deps = ""
+ for dep in (d.getVar('PACKAGE_DEPENDS', True) or "").split():
+ deps += " %s:do_populate_sysroot" % dep
+ d.appendVarFlag('do_package', 'depends', deps)
+
+ # shlibs requires any DEPENDS to have already been packaged so the *.list files exist
+ d.appendVarFlag('do_package', 'deptask', " do_packagedata")
+}
+
+# Get a list of files from the FILES-style variables by searching under the
+# current working directory. The returned list contains symlinks, directories
+# and regular files.
+def files_from_filevars(filevars):
+ import os, glob
+ cpath = oe.cachedpath.CachedPath()
+ files = []
+ for f in filevars:
+ if os.path.isabs(f):
+ f = '.' + f
+ if not f.startswith("./"):
+ f = './' + f
+ globbed = glob.glob(f)
+ if globbed:
+ if [f] != globbed:
+ files += globbed
+ continue
+ files.append(f)
+
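+ # Note: 'files' is deliberately extended while being iterated, so the
+ # contents of any directory appended below are themselves examined on a
+ # later iteration, effectively recursing into subdirectories.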
+ for f in files:
+ if not cpath.islink(f):
+ if cpath.isdir(f):
+ newfiles = [ os.path.join(f,x) for x in os.listdir(f) ]
+ if newfiles:
+ files += newfiles
+
+ return files
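+
+# A sketch of the expansion (hypothetical paths): with the current working
+# directory at the package root, files_from_filevars(["/usr/bin/*", "/etc"])
+# might return ['./usr/bin/foo', './etc', './etc/foo.conf'] - globs expanded,
+# plus the contents of any matched directory.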
+
+# Called in package_<rpm,ipk,deb>.bbclass to get the correct list of configuration files
+def get_conffiles(pkg, d):
+ pkgdest = d.getVar('PKGDEST', True)
+ root = os.path.join(pkgdest, pkg)
+ cwd = os.getcwd()
+ os.chdir(root)
+
+ conffiles = d.getVar('CONFFILES_%s' % pkg, True)
+ if conffiles is None:
+ conffiles = d.getVar('CONFFILES', True)
+ if conffiles is None:
+ conffiles = ""
+ conffiles = conffiles.split()
+ conf_orig_list = files_from_filevars(conffiles)
+
+ # Remove links and directories from conf_orig_list to get conf_list which only contains normal files
+ conf_list = []
+ for f in conf_orig_list:
+ if os.path.isdir(f):
+ continue
+ if os.path.islink(f):
+ continue
+ if not os.path.exists(f):
+ continue
+ conf_list.append(f)
+
+ # Remove the leading '.' so the entries become absolute paths
+ conf_list = [f[1:] for f in conf_list]
+
+ os.chdir(cwd)
+ return conf_list
+
+def checkbuildpath(file, d):
+ tmpdir = d.getVar('TMPDIR', True)
+ with open(file) as f:
+ file_content = f.read()
+ if tmpdir in file_content:
+ return True
+
+ return False
+
+def splitdebuginfo(file, debugfile, debugsrcdir, sourcefile, d):
+ # Function to split a single file into two components, one is the stripped
+ # target system binary, the other contains any debugging information. The
+ # two files are linked to reference each other.
+ #
+ # sourcefile is also generated containing a list of debugsources
+
+ import stat
+
+ dvar = d.getVar('PKGD', True)
+ objcopy = d.getVar("OBJCOPY", True)
+ debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit")
+
+ # We ignore kernel modules; we don't generate debug info for them.
+ if file.find("/lib/modules/") != -1 and file.endswith(".ko"):
+ return 1
+
+ newmode = None
+ # Ensure the file is both readable and writable before operating on it
+ if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
+ origmode = os.stat(file)[stat.ST_MODE]
+ newmode = origmode | stat.S_IWRITE | stat.S_IREAD
+ os.chmod(file, newmode)
+
+ # We need to extract the debug src information here...
+ if debugsrcdir:
+ cmd = "'%s' -i -l '%s' '%s'" % (debugedit, sourcefile, file)
+ (retval, output) = oe.utils.getstatusoutput(cmd)
+ if retval:
+ bb.fatal("debugedit failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+
+ cmd = "'%s' --only-keep-debug '%s' '%s'" % (objcopy, file, debugfile)
+ (retval, output) = oe.utils.getstatusoutput(cmd)
+ if retval:
+ bb.fatal("objcopy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+
+ # Set the debuglink to have the view of the file path on the target
+ cmd = "'%s' --add-gnu-debuglink='%s' '%s'" % (objcopy, debugfile, file)
+ (retval, output) = oe.utils.getstatusoutput(cmd)
+ if retval:
+ bb.fatal("objcopy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+
+ if newmode:
+ os.chmod(file, origmode)
+
+ return 0
+
+def copydebugsources(debugsrcdir, d):
+ # The debug src information written out to sourcefile is further processed
+ # and copied to the destination here.
+
+ import stat
+
+ sourcefile = d.expand("${WORKDIR}/debugsources.list")
+ if debugsrcdir and os.path.isfile(sourcefile):
+ dvar = d.getVar('PKGD', True)
+ strip = d.getVar("STRIP", True)
+ objcopy = d.getVar("OBJCOPY", True)
+ debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit")
+ workdir = d.getVar("WORKDIR", True)
+ workparentdir = os.path.dirname(os.path.dirname(workdir))
+ workbasedir = os.path.basename(os.path.dirname(workdir)) + "/" + os.path.basename(workdir)
+
+ # If the build path occurs in sourcefile, the toolchain did not use
+ # -fdebug-prefix-map when compiling
+ if checkbuildpath(sourcefile, d):
+ localsrc_prefix = workparentdir + "/"
+ else:
+ localsrc_prefix = "/usr/src/debug/"
+
+ nosuchdir = []
+ basepath = dvar
+ for p in debugsrcdir.split("/"):
+ basepath = basepath + "/" + p
+ if not cpath.exists(basepath):
+ nosuchdir.append(basepath)
+ bb.utils.mkdirhier(basepath)
+ cpath.updatecache(basepath)
+
+ processdebugsrc = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '(<internal>|<built-in>)$' | "
+ # We need to ignore files that are not actually ours;
+ # we do this by only paying attention to items from this package
+ processdebugsrc += "fgrep -zw '%s' | "
+ # Remove prefix in the source paths
+ processdebugsrc += "sed 's#%s##g' | "
+ processdebugsrc += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)"
+
+ cmd = processdebugsrc % (sourcefile, workbasedir, localsrc_prefix, workparentdir, dvar, debugsrcdir)
+ (retval, output) = oe.utils.getstatusoutput(cmd)
+ # Can "fail" if internal headers/transient sources are attempted
+ #if retval:
+ # bb.fatal("debug source copy failed with exit code %s (cmd was %s)" % (retval, cmd))
+
+ # cpio seems to have a bug when -l and -L are used together: symbolic links are just copied, not dereferenced.
+ # Work around this by manually finding and copying any symbolic links that made it through.
+ cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s' 2>/dev/null)" % (dvar, debugsrcdir, dvar, debugsrcdir, workparentdir, dvar, debugsrcdir)
+ (retval, output) = oe.utils.getstatusoutput(cmd)
+ if retval:
+ bb.fatal("debugsrc symlink fixup failed with exit code %s (cmd was %s)" % (retval, cmd))
+
+ # The copy by cpio may have resulted in some empty directories! Remove these
+ cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir)
+ (retval, output) = oe.utils.getstatusoutput(cmd)
+ if retval:
+ bb.fatal("empty directory removal failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+
+ # Also remove debugsrcdir if it's empty
+ for p in nosuchdir[::-1]:
+ if os.path.exists(p) and not os.listdir(p):
+ os.rmdir(p)
+
+#
+# Package data handling routines
+#
+
+def get_package_mapping (pkg, basepkg, d):
+ import oe.packagedata
+
+ data = oe.packagedata.read_subpkgdata(pkg, d)
+ key = "PKG_%s" % pkg
+
+ if key in data:
+ # Have to avoid undoing the write_extra_pkgs(global_variants...)
+ if bb.data.inherits_class('allarch', d) and data[key] == basepkg:
+ return pkg
+ return data[key]
+
+ return pkg
+
+def get_package_additional_metadata (pkg_type, d):
+ base_key = "PACKAGE_ADD_METADATA"
+ for key in ("%s_%s" % (base_key, pkg_type.upper()), base_key):
+ if d.getVar(key, False) is None:
+ continue
+ d.setVarFlag(key, "type", "list")
+ if d.getVarFlag(key, "separator", True) is None:
+ d.setVarFlag(key, "separator", "\\n")
+ metadata_fields = [field.strip() for field in oe.data.typed_value(key, d)]
+ return "\n".join(metadata_fields).strip()
+
+def runtime_mapping_rename (varname, pkg, d):
+ #bb.note("%s before: %s" % (varname, d.getVar(varname, True)))
+
+ if bb.data.inherits_class('packagegroup', d):
+ return
+
+ new_depends = {}
+ deps = bb.utils.explode_dep_versions2(d.getVar(varname, True) or "")
+ for depend in deps:
+ new_depend = get_package_mapping(depend, pkg, d)
+ new_depends[new_depend] = deps[depend]
+
+ d.setVar(varname, bb.utils.join_deps(new_depends, commasep=False))
+
+ #bb.note("%s after: %s" % (varname, d.getVar(varname, True)))
+
+#
+# Package functions suitable for inclusion in PACKAGEFUNCS
+#
+
+python package_get_auto_pr() {
+ import oe.prservice
+ import re
+
+ # Support per recipe PRSERV_HOST
+ pn = d.getVar('PN', True)
+ host = d.getVar("PRSERV_HOST_" + pn, True)
+ if host is not None:
+ d.setVar("PRSERV_HOST", host)
+
+ pkgv = d.getVar("PKGV", True)
+
+ # PR Server not active, handle AUTOINC
+ if not d.getVar('PRSERV_HOST', True):
+ if 'AUTOINC' in pkgv:
+ d.setVar("PKGV", pkgv.replace("AUTOINC", "0"))
+ return
+
+ auto_pr = None
+ pv = d.getVar("PV", True)
+ version = d.getVar("PRAUTOINX", True)
+ pkgarch = d.getVar("PACKAGE_ARCH", True)
+ checksum = d.getVar("BB_TASKHASH", True)
+
+ if d.getVar('PRSERV_LOCKDOWN', True):
+ auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch, True) or d.getVar('PRAUTO_' + version, True) or None
+ if auto_pr is None:
+ bb.fatal("Can NOT get PRAUTO from lockdown exported file")
+ d.setVar('PRAUTO',str(auto_pr))
+ return
+
+ try:
+ conn = d.getVar("__PRSERV_CONN", True)
+ if conn is None:
+ conn = oe.prservice.prserv_make_conn(d)
+ if conn is not None:
+ if "AUTOINC" in pkgv:
+ srcpv = bb.fetch2.get_srcrev(d)
+ base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
+ value = conn.getPR(base_ver, pkgarch, srcpv)
+ d.setVar("PKGV", pkgv.replace("AUTOINC", str(value)))
+
+ auto_pr = conn.getPR(version, pkgarch, checksum)
+ except Exception as e:
+ bb.fatal("Can NOT get PRAUTO, exception %s" % str(e))
+ if auto_pr is None:
+ bb.fatal("Can NOT get PRAUTO from remote PR service")
+ d.setVar('PRAUTO',str(auto_pr))
+}
+
+LOCALEBASEPN ??= "${PN}"
+
+python package_do_split_locales() {
+ if d.getVar('PACKAGE_NO_LOCALE', True) == '1':
+ bb.debug(1, "package requested not to split locales")
+ return
+
+ packages = (d.getVar('PACKAGES', True) or "").split()
+
+ datadir = d.getVar('datadir', True)
+ if not datadir:
+ bb.note("datadir not defined")
+ return
+
+ dvar = d.getVar('PKGD', True)
+ pn = d.getVar('LOCALEBASEPN', True)
+
+ if pn + '-locale' in packages:
+ packages.remove(pn + '-locale')
+
+ localedir = os.path.join(dvar + datadir, 'locale')
+
+ if not cpath.isdir(localedir):
+ bb.debug(1, "No locale files in this package")
+ return
+
+ locales = os.listdir(localedir)
+
+ summary = d.getVar('SUMMARY', True) or pn
+ description = d.getVar('DESCRIPTION', True) or ""
+ locale_section = d.getVar('LOCALE_SECTION', True)
+ mlprefix = d.getVar('MLPREFIX', True) or ""
+ for l in sorted(locales):
+ ln = legitimize_package_name(l)
+ pkg = pn + '-locale-' + ln
+ packages.append(pkg)
+ d.setVar('FILES_' + pkg, os.path.join(datadir, 'locale', l))
+ d.setVar('RRECOMMENDS_' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
+ d.setVar('RPROVIDES_' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
+ d.setVar('SUMMARY_' + pkg, '%s - %s translations' % (summary, l))
+ d.setVar('DESCRIPTION_' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
+ if locale_section:
+ d.setVar('SECTION_' + pkg, locale_section)
+
+ d.setVar('PACKAGES', ' '.join(packages))
+
+ # Disabled by RP 18/06/07
+ # Wildcards aren't supported in debian
+ # They break with ipkg since glibc-locale* will mean that
+ # glibc-localedata-translit* won't install as a dependency
+ # for some other package which breaks meta-toolchain
+ # Probably breaks since virtual-locale- isn't provided anywhere
+ #rdep = (d.getVar('RDEPENDS_%s' % pn, True) or "").split()
+ #rdep.append('%s-locale*' % pn)
+ #d.setVar('RDEPENDS_%s' % pn, ' '.join(rdep))
+}
+
+python perform_packagecopy () {
+ dest = d.getVar('D', True)
+ dvar = d.getVar('PKGD', True)
+
+ # Start package population by taking a copy of the installed
+ # files to operate on
+ # Preserve sparse files and hard links
+ cmd = 'tar -cf - -C %s -p . | tar -xf - -C %s' % (dest, dvar)
+ (retval, output) = oe.utils.getstatusoutput(cmd)
+ if retval:
+ bb.fatal("file copy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+
+ # replace RPATHs for the nativesdk binaries, to make them relocatable
+ if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d):
+ rpath_replace (dvar, d)
+}
+perform_packagecopy[cleandirs] = "${PKGD}"
+perform_packagecopy[dirs] = "${PKGD}"
+
+# We generate a master list of directories to process: we start by
+# seeding it with reasonable defaults, then load entries from the
+# fs-perms.txt files
+python fixup_perms () {
+ import pwd, grp
+
+ # init using a string with the same format as a line, as documented in
+ # the fs-perms.txt file
+ # <path> <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>
+ # <path> link <link target>
+ #
+ # __str__ can be used to print out an entry in the input format
+ #
+ # if fs_perms_entry.path is None:
+ # an error occurred
+ # if fs_perms_entry.link, you can retrieve:
+ # fs_perms_entry.path = path
+ # fs_perms_entry.link = target of link
+ # if not fs_perms_entry.link, you can retrieve:
+ # fs_perms_entry.path = path
+ # fs_perms_entry.mode = expected dir mode or None
+ # fs_perms_entry.uid = expected uid or -1
+ # fs_perms_entry.gid = expected gid or -1
+ # fs_perms_entry.walk = 'true' or something else
+ # fs_perms_entry.fmode = expected file mode or None
+ # fs_perms_entry.fuid = expected file uid or -1
+ # fs_perms_entry.fgid = expected file gid or -1
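+ #
+ # Example entries in this format (illustrative):
+ #   ${sysconfdir} 0755 root root false - - -
+ #   ${localstatedir}/run link /run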
+ class fs_perms_entry():
+ def __init__(self, line):
+ lsplit = line.split()
+ if len(lsplit) == 3 and lsplit[1].lower() == "link":
+ self._setlink(lsplit[0], lsplit[2])
+ elif len(lsplit) == 8:
+ self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7])
+ else:
+ msg = "Fixup Perms: invalid config line %s" % line
+ package_qa_handle_error("perm-config", msg, d)
+ self.path = None
+ self.link = None
+
+ def _setdir(self, path, mode, uid, gid, walk, fmode, fuid, fgid):
+ self.path = os.path.normpath(path)
+ self.link = None
+ self.mode = self._procmode(mode)
+ self.uid = self._procuid(uid)
+ self.gid = self._procgid(gid)
+ self.walk = walk.lower()
+ self.fmode = self._procmode(fmode)
+ self.fuid = self._procuid(fuid)
+ self.fgid = self._procgid(fgid)
+
+ def _setlink(self, path, link):
+ self.path = os.path.normpath(path)
+ self.link = link
+
+ def _procmode(self, mode):
+ if not mode or mode == "-":
+ return None
+ else:
+ return int(mode, 8)
+
+ # Note uid/gid -1 has special significance in os.lchown
+ def _procuid(self, uid):
+ if uid is None or uid == "-":
+ return -1
+ elif uid.isdigit():
+ return int(uid)
+ else:
+ return pwd.getpwnam(uid).pw_uid
+
+ def _procgid(self, gid):
+ if gid is None or gid == "-":
+ return -1
+ elif gid.isdigit():
+ return int(gid)
+ else:
+ return grp.getgrnam(gid).gr_gid
+
+ # Use for debugging the entries
+ def __str__(self):
+ if self.link:
+ return "%s link %s" % (self.path, self.link)
+ else:
+ mode = "-"
+ if self.mode:
+ mode = "0%o" % self.mode
+ fmode = "-"
+ if self.fmode:
+ fmode = "0%o" % self.fmode
+ uid = self._mapugid(self.uid)
+ gid = self._mapugid(self.gid)
+ fuid = self._mapugid(self.fuid)
+ fgid = self._mapugid(self.fgid)
+ return "%s %s %s %s %s %s %s %s" % (self.path, mode, uid, gid, self.walk, fmode, fuid, fgid)
+
+ def _mapugid(self, id):
+ if id is None or id == -1:
+ return "-"
+ else:
+ return "%d" % id
+
+ # Fix the permission, owner and group of path
+ def fix_perms(path, mode, uid, gid, dir):
+ if mode and not os.path.islink(path):
+ #bb.note("Fixup Perms: chmod 0%o %s" % (mode, dir))
+ os.chmod(path, mode)
+ # -1 is a special value that means don't change the uid/gid
+ # if they are BOTH -1, don't bother to lchown
+ if not (uid == -1 and gid == -1):
+ #bb.note("Fixup Perms: lchown %d:%d %s" % (uid, gid, dir))
+ os.lchown(path, uid, gid)
+
+ # Return a list of configuration files based on either the default
+ # files/fs-perms.txt or the contents of FILESYSTEM_PERMS_TABLES;
+ # paths are resolved via BBPATH
+ def get_fs_perms_list(d):
+ perms_files = ""
+ bbpath = d.getVar('BBPATH', True)
+ fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES', True)
+ if not fs_perms_tables:
+ fs_perms_tables = 'files/fs-perms.txt'
+ for conf_file in fs_perms_tables.split():
+ perms_files += " %s" % bb.utils.which(bbpath, conf_file)
+ return perms_files
+
+ dvar = d.getVar('PKGD', True)
+
+ fs_perms_table = {}
+ fs_link_table = {}
+
+ # By default all of the standard directories specified in
+ # bitbake.conf will get 0755 root:root.
+ target_path_vars = [ 'base_prefix',
+ 'prefix',
+ 'exec_prefix',
+ 'base_bindir',
+ 'base_sbindir',
+ 'base_libdir',
+ 'datadir',
+ 'sysconfdir',
+ 'servicedir',
+ 'sharedstatedir',
+ 'localstatedir',
+ 'infodir',
+ 'mandir',
+ 'docdir',
+ 'bindir',
+ 'sbindir',
+ 'libexecdir',
+ 'libdir',
+ 'includedir',
+ 'oldincludedir' ]
+
+ for path in target_path_vars:
+ dir = d.getVar(path, True) or ""
+ if dir == "":
+ continue
+ fs_perms_table[dir] = fs_perms_entry(bb.data.expand("%s 0755 root root false - - -" % (dir), d))
+
+ # Now we actually load from the configuration files
+ for conf in get_fs_perms_list(d).split():
+ if os.path.exists(conf):
+ f = open(conf)
+ for line in f:
+ if line.startswith('#'):
+ continue
+ lsplit = line.split()
+ if len(lsplit) == 0:
+ continue
+ if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"):
+ msg = "Fixup perms: %s invalid line: %s" % (conf, line)
+ package_qa_handle_error("perm-line", msg, d)
+ continue
+ entry = fs_perms_entry(d.expand(line))
+ if entry and entry.path:
+ if entry.link:
+ fs_link_table[entry.path] = entry
+ if entry.path in fs_perms_table:
+ fs_perms_table.pop(entry.path)
+ else:
+ fs_perms_table[entry.path] = entry
+ if entry.path in fs_link_table:
+ fs_link_table.pop(entry.path)
+ f.close()
+
+ # Debug -- list out in-memory table
+ #for dir in fs_perms_table:
+ # bb.note("Fixup Perms: %s: %s" % (dir, str(fs_perms_table[dir])))
+ #for link in fs_link_table:
+ # bb.note("Fixup Perms: %s: %s" % (link, str(fs_link_table[link])))
+
+ # We process links first, so we can go back and fix up directory ownership
+ # for any newly created directories
+ # Process in sorted order so /run gets created before /run/lock, etc.
+ for entry in sorted(fs_link_table.values(), key=lambda x: x.link):
+ link = entry.link
+ dir = entry.path
+ origin = dvar + dir
+ if not (cpath.exists(origin) and cpath.isdir(origin) and not cpath.islink(origin)):
+ continue
+
+ if link[0] == "/":
+ target = dvar + link
+ ptarget = link
+ else:
+ target = os.path.join(os.path.dirname(origin), link)
+ ptarget = os.path.join(os.path.dirname(dir), link)
+ if os.path.exists(target):
+ msg = "Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget)
+ package_qa_handle_error("perm-link", msg, d)
+ continue
+
+ # Create path to move directory to, move it, and then set up the symlink
+ bb.utils.mkdirhier(os.path.dirname(target))
+ #bb.note("Fixup Perms: Rename %s -> %s" % (dir, ptarget))
+ os.rename(origin, target)
+ #bb.note("Fixup Perms: Link %s -> %s" % (dir, link))
+ os.symlink(link, origin)
+
+ for dir in fs_perms_table:
+ origin = dvar + dir
+ if not (cpath.exists(origin) and cpath.isdir(origin)):
+ continue
+
+ fix_perms(origin, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
+
+ if fs_perms_table[dir].walk == 'true':
+ for root, dirs, files in os.walk(origin):
+ for dr in dirs:
+ each_dir = os.path.join(root, dr)
+ fix_perms(each_dir, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
+ for f in files:
+ each_file = os.path.join(root, f)
+ fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir)
+}
+
+python split_and_strip_files () {
+ import stat, errno
+
+ dvar = d.getVar('PKGD', True)
+ pn = d.getVar('PN', True)
+
+ # We default to '.debug' style
+ if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-file-directory':
+ # Single debug-file-directory style debug info
+ debugappend = ".debug"
+ debugdir = ""
+ debuglibdir = "/usr/lib/debug"
+ debugsrcdir = "/usr/src/debug"
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-without-src':
+ # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
+ debugappend = ""
+ debugdir = "/.debug"
+ debuglibdir = ""
+ debugsrcdir = ""
+ else:
+ # Original OE-core, a.k.a. ".debug", style debug info
+ debugappend = ""
+ debugdir = "/.debug"
+ debuglibdir = ""
+ debugsrcdir = "/usr/src/debug"
+
+ sourcefile = d.expand("${WORKDIR}/debugsources.list")
+ bb.utils.remove(sourcefile)
+
+ os.chdir(dvar)
+
+ # Return type (bits):
+ # 0 - not elf
+ # 1 - ELF
+ # 2 - stripped
+ # 4 - executable
+ # 8 - shared library
+ # 16 - kernel module
+ def isELF(path):
+ type = 0
+ ret, result = oe.utils.getstatusoutput("file \"%s\"" % path.replace("\"", "\\\""))
+
+ if ret:
+ msg = "split_and_strip_files: 'file %s' failed" % path
+ package_qa_handle_error("split-strip", msg, d)
+ return type
+
+ if "ELF" in result:
+ type |= 1
+ # file(1) reports "not stripped" when symbols are still present
+ if "not stripped" not in result:
+ type |= 2
+ if "executable" in result:
+ type |= 4
+ if "shared" in result:
+ type |= 8
+ return type
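+
+ # For example, an unstripped shared library yields 1|8 = 9, while a
+ # stripped executable yields 1|2|4 = 7.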
+
+
+ #
+ # First let's figure out all of the files we may have to process ... do this only once!
+ #
+ elffiles = {}
+ symlinks = {}
+ kernmods = []
+ inodes = {}
+ libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir", True))
+ baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir", True))
+ if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1'):
+ for root, dirs, files in cpath.walk(dvar):
+ for f in files:
+ file = os.path.join(root, f)
+ if file.endswith(".ko") and file.find("/lib/modules/") != -1:
+ kernmods.append(file)
+ continue
+
+ # Skip debug files
+ if debugappend and file.endswith(debugappend):
+ continue
+ if debugdir and debugdir in os.path.dirname(file[len(dvar):]):
+ continue
+
+ try:
+ ltarget = cpath.realpath(file, dvar, False)
+ s = cpath.lstat(ltarget)
+ except OSError as e:
+ (err, strerror) = e.args
+ if err != errno.ENOENT:
+ raise
+ # Skip broken symlinks
+ continue
+ if not s:
+ continue
+ # Check whether it's executable
+ if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \
+ or ((file.startswith(libdir) or file.startswith(baselibdir)) and (".so" in f or ".node" in f)):
+ # If it's a symlink, and points to an ELF file, we capture the readlink target
+ if cpath.islink(file):
+ target = os.readlink(file)
+ if isELF(ltarget):
+ #bb.note("Sym: %s (%d)" % (ltarget, isELF(ltarget)))
+ symlinks[file] = target
+ continue
+
+ # It's a file (or hardlink), not a link
+ # ...but is it ELF, and is it already stripped?
+ elf_file = isELF(file)
+ if elf_file & 1:
+ if elf_file & 2:
+ if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn, True) or "").split():
+ bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
+ else:
+ msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
+ package_qa_handle_error("already-stripped", msg, d)
+ continue
+
+ # At this point we have an unstripped elf file. We need to:
+ # a) Make sure any file we strip is not hardlinked to anything else outside this tree
+ # b) Only strip any hardlinked file once (no races)
+ # c) Track any hardlinks between files so that we can reconstruct matching debug file hardlinks
+
+ # Use a reference of device ID and inode number to identify files
+ file_reference = "%d_%d" % (s.st_dev, s.st_ino)
+ if file_reference in inodes:
+ os.unlink(file)
+ os.link(inodes[file_reference][0], file)
+ inodes[file_reference].append(file)
+ else:
+ inodes[file_reference] = [file]
+ # break hardlink
+ bb.utils.copyfile(file, file)
+ elffiles[file] = elf_file
+ # Modified the file so clear the cache
+ cpath.updatecache(file)
+
+ #
+ # First let's process debug splitting
+ #
+ if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT', True) != '1'):
+ for file in elffiles:
+ src = file[len(dvar):]
+ dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+ fpath = dvar + dest
+
+ # Split the file...
+ bb.utils.mkdirhier(os.path.dirname(fpath))
+ #bb.note("Split %s -> %s" % (file, fpath))
+ # Only store off the hard link reference if we successfully split!
+ splitdebuginfo(file, fpath, debugsrcdir, sourcefile, d)
+
+ # Hardlink our debug symbols to the other hardlink copies
+ for ref in inodes:
+ if len(inodes[ref]) == 1:
+ continue
+ for file in inodes[ref][1:]:
+ src = file[len(dvar):]
+ dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+ fpath = dvar + dest
+ target = inodes[ref][0][len(dvar):]
+ ftarget = dvar + debuglibdir + os.path.dirname(target) + debugdir + "/" + os.path.basename(target) + debugappend
+ bb.utils.mkdirhier(os.path.dirname(fpath))
+ #bb.note("Link %s -> %s" % (fpath, ftarget))
+ os.link(ftarget, fpath)
+
+ # Create symlinks for all cases we were able to split symbols
+ for file in symlinks:
+ src = file[len(dvar):]
+ dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+ fpath = dvar + dest
+ # Skip it if the target doesn't exist
+ try:
+ s = os.stat(fpath)
+ except OSError as e:
+ (err, strerror) = e.args
+ if err != errno.ENOENT:
+ raise
+ continue
+
+ ltarget = symlinks[file]
+ lpath = os.path.dirname(ltarget)
+ lbase = os.path.basename(ltarget)
+ ftarget = ""
+ if lpath and lpath != ".":
+ ftarget += lpath + debugdir + "/"
+ ftarget += lbase + debugappend
+ if lpath.startswith(".."):
+ ftarget = os.path.join("..", ftarget)
+ bb.utils.mkdirhier(os.path.dirname(fpath))
+ #bb.note("Symlink %s -> %s" % (fpath, ftarget))
+ os.symlink(ftarget, fpath)
+
+ # Process the debugsrcdir if requested...
+ # This copies and places the referenced sources for later debugging...
+ copydebugsources(debugsrcdir, d)
+ #
+ # End of debug splitting
+ #
+
+ #
+ # Now let's go back over things and strip them
+ #
+ if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1'):
+ strip = d.getVar("STRIP", True)
+ sfiles = []
+ for file in elffiles:
+ elf_file = int(elffiles[file])
+ #bb.note("Strip %s" % file)
+ sfiles.append((file, elf_file, strip))
+ for f in kernmods:
+ sfiles.append((f, 16, strip))
+
+ oe.utils.multiprocess_exec(sfiles, oe.package.runstrip)
+
+ #
+ # End of strip
+ #
+}
+
+python populate_packages () {
+ import glob, re
+
+ workdir = d.getVar('WORKDIR', True)
+ outdir = d.getVar('DEPLOY_DIR', True)
+ dvar = d.getVar('PKGD', True)
+ packages = d.getVar('PACKAGES', True)
+ pn = d.getVar('PN', True)
+
+ bb.utils.mkdirhier(outdir)
+ os.chdir(dvar)
+
+ autodebug = not (d.getVar("NOAUTOPACKAGEDEBUG", True) or False)
+
+ # Sanity check PACKAGES for duplicates
+ # This check should be moved to sanity.bbclass once we have the infrastructure
+ package_list = []
+
+ for pkg in packages.split():
+ if pkg in package_list:
+ msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
+ package_qa_handle_error("packages-list", msg, d)
+ elif autodebug and pkg.endswith("-dbg"):
+ package_list.insert(0, pkg)
+ else:
+ package_list.append(pkg)
+ d.setVar('PACKAGES', ' '.join(package_list))
+ pkgdest = d.getVar('PKGDEST', True)
+
+ seen = []
+
+ # os.mkdir masks the permissions with umask so we have to unset it first
+ oldumask = os.umask(0)
+
+ debug = []
+ for root, dirs, files in cpath.walk(dvar):
+ dir = root[len(dvar):]
+ if not dir:
+ dir = os.sep
+ for f in (files + dirs):
+ path = "." + os.path.join(dir, f)
+ if "/.debug/" in path or path.endswith("/.debug"):
+ debug.append(path)
+
+ for pkg in package_list:
+ root = os.path.join(pkgdest, pkg)
+ bb.utils.mkdirhier(root)
+
+ filesvar = d.getVar('FILES_%s' % pkg, True) or ""
+ if "//" in filesvar:
+ msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
+ package_qa_handle_error("files-invalid", msg, d)
+ filesvar = filesvar.replace("//", "/")
+
+ origfiles = filesvar.split()
+ files = files_from_filevars(origfiles)
+
+ if autodebug and pkg.endswith("-dbg"):
+ files.extend(debug)
+
+ for file in files:
+ if (not cpath.islink(file)) and (not cpath.exists(file)):
+ continue
+ if file in seen:
+ continue
+ seen.append(file)
+
+ def mkdir(src, dest, p):
+ src = os.path.join(src, p)
+ dest = os.path.join(dest, p)
+ fstat = cpath.stat(src)
+ os.mkdir(dest, fstat.st_mode)
+ os.chown(dest, fstat.st_uid, fstat.st_gid)
+ if p not in seen:
+ seen.append(p)
+ cpath.updatecache(dest)
+
+ def mkdir_recurse(src, dest, paths):
+ if cpath.exists(dest + '/' + paths):
+ return
+ while paths.startswith("./"):
+ paths = paths[2:]
+ p = "."
+ for c in paths.split("/"):
+ p = os.path.join(p, c)
+ if not cpath.exists(os.path.join(dest, p)):
+ mkdir(src, dest, p)
+
+ if cpath.isdir(file) and not cpath.islink(file):
+ mkdir_recurse(dvar, root, file)
+ continue
+
+ mkdir_recurse(dvar, root, os.path.dirname(file))
+ fpath = os.path.join(root,file)
+ if not cpath.islink(file):
+ os.link(file, fpath)
+ fstat = cpath.stat(file)
+ os.chmod(fpath, fstat.st_mode)
+ os.chown(fpath, fstat.st_uid, fstat.st_gid)
+ continue
+ ret = bb.utils.copyfile(file, fpath)
+ if ret is False or ret == 0:
+ raise bb.build.FuncFailed("File population failed")
+
+ os.umask(oldumask)
+ os.chdir(workdir)
+
+ # Handle LICENSE_EXCLUSION
+ package_list = []
+ for pkg in packages.split():
+ if d.getVar('LICENSE_EXCLUSION-' + pkg, True):
+ msg = "%s has an incompatible license. Excluding from packaging." % pkg
+ package_qa_handle_error("incompatible-license", msg, d)
+ else:
+ package_list.append(pkg)
+ d.setVar('PACKAGES', ' '.join(package_list))
+
+ unshipped = []
+ for root, dirs, files in cpath.walk(dvar):
+ dir = root[len(dvar):]
+ if not dir:
+ dir = os.sep
+ for f in (files + dirs):
+ path = os.path.join(dir, f)
+ if ('.' + path) not in seen:
+ unshipped.append(path)
+
+ if unshipped:
+ msg = pn + ": Files/directories were installed but not shipped in any package:"
+ if "installed-vs-shipped" in (d.getVar('INSANE_SKIP_' + pn, True) or "").split():
+ bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
+ else:
+ for f in unshipped:
+ msg = msg + "\n " + f
+ msg = msg + "\nPlease set FILES such that these items are packaged. Alternatively if they are unneeded, avoid installing them or delete them within do_install.\n"
+ msg = msg + "%s: %d installed and not shipped files." % (pn, len(unshipped))
+ package_qa_handle_error("installed-vs-shipped", msg, d)
+}
+populate_packages[dirs] = "${D}"
+
+python package_fixsymlinks () {
+ import errno
+ pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar("PACKAGES", False).split()
+
+ dangling_links = {}
+ pkg_files = {}
+ for pkg in packages:
+ dangling_links[pkg] = []
+ pkg_files[pkg] = []
+ inst_root = os.path.join(pkgdest, pkg)
+ for path in pkgfiles[pkg]:
+ rpath = path[len(inst_root):]
+ pkg_files[pkg].append(rpath)
+ rtarget = cpath.realpath(path, inst_root, True, assume_dir = True)
+ if not cpath.lexists(rtarget):
+ dangling_links[pkg].append(os.path.normpath(rtarget[len(inst_root):]))
+
+ newrdepends = {}
+ for pkg in dangling_links:
+ for l in dangling_links[pkg]:
+ found = False
+ bb.debug(1, "%s contains dangling link %s" % (pkg, l))
+ for p in packages:
+ if l in pkg_files[p]:
+ found = True
+ bb.debug(1, "target found in %s" % p)
+ if p == pkg:
+ break
+ if pkg not in newrdepends:
+ newrdepends[pkg] = []
+ newrdepends[pkg].append(p)
+ break
+ if not found:
+ bb.note("%s contains dangling symlink to %s" % (pkg, l))
+
+ for pkg in newrdepends:
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
+ for p in newrdepends[pkg]:
+ if p not in rdepends:
+ rdepends[p] = []
+ d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
+}
+
+
+python package_package_name_hook() {
+ """
+ A package_name_hook function can be used to rewrite the package names by
+ changing PKG. For an example, see debian.bbclass.
+ """
+ pass
+}
+
+EXPORT_FUNCTIONS package_name_hook
+
+
+PKGDESTWORK = "${WORKDIR}/pkgdata"
+
+python emit_pkgdata() {
+ from glob import glob
+ import json
+
+ def write_if_exists(f, pkg, var):
+ def encode(str):
+ import codecs
+ c = codecs.getencoder("string_escape")
+ return c(str)[0]
+
+ val = d.getVar('%s_%s' % (var, pkg), True)
+ if val:
+ f.write('%s_%s: %s\n' % (var, pkg, encode(val)))
+ return val
+ val = d.getVar('%s' % (var), True)
+ if val:
+ f.write('%s: %s\n' % (var, encode(val)))
+ return val
+
+ def write_extra_pkgs(variants, pn, packages, pkgdatadir):
+ for variant in variants:
+ with open("%s/%s-%s" % (pkgdatadir, variant, pn), 'w') as fd:
+ fd.write("PACKAGES: %s\n" % ' '.join(
+ map(lambda pkg: '%s-%s' % (variant, pkg), packages.split())))
+
+ def write_extra_runtime_pkgs(variants, packages, pkgdatadir):
+ for variant in variants:
+ for pkg in packages.split():
+ ml_pkg = "%s-%s" % (variant, pkg)
+ subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
+ with open(subdata_file, 'w') as fd:
+ fd.write("PKG_%s: %s" % (ml_pkg, pkg))
+
+ packages = d.getVar('PACKAGES', True)
+ pkgdest = d.getVar('PKGDEST', True)
+ pkgdatadir = d.getVar('PKGDESTWORK', True)
+
+ # Take shared lock since we're only reading, not writing
+ lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
+
+ data_file = pkgdatadir + d.expand("/${PN}" )
+ f = open(data_file, 'w')
+ f.write("PACKAGES: %s\n" % packages)
+ f.close()
+
+ pn = d.getVar('PN', True)
+ global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS', True) or "").split()
+ variants = (d.getVar('MULTILIB_VARIANTS', True) or "").split()
+
+ if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
+ write_extra_pkgs(variants, pn, packages, pkgdatadir)
+
+ if (bb.data.inherits_class('allarch', d) and not bb.data.inherits_class('packagegroup', d)):
+ write_extra_pkgs(global_variants, pn, packages, pkgdatadir)
+
+ workdir = d.getVar('WORKDIR', True)
+
+ for pkg in packages.split():
+ pkgval = d.getVar('PKG_%s' % pkg, True)
+ if pkgval is None:
+ pkgval = pkg
+ d.setVar('PKG_%s' % pkg, pkg)
+
+ pkgdestpkg = os.path.join(pkgdest, pkg)
+ files = {}
+ total_size = 0
+ for f in pkgfiles[pkg]:
+ relpth = os.path.relpath(f, pkgdestpkg)
+ fstat = os.lstat(f)
+ total_size += fstat.st_size
+ files[os.sep + relpth] = fstat.st_size
+ d.setVar('FILES_INFO', json.dumps(files))
+
+ subdata_file = pkgdatadir + "/runtime/%s" % pkg
+ sf = open(subdata_file, 'w')
+ write_if_exists(sf, pkg, 'PN')
+ write_if_exists(sf, pkg, 'PE')
+ write_if_exists(sf, pkg, 'PV')
+ write_if_exists(sf, pkg, 'PR')
+ write_if_exists(sf, pkg, 'PKGE')
+ write_if_exists(sf, pkg, 'PKGV')
+ write_if_exists(sf, pkg, 'PKGR')
+ write_if_exists(sf, pkg, 'LICENSE')
+ write_if_exists(sf, pkg, 'DESCRIPTION')
+ write_if_exists(sf, pkg, 'SUMMARY')
+ write_if_exists(sf, pkg, 'RDEPENDS')
+ rprov = write_if_exists(sf, pkg, 'RPROVIDES')
+ write_if_exists(sf, pkg, 'RRECOMMENDS')
+ write_if_exists(sf, pkg, 'RSUGGESTS')
+ write_if_exists(sf, pkg, 'RREPLACES')
+ write_if_exists(sf, pkg, 'RCONFLICTS')
+ write_if_exists(sf, pkg, 'SECTION')
+ write_if_exists(sf, pkg, 'PKG')
+ write_if_exists(sf, pkg, 'ALLOW_EMPTY')
+ write_if_exists(sf, pkg, 'FILES')
+ write_if_exists(sf, pkg, 'pkg_postinst')
+ write_if_exists(sf, pkg, 'pkg_postrm')
+ write_if_exists(sf, pkg, 'pkg_preinst')
+ write_if_exists(sf, pkg, 'pkg_prerm')
+ write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
+ write_if_exists(sf, pkg, 'FILES_INFO')
+ for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg, True) or "").split():
+ write_if_exists(sf, pkg, 'FILERPROVIDES_' + dfile)
+
+ write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
+ for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg, True) or "").split():
+ write_if_exists(sf, pkg, 'FILERDEPENDS_' + dfile)
+
+ sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
+ sf.close()
+
+ # Symlinks needed for rprovides lookup
+ if rprov:
+ for p in rprov.strip().split():
+ subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
+ bb.utils.mkdirhier(os.path.dirname(subdata_sym))
+ oe.path.symlink("../../runtime/%s" % pkg, subdata_sym, True)
+
+ allow_empty = d.getVar('ALLOW_EMPTY_%s' % pkg, True)
+ if not allow_empty:
+ allow_empty = d.getVar('ALLOW_EMPTY', True)
+ root = "%s/%s" % (pkgdest, pkg)
+ os.chdir(root)
+ g = glob('*')
+ if g or allow_empty == "1":
+ # Symlinks needed for reverse lookups (from the final package name)
+ subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval
+ oe.path.symlink("../runtime/%s" % pkg, subdata_sym, True)
+
+ packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg
+ open(packagedfile, 'w').close()
+
+ if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
+ write_extra_runtime_pkgs(variants, packages, pkgdatadir)
+
+ if bb.data.inherits_class('allarch', d) and not bb.data.inherits_class('packagegroup', d):
+ write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
+
+ bb.utils.unlockfile(lf)
+}
+emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides"
+
+ldconfig_postinst_fragment() {
+if [ x"$D" = "x" ]; then
+ if [ -x /sbin/ldconfig ]; then /sbin/ldconfig ; fi
+fi
+}
+
+RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/bin/rpmdeps-oecore --macros ${STAGING_LIBDIR_NATIVE}/rpm/macros --define '_rpmfc_magic_path ${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc' --rpmpopt ${STAGING_LIBDIR_NATIVE}/rpm/rpmpopt"
+
+# Collect perfile run-time dependency metadata
+# Output:
+# FILERPROVIDESFLIST_pkg - list of all files w/ deps
+# FILERPROVIDES_filepath_pkg - per file dep
+#
+# FILERDEPENDSFLIST_pkg - list of all files w/ deps
+# FILERDEPENDS_filepath_pkg - per file dep
+
+python package_do_filedeps() {
+ if d.getVar('SKIP_FILEDEPS', True) == '1':
+ return
+
+ pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar('PACKAGES', True)
+ rpmdeps = d.getVar('RPMDEPS', True)
+
+ def chunks(files, n):
+ return [files[i:i+n] for i in range(0, len(files), n)]
+
+ pkglist = []
+ for pkg in packages.split():
+ if d.getVar('SKIP_FILEDEPS_' + pkg, True) == '1':
+ continue
+ if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-'):
+ continue
+ for files in chunks(pkgfiles[pkg], 100):
+ pkglist.append((pkg, files, rpmdeps, pkgdest))
+
+ processed = oe.utils.multiprocess_exec( pkglist, oe.package.filedeprunner)
+
+ provides_files = {}
+ requires_files = {}
+
+ for result in processed:
+ (pkg, provides, requires) = result
+
+ if pkg not in provides_files:
+ provides_files[pkg] = []
+ if pkg not in requires_files:
+ requires_files[pkg] = []
+
+ for file in provides:
+ provides_files[pkg].append(file)
+ key = "FILERPROVIDES_" + file + "_" + pkg
+ d.setVar(key, " ".join(provides[file]))
+
+ for file in requires:
+ requires_files[pkg].append(file)
+ key = "FILERDEPENDS_" + file + "_" + pkg
+ d.setVar(key, " ".join(requires[file]))
+
+ for pkg in requires_files:
+ d.setVar("FILERDEPENDSFLIST_" + pkg, " ".join(requires_files[pkg]))
+ for pkg in provides_files:
+ d.setVar("FILERPROVIDESFLIST_" + pkg, " ".join(provides_files[pkg]))
+}
+
+SHLIBSDIRS = "${PKGDATA_DIR}/${MLPREFIX}shlibs2"
+SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs2"
+
+python package_do_shlibs() {
+ import re, pipes
+ import subprocess as sub
+
+ exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', 0)
+ if exclude_shlibs:
+ bb.note("not generating shlibs")
+ return
+
+ lib_re = re.compile("^.*\.so")
+ libdir_re = re.compile(".*/%s$" % d.getVar('baselib', True))
+
+ packages = d.getVar('PACKAGES', True)
+ targetos = d.getVar('TARGET_OS', True)
+
+ workdir = d.getVar('WORKDIR', True)
+
+ ver = d.getVar('PKGV', True)
+ if not ver:
+ msg = "PKGV not defined"
+ package_qa_handle_error("pkgv-undefined", msg, d)
+ return
+
+ pkgdest = d.getVar('PKGDEST', True)
+
+ shlibswork_dir = d.getVar('SHLIBSWORKDIR', True)
+
+ # Take an exclusive lock (the default) while we work with the shlibs data
+ lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"))
+
+ def linux_so(file, needed, sonames, renames, pkgver):
+ needs_ldconfig = False
+ ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
+ cmd = d.getVar('OBJDUMP', True) + " -p " + pipes.quote(file) + " 2>/dev/null"
+ fd = os.popen(cmd)
+ lines = fd.readlines()
+ fd.close()
+ rpath = []
+ for l in lines:
+ m = re.match("\s+RPATH\s+([^\s]*)", l)
+ if m:
+ rpaths = m.group(1).replace("$ORIGIN", ldir).split(":")
+ rpath = map(os.path.normpath, rpaths)
+ for l in lines:
+ m = re.match("\s+NEEDED\s+([^\s]*)", l)
+ if m:
+ dep = m.group(1)
+ if dep not in needed[pkg]:
+ needed[pkg].append((dep, file, rpath))
+ m = re.match("\s+SONAME\s+([^\s]*)", l)
+ if m:
+ this_soname = m.group(1)
+ prov = (this_soname, ldir, pkgver)
+ if prov not in sonames:
+ # if the library is private (only used by this package) then do not build shlib for it
+ if not private_libs or this_soname not in private_libs:
+ sonames.append(prov)
+ if libdir_re.match(os.path.dirname(file)):
+ needs_ldconfig = True
+ if snap_symlinks and (os.path.basename(file) != this_soname):
+ renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
+ return needs_ldconfig
+
+ def darwin_so(file, needed, sonames, renames, pkgver):
+ if not os.path.exists(file):
+ return
+ ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
+
+ def get_combinations(base):
+ #
+ # Given a base library name, find all combinations of this split by "." and "-"
+ #
+ combos = []
+ options = base.split(".")
+ for i in range(1, len(options) + 1):
+ combos.append(".".join(options[0:i]))
+ options = base.split("-")
+ for i in range(1, len(options) + 1):
+ combos.append("-".join(options[0:i]))
+ return combos
+
+ if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg'):
+ # Drop suffix
+ name = os.path.basename(file).rsplit(".",1)[0]
+ # Find all combinations
+ combos = get_combinations(name)
+ for combo in combos:
+ if combo not in sonames:
+ prov = (combo, ldir, pkgver)
+ sonames.append(prov)
+ if file.endswith('.dylib') or file.endswith('.so'):
+ rpath = []
+ p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file],stdout=sub.PIPE,stderr=sub.PIPE)
+ err, out = p.communicate()
+ # If returned successfully, process stderr for results
+ if p.returncode == 0:
+ for l in err.split("\n"):
+ l = l.strip()
+ if l.startswith('path '):
+ rpath.append(l.split()[1])
+
+ p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file],stdout=sub.PIPE,stderr=sub.PIPE)
+ err, out = p.communicate()
+ # If returned successfully, process stderr for results
+ if p.returncode == 0:
+ for l in err.split("\n"):
+ l = l.strip()
+ if not l or l.endswith(":"):
+ continue
+ if "is not an object file" in l:
+ continue
+ name = os.path.basename(l.split()[0]).rsplit(".", 1)[0]
+ if name and name not in needed[pkg]:
+ needed[pkg].append((name, file, []))
+
+ snap_symlinks = (d.getVar('PACKAGE_SNAP_LIB_SYMLINKS', True) == "1")
+
+ use_ldconfig = ((d.getVar('USE_LDCONFIG', True) or "1") == "1")
+
+ needed = {}
+ shlib_provider = oe.package.read_shlib_providers(d)
+
+ for pkg in packages.split():
+ private_libs = d.getVar('PRIVATE_LIBS_' + pkg, True) or d.getVar('PRIVATE_LIBS', True) or ""
+ private_libs = private_libs.split()
+ needs_ldconfig = False
+ bb.debug(2, "calculating shlib provides for %s" % pkg)
+
+ pkgver = d.getVar('PKGV_' + pkg, True)
+ if not pkgver:
+ pkgver = d.getVar('PV_' + pkg, True)
+ if not pkgver:
+ pkgver = ver
+
+ needed[pkg] = []
+ sonames = list()
+ renames = list()
+ for file in pkgfiles[pkg]:
+ soname = None
+ if cpath.islink(file):
+ continue
+ if targetos == "darwin" or targetos == "darwin8":
+ darwin_so(file, needed, sonames, renames, pkgver)
+ elif os.access(file, os.X_OK) or lib_re.match(file):
+ ldconfig = linux_so(file, needed, sonames, renames, pkgver)
+ needs_ldconfig = needs_ldconfig or ldconfig
+ for (old, new) in renames:
+ bb.note("Renaming %s to %s" % (old, new))
+ os.rename(old, new)
+ pkgfiles[pkg].remove(old)
+
+ shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
+ if len(sonames):
+ fd = open(shlibs_file, 'w')
+ for s in sonames:
+ if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
+ (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
+ if old_pkg != pkg:
+ bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
+ bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
+ fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
+ if s[0] not in shlib_provider:
+ shlib_provider[s[0]] = {}
+ shlib_provider[s[0]][s[1]] = (pkg, pkgver)
+ fd.close()
+ if needs_ldconfig and use_ldconfig:
+ bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('ldconfig_postinst_fragment', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+ bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames))
+
+ bb.utils.unlockfile(lf)
+
+ assumed_libs = d.getVar('ASSUME_SHLIBS', True)
+ if assumed_libs:
+ libdir = d.getVar("libdir", True)
+ for e in assumed_libs.split():
+ l, dep_pkg = e.split(":")
+ lib_ver = None
+ dep_pkg = dep_pkg.rsplit("_", 1)
+ if len(dep_pkg) == 2:
+ lib_ver = dep_pkg[1]
+ dep_pkg = dep_pkg[0]
+ if l not in shlib_provider:
+ shlib_provider[l] = {}
+ shlib_provider[l][libdir] = (dep_pkg, lib_ver)
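+
+ # Illustrative example: ASSUME_SHLIBS = "libEGL.so.1:libegl-implementation_1.0"
+ # registers libEGL.so.1 as provided by the package libegl-implementation at
+ # version 1.0; the _<version> suffix is optional.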
+
+ libsearchpath = [d.getVar('libdir', True), d.getVar('base_libdir', True)]
+
+ for pkg in packages.split():
+ bb.debug(2, "calculating shlib requirements for %s" % pkg)
+
+ deps = list()
+ for n in needed[pkg]:
+ # If n is in the private libraries, don't try to search a provider for it;
+ # this could cause problems where some abc.bb provides a private
+ # /opt/abc/lib/libfoo.so.1 and contains /usr/bin/abc depending on the system library libfoo.so.1,
+ # but skipping it is still a better alternative than providing our own
+ # version and then adding a runtime dependency for the same system library
+ if private_libs and n[0] in private_libs:
+ bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0]))
+ continue
+ if n[0] in shlib_provider.keys():
+ shlib_provider_path = list()
+ for k in shlib_provider[n[0]].keys():
+ shlib_provider_path.append(k)
+ match = None
+ for p in n[2] + shlib_provider_path + libsearchpath:
+ if p in shlib_provider[n[0]]:
+ match = p
+ break
+ if match:
+ (dep_pkg, ver_needed) = shlib_provider[n[0]][match]
+
+ bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n[0], dep_pkg, n[1]))
+
+ if dep_pkg == pkg:
+ continue
+
+ if ver_needed:
+ dep = "%s (>= %s)" % (dep_pkg, ver_needed)
+ else:
+ dep = dep_pkg
+ if dep not in deps:
+ deps.append(dep)
+ continue
+ bb.note("Couldn't find shared library provider for %s, used by files: %s" % (n[0], n[1]))
+
+ deps_file = os.path.join(pkgdest, pkg + ".shlibdeps")
+ if os.path.exists(deps_file):
+ os.remove(deps_file)
+ if len(deps):
+ fd = open(deps_file, 'w')
+ for dep in deps:
+ fd.write(dep + '\n')
+ fd.close()
+}
+
+python package_do_pkgconfig () {
+ import re
+
+ packages = d.getVar('PACKAGES', True)
+ workdir = d.getVar('WORKDIR', True)
+ pkgdest = d.getVar('PKGDEST', True)
+
+ shlibs_dirs = d.getVar('SHLIBSDIRS', True).split()
+ shlibswork_dir = d.getVar('SHLIBSWORKDIR', True)
+
+ pc_re = re.compile('(.*)\.pc$')
+ var_re = re.compile('(.*)=(.*)')
+ field_re = re.compile('(.*): (.*)')
+
+ pkgconfig_provided = {}
+ pkgconfig_needed = {}
+ for pkg in packages.split():
+ pkgconfig_provided[pkg] = []
+ pkgconfig_needed[pkg] = []
+ for file in pkgfiles[pkg]:
+ m = pc_re.match(file)
+ if m:
+ pd = bb.data.init()
+ name = m.group(1)
+ pkgconfig_provided[pkg].append(name)
+ if not os.access(file, os.R_OK):
+ continue
+ f = open(file, 'r')
+ lines = f.readlines()
+ f.close()
+ for l in lines:
+ m = var_re.match(l)
+ if m:
+ name = m.group(1)
+ val = m.group(2)
+ pd.setVar(name, pd.expand(val))
+ continue
+ m = field_re.match(l)
+ if m:
+ hdr = m.group(1)
+ exp = bb.data.expand(m.group(2), pd)
+ if hdr == 'Requires':
+ pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
+
+ # Take an exclusive lock (the default) while we work with the pkgconfig data
+ lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"))
+
+ for pkg in packages.split():
+ pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
+ if pkgconfig_provided[pkg] != []:
+ f = open(pkgs_file, 'w')
+ for p in pkgconfig_provided[pkg]:
+ f.write('%s\n' % p)
+ f.close()
+
+ # Go from least to most specific since the last one found wins
+ for dir in reversed(shlibs_dirs):
+ if not os.path.exists(dir):
+ continue
+ for file in os.listdir(dir):
+ m = re.match('^(.*)\.pclist$', file)
+ if m:
+ pkg = m.group(1)
+ fd = open(os.path.join(dir, file))
+ lines = fd.readlines()
+ fd.close()
+ pkgconfig_provided[pkg] = []
+ for l in lines:
+ pkgconfig_provided[pkg].append(l.rstrip())
+
+ for pkg in packages.split():
+ deps = []
+ for n in pkgconfig_needed[pkg]:
+ found = False
+ for k in pkgconfig_provided.keys():
+ if n in pkgconfig_provided[k]:
+ if k != pkg and not (k in deps):
+ deps.append(k)
+ found = True
+ if not found:
+ bb.note("couldn't find pkgconfig module '%s' in any package" % n)
+ deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
+ if len(deps):
+ fd = open(deps_file, 'w')
+ for dep in deps:
+ fd.write(dep + '\n')
+ fd.close()
+
+ bb.utils.unlockfile(lf)
+}
+
+def read_libdep_files(d):
+ pkglibdeps = {}
+ packages = d.getVar('PACKAGES', True).split()
+ for pkg in packages:
+ pkglibdeps[pkg] = {}
+ for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
+ depsfile = d.expand("${PKGDEST}/" + pkg + extension)
+ if os.access(depsfile, os.R_OK):
+ fd = open(depsfile)
+ lines = fd.readlines()
+ fd.close()
+ for l in lines:
+ l = l.rstrip()
+ deps = bb.utils.explode_dep_versions2(l)
+ for dep in deps:
+ if dep not in pkglibdeps[pkg]:
+ pkglibdeps[pkg][dep] = deps[dep]
+ return pkglibdeps
+
+python read_shlibdeps () {
+ pkglibdeps = read_libdep_files(d)
+
+ packages = d.getVar('PACKAGES', True).split()
+ for pkg in packages:
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
+ for dep in pkglibdeps[pkg]:
+ # Add the dep if it's not already there, or if no comparison is set
+ if dep not in rdepends:
+ rdepends[dep] = []
+ for v in pkglibdeps[pkg][dep]:
+ if v not in rdepends[dep]:
+ rdepends[dep].append(v)
+ d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
+}
+
+python package_depchains() {
+ """
+ For a given set of prefix and postfix modifiers, make those packages
+ RRECOMMENDS on the corresponding packages for its RDEPENDS.
+
+ Example: If package A depends upon package B, and A's .bb emits an
+ A-dev package, this would make A-dev Recommends: B-dev.
+
+ If only one package with a given suffix is specified, it will take its
+ RRECOMMENDS from the RDEPENDS of *all* other packages. If more than one
+ package with a given suffix is specified, each will only use the
+ RDEPENDS of its own base package.
+ """
+
+ packages = d.getVar('PACKAGES', True)
+ postfixes = (d.getVar('DEPCHAIN_POST', True) or '').split()
+ prefixes = (d.getVar('DEPCHAIN_PRE', True) or '').split()
+
+ def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
+
+ #bb.note('depends for %s is %s' % (base, depends))
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg, True) or "")
+
+ for depend in depends:
+ if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
+ #bb.note("Skipping %s" % depend)
+ continue
+ if depend.endswith('-dev'):
+ depend = depend[:-4]
+ if depend.endswith('-dbg'):
+ depend = depend[:-4]
+ pkgname = getname(depend, suffix)
+ #bb.note("Adding %s for %s" % (pkgname, depend))
+ if pkgname not in rreclist and pkgname != pkg:
+ rreclist[pkgname] = []
+
+ #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
+ d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
+
+ def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
+
+ #bb.note('rdepends for %s is %s' % (base, rdepends))
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg, True) or "")
+
+ for depend in rdepends:
+ if depend.find('virtual-locale-') != -1:
+ #bb.note("Skipping %s" % depend)
+ continue
+ if depend.endswith('-dev'):
+ depend = depend[:-4]
+ if depend.endswith('-dbg'):
+ depend = depend[:-4]
+ pkgname = getname(depend, suffix)
+ #bb.note("Adding %s for %s" % (pkgname, depend))
+ if pkgname not in rreclist and pkgname != pkg:
+ rreclist[pkgname] = []
+
+ #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
+ d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
+
+ def add_dep(deplist, dep):
+ if dep not in deplist:
+ deplist.append(dep)
+
+ depends = []
+ for dep in bb.utils.explode_deps(d.getVar('DEPENDS', True) or ""):
+ add_dep(depends, dep)
+
+ rdepends = []
+ for pkg in packages.split():
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + pkg, True) or ""):
+ add_dep(rdepends, dep)
+
+ #bb.note('rdepends is %s' % rdepends)
+
+ def post_getname(name, suffix):
+ return '%s%s' % (name, suffix)
+ def pre_getname(name, suffix):
+ return '%s%s' % (suffix, name)
+
+ pkgs = {}
+ for pkg in packages.split():
+ for postfix in postfixes:
+ if pkg.endswith(postfix):
+ if postfix not in pkgs:
+ pkgs[postfix] = {}
+ pkgs[postfix][pkg] = (pkg[:-len(postfix)], post_getname)
+
+ for prefix in prefixes:
+ if pkg.startswith(prefix):
+ if prefix not in pkgs:
+ pkgs[prefix] = {}
+ pkgs[prefix][pkg] = (pkg[:-len(prefix)], pre_getname)
+
+ if "-dbg" in pkgs:
+ pkglibdeps = read_libdep_files(d)
+ pkglibdeplist = []
+ for pkg in pkglibdeps:
+ for k in pkglibdeps[pkg]:
+ add_dep(pkglibdeplist, k)
+ dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS', True) == '1') or (bb.data.inherits_class('packagegroup', d)))
+
+ for suffix in pkgs:
+ for pkg in pkgs[suffix]:
+ if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs', True):
+ continue
+ (base, func) = pkgs[suffix][pkg]
+ if suffix == "-dev":
+ pkg_adddeprrecs(pkg, base, suffix, func, depends, d)
+ elif suffix == "-dbg":
+ if not dbgdefaultdeps:
+ pkg_addrrecs(pkg, base, suffix, func, pkglibdeplist, d)
+ continue
+ if len(pkgs[suffix]) == 1:
+ pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
+ else:
+ rdeps = []
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + base, True) or ""):
+ add_dep(rdeps, dep)
+ pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
+}
+
+# Since bitbake can't determine which variables are accessed during package
+# iteration, we need to list them here:
+PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES SYSTEMD_SERVICE LICENSE SECTION pkg_preinst pkg_prerm RREPLACES GROUPMEMS_PARAM SYSTEMD_AUTO_ENABLE"
+
+def gen_packagevar(d):
+ ret = []
+ pkgs = (d.getVar("PACKAGES", True) or "").split()
+ vars = (d.getVar("PACKAGEVARS", True) or "").split()
+ for p in pkgs:
+ for v in vars:
+ ret.append(v + "_" + p)
+
+ # Ensure that changes to INCOMPATIBLE_LICENSE re-run do_package for
+ # affected recipes.
+ ret.append('LICENSE_EXCLUSION-%s' % p)
+ return " ".join(ret)
+
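+# As an illustrative sketch (hypothetical values, not from a real recipe):
+# with PACKAGES = "foo foo-dev" and PACKAGEVARS = "FILES RDEPENDS",
+# gen_packagevar(d) returns
+#   "FILES_foo RDEPENDS_foo LICENSE_EXCLUSION-foo
+#    FILES_foo-dev RDEPENDS_foo-dev LICENSE_EXCLUSION-foo-dev"
+# The result feeds do_package[vardeps] below, so a change to any
+# per-package variable re-runs do_package.
+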
+PACKAGE_PREPROCESS_FUNCS ?= ""
+# Functions for setting up PKGD
+PACKAGEBUILDPKGD ?= " \
+ perform_packagecopy \
+ ${PACKAGE_PREPROCESS_FUNCS} \
+ split_and_strip_files \
+ fixup_perms \
+ "
+# Functions which split PKGD up into separate packages
+PACKAGESPLITFUNCS ?= " \
+ package_do_split_locales \
+ populate_packages"
+# Functions which process metadata based on split packages
+PACKAGEFUNCS += " \
+ package_fixsymlinks \
+ package_name_hook \
+ package_do_filedeps \
+ package_do_shlibs \
+ package_do_pkgconfig \
+ read_shlibdeps \
+ package_depchains \
+ emit_pkgdata"
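+
+# do_package (below) runs these three lists in order: PACKAGEBUILDPKGD
+# populates PKGD from D, PACKAGESPLITFUNCS splits PKGD into per-package
+# trees under PKGDEST, and PACKAGEFUNCS post-processes the split packages.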
+
+python do_package () {
+ # Change the following version to cause sstate to invalidate the package
+ # cache. This is useful if an item this class depends on changes in a
+ # way that the output of this class changes. rpmdeps is a good example
+ # as any change to rpmdeps requires this to be rerun.
+ # PACKAGE_BBCLASS_VERSION = "1"
+
+ # Init cachedpath
+ global cpath
+ cpath = oe.cachedpath.CachedPath()
+
+ ###########################################################################
+ # Sanity test the setup
+ ###########################################################################
+
+ packages = (d.getVar('PACKAGES', True) or "").split()
+ if len(packages) < 1:
+ bb.debug(1, "No packages to build, skipping do_package")
+ return
+
+ workdir = d.getVar('WORKDIR', True)
+ outdir = d.getVar('DEPLOY_DIR', True)
+ dest = d.getVar('D', True)
+ dvar = d.getVar('PKGD', True)
+ pn = d.getVar('PN', True)
+
+ if not workdir or not outdir or not dest or not dvar or not pn:
+ msg = "WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package"
+ package_qa_handle_error("var-undefined", msg, d)
+ return
+
+ bb.build.exec_func("package_get_auto_pr", d)
+
+ ###########################################################################
+ # Optimisations
+ ###########################################################################
+
+ # Continually expanding complex expressions is inefficient, particularly
+ # when we write to the datastore and invalidate the expansion cache. This
+ # code pre-expands some frequently used variables
+
+ def expandVar(x, d):
+ d.setVar(x, d.getVar(x, True))
+
+ for x in 'PN', 'PV', 'BPN', 'TARGET_SYS', 'EXTENDPRAUTO':
+ expandVar(x, d)
+
+ ###########################################################################
+ # Setup PKGD (from D)
+ ###########################################################################
+
+ for f in (d.getVar('PACKAGEBUILDPKGD', True) or '').split():
+ bb.build.exec_func(f, d)
+
+ ###########################################################################
+ # Split up PKGD into PKGDEST
+ ###########################################################################
+
+ cpath = oe.cachedpath.CachedPath()
+
+ for f in (d.getVar('PACKAGESPLITFUNCS', True) or '').split():
+ bb.build.exec_func(f, d)
+
+ ###########################################################################
+ # Process PKGDEST
+ ###########################################################################
+
+ # Build global list of files in each split package
+ global pkgfiles
+ pkgfiles = {}
+ packages = d.getVar('PACKAGES', True).split()
+ pkgdest = d.getVar('PKGDEST', True)
+ for pkg in packages:
+ pkgfiles[pkg] = []
+ for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
+ for file in files:
+ pkgfiles[pkg].append(walkroot + os.sep + file)
+
+ for f in (d.getVar('PACKAGEFUNCS', True) or '').split():
+ bb.build.exec_func(f, d)
+
+ qa_sane = d.getVar("QA_SANE", True)
+ if not qa_sane:
+ bb.fatal("Fatal QA errors found, failing task.")
+}
+
+do_package[dirs] = "${SHLIBSWORKDIR} ${PKGDESTWORK} ${D}"
+do_package[vardeps] += "${PACKAGEBUILDPKGD} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}"
+addtask package after do_install
+
+PACKAGELOCK = "${STAGING_DIR}/package-output.lock"
+SSTATETASKS += "do_package"
+do_package[cleandirs] = "${PKGDEST} ${PKGDESTWORK}"
+do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST} ${PKGDESTWORK}"
+do_package[sstate-lockfile-shared] = "${PACKAGELOCK}"
+do_package_setscene[dirs] = "${STAGING_DIR}"
+
+python do_package_setscene () {
+ sstate_setscene(d)
+}
+addtask do_package_setscene
+
+do_packagedata () {
+ :
+}
+
+addtask packagedata before do_build after do_package
+
+SSTATETASKS += "do_packagedata"
+do_packagedata[sstate-inputdirs] = "${PKGDESTWORK}"
+do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}"
+do_packagedata[sstate-lockfile-shared] = "${PACKAGELOCK}"
+do_packagedata[stamp-extra-info] = "${MACHINE}"
+
+python do_packagedata_setscene () {
+ sstate_setscene(d)
+}
+addtask do_packagedata_setscene
+
+#
+# Helper functions for the package writing classes
+#
+
+def mapping_rename_hook(d):
+ """
+ Rewrite variables to account for package renaming in things
+ like debian.bbclass or manual PKG variable name changes
+ """
+ pkg = d.getVar("PKG", True)
+ runtime_mapping_rename("RDEPENDS", pkg, d)
+ runtime_mapping_rename("RRECOMMENDS", pkg, d)
+ runtime_mapping_rename("RSUGGESTS", pkg, d)
+
diff --git a/import-layers/yocto-poky/meta/classes/package_deb.bbclass b/import-layers/yocto-poky/meta/classes/package_deb.bbclass
new file mode 100644
index 000000000..e1d05a74c
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/package_deb.bbclass
@@ -0,0 +1,359 @@
+#
+# Copyright 2006-2008 OpenedHand Ltd.
+#
+
+inherit package
+
+IMAGE_PKGTYPE ?= "deb"
+
+DPKG_ARCH ?= "${@debian_arch_map(d.getVar('TARGET_ARCH', True), d.getVar('TUNE_FEATURES', True))}"
+DPKG_ARCH[vardepvalue] = "${DPKG_ARCH}"
+
+PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs"
+
+APTCONF_TARGET = "${WORKDIR}"
+
+APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS", True) == "1"]}"
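+# (The [a, b][condition] indexing above is an old ternary idiom: it selects
+# '--no-install-recommends' only when NO_RECOMMENDATIONS is "1".)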
+
+def debian_arch_map(arch, tune):
+ tune_features = tune.split()
+ if arch in ["i586", "i686"]:
+ return "i386"
+ if arch == "x86_64":
+ if "mx32" in tune_features:
+ return "x32"
+ return "amd64"
+ if arch.startswith("mips"):
+ endian = ["el", ""]["bigendian" in tune_features]
+ if "n64" in tune_features:
+ return "mips64" + endian
+ if "n32" in tune_features:
+ return "mipsn32" + endian
+ return "mips" + endian
+ if arch == "powerpc":
+ return arch + ["", "spe"]["spe" in tune_features]
+ if arch == "aarch64":
+ return "arm64"
+ if arch == "arm":
+ return arch + ["el", "hf"]["callconvention-hard" in tune_features]
+ return arch
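+
+# A few traced examples of the mapping above (illustrative only):
+#   debian_arch_map("x86_64", "")                 -> "amd64"
+#   debian_arch_map("x86_64", "mx32")             -> "x32"
+#   debian_arch_map("arm", "callconvention-hard") -> "armhf"
+#   debian_arch_map("mips", "bigendian n64")      -> "mips64"
+#   debian_arch_map("aarch64", "")                -> "arm64"
+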
+#
+# install a bunch of packages using apt
+# the following shell variables need to be set before calling this func:
+# INSTALL_ROOTFS_DEB - install root dir
+# INSTALL_BASEARCH_DEB - install base architecture
+# INSTALL_ARCHS_DEB - list of available archs
+# INSTALL_PACKAGES_NORMAL_DEB - packages to be installed
+# INSTALL_PACKAGES_ATTEMPTONLY_DEB - packages attempted to be installed only
+# INSTALL_PACKAGES_LINGUAS_DEB - additional packages for uclibc
+# INSTALL_TASK_DEB - task name
+
+python do_package_deb () {
+ import re, copy
+ import textwrap
+ import subprocess
+
+ workdir = d.getVar('WORKDIR', True)
+ if not workdir:
+ bb.error("WORKDIR not defined, unable to package")
+ return
+
+ outdir = d.getVar('PKGWRITEDIRDEB', True)
+ if not outdir:
+ bb.error("PKGWRITEDIRDEB not defined, unable to package")
+ return
+
+ packages = d.getVar('PACKAGES', True)
+ if not packages:
+ bb.debug(1, "PACKAGES not defined, nothing to package")
+ return
+
+ tmpdir = d.getVar('TMPDIR', True)
+
+    if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"), os.R_OK):
+ os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
+
+ if packages == []:
+ bb.debug(1, "No packages; nothing to do")
+ return
+
+ pkgdest = d.getVar('PKGDEST', True)
+
+ def cleanupcontrol(root):
+ for p in ['CONTROL', 'DEBIAN']:
+ p = os.path.join(root, p)
+ if os.path.exists(p):
+ bb.utils.prunedir(p)
+
+ for pkg in packages.split():
+ localdata = bb.data.createCopy(d)
+ root = "%s/%s" % (pkgdest, pkg)
+
+ lf = bb.utils.lockfile(root + ".lock")
+
+ localdata.setVar('ROOT', '')
+ localdata.setVar('ROOT_%s' % pkg, root)
+ pkgname = localdata.getVar('PKG_%s' % pkg, True)
+ if not pkgname:
+ pkgname = pkg
+ localdata.setVar('PKG', pkgname)
+
+ localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
+
+ bb.data.update_data(localdata)
+ basedir = os.path.join(os.path.dirname(root))
+
+ pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH', True))
+ bb.utils.mkdirhier(pkgoutdir)
+
+ os.chdir(root)
+ cleanupcontrol(root)
+ from glob import glob
+ g = glob('*')
+ if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
+ bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
+ bb.utils.unlockfile(lf)
+ continue
+
+ controldir = os.path.join(root, 'DEBIAN')
+ bb.utils.mkdirhier(controldir)
+ os.chmod(controldir, 0755)
+ try:
+ import codecs
+ ctrlfile = codecs.open(os.path.join(controldir, 'control'), 'w', 'utf-8')
+ except OSError:
+ bb.utils.unlockfile(lf)
+ raise bb.build.FuncFailed("unable to open control file for writing.")
+
+ fields = []
+ pe = d.getVar('PKGE', True)
+ if pe and int(pe) > 0:
+ fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
+ else:
+ fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
+ fields.append(["Description: %s\n", ['DESCRIPTION']])
+ fields.append(["Section: %s\n", ['SECTION']])
+ fields.append(["Priority: %s\n", ['PRIORITY']])
+ fields.append(["Maintainer: %s\n", ['MAINTAINER']])
+ fields.append(["Architecture: %s\n", ['DPKG_ARCH']])
+ fields.append(["OE: %s\n", ['PN']])
+ fields.append(["PackageArch: %s\n", ['PACKAGE_ARCH']])
+ if d.getVar('HOMEPAGE', True):
+ fields.append(["Homepage: %s\n", ['HOMEPAGE']])
+
+ # Package, Version, Maintainer, Description - mandatory
+ # Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional
+
+
+ def pullData(l, d):
+ l2 = []
+ for i in l:
+ data = d.getVar(i, True)
+ if data is None:
+                    raise KeyError(i)
+ if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH', True) == 'all':
+ data = 'all'
+ elif i == 'PACKAGE_ARCH' or i == 'DPKG_ARCH':
+                    # Fields in deb package control files don't allow the
+                    # character `_', so change `_' to `-' in the arch name,
+                    # e.g. `x86_64' -> `x86-64'.
+ data = data.replace('_', '-')
+ l2.append(data)
+ return l2
+
+ ctrlfile.write("Package: %s\n" % pkgname)
+ if d.getVar('PACKAGE_ARCH', True) == "all":
+ ctrlfile.write("Multi-Arch: foreign\n")
+ # check for required fields
+ try:
+ for (c, fs) in fields:
+ for f in fs:
+ if localdata.getVar(f, False) is None:
+ raise KeyError(f)
+ # Special behavior for description...
+ if 'DESCRIPTION' in fs:
+ summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
+ ctrlfile.write('Description: %s\n' % unicode(summary,'utf-8'))
+ description = localdata.getVar('DESCRIPTION', True) or "."
+ description = textwrap.dedent(description).strip()
+ if '\\n' in description:
+ # Manually indent
+ for t in description.split('\\n'):
+                        # We don't limit the width when indenting manually, but
+                        # we still need textwrap.fill() to set initial_indent
+                        # and subsequent_indent, so use a very large width.
+ ctrlfile.write('%s\n' % unicode(textwrap.fill(t, width=100000, initial_indent=' ', subsequent_indent=' '),'utf-8'))
+ else:
+ # Auto indent
+ ctrlfile.write('%s\n' % unicode(textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' '),'utf-8'))
+
+ else:
+ ctrlfile.write(unicode(c % tuple(pullData(fs, localdata)),'utf-8'))
+ except KeyError:
+ import sys
+ (type, value, traceback) = sys.exc_info()
+ bb.utils.unlockfile(lf)
+ ctrlfile.close()
+ raise bb.build.FuncFailed("Missing field for deb generation: %s" % value)
+ except UnicodeDecodeError:
+ bb.utils.unlockfile(lf)
+ ctrlfile.close()
+ raise bb.build.FuncFailed("Non UTF-8 characters found in one of the fields")
+
+ # more fields
+
+ custom_fields_chunk = get_package_additional_metadata("deb", localdata)
+ if custom_fields_chunk is not None:
+ ctrlfile.write(unicode(custom_fields_chunk))
+ ctrlfile.write("\n")
+
+ mapping_rename_hook(localdata)
+
+ def debian_cmp_remap(var):
+            # dpkg does not allow '(' or ')' in a dependency name,
+            # so replace both characters with '__'.
+            #
+            # In Debian, '>' and '<' do not mean what they appear to mean:
+            #   '<' means less than or equal
+            #   '>' means greater than or equal
+            # Adjust these to the explicit '<<' and '>>' equivalents.
+            #
+ for dep in var:
+ if '(' in dep:
+ newdep = dep.replace('(', '__')
+ newdep = newdep.replace(')', '__')
+ if newdep != dep:
+ var[newdep] = var[dep]
+ del var[dep]
+ for dep in var:
+ for i, v in enumerate(var[dep]):
+ if (v or "").startswith("< "):
+ var[dep][i] = var[dep][i].replace("< ", "<< ")
+ elif (v or "").startswith("> "):
+ var[dep][i] = var[dep][i].replace("> ", ">> ")
+
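+        # Worked example (hypothetical input): debian_cmp_remap rewrites
+        #   {"foo": ["> 1.0"], "bar (special)": []}
+        # in place to
+        #   {"foo": [">> 1.0"], "bar __special__": []}
+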
+ rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS", True) or "")
+ debian_cmp_remap(rdepends)
+ for dep in rdepends.keys():
+ if dep == pkg:
+ del rdepends[dep]
+ continue
+ if '*' in dep:
+ del rdepends[dep]
+ rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS", True) or "")
+ debian_cmp_remap(rrecommends)
+ for dep in rrecommends.keys():
+ if '*' in dep:
+ del rrecommends[dep]
+ rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS", True) or "")
+ debian_cmp_remap(rsuggests)
+ # Deliberately drop version information here, not wanted/supported by deb
+ rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES", True) or ""), [])
+ debian_cmp_remap(rprovides)
+ rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES", True) or "")
+ debian_cmp_remap(rreplaces)
+ rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS", True) or "")
+ debian_cmp_remap(rconflicts)
+ if rdepends:
+ ctrlfile.write("Depends: %s\n" % unicode(bb.utils.join_deps(rdepends)))
+ if rsuggests:
+ ctrlfile.write("Suggests: %s\n" % unicode(bb.utils.join_deps(rsuggests)))
+ if rrecommends:
+ ctrlfile.write("Recommends: %s\n" % unicode(bb.utils.join_deps(rrecommends)))
+ if rprovides:
+ ctrlfile.write("Provides: %s\n" % unicode(bb.utils.join_deps(rprovides)))
+ if rreplaces:
+ ctrlfile.write("Replaces: %s\n" % unicode(bb.utils.join_deps(rreplaces)))
+ if rconflicts:
+ ctrlfile.write("Conflicts: %s\n" % unicode(bb.utils.join_deps(rconflicts)))
+ ctrlfile.close()
+
+ for script in ["preinst", "postinst", "prerm", "postrm"]:
+ scriptvar = localdata.getVar('pkg_%s' % script, True)
+ if not scriptvar:
+ continue
+ scriptvar = scriptvar.strip()
+ try:
+ scriptfile = open(os.path.join(controldir, script), 'w')
+ except OSError:
+ bb.utils.unlockfile(lf)
+ raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
+
+ if scriptvar.startswith("#!"):
+ pos = scriptvar.find("\n") + 1
+ scriptfile.write(scriptvar[:pos])
+ else:
+ pos = 0
+ scriptfile.write("#!/bin/sh\n")
+
+ # Prevent the prerm/postrm scripts from being run during an upgrade
+ if script in ('prerm', 'postrm'):
+ scriptfile.write('[ "$1" != "upgrade" ] || exit 0\n')
+
+ scriptfile.write(scriptvar[pos:])
+ scriptfile.write('\n')
+ scriptfile.close()
+ os.chmod(os.path.join(controldir, script), 0755)
+
+ conffiles_str = ' '.join(get_conffiles(pkg, d))
+ if conffiles_str:
+ try:
+ conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
+ except OSError:
+ bb.utils.unlockfile(lf)
+ raise bb.build.FuncFailed("unable to open conffiles for writing.")
+ for f in conffiles_str.split():
+ if os.path.exists(oe.path.join(root, f)):
+ conffiles.write('%s\n' % f)
+ conffiles.close()
+
+ os.chdir(basedir)
+ ret = subprocess.call("PATH=\"%s\" dpkg-deb -b %s %s" % (localdata.getVar("PATH", True), root, pkgoutdir), shell=True)
+ if ret != 0:
+ bb.utils.unlockfile(lf)
+ raise bb.build.FuncFailed("dpkg-deb execution failed")
+
+ cleanupcontrol(root)
+ bb.utils.unlockfile(lf)
+}
+# Indirect references to these vars
+do_package_write_deb[vardeps] += "PKGV PKGR PKGV DESCRIPTION SECTION PRIORITY MAINTAINER DPKG_ARCH PN HOMEPAGE"
+# Otherwise allarch packages may change depending on override configuration
+do_package_deb[vardepsexclude] = "OVERRIDES"
+
+
+SSTATETASKS += "do_package_write_deb"
+do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
+do_package_write_deb[sstate-outputdirs] = "${DEPLOY_DIR_DEB}"
+
+python do_package_write_deb_setscene () {
+ tmpdir = d.getVar('TMPDIR', True)
+
+    if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"), os.R_OK):
+ os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
+
+ sstate_setscene(d)
+}
+addtask do_package_write_deb_setscene
+
+python () {
+ if d.getVar('PACKAGES', True) != '':
+ deps = ' dpkg-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
+ d.appendVarFlag('do_package_write_deb', 'depends', deps)
+ d.setVarFlag('do_package_write_deb', 'fakeroot', "1")
+}
+
+python do_package_write_deb () {
+ bb.build.exec_func("read_subpackage_metadata", d)
+ bb.build.exec_func("do_package_deb", d)
+}
+do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
+do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
+do_package_write_deb[umask] = "022"
+addtask package_write_deb after do_packagedata do_package
+
+
+PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
+PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
+
+do_build[recrdeptask] += "do_package_write_deb"
diff --git a/import-layers/yocto-poky/meta/classes/package_ipk.bbclass b/import-layers/yocto-poky/meta/classes/package_ipk.bbclass
new file mode 100644
index 000000000..f1ad1d5c1
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/package_ipk.bbclass
@@ -0,0 +1,294 @@
+inherit package
+
+IMAGE_PKGTYPE ?= "ipk"
+
+IPKGCONF_TARGET = "${WORKDIR}/opkg.conf"
+IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
+
+PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
+
+# Program to be used to build opkg packages
+OPKGBUILDCMD ??= "opkg-build"
+
+OPKG_ARGS += "--force_postinstall --prefer-arch-to-version"
+OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS", True) == "1"]}"
+OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE', True) or "").split())][(d.getVar("PACKAGE_EXCLUDE", True) or "") != ""]}"
+
+OPKGLIBDIR = "${localstatedir}/lib"
+
+python do_package_ipk () {
+ import re, copy
+ import textwrap
+ import subprocess
+
+ workdir = d.getVar('WORKDIR', True)
+ outdir = d.getVar('PKGWRITEDIRIPK', True)
+ tmpdir = d.getVar('TMPDIR', True)
+ pkgdest = d.getVar('PKGDEST', True)
+ if not workdir or not outdir or not tmpdir:
+ bb.error("Variables incorrectly set, unable to package")
+ return
+
+ packages = d.getVar('PACKAGES', True)
+ if not packages or packages == '':
+ bb.debug(1, "No packages; nothing to do")
+ return
+
+ # We're about to add new packages so the index needs to be checked
+ # so remove the appropriate stamp file.
+ if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
+ os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
+
+ def cleanupcontrol(root):
+ for p in ['CONTROL', 'DEBIAN']:
+ p = os.path.join(root, p)
+ if os.path.exists(p):
+ bb.utils.prunedir(p)
+
+ for pkg in packages.split():
+ localdata = bb.data.createCopy(d)
+ root = "%s/%s" % (pkgdest, pkg)
+
+ lf = bb.utils.lockfile(root + ".lock")
+
+ localdata.setVar('ROOT', '')
+ localdata.setVar('ROOT_%s' % pkg, root)
+ pkgname = localdata.getVar('PKG_%s' % pkg, True)
+ if not pkgname:
+ pkgname = pkg
+ localdata.setVar('PKG', pkgname)
+
+ localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
+
+ bb.data.update_data(localdata)
+ basedir = os.path.join(os.path.dirname(root))
+ arch = localdata.getVar('PACKAGE_ARCH', True)
+
+ if localdata.getVar('IPK_HIERARCHICAL_FEED', False) == "1":
+ # Spread packages across subdirectories so each isn't too crowded
+ if pkgname.startswith('lib'):
+ pkg_prefix = 'lib' + pkgname[3]
+ else:
+ pkg_prefix = pkgname[0]
+
+ # Keep -dbg, -dev, -doc, -staticdev, -locale and -locale-* packages
+ # together. These package suffixes are taken from the definitions of
+ # PACKAGES and PACKAGES_DYNAMIC in meta/conf/bitbake.conf
+ if pkgname[-4:] in ('-dbg', '-dev', '-doc'):
+ pkg_subdir = pkgname[:-4]
+ elif pkgname.endswith('-staticdev'):
+ pkg_subdir = pkgname[:-10]
+ elif pkgname.endswith('-locale'):
+ pkg_subdir = pkgname[:-7]
+ elif '-locale-' in pkgname:
+ pkg_subdir = pkgname[:pkgname.find('-locale-')]
+ else:
+ pkg_subdir = pkgname
+
+ pkgoutdir = "%s/%s/%s/%s" % (outdir, arch, pkg_prefix, pkg_subdir)
+ else:
+ pkgoutdir = "%s/%s" % (outdir, arch)
+
+ bb.utils.mkdirhier(pkgoutdir)
+ os.chdir(root)
+ cleanupcontrol(root)
+ from glob import glob
+ g = glob('*')
+ if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
+ bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
+ bb.utils.unlockfile(lf)
+ continue
+
+ controldir = os.path.join(root, 'CONTROL')
+ bb.utils.mkdirhier(controldir)
+ try:
+ ctrlfile = open(os.path.join(controldir, 'control'), 'w')
+ except OSError:
+ bb.utils.unlockfile(lf)
+ raise bb.build.FuncFailed("unable to open control file for writing.")
+
+ fields = []
+ pe = d.getVar('PKGE', True)
+ if pe and int(pe) > 0:
+ fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
+ else:
+ fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
+ fields.append(["Description: %s\n", ['DESCRIPTION']])
+ fields.append(["Section: %s\n", ['SECTION']])
+ fields.append(["Priority: %s\n", ['PRIORITY']])
+ fields.append(["Maintainer: %s\n", ['MAINTAINER']])
+ fields.append(["License: %s\n", ['LICENSE']])
+ fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']])
+ fields.append(["OE: %s\n", ['PN']])
+ if d.getVar('HOMEPAGE', True):
+ fields.append(["Homepage: %s\n", ['HOMEPAGE']])
+
+ def pullData(l, d):
+ l2 = []
+ for i in l:
+ l2.append(d.getVar(i, True))
+ return l2
+
+ ctrlfile.write("Package: %s\n" % pkgname)
+ # check for required fields
+ try:
+ for (c, fs) in fields:
+ for f in fs:
+ if localdata.getVar(f, False) is None:
+ raise KeyError(f)
+ # Special behavior for description...
+ if 'DESCRIPTION' in fs:
+ summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
+ ctrlfile.write('Description: %s\n' % summary)
+ description = localdata.getVar('DESCRIPTION', True) or "."
+ description = textwrap.dedent(description).strip()
+ if '\\n' in description:
+ # Manually indent
+ for t in description.split('\\n'):
+                        # We don't limit the width when indenting manually, but
+                        # we still need textwrap.fill() to set initial_indent
+                        # and subsequent_indent, so use a very large width.
+ ctrlfile.write('%s\n' % textwrap.fill(t.strip(), width=100000, initial_indent=' ', subsequent_indent=' '))
+ else:
+ # Auto indent
+ ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
+ else:
+ ctrlfile.write(c % tuple(pullData(fs, localdata)))
+ except KeyError:
+ import sys
+ (type, value, traceback) = sys.exc_info()
+ ctrlfile.close()
+ bb.utils.unlockfile(lf)
+ raise bb.build.FuncFailed("Missing field for ipk generation: %s" % value)
+ # more fields
+
+ custom_fields_chunk = get_package_additional_metadata("ipk", localdata)
+ if custom_fields_chunk is not None:
+ ctrlfile.write(custom_fields_chunk)
+ ctrlfile.write("\n")
+
+ mapping_rename_hook(localdata)
+
+ def debian_cmp_remap(var):
+            # In Debian, '>' and '<' do not mean what they appear to mean:
+            #   '<' means less than or equal
+            #   '>' means greater than or equal
+            # Adjust these to the explicit '<<' and '>>' equivalents.
+            #
+ for dep in var:
+ for i, v in enumerate(var[dep]):
+ if (v or "").startswith("< "):
+ var[dep][i] = var[dep][i].replace("< ", "<< ")
+ elif (v or "").startswith("> "):
+ var[dep][i] = var[dep][i].replace("> ", ">> ")
+
+ rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS", True) or "")
+ debian_cmp_remap(rdepends)
+ rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS", True) or "")
+ debian_cmp_remap(rrecommends)
+ rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS", True) or "")
+ debian_cmp_remap(rsuggests)
+ # Deliberately drop version information here, not wanted/supported by ipk
+ rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES", True) or ""), [])
+ debian_cmp_remap(rprovides)
+ rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES", True) or "")
+ debian_cmp_remap(rreplaces)
+ rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS", True) or "")
+ debian_cmp_remap(rconflicts)
+
+ if rdepends:
+ ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
+ if rsuggests:
+ ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
+ if rrecommends:
+ ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
+ if rprovides:
+ ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
+ if rreplaces:
+ ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
+ if rconflicts:
+ ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
+ src_uri = localdata.getVar("SRC_URI", True).strip() or "None"
+ if src_uri:
+ src_uri = re.sub("\s+", " ", src_uri)
+ ctrlfile.write("Source: %s\n" % " ".join(src_uri.split()))
+ ctrlfile.close()
+
+ for script in ["preinst", "postinst", "prerm", "postrm"]:
+ scriptvar = localdata.getVar('pkg_%s' % script, True)
+ if not scriptvar:
+ continue
+ try:
+ scriptfile = open(os.path.join(controldir, script), 'w')
+ except OSError:
+ bb.utils.unlockfile(lf)
+ raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
+ scriptfile.write(scriptvar)
+ scriptfile.close()
+ os.chmod(os.path.join(controldir, script), 0755)
+
+ conffiles_str = ' '.join(get_conffiles(pkg, d))
+ if conffiles_str:
+ try:
+ conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
+ except OSError:
+ bb.utils.unlockfile(lf)
+ raise bb.build.FuncFailed("unable to open conffiles for writing.")
+ for f in conffiles_str.split():
+ if os.path.exists(oe.path.join(root, f)):
+ conffiles.write('%s\n' % f)
+ conffiles.close()
+
+ os.chdir(basedir)
+        ret = subprocess.call("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH", True),
+                                                        d.getVar("OPKGBUILDCMD", True), pkg, pkgoutdir), shell=True)
+ if ret != 0:
+ bb.utils.unlockfile(lf)
+ raise bb.build.FuncFailed("opkg-build execution failed")
+
+ if d.getVar('IPK_SIGN_PACKAGES', True) == '1':
+ ipkver = "%s-%s" % (d.getVar('PKGV', True), d.getVar('PKGR', True))
+ ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, d.getVar('PACKAGE_ARCH', True))
+ sign_ipk(d, ipk_to_sign)
+
+ cleanupcontrol(root)
+ bb.utils.unlockfile(lf)
+
+}
+# Otherwise allarch packages may change depending on override configuration
+do_package_ipk[vardepsexclude] = "OVERRIDES"
+
+SSTATETASKS += "do_package_write_ipk"
+do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}"
+do_package_write_ipk[sstate-outputdirs] = "${DEPLOY_DIR_IPK}"
+
+python do_package_write_ipk_setscene () {
+ tmpdir = d.getVar('TMPDIR', True)
+
+ if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
+ os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
+
+ sstate_setscene(d)
+}
+addtask do_package_write_ipk_setscene
+
+python () {
+ if d.getVar('PACKAGES', True) != '':
+ deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
+ d.appendVarFlag('do_package_write_ipk', 'depends', deps)
+ d.setVarFlag('do_package_write_ipk', 'fakeroot', "1")
+}
+
+python do_package_write_ipk () {
+ bb.build.exec_func("read_subpackage_metadata", d)
+ bb.build.exec_func("do_package_ipk", d)
+}
+do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
+do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
+do_package_write_ipk[umask] = "022"
+addtask package_write_ipk after do_packagedata do_package
+
+PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
+PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
+
+do_build[recrdeptask] += "do_package_write_ipk"
diff --git a/import-layers/yocto-poky/meta/classes/package_rpm.bbclass b/import-layers/yocto-poky/meta/classes/package_rpm.bbclass
new file mode 100644
index 000000000..7d523a16f
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/package_rpm.bbclass
@@ -0,0 +1,774 @@
+inherit package
+
+IMAGE_PKGTYPE ?= "rpm"
+
+RPM="rpm"
+RPMBUILD="rpmbuild"
+
+PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms"
+
+# Maintaining the per-file dependencies has significant overhead when writing
+# the packages. When this value is set to "1", they are merged for efficiency.
+MERGEPERFILEDEPS = "1"
+
+# Construct per file dependencies file
+def write_rpm_perfiledata(srcname, d):
+ workdir = d.getVar('WORKDIR', True)
+ packages = d.getVar('PACKAGES', True)
+ pkgd = d.getVar('PKGD', True)
+
+ def dump_filerdeps(varname, outfile, d):
+ outfile.write("#!/usr/bin/env python\n\n")
+ outfile.write("# Dependency table\n")
+ outfile.write('deps = {\n')
+ for pkg in packages.split():
+ dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
+ dependsflist = (d.getVar(dependsflist_key, True) or "")
+ for dfile in dependsflist.split():
+ key = "FILE" + varname + "_" + dfile + "_" + pkg
+ depends_dict = bb.utils.explode_dep_versions(d.getVar(key, True) or "")
+ file = dfile.replace("@underscore@", "_")
+ file = file.replace("@closebrace@", "]")
+ file = file.replace("@openbrace@", "[")
+ file = file.replace("@tab@", "\t")
+ file = file.replace("@space@", " ")
+ file = file.replace("@at@", "@")
+ outfile.write('"' + pkgd + file + '" : "')
+ for dep in depends_dict:
+ ver = depends_dict[dep]
+ if dep and ver:
+ ver = ver.replace("(","")
+ ver = ver.replace(")","")
+ outfile.write(dep + " " + ver + " ")
+ else:
+ outfile.write(dep + " ")
+ outfile.write('",\n')
+ outfile.write('}\n\n')
+ outfile.write("import sys\n")
+ outfile.write("while 1:\n")
+ outfile.write("\tline = sys.stdin.readline().strip()\n")
+ outfile.write("\tif not line:\n")
+ outfile.write("\t\tsys.exit(0)\n")
+ outfile.write("\tif line in deps:\n")
+ outfile.write("\t\tprint(deps[line] + '\\n')\n")
+
+ # OE-core dependencies a.k.a. RPM requires
+ outdepends = workdir + "/" + srcname + ".requires"
+
+ try:
+ dependsfile = open(outdepends, 'w')
+ except OSError:
+        raise bb.build.FuncFailed("unable to open requires file for writing.")
+
+ dump_filerdeps('RDEPENDS', dependsfile, d)
+
+ dependsfile.close()
+ os.chmod(outdepends, 0755)
+
+ # OE-core / RPM Provides
+ outprovides = workdir + "/" + srcname + ".provides"
+
+ try:
+ providesfile = open(outprovides, 'w')
+ except OSError:
+        raise bb.build.FuncFailed("unable to open provides file for writing.")
+
+ dump_filerdeps('RPROVIDES', providesfile, d)
+
+ providesfile.close()
+ os.chmod(outprovides, 0755)
+
+ return (outdepends, outprovides)
+
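+# The generated .requires/.provides helpers are small standalone scripts that
+# rpmbuild invokes via __find_requires/__find_provides (see do_package_rpm
+# below): each packaged file name arrives on stdin and the matching
+# dependencies are printed on stdout. Sketch of the emitted script for a
+# hypothetical package file:
+#
+#   #!/usr/bin/env python
+#
+#   # Dependency table
+#   deps = {
+#       "<PKGD>/usr/bin/foo" : "libc.so.6 ",
+#   }
+#   import sys
+#   while 1:
+#       line = sys.stdin.readline().strip()
+#       if not line:
+#           sys.exit(0)
+#       if line in deps:
+#           print(deps[line] + '\n')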
+
+python write_specfile () {
+ import oe.packagedata
+
+ # append information for logs and patches to %prep
+ def add_prep(d,spec_files_bottom):
+ if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d):
+ spec_files_bottom.append('%%prep -n %s' % d.getVar('PN', True) )
+ spec_files_bottom.append('%s' % "echo \"include logs and patches, Please check them in SOURCES\"")
+ spec_files_bottom.append('')
+
+ # append the name of tarball to key word 'SOURCE' in xxx.spec.
+ def tail_source(d):
+ if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d):
+ ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
+ if not os.path.exists(ar_outdir):
+ return
+ source_list = os.listdir(ar_outdir)
+ source_number = 0
+ for source in source_list:
+                # rpmbuild does not need root permission, but it needs to
+                # know each file's user and group name; under fakeroot the
+                # only available user and group is "root".
+ f = os.path.join(ar_outdir, source)
+ os.chown(f, 0, 0)
+ spec_preamble_top.append('Source%s: %s' % (source_number, source))
+ source_number += 1
+ # We need a simple way to remove the MLPREFIX from the package name,
+ # and dependency information...
+ def strip_multilib(name, d):
+ multilibs = d.getVar('MULTILIBS', True) or ""
+ for ext in multilibs.split():
+ eext = ext.split(':')
+ if len(eext) > 1 and eext[0] == 'multilib' and name and name.find(eext[1] + '-') >= 0:
+ name = "".join(name.split(eext[1] + '-'))
+ return name
+
+ def strip_multilib_deps(deps, d):
+ depends = bb.utils.explode_dep_versions2(deps or "")
+ newdeps = {}
+ for dep in depends:
+ newdeps[strip_multilib(dep, d)] = depends[dep]
+ return bb.utils.join_deps(newdeps)
+
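+    # For example (hypothetical MULTILIBS value): with
+    # MULTILIBS = "multilib:lib32", strip_multilib("lib32-glibc", d) returns
+    # "glibc", and strip_multilib_deps applies the same stripping to every
+    # name in a dependency string.
+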
+# ml = d.getVar("MLPREFIX", True)
+# if ml and name and len(ml) != 0 and name.find(ml) == 0:
+# return ml.join(name.split(ml, 1)[1:])
+# return name
+
+ # In RPM, dependencies are of the format: pkg <>= Epoch:Version-Release
+ # This format is similar to OE, however there are restrictions on the
+ # characters that can be in a field. In the Version field, "-"
+ # characters are not allowed. "-" is allowed in the Release field.
+ #
+ # We translate the "-" in the version to a "+", by loading the PKGV
+ # from the dependent recipe, replacing the - with a +, and then using
+ # that value to do a replace inside of this recipe's dependencies.
+ # This preserves the "-" separator between the version and release, as
+ # well as any "-" characters inside of the release field.
+ #
+ # All of this has to happen BEFORE the mapping_rename_hook as
+ # after renaming we cannot look up the dependencies in the packagedata
+ # store.
+ def translate_vers(varname, d):
+ depends = d.getVar(varname, True)
+ if depends:
+ depends_dict = bb.utils.explode_dep_versions2(depends)
+ newdeps_dict = {}
+ for dep in depends_dict:
+ verlist = []
+ for ver in depends_dict[dep]:
+ if '-' in ver:
+ subd = oe.packagedata.read_subpkgdata_dict(dep, d)
+ if 'PKGV' in subd:
+ pv = subd['PV']
+ pkgv = subd['PKGV']
+ reppv = pkgv.replace('-', '+')
+ ver = ver.replace(pv, reppv).replace(pkgv, reppv)
+ if 'PKGR' in subd:
+                            # Make sure ver uses PKGR rather than PR
+ pr = '-' + subd['PR']
+ pkgr = '-' + subd['PKGR']
+ if pkgr not in ver:
+ ver = ver.replace(pr, pkgr)
+ verlist.append(ver)
+ else:
+ verlist.append(ver)
+ newdeps_dict[dep] = verlist
+ depends = bb.utils.join_deps(newdeps_dict)
+ d.setVar(varname, depends.strip())
+
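+    # Worked example (hypothetical versions): if a dependency reads
+    # "libfoo (>= 1.0-rc1-r0)" and libfoo's packagedata has
+    # PV = PKGV = "1.0-rc1", the version becomes "1.0+rc1-r0", keeping the
+    # "-" separator before the release intact.
+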
+    # We need to convert the dependency format from BitBake style to RPM
+    # style. This needs to happen AFTER the mapping_rename_hook.
+ def print_deps(variable, tag, array, d):
+ depends = variable
+ if depends:
+ depends_dict = bb.utils.explode_dep_versions2(depends)
+ for dep in depends_dict:
+ for ver in depends_dict[dep]:
+ ver = ver.replace('(', '')
+ ver = ver.replace(')', '')
+ array.append("%s: %s %s" % (tag, dep, ver))
+ if not len(depends_dict[dep]):
+ array.append("%s: %s" % (tag, dep))
+
+    def walk_files(walkpath, target, conffiles, dirfiles):
+        # We can race against the ipk/deb backends, which create CONTROL or
+        # DEBIAN directories when packaging. We simply ignore these files,
+        # which are created in packages-split/ and not package/.
+        # There is also the odd situation where the CONTROL/DEBIAN directory
+        # can be removed in the middle of the walk; the isdir() test would
+        # then fail and the walk code would assume it's a file, hence we
+        # check for the names in files too.
+ for rootpath, dirs, files in os.walk(walkpath):
+ path = rootpath.replace(walkpath, "")
+ if path.endswith("DEBIAN") or path.endswith("CONTROL"):
+ continue
+ path = path.replace("%", "%%%%%%%%")
+
+ # Treat all symlinks to directories as normal files.
+ # os.walk() lists them as directories.
+ def move_to_files(dir):
+ if os.path.islink(os.path.join(rootpath, dir)):
+ files.append(dir)
+ return True
+ else:
+ return False
+ dirs[:] = [dir for dir in dirs if not move_to_files(dir)]
+
+ # Directory handling can happen in two ways, either DIRFILES is not set at all
+ # in which case we fall back to the older behaviour of packages owning all their
+ # directories
+ if dirfiles is None:
+ for dir in dirs:
+ if dir == "CONTROL" or dir == "DEBIAN":
+ continue
+ dir = dir.replace("%", "%%%%%%%%")
+ # All packages own the directories their files are in...
+ target.append('%dir "' + path + '/' + dir + '"')
+ else:
+                # Packages own only empty directories or explicitly listed
+                # directories. This prevents overlapping security permissions.
+ if path and not files and not dirs:
+ target.append('%dir "' + path + '"')
+ elif path and path in dirfiles:
+ target.append('%dir "' + path + '"')
+
+ for file in files:
+ if file == "CONTROL" or file == "DEBIAN":
+ continue
+ file = file.replace("%", "%%%%%%%%")
+ if conffiles.count(path + '/' + file):
+ target.append('%config "' + path + '/' + file + '"')
+ else:
+ target.append('"' + path + '/' + file + '"')
+
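+    # Sketch of the %files entries walk_files() produces (hypothetical paths):
+    #   '%dir "/usr/share/foo"'     for an owned directory
+    #   '%config "/etc/foo.conf"'   for a path listed in conffiles
+    #   '"/usr/bin/foo"'            for a regular file
+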
+ # Prevent the prerm/postrm scripts from being run during an upgrade
+ def wrap_uninstall(scriptvar):
+ scr = scriptvar.strip()
+ if scr.startswith("#!"):
+ pos = scr.find("\n") + 1
+ else:
+ pos = 0
+ scr = scr[:pos] + 'if [ "$1" = "0" ] ; then\n' + scr[pos:] + '\nfi'
+ return scr
+
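+    # For a hypothetical scriptlet body "echo bye", wrap_uninstall returns:
+    #   if [ "$1" = "0" ] ; then
+    #   echo bye
+    #   fi
+    # In RPM scriptlets, $1 is the number of package instances remaining
+    # after the transaction, so the body runs only on removal, not upgrade.
+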
+ def get_perfile(varname, pkg, d):
+ deps = []
+ dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
+ dependsflist = (d.getVar(dependsflist_key, True) or "")
+ for dfile in dependsflist.split():
+ key = "FILE" + varname + "_" + dfile + "_" + pkg
+ depends = d.getVar(key, True)
+ if depends:
+ deps.append(depends)
+ return " ".join(deps)
+
+ def append_description(spec_preamble, text):
+ """
+ Add the description to the spec file.
+ """
+ import textwrap
+ dedent_text = textwrap.dedent(text).strip()
+ # Bitbake saves "\n" as "\\n"
+ if '\\n' in dedent_text:
+ for t in dedent_text.split('\\n'):
+ spec_preamble.append(t.strip())
+ else:
+ spec_preamble.append('%s' % textwrap.fill(dedent_text, width=75))
+
+ packages = d.getVar('PACKAGES', True)
+ if not packages or packages == '':
+ bb.debug(1, "No packages; nothing to do")
+ return
+
+ pkgdest = d.getVar('PKGDEST', True)
+ if not pkgdest:
+ bb.fatal("No PKGDEST")
+
+ outspecfile = d.getVar('OUTSPECFILE', True)
+ if not outspecfile:
+ bb.fatal("No OUTSPECFILE")
+
+ # Construct the SPEC file...
+ srcname = strip_multilib(d.getVar('PN', True), d)
+ srcsummary = (d.getVar('SUMMARY', True) or d.getVar('DESCRIPTION', True) or ".")
+ srcversion = d.getVar('PKGV', True).replace('-', '+')
+ srcrelease = d.getVar('PKGR', True)
+ srcepoch = (d.getVar('PKGE', True) or "")
+ srclicense = d.getVar('LICENSE', True)
+ srcsection = d.getVar('SECTION', True)
+ srcmaintainer = d.getVar('MAINTAINER', True)
+ srchomepage = d.getVar('HOMEPAGE', True)
+ srcdescription = d.getVar('DESCRIPTION', True) or "."
+ srccustomtagschunk = get_package_additional_metadata("rpm", d)
+
+ srcdepends = strip_multilib_deps(d.getVar('DEPENDS', True), d)
+ srcrdepends = []
+ srcrrecommends = []
+ srcrsuggests = []
+ srcrprovides = []
+ srcrreplaces = []
+ srcrconflicts = []
+ srcrobsoletes = []
+
+ srcrpreinst = []
+ srcrpostinst = []
+ srcrprerm = []
+ srcrpostrm = []
+
+ spec_preamble_top = []
+ spec_preamble_bottom = []
+
+ spec_scriptlets_top = []
+ spec_scriptlets_bottom = []
+
+ spec_files_top = []
+ spec_files_bottom = []
+
+ perfiledeps = (d.getVar("MERGEPERFILEDEPS", True) or "0") == "0"
+ extra_pkgdata = (d.getVar("RPM_EXTRA_PKGDATA", True) or "0") == "1"
+
+ for pkg in packages.split():
+ localdata = bb.data.createCopy(d)
+
+ root = "%s/%s" % (pkgdest, pkg)
+
+ localdata.setVar('ROOT', '')
+ localdata.setVar('ROOT_%s' % pkg, root)
+ pkgname = localdata.getVar('PKG_%s' % pkg, True)
+ if not pkgname:
+ pkgname = pkg
+ localdata.setVar('PKG', pkgname)
+
+ localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
+
+ bb.data.update_data(localdata)
+
+ conffiles = get_conffiles(pkg, d)
+ dirfiles = localdata.getVar('DIRFILES', True)
+ if dirfiles is not None:
+ dirfiles = dirfiles.split()
+
+ splitname = strip_multilib(pkgname, d)
+
+ splitsummary = (localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or ".")
+ splitversion = (localdata.getVar('PKGV', True) or "").replace('-', '+')
+ splitrelease = (localdata.getVar('PKGR', True) or "")
+ splitepoch = (localdata.getVar('PKGE', True) or "")
+ splitlicense = (localdata.getVar('LICENSE', True) or "")
+ splitsection = (localdata.getVar('SECTION', True) or "")
+ splitdescription = (localdata.getVar('DESCRIPTION', True) or ".")
+ splitcustomtagschunk = get_package_additional_metadata("rpm", localdata)
+
+ translate_vers('RDEPENDS', localdata)
+ translate_vers('RRECOMMENDS', localdata)
+ translate_vers('RSUGGESTS', localdata)
+ translate_vers('RPROVIDES', localdata)
+ translate_vers('RREPLACES', localdata)
+ translate_vers('RCONFLICTS', localdata)
+
+ # Map the dependencies into their final form
+ mapping_rename_hook(localdata)
+
+ splitrdepends = strip_multilib_deps(localdata.getVar('RDEPENDS', True), d)
+ splitrrecommends = strip_multilib_deps(localdata.getVar('RRECOMMENDS', True), d)
+ splitrsuggests = strip_multilib_deps(localdata.getVar('RSUGGESTS', True), d)
+ splitrprovides = strip_multilib_deps(localdata.getVar('RPROVIDES', True), d)
+ splitrreplaces = strip_multilib_deps(localdata.getVar('RREPLACES', True), d)
+ splitrconflicts = strip_multilib_deps(localdata.getVar('RCONFLICTS', True), d)
+ splitrobsoletes = []
+
+ splitrpreinst = localdata.getVar('pkg_preinst', True)
+ splitrpostinst = localdata.getVar('pkg_postinst', True)
+ splitrprerm = localdata.getVar('pkg_prerm', True)
+ splitrpostrm = localdata.getVar('pkg_postrm', True)
+
+
+ if not perfiledeps:
+ # Add in summary of per file dependencies
+ splitrdepends = splitrdepends + " " + get_perfile('RDEPENDS', pkg, d)
+ splitrprovides = splitrprovides + " " + get_perfile('RPROVIDES', pkg, d)
+
+ # Gather special src/first package data
+ if srcname == splitname:
+ srcrdepends = splitrdepends
+ srcrrecommends = splitrrecommends
+ srcrsuggests = splitrsuggests
+ srcrprovides = splitrprovides
+ srcrreplaces = splitrreplaces
+ srcrconflicts = splitrconflicts
+
+ srcrpreinst = splitrpreinst
+ srcrpostinst = splitrpostinst
+ srcrprerm = splitrprerm
+ srcrpostrm = splitrpostrm
+
+ file_list = []
+ walk_files(root, file_list, conffiles, dirfiles)
+ if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
+ bb.note("Not creating empty RPM package for %s" % splitname)
+ else:
+ bb.note("Creating RPM package for %s" % splitname)
+ spec_files_top.append('%files')
+ if extra_pkgdata:
+ package_rpm_extra_pkgdata(splitname, spec_files_top, localdata)
+ spec_files_top.append('%defattr(-,-,-,-)')
+ if file_list:
+ bb.note("Creating RPM package for %s" % splitname)
+ spec_files_top.extend(file_list)
+ else:
+ bb.note("Creating EMPTY RPM Package for %s" % splitname)
+ spec_files_top.append('')
+ continue
+
+ # Process subpackage data
+ spec_preamble_bottom.append('%%package -n %s' % splitname)
+ spec_preamble_bottom.append('Summary: %s' % splitsummary)
+ if srcversion != splitversion:
+ spec_preamble_bottom.append('Version: %s' % splitversion)
+ if srcrelease != splitrelease:
+ spec_preamble_bottom.append('Release: %s' % splitrelease)
+ if srcepoch != splitepoch:
+ spec_preamble_bottom.append('Epoch: %s' % splitepoch)
+ if srclicense != splitlicense:
+ spec_preamble_bottom.append('License: %s' % splitlicense)
+ spec_preamble_bottom.append('Group: %s' % splitsection)
+
+ if srccustomtagschunk != splitcustomtagschunk:
+ spec_preamble_bottom.append(splitcustomtagschunk)
+
+ # Replaces == Obsoletes && Provides
+ robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes or "")
+ rprovides = bb.utils.explode_dep_versions2(splitrprovides or "")
+ rreplaces = bb.utils.explode_dep_versions2(splitrreplaces or "")
+ for dep in rreplaces:
+ if not dep in robsoletes:
+ robsoletes[dep] = rreplaces[dep]
+ if not dep in rprovides:
+ rprovides[dep] = rreplaces[dep]
+ splitrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
+ splitrprovides = bb.utils.join_deps(rprovides, commasep=False)
+
+ print_deps(splitrdepends, "Requires", spec_preamble_bottom, d)
+ if splitrpreinst:
+ print_deps(splitrdepends, "Requires(pre)", spec_preamble_bottom, d)
+ if splitrpostinst:
+ print_deps(splitrdepends, "Requires(post)", spec_preamble_bottom, d)
+ if splitrprerm:
+ print_deps(splitrdepends, "Requires(preun)", spec_preamble_bottom, d)
+ if splitrpostrm:
+ print_deps(splitrdepends, "Requires(postun)", spec_preamble_bottom, d)
+
+ # Suggests in RPM are like recommends in OE-core!
+ print_deps(splitrrecommends, "Suggests", spec_preamble_bottom, d)
+        # RPM has no direct analog for suggests, so emit them as Recommends for now.
+ print_deps(splitrsuggests, "Recommends", spec_preamble_bottom, d)
+ print_deps(splitrprovides, "Provides", spec_preamble_bottom, d)
+ print_deps(splitrobsoletes, "Obsoletes", spec_preamble_bottom, d)
+
+ # conflicts can not be in a provide! We will need to filter it.
+ if splitrconflicts:
+ depends_dict = bb.utils.explode_dep_versions2(splitrconflicts)
+ newdeps_dict = {}
+ for dep in depends_dict:
+ if dep not in splitrprovides:
+ newdeps_dict[dep] = depends_dict[dep]
+ if newdeps_dict:
+ splitrconflicts = bb.utils.join_deps(newdeps_dict)
+ else:
+ splitrconflicts = ""
+
+ print_deps(splitrconflicts, "Conflicts", spec_preamble_bottom, d)
+
+ spec_preamble_bottom.append('')
+
+ spec_preamble_bottom.append('%%description -n %s' % splitname)
+ append_description(spec_preamble_bottom, splitdescription)
+
+ spec_preamble_bottom.append('')
+
+ # Now process scriptlets
+ if splitrpreinst:
+ spec_scriptlets_bottom.append('%%pre -n %s' % splitname)
+ spec_scriptlets_bottom.append('# %s - preinst' % splitname)
+ spec_scriptlets_bottom.append(splitrpreinst)
+ spec_scriptlets_bottom.append('')
+ if splitrpostinst:
+ spec_scriptlets_bottom.append('%%post -n %s' % splitname)
+ spec_scriptlets_bottom.append('# %s - postinst' % splitname)
+ spec_scriptlets_bottom.append(splitrpostinst)
+ spec_scriptlets_bottom.append('')
+ if splitrprerm:
+ spec_scriptlets_bottom.append('%%preun -n %s' % splitname)
+ spec_scriptlets_bottom.append('# %s - prerm' % splitname)
+ scriptvar = wrap_uninstall(splitrprerm)
+ spec_scriptlets_bottom.append(scriptvar)
+ spec_scriptlets_bottom.append('')
+ if splitrpostrm:
+ spec_scriptlets_bottom.append('%%postun -n %s' % splitname)
+ spec_scriptlets_bottom.append('# %s - postrm' % splitname)
+ scriptvar = wrap_uninstall(splitrpostrm)
+ spec_scriptlets_bottom.append(scriptvar)
+ spec_scriptlets_bottom.append('')
+
+ # Now process files
+ file_list = []
+ walk_files(root, file_list, conffiles, dirfiles)
+ if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
+ bb.note("Not creating empty RPM package for %s" % splitname)
+ else:
+ spec_files_bottom.append('%%files -n %s' % splitname)
+ if extra_pkgdata:
+ package_rpm_extra_pkgdata(splitname, spec_files_bottom, localdata)
+ spec_files_bottom.append('%defattr(-,-,-,-)')
+ if file_list:
+ bb.note("Creating RPM package for %s" % splitname)
+ spec_files_bottom.extend(file_list)
+ else:
+ bb.note("Creating EMPTY RPM Package for %s" % splitname)
+ spec_files_bottom.append('')
+
+ del localdata
+
+ add_prep(d,spec_files_bottom)
+ spec_preamble_top.append('Summary: %s' % srcsummary)
+ spec_preamble_top.append('Name: %s' % srcname)
+ spec_preamble_top.append('Version: %s' % srcversion)
+ spec_preamble_top.append('Release: %s' % srcrelease)
+ if srcepoch and srcepoch.strip() != "":
+ spec_preamble_top.append('Epoch: %s' % srcepoch)
+ spec_preamble_top.append('License: %s' % srclicense)
+ spec_preamble_top.append('Group: %s' % srcsection)
+ spec_preamble_top.append('Packager: %s' % srcmaintainer)
+ if srchomepage:
+ spec_preamble_top.append('URL: %s' % srchomepage)
+ if srccustomtagschunk:
+ spec_preamble_top.append(srccustomtagschunk)
+ tail_source(d)
+
+ # Replaces == Obsoletes && Provides
+ robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes or "")
+ rprovides = bb.utils.explode_dep_versions2(srcrprovides or "")
+ rreplaces = bb.utils.explode_dep_versions2(srcrreplaces or "")
+ for dep in rreplaces:
+ if not dep in robsoletes:
+ robsoletes[dep] = rreplaces[dep]
+ if not dep in rprovides:
+ rprovides[dep] = rreplaces[dep]
+ srcrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
+ srcrprovides = bb.utils.join_deps(rprovides, commasep=False)
+
+ print_deps(srcdepends, "BuildRequires", spec_preamble_top, d)
+ print_deps(srcrdepends, "Requires", spec_preamble_top, d)
+ if srcrpreinst:
+ print_deps(srcrdepends, "Requires(pre)", spec_preamble_top, d)
+ if srcrpostinst:
+ print_deps(srcrdepends, "Requires(post)", spec_preamble_top, d)
+ if srcrprerm:
+ print_deps(srcrdepends, "Requires(preun)", spec_preamble_top, d)
+ if srcrpostrm:
+ print_deps(srcrdepends, "Requires(postun)", spec_preamble_top, d)
+
+ # Suggests in RPM are like recommends in OE-core!
+ print_deps(srcrrecommends, "Suggests", spec_preamble_top, d)
+    # RPM has no direct analog for suggests, so emit them as Recommends for now.
+ print_deps(srcrsuggests, "Recommends", spec_preamble_top, d)
+ print_deps(srcrprovides, "Provides", spec_preamble_top, d)
+ print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
+
+ # conflicts can not be in a provide! We will need to filter it.
+ if srcrconflicts:
+ depends_dict = bb.utils.explode_dep_versions2(srcrconflicts)
+ newdeps_dict = {}
+ for dep in depends_dict:
+ if dep not in srcrprovides:
+ newdeps_dict[dep] = depends_dict[dep]
+ if newdeps_dict:
+ srcrconflicts = bb.utils.join_deps(newdeps_dict)
+ else:
+ srcrconflicts = ""
+
+ print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
+
+ spec_preamble_top.append('')
+
+ spec_preamble_top.append('%description')
+ append_description(spec_preamble_top, srcdescription)
+
+ spec_preamble_top.append('')
+
+ if srcrpreinst:
+ spec_scriptlets_top.append('%pre')
+ spec_scriptlets_top.append('# %s - preinst' % srcname)
+ spec_scriptlets_top.append(srcrpreinst)
+ spec_scriptlets_top.append('')
+ if srcrpostinst:
+ spec_scriptlets_top.append('%post')
+ spec_scriptlets_top.append('# %s - postinst' % srcname)
+ spec_scriptlets_top.append(srcrpostinst)
+ spec_scriptlets_top.append('')
+ if srcrprerm:
+ spec_scriptlets_top.append('%preun')
+ spec_scriptlets_top.append('# %s - prerm' % srcname)
+ scriptvar = wrap_uninstall(srcrprerm)
+ spec_scriptlets_top.append(scriptvar)
+ spec_scriptlets_top.append('')
+ if srcrpostrm:
+ spec_scriptlets_top.append('%postun')
+ spec_scriptlets_top.append('# %s - postrm' % srcname)
+ scriptvar = wrap_uninstall(srcrpostrm)
+ spec_scriptlets_top.append(scriptvar)
+ spec_scriptlets_top.append('')
+
+ # Write the SPEC file
+ try:
+ specfile = open(outspecfile, 'w')
+ except OSError:
+ raise bb.build.FuncFailed("unable to open spec file for writing.")
+
+ # RPMSPEC_PREAMBLE is a way to add arbitrary text to the top
+ # of the generated spec file
+ external_preamble = d.getVar("RPMSPEC_PREAMBLE", True)
+ if external_preamble:
+ specfile.write(external_preamble + "\n")
+
+ for line in spec_preamble_top:
+ specfile.write(line + "\n")
+
+ for line in spec_preamble_bottom:
+ specfile.write(line + "\n")
+
+ for line in spec_scriptlets_top:
+ specfile.write(line + "\n")
+
+ for line in spec_scriptlets_bottom:
+ specfile.write(line + "\n")
+
+ for line in spec_files_top:
+ specfile.write(line + "\n")
+
+ for line in spec_files_bottom:
+ specfile.write(line + "\n")
+
+ specfile.close()
+}
+# Otherwise allarch packages may change depending on override configuration
+write_specfile[vardepsexclude] = "OVERRIDES"
+
+python do_package_rpm () {
+ # We need a simple way to remove the MLPREFIX from the package name,
+ # and dependency information...
+ def strip_multilib(name, d):
+ ml = d.getVar("MLPREFIX", True)
+ if ml and name and len(ml) != 0 and name.find(ml) >= 0:
+ return "".join(name.split(ml))
+ return name
+
+ workdir = d.getVar('WORKDIR', True)
+ tmpdir = d.getVar('TMPDIR', True)
+ pkgd = d.getVar('PKGD', True)
+ pkgdest = d.getVar('PKGDEST', True)
+ if not workdir or not pkgd or not tmpdir:
+ bb.error("Variables incorrectly set, unable to package")
+ return
+
+ packages = d.getVar('PACKAGES', True)
+ if not packages or packages == '':
+ bb.debug(1, "No packages; nothing to do")
+ return
+
+ # Construct the spec file...
+    # If the spec file already exists and has not been stored in
+    # pseudo's files.db, it may cause the rpmbuild src.rpm step to fail,
+    # so remove it before running rpmbuild.
+ srcname = strip_multilib(d.getVar('PN', True), d)
+ outspecfile = workdir + "/" + srcname + ".spec"
+ if os.path.isfile(outspecfile):
+ os.remove(outspecfile)
+ d.setVar('OUTSPECFILE', outspecfile)
+ bb.build.exec_func('write_specfile', d)
+
+ perfiledeps = (d.getVar("MERGEPERFILEDEPS", True) or "0") == "0"
+ if perfiledeps:
+ outdepends, outprovides = write_rpm_perfiledata(srcname, d)
+
+ # Setup the rpmbuild arguments...
+ rpmbuild = d.getVar('RPMBUILD', True)
+ targetsys = d.getVar('TARGET_SYS', True)
+ targetvendor = d.getVar('HOST_VENDOR', True)
+ package_arch = (d.getVar('PACKAGE_ARCH', True) or "").replace("-", "_")
+ sdkpkgsuffix = (d.getVar('SDKPKGSUFFIX', True) or "nativesdk").replace("-", "_")
+ if package_arch not in "all any noarch".split() and not package_arch.endswith(sdkpkgsuffix):
+ ml_prefix = (d.getVar('MLPREFIX', True) or "").replace("-", "_")
+ d.setVar('PACKAGE_ARCH_EXTEND', ml_prefix + package_arch)
+ else:
+ d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
+ pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}')
+ d.setVar('RPM_PKGWRITEDIR', pkgwritedir)
+ bb.debug(1, 'PKGWRITEDIR: %s' % d.getVar('RPM_PKGWRITEDIR', True))
+ pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${HOST_VENDOR}-${HOST_OS}')
+ magicfile = d.expand('${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc')
+ bb.utils.mkdirhier(pkgwritedir)
+ os.chmod(pkgwritedir, 0755)
+
+ cmd = rpmbuild
+ cmd = cmd + " --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
+ cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'"
+ cmd = cmd + " --define '_builddir " + d.getVar('S', True) + "'"
+ cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'"
+ cmd = cmd + " --define '_use_internal_dependency_generator 0'"
+ if perfiledeps:
+ cmd = cmd + " --define '__find_requires " + outdepends + "'"
+ cmd = cmd + " --define '__find_provides " + outprovides + "'"
+ else:
+ cmd = cmd + " --define '__find_requires %{nil}'"
+ cmd = cmd + " --define '__find_provides %{nil}'"
+ cmd = cmd + " --define '_unpackaged_files_terminate_build 0'"
+ cmd = cmd + " --define 'debug_package %{nil}'"
+ cmd = cmd + " --define '_rpmfc_magic_path " + magicfile + "'"
+ cmd = cmd + " --define '_tmppath " + workdir + "'"
+ if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d):
+ cmd = cmd + " --define '_sourcedir " + d.getVar('ARCHIVER_OUTDIR', True) + "'"
+ cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_OUTDIR', True) + "'"
+ cmdsrpm = cmdsrpm + " -bs " + outspecfile
+ # Build the .src.rpm
+ d.setVar('SBUILDSPEC', cmdsrpm + "\n")
+ d.setVarFlag('SBUILDSPEC', 'func', '1')
+ bb.build.exec_func('SBUILDSPEC', d)
+ cmd = cmd + " -bb " + outspecfile
+
+ # Build the rpm package!
+ d.setVar('BUILDSPEC', cmd + "\n")
+ d.setVarFlag('BUILDSPEC', 'func', '1')
+ bb.build.exec_func('BUILDSPEC', d)
+
+ if d.getVar('RPM_SIGN_PACKAGES', True) == '1':
+ bb.build.exec_func("sign_rpm", d)
+}
+
+python () {
+ if d.getVar('PACKAGES', True) != '':
+ deps = ' rpm-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
+ d.appendVarFlag('do_package_write_rpm', 'depends', deps)
+ d.setVarFlag('do_package_write_rpm', 'fakeroot', '1')
+}
+
+SSTATETASKS += "do_package_write_rpm"
+do_package_write_rpm[sstate-inputdirs] = "${PKGWRITEDIRRPM}"
+do_package_write_rpm[sstate-outputdirs] = "${DEPLOY_DIR_RPM}"
+# Take a shared lock, we can write multiple packages at the same time...
+# but we need to stop the rootfs/solver from running while we do...
+do_package_write_rpm[sstate-lockfile-shared] += "${DEPLOY_DIR_RPM}/rpm.lock"
+
+python do_package_write_rpm_setscene () {
+ sstate_setscene(d)
+}
+addtask do_package_write_rpm_setscene
+
+python do_package_write_rpm () {
+ bb.build.exec_func("read_subpackage_metadata", d)
+ bb.build.exec_func("do_package_rpm", d)
+}
+
+do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
+do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
+do_package_write_rpm[umask] = "022"
+addtask package_write_rpm after do_packagedata do_package
+
+PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
+PACKAGEINDEXDEPS += "createrepo-native:do_populate_sysroot"
+
+do_build[recrdeptask] += "do_package_write_rpm"
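For orientation, the command assembled above comes out roughly like the abridged sketch below (hypothetical paths and target triplet, for illustration only; the real values come from RPMBUILD, WORKDIR, the perfiledeps helper scripts and the generated spec file):

    rpmbuild --nodeps --short-circuit --target armv7a-poky-linux-gnueabi \
        --buildroot /work/recipe/1.0-r0/package \
        --define '_topdir /work/recipe/1.0-r0' \
        --define '_rpmdir /path/to/deploy-rpms/armv7a' \
        --define '__find_requires /path/to/outdepends' \
        -bb /work/recipe/1.0-r0/recipe.spec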
diff --git a/import-layers/yocto-poky/meta/classes/package_tar.bbclass b/import-layers/yocto-poky/meta/classes/package_tar.bbclass
new file mode 100644
index 000000000..854e64528
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/package_tar.bbclass
@@ -0,0 +1,69 @@
+inherit package
+
+IMAGE_PKGTYPE ?= "tar"
+
+python do_package_tar () {
+ import subprocess
+ workdir = d.getVar('WORKDIR', True)
+ if not workdir:
+ bb.error("WORKDIR not defined, unable to package")
+ return
+
+ outdir = d.getVar('DEPLOY_DIR_TAR', True)
+ if not outdir:
+ bb.error("DEPLOY_DIR_TAR not defined, unable to package")
+ return
+
+ dvar = d.getVar('D', True)
+ if not dvar:
+ bb.error("D not defined, unable to package")
+ return
+
+ packages = d.getVar('PACKAGES', True)
+ if not packages:
+ bb.debug(1, "PACKAGES not defined, nothing to package")
+ return
+
+ pkgdest = d.getVar('PKGDEST', True)
+
+ bb.utils.mkdirhier(outdir)
+ bb.utils.mkdirhier(dvar)
+
+ for pkg in packages.split():
+ localdata = bb.data.createCopy(d)
+ root = "%s/%s" % (pkgdest, pkg)
+
+ overrides = localdata.getVar('OVERRIDES', False)
+ localdata.setVar('OVERRIDES', '%s:%s' % (overrides, pkg))
+ bb.data.update_data(localdata)
+
+ bb.utils.mkdirhier(root)
+ basedir = os.path.dirname(root)
+ tarfn = localdata.expand("${DEPLOY_DIR_TAR}/${PKG}-${PKGV}-${PKGR}.tar.gz")
+ os.chdir(root)
+ dlist = os.listdir(root)
+ if not dlist:
+ bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
+ continue
+ args = "tar -cz --exclude=CONTROL --exclude=DEBIAN -f".split()
+ ret = subprocess.call(args + [tarfn] + dlist)
+ if ret != 0:
+ bb.error("Creation of tar %s failed." % tarfn)
+}
+
+python () {
+ if d.getVar('PACKAGES', True) != '':
+ deps = (d.getVarFlag('do_package_write_tar', 'depends', True) or "").split()
+ deps.append('tar-native:do_populate_sysroot')
+ deps.append('virtual/fakeroot-native:do_populate_sysroot')
+ d.setVarFlag('do_package_write_tar', 'depends', " ".join(deps))
+ d.setVarFlag('do_package_write_tar', 'fakeroot', "1")
+}
+
+
+python do_package_write_tar () {
+ bb.build.exec_func("read_subpackage_metadata", d)
+ bb.build.exec_func("do_package_tar", d)
+}
+do_package_write_tar[dirs] = "${D}"
+addtask package_write_tar before do_build after do_packagedata do_package
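Concretely, for a hypothetical package foo at version 1.0-r0, the subprocess call above is equivalent to running something like:

    cd ${PKGDEST}/foo
    tar -cz --exclude=CONTROL --exclude=DEBIAN \
        -f ${DEPLOY_DIR_TAR}/foo-1.0-r0.tar.gz usr etc

where usr and etc stand in for whatever top-level entries os.listdir() finds in the package root.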
diff --git a/import-layers/yocto-poky/meta/classes/packagedata.bbclass b/import-layers/yocto-poky/meta/classes/packagedata.bbclass
new file mode 100644
index 000000000..3397f1e36
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/packagedata.bbclass
@@ -0,0 +1,34 @@
+python read_subpackage_metadata () {
+ import oe.packagedata
+
+ vars = {
+ "PN" : d.getVar('PN', True),
+ "PE" : d.getVar('PE', True),
+ "PV" : d.getVar('PV', True),
+ "PR" : d.getVar('PR', True),
+ }
+
+ data = oe.packagedata.read_pkgdata(vars["PN"], d)
+
+ for key in data.keys():
+ d.setVar(key, data[key])
+
+ for pkg in d.getVar('PACKAGES', True).split():
+ sdata = oe.packagedata.read_subpkgdata(pkg, d)
+ for key in sdata.keys():
+ if key in vars:
+ if sdata[key] != vars[key]:
+ if key == "PN":
+ bb.fatal("Recipe %s is trying to create package %s which was already written by recipe %s. This will cause corruption, please resolve this and only provide the package from one recipe or the other or only build one of the recipes." % (vars[key], pkg, sdata[key]))
+ bb.fatal("Recipe %s is trying to change %s from '%s' to '%s'. This will cause do_package_write_* failures since the incorrect data will be used and they will be unable to find the right workdir." % (vars["PN"], key, vars[key], sdata[key]))
+ continue
+ #
+ # If we set unsuffixed variables here there is a chance they could clobber override versions
+    # of that variable, e.g. DESCRIPTION could clobber DESCRIPTION_<pkgname>.
+    # We therefore don't clobber when setting the unsuffixed variable versions.
+ #
+ if key.endswith("_" + pkg):
+ d.setVar(key, sdata[key])
+ else:
+ d.setVar(key, sdata[key], parsing=True)
+}
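A minimal standalone sketch of the suffix rule above (plain Python, with a dict standing in for the real datastore and pkgdata):

    pkg = "foo-dev"
    sdata = {"DESCRIPTION": "base description",
             "DESCRIPTION_foo-dev": "development files"}
    for key, value in sdata.items():
        if key.endswith("_" + pkg):
            print("%s: set unconditionally (package-specific)" % key)
        else:
            print("%s: set with parsing=True (no override clobbering)" % key)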
diff --git a/import-layers/yocto-poky/meta/classes/packagegroup.bbclass b/import-layers/yocto-poky/meta/classes/packagegroup.bbclass
new file mode 100644
index 000000000..38bdbd382
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/packagegroup.bbclass
@@ -0,0 +1,54 @@
+# Class for packagegroup (package group) recipes
+
+# By default, only the packagegroup package itself is in PACKAGES.
+# -dbg and -dev flavours are handled by the anonfunc below.
+# This means that packagegroup recipes used to build multiple packagegroup
+# packages have to modify PACKAGES after inheriting packagegroup.bbclass.
+PACKAGES = "${PN}"
+
+# By default, packagegroup packages are not tied to a certain architecture.
+# Only if dependencies are modified by MACHINE_FEATURES does PACKAGE_ARCH
+# need to be set to MACHINE_ARCH after inheriting packagegroup.bbclass.
+PACKAGE_ARCH ?= "all"
+
+# Fully expanded - so it applies the overrides as well
+PACKAGE_ARCH_EXPANDED := "${PACKAGE_ARCH}"
+
+inherit ${@oe.utils.ifelse(d.getVar('PACKAGE_ARCH_EXPANDED', True) == 'all', 'allarch', '')}
+
+# This automatically adds -dbg and -dev flavours of all PACKAGES
+# to the list. Their dependencies (RRECOMMENDS) are handled as usual
+# by package_depchains in a following step.
+# Also mark all packages as ALLOW_EMPTY
+python () {
+ packages = d.getVar('PACKAGES', True).split()
+ if d.getVar('PACKAGEGROUP_DISABLE_COMPLEMENTARY', True) != '1':
+ types = ['', '-dbg', '-dev']
+ if bb.utils.contains('DISTRO_FEATURES', 'ptest', True, False, d):
+ types.append('-ptest')
+ packages = [pkg + suffix for pkg in packages
+ for suffix in types]
+ d.setVar('PACKAGES', ' '.join(packages))
+ for pkg in packages:
+ d.setVar('ALLOW_EMPTY_%s' % pkg, '1')
+}
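As a quick illustration of the expansion performed by the anonymous function above (plain Python, independent of the datastore):

    packages = ['packagegroup-core-tools']
    types = ['', '-dbg', '-dev']
    print([pkg + suffix for pkg in packages for suffix in types])
    # ['packagegroup-core-tools', 'packagegroup-core-tools-dbg',
    #  'packagegroup-core-tools-dev']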
+
+# We don't want to look at shared library dependencies for the
+# dbg packages
+DEPCHAIN_DBGDEFAULTDEPS = "1"
+
+# We only need the packaging tasks - disable the rest
+do_fetch[noexec] = "1"
+do_unpack[noexec] = "1"
+do_patch[noexec] = "1"
+do_configure[noexec] = "1"
+do_compile[noexec] = "1"
+do_install[noexec] = "1"
+do_populate_sysroot[noexec] = "1"
+
+python () {
+ initman = d.getVar("VIRTUAL-RUNTIME_init_manager", True)
+ if initman and initman in ['sysvinit', 'systemd'] and not bb.utils.contains('DISTRO_FEATURES', initman, True, False, d):
+ bb.fatal("Please ensure that your setting of VIRTUAL-RUNTIME_init_manager (%s) matches the entries enabled in DISTRO_FEATURES" % initman)
+}
+
diff --git a/import-layers/yocto-poky/meta/classes/patch.bbclass b/import-layers/yocto-poky/meta/classes/patch.bbclass
new file mode 100644
index 000000000..3d22ad838
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/patch.bbclass
@@ -0,0 +1,187 @@
+# Copyright (C) 2006 OpenedHand LTD
+
+# Point to an empty file so any user's custom settings don't break things
+QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc"
+
+PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
+
+inherit terminal
+
+def src_patches(d, all = False ):
+ workdir = d.getVar('WORKDIR', True)
+ fetch = bb.fetch2.Fetch([], d)
+ patches = []
+ sources = []
+ for url in fetch.urls:
+ local = patch_path(url, fetch, workdir)
+ if not local:
+ if all:
+ local = fetch.localpath(url)
+ sources.append(local)
+ continue
+
+ urldata = fetch.ud[url]
+ parm = urldata.parm
+ patchname = parm.get('pname') or os.path.basename(local)
+
+ apply, reason = should_apply(parm, d)
+ if not apply:
+ if reason:
+ bb.note("Patch %s %s" % (patchname, reason))
+ continue
+
+ patchparm = {'patchname': patchname}
+ if "striplevel" in parm:
+ striplevel = parm["striplevel"]
+ elif "pnum" in parm:
+ #bb.msg.warn(None, "Deprecated usage of 'pnum' url parameter in '%s', please use 'striplevel'" % url)
+ striplevel = parm["pnum"]
+ else:
+ striplevel = '1'
+ patchparm['striplevel'] = striplevel
+
+ patchdir = parm.get('patchdir')
+ if patchdir:
+ patchparm['patchdir'] = patchdir
+
+ localurl = bb.fetch.encodeurl(('file', '', local, '', '', patchparm))
+ patches.append(localurl)
+
+ if all:
+ return sources
+
+ return patches
+
+def patch_path(url, fetch, workdir):
+ """Return the local path of a patch, or None if this isn't a patch"""
+
+ local = fetch.localpath(url)
+ base, ext = os.path.splitext(os.path.basename(local))
+ if ext in ('.gz', '.bz2', '.Z'):
+ local = os.path.join(workdir, base)
+ ext = os.path.splitext(base)[1]
+
+ urldata = fetch.ud[url]
+ if "apply" in urldata.parm:
+ apply = oe.types.boolean(urldata.parm["apply"])
+ if not apply:
+ return
+ elif ext not in (".diff", ".patch"):
+ return
+
+ return local
+
+def should_apply(parm, d):
+ """Determine if we should apply the given patch"""
+
+ if "mindate" in parm or "maxdate" in parm:
+ pn = d.getVar('PN', True)
+ srcdate = d.getVar('SRCDATE_%s' % pn, True)
+ if not srcdate:
+ srcdate = d.getVar('SRCDATE', True)
+
+ if srcdate == "now":
+ srcdate = d.getVar('DATE', True)
+
+ if "maxdate" in parm and parm["maxdate"] < srcdate:
+ return False, 'is outdated'
+
+ if "mindate" in parm and parm["mindate"] > srcdate:
+ return False, 'is predated'
+
+
+ if "minrev" in parm:
+ srcrev = d.getVar('SRCREV', True)
+ if srcrev and srcrev < parm["minrev"]:
+ return False, 'applies to later revisions'
+
+ if "maxrev" in parm:
+ srcrev = d.getVar('SRCREV', True)
+ if srcrev and srcrev > parm["maxrev"]:
+ return False, 'applies to earlier revisions'
+
+ if "rev" in parm:
+ srcrev = d.getVar('SRCREV', True)
+ if srcrev and parm["rev"] not in srcrev:
+ return False, "doesn't apply to revision"
+
+ if "notrev" in parm:
+ srcrev = d.getVar('SRCREV', True)
+ if srcrev and parm["notrev"] in srcrev:
+ return False, "doesn't apply to revision"
+
+ return True, None
+
+should_apply[vardepsexclude] = "DATE SRCDATE"
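These parameters are given per entry in SRC_URI; a hedged example with hypothetical patch names, using keys parsed by src_patches() and should_apply() above:

    SRC_URI += "file://fix-build.patch;striplevel=2 \
                file://subdir.patch;patchdir=lib \
                file://old-only.patch;maxrev=1234"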
+
+python patch_do_patch() {
+ import oe.patch
+
+ patchsetmap = {
+ "patch": oe.patch.PatchTree,
+ "quilt": oe.patch.QuiltTree,
+ "git": oe.patch.GitApplyTree,
+ }
+
+ cls = patchsetmap[d.getVar('PATCHTOOL', True) or 'quilt']
+
+ resolvermap = {
+ "noop": oe.patch.NOOPResolver,
+ "user": oe.patch.UserResolver,
+ }
+
+ rcls = resolvermap[d.getVar('PATCHRESOLVE', True) or 'user']
+
+ classes = {}
+
+ s = d.getVar('S', True)
+
+ os.putenv('PATH', d.getVar('PATH', True))
+
+ # We must use one TMPDIR per process so that the "patch" processes
+ # don't generate the same temp file name.
+
+ import tempfile
+ process_tmpdir = tempfile.mkdtemp()
+ os.environ['TMPDIR'] = process_tmpdir
+
+ for patch in src_patches(d):
+ _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
+
+ if "patchdir" in parm:
+ patchdir = parm["patchdir"]
+ if not os.path.isabs(patchdir):
+ patchdir = os.path.join(s, patchdir)
+ else:
+ patchdir = s
+
+ if not patchdir in classes:
+ patchset = cls(patchdir, d)
+ resolver = rcls(patchset, oe_terminal)
+ classes[patchdir] = (patchset, resolver)
+ patchset.Clean()
+ else:
+ patchset, resolver = classes[patchdir]
+
+ bb.note("Applying patch '%s' (%s)" % (parm['patchname'], oe.path.format_display(local, d)))
+ try:
+ patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
+ except Exception as exc:
+ bb.utils.remove(process_tmpdir, True)
+ bb.fatal(str(exc))
+ try:
+ resolver.Resolve()
+ except bb.BBHandledException as e:
+ bb.utils.remove(process_tmpdir, True)
+ bb.fatal(str(e))
+
+ bb.utils.remove(process_tmpdir, True)
+ del os.environ['TMPDIR']
+}
+patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
+
+addtask patch after do_unpack
+do_patch[dirs] = "${WORKDIR}"
+do_patch[depends] = "${PATCHDEPENDENCY}"
+
+EXPORT_FUNCTIONS do_patch
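The lookups in patch_do_patch() are driven by two variables that a recipe or local.conf may set; the valid values are exactly the keys of patchsetmap and resolvermap above:

    PATCHTOOL = "git"
    PATCHRESOLVE = "noop"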
diff --git a/import-layers/yocto-poky/meta/classes/perlnative.bbclass b/import-layers/yocto-poky/meta/classes/perlnative.bbclass
new file mode 100644
index 000000000..cc8de8b38
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/perlnative.bbclass
@@ -0,0 +1,3 @@
+EXTRANATIVEPATH += "perl-native"
+DEPENDS += "perl-native"
+OECMAKE_PERLNATIVE_DIR = "${STAGING_BINDIR_NATIVE}/perl-native"
diff --git a/import-layers/yocto-poky/meta/classes/pixbufcache.bbclass b/import-layers/yocto-poky/meta/classes/pixbufcache.bbclass
new file mode 100644
index 000000000..dbe11e12d
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/pixbufcache.bbclass
@@ -0,0 +1,67 @@
+#
+# This class will generate the proper postinst/postrm scriptlets for pixbuf
+# packages.
+#
+
+DEPENDS += "qemu-native"
+inherit qemu
+
+PIXBUF_PACKAGES ??= "${PN}"
+
+pixbufcache_common() {
+if [ "x$D" != "x" ]; then
+ $INTERCEPT_DIR/postinst_intercept update_pixbuf_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \
+ bindir=${bindir} base_libdir=${base_libdir}
+else
+
+ # Update the pixbuf loaders in case they haven't been registered yet
+ ${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
+
+ if [ -x ${bindir}/gtk-update-icon-cache ] && [ -d ${datadir}/icons ]; then
+ for icondir in /usr/share/icons/*; do
+ if [ -d ${icondir} ]; then
+ gtk-update-icon-cache -t -q ${icondir}
+ fi
+ done
+ fi
+fi
+}
+
+python populate_packages_append() {
+ pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES', True).split()
+
+ for pkg in pixbuf_pkgs:
+ bb.note("adding pixbuf postinst and postrm scripts to %s" % pkg)
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('pixbufcache_common', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+
+ postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
+ if not postrm:
+ postrm = '#!/bin/sh\n'
+ postrm += d.getVar('pixbufcache_common', True)
+ d.setVar('pkg_postrm_%s' % pkg, postrm)
+}
+
+gdkpixbuf_complete() {
+ GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache || exit 1
+}
+
+#
+# Add an sstate postinst hook to update the cache for native packages.
+# An error exit during populate_sysroot_setscene allows bitbake to
+# try to recover by re-building the package.
+#
+SSTATEPOSTINSTFUNCS_append_class-native = " pixbufcache_sstate_postinst"
+
+# See base.bbclass for the other half of this
+pixbufcache_sstate_postinst() {
+ if [ "${BB_CURRENTTASK}" = "populate_sysroot" ]; then
+ ${gdkpixbuf_complete}
+ elif [ "${BB_CURRENTTASK}" = "populate_sysroot_setscene" ]; then
+ echo "${gdkpixbuf_complete}" >> ${STAGING_DIR}/sstatecompletions
+ fi
+}
+
diff --git a/import-layers/yocto-poky/meta/classes/pkgconfig.bbclass b/import-layers/yocto-poky/meta/classes/pkgconfig.bbclass
new file mode 100644
index 000000000..ad1f84f50
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/pkgconfig.bbclass
@@ -0,0 +1,2 @@
+DEPENDS_prepend = "pkgconfig-native "
+
diff --git a/import-layers/yocto-poky/meta/classes/populate_sdk.bbclass b/import-layers/yocto-poky/meta/classes/populate_sdk.bbclass
new file mode 100644
index 000000000..f64a911b7
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/populate_sdk.bbclass
@@ -0,0 +1,7 @@
+# The majority of populate_sdk is located in populate_sdk_base.
+# This chunk simply facilitates compatibility with SDK-only recipes.
+
+inherit populate_sdk_base
+
+addtask populate_sdk after do_install before do_build
+
diff --git a/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass b/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass
new file mode 100644
index 000000000..008bb577c
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass
@@ -0,0 +1,267 @@
+inherit meta
+
+# Wildcards specifying complementary packages to install for every package that has been explicitly
+# installed into the rootfs
+COMPLEMENTARY_GLOB[dev-pkgs] = '*-dev'
+COMPLEMENTARY_GLOB[staticdev-pkgs] = '*-staticdev'
+COMPLEMENTARY_GLOB[doc-pkgs] = '*-doc'
+COMPLEMENTARY_GLOB[dbg-pkgs] = '*-dbg'
+COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest'
+
+def complementary_globs(featurevar, d):
+ all_globs = d.getVarFlags('COMPLEMENTARY_GLOB')
+ globs = []
+ features = set((d.getVar(featurevar, True) or '').split())
+ for name, glob in all_globs.items():
+ if name in features:
+ globs.append(glob)
+ return ' '.join(globs)
+
+SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs"
+SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}'
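For example, with the COMPLEMENTARY_GLOB flags defined above, the setting below makes complementary_globs() return "*-dev *-ptest" (the ordering follows the flag dictionary and may vary):

    SDKIMAGE_FEATURES = "dev-pkgs ptest-pkgs"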
+
+inherit rootfs_${IMAGE_PKGTYPE}
+
+SDK_DIR = "${WORKDIR}/sdk"
+SDK_OUTPUT = "${SDK_DIR}/image"
+SDK_DEPLOY = "${DEPLOY_DIR}/sdk"
+
+B_task-populate-sdk = "${SDK_DIR}"
+
+SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
+
+TOOLCHAIN_HOST_TASK ?= "nativesdk-packagegroup-sdk-host packagegroup-cross-canadian-${MACHINE}"
+TOOLCHAIN_HOST_TASK_ATTEMPTONLY ?= ""
+TOOLCHAIN_TARGET_TASK ?= " \
+ ${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')} \
+ ${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target-dbg')} \
+ "
+TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= ""
+TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
+
+SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
+SDK_DEPENDS = "virtual/fakeroot-native pixz-native"
+
+# We want REAL_MULTIMACH_TARGET_SYS to point to TUNE_PKGARCH, not PACKAGE_ARCH,
+# as the latter could be set to MACHINE_ARCH
+REAL_MULTIMACH_TARGET_SYS = "${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
+
+PID = "${@os.getpid()}"
+
+EXCLUDE_FROM_WORLD = "1"
+
+SDK_PACKAGING_FUNC ?= "create_shar"
+SDK_PRE_INSTALL_COMMAND ?= ""
+SDK_POST_INSTALL_COMMAND ?= ""
+SDK_RELOCATE_AFTER_INSTALL ?= "1"
+
+SDKEXTPATH ?= "~/${@d.getVar('DISTRO', True)}_sdk"
+SDK_TITLE ?= "${@d.getVar('DISTRO_NAME', True) or d.getVar('DISTRO', True)} SDK"
+
+SDK_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.target.manifest"
+SDK_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"
+python write_target_sdk_manifest () {
+ from oe.sdk import sdk_list_installed_packages
+ from oe.utils import format_pkg_list
+ sdkmanifestdir = os.path.dirname(d.getVar("SDK_TARGET_MANIFEST", True))
+ pkgs = sdk_list_installed_packages(d, True)
+ if not os.path.exists(sdkmanifestdir):
+ bb.utils.mkdirhier(sdkmanifestdir)
+ with open(d.getVar('SDK_TARGET_MANIFEST', True), 'w') as output:
+ output.write(format_pkg_list(pkgs, 'ver'))
+}
+
+python write_host_sdk_manifest () {
+ from oe.sdk import sdk_list_installed_packages
+ from oe.utils import format_pkg_list
+ sdkmanifestdir = os.path.dirname(d.getVar("SDK_HOST_MANIFEST", True))
+ pkgs = sdk_list_installed_packages(d, False)
+ if not os.path.exists(sdkmanifestdir):
+ bb.utils.mkdirhier(sdkmanifestdir)
+ with open(d.getVar('SDK_HOST_MANIFEST', True), 'w') as output:
+ output.write(format_pkg_list(pkgs, 'ver'))
+}
+
+POPULATE_SDK_POST_TARGET_COMMAND_append = " write_target_sdk_manifest ; "
+POPULATE_SDK_POST_HOST_COMMAND_append = " write_host_sdk_manifest; "
+SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC};' if '${SDK_PACKAGING_FUNC}' else ''}"
+SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; tar_sdk; ${SDK_PACKAGING_COMMAND} "
+
+# Some archs override this; we need the nativesdk version. It turns out this
+# is hard to get from the datastore due to TRANSLATED_TARGET_ARCH
+# manipulation.
+SDK_OLDEST_KERNEL = "2.6.32"
+
+fakeroot python do_populate_sdk() {
+ from oe.sdk import populate_sdk
+ from oe.manifest import create_manifest, Manifest
+
+ pn = d.getVar('PN', True)
+ runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", pn, d)
+ runtime_mapping_rename("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY", pn, d)
+
+ ld = bb.data.createCopy(d)
+ ld.setVar("PKGDATA_DIR", "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}/pkgdata")
+ runtime_mapping_rename("TOOLCHAIN_HOST_TASK", pn, ld)
+ runtime_mapping_rename("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", pn, ld)
+ d.setVar("TOOLCHAIN_HOST_TASK", ld.getVar("TOOLCHAIN_HOST_TASK", True))
+ d.setVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", ld.getVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", True))
+
+ # create target/host SDK manifests
+ create_manifest(d, manifest_dir=d.getVar('SDK_DIR', True),
+ manifest_type=Manifest.MANIFEST_TYPE_SDK_HOST)
+ create_manifest(d, manifest_dir=d.getVar('SDK_DIR', True),
+ manifest_type=Manifest.MANIFEST_TYPE_SDK_TARGET)
+
+ populate_sdk(d)
+}
+
+fakeroot create_sdk_files() {
+ cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
+
+ # Replace the ##DEFAULT_INSTALL_DIR## with the correct pattern.
+ # Escape special characters like '+' and '.' in the SDKPATH
+ escaped_sdkpath=$(echo ${SDKPATH} |sed -e "s:[\+\.]:\\\\\\\\\0:g")
+ sed -i -e "s:##DEFAULT_INSTALL_DIR##:$escaped_sdkpath:" ${SDK_OUTPUT}/${SDKPATH}/relocate_sdk.py
+}
+
+python check_sdk_sysroots() {
+ # Fails build if there are broken or dangling symlinks in SDK sysroots
+
+ if d.getVar('CHECK_SDK_SYSROOTS', True) != '1':
+ # disabled, bail out
+ return
+
+ def norm_path(path):
+ return os.path.abspath(path)
+
+ # Get scan root
+ SCAN_ROOT = norm_path("${SDK_OUTPUT}/${SDKPATH}/sysroots/")
+
+ bb.note('Checking SDK sysroots at ' + SCAN_ROOT)
+
+ def check_symlink(linkPath):
+ if not os.path.islink(linkPath):
+ return
+
+ linkDirPath = os.path.dirname(linkPath)
+
+ targetPath = os.readlink(linkPath)
+ if not os.path.isabs(targetPath):
+ targetPath = os.path.join(linkDirPath, targetPath)
+ targetPath = norm_path(targetPath)
+
+ if SCAN_ROOT != os.path.commonprefix( [SCAN_ROOT, targetPath] ):
+ bb.error("Escaping symlink {0!s} --> {1!s}".format(linkPath, targetPath))
+ return
+
+ if not os.path.exists(targetPath):
+ bb.error("Broken symlink {0!s} --> {1!s}".format(linkPath, targetPath))
+ return
+
+ if os.path.isdir(targetPath):
+ dir_walk(targetPath)
+
+ def walk_error_handler(e):
+ bb.error(str(e))
+
+ def dir_walk(rootDir):
+ for dirPath,subDirEntries,fileEntries in os.walk(rootDir, followlinks=False, onerror=walk_error_handler):
+ entries = subDirEntries + fileEntries
+ for e in entries:
+ ePath = os.path.join(dirPath, e)
+ check_symlink(ePath)
+
+ # start
+ dir_walk(SCAN_ROOT)
+}
+
+SDKTAROPTS = "--owner=root --group=root"
+
+fakeroot tar_sdk() {
+ # Package it up
+ mkdir -p ${SDK_DEPLOY}
+ cd ${SDK_OUTPUT}/${SDKPATH}
+ tar ${SDKTAROPTS} -cf - . | pixz > ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
+}
+
+fakeroot create_shar() {
+ # copy in the template shar extractor script
+ cp ${COREBASE}/meta/files/toolchain-shar-extract.sh ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
+
+ rm -f ${T}/pre_install_command ${T}/post_install_command
+
+ if [ ${SDK_RELOCATE_AFTER_INSTALL} -eq 1 ] ; then
+ cp ${COREBASE}/meta/files/toolchain-shar-relocate.sh ${T}/post_install_command
+ fi
+ cat << "EOF" >> ${T}/pre_install_command
+${SDK_PRE_INSTALL_COMMAND}
+EOF
+
+ cat << "EOF" >> ${T}/post_install_command
+${SDK_POST_INSTALL_COMMAND}
+EOF
+ sed -i -e '/@SDK_PRE_INSTALL_COMMAND@/r ${T}/pre_install_command' \
+ -e '/@SDK_POST_INSTALL_COMMAND@/r ${T}/post_install_command' \
+ ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
+
+ # substitute variables
+ sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \
+ -e 's#@SDKPATH@#${SDKPATH}#g' \
+ -e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \
+ -e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \
+ -e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \
+ -e 's#@SDK_TITLE@#${SDK_TITLE}#g' \
+ -e 's#@SDK_VERSION@#${SDK_VERSION}#g' \
+ -e '/@SDK_PRE_INSTALL_COMMAND@/d' \
+ -e '/@SDK_POST_INSTALL_COMMAND@/d' \
+ ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
+
+ # add execution permission
+ chmod +x ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
+
+ # append the SDK tarball
+ cat ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.xz >> ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
+
+ # delete the old tarball, we don't need it anymore
+ rm ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
+}
+
+populate_sdk_log_check() {
+ for target in $*
+ do
+ lf_path="`dirname ${BB_LOGFILE}`/log.do_$target.${PID}"
+
+ echo "log_check: Using $lf_path as logfile"
+
+ if test -e "$lf_path"
+ then
+ ${IMAGE_PKGTYPE}_log_check $target $lf_path
+ else
+ echo "Cannot find logfile [$lf_path]"
+ fi
+ echo "Logfile is clean"
+ done
+}
+
+def sdk_command_variables(d):
+ return ['OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','POPULATE_SDK_POST_HOST_COMMAND','POPULATE_SDK_POST_TARGET_COMMAND','SDK_POSTPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS',
+ 'RPM_POSTPROCESS_COMMANDS']
+
+def sdk_variables(d):
+ variables = ['BUILD_IMAGES_FROM_FEEDS','SDK_OS','SDK_OUTPUT','SDKPATHNATIVE','SDKTARGETSYSROOT','SDK_DIR','SDK_VENDOR','SDKIMAGE_INSTALL_COMPLEMENTARY','SDK_PACKAGE_ARCHS','SDK_OUTPUT',
+ 'SDKTARGETSYSROOT','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS','PACKAGE_ARCHS',
+ 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI']
+ variables.extend(sdk_command_variables(d))
+ return " ".join(variables)
+
+do_populate_sdk[vardeps] += "${@sdk_variables(d)}"
+
+do_populate_sdk[file-checksums] += "${COREBASE}/meta/files/toolchain-shar-relocate.sh:True \
+ ${COREBASE}/meta/files/toolchain-shar-extract.sh:True"
+
+do_populate_sdk[dirs] = "${PKGDATA_DIR} ${TOPDIR}"
+do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS', True).split()])} ${@d.getVarFlag('do_rootfs', 'depends', False)}"
+do_populate_sdk[rdepends] = "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_RDEPENDS', True).split()])}"
+do_populate_sdk[recrdeptask] += "do_packagedata do_package_write_rpm do_package_write_ipk do_package_write_deb"
+addtask populate_sdk
diff --git a/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass b/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass
new file mode 100644
index 000000000..87518d176
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass
@@ -0,0 +1,487 @@
+# Extensible SDK
+
+inherit populate_sdk_base
+
+# NOTE: normally you cannot use task overrides for this kind of thing - this
+# only works because of get_sdk_ext_rdepends()
+
+TOOLCHAIN_HOST_TASK_task-populate-sdk-ext = " \
+ meta-environment-extsdk-${MACHINE} \
+ "
+
+TOOLCHAIN_TARGET_TASK_task-populate-sdk-ext = ""
+
+SDK_RDEPENDS_append_task-populate-sdk-ext = " ${SDK_TARGETS}"
+
+SDK_RELOCATE_AFTER_INSTALL_task-populate-sdk-ext = "0"
+
+SDK_EXT = ""
+SDK_EXT_task-populate-sdk-ext = "-ext"
+
+# Options are full or minimal
+SDK_EXT_TYPE ?= "full"
+
+SDK_RECRDEP_TASKS ?= ""
+
+SDK_LOCAL_CONF_WHITELIST ?= ""
+SDK_LOCAL_CONF_BLACKLIST ?= "CONF_VERSION \
+ BB_NUMBER_THREADS \
+ PARALLEL_MAKE \
+ PRSERV_HOST \
+ SSTATE_MIRRORS \
+ "
+SDK_INHERIT_BLACKLIST ?= "buildhistory icecc"
+SDK_UPDATE_URL ?= ""
+
+SDK_TARGETS ?= "${PN}"
+
+def get_sdk_install_targets(d):
+ sdk_install_targets = ''
+ if d.getVar('SDK_EXT_TYPE', True) != 'minimal':
+ sdk_install_targets = d.getVar('SDK_TARGETS', True)
+
+ depd = d.getVar('BB_TASKDEPDATA', False)
+ for v in depd.itervalues():
+ if v[1] == 'do_image_complete':
+ if v[0] not in sdk_install_targets:
+ sdk_install_targets += ' {}'.format(v[0])
+
+ if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1':
+ sdk_install_targets += ' meta-world-pkgdata:do_allpackagedata'
+
+ return sdk_install_targets
+
+get_sdk_install_targets[vardepsexclude] = "BB_TASKDEPDATA"
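The install set computed above can be steered from configuration; a hedged sketch using the variables read by get_sdk_install_targets():

    SDK_EXT_TYPE = "minimal"        # drop SDK_TARGETS from the install set
    SDK_INCLUDE_PKGDATA = "1"       # add meta-world-pkgdata:do_allpackagedata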
+
+OE_INIT_ENV_SCRIPT ?= "oe-init-build-env"
+
+# The files from COREBASE that you want preserved in the COREBASE copied
+# into the SDK. This allows someone's own setup scripts and untracked files
+# in COREBASE to be preserved as well.
+COREBASE_FILES ?= " \
+ oe-init-build-env \
+ oe-init-build-env-memres \
+ scripts \
+ LICENSE \
+ .templateconf \
+"
+
+SDK_DIR_task-populate-sdk-ext = "${WORKDIR}/sdk-ext"
+B_task-populate-sdk-ext = "${SDK_DIR}"
+TOOLCHAINEXT_OUTPUTNAME = "${SDK_NAME}-toolchain-ext-${SDK_VERSION}"
+TOOLCHAIN_OUTPUTNAME_task-populate-sdk-ext = "${TOOLCHAINEXT_OUTPUTNAME}"
+
+SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
+SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
+
+SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME', True) or d.getVar('DISTRO', True)} Extensible SDK"
+
+python copy_buildsystem () {
+ import re
+ import shutil
+ import glob
+ import oe.copy_buildsystem
+
+ oe_init_env_script = d.getVar('OE_INIT_ENV_SCRIPT', True)
+
+ conf_bbpath = ''
+ conf_initpath = ''
+ core_meta_subdir = ''
+
+ # Copy in all metadata layers + bitbake (as repositories)
+ buildsystem = oe.copy_buildsystem.BuildSystem('extensible SDK', d)
+ baseoutpath = d.getVar('SDK_OUTPUT', True) + '/' + d.getVar('SDKPATH', True)
+
+ # Determine if we're building a derivative extensible SDK (from devtool build-sdk)
+ derivative = (d.getVar('SDK_DERIVATIVE', True) or '') == '1'
+ if derivative:
+ workspace_name = 'orig-workspace'
+ else:
+ workspace_name = None
+ layers_copied = buildsystem.copy_bitbake_and_layers(baseoutpath + '/layers', workspace_name)
+
+ sdkbblayers = []
+ corebase = os.path.basename(d.getVar('COREBASE', True))
+ for layer in layers_copied:
+ if corebase == os.path.basename(layer):
+ conf_bbpath = os.path.join('layers', layer, 'bitbake')
+ else:
+ sdkbblayers.append(layer)
+
+ for path in os.listdir(baseoutpath + '/layers'):
+ relpath = os.path.join('layers', path, oe_init_env_script)
+ if os.path.exists(os.path.join(baseoutpath, relpath)):
+ conf_initpath = relpath
+
+ relpath = os.path.join('layers', path, 'scripts', 'devtool')
+ if os.path.exists(os.path.join(baseoutpath, relpath)):
+ scriptrelpath = os.path.dirname(relpath)
+
+ relpath = os.path.join('layers', path, 'meta')
+ if os.path.exists(os.path.join(baseoutpath, relpath, 'lib', 'oe')):
+ core_meta_subdir = relpath
+
+ d.setVar('oe_init_build_env_path', conf_initpath)
+ d.setVar('scriptrelpath', scriptrelpath)
+
+ # Write out config file for devtool
+ import ConfigParser
+ config = ConfigParser.SafeConfigParser()
+ config.add_section('General')
+ config.set('General', 'bitbake_subdir', conf_bbpath)
+ config.set('General', 'init_path', conf_initpath)
+ config.set('General', 'core_meta_subdir', core_meta_subdir)
+ config.add_section('SDK')
+ config.set('SDK', 'sdk_targets', d.getVar('SDK_TARGETS', True))
+ updateurl = d.getVar('SDK_UPDATE_URL', True)
+ if updateurl:
+ config.set('SDK', 'updateserver', updateurl)
+ bb.utils.mkdirhier(os.path.join(baseoutpath, 'conf'))
+ with open(os.path.join(baseoutpath, 'conf', 'devtool.conf'), 'w') as f:
+ config.write(f)
+
+ unlockedsigs = os.path.join(baseoutpath, 'conf', 'unlocked-sigs.inc')
+ with open(unlockedsigs, 'w') as f:
+ pass
+
+ # Create a layer for new recipes / appends
+ bbpath = d.getVar('BBPATH', True)
+ bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')])
+
+ # Create bblayers.conf
+ bb.utils.mkdirhier(baseoutpath + '/conf')
+ with open(baseoutpath + '/conf/bblayers.conf', 'w') as f:
+ f.write('# WARNING: this configuration has been automatically generated and in\n')
+ f.write('# most cases should not be edited. If you need more flexibility than\n')
+ f.write('# this configuration provides, it is strongly suggested that you set\n')
+ f.write('# up a proper instance of the full build system and use that instead.\n\n')
+
+ # LCONF_VERSION may not be set, for example when using meta-poky
+ # so don't error if it isn't found
+ lconf_version = d.getVar('LCONF_VERSION', False)
+ if lconf_version is not None:
+ f.write('LCONF_VERSION = "%s"\n\n' % lconf_version)
+
+ f.write('BBPATH = "$' + '{TOPDIR}"\n')
+ f.write('SDKBASEMETAPATH = "$' + '{TOPDIR}"\n')
+ f.write('BBLAYERS := " \\\n')
+ for layerrelpath in sdkbblayers:
+ f.write(' $' + '{SDKBASEMETAPATH}/layers/%s \\\n' % layerrelpath)
+ f.write(' $' + '{SDKBASEMETAPATH}/workspace \\\n')
+ f.write(' "\n')
+
+ env_whitelist = (d.getVar('BB_ENV_EXTRAWHITE', True) or '').split()
+ env_whitelist_values = {}
+
+ # Create local.conf
+ builddir = d.getVar('TOPDIR', True)
+ if derivative:
+ shutil.copyfile(builddir + '/conf/local.conf', baseoutpath + '/conf/local.conf')
+ else:
+ local_conf_whitelist = (d.getVar('SDK_LOCAL_CONF_WHITELIST', True) or '').split()
+ local_conf_blacklist = (d.getVar('SDK_LOCAL_CONF_BLACKLIST', True) or '').split()
+ def handle_var(varname, origvalue, op, newlines):
+ if varname in local_conf_blacklist or (origvalue.strip().startswith('/') and not varname in local_conf_whitelist):
+ newlines.append('# Removed original setting of %s\n' % varname)
+ return None, op, 0, True
+ else:
+ if varname in env_whitelist:
+ env_whitelist_values[varname] = origvalue
+ return origvalue, op, 0, True
+ varlist = ['[^#=+ ]*']
+ with open(builddir + '/conf/local.conf', 'r') as f:
+ oldlines = f.readlines()
+ (updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var)
+
+ with open(baseoutpath + '/conf/local.conf', 'w') as f:
+ f.write('# WARNING: this configuration has been automatically generated and in\n')
+ f.write('# most cases should not be edited. If you need more flexibility than\n')
+ f.write('# this configuration provides, it is strongly suggested that you set\n')
+ f.write('# up a proper instance of the full build system and use that instead.\n\n')
+ for line in newlines:
+ if line.strip() and not line.startswith('#'):
+ f.write(line)
+ # Write a newline just in case there's none at the end of the original
+ f.write('\n')
+
+ f.write('INHERIT += "%s"\n\n' % 'uninative')
+ f.write('CONF_VERSION = "%s"\n\n' % d.getVar('CONF_VERSION', False))
+
+ # Some classes are not suitable for SDK, remove them from INHERIT
+ f.write('INHERIT_remove = "%s"\n' % d.getVar('SDK_INHERIT_BLACKLIST', False))
+
+ # Bypass the default connectivity check if any
+ f.write('CONNECTIVITY_CHECK_URIS = ""\n\n')
+
+ # This warning will come out if reverse dependencies for a task
+ # don't have sstate as well as the task itself. We already know
+ # this will be the case for the extensible sdk, so turn off the
+ # warning.
+ f.write('SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK = "none"\n\n')
+
+ # Error if the sigs in the locked-signature file don't match
+ # the sig computed from the metadata.
+ f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "error"\n\n')
+
+ # Hide the config information from bitbake output (since it's fixed within the SDK)
+ f.write('BUILDCFG_HEADER = ""\n')
+
+ # Allow additional config through sdk-extra.conf
+ fn = bb.cookerdata.findConfigFile('sdk-extra.conf', d)
+ if fn:
+ with open(fn, 'r') as xf:
+ for line in xf:
+ f.write(line)
+
+ # If you define a sdk_extraconf() function then it can contain additional config
+ # (Though this is awkward; sdk-extra.conf should probably be used instead)
+ extraconf = (d.getVar('sdk_extraconf', True) or '').strip()
+ if extraconf:
+ # Strip off any leading / trailing spaces
+ for line in extraconf.splitlines():
+ f.write(line.strip() + '\n')
+
+ f.write('require conf/locked-sigs.inc\n')
+ f.write('require conf/unlocked-sigs.inc\n')
+
+ if os.path.exists(builddir + '/conf/auto.conf'):
+ if derivative:
+ shutil.copyfile(builddir + '/conf/auto.conf', baseoutpath + '/conf/auto.conf')
+ else:
+ with open(builddir + '/conf/auto.conf', 'r') as f:
+ oldlines = f.readlines()
+ (updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var)
+ with open(baseoutpath + '/conf/auto.conf', 'w') as f:
+ f.write('# WARNING: this configuration has been automatically generated and in\n')
+ f.write('# most cases should not be edited. If you need more flexibility than\n')
+ f.write('# this configuration provides, it is strongly suggested that you set\n')
+ f.write('# up a proper instance of the full build system and use that instead.\n\n')
+ for line in newlines:
+ if line.strip() and not line.startswith('#'):
+ f.write(line)
+
+ # Ensure any variables set from the external environment (by way of
+ # BB_ENV_EXTRAWHITE) are set in the SDK's configuration
+ extralines = []
+ for name, value in env_whitelist_values.iteritems():
+ actualvalue = d.getVar(name, True) or ''
+ if value != actualvalue:
+ extralines.append('%s = "%s"\n' % (name, actualvalue))
+ if extralines:
+ with open(baseoutpath + '/conf/local.conf', 'a') as f:
+ f.write('\n')
+ f.write('# Extra settings from environment:\n')
+ for line in extralines:
+ f.write(line)
+ f.write('\n')
+
+ # Filter the locked signatures file to just the sstate tasks we are interested in
+ excluded_targets = d.getVar('SDK_TARGETS', True)
+ sigfile = d.getVar('WORKDIR', True) + '/locked-sigs.inc'
+ lockedsigs_pruned = baseoutpath + '/conf/locked-sigs.inc'
+ oe.copy_buildsystem.prune_lockedsigs([],
+ excluded_targets.split(),
+ sigfile,
+ lockedsigs_pruned)
+
+ sstate_out = baseoutpath + '/sstate-cache'
+ bb.utils.remove(sstate_out, True)
+ # uninative.bbclass sets NATIVELSBSTRING to 'universal'
+ fixedlsbstring = 'universal'
+
+ # Add packagedata if enabled
+ if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1':
+ lockedsigs_base = d.getVar('WORKDIR', True) + '/locked-sigs-base.inc'
+ lockedsigs_copy = d.getVar('WORKDIR', True) + '/locked-sigs-copy.inc'
+ shutil.move(lockedsigs_pruned, lockedsigs_base)
+ oe.copy_buildsystem.merge_lockedsigs(['do_packagedata'],
+ lockedsigs_base,
+ d.getVar('STAGING_DIR_HOST', True) + '/world-pkgdata/locked-sigs-pkgdata.inc',
+ lockedsigs_pruned,
+ lockedsigs_copy)
+
+ if d.getVar('SDK_EXT_TYPE', True) == 'minimal':
+ if derivative:
+ # Assume the user is not going to set up an additional sstate
+ # mirror, thus we need to copy the additional artifacts (from
+ # workspace recipes) into the derivative SDK
+ lockedsigs_orig = d.getVar('TOPDIR', True) + '/conf/locked-sigs.inc'
+ if os.path.exists(lockedsigs_orig):
+ lockedsigs_extra = d.getVar('WORKDIR', True) + '/locked-sigs-extra.inc'
+ oe.copy_buildsystem.merge_lockedsigs(None,
+ lockedsigs_orig,
+ lockedsigs_pruned,
+ None,
+ lockedsigs_extra)
+ oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_extra,
+ d.getVar('SSTATE_DIR', True),
+ sstate_out, d,
+ fixedlsbstring)
+ else:
+ oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_pruned,
+ d.getVar('SSTATE_DIR', True),
+ sstate_out, d,
+ fixedlsbstring)
+
+ # We don't need sstate do_package files
+ for root, dirs, files in os.walk(sstate_out):
+ for name in files:
+ if name.endswith("_package.tgz"):
+ f = os.path.join(root, name)
+ os.remove(f)
+
+ # Write manifest file
+ # Note: at the moment we cannot include the env setup script here to keep
+ # it updated, since it gets modified during SDK installation (see
+    # sdk_ext_postinst() below); thus the checksum we take here would always
+ # be different.
+ manifest_file_list = ['conf/*']
+ manifest_file = os.path.join(baseoutpath, 'conf', 'sdk-conf-manifest')
+ with open(manifest_file, 'w') as f:
+ for item in manifest_file_list:
+ for fn in glob.glob(os.path.join(baseoutpath, item)):
+ if fn == manifest_file:
+ continue
+ chksum = bb.utils.sha256_file(fn)
+ f.write('%s\t%s\n' % (chksum, os.path.relpath(fn, baseoutpath)))
+}
+
+def extsdk_get_buildtools_filename(d):
+ return '*-buildtools-nativesdk-standalone-*.sh'
+
+install_tools() {
+ install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}
+ lnr ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath}/devtool ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/devtool
+ lnr ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath}/recipetool ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/recipetool
+ touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase
+
+ localconf=${SDK_OUTPUT}/${SDKPATH}/conf/local.conf
+
+ # find latest buildtools-tarball and install it
+ buildtools_path=`ls -t1 ${SDK_DEPLOY}/${@extsdk_get_buildtools_filename(d)} | head -n1`
+ install $buildtools_path ${SDK_OUTPUT}/${SDKPATH}
+
+ # For now this is where uninative.bbclass expects the tarball
+ chksum=`sha256sum ${SDK_DEPLOY}/${BUILD_ARCH}-nativesdk-libc.tar.bz2 | cut -f 1 -d ' '`
+ install -d ${SDK_OUTPUT}/${SDKPATH}/downloads/uninative/$chksum/
+ install ${SDK_DEPLOY}/${BUILD_ARCH}-nativesdk-libc.tar.bz2 ${SDK_OUTPUT}/${SDKPATH}/downloads/uninative/$chksum/
+ echo "UNINATIVE_CHECKSUM[${BUILD_ARCH}] = '$chksum'" >> ${SDK_OUTPUT}/${SDKPATH}/conf/local.conf
+
+ install -m 0644 ${COREBASE}/meta/files/ext-sdk-prepare.py ${SDK_OUTPUT}/${SDKPATH}
+}
+do_populate_sdk_ext[file-checksums] += "${COREBASE}/meta/files/ext-sdk-prepare.py:True"
+
+# Since bitbake won't run as root, it doesn't make sense to try to install
+# the extensible SDK as root.
+sdk_ext_preinst() {
+ if [ "`id -u`" = "0" ]; then
+ echo "ERROR: The extensible sdk cannot be installed as root."
+ exit 1
+ fi
+ SDK_EXTENSIBLE="1"
+ if [ "$publish" = "1" ] ; then
+ EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=ext-sdk-prepare.py"
+ if [ "${SDK_EXT_TYPE}" = "minimal" ] ; then
+ EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=sstate-cache"
+ fi
+ fi
+}
+SDK_PRE_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_preinst}"
+
+# FIXME this preparation should be done as part of the SDK construction
+sdk_ext_postinst() {
+ printf "\nExtracting buildtools...\n"
+ cd $target_sdk_dir
+ printf "buildtools\ny" | ./*buildtools-nativesdk-standalone* > /dev/null || ( printf 'ERROR: buildtools installation failed\n' ; exit 1 )
+
+ # Delete the buildtools tar file since it won't be used again
+ rm ./*buildtools-nativesdk-standalone*.sh -f
+
+ # Make sure when the user sets up the environment, they also get
+ # the buildtools-tarball tools in their path.
+ env_setup_script="$target_sdk_dir/environment-setup-${REAL_MULTIMACH_TARGET_SYS}"
+ echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script
+
+    # Allow the bitbake environment setup to be run as part of this SDK.
+ echo "export OE_SKIP_SDK_CHECK=1" >> $env_setup_script
+
+    # Something of a hack, but we need this in the path only for devtool,
+    # so put it at the end of $PATH.
+ echo "export PATH=$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH" >> $env_setup_script
+
+ echo "printf 'SDK environment now set up; additionally you may now run devtool to perform development tasks.\nRun devtool --help for further details.\n'" >> $env_setup_script
+
+ # Warn if trying to use external bitbake and the ext SDK together
+ echo "(which bitbake > /dev/null 2>&1 && echo 'WARNING: attempting to use the extensible SDK in an environment set up to run bitbake - this may lead to unexpected results. Please source this script in a new shell session instead.') || true" >> $env_setup_script
+
+ if [ "$prepare_buildsystem" != "no" ]; then
+ printf "Preparing build system...\n"
+        # dash, which is /bin/sh on Ubuntu, will not preserve the current
+        # working directory when first run, nor will it set $1 when sourcing
+        # a script. That is why this has to look so ugly.
+ LOGFILE="$target_sdk_dir/preparing_build_system.log"
+ sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python $target_sdk_dir/ext-sdk-prepare.py '${SDK_INSTALL_TARGETS}' >> $LOGFILE 2>&1" || { echo "ERROR: SDK preparation failed: see $LOGFILE"; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
+ rm $target_sdk_dir/ext-sdk-prepare.py
+ fi
+ echo done
+}
+
+SDK_POST_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_postinst}"
+
+SDK_POSTPROCESS_COMMAND_prepend_task-populate-sdk-ext = "copy_buildsystem; install_tools; "
+
+SDK_INSTALL_TARGETS = ""
+fakeroot python do_populate_sdk_ext() {
+ # FIXME hopefully we can remove this restriction at some point, but uninative
+ # currently forces this upon us
+ if d.getVar('SDK_ARCH', True) != d.getVar('BUILD_ARCH', True):
+ bb.fatal('The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is set to %s (likely via setting SDKMACHINE) which is different from the architecture of the build machine (%s). Unable to continue.' % (d.getVar('SDK_ARCH', True), d.getVar('BUILD_ARCH', True)))
+
+ d.setVar('SDK_INSTALL_TARGETS', get_sdk_install_targets(d))
+
+ bb.build.exec_func("do_populate_sdk", d)
+}
+
+def get_ext_sdk_depends(d):
+ return d.getVarFlag('do_rootfs', 'depends', True) + ' ' + d.getVarFlag('do_build', 'depends', True)
+
+python do_sdk_depends() {
+ # We have to do this separately in its own task so we avoid recursing into
+ # dependencies we don't need to (e.g. buildtools-tarball) and bringing those
+ # into the SDK's sstate-cache
+ import oe.copy_buildsystem
+ sigfile = d.getVar('WORKDIR', True) + '/locked-sigs.inc'
+ oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
+}
+addtask sdk_depends
+
+do_sdk_depends[dirs] = "${WORKDIR}"
+do_sdk_depends[depends] = "${@get_ext_sdk_depends(d)}"
+do_sdk_depends[recrdeptask] = "${@d.getVarFlag('do_populate_sdk', 'recrdeptask', False)}"
+do_sdk_depends[recrdeptask] += "do_populate_lic do_package_qa do_populate_sysroot do_deploy ${SDK_RECRDEP_TASKS}"
+do_sdk_depends[rdepends] = "${@get_sdk_ext_rdepends(d)}"
+
+def get_sdk_ext_rdepends(d):
+ localdata = d.createCopy()
+ localdata.appendVar('OVERRIDES', ':task-populate-sdk-ext')
+ bb.data.update_data(localdata)
+ return localdata.getVarFlag('do_populate_sdk', 'rdepends', True)
+
+do_populate_sdk_ext[dirs] = "${@d.getVarFlag('do_populate_sdk', 'dirs', False)}"
+
+do_populate_sdk_ext[depends] = "${@d.getVarFlag('do_populate_sdk', 'depends', False)} \
+ buildtools-tarball:do_populate_sdk uninative-tarball:do_populate_sdk \
+ ${@'meta-world-pkgdata:do_collect_packagedata' if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1' else ''}"
+
+do_populate_sdk_ext[rdepends] += "${@' '.join([x + ':do_build' for x in d.getVar('SDK_TARGETS', True).split()])}"
+
+# Make sure code changes can result in rebuild
+do_populate_sdk_ext[vardeps] += "copy_buildsystem \
+ sdk_ext_postinst"
+
+# Since any change in the metadata of any layer should cause a rebuild of the
+# SDK (since the layers are put in the SDK), set the task to nostamp so it
+# always runs.
+do_populate_sdk_ext[nostamp] = "1"
+
+addtask populate_sdk_ext after do_sdk_depends
diff --git a/import-layers/yocto-poky/meta/classes/prexport.bbclass b/import-layers/yocto-poky/meta/classes/prexport.bbclass
new file mode 100644
index 000000000..809ec1034
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/prexport.bbclass
@@ -0,0 +1,59 @@
+PRSERV_DUMPOPT_VERSION = "${PRAUTOINX}"
+PRSERV_DUMPOPT_PKGARCH = ""
+PRSERV_DUMPOPT_CHECKSUM = ""
+PRSERV_DUMPOPT_COL = "0"
+
+PRSERV_DUMPDIR ??= "${LOG_DIR}/db"
+PRSERV_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv.inc"
+
+python prexport_handler () {
+ import bb.event
+ if not e.data or bb.data.inherits_class('native', e.data) or \
+ bb.data.inherits_class('crosssdk', e.data):
+ return
+
+ if isinstance(e, bb.event.RecipeParsed):
+ import oe.prservice
+ #get all PR values for the current PRAUTOINX
+ ver = e.data.getVar('PRSERV_DUMPOPT_VERSION', True)
+ ver = ver.replace('%','-')
+ retval = oe.prservice.prserv_dump_db(e.data)
+ if not retval:
+ bb.fatal("prexport_handler: export failed!")
+ (metainfo, datainfo) = retval
+ if not datainfo:
+ bb.note("prexport_handler: No AUTOPR values found for %s" % ver)
+ return
+ oe.prservice.prserv_export_tofile(e.data, None, datainfo, False)
+ if 'AUTOINC' in ver:
+ import re
+ srcpv = bb.fetch2.get_srcrev(e.data)
+ base_ver = "AUTOINC-%s" % ver[:ver.find(srcpv)]
+ e.data.setVar('PRSERV_DUMPOPT_VERSION', base_ver)
+ retval = oe.prservice.prserv_dump_db(e.data)
+ if not retval:
+ bb.fatal("prexport_handler: export failed!")
+ (metainfo, datainfo) = retval
+ oe.prservice.prserv_export_tofile(e.data, None, datainfo, False)
+ elif isinstance(e, bb.event.ParseStarted):
+ import bb.utils
+ import oe.prservice
+ oe.prservice.prserv_check_avail(e.data)
+ #remove dumpfile
+ bb.utils.remove(e.data.getVar('PRSERV_DUMPFILE', True))
+ elif isinstance(e, bb.event.ParseCompleted):
+ import oe.prservice
+ #dump meta info of tables
+ d = e.data.createCopy()
+ d.setVar('PRSERV_DUMPOPT_COL', "1")
+ retval = oe.prservice.prserv_dump_db(d)
+ if not retval:
+ bb.error("prexport_handler: export failed!")
+ return
+ (metainfo, datainfo) = retval
+ oe.prservice.prserv_export_tofile(d, metainfo, None, True)
+
+}
+
+addhandler prexport_handler
+prexport_handler[eventmask] = "bb.event.RecipeParsed bb.event.ParseStarted bb.event.ParseCompleted"
diff --git a/import-layers/yocto-poky/meta/classes/primport.bbclass b/import-layers/yocto-poky/meta/classes/primport.bbclass
new file mode 100644
index 000000000..8ed45f03f
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/primport.bbclass
@@ -0,0 +1,21 @@
+python primport_handler () {
+ import bb.event
+ if not e.data:
+ return
+
+ if isinstance(e, bb.event.ParseCompleted):
+ import oe.prservice
+ #import all exported AUTOPR values
+ imported = oe.prservice.prserv_import_db(e.data)
+ if imported is None:
+ bb.fatal("import failed!")
+
+ for (version, pkgarch, checksum, value) in imported:
+ bb.note("imported (%s,%s,%s,%d)" % (version, pkgarch, checksum, value))
+ elif isinstance(e, bb.event.ParseStarted):
+ import oe.prservice
+ oe.prservice.prserv_check_avail(e.data)
+}
+
+addhandler primport_handler
+primport_handler[eventmask] = "bb.event.ParseCompleted bb.event.ParseStarted"
diff --git a/import-layers/yocto-poky/meta/classes/ptest-gnome.bbclass b/import-layers/yocto-poky/meta/classes/ptest-gnome.bbclass
new file mode 100644
index 000000000..478a33474
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/ptest-gnome.bbclass
@@ -0,0 +1,8 @@
+inherit ptest
+
+EXTRA_OECONF_append = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}"
+
+FILES_${PN}-ptest += "${libexecdir}/installed-tests/ \
+ ${datadir}/installed-tests/"
+
+RDEPENDS_${PN}-ptest += "gnome-desktop-testing"
diff --git a/import-layers/yocto-poky/meta/classes/ptest.bbclass b/import-layers/yocto-poky/meta/classes/ptest.bbclass
new file mode 100644
index 000000000..fa3561e62
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/ptest.bbclass
@@ -0,0 +1,67 @@
+SUMMARY_${PN}-ptest ?= "${SUMMARY} - Package test files"
+DESCRIPTION_${PN}-ptest ?= "${DESCRIPTION} \
+This package contains a test directory ${PTEST_PATH} for package test purposes."
+
+PTEST_PATH ?= "${libdir}/${PN}/ptest"
+FILES_${PN}-ptest = "${PTEST_PATH}"
+SECTION_${PN}-ptest = "devel"
+ALLOW_EMPTY_${PN}-ptest = "1"
+PTEST_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '1', '0', d)}"
+PTEST_ENABLED_class-native = ""
+PTEST_ENABLED_class-nativesdk = ""
+PTEST_ENABLED_class-cross-canadian = ""
+RDEPENDS_${PN}-ptest_class-native = ""
+RDEPENDS_${PN}-ptest_class-nativesdk = ""
+RRECOMMENDS_${PN}-ptest += "ptest-runner"
+
+PACKAGES =+ "${@bb.utils.contains('PTEST_ENABLED', '1', '${PN}-ptest', '', d)}"
+
+do_configure_ptest() {
+ :
+}
+
+do_configure_ptest_base() {
+ do_configure_ptest
+}
+
+do_compile_ptest() {
+ :
+}
+
+do_compile_ptest_base() {
+ do_compile_ptest
+}
+
+do_install_ptest() {
+ :
+}
+
+do_install_ptest_base() {
+ if [ -f ${WORKDIR}/run-ptest ]; then
+ install -D ${WORKDIR}/run-ptest ${D}${PTEST_PATH}/run-ptest
+ fi
+ if grep -q install-ptest: Makefile; then
+ oe_runmake DESTDIR=${D}${PTEST_PATH} install-ptest
+ fi
+ do_install_ptest
+ chown -R root:root ${D}${PTEST_PATH}
+}
+
+do_configure_ptest_base[dirs] = "${B}"
+do_compile_ptest_base[dirs] = "${B}"
+do_install_ptest_base[dirs] = "${B}"
+do_install_ptest_base[cleandirs] = "${D}${PTEST_PATH}"
+
+addtask configure_ptest_base after do_configure before do_compile
+addtask compile_ptest_base after do_compile before do_install
+addtask install_ptest_base after do_install before do_package do_populate_sysroot
+
+python () {
+ if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
+ d.setVarFlag('do_install_ptest_base', 'fakeroot', '1')
+
+ # Remove all '*ptest_base' tasks when ptest is not enabled
+ if not(d.getVar('PTEST_ENABLED', True) == "1"):
+ for i in ['do_configure_ptest_base', 'do_compile_ptest_base', 'do_install_ptest_base']:
+ bb.build.deltask(i, d)
+}
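A recipe opts in by inheriting the class; if its Makefile provides no install-ptest target, it supplies its own install step. A minimal hedged sketch with a hypothetical test script name:

    inherit ptest

    do_install_ptest() {
        install -m 0755 ${S}/tests/run-tests.sh ${D}${PTEST_PATH}/
    }

A run-ptest script placed in ${WORKDIR} (typically via SRC_URI) is installed automatically by do_install_ptest_base above.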
diff --git a/import-layers/yocto-poky/meta/classes/python-dir.bbclass b/import-layers/yocto-poky/meta/classes/python-dir.bbclass
new file mode 100644
index 000000000..ebfa4b30f
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/python-dir.bbclass
@@ -0,0 +1,5 @@
+PYTHON_BASEVERSION ?= "2.7"
+PYTHON_ABI ?= ""
+PYTHON_DIR = "python${PYTHON_BASEVERSION}"
+PYTHON_PN = "python${@'' if '${PYTHON_BASEVERSION}'.startswith('2') else '3'}"
+PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"
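With the defaults above, these variables expand as follows (a recipe inheriting python3native gets the 3.x equivalents instead):

    PYTHON_DIR = "python2.7"
    PYTHON_PN = "python"
    PYTHON_SITEPACKAGES_DIR = "${libdir}/python2.7/site-packages"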
diff --git a/import-layers/yocto-poky/meta/classes/python3native.bbclass b/import-layers/yocto-poky/meta/classes/python3native.bbclass
new file mode 100644
index 000000000..8ec6b769d
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/python3native.bbclass
@@ -0,0 +1,7 @@
+PYTHON_BASEVERSION = "3.5"
+
+inherit python-dir
+
+PYTHON="${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}"
+EXTRANATIVEPATH += "${PYTHON_PN}-native"
+DEPENDS += " ${PYTHON_PN}-native "
diff --git a/import-layers/yocto-poky/meta/classes/pythonnative.bbclass b/import-layers/yocto-poky/meta/classes/pythonnative.bbclass
new file mode 100644
index 000000000..97029dc52
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/pythonnative.bbclass
@@ -0,0 +1,8 @@
+
+inherit python-dir
+
+PYTHON="${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}"
+# PYTHON_EXECUTABLE is used by cmake
+PYTHON_EXECUTABLE="${PYTHON}"
+EXTRANATIVEPATH += "${PYTHON_PN}-native"
+DEPENDS += " ${PYTHON_PN}-native "
diff --git a/import-layers/yocto-poky/meta/classes/qemu.bbclass b/import-layers/yocto-poky/meta/classes/qemu.bbclass
new file mode 100644
index 000000000..75739dbbf
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/qemu.bbclass
@@ -0,0 +1,59 @@
+#
+# This class contains functions for recipes that need QEMU or test for its
+# existence.
+#
+
+def qemu_target_binary(data):
+ target_arch = data.getVar("TARGET_ARCH", True)
+ if target_arch in ("i486", "i586", "i686"):
+ target_arch = "i386"
+ elif target_arch == "powerpc":
+ target_arch = "ppc"
+ elif target_arch == "powerpc64":
+ target_arch = "ppc64"
+
+ return "qemu-" + target_arch
+
+def qemu_wrapper_cmdline(data, rootfs_path, library_paths):
+ import string
+
+ qemu_binary = qemu_target_binary(data)
+ if qemu_binary == "qemu-allarch":
+ qemu_binary = "qemuwrapper"
+
+ qemu_options = data.getVar("QEMU_OPTIONS", True)
+
+ return "PSEUDO_UNLOAD=1 " + qemu_binary + " " + qemu_options + " -L " + rootfs_path\
+ + " -E LD_LIBRARY_PATH=" + ":".join(library_paths) + " "
+
+# The next function returns a string containing the command needed to run a
+# certain binary through qemu. For example, if a postinstall scriptlet is
+# architecture-dependent but must run at do_rootfs time, it can be run through
+# qemu. In the postinstall scriptlet, we could use the following:
+#
+# ${@qemu_run_binary(d, '$D', '/usr/bin/test_app')} [test_app arguments]
+#
+def qemu_run_binary(data, rootfs_path, binary):
+ libdir = rootfs_path + data.getVar("libdir", False)
+ base_libdir = rootfs_path + data.getVar("base_libdir", False)
+
+ return qemu_wrapper_cmdline(data, rootfs_path, [libdir, base_libdir]) + rootfs_path + binary
+
+# QEMU_EXTRAOPTIONS is not meant to be directly used, the extensions are
+# PACKAGE_ARCH, *NOT* overrides.
+# In some cases (e.g. ppc) simply being arch specific (apparently) isn't good
+# enough and a PACKAGE_ARCH specific -cpu option is needed (hence we have to do
+# this dance). For others (e.g. arm) a -cpu option is not necessary, since the
+# qemu-arm default CPU supports all required architecture levels.
+
+QEMU_OPTIONS = "-r ${OLDEST_KERNEL} ${@d.getVar("QEMU_EXTRAOPTIONS_%s" % d.getVar('PACKAGE_ARCH', True), True) or ""}"
+QEMU_OPTIONS[vardeps] += "QEMU_EXTRAOPTIONS_${PACKAGE_ARCH}"
+
+QEMU_EXTRAOPTIONS_ppce500v2 = " -cpu e500v2"
+QEMU_EXTRAOPTIONS_ppce500mc = " -cpu e500mc"
+QEMU_EXTRAOPTIONS_ppce5500 = " -cpu e500mc"
+QEMU_EXTRAOPTIONS_ppc64e5500 = " -cpu e500mc"
+QEMU_EXTRAOPTIONS_ppce6500 = " -cpu e500mc"
+QEMU_EXTRAOPTIONS_ppc64e6500 = " -cpu e500mc"
+QEMU_EXTRAOPTIONS_ppc7400 = " -cpu 7400"
diff --git a/import-layers/yocto-poky/meta/classes/recipe_sanity.bbclass b/import-layers/yocto-poky/meta/classes/recipe_sanity.bbclass
new file mode 100644
index 000000000..295611f0f
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/recipe_sanity.bbclass
@@ -0,0 +1,166 @@
+def __note(msg, d):
+ bb.note("%s: recipe_sanity: %s" % (d.getVar("P", True), msg))
+
+__recipe_sanity_badruntimevars = "RDEPENDS RPROVIDES RRECOMMENDS RCONFLICTS"
+def bad_runtime_vars(cfgdata, d):
+ if bb.data.inherits_class("native", d) or \
+ bb.data.inherits_class("cross", d):
+ return
+
+ for var in d.getVar("__recipe_sanity_badruntimevars", True).split():
+ val = d.getVar(var, 0)
+ if val and val != cfgdata.get(var):
+ __note("%s should be %s_${PN}" % (var, var), d)
+
+__recipe_sanity_reqvars = "DESCRIPTION"
+__recipe_sanity_reqdiffvars = ""
+def req_vars(cfgdata, d):
+ for var in d.getVar("__recipe_sanity_reqvars", True).split():
+ if not d.getVar(var, 0):
+ __note("%s should be set" % var, d)
+
+ for var in d.getVar("__recipe_sanity_reqdiffvars", True).split():
+ val = d.getVar(var, 0)
+ cfgval = cfgdata.get(var)
+
+ if not val:
+ __note("%s should be set" % var, d)
+ elif val == cfgval:
+ __note("%s should be defined to something other than default (%s)" % (var, cfgval), d)
+
+def var_renames_overwrite(cfgdata, d):
+ renames = d.getVar("__recipe_sanity_renames", 0)
+ if renames:
+ for (key, newkey, oldvalue, newvalue) in renames:
+ if oldvalue != newvalue and oldvalue != cfgdata.get(newkey):
+ __note("rename of variable '%s' to '%s' overwrote existing value '%s' with '%s'." % (key, newkey, oldvalue, newvalue), d)
+
+def incorrect_nonempty_PACKAGES(cfgdata, d):
+ if bb.data.inherits_class("native", d) or \
+ bb.data.inherits_class("cross", d):
+ if d.getVar("PACKAGES", True):
+ return True
+
+def can_use_autotools_base(cfgdata, d):
+ cfg = d.getVar("do_configure", True)
+ if not bb.data.inherits_class("autotools", d):
+ return False
+
+ for i in ["autoreconf"] + ["%s_do_configure" % cls for cls in ["gnomebase", "gnome", "e", "autotools", "efl", "gpephone", "openmoko", "openmoko2", "xfce", "xlibs"]]:
+ if cfg.find(i) != -1:
+ return False
+
+ for clsfile in d.getVar("__inherit_cache", 0):
+ (base, _) = os.path.splitext(os.path.basename(clsfile))
+ if cfg.find("%s_do_configure" % base) != -1:
+ __note("autotools_base usage needs verification, spotted %s_do_configure" % base, d)
+
+ return True
+
+def can_delete_FILESPATH(cfgdata, d):
+ expected = cfgdata.get("FILESPATH")
+ expectedpaths = d.expand(expected)
+ unexpanded = d.getVar("FILESPATH", 0)
+ filespath = d.getVar("FILESPATH", True).split(":")
+ filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
+ for fp in filespath:
+ if not fp in expectedpaths:
+ # __note("Path %s in FILESPATH not in the expected paths %s" %
+ # (fp, expectedpaths), d)
+ return False
+ return expected != unexpanded
+
+def can_delete_FILESDIR(cfgdata, d):
+ expected = cfgdata.get("FILESDIR")
+ #expected = "${@bb.utils.which(d.getVar('FILESPATH', True), '.')}"
+ unexpanded = d.getVar("FILESDIR", 0)
+ if unexpanded is None:
+ return False
+
+ expanded = os.path.normpath(d.getVar("FILESDIR", True))
+ filespath = d.getVar("FILESPATH", True).split(":")
+ filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
+
+ return unexpanded != expected and \
+ os.path.exists(expanded) and \
+ (expanded in filespath or
+ expanded == d.expand(expected))
+
+def can_delete_others(p, cfgdata, d):
+ for k in ["S", "PV", "PN", "DESCRIPTION", "DEPENDS",
+ "SECTION", "PACKAGES", "EXTRA_OECONF", "EXTRA_OEMAKE"]:
+ #for k in cfgdata:
+ unexpanded = d.getVar(k, 0)
+ cfgunexpanded = cfgdata.get(k)
+ if not cfgunexpanded:
+ continue
+
+ try:
+ expanded = d.getVar(k, True)
+ cfgexpanded = d.expand(cfgunexpanded)
+ except bb.fetch.ParameterError:
+ continue
+
+ if unexpanded != cfgunexpanded and \
+ cfgexpanded == expanded:
+ __note("candidate for removal of %s" % k, d)
+ bb.debug(1, "%s: recipe_sanity: cfg's '%s' and d's '%s' both expand to %s" %
+ (p, cfgunexpanded, unexpanded, expanded))
+
+python do_recipe_sanity () {
+    p = "%s %s %s" % (d.getVar("PN", True), d.getVar("PV", True), d.getVar("PR", True))
+
+ sanitychecks = [
+ (can_delete_FILESDIR, "candidate for removal of FILESDIR"),
+ (can_delete_FILESPATH, "candidate for removal of FILESPATH"),
+ #(can_use_autotools_base, "candidate for use of autotools_base"),
+ (incorrect_nonempty_PACKAGES, "native or cross recipe with non-empty PACKAGES"),
+ ]
+ cfgdata = d.getVar("__recipe_sanity_cfgdata", 0)
+
+ for (func, msg) in sanitychecks:
+ if func(cfgdata, d):
+ __note(msg, d)
+
+ can_delete_others(p, cfgdata, d)
+ var_renames_overwrite(cfgdata, d)
+ req_vars(cfgdata, d)
+ bad_runtime_vars(cfgdata, d)
+}
+do_recipe_sanity[nostamp] = "1"
+addtask recipe_sanity
+
+do_recipe_sanity_all[nostamp] = "1"
+do_recipe_sanity_all[recrdeptask] = "do_recipe_sanity_all do_recipe_sanity"
+do_recipe_sanity_all () {
+ :
+}
+addtask recipe_sanity_all after do_recipe_sanity
+
+python recipe_sanity_eh () {
+ d = e.data
+
+ cfgdata = {}
+ for k in d.keys():
+ if not isinstance(d.getVar(k, 0), bb.data_smart.DataSmart):
+ cfgdata[k] = d.getVar(k, 0)
+
+ d.setVar("__recipe_sanity_cfgdata", cfgdata)
+ #d.setVar("__recipe_sanity_cfgdata", d)
+
+ # Sick, very sick..
+ from bb.data_smart import DataSmart
+ old = DataSmart.renameVar
+ def myrename(self, key, newkey):
+ oldvalue = self.getVar(newkey, 0)
+ old(self, key, newkey)
+ newvalue = self.getVar(newkey, 0)
+ if oldvalue:
+ renames = self.getVar("__recipe_sanity_renames", 0) or set()
+ renames.add((key, newkey, oldvalue, newvalue))
+ self.setVar("__recipe_sanity_renames", renames)
+ DataSmart.renameVar = myrename
+}
+addhandler recipe_sanity_eh
+recipe_sanity_eh[eventmask] = "bb.event.ConfigParsed"
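+
+# A hedged usage sketch: this class is typically enabled globally from
+# local.conf and the check then run on demand, e.g.:
+#
+#   INHERIT += "recipe_sanity"
+#   bitbake <recipe> -c recipe_sanity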
diff --git a/import-layers/yocto-poky/meta/classes/relocatable.bbclass b/import-layers/yocto-poky/meta/classes/relocatable.bbclass
new file mode 100644
index 000000000..4ca9981f4
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/relocatable.bbclass
@@ -0,0 +1,7 @@
+inherit chrpath
+
+SYSROOT_PREPROCESS_FUNCS += "relocatable_binaries_preprocess"
+
+python relocatable_binaries_preprocess() {
+ rpath_replace(d.expand('${SYSROOT_DESTDIR}'), d)
+}
diff --git a/import-layers/yocto-poky/meta/classes/remove-libtool.bbclass b/import-layers/yocto-poky/meta/classes/remove-libtool.bbclass
new file mode 100644
index 000000000..3fd0cd58f
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/remove-libtool.bbclass
@@ -0,0 +1,11 @@
+# This class removes libtool .la files after do_install
+
+REMOVE_LIBTOOL_LA ?= "1"
+
+remove_libtool_la() {
+ if [ "${REMOVE_LIBTOOL_LA}" != "0" ]; then
+ find "${D}" -ignore_readdir_race -name "*.la" -delete
+ fi
+}
+
+do_install[postfuncs] += "remove_libtool_la"
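+
+# A minimal sketch of opting out: a recipe that genuinely needs its .la files
+# can override the default above, e.g.:
+#
+#   REMOVE_LIBTOOL_LA = "0"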
diff --git a/import-layers/yocto-poky/meta/classes/report-error.bbclass b/import-layers/yocto-poky/meta/classes/report-error.bbclass
new file mode 100644
index 000000000..82b5bcd69
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/report-error.bbclass
@@ -0,0 +1,95 @@
+#
+# Collects debug information in order to create error report files.
+#
+# Copyright (C) 2013 Intel Corporation
+# Author: Andreea Brandusa Proca <andreea.b.proca@intel.com>
+#
+# Licensed under the MIT license, see COPYING.MIT for details
+
+ERR_REPORT_DIR ?= "${LOG_DIR}/error-report"
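+# A hedged example (the path is hypothetical): the report location can be
+# overridden, e.g. in local.conf:
+#   ERR_REPORT_DIR = "${TOPDIR}/my-error-reports"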
+
+def errorreport_getdata(e):
+ import codecs
+ logpath = e.data.getVar('ERR_REPORT_DIR', True)
+ datafile = os.path.join(logpath, "error-report.txt")
+ with codecs.open(datafile, 'r', 'utf-8') as f:
+ data = f.read()
+ return data
+
+def errorreport_savedata(e, newdata, file):
+ import json
+ import codecs
+ logpath = e.data.getVar('ERR_REPORT_DIR', True)
+ datafile = os.path.join(logpath, file)
+ with codecs.open(datafile, 'w', 'utf-8') as f:
+ json.dump(newdata, f, indent=4, sort_keys=True)
+ return datafile
+
+python errorreport_handler () {
+ import json
+ import codecs
+
+ logpath = e.data.getVar('ERR_REPORT_DIR', True)
+ datafile = os.path.join(logpath, "error-report.txt")
+
+ if isinstance(e, bb.event.BuildStarted):
+ bb.utils.mkdirhier(logpath)
+ data = {}
+ machine = e.data.getVar("MACHINE", True)
+ data['machine'] = machine
+ data['build_sys'] = e.data.getVar("BUILD_SYS", True)
+ data['nativelsb'] = e.data.getVar("NATIVELSBSTRING", True)
+ data['distro'] = e.data.getVar("DISTRO", True)
+ data['target_sys'] = e.data.getVar("TARGET_SYS", True)
+ data['failures'] = []
+ data['component'] = e.getPkgs()[0]
+ data['branch_commit'] = base_detect_branch(e.data) + ": " + base_detect_revision(e.data)
+ lock = bb.utils.lockfile(datafile + '.lock')
+ errorreport_savedata(e, data, "error-report.txt")
+ bb.utils.unlockfile(lock)
+
+ elif isinstance(e, bb.build.TaskFailed):
+ task = e.task
+ taskdata={}
+ log = e.data.getVar('BB_LOGFILE', True)
+ taskdata['package'] = e.data.expand("${PF}")
+ taskdata['task'] = task
+ if log:
+ try:
+ logFile = codecs.open(log, 'r', 'utf-8')
+ logdata = logFile.read()
+ logFile.close()
+ except:
+ logdata = "Unable to read log file"
+
+ else:
+ logdata = "No Log"
+
+ # server will refuse failures longer than param specified in project.settings.py
+ # MAX_UPLOAD_SIZE = "5242880"
+ # use lower value, because 650 chars can be spent in task, package, version
+ max_logdata_size = 5242000
+ # upload last max_logdata_size characters
+ if len(logdata) > max_logdata_size:
+ logdata = "..." + logdata[-max_logdata_size:]
+ taskdata['log'] = logdata
+ lock = bb.utils.lockfile(datafile + '.lock')
+ jsondata = json.loads(errorreport_getdata(e))
+ jsondata['failures'].append(taskdata)
+ errorreport_savedata(e, jsondata, "error-report.txt")
+ bb.utils.unlockfile(lock)
+
+ elif isinstance(e, bb.event.BuildCompleted):
+ lock = bb.utils.lockfile(datafile + '.lock')
+ jsondata = json.loads(errorreport_getdata(e))
+ bb.utils.unlockfile(lock)
+ failures = jsondata['failures']
+        if len(failures) > 0:
+ filename = "error_report_" + e.data.getVar("BUILDNAME", True)+".txt"
+ datafile = errorreport_savedata(e, jsondata, filename)
+ bb.note("The errors for this build are stored in %s\nYou can send the errors to a reports server by running:\n send-error-report %s [-s server]" % (datafile, datafile))
+ bb.note("The contents of these logs will be posted in public if you use the above command with the default server. Please ensure you remove any identifying or proprietary information when prompted before sending.")
+}
+
+addhandler errorreport_handler
+errorreport_handler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskFailed"
diff --git a/import-layers/yocto-poky/meta/classes/rm_work.bbclass b/import-layers/yocto-poky/meta/classes/rm_work.bbclass
new file mode 100644
index 000000000..c647d88d2
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/rm_work.bbclass
@@ -0,0 +1,128 @@
+#
+# Removes source after build
+#
+# To use it, add this line to conf/local.conf:
+#
+# INHERIT += "rm_work"
+#
+# To inhibit rm_work for some recipes, specify them in RM_WORK_EXCLUDE.
+# For example, in conf/local.conf:
+#
+# RM_WORK_EXCLUDE += "icu-native icu busybox"
+#
+
+# Use the completion scheduler by default when rm_work is active
+# to try and reduce disk usage
+BB_SCHEDULER ?= "completion"
+
+RMWORK_ORIG_TASK := "${BB_DEFAULT_TASK}"
+BB_DEFAULT_TASK = "rm_work_all"
+
+do_rm_work () {
+ # If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
+ for p in ${RM_WORK_EXCLUDE}; do
+ if [ "$p" = "${PN}" ]; then
+ bbnote "rm_work: Skipping ${PN} since it is in RM_WORK_EXCLUDE"
+ exit 0
+ fi
+ done
+
+ cd ${WORKDIR}
+ for dir in *
+ do
+		# Retain only logs and other files in temp; safely ignore
+		# failures when removing pseudo folders on NFS 2/3 servers.
+ if [ $dir = 'pseudo' ]; then
+ rm -rf $dir 2> /dev/null || true
+ elif [ $dir != 'temp' ]; then
+ rm -rf $dir
+ fi
+ done
+
+	# Need to add pseudo back or subsequent work in this workdir
+	# might fail since setscene may not rerun to recreate it
+ mkdir -p ${WORKDIR}/pseudo/
+
+ # Change normal stamps into setscene stamps as they better reflect the
+ # fact WORKDIR is now empty
+ # Also leave noexec stamps since setscene stamps don't cover them
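+	# For illustration: a stamp such as <stampbase>.do_compile.<sig> is
+	# renamed to <stampbase>.do_compile_setscene.<sig> by the sed below.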
+ cd `dirname ${STAMP}`
+ for i in `basename ${STAMP}`*
+ do
+ for j in ${SSTATETASKS} do_shared_workdir
+ do
+ case $i in
+ *do_setscene*)
+ break
+ ;;
+ *sigdata*)
+ i=dummy
+ break
+ ;;
+ *do_package_write*)
+ i=dummy
+ break
+ ;;
+ *do_rootfs*)
+ i=dummy
+ break
+ ;;
+ *do_image*)
+ i=dummy
+ break
+ ;;
+ *do_build*)
+ i=dummy
+ break
+ ;;
+ # We remove do_package entirely, including any
+ # sstate version since otherwise we'd need to leave 'plaindirs' around
+ # such as 'packages' and 'packages-split' and these can be large. No end
+ # of chain tasks depend directly on do_package anymore.
+ *do_package|*do_package.*|*do_package_setscene.*)
+ rm -f $i;
+ i=dummy
+ break
+ ;;
+ *_setscene*)
+ i=dummy
+ break
+ ;;
+ *$j|*$j.*)
+ mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
+ i=dummy
+ break
+ ;;
+ esac
+ done
+ rm -f $i
+ done
+}
+addtask rm_work after do_${RMWORK_ORIG_TASK}
+
+do_rm_work_all () {
+ :
+}
+do_rm_work_all[recrdeptask] = "do_rm_work"
+addtask rm_work_all after do_rm_work
+
+do_populate_sdk[postfuncs] += "rm_work_populatesdk"
+rm_work_populatesdk () {
+ :
+}
+rm_work_populatesdk[cleandirs] = "${WORKDIR}/sdk"
+
+do_image_complete[postfuncs] += "rm_work_rootfs"
+rm_work_rootfs () {
+ :
+}
+rm_work_rootfs[cleandirs] = "${WORKDIR}/rootfs"
+
+python () {
+ # If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
+ excludes = (d.getVar("RM_WORK_EXCLUDE", True) or "").split()
+ pn = d.getVar("PN", True)
+ if pn in excludes:
+ d.delVarFlag('rm_work_rootfs', 'cleandirs')
+ d.delVarFlag('rm_work_populatesdk', 'cleandirs')
+}
diff --git a/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass b/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass
new file mode 100644
index 000000000..95d28afa3
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass
@@ -0,0 +1,277 @@
+
+# Zap the root password if debug-tweaks feature is not enabled
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ; ",d)}'
+
+# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks is enabled
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
+
+# Enable postinst logging if debug-tweaks is enabled
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
+
+# Create /etc/timestamp during image construction to give a reasonably sane default time setting
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
+
+# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
+
+# Write manifest
+IMAGE_MANIFEST = "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.manifest"
+ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
+# Set default postinst log file
+POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
+# Set default target for systemd images
+SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains("IMAGE_FEATURES", "x11-base", "graphical.target", "multi-user.target", d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; ", "", d)}'
+
+ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
+
+# Disable DNS lookups; SSH_DISABLE_DNS_LOOKUP can be overridden to allow
+# distros to opt out of this change
+SSH_DISABLE_DNS_LOOKUP ?= " ssh_disable_dns_lookup ; "
+ROOTFS_POSTPROCESS_COMMAND_append_qemuall = "${SSH_DISABLE_DNS_LOOKUP}"
+
+
+
+#
+# A hook function to support read-only-rootfs IMAGE_FEATURES
+#
+read_only_rootfs_hook () {
+ # Tweak the mount option and fs_passno for rootfs in fstab
+ sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
+
+ # If we're using openssh and the /etc/ssh directory has no pre-generated keys,
+ # we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
+ # and the keys under /var/run/ssh.
+ if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
+ if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
+ echo "SYSCONFDIR=/etc/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ else
+ echo "SYSCONFDIR=/var/run/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ fi
+ fi
+
+ # Also tweak the key location for dropbear in the same way.
+ if [ -d ${IMAGE_ROOTFS}/etc/dropbear ]; then
+ if [ -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then
+ echo "DROPBEAR_RSAKEY_DIR=/etc/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
+ else
+ echo "DROPBEAR_RSAKEY_DIR=/var/lib/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
+ fi
+ fi
+
+
+ if ${@bb.utils.contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then
+ # Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes
+ if [ -e ${IMAGE_ROOTFS}/etc/default/rcS ]; then
+ sed -i 's/ROOTFS_READ_ONLY=no/ROOTFS_READ_ONLY=yes/' ${IMAGE_ROOTFS}/etc/default/rcS
+ fi
+ # Run populate-volatile.sh at rootfs time to set up basic files
+ # and directories to support read-only rootfs.
+ if [ -x ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh ]; then
+ ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
+ fi
+ fi
+
+ if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then
+ # Update user database files so that services don't fail for a read-only systemd system
+ for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do
+ [ -e $conffile ] || continue
+ grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
+ if [ "$type" = "u" ]; then
+ useradd_params=""
+ [ "$id" != "-" ] && useradd_params="$useradd_params --uid $id"
+ [ "$comment" != "-" ] && useradd_params="$useradd_params --comment $comment"
+ useradd_params="$useradd_params --system $name"
+ eval useradd --root ${IMAGE_ROOTFS} $useradd_params || true
+ elif [ "$type" = "g" ]; then
+ groupadd_params=""
+ [ "$id" != "-" ] && groupadd_params="$groupadd_params --gid $id"
+ groupadd_params="$groupadd_params --system $name"
+ eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
+ fi
+ done
+ done
+ fi
+}
+
+#
+# This function is intended to disallow empty root password if 'debug-tweaks' is not in IMAGE_FEATURES.
+#
+zap_empty_root_password () {
+ if [ -e ${IMAGE_ROOTFS}/etc/shadow ]; then
+ sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/shadow
+ fi
+ if [ -e ${IMAGE_ROOTFS}/etc/passwd ]; then
+ sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/passwd
+ fi
+}
+
+#
+# allow dropbear/openssh to accept root logins and logins from accounts with an empty password string
+#
+ssh_allow_empty_password () {
+ for config in sshd_config sshd_config_readonly; do
+ if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config ]; then
+ sed -i 's/^[#[:space:]]*PermitRootLogin.*/PermitRootLogin yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
+ sed -i 's/^[#[:space:]]*PermitEmptyPasswords.*/PermitEmptyPasswords yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
+ fi
+ done
+
+ if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
+ if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
+ if ! grep -q "DROPBEAR_EXTRA_ARGS=.*-B" ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear ; then
+ sed -i 's/^DROPBEAR_EXTRA_ARGS="*\([^"]*\)"*/DROPBEAR_EXTRA_ARGS="\1 -B"/' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
+ fi
+ else
+ printf '\nDROPBEAR_EXTRA_ARGS="-B"\n' >> ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
+ fi
+ fi
+
+ if [ -d ${IMAGE_ROOTFS}${sysconfdir}/pam.d ] ; then
+ sed -i 's/nullok_secure/nullok/' ${IMAGE_ROOTFS}${sysconfdir}/pam.d/*
+ fi
+}
+
+ssh_disable_dns_lookup () {
+ if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config ]; then
+ sed -i -e 's:#UseDNS yes:UseDNS no:' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config
+ fi
+}
+
+#
+# Enable postinst logging if debug-tweaks is enabled
+#
+postinst_enable_logging () {
+ mkdir -p ${IMAGE_ROOTFS}${sysconfdir}/default
+ echo "POSTINST_LOGGING=1" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
+ echo "LOGFILE=${POSTINST_LOGFILE}" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
+}
+
+#
+# Modify systemd default target
+#
+set_systemd_default_target () {
+ if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ]; then
+ ln -sf ${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target
+ fi
+}
+
+# If /var/volatile is not empty, we have seen problems where programs such as the
+# journal make assumptions based on the contents of /var/volatile. The journal
+# would then write to /var/volatile before it was mounted, thus hiding the
+# items previously written.
+#
+# This change attempts to fix those types of issues in a way that doesn't
+# affect users who may not be using /var/volatile.
+empty_var_volatile () {
+ if [ -e ${IMAGE_ROOTFS}/etc/fstab ]; then
+ match=`awk '$1 !~ "#" && $2 ~ /\/var\/volatile/{print $2}' ${IMAGE_ROOTFS}/etc/fstab 2> /dev/null`
+ if [ -n "$match" ]; then
+ find ${IMAGE_ROOTFS}/var/volatile -mindepth 1 -delete
+ fi
+ fi
+}
+
+# Turn any symbolic /sbin/init link into a file
+remove_init_link () {
+ if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
+ LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init`
+ rm ${IMAGE_ROOTFS}/sbin/init
+ cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init
+ fi
+}
+
+make_zimage_symlink_relative () {
+ if [ -L ${IMAGE_ROOTFS}/boot/zImage ]; then
+ (cd ${IMAGE_ROOTFS}/boot/ && for i in `ls zImage-* | sort`; do ln -sf $i zImage; done)
+ fi
+}
+
+insert_feed_uris () {
+
+ echo "Building feeds for [${DISTRO}].."
+
+ for line in ${FEED_URIS}
+ do
+ # strip leading and trailing spaces/tabs, then split into name and uri
+ line_clean="`echo "$line"|sed 's/^[ \t]*//;s/[ \t]*$//'`"
+ feed_name="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\1/p'`"
+ feed_uri="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\2/p'`"
+
+ echo "Added $feed_name feed with URL $feed_uri"
+
+ # insert new feed-sources
+ echo "src/gz $feed_name $feed_uri" >> ${IMAGE_ROOTFS}/etc/opkg/${feed_name}-feed.conf
+ done
+}
+
+python write_image_manifest () {
+ from oe.rootfs import image_list_installed_packages
+ from oe.utils import format_pkg_list
+
+ deploy_dir = d.getVar('DEPLOY_DIR_IMAGE', True)
+ link_name = d.getVar('IMAGE_LINK_NAME', True)
+ manifest_name = d.getVar('IMAGE_MANIFEST', True)
+
+ if not manifest_name:
+ return
+
+ pkgs = image_list_installed_packages(d)
+ with open(manifest_name, 'w+') as image_manifest:
+ image_manifest.write(format_pkg_list(pkgs, "ver"))
+ image_manifest.write("\n")
+
+ if os.path.exists(manifest_name):
+ manifest_link = deploy_dir + "/" + link_name + ".manifest"
+ if os.path.lexists(manifest_link):
+ if d.getVar('RM_OLD_IMAGE', True) == "1" and \
+ os.path.exists(os.path.realpath(manifest_link)):
+ os.remove(os.path.realpath(manifest_link))
+ os.remove(manifest_link)
+ os.symlink(os.path.basename(manifest_name), manifest_link)
+}
+
+# Can be used to create /etc/timestamp during image construction to give a
+# reasonably sane default time setting
+rootfs_update_timestamp () {
+ date -u +%4Y%2m%2d%2H%2M%2S >${IMAGE_ROOTFS}/etc/timestamp
+}
+
+# Prevent X from being started
+rootfs_no_x_startup () {
+ if [ -f ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm ]; then
+ chmod a-x ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm
+ fi
+}
+
+rootfs_trim_schemas () {
+ for schema in ${IMAGE_ROOTFS}/etc/gconf/schemas/*.schemas
+ do
+ # Need this in case no files exist
+ if [ -e $schema ]; then
+ oe-trim-schemas $schema > $schema.new
+ mv $schema.new $schema
+ fi
+ done
+}
+
+rootfs_check_host_user_contaminated () {
+ contaminated="${WORKDIR}/host-user-contaminated.txt"
+ HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)"
+ HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)"
+
+ find "${IMAGE_ROOTFS}" -wholename "${IMAGE_ROOTFS}/home" -prune \
+ -user "$HOST_USER_UID" -o -group "$HOST_USER_GID" >"$contaminated"
+
+ if [ -s "$contaminated" ]; then
+ echo "WARNING: Paths in the rootfs are owned by the same user or group as the user running bitbake. See the logfile for the specific paths."
+ cat "$contaminated" | sed "s,^, ,"
+ fi
+}
+
+# Make any absolute links in a sysroot relative
+rootfs_sysroot_relativelinks () {
+ sysroot-relativelinks.py ${SDK_OUTPUT}/${SDKTARGETSYSROOT}
+}
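+
+# A hedged sketch (the function name is hypothetical) of how an image recipe
+# can hook its own post-processing into this mechanism:
+#
+#   ROOTFS_POSTPROCESS_COMMAND += "my_rootfs_tweak ; "
+#
+#   my_rootfs_tweak () {
+#       echo "built by the rootfs-postcommands example" > ${IMAGE_ROOTFS}/etc/build-note
+#   }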
diff --git a/import-layers/yocto-poky/meta/classes/rootfs_deb.bbclass b/import-layers/yocto-poky/meta/classes/rootfs_deb.bbclass
new file mode 100644
index 000000000..f79fca608
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/rootfs_deb.bbclass
@@ -0,0 +1,38 @@
+#
+# Copyright 2006-2007 Openedhand Ltd.
+#
+
+ROOTFS_PKGMANAGE = "dpkg apt"
+ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
+
+do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
+do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
+do_rootfs[recrdeptask] += "do_package_write_deb"
+do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
+
+do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
+do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
+
+python rootfs_deb_bad_recommendations() {
+ if d.getVar("BAD_RECOMMENDATIONS", True):
+ bb.warn("Debian package install does not support BAD_RECOMMENDATIONS")
+}
+do_rootfs[prefuncs] += "rootfs_deb_bad_recommendations"
+
+DEB_POSTPROCESS_COMMANDS = ""
+
+opkglibdir = "${localstatedir}/lib/opkg"
+
+python () {
+    # Map SDK_ARCH to Debian's idea of architecture names
+ darch = d.getVar('SDK_ARCH', True)
+ if darch in ["x86", "i486", "i586", "i686", "pentium"]:
+ d.setVar('DEB_SDK_ARCH', 'i386')
+ elif darch == "x86_64":
+ d.setVar('DEB_SDK_ARCH', 'amd64')
+ elif darch == "arm":
+ d.setVar('DEB_SDK_ARCH', 'armel')
+}
+
+# This will of course only work after rootfs_deb_do_rootfs or populate_sdk_deb has been called
+DPKG_QUERY_COMMAND = "${STAGING_BINDIR_NATIVE}/dpkg-query --admindir=$INSTALL_ROOTFS_DEB/var/lib/dpkg"
diff --git a/import-layers/yocto-poky/meta/classes/rootfs_ipk.bbclass b/import-layers/yocto-poky/meta/classes/rootfs_ipk.bbclass
new file mode 100644
index 000000000..d5c38fef7
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/rootfs_ipk.bbclass
@@ -0,0 +1,38 @@
+#
+# Creates a root filesystem out of IPKs
+#
+# This rootfs can be mounted via root-nfs or it can be put into a cramfs/jffs etc.
+# See image.bbclass for a usage of this.
+#
+
+EXTRAOPKGCONFIG ?= ""
+ROOTFS_PKGMANAGE = "opkg ${EXTRAOPKGCONFIG}"
+ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
+
+do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
+do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
+do_rootfs[recrdeptask] += "do_package_write_ipk"
+do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
+
+do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
+do_populate_sdk[lockfiles] += "${WORKDIR}/ipk.lock"
+
+OPKG_PREPROCESS_COMMANDS = ""
+
+OPKG_POSTPROCESS_COMMANDS = ""
+
+OPKGLIBDIR = "${localstatedir}/lib"
+
+MULTILIBRE_ALLOW_REP = "${OPKGLIBDIR}/opkg|/usr/lib/opkg"
+
+python () {
+
+ if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
+ flags = d.getVarFlag('do_rootfs', 'recrdeptask', True)
+ flags = flags.replace("do_package_write_ipk", "")
+ flags = flags.replace("do_deploy", "")
+ flags = flags.replace("do_populate_sysroot", "")
+ d.setVarFlag('do_rootfs', 'recrdeptask', flags)
+ d.setVar('OPKG_PREPROCESS_COMMANDS', "")
+ d.setVar('OPKG_POSTPROCESS_COMMANDS', '')
+}
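+
+# A hedged note: the branch above is taken when images are built purely from
+# pre-existing package feeds, e.g. with the following in local.conf:
+#
+#   BUILD_IMAGES_FROM_FEEDS = "1"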
diff --git a/import-layers/yocto-poky/meta/classes/rootfs_rpm.bbclass b/import-layers/yocto-poky/meta/classes/rootfs_rpm.bbclass
new file mode 100644
index 000000000..0d2e897c2
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/rootfs_rpm.bbclass
@@ -0,0 +1,46 @@
+#
+# Creates a root filesystem out of rpm packages
+#
+
+ROOTFS_PKGMANAGE = "rpm smartpm"
+ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
+
+# Add 50Meg of extra space for Smart
+IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "smartpm", " + 51200", "" ,d)}"
+
+# Smart is python based, so be sure python-native is available to us.
+EXTRANATIVEPATH += "python-native"
+
+# opkg is needed for update-alternatives
+RPMROOTFSDEPENDS = "rpm-native:do_populate_sysroot \
+ rpmresolve-native:do_populate_sysroot \
+ python-smartpm-native:do_populate_sysroot \
+ createrepo-native:do_populate_sysroot \
+ opkg-native:do_populate_sysroot"
+
+do_rootfs[depends] += "${RPMROOTFSDEPENDS}"
+do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}"
+
+do_rootfs[recrdeptask] += "do_package_write_rpm"
+do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
+
+# RPM doesn't work with multiple rootfs generation at once due to collisions in the use of files
+# in ${DEPLOY_DIR_RPM}. This can be removed if package_update_index_rpm can be called concurrently
+do_rootfs[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
+do_populate_sdk[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
+
+python () {
+ if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
+ flags = d.getVarFlag('do_rootfs', 'recrdeptask', True)
+ flags = flags.replace("do_package_write_rpm", "")
+ flags = flags.replace("do_deploy", "")
+ flags = flags.replace("do_populate_sysroot", "")
+ d.setVarFlag('do_rootfs', 'recrdeptask', flags)
+ d.setVar('RPM_PREPROCESS_COMMANDS', '')
+ d.setVar('RPM_POSTPROCESS_COMMANDS', '')
+
+}
+
+rpmlibdir = "/var/lib/rpm"
diff --git a/import-layers/yocto-poky/meta/classes/rootfsdebugfiles.bbclass b/import-layers/yocto-poky/meta/classes/rootfsdebugfiles.bbclass
new file mode 100644
index 000000000..a558871e9
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/rootfsdebugfiles.bbclass
@@ -0,0 +1,36 @@
+# This class installs additional files found on the build host
+# directly into the rootfs.
+#
+# One use case is to install a constant ssh host key in
+# an image that gets created for just one machine. This
+# solves two issues:
+# - host key generation on the device can stall when the
+# kernel has not gathered enough entropy yet (seen in practice
+# under qemu)
+# - ssh complains by default when the host key changes
+#
+# For dropbear, with the ssh host key stored alongside local.conf:
+# 1. Extend local.conf:
+# INHERIT += "rootfsdebugfiles"
+# ROOTFS_DEBUG_FILES += "${TOPDIR}/conf/dropbear_rsa_host_key ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ;"
+# 2. Boot the image once, copy the dropbear_rsa_host_key from
+# the device into your build conf directory.
+#
+# Do not use for production images! It bypasses several
+# core build mechanisms (updating the image when one
+# of the files changes, license tracking in the image
+# manifest, ...).
+
+ROOTFS_DEBUG_FILES ?= ""
+ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed with 'cp -a' in the format 'source1 target1;source2 target2;...'"
+
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files ;"
+rootfs_debug_files () {
+ #!/bin/sh -e
+ echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target; do
+ if [ -e "$source" ]; then
+ mkdir -p $(dirname $target)
+ cp -a $source $target
+ fi
+ done
+}
diff --git a/import-layers/yocto-poky/meta/classes/sanity.bbclass b/import-layers/yocto-poky/meta/classes/sanity.bbclass
new file mode 100644
index 000000000..77813e41b
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/sanity.bbclass
@@ -0,0 +1,1015 @@
+#
+# Sanity check the user's setup for common misconfigurations
+#
+
+SANITY_REQUIRED_UTILITIES ?= "patch diffstat makeinfo git bzip2 tar \
+ gzip gawk chrpath wget cpio perl file"
+
+def bblayers_conf_file(d):
+ return os.path.join(d.getVar('TOPDIR', True), 'conf/bblayers.conf')
+
+def sanity_conf_read(fn):
+ with open(fn, 'r') as f:
+ lines = f.readlines()
+ return lines
+
+def sanity_conf_find_line(pattern, lines):
+ import re
+ return next(((index, line)
+ for index, line in enumerate(lines)
+ if re.search(pattern, line)), (None, None))
+
+def sanity_conf_update(fn, lines, version_var_name, new_version):
+ index, line = sanity_conf_find_line(r"^%s" % version_var_name, lines)
+ lines[index] = '%s = "%d"\n' % (version_var_name, new_version)
+ with open(fn, "w") as f:
+ f.write(''.join(lines))
+
+# Functions added to this variable MUST throw a NotImplementedError exception unless
+# they successfully changed the config version in the config file. Exceptions
+# are used since exec_func doesn't handle return values.
+BBLAYERS_CONF_UPDATE_FUNCS += " \
+ conf/bblayers.conf:LCONF_VERSION:LAYER_CONF_VERSION:oecore_update_bblayers \
+ conf/local.conf:CONF_VERSION:LOCALCONF_VERSION:oecore_update_localconf \
+ conf/site.conf:SCONF_VERSION:SITE_CONF_VERSION:oecore_update_siteconf \
+"
+
+SANITY_DIFF_TOOL ?= "meld"
+
+SANITY_LOCALCONF_SAMPLE ?= "${COREBASE}/meta*/conf/local.conf.sample"
+python oecore_update_localconf() {
+ # Check we are using a valid local.conf
+ current_conf = d.getVar('CONF_VERSION', True)
+ conf_version = d.getVar('LOCALCONF_VERSION', True)
+
+ failmsg = """Your version of local.conf was generated from an older/newer version of
+local.conf.sample and there have been updates made to this file. Please compare the two
+files and merge any changes before continuing.
+
+Matching the version numbers will remove this message.
+
+\"${SANITY_DIFF_TOOL} conf/local.conf ${SANITY_LOCALCONF_SAMPLE}\"
+
+is a good way to visualise the changes."""
+ failmsg = d.expand(failmsg)
+
+ raise NotImplementedError(failmsg)
+}
+
+SANITY_SITECONF_SAMPLE ?= "${COREBASE}/meta*/conf/site.conf.sample"
+python oecore_update_siteconf() {
+ # If we have a site.conf, check it's valid
+ current_sconf = d.getVar('SCONF_VERSION', True)
+ sconf_version = d.getVar('SITE_CONF_VERSION', True)
+
+ failmsg = """Your version of site.conf was generated from an older version of
+site.conf.sample and there have been updates made to this file. Please compare the two
+files and merge any changes before continuing.
+
+Matching the version numbers will remove this message.
+
+\"${SANITY_DIFF_TOOL} conf/site.conf ${SANITY_SITECONF_SAMPLE}\"
+
+is a good way to visualise the changes."""
+ failmsg = d.expand(failmsg)
+
+ raise NotImplementedError(failmsg)
+}
+
+SANITY_BBLAYERCONF_SAMPLE ?= "${COREBASE}/meta*/conf/bblayers.conf.sample"
+python oecore_update_bblayers() {
+ # bblayers.conf is out of date, so see if we can resolve that
+
+ current_lconf = int(d.getVar('LCONF_VERSION', True))
+ lconf_version = int(d.getVar('LAYER_CONF_VERSION', True))
+
+ failmsg = """Your version of bblayers.conf has the wrong LCONF_VERSION (has ${LCONF_VERSION}, expecting ${LAYER_CONF_VERSION}).
+Please compare your file against bblayers.conf.sample and merge any changes before continuing.
+"${SANITY_DIFF_TOOL} conf/bblayers.conf ${SANITY_BBLAYERCONF_SAMPLE}"
+
+is a good way to visualise the changes."""
+ failmsg = d.expand(failmsg)
+
+ if not current_lconf:
+ raise NotImplementedError(failmsg)
+
+ lines = []
+
+ if current_lconf < 4:
+ raise NotImplementedError(failmsg)
+
+ bblayers_fn = bblayers_conf_file(d)
+ lines = sanity_conf_read(bblayers_fn)
+
+ if current_lconf == 4 and lconf_version > 4:
+ topdir_var = '$' + '{TOPDIR}'
+ index, bbpath_line = sanity_conf_find_line('BBPATH', lines)
+ if bbpath_line:
+ start = bbpath_line.find('"')
+ if start != -1 and (len(bbpath_line) != (start + 1)):
+ if bbpath_line[start + 1] == '"':
+ lines[index] = (bbpath_line[:start + 1] +
+ topdir_var + bbpath_line[start + 1:])
+ else:
+ if not topdir_var in bbpath_line:
+ lines[index] = (bbpath_line[:start + 1] +
+ topdir_var + ':' + bbpath_line[start + 1:])
+ else:
+ raise NotImplementedError(failmsg)
+ else:
+ index, bbfiles_line = sanity_conf_find_line('BBFILES', lines)
+ if bbfiles_line:
+ lines.insert(index, 'BBPATH = "' + topdir_var + '"\n')
+ else:
+ raise NotImplementedError(failmsg)
+
+ current_lconf += 1
+ sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
+ bb.note("Your conf/bblayers.conf has been automatically updated.")
+ return
+
+ elif current_lconf == 5 and lconf_version > 5:
+ # Null update, to avoid issues with people switching between poky and other distros
+ current_lconf = 6
+ sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
+ bb.note("Your conf/bblayers.conf has been automatically updated.")
+ return
+
+ elif current_lconf == 6 and lconf_version > 6:
+ # Handle rename of meta-yocto -> meta-poky
+ # This marks the start of separate version numbers but code is needed in OE-Core
+ # for the migration, one last time.
+ layers = d.getVar('BBLAYERS', True).split()
+ layers = [ os.path.basename(path) for path in layers ]
+ if 'meta-yocto' in layers:
+ found = False
+ while True:
+ index, meta_yocto_line = sanity_conf_find_line(r'.*meta-yocto[\'"\s\n]', lines)
+ if meta_yocto_line:
+ lines[index] = meta_yocto_line.replace('meta-yocto', 'meta-poky')
+ found = True
+ else:
+ break
+ if not found:
+ raise NotImplementedError(failmsg)
+ index, meta_yocto_line = sanity_conf_find_line('LCONF_VERSION.*\n', lines)
+ if meta_yocto_line:
+ lines[index] = 'POKY_BBLAYERS_CONF_VERSION = "1"\n'
+ else:
+ raise NotImplementedError(failmsg)
+ with open(bblayers_fn, "w") as f:
+ f.write(''.join(lines))
+ bb.note("Your conf/bblayers.conf has been automatically updated.")
+ return
+ current_lconf += 1
+ sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
+ bb.note("Your conf/bblayers.conf has been automatically updated.")
+ return
+
+ raise NotImplementedError(failmsg)
+}
+
+def raise_sanity_error(msg, d, network_error=False):
+ if d.getVar("SANITY_USE_EVENTS", True) == "1":
+ try:
+ bb.event.fire(bb.event.SanityCheckFailed(msg, network_error), d)
+ except TypeError:
+ bb.event.fire(bb.event.SanityCheckFailed(msg), d)
+ return
+
+ bb.fatal(""" OE-core's config sanity checker detected a potential misconfiguration.
+ Either fix the cause of this error or at your own risk disable the checker (see sanity.conf).
+ Following is the list of potential problems / advisories:
+
+ %s""" % msg)
+
+# Check flags associated with a tuning.
+def check_toolchain_tune_args(data, tune, multilib, errs):
+ found_errors = False
+ if check_toolchain_args_present(data, tune, multilib, errs, 'CCARGS'):
+ found_errors = True
+ if check_toolchain_args_present(data, tune, multilib, errs, 'ASARGS'):
+ found_errors = True
+ if check_toolchain_args_present(data, tune, multilib, errs, 'LDARGS'):
+ found_errors = True
+
+ return found_errors
+
+def check_toolchain_args_present(data, tune, multilib, tune_errors, which):
+ args_set = (data.getVar("TUNE_%s" % which, True) or "").split()
+ args_wanted = (data.getVar("TUNEABI_REQUIRED_%s_tune-%s" % (which, tune), True) or "").split()
+ args_missing = []
+
+ # If no args are listed/required, we are done.
+ if not args_wanted:
+ return
+ for arg in args_wanted:
+ if arg not in args_set:
+ args_missing.append(arg)
+
+ found_errors = False
+ if args_missing:
+ found_errors = True
+ tune_errors.append("TUNEABI for %s requires '%s' in TUNE_%s (%s)." %
+ (tune, ' '.join(args_missing), which, ' '.join(args_set)))
+ return found_errors
+
+# Check a single tune for validity.
+def check_toolchain_tune(data, tune, multilib):
+ tune_errors = []
+ if not tune:
+ return "No tuning found for %s multilib." % multilib
+ localdata = bb.data.createCopy(data)
+ if multilib != "default":
+ # Apply the overrides so we can look at the details.
+ overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + multilib
+ localdata.setVar("OVERRIDES", overrides)
+ bb.data.update_data(localdata)
+ bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib))
+ features = (localdata.getVar("TUNE_FEATURES_tune-%s" % tune, True) or "").split()
+ if not features:
+ return "Tuning '%s' has no defined features, and cannot be used." % tune
+ valid_tunes = localdata.getVarFlags('TUNEVALID') or {}
+ conflicts = localdata.getVarFlags('TUNECONFLICTS') or {}
+ # [doc] is the documentation for the variable, not a real feature
+ if 'doc' in valid_tunes:
+ del valid_tunes['doc']
+ if 'doc' in conflicts:
+ del conflicts['doc']
+ for feature in features:
+ if feature in conflicts:
+ for conflict in conflicts[feature].split():
+ if conflict in features:
+ tune_errors.append("Feature '%s' conflicts with '%s'." %
+ (feature, conflict))
+ if feature in valid_tunes:
+ bb.debug(2, " %s: %s" % (feature, valid_tunes[feature]))
+ else:
+ tune_errors.append("Feature '%s' is not defined." % feature)
+ whitelist = localdata.getVar("TUNEABI_WHITELIST", True)
+ if whitelist:
+ tuneabi = localdata.getVar("TUNEABI_tune-%s" % tune, True)
+ if not tuneabi:
+ tuneabi = tune
+ if True not in [x in whitelist.split() for x in tuneabi.split()]:
+ tune_errors.append("Tuning '%s' (%s) cannot be used with any supported tuning/ABI." %
+ (tune, tuneabi))
+ else:
+ if not check_toolchain_tune_args(localdata, tuneabi, multilib, tune_errors):
+ bb.debug(2, "Sanity check: Compiler args OK for %s." % tune)
+ if tune_errors:
+ return "Tuning '%s' has the following errors:\n" % tune + '\n'.join(tune_errors)
+
+def check_toolchain(data):
+ tune_error_set = []
+ deftune = data.getVar("DEFAULTTUNE", True)
+ tune_errors = check_toolchain_tune(data, deftune, 'default')
+ if tune_errors:
+ tune_error_set.append(tune_errors)
+
+ multilibs = (data.getVar("MULTILIB_VARIANTS", True) or "").split()
+ global_multilibs = (data.getVar("MULTILIB_GLOBAL_VARIANTS", True) or "").split()
+
+ if multilibs:
+ seen_libs = []
+ seen_tunes = []
+ for lib in multilibs:
+ if lib in seen_libs:
+ tune_error_set.append("The multilib '%s' appears more than once." % lib)
+ else:
+ seen_libs.append(lib)
+ if not lib in global_multilibs:
+ tune_error_set.append("Multilib %s is not present in MULTILIB_GLOBAL_VARIANTS" % lib)
+ tune = data.getVar("DEFAULTTUNE_virtclass-multilib-%s" % lib, True)
+ if tune in seen_tunes:
+ tune_error_set.append("The tuning '%s' appears in more than one multilib." % tune)
+ else:
+                seen_tunes.append(tune)
+ if tune == deftune:
+ tune_error_set.append("Multilib '%s' (%s) is also the default tuning." % (lib, deftune))
+ else:
+ tune_errors = check_toolchain_tune(data, tune, lib)
+ if tune_errors:
+ tune_error_set.append(tune_errors)
+ if tune_error_set:
+ return "Toolchain tunings invalid:\n" + '\n'.join(tune_error_set) + "\n"
+
+ return ""
+
+def check_conf_exists(fn, data):
+ bbpath = []
+ fn = data.expand(fn)
+ vbbpath = data.getVar("BBPATH", False)
+ if vbbpath:
+ bbpath += vbbpath.split(":")
+ for p in bbpath:
+ currname = os.path.join(data.expand(p), fn)
+ if os.access(currname, os.R_OK):
+ return True
+ return False
+
+def check_create_long_filename(filepath, pathname):
+ import string, random
+ testfile = os.path.join(filepath, ''.join(random.choice(string.ascii_letters) for x in range(200)))
+ try:
+ if not os.path.exists(filepath):
+ bb.utils.mkdirhier(filepath)
+ f = open(testfile, "w")
+ f.close()
+ os.remove(testfile)
+ except IOError as e:
+ import errno
+ err, strerror = e.args
+ if err == errno.ENAMETOOLONG:
+ return "Failed to create a file with a long name in %s. Please use a filesystem that does not unreasonably limit filename length.\n" % pathname
+ else:
+ return "Failed to create a file in %s: %s.\n" % (pathname, strerror)
+ except OSError as e:
+ errno, strerror = e.args
+ return "Failed to create %s directory in which to run long name sanity check: %s.\n" % (pathname, strerror)
+ return ""
+
+def check_path_length(filepath, pathname, limit):
+ if len(filepath) > limit:
+ return "The length of %s is longer than %s, this would cause unexpected errors, please use a shorter path.\n" % (pathname, limit)
+ return ""
+
+def get_filesystem_id(path):
+ status, result = oe.utils.getstatusoutput("stat -f -c '%s' %s" % ("%t", path))
+ if status == 0:
+ return result
+ else:
+ bb.warn("Can't get the filesystem id of: %s" % path)
+ return None
+
+# Check that the path isn't located on nfs.
+def check_not_nfs(path, name):
+    # The NFS filesystem id is 6969
+ if get_filesystem_id(path) == "6969":
+ return "The %s: %s can't be located on nfs.\n" % (name, path)
+ return ""
+
+# Check that path isn't a broken symlink
+def check_symlink(lnk, data):
+ if os.path.islink(lnk) and not os.path.exists(lnk):
+ raise_sanity_error("%s is a broken symlink." % lnk, data)
+
+def check_connectivity(d):
+    # URIs to check can be set in the CONNECTIVITY_CHECK_URIS variable
+    # using the same syntax as for SRC_URI. If the variable is not set
+    # the check is skipped
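+    #
+    # A hedged example (the URL is hypothetical), e.g. in local.conf:
+    #   CONNECTIVITY_CHECK_URIS = "https://example.com/connectivity.html"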
+ test_uris = (d.getVar('CONNECTIVITY_CHECK_URIS', True) or "").split()
+ retval = ""
+
+ # Only check connectivity if network enabled and the
+ # CONNECTIVITY_CHECK_URIS are set
+ network_enabled = not d.getVar('BB_NO_NETWORK', True)
+ check_enabled = len(test_uris)
+ # Take a copy of the data store and unset MIRRORS and PREMIRRORS
+ data = bb.data.createCopy(d)
+ data.delVar('PREMIRRORS')
+ data.delVar('MIRRORS')
+ if check_enabled and network_enabled:
+ try:
+ fetcher = bb.fetch2.Fetch(test_uris, data)
+ fetcher.checkstatus()
+ except Exception as err:
+ # Allow the message to be configured so that users can be
+ # pointed to a support mechanism.
+ msg = data.getVar('CONNECTIVITY_CHECK_MSG', True) or ""
+ if len(msg) == 0:
+ msg = "%s. Please ensure your network is configured correctly.\n" % err
+ retval = msg
+
+ return retval
+
+def check_supported_distro(sanity_data):
+ from fnmatch import fnmatch
+
+ tested_distros = sanity_data.getVar('SANITY_TESTED_DISTROS', True)
+ if not tested_distros:
+ return
+
+ try:
+ distro = oe.lsb.distro_identifier()
+ except Exception:
+ distro = None
+
+ if not distro:
+        bb.warn('Host distribution could not be determined; you may experience unexpected failures. It is recommended that you use a tested distribution.')
+
+ for supported in [x.strip() for x in tested_distros.split('\\n')]:
+ if fnmatch(distro, supported):
+ return
+
+    bb.warn('Host distribution "%s" has not been validated with this version of the build system; you may experience unexpected failures. It is recommended that you use a tested distribution.' % distro)
+
+# Checks we should only make if MACHINE is set correctly
+def check_sanity_validmachine(sanity_data):
+ messages = ""
+
+ # Check TUNE_ARCH is set
+ if sanity_data.getVar('TUNE_ARCH', True) == 'INVALID':
+ messages = messages + 'TUNE_ARCH is unset. Please ensure your MACHINE configuration includes a valid tune configuration file which will set this correctly.\n'
+
+ # Check TARGET_OS is set
+ if sanity_data.getVar('TARGET_OS', True) == 'INVALID':
+ messages = messages + 'Please set TARGET_OS directly, or choose a MACHINE or DISTRO that does so.\n'
+
+ # Check that we don't have duplicate entries in PACKAGE_ARCHS & that TUNE_PKGARCH is in PACKAGE_ARCHS
+ pkgarchs = sanity_data.getVar('PACKAGE_ARCHS', True)
+ tunepkg = sanity_data.getVar('TUNE_PKGARCH', True)
+ defaulttune = sanity_data.getVar('DEFAULTTUNE', True)
+ tunefound = False
+ seen = {}
+ dups = []
+
+ for pa in pkgarchs.split():
+ if seen.get(pa, 0) == 1:
+ dups.append(pa)
+ else:
+ seen[pa] = 1
+ if pa == tunepkg:
+ tunefound = True
+
+ if len(dups):
+ messages = messages + "Error, the PACKAGE_ARCHS variable contains duplicates. The following archs are listed more than once: %s" % " ".join(dups)
+
+    if not tunefound:
+ messages = messages + "Error, the PACKAGE_ARCHS variable (%s) for DEFAULTTUNE (%s) does not contain TUNE_PKGARCH (%s)." % (pkgarchs, defaulttune, tunepkg)
+
+ return messages
+
+# Checks whether it is necessary to add the -march option to the host gcc
+def check_gcc_march(sanity_data):
+ result = True
+ message = ""
+
+ # Check if -march not in BUILD_CFLAGS
+ if sanity_data.getVar("BUILD_CFLAGS",True).find("-march") < 0:
+ result = False
+
+ # Construct a test file
+ f = open("gcc_test.c", "w")
+ f.write("int main (){ volatile int atomic = 2; __sync_bool_compare_and_swap (&atomic, 2, 3); return 0; }\n")
+ f.close()
+
+ # Check if GCC could work without march
+ if not result:
+ status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc gcc_test.c -o gcc_test")
+ if status == 0:
+            result = True
+
+ if not result:
+ status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc -march=native gcc_test.c -o gcc_test")
+ if status == 0:
+ message = "BUILD_CFLAGS_append = \" -march=native\""
+            result = True
+
+ if not result:
+ build_arch = sanity_data.getVar('BUILD_ARCH', True)
+ status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc -march=%s gcc_test.c -o gcc_test" % build_arch)
+ if status == 0:
+ message = "BUILD_CFLAGS_append = \" -march=%s\"" % build_arch
+            result = True
+
+ os.remove("gcc_test.c")
+ if os.path.exists("gcc_test"):
+ os.remove("gcc_test")
+
+ return (result, message)
+
+# Unpatched versions of make 3.82 are known to be broken. See GNU Savannah Bug 30612.
+# Use a modified reproducer from http://savannah.gnu.org/bugs/?30612 to validate.
+def check_make_version(sanity_data):
+ from distutils.version import LooseVersion
+ status, result = oe.utils.getstatusoutput("make --version")
+ if status != 0:
+ return "Unable to execute make --version, exit code %s\n" % status
+ version = result.split()[2]
+ if LooseVersion(version) == LooseVersion("3.82"):
+ # Construct a test file
+ f = open("makefile_test", "w")
+ f.write("makefile_test.a: makefile_test_a.c makefile_test_b.c makefile_test.a( makefile_test_a.c makefile_test_b.c)\n")
+ f.write("\n")
+ f.write("makefile_test_a.c:\n")
+ f.write(" touch $@\n")
+ f.write("\n")
+ f.write("makefile_test_b.c:\n")
+ f.write(" touch $@\n")
+ f.close()
+
+ # Check if make 3.82 has been patched
+ status,result = oe.utils.getstatusoutput("make -f makefile_test")
+
+ os.remove("makefile_test")
+ if os.path.exists("makefile_test_a.c"):
+ os.remove("makefile_test_a.c")
+ if os.path.exists("makefile_test_b.c"):
+ os.remove("makefile_test_b.c")
+ if os.path.exists("makefile_test.a"):
+ os.remove("makefile_test.a")
+
+ if status != 0:
+ return "Your version of make 3.82 is broken. Please revert to 3.81 or install a patched version.\n"
+ return None
+
+
+# Tar version 1.24 and onwards handle overwriting symlinks correctly
+# but earlier versions do not; this needs to work properly for sstate
+def check_tar_version(sanity_data):
+ from distutils.version import LooseVersion
+ status, result = oe.utils.getstatusoutput("tar --version")
+ if status != 0:
+ return "Unable to execute tar --version, exit code %s\n" % status
+ version = result.split()[3]
+ if LooseVersion(version) < LooseVersion("1.24"):
+ return "Your version of tar is older than 1.24 and has bugs which will break builds. Please install a newer version of tar.\n"
+ return None
+
+# We use git parameters and functionality only found in 1.7.8 or later
+# The kernel tools assume git >= 1.8.3.1 (verified needed > 1.7.9.5) see #6162
+# The git fetcher also had workarounds for git < 1.7.9.2 which we've dropped
+def check_git_version(sanity_data):
+ from distutils.version import LooseVersion
+ status, result = oe.utils.getstatusoutput("git --version 2> /dev/null")
+ if status != 0:
+ return "Unable to execute git --version, exit code %s\n" % status
+ version = result.split()[2]
+ if LooseVersion(version) < LooseVersion("1.8.3.1"):
+ return "Your version of git is older than 1.8.3.1 and has bugs which will break builds. Please install a newer version of git.\n"
+ return None
+
+# Check the required perl modules which may not be installed by default
+def check_perl_modules(sanity_data):
+ ret = ""
+ modules = ( "Text::ParseWords", "Thread::Queue", "Data::Dumper" )
+ errresult = ''
+ for m in modules:
+ status, result = oe.utils.getstatusoutput("perl -e 'use %s'" % m)
+ if status != 0:
+ errresult += result
+ ret += "%s " % m
+ if ret:
+ return "Required perl module(s) not found: %s\n\n%s\n" % (ret, errresult)
+ return None
+
+def sanity_check_conffiles(status, d):
+ funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS', True).split()
+ for func in funcs:
+ conffile, current_version, required_version, func = func.split(":")
+ if check_conf_exists(conffile, d) and d.getVar(current_version, True) is not None and \
+ d.getVar(current_version, True) != d.getVar(required_version, True):
+ success = True
+ try:
+ bb.build.exec_func(func, d, pythonexception=True)
+ except NotImplementedError as e:
+ success = False
+ status.addresult(str(e))
+ if success:
+ status.reparse = True
+
+def sanity_handle_abichanges(status, d):
+ #
+ # Check the 'ABI' of TMPDIR
+ #
+ import subprocess
+
+ current_abi = d.getVar('OELAYOUT_ABI', True)
+ abifile = d.getVar('SANITY_ABIFILE', True)
+ if os.path.exists(abifile):
+ with open(abifile, "r") as f:
+ abi = f.read().strip()
+ if not abi.isdigit():
+ with open(abifile, "w") as f:
+ f.write(current_abi)
+ elif abi == "2" and current_abi == "3":
+ bb.note("Converting staging from layout version 2 to layout version 3")
+ subprocess.call(d.expand("mv ${TMPDIR}/staging ${TMPDIR}/sysroots"), shell=True)
+ subprocess.call(d.expand("ln -s sysroots ${TMPDIR}/staging"), shell=True)
+ subprocess.call(d.expand("cd ${TMPDIR}/stamps; for i in */*do_populate_staging; do new=`echo $i | sed -e 's/do_populate_staging/do_populate_sysroot/'`; mv $i $new; done"), shell=True)
+ with open(abifile, "w") as f:
+ f.write(current_abi)
+ elif abi == "3" and current_abi == "4":
+ bb.note("Converting staging layout from version 3 to layout version 4")
+ if os.path.exists(d.expand("${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS}")):
+ subprocess.call(d.expand("mv ${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS} ${STAGING_BINDIR_CROSS}"), shell=True)
+ subprocess.call(d.expand("ln -s ${STAGING_BINDIR_CROSS} ${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS}"), shell=True)
+ with open(abifile, "w") as f:
+ f.write(current_abi)
+ elif abi == "4":
+ status.addresult("Staging layout has changed. The cross directory has been deprecated and cross packages are now built under the native sysroot.\nThis requires a rebuild.\n")
+ elif abi == "5" and current_abi == "6":
+ bb.note("Converting staging layout from version 5 to layout version 6")
+ subprocess.call(d.expand("mv ${TMPDIR}/pstagelogs ${SSTATE_MANIFESTS}"), shell=True)
+ with open(abifile, "w") as f:
+ f.write(current_abi)
+ elif abi == "7" and current_abi == "8":
+ status.addresult("Your configuration is using stamp files including the sstate hash but your build directory was built with stamp files that do not include this.\nTo continue, either rebuild or switch back to the OEBasic signature handler with BB_SIGNATURE_HANDLER = 'OEBasic'.\n")
+ elif (abi != current_abi and current_abi == "9"):
+        status.addresult("The layout of the TMPDIR STAMPS directory has changed. Please clean out TMPDIR and rebuild (sstate will still be valid and reused)\n")
+ elif (abi != current_abi and current_abi == "10" and (abi == "8" or abi == "9")):
+ bb.note("Converting staging layout from version 8/9 to layout version 10")
+ cmd = d.expand("grep -r -l sysroot-providers/virtual_kernel ${SSTATE_MANIFESTS}")
+ ret, result = oe.utils.getstatusoutput(cmd)
+ result = result.split()
+ for f in result:
+ bb.note("Uninstalling manifest file %s" % f)
+ sstate_clean_manifest(f, d)
+ with open(abifile, "w") as f:
+ f.write(current_abi)
+ elif abi == "10" and current_abi == "11":
+ bb.note("Converting staging layout from version 10 to layout version 11")
+ # Files in xf86-video-modesetting moved to xserver-xorg and bitbake can't currently handle that:
+ subprocess.call(d.expand("rm ${TMPDIR}/sysroots/*/usr/lib/xorg/modules/drivers/modesetting_drv.so ${TMPDIR}/sysroots/*/pkgdata/runtime/xf86-video-modesetting* ${TMPDIR}/sysroots/*/pkgdata/runtime-reverse/xf86-video-modesetting* ${TMPDIR}/sysroots/*/pkgdata/shlibs2/xf86-video-modesetting*"), shell=True)
+ with open(abifile, "w") as f:
+ f.write(current_abi)
+ elif (abi != current_abi):
+ # Code to convert from one ABI to another could go here if possible.
+ status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
+ else:
+ with open(abifile, "w") as f:
+ f.write(current_abi)
+
+def check_sanity_sstate_dir_change(sstate_dir, data):
+ # Sanity checks to be done when the value of SSTATE_DIR changes
+
+ # Check that SSTATE_DIR isn't on a filesystem with limited filename length (eg. eCryptFS)
+ testmsg = ""
+ if sstate_dir != "":
+ testmsg = check_create_long_filename(sstate_dir, "SSTATE_DIR")
+ # If we don't have permissions to SSTATE_DIR, suggest the user set it as an SSTATE_MIRRORS
+ try:
+ err = testmsg.split(': ')[1].strip()
+ if err == "Permission denied.":
+ testmsg = testmsg + "You could try using %s in SSTATE_MIRRORS rather than as an SSTATE_CACHE.\n" % (sstate_dir)
+ except IndexError:
+ pass
+ return testmsg
+
+def check_sanity_version_change(status, d):
+ # Sanity checks to be done when SANITY_VERSION or NATIVELSBSTRING changes
+ # In other words, these tests run once in a given build directory and then
+    # never again until the sanity version or host distribution id/version changes.
+
+    # Check the python install is complete. glib-2.0-native requires
+    # xml.parsers.expat
+ try:
+ import xml.parsers.expat
+ except ImportError:
+ status.addresult('Your python is not a full install. Please install the module xml.parsers.expat (python-xml on openSUSE and SUSE Linux).\n')
+ import stat
+
+ status.addresult(check_make_version(d))
+ status.addresult(check_tar_version(d))
+ status.addresult(check_git_version(d))
+ status.addresult(check_perl_modules(d))
+
+ missing = ""
+
+ if not check_app_exists("${MAKE}", d):
+ missing = missing + "GNU make,"
+
+ if not check_app_exists('${BUILD_PREFIX}gcc', d):
+ missing = missing + "C Compiler (%sgcc)," % d.getVar("BUILD_PREFIX", True)
+
+ if not check_app_exists('${BUILD_PREFIX}g++', d):
+ missing = missing + "C++ Compiler (%sg++)," % d.getVar("BUILD_PREFIX", True)
+
+ required_utilities = d.getVar('SANITY_REQUIRED_UTILITIES', True)
+
+ for util in required_utilities.split():
+ if not check_app_exists(util, d):
+ missing = missing + "%s," % util
+
+ if missing:
+ missing = missing.rstrip(',')
+ status.addresult("Please install the following missing utilities: %s\n" % missing)
+
+ assume_provided = d.getVar('ASSUME_PROVIDED', True).split()
+ # Check user doesn't have ASSUME_PROVIDED = instead of += in local.conf
+ if "diffstat-native" not in assume_provided:
+ status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n')
+
+ if "qemu-native" in assume_provided:
+ if not check_app_exists("qemu-arm", d):
+ status.addresult("qemu-native was in ASSUME_PROVIDED but the QEMU binaries (qemu-arm) can't be found in PATH")
+
+ if "libsdl-native" in assume_provided:
+ if not check_app_exists("sdl-config", d):
+ status.addresult("libsdl-native is set to be ASSUME_PROVIDED but sdl-config can't be found in PATH. Please either install it, or configure qemu not to require sdl.")
+
+ (result, message) = check_gcc_march(d)
+ if result and message:
+ status.addresult("Your gcc version is older than 4.5, please add the following param to local.conf\n \
+ %s\n" % message)
+ if not result:
+ status.addresult("Your gcc version is older than 4.5 or is not working properly. Please verify you can build")
+ status.addresult(" and link something that uses atomic operations, such as: \n")
+ status.addresult(" __sync_bool_compare_and_swap (&atomic, 2, 3);\n")
+
+ # Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
+ tmpdir = d.getVar('TMPDIR', True)
+ status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
+ tmpdirmode = os.stat(tmpdir).st_mode
+ if (tmpdirmode & stat.S_ISGID):
+ status.addresult("TMPDIR is setgid, please don't build in a setgid directory")
+ if (tmpdirmode & stat.S_ISUID):
+ status.addresult("TMPDIR is setuid, please don't build in a setuid directory")
+
+ # Some third-party software apparently relies on chmod etc. being suid root (!!)
+ suid_check_bins = "chown chmod mknod".split()
+ for bin_cmd in suid_check_bins:
+ bin_path = bb.utils.which(os.environ["PATH"], bin_cmd)
+ if bin_path:
+ bin_stat = os.stat(bin_path)
+ if bin_stat.st_uid == 0 and bin_stat.st_mode & stat.S_ISUID:
+ status.addresult('%s has the setuid bit set. This interferes with pseudo and may cause other issues that break the build process.\n' % bin_path)
+
+ # Check that we can fetch from various network transports
+ netcheck = check_connectivity(d)
+ status.addresult(netcheck)
+ if netcheck:
+ status.network_error = True
+
+ nolibs = d.getVar('NO32LIBS', True)
+ if not nolibs:
+ lib32path = '/lib'
+ if os.path.exists('/lib64') and ( os.path.islink('/lib64') or os.path.islink('/lib') ):
+ lib32path = '/lib32'
+
+ if os.path.exists('%s/libc.so.6' % lib32path) and not os.path.exists('/usr/include/gnu/stubs-32.h'):
+ status.addresult("You have a 32-bit libc, but no 32-bit headers. You must install the 32-bit libc headers.\n")
+
+ bbpaths = d.getVar('BBPATH', True).split(":")
+ if ("." in bbpaths or "./" in bbpaths or "" in bbpaths) and not status.reparse:
+ status.addresult("BBPATH references the current directory, either through " \
+ "an empty entry, a './' or a '.'.\n\t This is unsafe and means your "\
+ "layer configuration is adding empty elements to BBPATH.\n\t "\
+ "Please check your layer.conf files and other BBPATH " \
+ "settings to remove the current working directory " \
+ "references.\n" \
+ "Parsed BBPATH is" + str(bbpaths));
+
+ oes_bb_conf = d.getVar( 'OES_BITBAKE_CONF', True)
+ if not oes_bb_conf:
+ status.addresult('You are not using the OpenEmbedded version of conf/bitbake.conf. This means your environment is misconfigured, in particular check BBPATH.\n')
+
+    # The length of the TMPDIR path can't exceed 410 characters
+ status.addresult(check_path_length(tmpdir, "TMPDIR", 410))
+
+ # Check that TMPDIR isn't located on nfs
+ status.addresult(check_not_nfs(tmpdir, "TMPDIR"))
+
+def check_sanity_everybuild(status, d):
+ import os, stat
+    # Sanity tests which test the user's environment and so need to run at each
+    # build (or are so cheap it makes sense to always run them).
+
+ if 0 == os.getuid():
+ raise_sanity_error("Do not use Bitbake as root.", d)
+
+ # Check the Python version, we now have a minimum of Python 2.7.3
+ import sys
+ if sys.hexversion < 0x020703F0:
+ status.addresult('The system requires at least Python 2.7.3 to run. Please update your Python interpreter.\n')
+
+ # Check the bitbake version meets minimum requirements
+ from distutils.version import LooseVersion
+ minversion = d.getVar('BB_MIN_VERSION', True)
+ if (LooseVersion(bb.__version__) < LooseVersion(minversion)):
+ status.addresult('Bitbake version %s is required and version %s was found\n' % (minversion, bb.__version__))
+
+ sanity_check_conffiles(status, d)
+
+ paths = d.getVar('PATH', True).split(":")
+ if "." in paths or "./" in paths or "" in paths:
+ status.addresult("PATH contains '.', './' or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n")
+
+ # Check that the DISTRO is valid, if set
+ # need to take into account DISTRO renaming DISTRO
+ distro = d.getVar('DISTRO', True)
+ if distro and distro != "nodistro":
+ if not ( check_conf_exists("conf/distro/${DISTRO}.conf", d) or check_conf_exists("conf/distro/include/${DISTRO}.inc", d) ):
+ status.addresult("DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % d.getVar("DISTRO", True))
+
+ # Check that DL_DIR is set, exists and is writable. In theory, we should never even hit the check if DL_DIR isn't
+ # set, since so much relies on it being set.
+ dldir = d.getVar('DL_DIR', True)
+ if not dldir:
+ status.addresult("DL_DIR is not set. Your environment is misconfigured, check that DL_DIR is set, and if the directory exists, that it is writable. \n")
+ if os.path.exists(dldir) and not os.access(dldir, os.W_OK):
+ status.addresult("DL_DIR: %s exists but you do not appear to have write access to it. \n" % dldir)
+ check_symlink(dldir, d)
+
+ # Check that the MACHINE is valid, if it is set
+ machinevalid = True
+ if d.getVar('MACHINE', True):
+ if not check_conf_exists("conf/machine/${MACHINE}.conf", d):
+ status.addresult('Please set a valid MACHINE in your local.conf or environment\n')
+ machinevalid = False
+ else:
+ status.addresult(check_sanity_validmachine(d))
+ else:
+ status.addresult('Please set a MACHINE in your local.conf or environment\n')
+ machinevalid = False
+ if machinevalid:
+ status.addresult(check_toolchain(d))
+
+ # Check that the SDKMACHINE is valid, if it is set
+ if d.getVar('SDKMACHINE', True):
+ if not check_conf_exists("conf/machine-sdk/${SDKMACHINE}.conf", d):
+ status.addresult('Specified SDKMACHINE value is not valid\n')
+ elif d.getVar('SDK_ARCH', False) == "${BUILD_ARCH}":
+ status.addresult('SDKMACHINE is set, but SDK_ARCH has not been changed as a result - SDKMACHINE may have been set too late (e.g. in the distro configuration)\n')
+
+ check_supported_distro(d)
+
+ omask = os.umask(022)
+ if omask & 0755:
+ status.addresult("Please use a umask which allows a+rx and u+rwx\n")
+ os.umask(omask)
+
+ if d.getVar('TARGET_ARCH', True) == "arm":
+ # This path is no longer user-readable in modern (very recent) Linux
+ try:
+ if os.path.exists("/proc/sys/vm/mmap_min_addr"):
+ f = open("/proc/sys/vm/mmap_min_addr", "r")
+ try:
+ if (int(f.read().strip()) > 65536):
+ status.addresult("/proc/sys/vm/mmap_min_addr is not <= 65536. This will cause problems with qemu so please fix the value (as root).\n\nTo fix this in later reboots, set vm.mmap_min_addr = 65536 in /etc/sysctl.conf.\n")
+ finally:
+ f.close()
+ except:
+ pass
+
+ oeroot = d.getVar('COREBASE', True)
+ if oeroot.find('+') != -1:
+ status.addresult("Error, you have an invalid character (+) in your COREBASE directory path. Please move the installation to a directory which doesn't include any + characters.")
+ if oeroot.find('@') != -1:
+ status.addresult("Error, you have an invalid character (@) in your COREBASE directory path. Please move the installation to a directory which doesn't include any @ characters.")
+ if oeroot.find(' ') != -1:
+ status.addresult("Error, you have a space in your COREBASE directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this.")
+
+ # Check the format of MIRRORS, PREMIRRORS and SSTATE_MIRRORS
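+    # Each non-blank entry in them has the form
+    # "<source URI regex> <replacement base URI>", e.g. (illustrative values,
+    # not defaults):  git://.*/.* http://mirror.example.com/sources/ \n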
+ import re
+ mirror_vars = ['MIRRORS', 'PREMIRRORS', 'SSTATE_MIRRORS']
+ protocols = ['http', 'ftp', 'file', 'https', \
+ 'git', 'gitsm', 'hg', 'osc', 'p4', 'svn', \
+ 'bzr', 'cvs', 'npm', 'sftp', 'ssh']
+ for mirror_var in mirror_vars:
+ mirrors = (d.getVar(mirror_var, True) or '').replace('\\n', '\n').split('\n')
+ for mirror_entry in mirrors:
+ mirror_entry = mirror_entry.strip()
+ if not mirror_entry:
+ # ignore blank lines
+ continue
+
+ try:
+ pattern, mirror = mirror_entry.split()
+ except ValueError:
+                bb.warn('Invalid %s: %s, should have 2 members (regex and replacement).' % (mirror_var, mirror_entry))
+ continue
+
+ decoded = bb.fetch2.decodeurl(pattern)
+ try:
+ pattern_scheme = re.compile(decoded[0])
+ except re.error as exc:
+ bb.warn('Invalid scheme regex (%s) in %s; %s' % (pattern, mirror_var, mirror_entry))
+ continue
+
+ if not any(pattern_scheme.match(protocol) for protocol in protocols):
+ bb.warn('Invalid protocol (%s) in %s: %s' % (decoded[0], mirror_var, mirror_entry))
+ continue
+
+ if not any(mirror.startswith(protocol + '://') for protocol in protocols):
+ bb.warn('Invalid protocol in %s: %s' % (mirror_var, mirror_entry))
+ continue
+
+ if mirror.startswith('file://'):
+ import urlparse
+ check_symlink(urlparse.urlparse(mirror).path, d)
+                # SSTATE_MIRRORS entries may end with a /PATH string
+ if mirror.endswith('/PATH'):
+ # remove /PATH$ from SSTATE_MIRROR to get a working
+ # base directory path
+ mirror_base = urlparse.urlparse(mirror[:-1*len('/PATH')]).path
+ check_symlink(mirror_base, d)
+
+ # Check that TMPDIR hasn't changed location since the last time we were run
+ tmpdir = d.getVar('TMPDIR', True)
+ checkfile = os.path.join(tmpdir, "saved_tmpdir")
+ if os.path.exists(checkfile):
+ with open(checkfile, "r") as f:
+ saved_tmpdir = f.read().strip()
+ if (saved_tmpdir != tmpdir):
+ status.addresult("Error, TMPDIR has changed location. You need to either move it back to %s or rebuild\n" % saved_tmpdir)
+ else:
+ bb.utils.mkdirhier(tmpdir)
+ # Remove setuid, setgid and sticky bits from TMPDIR
+ try:
+ os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISUID)
+ os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISGID)
+ os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISVTX)
+ except OSError as exc:
+ bb.warn("Unable to chmod TMPDIR: %s" % exc)
+ with open(checkfile, "w") as f:
+ f.write(tmpdir)
+
+ # Check /bin/sh links to dash or bash
+ real_sh = os.path.realpath('/bin/sh')
+ if not real_sh.endswith('/dash') and not real_sh.endswith('/bash'):
+ status.addresult("Error, /bin/sh links to %s, must be dash or bash\n" % real_sh)
+
+def check_sanity(sanity_data):
+ class SanityStatus(object):
+ def __init__(self):
+ self.messages = ""
+ self.network_error = False
+ self.reparse = False
+
+ def addresult(self, message):
+ if message:
+ self.messages = self.messages + message
+
+ status = SanityStatus()
+
+ tmpdir = sanity_data.getVar('TMPDIR', True)
+ sstate_dir = sanity_data.getVar('SSTATE_DIR', True)
+
+ check_symlink(sstate_dir, sanity_data)
+
+ # Check saved sanity info
+ last_sanity_version = 0
+ last_tmpdir = ""
+ last_sstate_dir = ""
+ last_nativelsbstr = ""
+ sanityverfile = sanity_data.expand("${TOPDIR}/conf/sanity_info")
+ if os.path.exists(sanityverfile):
+ with open(sanityverfile, 'r') as f:
+ for line in f:
+ if line.startswith('SANITY_VERSION'):
+ last_sanity_version = int(line.split()[1])
+ if line.startswith('TMPDIR'):
+ last_tmpdir = line.split()[1]
+ if line.startswith('SSTATE_DIR'):
+ last_sstate_dir = line.split()[1]
+ if line.startswith('NATIVELSBSTRING'):
+ last_nativelsbstr = line.split()[1]
+
+ check_sanity_everybuild(status, sanity_data)
+
+ sanity_version = int(sanity_data.getVar('SANITY_VERSION', True) or 1)
+ network_error = False
+ # NATIVELSBSTRING var may have been overridden with "universal", so
+ # get actual host distribution id and version
+ nativelsbstr = lsb_distro_identifier(sanity_data)
+ if last_sanity_version < sanity_version or last_nativelsbstr != nativelsbstr:
+ check_sanity_version_change(status, sanity_data)
+ status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
+ else:
+ if last_sstate_dir != sstate_dir:
+ status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
+
+ if os.path.exists(os.path.dirname(sanityverfile)) and not status.messages:
+ with open(sanityverfile, 'w') as f:
+ f.write("SANITY_VERSION %s\n" % sanity_version)
+ f.write("TMPDIR %s\n" % tmpdir)
+ f.write("SSTATE_DIR %s\n" % sstate_dir)
+ f.write("NATIVELSBSTRING %s\n" % nativelsbstr)
+
+ sanity_handle_abichanges(status, sanity_data)
+
+ if status.messages != "":
+ raise_sanity_error(sanity_data.expand(status.messages), sanity_data, status.network_error)
+ return status.reparse
+
+# Create a copy of the datastore and finalise it to ensure appends and
+# overrides are set - the datastore has yet to be finalised at ConfigParsed
+def copy_data(e):
+ sanity_data = bb.data.createCopy(e.data)
+ sanity_data.finalize()
+ return sanity_data
+
+addhandler check_sanity_eventhandler
+check_sanity_eventhandler[eventmask] = "bb.event.SanityCheck bb.event.NetworkTest"
+python check_sanity_eventhandler() {
+ if bb.event.getName(e) == "SanityCheck":
+ sanity_data = copy_data(e)
+ if e.generateevents:
+ sanity_data.setVar("SANITY_USE_EVENTS", "1")
+ reparse = check_sanity(sanity_data)
+ e.data.setVar("BB_INVALIDCONF", reparse)
+ bb.event.fire(bb.event.SanityCheckPassed(), e.data)
+ elif bb.event.getName(e) == "NetworkTest":
+ sanity_data = copy_data(e)
+ if e.generateevents:
+ sanity_data.setVar("SANITY_USE_EVENTS", "1")
+ bb.event.fire(bb.event.NetworkTestFailed() if check_connectivity(sanity_data) else bb.event.NetworkTestPassed(), e.data)
+
+ return
+}
diff --git a/import-layers/yocto-poky/meta/classes/scons.bbclass b/import-layers/yocto-poky/meta/classes/scons.bbclass
new file mode 100644
index 000000000..1579b05c6
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/scons.bbclass
@@ -0,0 +1,17 @@
+DEPENDS += "python-scons-native"
+
+EXTRA_OESCONS ?= ""
+
+do_configure[noexec] = "1"
+
+scons_do_compile() {
+ ${STAGING_BINDIR_NATIVE}/scons ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
+ die "scons build execution failed."
+}
+
+scons_do_install() {
+ ${STAGING_BINDIR_NATIVE}/scons PREFIX=${D}${prefix} prefix=${D}${prefix} install ${EXTRA_OESCONS}|| \
+ die "scons install execution failed."
+}
+
+EXPORT_FUNCTIONS do_compile do_install
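+
+# A hypothetical recipe fragment using this class (values are placeholders,
+# not defaults provided here):
+# inherit scons
+# EXTRA_OESCONS = "BUILDDIR=${B}"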
diff --git a/import-layers/yocto-poky/meta/classes/sdl.bbclass b/import-layers/yocto-poky/meta/classes/sdl.bbclass
new file mode 100644
index 000000000..cc31288f6
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/sdl.bbclass
@@ -0,0 +1,6 @@
+#
+# (C) Michael 'Mickey' Lauer <mickey@Vanille.de>
+#
+
+DEPENDS += "virtual/libsdl libsdl-mixer libsdl-image"
+SECTION = "x11/games"
diff --git a/import-layers/yocto-poky/meta/classes/setuptools.bbclass b/import-layers/yocto-poky/meta/classes/setuptools.bbclass
new file mode 100644
index 000000000..56343b1c7
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/setuptools.bbclass
@@ -0,0 +1,8 @@
+inherit distutils
+
+DEPENDS += "python-distribute-native"
+
+DISTUTILS_INSTALL_ARGS = "--root=${D} \
+ --prefix=${prefix} \
+ --install-lib=${PYTHON_SITEPACKAGES_DIR} \
+ --install-data=${datadir}"
diff --git a/import-layers/yocto-poky/meta/classes/setuptools3.bbclass b/import-layers/yocto-poky/meta/classes/setuptools3.bbclass
new file mode 100644
index 000000000..de6dd9440
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/setuptools3.bbclass
@@ -0,0 +1,8 @@
+inherit distutils3
+
+DEPENDS += "python3-setuptools-native"
+
+DISTUTILS_INSTALL_ARGS = "--root=${D} \
+ --prefix=${prefix} \
+ --install-lib=${PYTHON_SITEPACKAGES_DIR} \
+ --install-data=${datadir}"
diff --git a/import-layers/yocto-poky/meta/classes/sign_ipk.bbclass b/import-layers/yocto-poky/meta/classes/sign_ipk.bbclass
new file mode 100644
index 000000000..a481f6d9a
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/sign_ipk.bbclass
@@ -0,0 +1,52 @@
+# Class for generating signed IPK packages.
+#
+# Configuration variables used by this class:
+# IPK_GPG_PASSPHRASE_FILE
+# Path to a file containing the passphrase of the signing key.
+# IPK_GPG_NAME
+# Name of the key to sign with.
+# IPK_GPG_BACKEND
+# Optional variable for specifying the backend to use for signing.
+# Currently the only available option is 'local', i.e. local signing
+# on the build host.
+# IPK_GPG_SIGNATURE_TYPE
+# Optional variable for specifying the type of gpg signatures, can be:
+# 1. Ascii armored (ASC), default if not set
+# 2. Binary (BIN)
+# GPG_BIN
+# Optional variable for specifying the gpg binary/wrapper to use for
+# signing.
+# GPG_PATH
+#      Optional variable for specifying the gnupg "home" directory.
+#
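+# A minimal illustrative configuration (key name and path are placeholders):
+#   INHERIT += "sign_ipk"
+#   IPK_GPG_NAME = "my-signing-key"
+#   IPK_GPG_PASSPHRASE_FILE = "/path/to/passphrase"
+#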
+
+inherit sanity
+
+IPK_SIGN_PACKAGES = '1'
+IPK_GPG_BACKEND ?= 'local'
+IPK_GPG_SIGNATURE_TYPE ?= 'ASC'
+
+python () {
+ # Check configuration
+ for var in ('IPK_GPG_NAME', 'IPK_GPG_PASSPHRASE_FILE'):
+ if not d.getVar(var, True):
+ raise_sanity_error("You need to define %s in the config" % var, d)
+
+ sigtype = d.getVar("IPK_GPG_SIGNATURE_TYPE", True)
+ if sigtype.upper() != "ASC" and sigtype.upper() != "BIN":
+ raise_sanity_error("Bad value for IPK_GPG_SIGNATURE_TYPE (%s), use either ASC or BIN" % sigtype)
+}
+
+def sign_ipk(d, ipk_to_sign):
+ from oe.gpg_sign import get_signer
+
+ bb.debug(1, 'Signing ipk: %s' % ipk_to_sign)
+
+ signer = get_signer(d, d.getVar('IPK_GPG_BACKEND', True))
+ sig_type = d.getVar('IPK_GPG_SIGNATURE_TYPE', True)
+ is_ascii_sig = (sig_type.upper() != "BIN")
+
+ signer.detach_sign(ipk_to_sign,
+ d.getVar('IPK_GPG_NAME', True),
+ d.getVar('IPK_GPG_PASSPHRASE_FILE', True),
+ armor=is_ascii_sig)
diff --git a/import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass b/import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass
new file mode 100644
index 000000000..31a6e9b04
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass
@@ -0,0 +1,43 @@
+# Class for signing package feeds
+#
+# Related configuration variables that will be used after this class is
+# inherited:
+# PACKAGE_FEED_PASSPHRASE_FILE
+# Path to a file containing the passphrase of the signing key.
+# PACKAGE_FEED_GPG_NAME
+# Name of the key to sign with. May be key id or key name.
+# PACKAGE_FEED_GPG_BACKEND
+# Optional variable for specifying the backend to use for signing.
+# Currently the only available option is 'local', i.e. local signing
+# on the build host.
+# PACKAGE_FEED_GPG_SIGNATURE_TYPE
+# Optional variable for specifying the type of gpg signature, can be:
+# 1. Ascii armored (ASC), default if not set
+# 2. Binary (BIN)
+# This variable is only available for IPK feeds. It is ignored on
+# other packaging backends.
+# GPG_BIN
+# Optional variable for specifying the gpg binary/wrapper to use for
+# signing.
+# GPG_PATH
+#      Optional variable for specifying the gnupg "home" directory.
+#
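+# A minimal illustrative configuration (values are placeholders):
+#   INHERIT += "sign_package_feed"
+#   PACKAGE_FEED_GPG_NAME = "my-feed-key"
+#   PACKAGE_FEED_GPG_PASSPHRASE_FILE = "/path/to/passphrase"
+#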
+inherit sanity
+
+PACKAGE_FEED_SIGN = '1'
+PACKAGE_FEED_GPG_BACKEND ?= 'local'
+PACKAGE_FEED_GPG_SIGNATURE_TYPE ?= 'ASC'
+
+python () {
+ # Check sanity of configuration
+ for var in ('PACKAGE_FEED_GPG_NAME', 'PACKAGE_FEED_GPG_PASSPHRASE_FILE'):
+ if not d.getVar(var, True):
+ raise_sanity_error("You need to define %s in the config" % var, d)
+
+ sigtype = d.getVar("PACKAGE_FEED_GPG_SIGNATURE_TYPE", True)
+ if sigtype.upper() != "ASC" and sigtype.upper() != "BIN":
+ raise_sanity_error("Bad value for PACKAGE_FEED_GPG_SIGNATURE_TYPE (%s), use either ASC or BIN" % sigtype)
+}
+
+do_package_index[depends] += "signing-keys:do_deploy"
+do_rootfs[depends] += "signing-keys:do_populate_sysroot"
diff --git a/import-layers/yocto-poky/meta/classes/sign_rpm.bbclass b/import-layers/yocto-poky/meta/classes/sign_rpm.bbclass
new file mode 100644
index 000000000..a8ea75faa
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/sign_rpm.bbclass
@@ -0,0 +1,53 @@
+# Class for generating signed RPM packages.
+#
+# Configuration variables used by this class:
+# RPM_GPG_PASSPHRASE
+# The passphrase of the signing key.
+# RPM_GPG_NAME
+# Name of the key to sign with. May be key id or key name.
+# RPM_GPG_BACKEND
+# Optional variable for specifying the backend to use for signing.
+# Currently the only available option is 'local', i.e. local signing
+# on the build host.
+# GPG_BIN
+# Optional variable for specifying the gpg binary/wrapper to use for
+# signing.
+# GPG_PATH
+#      Optional variable for specifying the gnupg "home" directory.
+#
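+# A minimal illustrative configuration (values are placeholders):
+#   INHERIT += "sign_rpm"
+#   RPM_GPG_NAME = "my-rpm-key"
+#   RPM_GPG_PASSPHRASE = "my passphrase"
+#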
+inherit sanity
+
+RPM_SIGN_PACKAGES='1'
+RPM_GPG_BACKEND ?= 'local'
+
+
+python () {
+ if d.getVar('RPM_GPG_PASSPHRASE_FILE', True):
+ raise_sanity_error('RPM_GPG_PASSPHRASE_FILE is replaced by RPM_GPG_PASSPHRASE', d)
+ # Check configuration
+ for var in ('RPM_GPG_NAME', 'RPM_GPG_PASSPHRASE'):
+ if not d.getVar(var, True):
+ raise_sanity_error("You need to define %s in the config" % var, d)
+
+ # Set the expected location of the public key
+ d.setVar('RPM_GPG_PUBKEY', os.path.join(d.getVar('STAGING_DIR_TARGET', False),
+ d.getVar('sysconfdir', False),
+ 'pki',
+ 'rpm-gpg',
+ 'RPM-GPG-KEY-${DISTRO_VERSION}'))
+}
+
+python sign_rpm () {
+ import glob
+ from oe.gpg_sign import get_signer
+
+ signer = get_signer(d, d.getVar('RPM_GPG_BACKEND', True))
+ rpms = glob.glob(d.getVar('RPM_PKGWRITEDIR', True) + '/*')
+
+ signer.sign_rpms(rpms,
+ d.getVar('RPM_GPG_NAME', True),
+ d.getVar('RPM_GPG_PASSPHRASE', True))
+}
+
+do_package_index[depends] += "signing-keys:do_deploy"
+do_rootfs[depends] += "signing-keys:do_populate_sysroot"
diff --git a/import-layers/yocto-poky/meta/classes/sip.bbclass b/import-layers/yocto-poky/meta/classes/sip.bbclass
new file mode 100644
index 000000000..6ed2a13bd
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/sip.bbclass
@@ -0,0 +1,61 @@
+# Build Class for Sip based Python Bindings
+# (C) Michael 'Mickey' Lauer <mickey@Vanille.de>
+#
+STAGING_SIPDIR ?= "${STAGING_DATADIR_NATIVE}/sip"
+
+DEPENDS =+ "sip-native"
+RDEPENDS_${PN} += "python-sip"
+
+# default stuff, do not uncomment
+# EXTRA_SIPTAGS = "-tWS_X11 -tQt_4_3_0"
+
+# do_generate runs before do_configure, so ensure that sip-native is populated in the sysroot before executing it
+do_generate[depends] += "sip-native:do_populate_sysroot"
+
+sip_do_generate() {
+ if [ -z "${SIP_MODULES}" ]; then
+ MODULES="`ls sip/*mod.sip`"
+ else
+ MODULES="${SIP_MODULES}"
+ fi
+
+ if [ -z "$MODULES" ]; then
+ die "SIP_MODULES not set and no modules found in $PWD"
+ else
+ bbnote "using modules '${SIP_MODULES}' and tags '${EXTRA_SIPTAGS}'"
+ fi
+
+ if [ -z "${EXTRA_SIPTAGS}" ]; then
+ die "EXTRA_SIPTAGS needs to be set!"
+ else
+ SIPTAGS="${EXTRA_SIPTAGS}"
+ fi
+
+ if [ ! -z "${SIP_FEATURES}" ]; then
+ FEATURES="-z ${SIP_FEATURES}"
+ bbnote "sip feature file: ${SIP_FEATURES}"
+ fi
+
+ for module in $MODULES
+ do
+ install -d ${module}/
+ echo "calling 'sip4 -I sip -I ${STAGING_SIPDIR} ${SIPTAGS} ${FEATURES} -c ${module} -b ${module}/${module}.pro.in sip/${module}/${module}mod.sip'"
+ sip4 -I ${STAGING_SIPDIR} -I sip ${SIPTAGS} ${FEATURES} -c ${module} -b ${module}/${module}.sbf \
+ sip/${module}/${module}mod.sip || die "Error calling sip on ${module}"
+ sed -e 's,target,TARGET,' -e 's,sources,SOURCES,' -e 's,headers,HEADERS,' \
+ ${module}/${module}.sbf | sed s,"moc_HEADERS =","HEADERS +=", \
+ >${module}/${module}.pro
+ echo "TEMPLATE=lib" >>${module}/${module}.pro
+ [ "${module}" = "qt" ] && echo "" >>${module}/${module}.pro
+ [ "${module}" = "qtcanvas" ] && echo "" >>${module}/${module}.pro
+ [ "${module}" = "qttable" ] && echo "" >>${module}/${module}.pro
+ [ "${module}" = "qwt" ] && echo "" >>${module}/${module}.pro
+ [ "${module}" = "qtpe" ] && echo "" >>${module}/${module}.pro
+ [ "${module}" = "qtpe" ] && echo "LIBS+=-lqpe" >>${module}/${module}.pro
+ true
+ done
+}
+
+EXPORT_FUNCTIONS do_generate
+
+addtask generate after do_unpack do_patch before do_configure
diff --git a/import-layers/yocto-poky/meta/classes/siteconfig.bbclass b/import-layers/yocto-poky/meta/classes/siteconfig.bbclass
new file mode 100644
index 000000000..45dce489d
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/siteconfig.bbclass
@@ -0,0 +1,33 @@
+python siteconfig_do_siteconfig () {
+ shared_state = sstate_state_fromvars(d)
+ if shared_state['task'] != 'populate_sysroot':
+ return
+ if not os.path.isdir(os.path.join(d.getVar('FILE_DIRNAME', True), 'site_config')):
+ bb.debug(1, "No site_config directory, skipping do_siteconfig")
+ return
+ bb.build.exec_func('do_siteconfig_gencache', d)
+ sstate_clean(shared_state, d)
+ sstate_install(shared_state, d)
+}
+
+EXTRASITECONFIG ?= ""
+
+siteconfig_do_siteconfig_gencache () {
+ mkdir -p ${WORKDIR}/site_config_${MACHINE}
+ gen-site-config ${FILE_DIRNAME}/site_config \
+ >${WORKDIR}/site_config_${MACHINE}/configure.ac
+ cd ${WORKDIR}/site_config_${MACHINE}
+ autoconf
+ rm -f ${BPN}_cache
+ CONFIG_SITE="" ${EXTRASITECONFIG} ./configure ${CONFIGUREOPTS} --cache-file ${BPN}_cache
+ sed -n -e "/ac_cv_c_bigendian/p" -e "/ac_cv_sizeof_/p" \
+ -e "/ac_cv_type_/p" -e "/ac_cv_header_/p" -e "/ac_cv_func_/p" \
+ < ${BPN}_cache > ${BPN}_config
+ mkdir -p ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
+ cp ${BPN}_config ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
+
+}
+
+do_populate_sysroot[sstate-interceptfuncs] += "do_siteconfig "
+
+EXPORT_FUNCTIONS do_siteconfig do_siteconfig_gencache
diff --git a/import-layers/yocto-poky/meta/classes/siteinfo.bbclass b/import-layers/yocto-poky/meta/classes/siteinfo.bbclass
new file mode 100644
index 000000000..50141a353
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/siteinfo.bbclass
@@ -0,0 +1,175 @@
+# This class exists to provide information about the targets that
+# may be needed by other classes and/or recipes. If you add a new
+# target this will probably need to be updated.
+
+#
+# Returns information about 'what' for the named target 'target'
+# where 'target' == "<arch>-<os>"
+#
+# 'what' can be one of
+# * target: Returns the target name ("<arch>-<os>")
+# * endianness: Return "be" for big endian targets, "le" for little endian
+# * bits: Returns the bit size of the target, either "32" or "64"
+# * libc: Returns the name of the c library used by the target
+#
+# It is an error for the target not to exist.
+# If 'what' doesn't exist then an empty value is returned
+#
+def siteinfo_data(d):
+ archinfo = {
+ "allarch": "endian-little bit-32", # bogus, but better than special-casing the checks below for allarch
+ "aarch64": "endian-little bit-64 arm-common arm-64",
+ "aarch64_be": "endian-big bit-64 arm-common arm-64",
+ "arm": "endian-little bit-32 arm-common arm-32",
+ "armeb": "endian-big bit-32 arm-common arm-32",
+ "avr32": "endian-big bit-32 avr32-common",
+ "bfin": "endian-little bit-32 bfin-common",
+ "epiphany": "endian-little bit-32",
+ "i386": "endian-little bit-32 ix86-common",
+ "i486": "endian-little bit-32 ix86-common",
+ "i586": "endian-little bit-32 ix86-common",
+ "i686": "endian-little bit-32 ix86-common",
+ "ia64": "endian-little bit-64",
+ "microblaze": "endian-big bit-32 microblaze-common",
+ "microblazeeb": "endian-big bit-32 microblaze-common",
+ "microblazeel": "endian-little bit-32 microblaze-common",
+ "mips": "endian-big bit-32 mips-common",
+ "mips64": "endian-big bit-64 mips-common",
+ "mips64el": "endian-little bit-64 mips-common",
+ "mipsel": "endian-little bit-32 mips-common",
+ "powerpc": "endian-big bit-32 powerpc-common",
+ "nios2": "endian-little bit-32 nios2-common",
+ "powerpc64": "endian-big bit-64 powerpc-common",
+ "ppc": "endian-big bit-32 powerpc-common",
+ "ppc64": "endian-big bit-64 powerpc-common",
+ "ppc64le" : "endian-little bit-64 powerpc-common",
+ "sh3": "endian-little bit-32 sh-common",
+ "sh4": "endian-little bit-32 sh-common",
+ "sparc": "endian-big bit-32",
+ "viac3": "endian-little bit-32 ix86-common",
+ "x86_64": "endian-little", # bitinfo specified in targetinfo
+ }
+ osinfo = {
+ "darwin": "common-darwin",
+ "darwin9": "common-darwin",
+ "linux": "common-linux common-glibc",
+ "linux-gnu": "common-linux common-glibc",
+ "linux-gnux32": "common-linux common-glibc",
+ "linux-gnun32": "common-linux common-glibc",
+ "linux-gnueabi": "common-linux common-glibc",
+ "linux-gnuspe": "common-linux common-glibc",
+ "linux-uclibc": "common-linux common-uclibc",
+ "linux-uclibceabi": "common-linux common-uclibc",
+ "linux-uclibcspe": "common-linux common-uclibc",
+ "linux-musl": "common-linux common-musl",
+ "linux-musleabi": "common-linux common-musl",
+ "linux-muslspe": "common-linux common-musl",
+ "uclinux-uclibc": "common-uclibc",
+ "cygwin": "common-cygwin",
+ "mingw32": "common-mingw",
+ }
+ targetinfo = {
+ "aarch64-linux-gnu": "aarch64-linux",
+ "aarch64_be-linux-gnu": "aarch64_be-linux",
+ "aarch64-linux-musl": "aarch64-linux",
+ "aarch64_be-linux-musl": "aarch64_be-linux",
+ "arm-linux-gnueabi": "arm-linux",
+ "arm-linux-musleabi": "arm-linux",
+ "arm-linux-uclibceabi": "arm-linux-uclibc",
+ "armeb-linux-gnueabi": "armeb-linux",
+ "armeb-linux-uclibceabi": "armeb-linux-uclibc",
+ "armeb-linux-musleabi": "armeb-linux",
+ "mips-linux-musl": "mips-linux",
+ "mipsel-linux-musl": "mipsel-linux",
+ "mips64-linux-musl": "mips64-linux",
+ "mips64el-linux-musl": "mips64el-linux",
+ "mips64-linux-gnun32": "mips-linux bit-32",
+ "mips64el-linux-gnun32": "mipsel-linux bit-32",
+ "powerpc-linux": "powerpc32-linux",
+ "powerpc-linux-musl": "powerpc-linux powerpc32-linux",
+ "powerpc-linux-uclibc": "powerpc-linux powerpc32-linux",
+ "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux",
+ "powerpc-linux-muslspe": "powerpc-linux powerpc32-linux",
+ "powerpc-linux-uclibcspe": "powerpc-linux powerpc32-linux powerpc-linux-uclibc",
+ "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux",
+ "powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux",
+ "powerpc64-linux": "powerpc-linux",
+ "powerpc64-linux-musl": "powerpc-linux",
+ "x86_64-cygwin": "bit-64",
+ "x86_64-darwin": "bit-64",
+ "x86_64-darwin9": "bit-64",
+ "x86_64-linux": "bit-64",
+ "x86_64-linux-musl": "x86_64-linux bit-64",
+ "x86_64-linux-uclibc": "bit-64",
+ "x86_64-elf": "bit-64",
+ "x86_64-linux-gnu": "bit-64 x86_64-linux",
+ "x86_64-linux-gnux32": "bit-32 ix86-common x32-linux",
+ "x86_64-mingw32": "bit-64",
+ }
+
+ hostarch = d.getVar("HOST_ARCH", True)
+ hostos = d.getVar("HOST_OS", True)
+ target = "%s-%s" % (hostarch, hostos)
+
+ sitedata = []
+ if hostarch in archinfo:
+ sitedata.extend(archinfo[hostarch].split())
+ if hostos in osinfo:
+ sitedata.extend(osinfo[hostos].split())
+ if target in targetinfo:
+ sitedata.extend(targetinfo[target].split())
+ sitedata.append(target)
+ sitedata.append("common")
+
+ bb.debug(1, "SITE files %s" % sitedata);
+ return sitedata
+
+python () {
+ sitedata = set(siteinfo_data(d))
+ if "endian-little" in sitedata:
+ d.setVar("SITEINFO_ENDIANNESS", "le")
+ elif "endian-big" in sitedata:
+ d.setVar("SITEINFO_ENDIANNESS", "be")
+ else:
+ bb.error("Unable to determine endianness for architecture '%s'" %
+ d.getVar("HOST_ARCH", True))
+ bb.fatal("Please add your architecture to siteinfo.bbclass")
+
+ if "bit-32" in sitedata:
+ d.setVar("SITEINFO_BITS", "32")
+ elif "bit-64" in sitedata:
+ d.setVar("SITEINFO_BITS", "64")
+ else:
+ bb.error("Unable to determine bit size for architecture '%s'" %
+ d.getVar("HOST_ARCH", True))
+ bb.fatal("Please add your architecture to siteinfo.bbclass")
+}
+
+def siteinfo_get_files(d, no_cache = False):
+ sitedata = siteinfo_data(d)
+ sitefiles = ""
+ for path in d.getVar("BBPATH", True).split(":"):
+ for element in sitedata:
+ filename = os.path.join(path, "site", element)
+ if os.path.exists(filename):
+ sitefiles += filename + " "
+
+ if no_cache: return sitefiles
+
+ # Now check for siteconfig cache files
+ # Use the files copied to the aclocal cache generated by autotools.bbclass
+ # to avoid races
+ path_siteconfig = d.getVar('ACLOCALDIR', True)
+ if path_siteconfig and os.path.isdir(path_siteconfig):
+ for i in os.listdir(path_siteconfig):
+ if not i.endswith("_config"):
+ continue
+ filename = os.path.join(path_siteconfig, i)
+ sitefiles += filename + " "
+
+ return sitefiles
+
+#
+# Make some information available via variables
+#
+SITECONFIG_SYSROOTCACHE = "${STAGING_DATADIR}/${TARGET_SYS}_config_site.d"
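+
+# Illustrative use of the variables set above from a recipe (the configure
+# flag is a hypothetical example):
+# EXTRA_OECONF += "${@'--enable-64bit' if d.getVar('SITEINFO_BITS', True) == '64' else ''}"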
diff --git a/import-layers/yocto-poky/meta/classes/spdx.bbclass b/import-layers/yocto-poky/meta/classes/spdx.bbclass
new file mode 100644
index 000000000..0c9276584
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/spdx.bbclass
@@ -0,0 +1,365 @@
+# This class integrates real-time license scanning, generation of SPDX standard
+# output and verification of license info during the build process.
+# It is a combination of efforts from the OE-Core, SPDX and Fossology projects.
+#
+# For more information on FOSSology:
+# http://www.fossology.org
+#
+# For more information on FOSSologySPDX commandline:
+# https://github.com/spdx-tools/fossology-spdx/wiki/Fossology-SPDX-Web-API
+#
+# For more information on SPDX:
+# http://www.spdx.org
+#
+
+# The SPDX file will be output to the path defined as [SPDX_MANIFEST_DIR]
+# in ./meta/conf/licenses.conf.
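+#
+# A minimal illustrative activation (local.conf fragment; FOSS_SERVER and the
+# other scanner settings are expected to come from meta/conf/licenses.conf):
+# INHERIT += "spdx"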
+
+SPDXSSTATEDIR = "${WORKDIR}/spdx_sstate_dir"
+
+# If ${S} isn't actually the top-level source directory, set SPDX_S to point at
+# the real top-level directory.
+SPDX_S ?= "${S}"
+
+python do_spdx () {
+ import os, sys
+ import json, shutil
+
+ info = {}
+ info['workdir'] = d.getVar('WORKDIR', True)
+ info['sourcedir'] = d.getVar('SPDX_S', True)
+ info['pn'] = d.getVar('PN', True)
+ info['pv'] = d.getVar('PV', True)
+ info['spdx_version'] = d.getVar('SPDX_VERSION', True)
+ info['data_license'] = d.getVar('DATA_LICENSE', True)
+
+ sstatedir = d.getVar('SPDXSSTATEDIR', True)
+ sstatefile = os.path.join(sstatedir, info['pn'] + info['pv'] + ".spdx")
+
+ manifest_dir = d.getVar('SPDX_MANIFEST_DIR', True)
+ info['outfile'] = os.path.join(manifest_dir, info['pn'] + ".spdx" )
+
+ info['spdx_temp_dir'] = d.getVar('SPDX_TEMP_DIR', True)
+ info['tar_file'] = os.path.join(info['workdir'], info['pn'] + ".tar.gz" )
+
+ # Make sure important dirs exist
+ try:
+ bb.utils.mkdirhier(manifest_dir)
+ bb.utils.mkdirhier(sstatedir)
+ bb.utils.mkdirhier(info['spdx_temp_dir'])
+ except OSError as e:
+ bb.error("SPDX: Could not set up required directories: " + str(e))
+ return
+
+ ## get everything from cache. use it to decide if
+ ## something needs to be rerun
+ cur_ver_code = get_ver_code(info['sourcedir'])
+ cache_cur = False
+ if os.path.exists(sstatefile):
+ ## cache for this package exists. read it in
+ cached_spdx = get_cached_spdx(sstatefile)
+
+ if cached_spdx['PackageVerificationCode'] == cur_ver_code:
+ bb.warn("SPDX: Verification code for " + info['pn']
+ + "is same as cache's. do nothing")
+ cache_cur = True
+ else:
+ local_file_info = setup_foss_scan(info, True, cached_spdx['Files'])
+ else:
+ local_file_info = setup_foss_scan(info, False, None)
+
+ if cache_cur:
+ spdx_file_info = cached_spdx['Files']
+ foss_package_info = cached_spdx['Package']
+ foss_license_info = cached_spdx['Licenses']
+ else:
+ ## setup fossology command
+ foss_server = d.getVar('FOSS_SERVER', True)
+ foss_flags = d.getVar('FOSS_WGET_FLAGS', True)
+ foss_full_spdx = d.getVar('FOSS_FULL_SPDX', True) == "true" or False
+ foss_command = "wget %s --post-file=%s %s"\
+ % (foss_flags, info['tar_file'], foss_server)
+
+ foss_result = run_fossology(foss_command, foss_full_spdx)
+ if foss_result is not None:
+ (foss_package_info, foss_file_info, foss_license_info) = foss_result
+ spdx_file_info = create_spdx_doc(local_file_info, foss_file_info)
+ ## write to cache
+ write_cached_spdx(sstatefile, cur_ver_code, foss_package_info,
+ spdx_file_info, foss_license_info)
+ else:
+ bb.error("SPDX: Could not communicate with FOSSology server. Command was: " + foss_command)
+ return
+
+ ## Get document and package level information
+ spdx_header_info = get_header_info(info, cur_ver_code, foss_package_info)
+
+ ## CREATE MANIFEST
+ create_manifest(info, spdx_header_info, spdx_file_info, foss_license_info)
+
+ ## clean up the temp stuff
+ shutil.rmtree(info['spdx_temp_dir'], ignore_errors=True)
+ if os.path.exists(info['tar_file']):
+ remove_file(info['tar_file'])
+}
+addtask spdx after do_patch before do_configure
+
+def create_manifest(info, header, files, licenses):
+ import codecs
+ with codecs.open(info['outfile'], mode='w', encoding='utf-8') as f:
+ # Write header
+ f.write(header + '\n')
+
+ # Write file data
+ for chksum, block in files.iteritems():
+ f.write("FileName: " + block['FileName'] + '\n')
+ for key, value in block.iteritems():
+ if not key == 'FileName':
+ f.write(key + ": " + value + '\n')
+ f.write('\n')
+
+ # Write license data
+ for id, block in licenses.iteritems():
+ f.write("LicenseID: " + id + '\n')
+ for key, value in block.iteritems():
+ f.write(key + ": " + value + '\n')
+ f.write('\n')
+
+def get_cached_spdx(sstatefile):
+ import json
+ import codecs
+ cached_spdx_info = {}
+ with codecs.open(sstatefile, mode='r', encoding='utf-8') as f:
+ try:
+ cached_spdx_info = json.load(f)
+ except ValueError as e:
+ cached_spdx_info = None
+ return cached_spdx_info
+
+def write_cached_spdx(sstatefile, ver_code, package_info, files, license_info):
+ import json
+ import codecs
+ spdx_doc = {}
+ spdx_doc['PackageVerificationCode'] = ver_code
+ spdx_doc['Files'] = {}
+ spdx_doc['Files'] = files
+ spdx_doc['Package'] = {}
+ spdx_doc['Package'] = package_info
+ spdx_doc['Licenses'] = {}
+ spdx_doc['Licenses'] = license_info
+ with codecs.open(sstatefile, mode='w', encoding='utf-8') as f:
+ f.write(json.dumps(spdx_doc))
+
+def setup_foss_scan(info, cache, cached_files):
+ import errno, shutil
+ import tarfile
+ file_info = {}
+ cache_dict = {}
+
+ for f_dir, f in list_files(info['sourcedir']):
+ full_path = os.path.join(f_dir, f)
+ abs_path = os.path.join(info['sourcedir'], full_path)
+ dest_dir = os.path.join(info['spdx_temp_dir'], f_dir)
+ dest_path = os.path.join(info['spdx_temp_dir'], full_path)
+
+ checksum = hash_file(abs_path)
+ if not checksum is None:
+ file_info[checksum] = {}
+ ## retain cache information if it exists
+ if cache and checksum in cached_files:
+ file_info[checksum] = cached_files[checksum]
+ ## have the file included in what's sent to the FOSSology server
+ else:
+ file_info[checksum]['FileName'] = full_path
+ try:
+ bb.utils.mkdirhier(dest_dir)
+ shutil.copyfile(abs_path, dest_path)
+ except OSError as e:
+ bb.warn("SPDX: mkdirhier failed: " + str(e))
+ except shutil.Error as e:
+ bb.warn("SPDX: copyfile failed: " + str(e))
+ except IOError as e:
+ bb.warn("SPDX: copyfile failed: " + str(e))
+ else:
+ bb.warn("SPDX: Could not get checksum for file: " + f)
+
+ with tarfile.open(info['tar_file'], "w:gz") as tar:
+ tar.add(info['spdx_temp_dir'], arcname=os.path.basename(info['spdx_temp_dir']))
+
+ return file_info
+
+def remove_file(file_name):
+ try:
+ os.remove(file_name)
+ except OSError as e:
+ pass
+
+def list_files(dir):
+ for root, subFolders, files in os.walk(dir):
+ for f in files:
+ rel_root = os.path.relpath(root, dir)
+ yield rel_root, f
+ return
+
+def hash_file(file_name):
+ try:
+ with open(file_name, 'rb') as f:
+ data_string = f.read()
+ sha1 = hash_string(data_string)
+ return sha1
+ except:
+ return None
+
+def hash_string(data):
+ import hashlib
+ sha1 = hashlib.sha1()
+ sha1.update(data)
+ return sha1.hexdigest()
+
+def run_fossology(foss_command, full_spdx):
+ import string, re
+ import subprocess
+
+ p = subprocess.Popen(foss_command.split(),
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ foss_output, foss_error = p.communicate()
+ if p.returncode != 0:
+ return None
+
+ foss_output = unicode(foss_output, "utf-8")
+ foss_output = string.replace(foss_output, '\r', '')
+
+ # Package info
+ package_info = {}
+ if full_spdx:
+ # All mandatory, only one occurrence
+ package_info['PackageCopyrightText'] = re.findall('PackageCopyrightText: (.*?</text>)', foss_output, re.S)[0]
+ package_info['PackageLicenseDeclared'] = re.findall('PackageLicenseDeclared: (.*)', foss_output)[0]
+ package_info['PackageLicenseConcluded'] = re.findall('PackageLicenseConcluded: (.*)', foss_output)[0]
+        # There may be more than one of these
+ package_info['PackageLicenseInfoFromFiles'] = re.findall('PackageLicenseInfoFromFiles: (.*)', foss_output)
+ else:
+ DEFAULT = "NOASSERTION"
+ package_info['PackageCopyrightText'] = "<text>" + DEFAULT + "</text>"
+ package_info['PackageLicenseDeclared'] = DEFAULT
+ package_info['PackageLicenseConcluded'] = DEFAULT
+ package_info['PackageLicenseInfoFromFiles'] = []
+
+ # File info
+ file_info = {}
+ records = []
+ # FileName is also in PackageFileName, so we match on FileType as well.
+ records = re.findall('FileName:.*?FileType:.*?</text>', foss_output, re.S)
+ for rec in records:
+ chksum = re.findall('FileChecksum: SHA1: (.*)\n', rec)[0]
+ file_info[chksum] = {}
+ file_info[chksum]['FileCopyrightText'] = re.findall('FileCopyrightText: '
+ + '(.*?</text>)', rec, re.S )[0]
+ fields = ['FileName', 'FileType', 'LicenseConcluded', 'LicenseInfoInFile']
+ for field in fields:
+ file_info[chksum][field] = re.findall(field + ': (.*)', rec)[0]
+
+ # Licenses
+ license_info = {}
+ licenses = []
+ licenses = re.findall('LicenseID:.*?LicenseName:.*?\n', foss_output, re.S)
+ for lic in licenses:
+ license_id = re.findall('LicenseID: (.*)\n', lic)[0]
+ license_info[license_id] = {}
+ license_info[license_id]['ExtractedText'] = re.findall('ExtractedText: (.*?</text>)', lic, re.S)[0]
+ license_info[license_id]['LicenseName'] = re.findall('LicenseName: (.*)', lic)[0]
+
+ return (package_info, file_info, license_info)
+
+def create_spdx_doc(file_info, scanned_files):
+ import json
+ ## push foss changes back into cache
+ for chksum, lic_info in scanned_files.iteritems():
+ if chksum in file_info:
+ file_info[chksum]['FileType'] = lic_info['FileType']
+ file_info[chksum]['FileChecksum: SHA1'] = chksum
+ file_info[chksum]['LicenseInfoInFile'] = lic_info['LicenseInfoInFile']
+ file_info[chksum]['LicenseConcluded'] = lic_info['LicenseConcluded']
+ file_info[chksum]['FileCopyrightText'] = lic_info['FileCopyrightText']
+ else:
+ bb.warn("SPDX: " + lic_info['FileName'] + " : " + chksum
+ + " : is not in the local file info: "
+ + json.dumps(lic_info, indent=1))
+ return file_info
+
+def get_ver_code(dirname):
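+    # The verification code below is the SHA1 of the concatenated (lower-cased)
+    # per-file SHA1s; note the SPDX spec expects that list to be sorted before
+    # concatenation, which this implementation does not do.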
+ chksums = []
+ for f_dir, f in list_files(dirname):
+ hash = hash_file(os.path.join(dirname, f_dir, f))
+ if not hash is None:
+ chksums.append(hash)
+ else:
+ bb.warn("SPDX: Could not hash file: " + path)
+ ver_code_string = ''.join(chksums).lower()
+ ver_code = hash_string(ver_code_string)
+ return ver_code
+
+def get_header_info(info, spdx_verification_code, package_info):
+ """
+ Put together the header SPDX information.
+ Eventually this needs to become a lot less
+ of a hardcoded thing.
+ """
+ from datetime import datetime
+ import os
+ head = []
+ DEFAULT = "NOASSERTION"
+
+ package_checksum = hash_file(info['tar_file'])
+ if package_checksum is None:
+ package_checksum = DEFAULT
+
+ ## document level information
+ head.append("## SPDX Document Information")
+ head.append("SPDXVersion: " + info['spdx_version'])
+ head.append("DataLicense: " + info['data_license'])
+ head.append("DocumentComment: <text>SPDX for "
+ + info['pn'] + " version " + info['pv'] + "</text>")
+ head.append("")
+
+ ## Creator information
+ ## Note that this does not give time in UTC.
+ now = datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
+ head.append("## Creation Information")
+ ## Tools are supposed to have a version, but FOSSology+SPDX provides none.
+ head.append("Creator: Tool: FOSSology+SPDX")
+ head.append("Created: " + now)
+ head.append("CreatorComment: <text>UNO</text>")
+ head.append("")
+
+ ## package level information
+ head.append("## Package Information")
+ head.append("PackageName: " + info['pn'])
+ head.append("PackageVersion: " + info['pv'])
+ head.append("PackageFileName: " + os.path.basename(info['tar_file']))
+ head.append("PackageSupplier: Person:" + DEFAULT)
+ head.append("PackageDownloadLocation: " + DEFAULT)
+ head.append("PackageSummary: <text></text>")
+ head.append("PackageOriginator: Person:" + DEFAULT)
+ head.append("PackageChecksum: SHA1: " + package_checksum)
+ head.append("PackageVerificationCode: " + spdx_verification_code)
+ head.append("PackageDescription: <text>" + info['pn']
+ + " version " + info['pv'] + "</text>")
+ head.append("")
+ head.append("PackageCopyrightText: "
+ + package_info['PackageCopyrightText'])
+ head.append("")
+ head.append("PackageLicenseDeclared: "
+ + package_info['PackageLicenseDeclared'])
+ head.append("PackageLicenseConcluded: "
+ + package_info['PackageLicenseConcluded'])
+
+ for licref in package_info['PackageLicenseInfoFromFiles']:
+ head.append("PackageLicenseInfoFromFiles: " + licref)
+ head.append("")
+
+ ## header for file level
+ head.append("## File Information")
+ head.append("")
+
+ return '\n'.join(head)
diff --git a/import-layers/yocto-poky/meta/classes/sstate.bbclass b/import-layers/yocto-poky/meta/classes/sstate.bbclass
new file mode 100644
index 000000000..8c623271a
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/sstate.bbclass
@@ -0,0 +1,993 @@
+SSTATE_VERSION = "3"
+
+SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
+SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
+
+def generate_sstatefn(spec, hash, d):
+ if not hash:
+ hash = "INVALID"
+ return hash[:2] + "/" + spec + hash
+
+SSTATE_PKGARCH = "${PACKAGE_ARCH}"
+SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
+SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
+SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC', True), d.getVar('BB_TASKHASH', True), d)}"
+SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
+SSTATE_EXTRAPATH = ""
+SSTATE_EXTRAPATHWILDCARD = ""
+SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/${SSTATE_PKGSPEC}"
+
+# We don't want the sstate to depend on things like the distro string
+# of the system, we let the sstate paths take care of this.
+SSTATE_EXTRAPATH[vardepvalue] = ""
+
+# For multilib rpm the allarch packagegroup files can overwrite (in theory they're identical)
+SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/ ${DEPLOY_DIR_RPM}/all/"
+# Avoid docbook/sgml catalog warnings for now
+SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
+# Archive the sources for many architectures in one deploy folder
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"
+
+SSTATE_SCAN_FILES ?= "*.la *-config *_config"
+SSTATE_SCAN_CMD ?= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES", True).split())}" \) -type f'
+
+BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
+
+SSTATE_ARCHS = " \
+ ${BUILD_ARCH} \
+ ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
+ ${BUILD_ARCH}_${TARGET_ARCH} \
+ ${SDK_ARCH}_${SDK_OS} \
+ ${SDK_ARCH}_${PACKAGE_ARCH} \
+ allarch \
+ ${PACKAGE_ARCH} \
+ ${MACHINE}"
+
+SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
+
+SSTATECREATEFUNCS = "sstate_hardcode_path"
+SSTATEPOSTCREATEFUNCS = ""
+SSTATEPREINSTFUNCS = ""
+SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
+SSTATEPOSTINSTFUNCS = ""
+EXTRA_STAGING_FIXMES ?= ""
+SSTATECLEANFUNCS = ""
+
+# Check whether sstate exists for tasks that support sstate and are in the
+# locked signatures file.
+SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'
+
+# Check whether the task's computed hash matches the task's hash in the
+# locked signatures file.
+SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"
+
+# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
+# not sign)
+SSTATE_SIG_KEY ?= ""
+SSTATE_SIG_PASSPHRASE ?= ""
+# Whether to verify the GnuPG signatures when extracting sstate archives
+SSTATE_VERIFY_SIG ?= "0"
+
+python () {
+ if bb.data.inherits_class('native', d):
+ d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
+ elif bb.data.inherits_class('crosssdk', d):
+ d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
+ elif bb.data.inherits_class('cross', d):
+ d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${TARGET_ARCH}"))
+ elif bb.data.inherits_class('nativesdk', d):
+ d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
+ elif bb.data.inherits_class('cross-canadian', d):
+ d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
+ elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH", True) == "all":
+ d.setVar('SSTATE_PKGARCH', "allarch")
+ else:
+ d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))
+
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
+ d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
+ d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
+ d.setVar('SSTATE_EXTRAPATHWILDCARD', "*/")
+
+    # These classes encode staging paths into their scripts data so they can
+    # only be reused if we manipulate the paths
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d):
+ scan_cmd = "grep -Irl ${STAGING_DIR} ${SSTATE_BUILDDIR}"
+ d.setVar('SSTATE_SCAN_CMD', scan_cmd)
+
+ unique_tasks = set((d.getVar('SSTATETASKS', True) or "").split())
+ d.setVar('SSTATETASKS', " ".join(unique_tasks))
+ for task in unique_tasks:
+ d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
+ d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
+}
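+
+# A task hooks into sstate via varflags; an illustrative declaration (modelled
+# on deploy-style tasks elsewhere in the metadata, names are placeholders):
+# SSTATETASKS += "do_deploy"
+# do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"
+# do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"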
+
+def sstate_init(task, d):
+ ss = {}
+ ss['task'] = task
+ ss['dirs'] = []
+ ss['plaindirs'] = []
+ ss['lockfiles'] = []
+ ss['lockfiles-shared'] = []
+ return ss
+
+def sstate_state_fromvars(d, task = None):
+ if task is None:
+ task = d.getVar('BB_CURRENTTASK', True)
+ if not task:
+ bb.fatal("sstate code running without task context?!")
+ task = task.replace("_setscene", "")
+
+ if task.startswith("do_"):
+ task = task[3:]
+ inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs', True) or "").split()
+ outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs', True) or "").split()
+ plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs', True) or "").split()
+ lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile', True) or "").split()
+ lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared', True) or "").split()
+ interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs', True) or "").split()
+ if not task or len(inputs) != len(outputs):
+ bb.fatal("sstate variables not setup correctly?!")
+
+ if task == "populate_lic":
+ d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
+ d.setVar("SSTATE_EXTRAPATH", "")
+ d.setVar('SSTATE_EXTRAPATHWILDCARD', "")
+
+ ss = sstate_init(task, d)
+ for i in range(len(inputs)):
+ sstate_add(ss, inputs[i], outputs[i], d)
+ ss['lockfiles'] = lockfiles
+ ss['lockfiles-shared'] = lockfilesshared
+ ss['plaindirs'] = plaindirs
+ ss['interceptfuncs'] = interceptfuncs
+ return ss
+
+def sstate_add(ss, source, dest, d):
+ if not source.endswith("/"):
+ source = source + "/"
+ if not dest.endswith("/"):
+ dest = dest + "/"
+ source = os.path.normpath(source)
+ dest = os.path.normpath(dest)
+ srcbase = os.path.basename(source)
+ ss['dirs'].append([srcbase, source, dest])
+ return ss
+
+def sstate_install(ss, d):
+ import oe.path
+ import oe.sstatesig
+ import subprocess
+
+ sharedfiles = []
+ shareddirs = []
+ bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
+
+ sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
+
+ manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)
+
+ if os.access(manifest, os.R_OK):
+ bb.fatal("Package already staged (%s)?!" % manifest)
+
+ locks = []
+ for lock in ss['lockfiles-shared']:
+ locks.append(bb.utils.lockfile(lock, True))
+ for lock in ss['lockfiles']:
+ locks.append(bb.utils.lockfile(lock))
+
+ for state in ss['dirs']:
+ bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
+ for walkroot, dirs, files in os.walk(state[1]):
+ for file in files:
+ srcpath = os.path.join(walkroot, file)
+ dstpath = srcpath.replace(state[1], state[2])
+ #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
+ sharedfiles.append(dstpath)
+ for dir in dirs:
+ srcdir = os.path.join(walkroot, dir)
+ dstdir = srcdir.replace(state[1], state[2])
+ #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
+ if not dstdir.endswith("/"):
+ dstdir = dstdir + "/"
+ shareddirs.append(dstdir)
+
+ # Check the file list for conflicts against files which already exist
+ whitelist = (d.getVar("SSTATE_DUPWHITELIST", True) or "").split()
+ match = []
+ for f in sharedfiles:
+ if os.path.exists(f):
+ f = os.path.normpath(f)
+ realmatch = True
+ for w in whitelist:
+ if f.startswith(w):
+ realmatch = False
+ break
+ if realmatch:
+ match.append(f)
+ sstate_search_cmd = "grep -rl '%s' %s --exclude=master.list | sed -e 's:^.*/::' -e 's:\.populate-sysroot::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
+ search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
+ if search_output != "":
+ match.append("Matched in %s" % search_output.rstrip())
+ if match:
+ bb.error("The recipe %s is trying to install files into a shared " \
+ "area when those files already exist. Those files and their manifest " \
+ "location are:\n %s\nPlease verify which recipe should provide the " \
+ "above files.\nThe build has stopped as continuing in this scenario WILL " \
+ "break things, if not now, possibly in the future (we've seen builds fail " \
+ "several months later). If the system knew how to recover from this " \
+ "automatically it would however there are several different scenarios " \
+ "which can result in this and we don't know which one this is. It may be " \
+ "you have switched providers of something like virtual/kernel (e.g. from " \
+ "linux-yocto to linux-yocto-dev), in that case you need to execute the " \
+ "clean task for both recipes and it will resolve this error. It may be " \
+ "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
+ "those recipes should again resolve this error however switching " \
+ "DISTRO_FEATURES on an existing build directory is not supported, you " \
+ "should really clean out tmp and rebuild (reusing sstate should be safe). " \
+ "It could be the overlapping files detected are harmless in which case " \
+ "adding them to SSTATE_DUPWHITELIST may be the correct solution. It could " \
+ "also be your build is including two different conflicting versions of " \
+ "things (e.g. bluez 4 and bluez 5 and the correct solution for that would " \
+ "be to resolve the conflict. If in doubt, please ask on the mailing list, " \
+ "sharing the error and filelist above." % \
+ (d.getVar('PN', True), "\n ".join(match)))
+ bb.fatal("If the above message is too much, the simpler version is you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")
+
+ # Write out the manifest
+ f = open(manifest, "w")
+ for file in sharedfiles:
+ f.write(file + "\n")
+
+ # We want to ensure that directories appear at the end of the manifest
+ # so that when we test to see if they should be deleted any contents
+ # added by the task will have been removed first.
+ dirs = sorted(shareddirs, key=len)
+ # Must remove children first, which will have a longer path than the parent
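+    # e.g. ".../usr/include/" sorts after ".../usr/", so the child is
+    # written out (and later removed) before its parent (illustrative paths)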
+ for di in reversed(dirs):
+ f.write(di + "\n")
+ f.close()
+
+ # Append to the list of manifests for this PACKAGE_ARCH
+
+ i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
+ l = bb.utils.lockfile(i + ".lock")
+ filedata = d.getVar("STAMP", True) + " " + d2.getVar("SSTATE_MANFILEPREFIX", True) + " " + d.getVar("WORKDIR", True) + "\n"
+ manifests = []
+ if os.path.exists(i):
+ with open(i, "r") as f:
+ manifests = f.readlines()
+ if filedata not in manifests:
+ with open(i, "a+") as f:
+ f.write(filedata)
+ bb.utils.unlockfile(l)
+
+ # Run the actual file install
+ for state in ss['dirs']:
+ if os.path.exists(state[1]):
+ oe.path.copyhardlinktree(state[1], state[2])
+
+ for postinst in (d.getVar('SSTATEPOSTINSTFUNCS', True) or '').split():
+ # All hooks should run in the SSTATE_INSTDIR
+ bb.build.exec_func(postinst, d, (sstateinst,))
+
+ for lock in locks:
+ bb.utils.unlockfile(lock)
+
+sstate_install[vardepsexclude] += "SSTATE_DUPWHITELIST SSTATE_MANMACH SSTATE_MANFILEPREFIX"
+sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
+
+def sstate_installpkg(ss, d):
+ import oe.path
+ import subprocess
+ from oe.gpg_sign import get_signer
+
+ def prepdir(dir):
+ # remove dir if it exists, ensure any parent directories do exist
+ if os.path.exists(dir):
+ oe.path.remove(dir)
+ bb.utils.mkdirhier(dir)
+ oe.path.remove(dir)
+
+ sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
+ sstatefetch = d.getVar('SSTATE_PKGNAME', True) + '_' + ss['task'] + ".tgz"
+ sstatepkg = d.getVar('SSTATE_PKG', True) + '_' + ss['task'] + ".tgz"
+
+ if not os.path.exists(sstatepkg):
+ pstaging_fetch(sstatefetch, sstatepkg, d)
+
+ if not os.path.isfile(sstatepkg):
+ bb.note("Staging package %s does not exist" % sstatepkg)
+ return False
+
+ sstate_clean(ss, d)
+
+ d.setVar('SSTATE_INSTDIR', sstateinst)
+ d.setVar('SSTATE_PKG', sstatepkg)
+
+ if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG", True), False):
+ signer = get_signer(d, 'local')
+ if not signer.verify(sstatepkg + '.sig'):
+ bb.warn("Cannot verify signature on sstate package %s" % sstatepkg)
+
+ for f in (d.getVar('SSTATEPREINSTFUNCS', True) or '').split() + ['sstate_unpack_package'] + (d.getVar('SSTATEPOSTUNPACKFUNCS', True) or '').split():
+ # All hooks should run in the SSTATE_INSTDIR
+ bb.build.exec_func(f, d, (sstateinst,))
+
+ for state in ss['dirs']:
+ prepdir(state[1])
+ os.rename(sstateinst + state[0], state[1])
+ sstate_install(ss, d)
+
+ for plain in ss['plaindirs']:
+ workdir = d.getVar('WORKDIR', True)
+ src = sstateinst + "/" + plain.replace(workdir, '')
+ dest = plain
+ bb.utils.mkdirhier(src)
+ prepdir(dest)
+ os.rename(src, dest)
+
+ return True
+
+python sstate_hardcode_path_unpack () {
+ # Fixup hardcoded paths
+ #
+ # Note: The logic below must match the reverse logic in
+ # sstate_hardcode_path(d)
+ import subprocess
+
+ sstateinst = d.getVar('SSTATE_INSTDIR', True)
+ fixmefn = sstateinst + "fixmepath"
+ if os.path.isfile(fixmefn):
+ staging = d.getVar('STAGING_DIR', True)
+ staging_target = d.getVar('STAGING_DIR_TARGET', True)
+ staging_host = d.getVar('STAGING_DIR_HOST', True)
+
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
+ sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIR:%s:g'" % (staging)
+ elif bb.data.inherits_class('cross', d):
+ sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIR:%s:g'" % (staging_target, staging)
+ else:
+ sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
+
+ extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or ''
+ for fixmevar in extra_staging_fixmes.split():
+ fixme_path = d.getVar(fixmevar, True)
+ sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
+
+        # Add sstateinst to each filename in fixmepath and use xargs to call sed efficiently
+ sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)
+
+ bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
+ subprocess.call(sstate_hardcode_cmd, shell=True)
+
+ # Need to remove this or we'd copy it into the target directory and may
+ # conflict with another writer
+ os.remove(fixmefn)
+}
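+
+# Illustrative example of the fixme round-trip (assuming a native recipe):
+# sstate_hardcode_path() below rewrites the absolute sysroot path, e.g.
+# "/build/tmp/sysroots/x86_64-linux", to the token FIXMESTAGINGDIR and records
+# each affected file in "fixmepath"; the unpack hook above runs the reverse
+# sed over exactly those files once the real STAGING_DIR is known.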
+
+def sstate_clean_cachefile(ss, d):
+ import oe.path
+
+ sstatepkgfile = d.getVar('SSTATE_PATHSPEC', True) + "*_" + ss['task'] + ".tgz*"
+ bb.note("Removing %s" % sstatepkgfile)
+ oe.path.remove(sstatepkgfile)
+
+def sstate_clean_cachefiles(d):
+ for task in (d.getVar('SSTATETASKS', True) or "").split():
+ ld = d.createCopy()
+ ss = sstate_state_fromvars(ld, task)
+ sstate_clean_cachefile(ss, ld)
+
+def sstate_clean_manifest(manifest, d):
+ import oe.path
+
+ mfile = open(manifest)
+ entries = mfile.readlines()
+ mfile.close()
+
+ for entry in entries:
+ entry = entry.strip()
+ bb.debug(2, "Removing manifest: %s" % entry)
+ # We can race against another package populating directories as we're removing them
+ # so we ignore errors here.
+ try:
+ if entry.endswith("/"):
+ if os.path.islink(entry[:-1]):
+ os.remove(entry[:-1])
+ elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
+ os.rmdir(entry[:-1])
+ else:
+ oe.path.remove(entry)
+ except OSError:
+ pass
+
+ oe.path.remove(manifest)
+
+def sstate_clean(ss, d):
+ import oe.path
+ import glob
+
+ d2 = d.createCopy()
+ stamp_clean = d.getVar("STAMPCLEAN", True)
+ extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info', True)
+ if extrainf:
+ d2.setVar("SSTATE_MANMACH", extrainf)
+ wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
+ else:
+ wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])
+
+ manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])
+
+ if os.path.exists(manifest):
+ locks = []
+ for lock in ss['lockfiles-shared']:
+ locks.append(bb.utils.lockfile(lock))
+ for lock in ss['lockfiles']:
+ locks.append(bb.utils.lockfile(lock))
+
+ sstate_clean_manifest(manifest, d)
+
+ for lock in locks:
+ bb.utils.unlockfile(lock)
+
+ # Remove the current and previous stamps, but keep the sigdata.
+ #
+ # The glob() matches do_task* which may match multiple tasks, for
+ # example: do_package and do_package_write_ipk, so we need to
+ # exactly match *.do_task.* and *.do_task_setscene.*
+ rm_stamp = '.do_%s.' % ss['task']
+ rm_setscene = '.do_%s_setscene.' % ss['task']
+ # For BB_SIGNATURE_HANDLER = "noop"
+ rm_nohash = ".do_%s" % ss['task']
+ for stfile in glob.glob(wildcard_stfile):
+ # Keep the sigdata
+ if ".sigdata." in stfile:
+ continue
+ # Preserve taint files in the stamps directory
+ if stfile.endswith('.taint'):
+ continue
+ if rm_stamp in stfile or rm_setscene in stfile or \
+ stfile.endswith(rm_nohash):
+ oe.path.remove(stfile)
+
+ # Removes the users/groups created by the package
+ for cleanfunc in (d.getVar('SSTATECLEANFUNCS', True) or '').split():
+ bb.build.exec_func(cleanfunc, d)
+
+sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
+
+CLEANFUNCS += "sstate_cleanall"
+
+python sstate_cleanall() {
+ bb.note("Removing shared state for package %s" % d.getVar('PN', True))
+
+ manifest_dir = d.getVar('SSTATE_MANIFESTS', True)
+ if not os.path.exists(manifest_dir):
+ return
+
+ tasks = d.getVar('SSTATETASKS', True).split()
+ for name in tasks:
+ ld = d.createCopy()
+ shared_state = sstate_state_fromvars(ld, name)
+ sstate_clean(shared_state, ld)
+}
+
+python sstate_hardcode_path () {
+ import subprocess, platform
+
+ # Need to remove hardcoded paths and fix these when we install the
+ # staging packages.
+ #
+ # Note: the logic in this function needs to match the reverse logic
+ # in sstate_installpkg(ss, d)
+
+ staging = d.getVar('STAGING_DIR', True)
+ staging_target = d.getVar('STAGING_DIR_TARGET', True)
+ staging_host = d.getVar('STAGING_DIR_HOST', True)
+ sstate_builddir = d.getVar('SSTATE_BUILDDIR', True)
+
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
+ sstate_grep_cmd = "grep -l -e '%s'" % (staging)
+ sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIR:g'" % (staging)
+ elif bb.data.inherits_class('cross', d):
+ sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging)
+ sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRTARGET:g; s:%s:FIXMESTAGINGDIR:g'" % (staging_target, staging)
+ else:
+ sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
+ sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % (staging_host)
+
+ extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or ''
+ for fixmevar in extra_staging_fixmes.split():
+ fixme_path = d.getVar(fixmevar, True)
+ sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
+
+ fixmefn = sstate_builddir + "fixmepath"
+
+ sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD', True)
+ sstate_filelist_cmd = "tee %s" % (fixmefn)
+
+ # fixmepath file needs relative paths, drop sstate_builddir prefix
+ sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)
+
+ xargs_no_empty_run_cmd = '--no-run-if-empty'
+ if platform.system() == 'Darwin':
+ xargs_no_empty_run_cmd = ''
+
+ # Limit the fixpaths and sed operations based on the initial grep search
+ # This has the side effect of making sure the vfs cache is hot
+ sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)
+
+ bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
+ subprocess.call(sstate_hardcode_cmd, shell=True)
+
+    # If the fixmepath file is empty, remove it.
+ if os.stat(fixmefn).st_size == 0:
+ os.remove(fixmefn)
+ else:
+ bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
+ subprocess.call(sstate_filelist_relative_cmd, shell=True)
+}
+
+def sstate_package(ss, d):
+ import oe.path
+
+ def make_relative_symlink(path, outputpath, d):
+        # Replace absolute TMPDIR paths in symlinks with relative ones
+ if not os.path.islink(path):
+ return
+ link = os.readlink(path)
+ if not os.path.isabs(link):
+ return
+ if not link.startswith(tmpdir):
+ return
+
+ depth = outputpath.rpartition(tmpdir)[2].count('/')
+ base = link.partition(tmpdir)[2].strip()
+ while depth > 1:
+ base = "/.." + base
+ depth -= 1
+ base = "." + base
+
+ bb.debug(2, "Replacing absolute path %s with relative path %s for %s" % (link, base, outputpath))
+ os.remove(path)
+ os.symlink(base, path)
+
+ tmpdir = d.getVar('TMPDIR', True)
+
+ sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
+ sstatepkg = d.getVar('SSTATE_PKG', True) + '_'+ ss['task'] + ".tgz"
+ bb.utils.remove(sstatebuild, recurse=True)
+ bb.utils.mkdirhier(sstatebuild)
+ bb.utils.mkdirhier(os.path.dirname(sstatepkg))
+ for state in ss['dirs']:
+ if not os.path.exists(state[1]):
+ continue
+ srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
+ for walkroot, dirs, files in os.walk(state[1]):
+ for file in files:
+ srcpath = os.path.join(walkroot, file)
+ dstpath = srcpath.replace(state[1], state[2])
+ make_relative_symlink(srcpath, dstpath, d)
+ for dir in dirs:
+ srcpath = os.path.join(walkroot, dir)
+ dstpath = srcpath.replace(state[1], state[2])
+ make_relative_symlink(srcpath, dstpath, d)
+ bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
+ oe.path.copyhardlinktree(state[1], sstatebuild + state[0])
+
+ workdir = d.getVar('WORKDIR', True)
+ for plain in ss['plaindirs']:
+ pdir = plain.replace(workdir, sstatebuild)
+ bb.utils.mkdirhier(plain)
+ bb.utils.mkdirhier(pdir)
+ oe.path.copyhardlinktree(plain, pdir)
+
+ d.setVar('SSTATE_BUILDDIR', sstatebuild)
+ d.setVar('SSTATE_PKG', sstatepkg)
+
+ for f in (d.getVar('SSTATECREATEFUNCS', True) or '').split() + \
+ ['sstate_create_package', 'sstate_sign_package'] + \
+ (d.getVar('SSTATEPOSTCREATEFUNCS', True) or '').split():
+ # All hooks should run in SSTATE_BUILDDIR.
+ bb.build.exec_func(f, d, (sstatebuild,))
+
+ bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)
+
+ return
+
+def pstaging_fetch(sstatefetch, sstatepkg, d):
+ import bb.fetch2
+
+ # Only try and fetch if the user has configured a mirror
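+    # (configured e.g. as SSTATE_MIRRORS ?= "file://.* http://example.com/sstate-cache/PATH",
+    # where PATH is substituted by the mirror handling; illustrative URL)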
+ mirrors = d.getVar('SSTATE_MIRRORS', True)
+ if not mirrors:
+ return
+
+ # Copy the data object and override DL_DIR and SRC_URI
+ localdata = bb.data.createCopy(d)
+ bb.data.update_data(localdata)
+
+ dldir = localdata.expand("${SSTATE_DIR}")
+ bb.utils.mkdirhier(dldir)
+
+ localdata.delVar('MIRRORS')
+ localdata.setVar('FILESPATH', dldir)
+ localdata.setVar('DL_DIR', dldir)
+ localdata.setVar('PREMIRRORS', mirrors)
+
+ # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
+ # we'll want to allow network access for the current set of fetches.
+ if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
+ localdata.delVar('BB_NO_NETWORK')
+
+    # Try a fetch from the sstate mirror; if it fails, just return and
+    # we will build the package ourselves
+ uris = ['file://{0}'.format(sstatefetch),
+ 'file://{0}.siginfo'.format(sstatefetch)]
+ if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG", True), False):
+ uris += ['file://{0}.sig'.format(sstatefetch)]
+
+ for srcuri in uris:
+ localdata.setVar('SRC_URI', srcuri)
+ try:
+ fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
+ fetcher.download()
+
+            # Need to optimise this: if using file:// urls, the fetcher just changes the local path.
+            # For now, work around this by symlinking.
+ localpath = bb.data.expand(fetcher.localpath(srcuri), localdata)
+ if localpath != sstatepkg and os.path.exists(localpath) and not os.path.exists(sstatepkg):
+ os.symlink(localpath, sstatepkg)
+
+ except bb.fetch2.BBFetchException:
+ break
+
+def sstate_setscene(d):
+ shared_state = sstate_state_fromvars(d)
+ accelerate = sstate_installpkg(shared_state, d)
+ if not accelerate:
+ raise bb.build.FuncFailed("No suitable staging package found")
+
+python sstate_task_prefunc () {
+ shared_state = sstate_state_fromvars(d)
+ sstate_clean(shared_state, d)
+}
+sstate_task_prefunc[dirs] = "${WORKDIR}"
+
+python sstate_task_postfunc () {
+ shared_state = sstate_state_fromvars(d)
+
+ sstate_install(shared_state, d)
+ for intercept in shared_state['interceptfuncs']:
+ bb.build.exec_func(intercept, d, (d.getVar("WORKDIR", True),))
+ omask = os.umask(002)
+ if omask != 002:
+ bb.note("Using umask 002 (not %0o) for sstate packaging" % omask)
+ sstate_package(shared_state, d)
+ os.umask(omask)
+}
+sstate_task_postfunc[dirs] = "${WORKDIR}"
+
+
+#
+# Shell function to generate a sstate package from a directory
+# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
+#
+sstate_create_package () {
+ TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
+ # Need to handle empty directories
+ if [ "$(ls -A)" ]; then
+ set +e
+ tar -czf $TFILE *
+ ret=$?
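+		# GNU tar exits with 1 when files changed while being read,
+		# which we tolerate here; any other non-zero status is fatal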
+ if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
+ exit 1
+ fi
+ set -e
+ else
+ tar -cz --file=$TFILE --files-from=/dev/null
+ fi
+ chmod 0664 $TFILE
+ mv -f $TFILE ${SSTATE_PKG}
+
+ cd ${WORKDIR}
+ rm -rf ${SSTATE_BUILDDIR}
+}
+
+python sstate_sign_package () {
+ from oe.gpg_sign import get_signer
+
+ if d.getVar('SSTATE_SIG_KEY', True):
+ signer = get_signer(d, 'local')
+ sstate_pkg = d.getVar('SSTATE_PKG', True)
+ if os.path.exists(sstate_pkg + '.sig'):
+ os.unlink(sstate_pkg + '.sig')
+ signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
+ d.getVar('SSTATE_SIG_PASSPHRASE', True), armor=False)
+}
+
+#
+# Shell function to decompress and prepare a package for installation
+# Will be run from within SSTATE_INSTDIR.
+#
+sstate_unpack_package () {
+ tar -xvzf ${SSTATE_PKG}
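+	# Refresh the mtime on the package and signature files, presumably so
+	# that age-based cleanup of the sstate cache sees them as recently used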
+ # Use "! -w ||" to return true for read only files
+ [ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
+ [ ! -w ${SSTATE_PKG}.sig ] || [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig
+ [ ! -w ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo
+}
+
+BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
+
+def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
+
+ ret = []
+ missed = []
+ extension = ".tgz"
+ if siginfo:
+ extension = extension + ".siginfo"
+
+ def getpathcomponents(task, d):
+ # Magic data from BB_HASHFILENAME
+ splithashfn = sq_hashfn[task].split(" ")
+ spec = splithashfn[1]
+ if splithashfn[0] == "True":
+ extrapath = d.getVar("NATIVELSBSTRING", True) + "/"
+ else:
+ extrapath = ""
+
+ tname = sq_task[task][3:]
+
+ if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
+ spec = splithashfn[2]
+ extrapath = ""
+
+ return spec, extrapath, tname
+
+
+ for task in range(len(sq_fn)):
+
+ spec, extrapath, tname = getpathcomponents(task, d)
+
+ sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + extension)
+
+ if os.path.exists(sstatefile):
+ bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
+ ret.append(task)
+ continue
+ else:
+ missed.append(task)
+ bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
+
+ mirrors = d.getVar("SSTATE_MIRRORS", True)
+ if mirrors:
+ # Copy the data object and override DL_DIR and SRC_URI
+ localdata = bb.data.createCopy(d)
+ bb.data.update_data(localdata)
+
+ dldir = localdata.expand("${SSTATE_DIR}")
+ localdata.delVar('MIRRORS')
+ localdata.setVar('FILESPATH', dldir)
+ localdata.setVar('DL_DIR', dldir)
+ localdata.setVar('PREMIRRORS', mirrors)
+
+ bb.debug(2, "SState using premirror of: %s" % mirrors)
+
+ # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
+ # we'll want to allow network access for the current set of fetches.
+ if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
+ localdata.delVar('BB_NO_NETWORK')
+
+ from bb.fetch2 import FetchConnectionCache
+ def checkstatus_init(thread_worker):
+ thread_worker.connection_cache = FetchConnectionCache()
+
+ def checkstatus_end(thread_worker):
+ thread_worker.connection_cache.close_connections()
+
+ def checkstatus(thread_worker, arg):
+ (task, sstatefile) = arg
+
+ localdata2 = bb.data.createCopy(localdata)
+ srcuri = "file://" + sstatefile
+            localdata2.setVar('SRC_URI', srcuri)
+ bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
+
+ try:
+ fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
+ connection_cache=thread_worker.connection_cache)
+ fetcher.checkstatus()
+ bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
+ ret.append(task)
+ if task in missed:
+ missed.remove(task)
+            except:
+                missed.append(task)
+                bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
+
+ tasklist = []
+ for task in range(len(sq_fn)):
+ if task in ret:
+ continue
+ spec, extrapath, tname = getpathcomponents(task, d)
+ sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + extension)
+ tasklist.append((task, sstatefile))
+
+ if tasklist:
+ bb.note("Checking sstate mirror object availability (for %s objects)" % len(tasklist))
+ import multiprocessing
+ nproc = min(multiprocessing.cpu_count(), len(tasklist))
+
+ pool = oe.utils.ThreadedPool(nproc, len(tasklist),
+ worker_init=checkstatus_init, worker_end=checkstatus_end)
+ for t in tasklist:
+ pool.add_task(checkstatus, t)
+ pool.start()
+ pool.wait_completion()
+
+ inheritlist = d.getVar("INHERIT", True)
+ if "toaster" in inheritlist:
+        evdata = {'missed': [], 'found': []}
+ for task in missed:
+ spec, extrapath, tname = getpathcomponents(task, d)
+ sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz")
+ evdata['missed'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
+ for task in ret:
+ spec, extrapath, tname = getpathcomponents(task, d)
+ sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz")
+ evdata['found'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
+ bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
+
+ if hasattr(bb.parse.siggen, "checkhashes"):
+ bb.parse.siggen.checkhashes(missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d)
+
+ return ret
+
+BB_SETSCENE_DEPVALID = "setscene_depvalid"
+
+def setscene_depvalid(task, taskdependees, notneeded, d):
+ # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
+ # task is included in taskdependees too
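+    # e.g. taskdependees[dep] might be
+    # ['glibc', 'do_populate_sysroot', '/path/to/glibc_2.23.bb'] (illustrative)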
+
+ bb.debug(2, "Considering setscene task: %s" % (str(taskdependees[task])))
+
+ def isNativeCross(x):
+ return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x
+
+ def isPostInstDep(x):
+ if x in ["qemu-native", "gdk-pixbuf-native", "qemuwrapper-cross", "depmodwrapper-cross", "systemd-systemctl-native", "gtk-icon-utils-native", "ca-certificates-native"]:
+ return True
+ return False
+
+ # We only need to trigger populate_lic through direct dependencies
+ if taskdependees[task][1] == "do_populate_lic":
+ return True
+
+ # We only need to trigger packagedata through direct dependencies
+ # but need to preserve packagedata on packagedata links
+ if taskdependees[task][1] == "do_packagedata":
+ for dep in taskdependees:
+ if taskdependees[dep][1] == "do_packagedata":
+ return False
+ return True
+
+ for dep in taskdependees:
+ bb.debug(2, " considering dependency: %s" % (str(taskdependees[dep])))
+ if task == dep:
+ continue
+ if dep in notneeded:
+ continue
+        # do_package_write_* and do_package don't need do_package
+ if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
+ continue
+        # do_package_write_* and do_package don't need do_populate_sysroot, unless it is a postinstall dependency
+ if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
+ if isPostInstDep(taskdependees[task][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
+ return False
+ continue
+ # Native/Cross packages don't exist and are noexec anyway
+ if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
+ continue
+
+ # This is due to the [depends] in useradd.bbclass complicating matters
+ # The logic *is* reversed here due to the way hard setscene dependencies are injected
+ if (taskdependees[task][1] == 'do_package' or taskdependees[task][1] == 'do_populate_sysroot') and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
+ continue
+
+ # Consider sysroot depending on sysroot tasks
+ if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
+ # base-passwd/shadow-sysroot don't need their dependencies
+ if taskdependees[dep][0].endswith(("base-passwd", "shadow-sysroot")):
+ continue
+ # Nothing need depend on libc-initial/gcc-cross-initial
+ if "-initial" in taskdependees[task][0]:
+ continue
+ # Native/Cross populate_sysroot need their dependencies
+ if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
+ return False
+ # Target populate_sysroot depended on by cross tools need to be installed
+ if isNativeCross(taskdependees[dep][0]):
+ return False
+ # Native/cross tools depended upon by target sysroot are not needed
+ if isNativeCross(taskdependees[task][0]):
+ continue
+ # Target populate_sysroot need their dependencies
+ return False
+
+ if taskdependees[task][1] == 'do_shared_workdir':
+ continue
+
+ if taskdependees[dep][1] == "do_populate_lic":
+ continue
+
+
+ # Safe fallthrough default
+ bb.debug(2, " Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])))
+ return False
+ return True
+
+addhandler sstate_eventhandler
+sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
+python sstate_eventhandler() {
+ d = e.data
+ # When we write an sstate package we rewrite the SSTATE_PKG
+ spkg = d.getVar('SSTATE_PKG', True)
+ if not spkg.endswith(".tgz"):
+ taskname = d.getVar("BB_RUNTASK", True)[3:]
+ spec = d.getVar('SSTATE_PKGSPEC', True)
+ swspec = d.getVar('SSTATE_SWSPEC', True)
+ if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
+ d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
+ d.setVar("SSTATE_EXTRAPATH", "")
+ sstatepkg = d.getVar('SSTATE_PKG', True)
+        bb.siggen.dump_this_task(sstatepkg + '_' + taskname + ".tgz.siginfo", d)
+}
+
+SSTATE_PRUNE_OBSOLETEWORKDIR = "1"
+
+# Event handler which removes manifests and stamp files for
+# recipes which are no longer reachable in a build where they
+# once were.
+# Also optionally removes the workdir of those tasks/recipes
+#
+addhandler sstate_eventhandler2
+sstate_eventhandler2[eventmask] = "bb.event.ReachableStamps"
+python sstate_eventhandler2() {
+ import glob
+ d = e.data
+ stamps = e.stamps.values()
+ removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
+ seen = []
+ for a in d.getVar("SSTATE_ARCHS", True).split():
+ toremove = []
+ i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
+ if not os.path.exists(i):
+ continue
+ with open(i, "r") as f:
+ lines = f.readlines()
+ for l in lines:
+ (stamp, manifest, workdir) = l.split()
+ if stamp not in stamps:
+ toremove.append(l)
+ if stamp not in seen:
+ bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
+ seen.append(stamp)
+
+ if toremove:
+ bb.note("There are %d recipes to be removed from sysroot %s, removing..." % (len(toremove), a))
+
+ for r in toremove:
+ (stamp, manifest, workdir) = r.split()
+ for m in glob.glob(manifest + ".*"):
+ sstate_clean_manifest(m, d)
+ bb.utils.remove(stamp + "*")
+ if removeworkdir:
+ bb.utils.remove(workdir, recurse = True)
+ lines.remove(r)
+ with open(i, "w") as f:
+ for l in lines:
+ f.write(l)
+}
diff --git a/import-layers/yocto-poky/meta/classes/staging.bbclass b/import-layers/yocto-poky/meta/classes/staging.bbclass
new file mode 100644
index 000000000..bc5dfa81a
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/staging.bbclass
@@ -0,0 +1,221 @@
+
+sysroot_stage_dir() {
+ src="$1"
+ dest="$2"
+ # if the src doesn't exist don't do anything
+ if [ ! -d "$src" ]; then
+ return
+ fi
+
+ mkdir -p "$dest"
+ (
+ cd $src
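+		# cpio in pass-through mode (-p): -d creates leading directories,
+		# -l hardlinks instead of copying where possible, -u overwrites
+		# unconditionally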
+ find . -print0 | cpio --null -pdlu $dest
+ )
+}
+
+sysroot_stage_libdir() {
+ src="$1"
+ dest="$2"
+
+ sysroot_stage_dir $src $dest
+}
+
+sysroot_stage_dirs() {
+ from="$1"
+ to="$2"
+
+ sysroot_stage_dir $from${includedir} $to${includedir}
+ if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
+ sysroot_stage_dir $from${bindir} $to${bindir}
+ sysroot_stage_dir $from${sbindir} $to${sbindir}
+ sysroot_stage_dir $from${base_bindir} $to${base_bindir}
+ sysroot_stage_dir $from${base_sbindir} $to${base_sbindir}
+ sysroot_stage_dir $from${libexecdir} $to${libexecdir}
+ sysroot_stage_dir $from${sysconfdir} $to${sysconfdir}
+ sysroot_stage_dir $from${localstatedir} $to${localstatedir}
+ fi
+ if [ -d $from${libdir} ]
+ then
+ sysroot_stage_libdir $from${libdir} $to${libdir}
+ fi
+ if [ -d $from${base_libdir} ]
+ then
+ sysroot_stage_libdir $from${base_libdir} $to${base_libdir}
+ fi
+ if [ -d $from${nonarch_base_libdir} ]
+ then
+ sysroot_stage_libdir $from${nonarch_base_libdir} $to${nonarch_base_libdir}
+ fi
+ sysroot_stage_dir $from${datadir} $to${datadir}
+ # We don't care about docs/info/manpages/locales
+	rm -rf $to${mandir}/ $to${docdir}/ $to${infodir}/ $to${datadir}/locale/
+ rm -rf $to${datadir}/applications/ $to${datadir}/fonts/ $to${datadir}/pixmaps/
+}
+
+sysroot_stage_all() {
+ sysroot_stage_dirs ${D} ${SYSROOT_DESTDIR}
+}
+
+python sysroot_strip () {
+ import stat, errno
+
+ dvar = d.getVar('SYSROOT_DESTDIR', True)
+ pn = d.getVar('PN', True)
+
+ os.chdir(dvar)
+
+ # Return type (bits):
+ # 0 - not elf
+ # 1 - ELF
+ # 2 - stripped
+ # 4 - executable
+ # 8 - shared library
+ # 16 - kernel module
+ def isELF(path):
+ type = 0
+ ret, result = oe.utils.getstatusoutput("file \"%s\"" % path.replace("\"", "\\\""))
+
+ if ret:
+ bb.error("split_and_strip_files: 'file %s' failed" % path)
+ return type
+
+        if "ELF" in result:
+            type |= 1
+            # Set the "stripped" bit when the "not stripped" marker is absent
+            if "not stripped" not in result:
+                type |= 2
+ if "executable" in result:
+ type |= 4
+ if "shared" in result:
+ type |= 8
+ return type
+
+
+ elffiles = {}
+ inodes = {}
+ libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir", True))
+ baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir", True))
+ if (d.getVar('INHIBIT_SYSROOT_STRIP', True) != '1'):
+ #
+        # First let's figure out all of the files we may have to process
+ #
+ for root, dirs, files in os.walk(dvar):
+ for f in files:
+ file = os.path.join(root, f)
+
+ try:
+ ltarget = oe.path.realpath(file, dvar, False)
+ s = os.lstat(ltarget)
+ except OSError as e:
+ (err, strerror) = e.args
+ if err != errno.ENOENT:
+ raise
+ # Skip broken symlinks
+ continue
+ if not s:
+ continue
+                # Check if it is an executable
+ if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \
+ or ((file.startswith(libdir) or file.startswith(baselibdir)) and ".so" in f):
+                    # Skip symlinks; their targets will be processed when the
+                    # walk reaches them
+                    if os.path.islink(file):
+                        continue
+
+ # It's a file (or hardlink), not a link
+ # ...but is it ELF, and is it already stripped?
+ elf_file = isELF(file)
+ if elf_file & 1:
+ if elf_file & 2:
+ if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn, True) or "").split():
+ bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
+ else:
+ bb.warn("File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn))
+ continue
+
+ if s.st_ino in inodes:
+ os.unlink(file)
+ os.link(inodes[s.st_ino], file)
+ else:
+ inodes[s.st_ino] = file
+ # break hardlink
+ bb.utils.copyfile(file, file)
+ elffiles[file] = elf_file
+
+ #
+ # Now strip them (in parallel)
+ #
+ strip = d.getVar("STRIP", True)
+ sfiles = []
+ for file in elffiles:
+ elf_file = int(elffiles[file])
+ #bb.note("Strip %s" % file)
+ sfiles.append((file, elf_file, strip))
+
+ oe.utils.multiprocess_exec(sfiles, oe.package.runstrip)
+}
+
+do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"
+do_populate_sysroot[umask] = "022"
+
+addtask populate_sysroot after do_install
+
+SYSROOT_PREPROCESS_FUNCS ?= ""
+SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir"
+SYSROOT_LOCK = "${STAGING_DIR}/staging.lock"
+
+# We clean out any existing sstate from the sysroot if we rerun configure
+python sysroot_cleansstate () {
+ ss = sstate_state_fromvars(d, "populate_sysroot")
+ sstate_clean(ss, d)
+}
+do_configure[prefuncs] += "sysroot_cleansstate"
+
+
+BB_SETSCENE_VERIFY_FUNCTION = "sysroot_checkhashes"
+
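+# Flag covered do_populate_sysroot tasks whose do_configure is invalid and
+# will rerun: the sysroot_cleansstate prefunc above would otherwise wipe the
+# staging that the setscene task had just installed.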
+def sysroot_checkhashes(covered, tasknames, fnids, fns, d, invalidtasks = None):
+ problems = set()
+ configurefnids = set()
+ if not invalidtasks:
+ invalidtasks = xrange(len(tasknames))
+ for task in invalidtasks:
+ if tasknames[task] == "do_configure" and task not in covered:
+ configurefnids.add(fnids[task])
+ for task in covered:
+ if tasknames[task] == "do_populate_sysroot" and fnids[task] in configurefnids:
+ problems.add(task)
+ return problems
+
+python do_populate_sysroot () {
+ bb.build.exec_func("sysroot_stage_all", d)
+ bb.build.exec_func("sysroot_strip", d)
+ for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS', True) or '').split():
+ bb.build.exec_func(f, d)
+ pn = d.getVar("PN", True)
+ multiprov = d.getVar("MULTI_PROVIDER_WHITELIST", True).split()
+ provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/")
+ bb.utils.mkdirhier(provdir)
+ for p in d.getVar("PROVIDES", True).split():
+ if p in multiprov:
+ continue
+ p = p.replace("/", "_")
+ with open(provdir + p, "w") as f:
+ f.write(pn)
+}
+
+do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
+do_populate_sysroot[vardepsexclude] += "MULTI_PROVIDER_WHITELIST"
+
+SSTATETASKS += "do_populate_sysroot"
+do_populate_sysroot[cleandirs] = "${SYSROOT_DESTDIR}"
+do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}"
+do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_HOST}/"
+do_populate_sysroot[stamp-extra-info] = "${MACHINE}"
+
+python do_populate_sysroot_setscene () {
+ sstate_setscene(d)
+}
+addtask do_populate_sysroot_setscene
+
+
diff --git a/import-layers/yocto-poky/meta/classes/syslinux.bbclass b/import-layers/yocto-poky/meta/classes/syslinux.bbclass
new file mode 100644
index 000000000..4fcb0c5e7
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/syslinux.bbclass
@@ -0,0 +1,192 @@
+# syslinux.bbclass
+# Copyright (C) 2004-2006, Advanced Micro Devices, Inc. All Rights Reserved
+# Released under the MIT license (see packages/COPYING)
+
+# Provide syslinux specific functions for building bootable images.
+
+# External variables
+# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
+# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
+# ${AUTO_SYSLINUXMENU} - set this to 1 to enable creating an automatic menu
+# ${LABELS} - a list of targets for the automatic config
+# ${APPEND} - an override list of append strings for each label
+# ${SYSLINUX_OPTS} - additional options to add to the syslinux file ';' delimited
+# ${SYSLINUX_SPLASH} - A background for the vga boot menu if using the boot menu
+# ${SYSLINUX_DEFAULT_CONSOLE} - set to "console=ttyX" to change kernel boot default console
+# ${SYSLINUX_SERIAL} - Set an alternate serial port or turn off serial with empty string
+# ${SYSLINUX_SERIAL_TTY} - Set alternate console=tty... kernel boot argument
+# ${SYSLINUX_KERNEL_ARGS} - Add additional kernel arguments
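+#
+# Example usage (illustrative values):
+#   LABELS = "boot install"
+#   APPEND = "rootwait console=tty0"
+#   SYSLINUX_SPLASH = "${THISDIR}/files/splash.lss"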
+
+do_bootimg[depends] += "${MLPREFIX}syslinux:do_populate_sysroot \
+ syslinux-native:do_populate_sysroot"
+
+ISOLINUXDIR ?= "/isolinux"
+SYSLINUXDIR = "/"
+# The kernel has an internal default console, which you can override with
+# a console=...some_tty...
+SYSLINUX_DEFAULT_CONSOLE ?= ""
+SYSLINUX_SERIAL ?= "0 115200"
+SYSLINUX_SERIAL_TTY ?= "console=ttyS0,115200"
+SYSLINUX_PROMPT ?= "0"
+SYSLINUX_TIMEOUT ?= "50"
+AUTO_SYSLINUXMENU ?= "1"
+SYSLINUX_ROOT ?= "${ROOT}"
+SYSLINUX_CFG_VM ?= "${S}/syslinux_vm.cfg"
+SYSLINUX_CFG_LIVE ?= "${S}/syslinux_live.cfg"
+APPEND ?= ""
+
+# Need UUID utility code.
+inherit fs-uuid
+
+syslinux_populate() {
+ DEST=$1
+ BOOTDIR=$2
+ CFGNAME=$3
+
+ install -d ${DEST}${BOOTDIR}
+
+ # Install the config files
+ install -m 0644 ${SYSLINUX_CFG} ${DEST}${BOOTDIR}/${CFGNAME}
+ if [ "${AUTO_SYSLINUXMENU}" = 1 ] ; then
+ install -m 0644 ${STAGING_DATADIR}/syslinux/vesamenu.c32 ${DEST}${BOOTDIR}/vesamenu.c32
+ install -m 0444 ${STAGING_DATADIR}/syslinux/libcom32.c32 ${DEST}${BOOTDIR}/libcom32.c32
+ install -m 0444 ${STAGING_DATADIR}/syslinux/libutil.c32 ${DEST}${BOOTDIR}/libutil.c32
+ if [ "${SYSLINUX_SPLASH}" != "" ] ; then
+ install -m 0644 ${SYSLINUX_SPLASH} ${DEST}${BOOTDIR}/splash.lss
+ fi
+ fi
+}
+
+syslinux_iso_populate() {
+ iso_dir=$1
+ syslinux_populate $iso_dir ${ISOLINUXDIR} isolinux.cfg
+ install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin $iso_dir${ISOLINUXDIR}
+ install -m 0644 ${STAGING_DATADIR}/syslinux/ldlinux.c32 $iso_dir${ISOLINUXDIR}
+}
+
+syslinux_hddimg_populate() {
+ hdd_dir=$1
+ syslinux_populate $hdd_dir ${SYSLINUXDIR} syslinux.cfg
+ install -m 0444 ${STAGING_DATADIR}/syslinux/ldlinux.sys $hdd_dir${SYSLINUXDIR}/ldlinux.sys
+}
+
+syslinux_hddimg_install() {
+ syslinux ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
+}
+
+syslinux_hdddirect_install() {
+ DEST=$1
+ syslinux $DEST
+}
+
+python build_syslinux_cfg () {
+ import copy
+ import sys
+
+ workdir = d.getVar('WORKDIR', True)
+ if not workdir:
+ bb.error("WORKDIR not defined, unable to package")
+ return
+
+    labels = d.getVar('LABELS', True)
+    # LABELS is a string; also bail out if it is empty or whitespace-only
+    if not labels or not labels.split():
+        bb.debug(1, "LABELS not defined, nothing to do")
+        return
+
+ cfile = d.getVar('SYSLINUX_CFG', True)
+ if not cfile:
+ raise bb.build.FuncFailed('Unable to read SYSLINUX_CFG')
+
+    try:
+        cfgfile = file(cfile, 'w')
+    except (IOError, OSError):
+        raise bb.build.FuncFailed('Unable to open %s' % (cfile))
+
+ cfgfile.write('# Automatically created by OE\n')
+
+ opts = d.getVar('SYSLINUX_OPTS', True)
+
+ if opts:
+ for opt in opts.split(';'):
+ cfgfile.write('%s\n' % opt)
+
+    cfgfile.write('ALLOWOPTIONS 1\n')
+ syslinux_default_console = d.getVar('SYSLINUX_DEFAULT_CONSOLE', True)
+ syslinux_serial_tty = d.getVar('SYSLINUX_SERIAL_TTY', True)
+ syslinux_serial = d.getVar('SYSLINUX_SERIAL', True)
+ if syslinux_serial:
+ cfgfile.write('SERIAL %s\n' % syslinux_serial)
+
+ menu = (d.getVar('AUTO_SYSLINUXMENU', True) == "1")
+
+ if menu and syslinux_serial:
+ cfgfile.write('DEFAULT Graphics console %s\n' % (labels.split()[0]))
+ else:
+ cfgfile.write('DEFAULT %s\n' % (labels.split()[0]))
+
+ timeout = d.getVar('SYSLINUX_TIMEOUT', True)
+
+ if timeout:
+ cfgfile.write('TIMEOUT %s\n' % timeout)
+ else:
+ cfgfile.write('TIMEOUT 50\n')
+
+ prompt = d.getVar('SYSLINUX_PROMPT', True)
+ if prompt:
+ cfgfile.write('PROMPT %s\n' % prompt)
+ else:
+ cfgfile.write('PROMPT 1\n')
+
+ if menu:
+ cfgfile.write('ui vesamenu.c32\n')
+ cfgfile.write('menu title Select kernel options and boot kernel\n')
+ cfgfile.write('menu tabmsg Press [Tab] to edit, [Return] to select\n')
+ splash = d.getVar('SYSLINUX_SPLASH', True)
+ if splash:
+ cfgfile.write('menu background splash.lss\n')
+
+ for label in labels.split():
+ localdata = bb.data.createCopy(d)
+
+ overrides = localdata.getVar('OVERRIDES', True)
+ if not overrides:
+ raise bb.build.FuncFailed('OVERRIDES not defined')
+
+ localdata.setVar('OVERRIDES', label + ':' + overrides)
+ bb.data.update_data(localdata)
+
+ btypes = [ [ "", syslinux_default_console ] ]
+ if menu and syslinux_serial:
+ btypes = [ [ "Graphics console ", syslinux_default_console ],
+ [ "Serial console ", syslinux_serial_tty ] ]
+
+        root = d.getVar('SYSLINUX_ROOT', True)
+ if not root:
+ raise bb.build.FuncFailed('SYSLINUX_ROOT not defined')
+
+ for btype in btypes:
+ cfgfile.write('LABEL %s%s\nKERNEL /vmlinuz\n' % (btype[0], label))
+
+ exargs = d.getVar('SYSLINUX_KERNEL_ARGS', True)
+ if exargs:
+ btype[1] += " " + exargs
+
+ append = localdata.getVar('APPEND', True)
+ initrd = localdata.getVar('INITRD', True)
+
+ append = root + " " + append
+ cfgfile.write('APPEND ')
+
+ if initrd:
+ cfgfile.write('initrd=/initrd ')
+
+ cfgfile.write('LABEL=%s '% (label))
+ append = replace_rootfs_uuid(d, append)
+ cfgfile.write('%s %s\n' % (append, btype[1]))
+
+ cfgfile.close()
+}
diff --git a/import-layers/yocto-poky/meta/classes/systemd.bbclass b/import-layers/yocto-poky/meta/classes/systemd.bbclass
new file mode 100644
index 000000000..db7873fbe
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/systemd.bbclass
@@ -0,0 +1,207 @@
+# The list of packages that should have systemd packaging scripts added. For
+# each entry, optionally have a SYSTEMD_SERVICE_[package] that lists the service
+# files in this package. If this variable isn't set, [package].service is used.
+SYSTEMD_PACKAGES ?= "${PN}"
+SYSTEMD_PACKAGES_class-native ?= ""
+SYSTEMD_PACKAGES_class-nativesdk ?= ""
+
+# Whether to enable or disable the services on installation.
+SYSTEMD_AUTO_ENABLE ??= "enable"
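+
+# Typical recipe usage (illustrative values):
+#   SYSTEMD_PACKAGES = "${PN}"
+#   SYSTEMD_SERVICE_${PN} = "myservice.service"
+#   SYSTEMD_AUTO_ENABLE = "disable"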
+
+# This class will be included in any recipe that supports systemd init scripts,
+# even if systemd is not in DISTRO_FEATURES. As such don't make any changes
+# directly but check the DISTRO_FEATURES first.
+python __anonymous() {
+ # If the distro features have systemd but not sysvinit, inhibit update-rcd
+ # from doing any work so that pure-systemd images don't have redundant init
+ # files.
+ if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
+ d.appendVar("DEPENDS", " systemd-systemctl-native")
+ if not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d):
+ d.setVar("INHIBIT_UPDATERCD_BBCLASS", "1")
+}
+
+systemd_postinst() {
+OPTS=""
+
+if [ -n "$D" ]; then
+ OPTS="--root=$D"
+fi
+
+if type systemctl >/dev/null 2>/dev/null; then
+ systemctl $OPTS ${SYSTEMD_AUTO_ENABLE} ${SYSTEMD_SERVICE}
+
+ if [ -z "$D" -a "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
+ systemctl restart ${SYSTEMD_SERVICE}
+ fi
+fi
+}
+
+systemd_prerm() {
+OPTS=""
+
+if [ -n "$D" ]; then
+ OPTS="--root=$D"
+fi
+
+if type systemctl >/dev/null 2>/dev/null; then
+ if [ -z "$D" ]; then
+ systemctl stop ${SYSTEMD_SERVICE}
+ fi
+
+ systemctl $OPTS disable ${SYSTEMD_SERVICE}
+fi
+}
+
+
+systemd_populate_packages[vardeps] += "systemd_prerm systemd_postinst"
+systemd_populate_packages[vardepsexclude] += "OVERRIDES"
+
+
+python systemd_populate_packages() {
+ import re
+
+ if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
+ return
+
+ def get_package_var(d, var, pkg):
+ val = (d.getVar('%s_%s' % (var, pkg), True) or "").strip()
+ if val == "":
+ val = (d.getVar(var, True) or "").strip()
+ return val
+
+    # Check if the systemd packages are already included in PACKAGES
+ def systemd_check_package(pkg_systemd):
+ packages = d.getVar('PACKAGES', True)
+ if not pkg_systemd in packages.split():
+ bb.error('%s does not appear in package list, please add it' % pkg_systemd)
+
+
+ def systemd_generate_package_scripts(pkg):
+ bb.debug(1, 'adding systemd calls to postinst/postrm for %s' % pkg)
+
+ # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE_pkg
+ # variable.
+ localdata = d.createCopy()
+ localdata.prependVar("OVERRIDES", pkg + ":")
+ bb.data.update_data(localdata)
+
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += localdata.getVar('systemd_postinst', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+
+ prerm = d.getVar('pkg_prerm_%s' % pkg, True)
+ if not prerm:
+ prerm = '#!/bin/sh\n'
+ prerm += localdata.getVar('systemd_prerm', True)
+ d.setVar('pkg_prerm_%s' % pkg, prerm)
+
+
+    # Add files to FILES_*-systemd if they exist and haven't been added already
+ def systemd_append_file(pkg_systemd, file_append):
+ appended = False
+ if os.path.exists(oe.path.join(d.getVar("D", True), file_append)):
+ var_name = "FILES_" + pkg_systemd
+ files = d.getVar(var_name, False) or ""
+ if file_append not in files.split():
+ d.appendVar(var_name, " " + file_append)
+ appended = True
+ return appended
+
+    # Add systemd files to FILES_*-systemd, parse for Also= and follow recursively
+ def systemd_add_files_and_parse(pkg_systemd, path, service, keys):
+ # avoid infinite recursion
+ if systemd_append_file(pkg_systemd, oe.path.join(path, service)):
+ fullpath = oe.path.join(d.getVar("D", True), path, service)
+ if service.find('.service') != -1:
+ # for *.service add *@.service
+ service_base = service.replace('.service', '')
+ systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
+ if service.find('.socket') != -1:
+ # for *.socket add *.service and *@.service
+ service_base = service.replace('.socket', '')
+ systemd_add_files_and_parse(pkg_systemd, path, service_base + '.service', keys)
+ systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
+ for key in keys.split():
+ # recurse all dependencies found in keys ('Also';'Conflicts';..) and add to files
+ cmd = "grep %s %s | sed 's,%s=,,g' | tr ',' '\\n'" % (key, fullpath, key)
+ pipe = os.popen(cmd, 'r')
+ line = pipe.readline()
+ while line:
+ line = line.replace('\n', '')
+ systemd_add_files_and_parse(pkg_systemd, path, line, keys)
+ line = pipe.readline()
+ pipe.close()
+
+ # Check service-files and call systemd_add_files_and_parse for each entry
+ def systemd_check_services():
+ searchpaths = [oe.path.join(d.getVar("sysconfdir", True), "systemd", "system"),]
+ searchpaths.append(d.getVar("systemd_system_unitdir", True))
+ systemd_packages = d.getVar('SYSTEMD_PACKAGES', True)
+
+ keys = 'Also'
+ # scan for all in SYSTEMD_SERVICE[]
+ for pkg_systemd in systemd_packages.split():
+ for service in get_package_var(d, 'SYSTEMD_SERVICE', pkg_systemd).split():
+ path_found = ''
+
+ # Deal with adding, for example, 'ifplugd@eth0.service' from
+ # 'ifplugd@.service'
+ base = None
+ if service.find('@') != -1:
+ base = re.sub('@[^.]+.', '@.', service)
+
+ for path in searchpaths:
+ if os.path.exists(oe.path.join(d.getVar("D", True), path, service)):
+ path_found = path
+ break
+ elif base is not None:
+ if os.path.exists(oe.path.join(d.getVar("D", True), path, base)):
+ path_found = path
+ break
+
+ if path_found != '':
+ systemd_add_files_and_parse(pkg_systemd, path_found, service, keys)
+ else:
+ raise bb.build.FuncFailed("SYSTEMD_SERVICE_%s value %s does not exist" % \
+ (pkg_systemd, service))
+
+ # Run all modifications once when creating package
+ if os.path.exists(d.getVar("D", True)):
+ for pkg in d.getVar('SYSTEMD_PACKAGES', True).split():
+ systemd_check_package(pkg)
+ if d.getVar('SYSTEMD_SERVICE_' + pkg, True):
+ systemd_generate_package_scripts(pkg)
+ systemd_check_services()
+}
+
+PACKAGESPLITFUNCS_prepend = "systemd_populate_packages "
+
+python rm_systemd_unitdir (){
+ import shutil
+ if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
+ systemd_unitdir = oe.path.join(d.getVar("D", True), d.getVar('systemd_unitdir', True))
+ if os.path.exists(systemd_unitdir):
+ shutil.rmtree(systemd_unitdir)
+ systemd_libdir = os.path.dirname(systemd_unitdir)
+ if (os.path.exists(systemd_libdir) and not os.listdir(systemd_libdir)):
+ os.rmdir(systemd_libdir)
+}
+do_install[postfuncs] += "rm_systemd_unitdir "
+
+python rm_sysvinit_initddir (){
+ import shutil
+ sysv_initddir = oe.path.join(d.getVar("D", True), (d.getVar('INIT_D_DIR', True) or "/etc/init.d"))
+
+ if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and \
+ not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) and \
+ os.path.exists(sysv_initddir):
+ systemd_system_unitdir = oe.path.join(d.getVar("D", True), d.getVar('systemd_system_unitdir', True))
+
+ # If systemd_system_unitdir contains anything, delete sysv_initddir
+ if (os.path.exists(systemd_system_unitdir) and os.listdir(systemd_system_unitdir)):
+ shutil.rmtree(sysv_initddir)
+}
+do_install[postfuncs] += "rm_sysvinit_initddir "
diff --git a/import-layers/yocto-poky/meta/classes/terminal.bbclass b/import-layers/yocto-poky/meta/classes/terminal.bbclass
new file mode 100644
index 000000000..9f4c24e90
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/terminal.bbclass
@@ -0,0 +1,96 @@
+OE_TERMINAL ?= 'auto'
+OE_TERMINAL[type] = 'choice'
+OE_TERMINAL[choices] = 'auto none \
+ ${@oe_terminal_prioritized()}'
+
+OE_TERMINAL_EXPORTS += 'EXTRA_OEMAKE'
+OE_TERMINAL_EXPORTS[type] = 'list'
+
+XAUTHORITY ?= "${HOME}/.Xauthority"
+SHELL ?= "bash"
+
+def oe_terminal_prioritized():
+ import oe.terminal
+ return " ".join(o.name for o in oe.terminal.prioritized())
+
+def emit_terminal_func(command, envdata, d):
+ cmd_func = 'do_terminal'
+
+ envdata.setVar(cmd_func, 'exec ' + command)
+ envdata.setVarFlag(cmd_func, 'func', '1')
+
+ runfmt = d.getVar('BB_RUNFMT', True) or "run.{func}.{pid}"
+ runfile = runfmt.format(func=cmd_func, task=cmd_func, taskfunc=cmd_func, pid=os.getpid())
+ runfile = os.path.join(d.getVar('T', True), runfile)
+ bb.utils.mkdirhier(os.path.dirname(runfile))
+
+ with open(runfile, 'w') as script:
+ script.write('#!/bin/sh -e\n')
+ bb.data.emit_func(cmd_func, script, envdata)
+ script.write(cmd_func)
+ script.write("\n")
+ os.chmod(runfile, 0755)
+
+ return runfile
+
+def oe_terminal(command, title, d):
+ import oe.data
+ import oe.terminal
+
+ envdata = bb.data.init()
+
+ for v in os.environ:
+ envdata.setVar(v, os.environ[v])
+ envdata.setVarFlag(v, 'export', '1')
+
+ for export in oe.data.typed_value('OE_TERMINAL_EXPORTS', d):
+ value = d.getVar(export, True)
+ if value is not None:
+ os.environ[export] = str(value)
+ envdata.setVar(export, str(value))
+ envdata.setVarFlag(export, 'export', '1')
+ if export == "PSEUDO_DISABLED":
+ if "PSEUDO_UNLOAD" in os.environ:
+ del os.environ["PSEUDO_UNLOAD"]
+ envdata.delVar("PSEUDO_UNLOAD")
+
+ # Add in all variables from the user's original environment which
+    # haven't subsequently been set/changed
+ origbbenv = d.getVar("BB_ORIGENV", False) or {}
+ for key in origbbenv:
+ if key in envdata:
+ continue
+ value = origbbenv.getVar(key, True)
+ if value is not None:
+ os.environ[key] = str(value)
+ envdata.setVar(key, str(value))
+ envdata.setVarFlag(key, 'export', '1')
+
+    # A complex PS1 might need more escaping of chars;
+    # rather than trying, just don't export PS1 at all.
+ envdata.delVar("PS1")
+
+ # Replace command with an executable wrapper script
+ command = emit_terminal_func(command, envdata, d)
+
+ terminal = oe.data.typed_value('OE_TERMINAL', d).lower()
+ if terminal == 'none':
+ bb.fatal('Devshell usage disabled with OE_TERMINAL')
+ elif terminal != 'auto':
+ try:
+ oe.terminal.spawn(terminal, command, title, None, d)
+ return
+ except oe.terminal.UnsupportedTerminal:
+ bb.warn('Unsupported terminal "%s", defaulting to "auto"' %
+ terminal)
+ except oe.terminal.ExecutionError as exc:
+ bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc))
+
+ try:
+ oe.terminal.spawn_preferred(command, title, None, d)
+ except oe.terminal.NoSupportedTerminals:
+ bb.fatal('No valid terminal found, unable to open devshell')
+ except oe.terminal.ExecutionError as exc:
+ bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc))
+
+oe_terminal[vardepsexclude] = "BB_ORIGENV"
diff --git a/import-layers/yocto-poky/meta/classes/testimage-auto.bbclass b/import-layers/yocto-poky/meta/classes/testimage-auto.bbclass
new file mode 100644
index 000000000..e0a22b773
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/testimage-auto.bbclass
@@ -0,0 +1,23 @@
+# Copyright (C) 2013 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+
+# Run tests automatically on an image after the image is constructed
+# (as opposed to testimage.bbclass alone where tests must be called
+# manually using bitbake -c testimage <image>).
+#
+# NOTE: to use this class, simply set TEST_IMAGE = "1" - no need to
+# inherit it since that will be done in image.bbclass when this variable
+# has been set.
+#
+# See testimage.bbclass for the test implementation.
+
+inherit testimage
+
+python do_testimage_auto() {
+ testimage_main(d)
+}
+addtask testimage_auto before do_build after do_image_complete
+do_testimage_auto[depends] += "${TESTIMAGEDEPENDS}"
+do_testimage_auto[lockfiles] += "${TESTIMAGELOCK}"
diff --git a/import-layers/yocto-poky/meta/classes/testimage.bbclass b/import-layers/yocto-poky/meta/classes/testimage.bbclass
new file mode 100644
index 000000000..e77bb1192
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/testimage.bbclass
@@ -0,0 +1,263 @@
+# Copyright (C) 2013 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+
+# testimage.bbclass enables testing of qemu images using python unittests.
+# Most of the tests are commands run on target image over ssh.
+# To use it add testimage to global inherit and call your target image with -c testimage
+# You can try it out like this:
+# - first build a qemu core-image-sato
+# - add INHERIT += "testimage" in local.conf
+# - then bitbake core-image-sato -c testimage. That will run a standard suite of tests.
+
+# You can set (or append to) TEST_SUITES in local.conf to select the tests
+# which you want to run for your target.
+# The test names are the module names in meta/lib/oeqa/runtime.
+# Each name in TEST_SUITES represents a required test for the image (no skipping allowed).
+# Appending "auto" means that it will try to run all tests that are suitable for the image (each test decides that on it's own).
+# Note that order in TEST_SUITES is relevant: tests are run in an order such that
+# tests mentioned in @skipUnlessPassed run before the tests that depend on them,
+# but without such dependencies, tests run in the order in which they are listed
+# in TEST_SUITES.
+#
+# A layer can add its own tests in lib/oeqa/runtime, provided it extends BBPATH as normal in its layer.conf.
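+#
+# Example local.conf snippet (illustrative):
+#   INHERIT += "testimage"
+#   TEST_SUITES = "ping ssh df auto"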
+
+# TEST_LOG_DIR contains a command ssh log and may contain information about what command is running, its output and return codes, and for qemu a boot log until login.
+# Booting is handled by this class, and it's not a test in itself.
+# TEST_QEMUBOOT_TIMEOUT can be used to set the maximum time in seconds the launch code will wait for the login prompt.
+
+TEST_LOG_DIR ?= "${WORKDIR}/testimage"
+
+TEST_EXPORT_DIR ?= "${TMPDIR}/testimage/${PN}"
+TEST_EXPORT_ONLY ?= "0"
+
+RPMTESTSUITE = "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'smart rpm', '', d)}"
+MINTESTSUITE = "ping"
+NETTESTSUITE = "${MINTESTSUITE} ssh df date scp syslog"
+DEVTESTSUITE = "gcc kernelmodule ldd"
+
+DEFAULT_TEST_SUITES = "${MINTESTSUITE} auto"
+DEFAULT_TEST_SUITES_pn-core-image-minimal = "${MINTESTSUITE}"
+DEFAULT_TEST_SUITES_pn-core-image-minimal-dev = "${MINTESTSUITE}"
+DEFAULT_TEST_SUITES_pn-core-image-full-cmdline = "${NETTESTSUITE} perl python logrotate"
+DEFAULT_TEST_SUITES_pn-core-image-x11 = "${MINTESTSUITE}"
+DEFAULT_TEST_SUITES_pn-core-image-lsb = "${NETTESTSUITE} pam parselogs ${RPMTESTSUITE}"
+DEFAULT_TEST_SUITES_pn-core-image-sato = "${NETTESTSUITE} connman xorg parselogs ${RPMTESTSUITE} \
+ ${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'python', '', d)}"
+DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "${NETTESTSUITE} connman xorg perl python \
+ ${DEVTESTSUITE} parselogs ${RPMTESTSUITE}"
+DEFAULT_TEST_SUITES_pn-core-image-lsb-dev = "${NETTESTSUITE} pam perl python parselogs ${RPMTESTSUITE}"
+DEFAULT_TEST_SUITES_pn-core-image-lsb-sdk = "${NETTESTSUITE} buildcvs buildiptables buildsudoku \
+ connman ${DEVTESTSUITE} pam perl python parselogs ${RPMTESTSUITE}"
+DEFAULT_TEST_SUITES_pn-meta-toolchain = "auto"
+
+# aarch64 has no graphics
+DEFAULT_TEST_SUITES_remove_aarch64 = "xorg"
+
+#qemumips is too slow for buildsudoku
+DEFAULT_TEST_SUITES_remove_qemumips = "buildsudoku"
+
+TEST_SUITES ?= "${DEFAULT_TEST_SUITES}"
+
+TEST_QEMUBOOT_TIMEOUT ?= "1000"
+TEST_TARGET ?= "qemu"
+TEST_TARGET_IP ?= ""
+TEST_SERVER_IP ?= ""
+
+TESTIMAGEDEPENDS = ""
+TESTIMAGEDEPENDS_qemuall = "qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot"
+
+TESTIMAGELOCK = "${TMPDIR}/testimage.lock"
+TESTIMAGELOCK_qemuall = ""
+
+TESTIMAGE_DUMP_DIR ?= "/tmp/oe-saved-tests/"
+
+testimage_dump_target () {
+ top -bn1
+ ps
+ free
+ df
+ # The next command will export the default gateway IP
+ export DEFAULT_GATEWAY=$(ip route | awk '/default/ { print $3}')
+ ping -c3 $DEFAULT_GATEWAY
+ dmesg
+ netstat -an
+ ip address
+ # Next command will dump logs from /var/log/
+ find /var/log/ -type f 2>/dev/null -exec echo "====================" \; -exec echo {} \; -exec echo "====================" \; -exec cat {} \; -exec echo "" \;
+}
+
+testimage_dump_host () {
+ top -bn1
+ iostat -x -z -N -d -p ALL 20 2
+ ps -ef
+ free
+ df
+ memstat
+ dmesg
+ ip -s link
+ netstat -an
+}
+
+python do_testimage() {
+ testimage_main(d)
+}
+addtask testimage
+do_testimage[nostamp] = "1"
+do_testimage[depends] += "${TESTIMAGEDEPENDS}"
+do_testimage[lockfiles] += "${TESTIMAGELOCK}"
+
+def exportTests(d, tc):
+ import json
+ import shutil
+ import pkgutil
+ import re
+
+ exportpath = d.getVar("TEST_EXPORT_DIR", True)
+
+ savedata = {}
+ savedata["d"] = {}
+ savedata["target"] = {}
+ savedata["host_dumper"] = {}
+ for key in tc.__dict__:
+ # special cases
+ if key not in ['d', 'target', 'host_dumper', 'suite']:
+ savedata[key] = getattr(tc, key)
+ savedata["target"]["ip"] = tc.target.ip or d.getVar("TEST_TARGET_IP", True)
+ savedata["target"]["server_ip"] = tc.target.server_ip or d.getVar("TEST_SERVER_IP", True)
+
+ keys = [ key for key in d.keys() if not key.startswith("_") and not key.startswith("BB") \
+ and not key.startswith("B_pn") and not key.startswith("do_") and not d.getVarFlag(key, "func", True)]
+ for key in keys:
+ try:
+ savedata["d"][key] = d.getVar(key, True)
+ except bb.data_smart.ExpansionError:
+ # we don't care about those anyway
+ pass
+
+ savedata["host_dumper"]["parent_dir"] = tc.host_dumper.parent_dir
+ savedata["host_dumper"]["cmds"] = tc.host_dumper.cmds
+
+ json_file = os.path.join(exportpath, "testdata.json")
+ with open(json_file, "w") as f:
+ json.dump(savedata, f, skipkeys=True, indent=4, sort_keys=True)
+
+ # Replace absolute path with relative in the file
+ exclude_path = os.path.join(d.getVar("COREBASE", True),'meta','lib','oeqa')
+ f1 = open(json_file,'r').read()
+ f2 = open(json_file,'w')
+ m = f1.replace(exclude_path,'oeqa')
+ f2.write(m)
+ f2.close()
+
+ # now start copying files
+ # we'll basically copy everything under meta/lib/oeqa, with these exceptions
+ # - oeqa/targetcontrol.py - not needed
+# - oeqa/selftest - not needed here (those are oe-selftest tests, not runtime tests)
+ # That means:
+ # - all tests from oeqa/runtime defined in TEST_SUITES (including from other layers)
+ # - the contents of oeqa/utils and oeqa/runtime/files
+# - oeqa/oetest.py and oeqa/runexported.py (the latter gets copied to exportpath, not exportpath/oeqa)
+ # - __init__.py files
+ bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/runtime/files"))
+ bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/utils"))
+ # copy test modules, this should cover tests in other layers too
+ bbpath = d.getVar("BBPATH", True).split(':')
+ for t in tc.testslist:
+ isfolder = False
+ if re.search("\w+\.\w+\.test_\S+", t):
+ t = '.'.join(t.split('.')[:3])
+ mod = pkgutil.get_loader(t)
+        # Does the module have more depth than usual (i.e. is it in a subfolder)?
+ if (t.count('.') > 2):
+ for p in bbpath:
+ foldername = os.path.join(p, 'lib', os.sep.join(t.split('.')).rsplit(os.sep, 1)[0])
+ if os.path.isdir(foldername):
+ isfolder = True
+ target_folder = os.path.join(exportpath, "oeqa", "runtime", os.path.basename(foldername))
+ if not os.path.exists(target_folder):
+ shutil.copytree(foldername, target_folder)
+ if not isfolder:
+ shutil.copy2(mod.filename, os.path.join(exportpath, "oeqa/runtime"))
+ # copy __init__.py files
+ oeqadir = pkgutil.get_loader("oeqa").filename
+ shutil.copy2(os.path.join(oeqadir, "__init__.py"), os.path.join(exportpath, "oeqa"))
+ shutil.copy2(os.path.join(oeqadir, "runtime/__init__.py"), os.path.join(exportpath, "oeqa/runtime"))
+ # copy oeqa/oetest.py and oeqa/runexported.py
+ shutil.copy2(os.path.join(oeqadir, "oetest.py"), os.path.join(exportpath, "oeqa"))
+ shutil.copy2(os.path.join(oeqadir, "runexported.py"), exportpath)
+ # copy oeqa/utils/*.py
+ for root, dirs, files in os.walk(os.path.join(oeqadir, "utils")):
+ for f in files:
+ if f.endswith(".py"):
+ shutil.copy2(os.path.join(root, f), os.path.join(exportpath, "oeqa/utils"))
+ # copy oeqa/runtime/files/*
+ for root, dirs, files in os.walk(os.path.join(oeqadir, "runtime/files")):
+ for f in files:
+ shutil.copy2(os.path.join(root, f), os.path.join(exportpath, "oeqa/runtime/files"))
+
+ bb.plain("Exported tests to: %s" % exportpath)
+
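+# Illustrative follow-up: after a build with TEST_EXPORT_ONLY = "1", the
+# exported tests can be run standalone from TEST_EXPORT_DIR, roughly:
+#   cd <TEST_EXPORT_DIR> && ./runexported.py testdata.json
+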
+def testimage_main(d):
+ import unittest
+ import os
+ import oeqa.runtime
+ import time
+ import signal
+ from oeqa.oetest import ImageTestContext
+ from oeqa.targetcontrol import get_target_controller
+ from oeqa.utils.dump import get_host_dumper
+
+ pn = d.getVar("PN", True)
+ export = oe.utils.conditional("TEST_EXPORT_ONLY", "1", True, False, d)
+ bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
+ if export:
+ bb.utils.remove(d.getVar("TEST_EXPORT_DIR", True), recurse=True)
+ bb.utils.mkdirhier(d.getVar("TEST_EXPORT_DIR", True))
+
+ # we need the host dumper in test context
+ host_dumper = get_host_dumper(d)
+
+ # the robot dance
+ target = get_target_controller(d)
+
+ # test context
+ tc = ImageTestContext(d, target, host_dumper)
+
+    # This is a dummy load of tests: we do it to find compile errors
+    # in the tests themselves before booting the image.
+ try:
+ tc.loadTests()
+ except Exception as e:
+ import traceback
+ bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
+
+ if export:
+ signal.signal(signal.SIGTERM, tc.origsigtermhandler)
+ tc.origsigtermhandler = None
+ exportTests(d,tc)
+ else:
+ target.deploy()
+ try:
+ target.start()
+ starttime = time.time()
+ result = tc.runTests()
+ stoptime = time.time()
+ if result.wasSuccessful():
+ bb.plain("%s - Ran %d test%s in %.3fs" % (pn, result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime))
+ msg = "%s - OK - All required tests passed" % pn
+ skipped = len(result.skipped)
+ if skipped:
+ msg += " (skipped=%d)" % skipped
+ bb.plain(msg)
+ else:
+ raise bb.build.FuncFailed("%s - FAILED - check the task log and the ssh log" % pn )
+ finally:
+ signal.signal(signal.SIGTERM, tc.origsigtermhandler)
+ target.stop()
+
+testimage_main[vardepsexclude] =+ "BB_ORIGENV"
+
+inherit testsdk
diff --git a/import-layers/yocto-poky/meta/classes/testsdk.bbclass b/import-layers/yocto-poky/meta/classes/testsdk.bbclass
new file mode 100644
index 000000000..f4dc2c36d
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/testsdk.bbclass
@@ -0,0 +1,142 @@
+# Copyright (C) 2013 - 2016 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# testsdk.bbclass enables testing for SDK and Extensible SDK
+#
+# To run the SDK tests you need to do:
+# - bitbake core-image-sato -c populate_sdk
+# - bitbake core-image-sato -c testsdk
+#
+# To run the eSDK tests you need to do:
+# - bitbake core-image-sato -c populate_sdk_ext
+# - bitbake core-image-sato -c testsdkext
+
+TEST_LOG_DIR ?= "${WORKDIR}/testimage"
+TESTSDKLOCK = "${TMPDIR}/testsdk.lock"
+
+def run_test_context(CTestContext, d, testdir, tcname, pn, *args):
+ import glob
+ import time
+
+ targets = glob.glob(d.expand(testdir + "/tc/environment-setup-*"))
+ for sdkenv in targets:
+ bb.plain("Testing %s" % sdkenv)
+ tc = CTestContext(d, testdir, sdkenv, tcname, args)
+
+        # This is a dummy load of tests: we do it to find compile errors
+        # in the tests themselves before actually running them.
+ try:
+ tc.loadTests()
+ except Exception as e:
+ import traceback
+ bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
+
+ starttime = time.time()
+ result = tc.runTests()
+ stoptime = time.time()
+ if result.wasSuccessful():
+ bb.plain("%s SDK(%s):%s - Ran %d test%s in %.3fs" % (pn, os.path.basename(tcname), os.path.basename(sdkenv),result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime))
+ msg = "%s - OK - All required tests passed" % pn
+ skipped = len(result.skipped)
+ if skipped:
+ msg += " (skipped=%d)" % skipped
+ bb.plain(msg)
+ else:
+ raise bb.build.FuncFailed("%s - FAILED - check the task log and the commands log" % pn )
+
+def testsdk_main(d):
+ import os
+ import oeqa.sdk
+ import subprocess
+ from oeqa.oetest import SDKTestContext
+
+ pn = d.getVar("PN", True)
+ bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
+
+ tcname = d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh")
+ if not os.path.exists(tcname):
+ bb.fatal("The toolchain is not built. Build it before running the tests: 'bitbake <image> -c populate_sdk' .")
+
+ sdktestdir = d.expand("${WORKDIR}/testimage-sdk/")
+ bb.utils.remove(sdktestdir, True)
+ bb.utils.mkdirhier(sdktestdir)
+ try:
+ subprocess.check_output("cd %s; %s <<EOF\n./tc\nY\nEOF" % (sdktestdir, tcname), shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Couldn't install the SDK:\n%s" % e.output)
+
+ try:
+ run_test_context(SDKTestContext, d, sdktestdir, tcname, pn)
+ finally:
+ bb.utils.remove(sdktestdir, True)
+
+testsdk_main[vardepsexclude] =+ "BB_ORIGENV"
+
+python do_testsdk() {
+ testsdk_main(d)
+}
+addtask testsdk
+do_testsdk[nostamp] = "1"
+do_testsdk[lockfiles] += "${TESTSDKLOCK}"
+
+TEST_LOG_SDKEXT_DIR ?= "${WORKDIR}/testsdkext"
+TESTSDKEXTLOCK = "${TMPDIR}/testsdkext.lock"
+
+def testsdkext_main(d):
+ import os
+ import oeqa.sdkext
+ import subprocess
+ from bb.utils import export_proxies
+ from oeqa.oetest import SDKTestContext, SDKExtTestContext
+ from oeqa.utils import avoid_paths_in_environ
+
+
+    # The extensible SDK uses the network
+    export_proxies(d)
+
+    # The extensible SDK can be contaminated if native programs are
+    # in PATH, e.g. perl-native could be picked up instead of the eSDK's perl.
+ paths_to_avoid = [d.getVar('STAGING_DIR', True),
+ d.getVar('BASE_WORKDIR', True)]
+ os.environ['PATH'] = avoid_paths_in_environ(paths_to_avoid)
+
+ pn = d.getVar("PN", True)
+ bb.utils.mkdirhier(d.getVar("TEST_LOG_SDKEXT_DIR", True))
+
+ tcname = d.expand("${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.sh")
+ if not os.path.exists(tcname):
+        bb.fatal("The extensible SDK toolchain is not built. Build it before running the" \
+            " tests: 'bitbake <image> -c populate_sdk_ext'.")
+
+ testdir = d.expand("${WORKDIR}/testsdkext/")
+ bb.utils.remove(testdir, True)
+ bb.utils.mkdirhier(testdir)
+ try:
+ subprocess.check_output("%s -y -d %s/tc" % (tcname, testdir), shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Couldn't install the SDK EXT:\n%s" % e.output)
+
+ try:
+ bb.plain("Running SDK Compatibility tests ...")
+ run_test_context(SDKExtTestContext, d, testdir, tcname, pn, True)
+ finally:
+ pass
+
+ try:
+ bb.plain("Running Extensible SDK tests ...")
+ run_test_context(SDKExtTestContext, d, testdir, tcname, pn)
+ finally:
+ pass
+
+ bb.utils.remove(testdir, True)
+
+testsdkext_main[vardepsexclude] =+ "BB_ORIGENV"
+
+python do_testsdkext() {
+ testsdkext_main(d)
+}
+addtask testsdkext
+do_testsdkext[nostamp] = "1"
+do_testsdkext[lockfiles] += "${TESTSDKEXTLOCK}"
diff --git a/import-layers/yocto-poky/meta/classes/texinfo.bbclass b/import-layers/yocto-poky/meta/classes/texinfo.bbclass
new file mode 100644
index 000000000..92efbccdd
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/texinfo.bbclass
@@ -0,0 +1,15 @@
+# This class is inherited by recipes whose upstream packages invoke the
+# texinfo utilities at build-time. Native and cross recipes are made to use the
+# dummy scripts provided by texinfo-dummy-native, for improved performance.
+# Target architecture recipes use the genuine Texinfo utilities. By default,
+# they use the Texinfo utilities on the host system. If you want to use the
+# Texinfo recipe shipped with Yocto, you can remove texinfo-native from
+# ASSUME_PROVIDED and makeinfo from SANITY_REQUIRED_UTILITIES.
+
+TEXDEP = "texinfo-native"
+TEXDEP_class-native = "texinfo-dummy-native"
+TEXDEP_class-cross = "texinfo-dummy-native"
+DEPENDS_append = " ${TEXDEP}"
+PATH_prepend_class-native = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:"
+PATH_prepend_class-cross = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:"
+
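+# Illustrative local.conf fragment for building Texinfo from the recipe
+# instead of relying on the host tools:
+#   ASSUME_PROVIDED_remove = "texinfo-native"
+#   SANITY_REQUIRED_UTILITIES_remove = "makeinfo"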
diff --git a/import-layers/yocto-poky/meta/classes/tinderclient.bbclass b/import-layers/yocto-poky/meta/classes/tinderclient.bbclass
new file mode 100644
index 000000000..2bc75fc65
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/tinderclient.bbclass
@@ -0,0 +1,368 @@
+def tinder_http_post(server, selector, content_type, body):
+ import httplib
+ # now post it
+ for i in range(0,5):
+ try:
+ h = httplib.HTTP(server)
+ h.putrequest('POST', selector)
+ h.putheader('content-type', content_type)
+ h.putheader('content-length', str(len(body)))
+ h.endheaders()
+ h.send(body)
+ errcode, errmsg, headers = h.getreply()
+ #print errcode, errmsg, headers
+ return (errcode,errmsg, headers, h.file)
+ except:
+ print "Error sending the report!"
+ # try again
+ pass
+
+ # return some garbage
+ return (-1, "unknown", "unknown", None)
+
+def tinder_form_data(bound, dict, log):
+ output = []
+ # for each key in the dictionary
+ for name in dict:
+ assert dict[name]
+ output.append( "--" + bound )
+ output.append( 'Content-Disposition: form-data; name="%s"' % name )
+ output.append( "" )
+ output.append( dict[name] )
+ if log:
+ output.append( "--" + bound )
+ output.append( 'Content-Disposition: form-data; name="log"; filename="log.txt"' )
+ output.append( '' )
+ output.append( log )
+ output.append( '--' + bound + '--' )
+ output.append( '' )
+
+ return "\r\n".join(output)
+
+def tinder_time_string():
+ """
+ Return the time as GMT
+ """
+ return ""
+
+def tinder_format_http_post(d,status,log):
+ """
+ Format the Tinderbox HTTP post with the data needed
+ for the tinderbox to be happy.
+ """
+
+ import random
+
+ # the variables we will need to send on this form post
+ variables = {
+ "tree" : d.getVar('TINDER_TREE', True),
+ "machine_name" : d.getVar('TINDER_MACHINE', True),
+ "os" : os.uname()[0],
+ "os_version" : os.uname()[2],
+ "compiler" : "gcc",
+ "clobber" : d.getVar('TINDER_CLOBBER', True) or "0",
+ "srcdate" : d.getVar('SRCDATE', True),
+ "PN" : d.getVar('PN', True),
+ "PV" : d.getVar('PV', True),
+ "PR" : d.getVar('PR', True),
+ "FILE" : d.getVar('FILE', True) or "N/A",
+ "TARGETARCH" : d.getVar('TARGET_ARCH', True),
+ "TARGETFPU" : d.getVar('TARGET_FPU', True) or "Unknown",
+ "TARGETOS" : d.getVar('TARGET_OS', True) or "Unknown",
+ "MACHINE" : d.getVar('MACHINE', True) or "Unknown",
+ "DISTRO" : d.getVar('DISTRO', True) or "Unknown",
+ "zecke-rocks" : "sure",
+ }
+
+ # optionally add the status
+ if status:
+ variables["status"] = str(status)
+
+ # try to load the machine id
+    # it is only needed by build_status.pl, but sending it
+    # always does not hurt
+ try:
+ f = file(d.getVar('TMPDIR',True)+'/tinder-machine.id', 'r')
+ id = f.read()
+ variables['machine_id'] = id
+ except:
+ pass
+
+ # the boundary we will need
+ boundary = "----------------------------------%d" % int(random.random()*1000000000000)
+
+ # now format the body
+ body = tinder_form_data( boundary, variables, log )
+
+ return ("multipart/form-data; boundary=%s" % boundary),body
+
+
+def tinder_build_start(d):
+ """
+ Inform the tinderbox that a build is starting. We do this
+ by posting our name and tree to the build_start.pl script
+ on the server.
+ """
+
+ # get the body and type
+ content_type, body = tinder_format_http_post(d,None,None)
+ server = d.getVar('TINDER_HOST', True )
+ url = d.getVar('TINDER_URL', True )
+
+ selector = url + "/xml/build_start.pl"
+
+ #print "selector %s and url %s" % (selector, url)
+
+ # now post it
+ errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
+ #print errcode, errmsg, headers
+ report = h_file.read()
+
+ # now let us find the machine id that was assigned to us
+ search = "<machine id='"
+ report = report[report.find(search)+len(search):]
+ report = report[0:report.find("'")]
+
+ bb.note("Machine ID assigned by tinderbox: %s" % report )
+
+ # now we will need to save the machine number
+ # we will override any previous numbers
+ f = file(d.getVar('TMPDIR', True)+"/tinder-machine.id", 'w')
+ f.write(report)
+
+
+def tinder_send_http(d, status, _log):
+ """
+ Send this log as build status
+ """
+
+ # get the body and type
+ server = d.getVar('TINDER_HOST', True)
+ url = d.getVar('TINDER_URL', True)
+
+ selector = url + "/xml/build_status.pl"
+
+    # now post it - in chunks of 18,000 characters
+ new_log = _log
+ while len(new_log) > 0:
+ content_type, body = tinder_format_http_post(d,status,new_log[0:18000])
+ errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
+ #print errcode, errmsg, headers
+ #print h.file.read()
+ new_log = new_log[18000:]
+
+
+def tinder_print_info(d):
+ """
+    Print the TinderBox info,
+    including information about the base system and the tree
+    we use.
+ """
+
+ # get the local vars
+ time = tinder_time_string()
+ ops = os.uname()[0]
+ version = os.uname()[2]
+ url = d.getVar( 'TINDER_URL' , True )
+ tree = d.getVar( 'TINDER_TREE', True )
+ branch = d.getVar( 'TINDER_BRANCH', True )
+ srcdate = d.getVar( 'SRCDATE', True )
+ machine = d.getVar( 'MACHINE', True )
+ distro = d.getVar( 'DISTRO', True )
+ bbfiles = d.getVar( 'BBFILES', True )
+ tarch = d.getVar( 'TARGET_ARCH', True )
+ fpu = d.getVar( 'TARGET_FPU', True )
+ oerev = d.getVar( 'OE_REVISION', True ) or "unknown"
+
+    # there is a bug with triple-quoted strings;
+    # we work around it here and should fix the original
+    # bug as well
+ output = []
+ output.append("== Tinderbox Info" )
+ output.append("Time: %(time)s" )
+ output.append("OS: %(ops)s" )
+ output.append("%(version)s" )
+ output.append("Compiler: gcc" )
+ output.append("Tinderbox Client: 0.1" )
+ output.append("Tinderbox Client Last Modified: yesterday" )
+ output.append("Tinderbox Protocol: 0.1" )
+ output.append("URL: %(url)s" )
+ output.append("Tree: %(tree)s" )
+ output.append("Config:" )
+ output.append("branch = '%(branch)s'" )
+ output.append("TARGET_ARCH = '%(tarch)s'" )
+ output.append("TARGET_FPU = '%(fpu)s'" )
+ output.append("SRCDATE = '%(srcdate)s'" )
+ output.append("MACHINE = '%(machine)s'" )
+ output.append("DISTRO = '%(distro)s'" )
+ output.append("BBFILES = '%(bbfiles)s'" )
+ output.append("OEREV = '%(oerev)s'" )
+ output.append("== End Tinderbox Client Info" )
+
+ # now create the real output
+ return "\n".join(output) % vars()
+
+
+def tinder_print_env():
+ """
+ Print the environment variables of this build
+ """
+ time_start = tinder_time_string()
+ time_end = tinder_time_string()
+
+ # build the environment
+ env = ""
+ for var in os.environ:
+ env += "%s=%s\n" % (var, os.environ[var])
+
+ output = []
+ output.append( "---> TINDERBOX RUNNING env %(time_start)s" )
+ output.append( env )
+ output.append( "<--- TINDERBOX FINISHED (SUCCESS) %(time_end)s" )
+
+ return "\n".join(output) % vars()
+
+def tinder_tinder_start(d, event):
+ """
+    Print the configuration of this build
+ """
+
+ time_start = tinder_time_string()
+ config = tinder_print_info(d)
+ #env = tinder_print_env()
+ time_end = tinder_time_string()
+ packages = " ".join( event.getPkgs() )
+
+ output = []
+ output.append( "---> TINDERBOX PRINTING CONFIGURATION %(time_start)s" )
+ output.append( config )
+ #output.append( env )
+ output.append( "<--- TINDERBOX FINISHED PRINTING CONFIGURATION %(time_end)s" )
+ output.append( "---> TINDERBOX BUILDING '%(packages)s'" )
+ output.append( "<--- TINDERBOX STARTING BUILD NOW" )
+
+ output.append( "" )
+
+ return "\n".join(output) % vars()
+
+def tinder_do_tinder_report(event):
+ """
+ Report to the tinderbox:
+ On the BuildStart we will inform the box directly
+ On the other events we will write to the TINDER_LOG and
+ when the Task is finished we will send the report.
+
+ The above is not yet fully implemented. Currently we send
+ information immediately. The caching/queuing needs to be
+ implemented. Also sending more or less information is not
+ implemented yet.
+
+    We have two temporary files stored in the TMP directory. One file
+    contains the machine id assigned to the tinderclient; this id gets
+    assigned when we connect to the box and start the build process. The
+    second file is used to work around an EventHandler limitation: if
+    BitBake is run with the continue option, we want the build to fail
+    even if we get the BuildCompleted event. In this case we have to look
+    up the status and send it instead of 100/success.
+ """
+ import glob
+
+ # variables
+ name = bb.event.getName(event)
+ log = ""
+ status = 1
+    # Check what we need to do: the Build* events show we are starting or are done
+ if name == "BuildStarted":
+ tinder_build_start(event.data)
+ log = tinder_tinder_start(event.data,event)
+
+ try:
+ # truncate the tinder log file
+ f = file(event.data.getVar('TINDER_LOG', True), 'w')
+ f.write("")
+ f.close()
+ except:
+ pass
+
+ try:
+ # write a status to the file. This is needed for the -k option
+ # of BitBake
+ g = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
+ g.write("")
+ g.close()
+ except IOError:
+ pass
+
+ # Append the Task-Log (compile,configure...) to the log file
+ # we will send to the server
+ if name == "TaskSucceeded" or name == "TaskFailed":
+ log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task))
+
+ if len(log_file) != 0:
+ to_file = event.data.getVar('TINDER_LOG', True)
+ log += "".join(open(log_file[0], 'r').readlines())
+
+ # set the right 'HEADER'/Summary for the TinderBox
+ if name == "TaskStarted":
+ log += "---> TINDERBOX Task %s started\n" % event.task
+ elif name == "TaskSucceeded":
+ log += "<--- TINDERBOX Task %s done (SUCCESS)\n" % event.task
+ elif name == "TaskFailed":
+ log += "<--- TINDERBOX Task %s failed (FAILURE)\n" % event.task
+ elif name == "PkgStarted":
+ log += "---> TINDERBOX Package %s started\n" % event.data.getVar('PF', True)
+ elif name == "PkgSucceeded":
+ log += "<--- TINDERBOX Package %s done (SUCCESS)\n" % event.data.getVar('PF', True)
+ elif name == "PkgFailed":
+ if not event.data.getVar('TINDER_AUTOBUILD', True) == "0":
+ build.exec_task('do_clean', event.data)
+ log += "<--- TINDERBOX Package %s failed (FAILURE)\n" % event.data.getVar('PF', True)
+ status = 200
+ # remember the failure for the -k case
+ h = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
+ h.write("200")
+ elif name == "BuildCompleted":
+ log += "Build Completed\n"
+ status = 100
+        # Check if we have an old status...
+ try:
+ h = file(event.data.getVar('TMPDIR',True)+'/tinder-status', 'r')
+ status = int(h.read())
+ except:
+ pass
+
+ elif name == "MultipleProviders":
+ log += "---> TINDERBOX Multiple Providers\n"
+ log += "multiple providers are available (%s);\n" % ", ".join(event.getCandidates())
+ log += "consider defining PREFERRED_PROVIDER_%s\n" % event.getItem()
+ log += "is runtime: %d\n" % event.isRuntime()
+ log += "<--- TINDERBOX Multiple Providers\n"
+ elif name == "NoProvider":
+ log += "Error: No Provider for: %s\n" % event.getItem()
+        log += "Error: Was Runtime: %d\n" % event.isRuntime()
+ status = 200
+ # remember the failure for the -k case
+ h = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
+ h.write("200")
+
+ # now post the log
+ if len(log) == 0:
+ return
+
+ # for now we will use the http post method as it is the only one
+ log_post_method = tinder_send_http
+ log_post_method(event.data, status, log)
+
+
+# we want to be an event handler
+addhandler tinderclient_eventhandler
+python tinderclient_eventhandler() {
+ if e.data is None or bb.event.getName(e) == "MsgNote":
+ return
+
+ do_tinder_report = e.data.getVar('TINDER_REPORT', True)
+ if do_tinder_report and do_tinder_report == "1":
+ tinder_do_tinder_report(e)
+
+ return
+}
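+
+# Illustrative local.conf fragment to enable reporting (all values are
+# placeholders for your own tinderbox instance):
+#   INHERIT += "tinderclient"
+#   TINDER_REPORT = "1"
+#   TINDER_HOST = "tinderbox.example.com"
+#   TINDER_URL = "/cgi-bin"
+#   TINDER_TREE = "myproject"
+#   TINDER_MACHINE = "builder01"
+#   TINDER_LOG = "${TMPDIR}/tinder.log"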
diff --git a/import-layers/yocto-poky/meta/classes/toaster.bbclass b/import-layers/yocto-poky/meta/classes/toaster.bbclass
new file mode 100644
index 000000000..1a70f14a9
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/toaster.bbclass
@@ -0,0 +1,397 @@
+#
+# Toaster helper class
+#
+# Copyright (C) 2013 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+#
+# This bbclass is designed to extract data used by OE-Core during the build process,
+# for recording in the Toaster system.
+# The data access is synchronous, preserving the build data integrity across
+# different builds.
+#
+# The data is transferred through the event system, using the MetadataEvent objects.
+#
+# The model is to enable the datadump functions as postfuncs, and have the dump
+# executed after the real taskfunc has been executed. This prevents the task
+# signature from changing depending on whether toaster is enabled or not.
+# Build performance is not affected if Toaster is not enabled.
+#
+# To enable, use INHERIT in local.conf:
+#
+# INHERIT += "toaster"
+#
+
+# Find and dump layer info once the layers have been parsed
+
+python toaster_layerinfo_dumpdata() {
+ import subprocess
+
+ def _get_git_branch(layer_path):
+ branch = subprocess.Popen("git symbolic-ref HEAD 2>/dev/null ", cwd=layer_path, shell=True, stdout=subprocess.PIPE).communicate()[0]
+ branch = branch.replace('refs/heads/', '').rstrip()
+ return branch
+
+ def _get_git_revision(layer_path):
+ revision = subprocess.Popen("git rev-parse HEAD 2>/dev/null ", cwd=layer_path, shell=True, stdout=subprocess.PIPE).communicate()[0].rstrip()
+ return revision
+
+ def _get_url_map_name(layer_name):
+        """ Some layers have a different name on the openembedded.org site;
+        this method returns the correct name to use in the URL
+ """
+
+ url_name = layer_name
+ url_mapping = {'meta': 'openembedded-core'}
+
+ for key in url_mapping.keys():
+ if key == layer_name:
+ url_name = url_mapping[key]
+
+ return url_name
+
+ def _get_layer_version_information(layer_path):
+
+ layer_version_info = {}
+ layer_version_info['branch'] = _get_git_branch(layer_path)
+ layer_version_info['commit'] = _get_git_revision(layer_path)
+ layer_version_info['priority'] = 0
+
+ return layer_version_info
+
+
+ def _get_layer_dict(layer_path):
+
+ layer_info = {}
+ layer_name = layer_path.split('/')[-1]
+ layer_url = 'http://layers.openembedded.org/layerindex/layer/{layer}/'
+ layer_url_name = _get_url_map_name(layer_name)
+
+ layer_info['name'] = layer_url_name
+ layer_info['local_path'] = layer_path
+ layer_info['layer_index_url'] = layer_url.format(layer=layer_url_name)
+ layer_info['version'] = _get_layer_version_information(layer_path)
+
+ return layer_info
+
+
+ bblayers = e.data.getVar("BBLAYERS", True)
+
+ llayerinfo = {}
+
+ for layer in { l for l in bblayers.strip().split(" ") if len(l) }:
+ llayerinfo[layer] = _get_layer_dict(layer)
+
+
+ bb.event.fire(bb.event.MetadataEvent("LayerInfo", llayerinfo), e.data)
+}
+
+# Dump package file info data
+
+def _toaster_load_pkgdatafile(dirpath, filepath):
+ import json
+ import re
+ pkgdata = {}
+ with open(os.path.join(dirpath, filepath), "r") as fin:
+ for line in fin:
+ try:
+ kn, kv = line.strip().split(": ", 1)
+ m = re.match(r"^PKG_([^A-Z:]*)", kn)
+ if m:
+ pkgdata['OPKGN'] = m.group(1)
+ kn = "_".join([x for x in kn.split("_") if x.isupper()])
+ pkgdata[kn] = kv.strip()
+ if kn == 'FILES_INFO':
+ pkgdata[kn] = json.loads(kv)
+
+ except ValueError:
+ pass # ignore lines without valid key: value pairs
+ return pkgdata
+
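+# The pkgdata files parsed above consist of "key: value" lines, for example
+# (illustrative values):
+#   PKG_busybox: busybox
+#   PKGSIZE_busybox: 526721
+#   FILES_INFO_busybox: {"/bin/busybox": 518584}
+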
+python toaster_package_dumpdata() {
+ """
+ Dumps the data about the packages created by a recipe
+ """
+
+    # No need to try to dump data if the recipe isn't generating packages
+ if not d.getVar('PACKAGES', True):
+ return
+
+ pkgdatadir = d.getVar('PKGDESTWORK', True)
+ lpkgdata = {}
+ datadir = os.path.join(pkgdatadir, 'runtime')
+
+ # scan and send data for each generated package
+ for datafile in os.listdir(datadir):
+ if not datafile.endswith('.packaged'):
+ lpkgdata = _toaster_load_pkgdatafile(datadir, datafile)
+ # Fire an event containing the pkg data
+ bb.event.fire(bb.event.MetadataEvent("SinglePackageInfo", lpkgdata), d)
+}
+
+# Dump output image file information
+
+python toaster_image_dumpdata() {
+ """
+    The image filename for output images is not standardized.
+    image_types.bbclass spells out IMAGE_CMD_xxx variables that actually
+    hardcode the way image file names are created.
+    So we look for files starting with the set name.
+
+ We also look for other files in the images/ directory which don't
+ match IMAGE_NAME, such as the kernel bzImage, modules tarball etc.
+ """
+
+    dir_to_walk = d.getVar('DEPLOY_DIR_IMAGE', True)
+    image_name = d.getVar('IMAGE_NAME', True)
+ image_info_data = {}
+ artifact_info_data = {}
+
+ # collect all images and artifacts in the images directory
+ for dirpath, dirnames, filenames in os.walk(dir_to_walk):
+ for filename in filenames:
+ full_path = os.path.join(dirpath, filename)
+ try:
+ if filename.startswith(image_name):
+ # image
+ image_info_data[full_path] = os.stat(full_path).st_size
+ else:
+ # other non-image artifact
+ if not os.path.islink(full_path):
+ artifact_info_data[full_path] = os.stat(full_path).st_size
+ except OSError as e:
+ bb.event.fire(bb.event.MetadataEvent("OSErrorException", e), d)
+
+ bb.event.fire(bb.event.MetadataEvent("ImageFileSize", image_info_data), d)
+ bb.event.fire(bb.event.MetadataEvent("ArtifactFileSize", artifact_info_data), d)
+}
+
+python toaster_artifact_dumpdata() {
+ """
+ Dump data about artifacts in the SDK_DEPLOY directory
+ """
+
+ dir_to_walk = d.getVar("SDK_DEPLOY", True)
+ artifact_info_data = {}
+
+ # collect all artifacts in the sdk directory
+ for dirpath, dirnames, filenames in os.walk(dir_to_walk):
+ for filename in filenames:
+ full_path = os.path.join(dirpath, filename)
+ try:
+ if not os.path.islink(full_path):
+ artifact_info_data[full_path] = os.stat(full_path).st_size
+ except OSError as e:
+ bb.event.fire(bb.event.MetadataEvent("OSErrorException", e), d)
+
+ bb.event.fire(bb.event.MetadataEvent("ArtifactFileSize", artifact_info_data), d)
+}
+
+# Collect the list of buildstats files based on fired events; when the build
+# completes, collect all stats and fire an event with the collected data
+
+python toaster_collect_task_stats() {
+ import bb.build
+ import bb.event
+ import bb.data
+ import bb.utils
+ import os
+
+ toaster_statlist_file = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist")
+
+ if not e.data.getVar('BUILDSTATS_BASE', True):
+ return # if we don't have buildstats, we cannot collect stats
+
+ def stat_to_float(value):
+ return float(value.strip('% \n\r'))
+
+ def _append_read_list(v):
+ lock = bb.utils.lockfile(e.data.expand("${TOPDIR}/toaster.lock"), False, True)
+
+ with open(toaster_statlist_file, "a") as fout:
+ taskdir = e.data.expand("${BUILDSTATS_BASE}/${BUILDNAME}/${PF}")
+ fout.write("%s::%s::%s::%s\n" % (e.taskfile, e.taskname, os.path.join(taskdir, e.task), e.data.expand("${PN}")))
+
+ bb.utils.unlockfile(lock)
+
+ def _read_stats(filename):
+ # seconds
+ cpu_time_user = 0
+ cpu_time_system = 0
+
+ # bytes
+ disk_io_read = 0
+ disk_io_write = 0
+
+ started = 0
+ ended = 0
+
+ taskname = ''
+
+ statinfo = {}
+
+ with open(filename, 'r') as task_bs:
+ for line in task_bs.readlines():
+ k,v = line.strip().split(": ", 1)
+ statinfo[k] = v
+
+ if "Started" in statinfo:
+ started = stat_to_float(statinfo["Started"])
+
+ if "Ended" in statinfo:
+ ended = stat_to_float(statinfo["Ended"])
+
+ if "Child rusage ru_utime" in statinfo:
+ cpu_time_user = cpu_time_user + stat_to_float(statinfo["Child rusage ru_utime"])
+
+ if "Child rusage ru_stime" in statinfo:
+ cpu_time_system = cpu_time_system + stat_to_float(statinfo["Child rusage ru_stime"])
+
+ if "IO write_bytes" in statinfo:
+ write_bytes = int(statinfo["IO write_bytes"].strip('% \n\r'))
+ disk_io_write = disk_io_write + write_bytes
+
+ if "IO read_bytes" in statinfo:
+ read_bytes = int(statinfo["IO read_bytes"].strip('% \n\r'))
+ disk_io_read = disk_io_read + read_bytes
+
+ return {
+ 'stat_file': filename,
+ 'cpu_time_user': cpu_time_user,
+ 'cpu_time_system': cpu_time_system,
+ 'disk_io_read': disk_io_read,
+ 'disk_io_write': disk_io_write,
+ 'started': started,
+ 'ended': ended
+ }
+
+ if isinstance(e, (bb.build.TaskSucceeded, bb.build.TaskFailed)):
+ _append_read_list(e)
+
+ if isinstance(e, bb.event.BuildCompleted) and os.path.exists(toaster_statlist_file):
+ events = []
+ with open(toaster_statlist_file, "r") as fin:
+ for line in fin:
+ (taskfile, taskname, filename, recipename) = line.strip().split("::")
+ stats = _read_stats(filename)
+ events.append((taskfile, taskname, stats, recipename))
+ bb.event.fire(bb.event.MetadataEvent("BuildStatsList", events), e.data)
+ os.unlink(toaster_statlist_file)
+}
+
+# dump relevant build history data as an event when the build is completed
+
+python toaster_buildhistory_dump() {
+ import re
+ BUILDHISTORY_DIR = e.data.expand("${TOPDIR}/buildhistory")
+ BUILDHISTORY_DIR_IMAGE_BASE = e.data.expand("%s/images/${MACHINE_ARCH}/${TCLIBC}/"% BUILDHISTORY_DIR)
+ pkgdata_dir = e.data.getVar("PKGDATA_DIR", True)
+
+
+ # scan the build targets for this build
+ images = {}
+ allpkgs = {}
+ files = {}
+ for target in e._pkgs:
+ target = target.split(':')[0] # strip ':<task>' suffix from the target
+ installed_img_path = e.data.expand(os.path.join(BUILDHISTORY_DIR_IMAGE_BASE, target))
+ if os.path.exists(installed_img_path):
+ images[target] = {}
+ files[target] = {}
+ files[target]['dirs'] = []
+ files[target]['syms'] = []
+ files[target]['files'] = []
+ with open("%s/installed-package-sizes.txt" % installed_img_path, "r") as fin:
+ for line in fin:
+ line = line.rstrip(";")
+ psize, px = line.split("\t")
+ punit, pname = px.split(" ")
+ # this size is "installed-size" as it measures how much space it takes on disk
+ images[target][pname.strip()] = {'size':int(psize)*1024, 'depends' : []}
+
+ with open("%s/depends.dot" % installed_img_path, "r") as fin:
+ p = re.compile(r' -> ')
+ dot = re.compile(r'.*style=dotted')
+ for line in fin:
+ line = line.rstrip(';')
+ linesplit = p.split(line)
+ if len(linesplit) == 2:
+ pname = linesplit[0].rstrip('"').strip('"')
+ dependsname = linesplit[1].split(" ")[0].strip().strip(";").strip('"').rstrip('"')
+ deptype = "depends"
+ if dot.match(line):
+ deptype = "recommends"
+ if not pname in images[target]:
+ images[target][pname] = {'size': 0, 'depends' : []}
+ if not dependsname in images[target]:
+ images[target][dependsname] = {'size': 0, 'depends' : []}
+ images[target][pname]['depends'].append((dependsname, deptype))
+
+ with open("%s/files-in-image.txt" % installed_img_path, "r") as fin:
+ for line in fin:
+ lc = [ x for x in line.strip().split(" ") if len(x) > 0 ]
+ if lc[0].startswith("l"):
+ files[target]['syms'].append(lc)
+ elif lc[0].startswith("d"):
+ files[target]['dirs'].append(lc)
+ else:
+ files[target]['files'].append(lc)
+
+ for pname in images[target]:
+ if not pname in allpkgs:
+ try:
+ pkgdata = _toaster_load_pkgdatafile("%s/runtime-reverse/" % pkgdata_dir, pname)
+ except IOError as err:
+ if err.errno == 2:
+ # We expect this e.g. for RRECOMMENDS that are unsatisfied at runtime
+ continue
+ else:
+ raise
+ allpkgs[pname] = pkgdata
+
+
+ data = { 'pkgdata' : allpkgs, 'imgdata' : images, 'filedata' : files }
+
+ bb.event.fire(bb.event.MetadataEvent("ImagePkgList", data), e.data)
+
+}
+
+# dump information related to license manifest path
+
+python toaster_licensemanifest_dump() {
+    deploy_dir = d.getVar('DEPLOY_DIR', True)
+    image_name = d.getVar('IMAGE_NAME', True)
+
+ data = { 'deploy_dir' : deploy_dir, 'image_name' : image_name }
+
+ bb.event.fire(bb.event.MetadataEvent("LicenseManifestPath", data), d)
+}
+
+# set event handlers
+addhandler toaster_layerinfo_dumpdata
+toaster_layerinfo_dumpdata[eventmask] = "bb.event.TreeDataPreparationCompleted"
+
+addhandler toaster_collect_task_stats
+toaster_collect_task_stats[eventmask] = "bb.event.BuildCompleted bb.build.TaskSucceeded bb.build.TaskFailed"
+
+addhandler toaster_buildhistory_dump
+toaster_buildhistory_dump[eventmask] = "bb.event.BuildCompleted"
+
+do_packagedata_setscene[postfuncs] += "toaster_package_dumpdata "
+do_packagedata_setscene[vardepsexclude] += "toaster_package_dumpdata "
+
+do_package[postfuncs] += "toaster_package_dumpdata "
+do_package[vardepsexclude] += "toaster_package_dumpdata "
+
+do_image_complete[postfuncs] += "toaster_image_dumpdata "
+do_image_complete[vardepsexclude] += "toaster_image_dumpdata "
+
+do_rootfs[postfuncs] += "toaster_licensemanifest_dump "
+do_rootfs[vardepsexclude] += "toaster_licensemanifest_dump "
+
+do_populate_sdk[postfuncs] += "toaster_artifact_dumpdata "
+do_populate_sdk[vardepsexclude] += "toaster_artifact_dumpdata "
diff --git a/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass b/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass
new file mode 100644
index 000000000..2e2c93af4
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass
@@ -0,0 +1,157 @@
+inherit siteinfo kernel-arch
+
+# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it
+# doesn't always match our expectations... but we default to the stock value
+REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
+TARGET_CC_ARCH_append_libc-uclibc = " -muclibc"
+TARGET_CC_ARCH_append_libc-musl = " -mmusl"
+
+# This function creates an environment-setup-script for use in a deployable SDK
+toolchain_create_sdk_env_script () {
+ # Create environment setup script
+ base_sbindir=${10:-${base_sbindir_nativesdk}}
+ base_bindir=${9:-${base_bindir_nativesdk}}
+ sbindir=${8:-${sbindir_nativesdk}}
+ sdkpathnative=${7:-${SDKPATHNATIVE}}
+ prefix=${6:-${prefix_nativesdk}}
+ bindir=${5:-${bindir_nativesdk}}
+ libdir=${4:-${libdir}}
+ sysroot=${3:-${SDKTARGETSYSROOT}}
+ multimach_target_sys=${2:-${REAL_MULTIMACH_TARGET_SYS}}
+ script=${1:-${SDK_OUTPUT}/${SDKPATH}/environment-setup-$multimach_target_sys}
+ rm -f $script
+ touch $script
+ echo 'export SDKTARGETSYSROOT='"$sysroot" >> $script
+ EXTRAPATH=""
+ for i in ${CANADIANEXTRAOS}; do
+ EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i"
+ done
+ echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
+ echo "export CCACHE_PATH=$sdkpathnative$bindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$CCACHE_PATH' >> $script
+ echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
+ echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig' >> $script
+ echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
+ echo "export OECORE_NATIVE_SYSROOT=\"$sdkpathnative\"" >> $script
+ echo 'export OECORE_TARGET_SYSROOT="$SDKTARGETSYSROOT"' >> $script
+ echo "export OECORE_ACLOCAL_OPTS=\"-I $sdkpathnative/usr/share/aclocal\"" >> $script
+ echo 'unset command_not_found_handle' >> $script
+
+ toolchain_shared_env_script
+}
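+
+# The generated script is sourced by SDK users before cross-compiling,
+# e.g. (paths are illustrative):
+#   . /opt/poky/2.1/environment-setup-i586-poky-linux
+#   $CC hello.c -o hello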
+
+# This function creates an environment-setup-script in the TMPDIR which enables
+# an OE-core IDE to integrate with the build tree
+toolchain_create_tree_env_script () {
+ script=${TMPDIR}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}
+ rm -f $script
+ touch $script
+ echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${PATH}' >> $script
+ echo 'export CCACHE_PATH=${STAGING_DIR_NATIVE}/usr/bin:${CCACHE_PATH}' >> $script
+ echo 'export PKG_CONFIG_SYSROOT_DIR=${PKG_CONFIG_SYSROOT_DIR}' >> $script
+ echo 'export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}' >> $script
+ echo 'export CONFIG_SITE="${@siteinfo_get_files(d)}"' >> $script
+ echo 'export SDKTARGETSYSROOT=${STAGING_DIR_TARGET}' >> $script
+ echo 'export OECORE_NATIVE_SYSROOT="${STAGING_DIR_NATIVE}"' >> $script
+ echo 'export OECORE_TARGET_SYSROOT="${STAGING_DIR_TARGET}"' >> $script
+ echo 'export OECORE_ACLOCAL_OPTS="-I ${STAGING_DIR_NATIVE}/usr/share/aclocal"' >> $script
+
+ toolchain_shared_env_script
+}
+
+toolchain_shared_env_script () {
+ echo 'export CC="${TARGET_PREFIX}gcc ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
+ echo 'export CXX="${TARGET_PREFIX}g++ ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
+ echo 'export CPP="${TARGET_PREFIX}gcc -E ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
+ echo 'export AS="${TARGET_PREFIX}as ${TARGET_AS_ARCH}"' >> $script
+ echo 'export LD="${TARGET_PREFIX}ld ${TARGET_LD_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
+ echo 'export GDB=${TARGET_PREFIX}gdb' >> $script
+ echo 'export STRIP=${TARGET_PREFIX}strip' >> $script
+ echo 'export RANLIB=${TARGET_PREFIX}ranlib' >> $script
+ echo 'export OBJCOPY=${TARGET_PREFIX}objcopy' >> $script
+ echo 'export OBJDUMP=${TARGET_PREFIX}objdump' >> $script
+ echo 'export AR=${TARGET_PREFIX}ar' >> $script
+ echo 'export NM=${TARGET_PREFIX}nm' >> $script
+ echo 'export M4=m4' >> $script
+ echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script
+ echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${SDK_ARCH}-linux --with-libtool-sysroot=$SDKTARGETSYSROOT"' >> $script
+ echo 'export CFLAGS="${TARGET_CFLAGS}"' >> $script
+ echo 'export CXXFLAGS="${TARGET_CXXFLAGS}"' >> $script
+ echo 'export LDFLAGS="${TARGET_LDFLAGS}"' >> $script
+ echo 'export CPPFLAGS="${TARGET_CPPFLAGS}"' >> $script
+ echo 'export KCFLAGS="--sysroot=$SDKTARGETSYSROOT"' >> $script
+ echo 'export OECORE_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script
+ echo 'export OECORE_SDK_VERSION="${SDK_VERSION}"' >> $script
+ echo 'export ARCH=${ARCH}' >> $script
+ echo 'export CROSS_COMPILE=${TARGET_PREFIX}' >> $script
+
+ cat >> $script <<EOF
+
+# Append environment subscripts
+if [ -d "\$OECORE_TARGET_SYSROOT/environment-setup.d" ]; then
+ for envfile in \$OECORE_TARGET_SYSROOT/environment-setup.d/*.sh; do
+ source \$envfile
+ done
+fi
+if [ -d "\$OECORE_NATIVE_SYSROOT/environment-setup.d" ]; then
+ for envfile in \$OECORE_NATIVE_SYSROOT/environment-setup.d/*.sh; do
+ source \$envfile
+ done
+fi
+EOF
+}
+
+# We get the cached site config at runtime
+TOOLCHAIN_CONFIGSITE_NOCACHE = "${@siteinfo_get_files(d, True)}"
+TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
+TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "virtual/${MLPREFIX}libc ncurses"
+
+# This function creates a site config file
+toolchain_create_sdk_siteconfig () {
+ local siteconfig=$1
+
+ rm -f $siteconfig
+ touch $siteconfig
+
+ for sitefile in ${TOOLCHAIN_CONFIGSITE_NOCACHE} ; do
+ cat $sitefile >> $siteconfig
+ done
+
+ #get cached site config
+ for sitefile in ${TOOLCHAIN_NEED_CONFIGSITE_CACHE}; do
+ # Resolve virtual/* names to the real recipe name using sysroot-providers info
+ case $sitefile in virtual/*)
+ sitefile=`echo $sitefile | tr / _`
+ sitefile=`cat ${STAGING_DIR_TARGET}/sysroot-providers/$sitefile`
+ esac
+
+ if [ -r ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config ]; then
+ cat ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config >> $siteconfig
+ fi
+ done
+}
+# The immediate expansion above can result in unwanted path dependencies here
+toolchain_create_sdk_siteconfig[vardepsexclude] = "TOOLCHAIN_CONFIGSITE_SYSROOTCACHE"
+
+# This function creates a version information file
+toolchain_create_sdk_version () {
+ local versionfile=$1
+ rm -f $versionfile
+ touch $versionfile
+ echo 'Distro: ${DISTRO}' >> $versionfile
+ echo 'Distro Version: ${DISTRO_VERSION}' >> $versionfile
+ echo 'Metadata Revision: ${METADATA_REVISION}' >> $versionfile
+ echo 'Timestamp: ${DATETIME}' >> $versionfile
+}
+toolchain_create_sdk_version[vardepsexclude] = "DATETIME"
+
+python __anonymous () {
+ import oe.classextend
+ deps = ""
+ for dep in (d.getVar('TOOLCHAIN_NEED_CONFIGSITE_CACHE', True) or "").split():
+ deps += " %s:do_populate_sysroot" % dep
+ for variant in (d.getVar('MULTILIB_VARIANTS', True) or "").split():
+ clsextend = oe.classextend.ClassExtender(variant, d)
+ newdep = clsextend.extend_name(dep)
+ deps += " %s:do_populate_sysroot" % newdep
+ d.appendVarFlag('do_configure', 'depends', deps)
+}
diff --git a/import-layers/yocto-poky/meta/classes/typecheck.bbclass b/import-layers/yocto-poky/meta/classes/typecheck.bbclass
new file mode 100644
index 000000000..6bff7c713
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/typecheck.bbclass
@@ -0,0 +1,12 @@
+# Check types of bitbake configuration variables
+#
+# See oe.types for details.
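+#
+# Illustrative usage in a conf file (assuming the variable should be
+# boolean-typed; see oe.types for the supported types):
+#   FOO = "yes"
+#   FOO[type] = "boolean"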
+
+python check_types() {
+ import oe.types
+ for key in e.data.keys():
+ if e.data.getVarFlag(key, "type", True):
+ oe.data.typed_value(key, e.data)
+}
+addhandler check_types
+check_types[eventmask] = "bb.event.ConfigParsed"
diff --git a/import-layers/yocto-poky/meta/classes/uboot-config.bbclass b/import-layers/yocto-poky/meta/classes/uboot-config.bbclass
new file mode 100644
index 000000000..cb061af34
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/uboot-config.bbclass
@@ -0,0 +1,49 @@
+# Handle U-Boot config for a machine
+#
+# The format to specify it, in the machine, is:
+#
+# UBOOT_CONFIG ??= <default>
+# UBOOT_CONFIG[foo] = "config,images"
+#
+# or
+#
+# UBOOT_MACHINE = "config"
+#
+# Copyright 2013, 2014 (C) O.S. Systems Software LTDA.
+
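+# An illustrative machine configuration (the config and image type names
+# are placeholders for your U-Boot build targets):
+#   UBOOT_CONFIG ??= "sd"
+#   UBOOT_CONFIG[sd] = "mx6qsabresd_config,sdcard"
+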
+python () {
+ ubootmachine = d.getVar("UBOOT_MACHINE", True)
+ ubootconfigflags = d.getVarFlags('UBOOT_CONFIG')
+ # The "doc" varflag is special, we don't want to see it here
+ ubootconfigflags.pop('doc', None)
+
+ if not ubootmachine and not ubootconfigflags:
+ PN = d.getVar("PN", True)
+ FILE = os.path.basename(d.getVar("FILE", True))
+ bb.debug(1, "To build %s, see %s for instructions on \
+ setting up your machine config" % (PN, FILE))
+ raise bb.parse.SkipPackage("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE", True))
+
+ if ubootmachine and ubootconfigflags:
+ raise bb.parse.SkipPackage("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.")
+
+ if not ubootconfigflags:
+ return
+
+ ubootconfig = (d.getVar('UBOOT_CONFIG', True) or "").split()
+ if len(ubootconfig) > 0:
+ for config in ubootconfig:
+ for f, v in ubootconfigflags.items():
+ if config == f:
+ items = v.split(',')
+ if items[0] and len(items) > 2:
+ raise bb.parse.SkipPackage('Only config,images can be specified!')
+ d.appendVar('UBOOT_MACHINE', ' ' + items[0])
+ # IMAGE_FSTYPES appending
+ if len(items) > 1 and items[1]:
+ bb.debug(1, "Appending '%s' to IMAGE_FSTYPES." % items[1])
+ d.appendVar('IMAGE_FSTYPES', ' ' + items[1])
+ break
+ elif len(ubootconfig) == 0:
+ raise bb.parse.SkipPackage('You must set a default in UBOOT_CONFIG.')
+}
diff --git a/import-layers/yocto-poky/meta/classes/uninative.bbclass b/import-layers/yocto-poky/meta/classes/uninative.bbclass
new file mode 100644
index 000000000..89cec07d7
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/uninative.bbclass
@@ -0,0 +1,140 @@
+UNINATIVE_LOADER ?= "${STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', 'ld-linux.so.2', d)}"
+
+UNINATIVE_URL ?= "unset"
+UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc.tar.bz2"
+# Example checksums
+#UNINATIVE_CHECKSUM[i586] = "dead"
+#UNINATIVE_CHECKSUM[x86_64] = "dead"
+UNINATIVE_DLDIR ?= "${DL_DIR}/uninative/"
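+# Illustrative local.conf usage (URL and checksum are placeholders):
+#   UNINATIVE_URL = "http://example.com/uninative/"
+#   UNINATIVE_CHECKSUM[x86_64] = "<sha256sum of the x86_64 tarball>"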
+
+# https://wiki.debian.org/GCC5
+# We may see binaries built with gcc5 run or linked into gcc4 environment
+# so use the older libstdc++ standard for now until we don't support gcc4
+# on the host system.
+BUILD_CXXFLAGS_append = " -D_GLIBCXX_USE_CXX11_ABI=0"
+
+#
+# icu configure defaults to CXX11 if no -std= option is passed in CXXFLAGS
+# therefore pass one
+BUILD_CXXFLAGS_append_pn-icu-native = " -std=c++98"
+
+addhandler uninative_event_fetchloader
+uninative_event_fetchloader[eventmask] = "bb.event.BuildStarted"
+
+addhandler uninative_event_enable
+uninative_event_enable[eventmask] = "bb.event.ConfigParsed"
+
+python uninative_event_fetchloader() {
+ """
+ This event fires on the parent and will try to fetch the tarball if the
+ loader isn't already present.
+ """
+
+ chksum = d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH", True), True)
+ if not chksum:
+ bb.fatal("Uninative selected but not configured correctly, please set UNINATIVE_CHECKSUM[%s]" % d.getVar("BUILD_ARCH", True))
+
+ loader = d.getVar("UNINATIVE_LOADER", True)
+ loaderchksum = loader + ".chksum"
+ if os.path.exists(loader) and os.path.exists(loaderchksum):
+ with open(loaderchksum, "r") as f:
+ readchksum = f.read().strip()
+ if readchksum == chksum:
+ return
+
+ import subprocess
+ try:
+ # Save and restore cwd as Fetch.download() does a chdir()
+ olddir = os.getcwd()
+
+ tarball = d.getVar("UNINATIVE_TARBALL", True)
+ tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR", True), chksum)
+ tarballpath = os.path.join(tarballdir, tarball)
+
+ if not os.path.exists(tarballpath):
+ bb.utils.mkdirhier(tarballdir)
+ if d.getVar("UNINATIVE_URL", True) == "unset":
+ bb.fatal("Uninative selected but not configured, please set UNINATIVE_URL")
+
+ localdata = bb.data.createCopy(d)
+ localdata.setVar('FILESPATH', "")
+ localdata.setVar('DL_DIR', tarballdir)
+
+ srcuri = d.expand("${UNINATIVE_URL}${UNINATIVE_TARBALL};sha256sum=%s" % chksum)
+ bb.note("Fetching uninative binary shim from %s" % srcuri)
+
+ fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
+ fetcher.download()
+ localpath = fetcher.localpath(srcuri)
+ if localpath != tarballpath and os.path.exists(localpath) and not os.path.exists(tarballpath):
+ os.symlink(localpath, tarballpath)
+
+ cmd = d.expand("mkdir -p ${STAGING_DIR}-uninative; cd ${STAGING_DIR}-uninative; tar -xjf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; ${STAGING_DIR}-uninative/relocate_sdk.py ${STAGING_DIR}-uninative/${BUILD_ARCH}-linux ${UNINATIVE_LOADER} ${UNINATIVE_LOADER} ${STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative ${STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so" % chksum)
+ subprocess.check_call(cmd, shell=True)
+
+ with open(loaderchksum, "w") as f:
+ f.write(chksum)
+
+ enable_uninative(d)
+
+ except bb.fetch2.BBFetchException as exc:
+ bb.warn("Disabling uninative as unable to fetch uninative tarball: %s" % str(exc))
+ bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
+ except subprocess.CalledProcessError as exc:
+ bb.warn("Disabling uninative as unable to install uninative tarball: %s" % str(exc))
+ bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
+ finally:
+ os.chdir(olddir)
+}
+
+python uninative_event_enable() {
+ """
+ This event handler is called in the workers and is responsible for setting
+ up uninative if a loader is found.
+ """
+ enable_uninative(d)
+}
+
+def enable_uninative(d):
+ loader = d.getVar("UNINATIVE_LOADER", True)
+ if os.path.exists(loader):
+ bb.debug(2, "Enabling uninative")
+ d.setVar("NATIVELSBSTRING", "universal")
+ d.appendVar("SSTATEPOSTUNPACKFUNCS", " uninative_changeinterp")
+ d.prependVar("PATH", "${STAGING_DIR}-uninative/${BUILD_ARCH}-linux${bindir_native}:")
+
+python uninative_changeinterp () {
+ import subprocess
+ import stat
+ import oe.qa
+
+ if not (bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d)):
+ return
+
+ sstateinst = d.getVar('SSTATE_INSTDIR', True)
+ for walkroot, dirs, files in os.walk(sstateinst):
+ for file in files:
+ if file.endswith(".so") or ".so." in file:
+ continue
+ f = os.path.join(walkroot, file)
+ if os.path.islink(f):
+ continue
+ s = os.stat(f)
+ if not ((s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH)):
+ continue
+ elf = oe.qa.ELFFile(f)
+ try:
+ elf.open()
+ except oe.qa.NotELFFileError:
+ continue
+ if not elf.isDynamic():
+ continue
+
+ try:
+ subprocess.check_output(("patchelf-uninative", "--set-interpreter",
+ d.getVar("UNINATIVE_LOADER", True), f),
+ stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("'%s' failed with exit code %d and the following output:\n%s" %
+ (e.cmd, e.returncode, e.output))
+}
diff --git a/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass b/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass
new file mode 100644
index 000000000..70a818572
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass
@@ -0,0 +1,267 @@
+# This class is used to help the alternatives system, which is useful when
+# multiple sources provide the same command. You can use the update-alternatives
+# command directly in your recipe, but in most cases this class simplifies
+# that job.
+#
+# To use this class a number of variables should be defined:
+#
+# List all of the alternatives needed by a package:
+# ALTERNATIVE_<pkg> = "name1 name2 name3 ..."
+#
+# i.e. ALTERNATIVE_busybox = "sh sed test bracket"
+#
+# The pathname of the link
+# ALTERNATIVE_LINK_NAME[name] = "target"
+#
+# This is the name of the binary once it has been installed on the target.
+# This name is global to all split packages in this recipe, and should match
+# other recipes with the same functionality.
+# i.e. ALTERNATIVE_LINK_NAME[bracket] = "/usr/bin/["
+#
+# NOTE: If ALTERNATIVE_LINK_NAME is not defined, it defaults to ${bindir}/name
+#
+# The default link to create for all targets
+# ALTERNATIVE_TARGET = "target"
+#
+# This is useful in a multicall binary case
+# i.e. ALTERNATIVE_TARGET = "/bin/busybox"
+#
+# A non-default link to create for a target
+# ALTERNATIVE_TARGET[name] = "target"
+#
+# This is the name of the binary as it has been installed by do_install
+# i.e. ALTERNATIVE_TARGET[sh] = "/bin/bash"
+#
+# A package specific link for a target
+# ALTERNATIVE_TARGET_<pkg>[name] = "target"
+#
+# This is useful when a recipe provides multiple alternatives for the
+# same item.
+#
+# NOTE: If ALTERNATIVE_TARGET is not defined, it will inherit the value
+# from ALTERNATIVE_LINK_NAME.
+#
+# NOTE: If the ALTERNATIVE_LINK_NAME and ALTERNATIVE_TARGET are the same,
+# ALTERNATIVE_TARGET will have '.{BPN}' appended to it. If the file
+# referenced has not been renamed, it will also be renamed. (This avoids
+# the need to rename alternative files in the do_install step, but still
+# supports it if necessary for some reason.)
+#
+# The default priority for any alternatives
+# ALTERNATIVE_PRIORITY = "priority"
+#
+# i.e. default is ALTERNATIVE_PRIORITY = "10"
+#
+# The non-default priority for a specific target
+# ALTERNATIVE_PRIORITY[name] = "priority"
+#
+# The package priority for a specific target
+# ALTERNATIVE_PRIORITY_<pkg>[name] = "priority"
+
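+# A compact hypothetical example pulling the above together:
+#   ALTERNATIVE_${PN} = "vi"
+#   ALTERNATIVE_LINK_NAME[vi] = "${base_bindir}/vi"
+#   ALTERNATIVE_TARGET[vi] = "${base_bindir}/busybox"
+#   ALTERNATIVE_PRIORITY[vi] = "50"
+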
+ALTERNATIVE_PRIORITY = "10"
+
+# We need special processing for vardeps because it cannot work on
+# modified flag values. So we aggregate the flags into a new variable
+# and include that variable in the set.
+UPDALTVARS = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_PRIORITY"
+
+def gen_updatealternativesvardeps(d):
+ pkgs = (d.getVar("PACKAGES", True) or "").split()
+ vars = (d.getVar("UPDALTVARS", True) or "").split()
+
+ # First compute them for non_pkg versions
+ for v in vars:
+ for flag in (d.getVarFlags(v) or {}):
+ if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
+ continue
+ d.appendVar('%s_VARDEPS' % (v), ' %s:%s' % (flag, d.getVarFlag(v, flag, False)))
+
+ for p in pkgs:
+ for v in vars:
+ for flag in (d.getVarFlags("%s_%s" % (v,p)) or {}):
+ if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
+ continue
+ d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s_%s' % (v,p), flag, False)))
+
+def ua_extend_depends(d):
+ if not 'virtual/update-alternatives' in d.getVar('PROVIDES', True):
+ d.appendVar('DEPENDS', ' virtual/${MLPREFIX}update-alternatives')
+
+python __anonymous() {
+ # Update Alternatives only works on target packages...
+ if bb.data.inherits_class('native', d) or \
+ bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or \
+ bb.data.inherits_class('cross-canadian', d):
+ return
+
+ # compute special vardeps
+ gen_updatealternativesvardeps(d)
+
+ # extend the depends to include virtual/update-alternatives
+ ua_extend_depends(d)
+}
+
+def gen_updatealternativesvars(d):
+ ret = []
+ pkgs = (d.getVar("PACKAGES", True) or "").split()
+ vars = (d.getVar("UPDALTVARS", True) or "").split()
+
+ for v in vars:
+ ret.append(v + "_VARDEPS")
+
+ for p in pkgs:
+ for v in vars:
+ ret.append(v + "_" + p)
+ ret.append(v + "_VARDEPS_" + p)
+ return " ".join(ret)
+
+# Now the new stuff, we use a custom function to generate the right values
+populate_packages[vardeps] += "${UPDALTVARS} ${@gen_updatealternativesvars(d)}"
+
+# We need to do the rename after the image creation step, but before
+# the split and strip steps; packagecopy seems to be the earliest reasonable
+# place.
+python perform_packagecopy_append () {
+ # Check for deprecated usage...
+ pn = d.getVar('BPN', True)
+ if d.getVar('ALTERNATIVE_LINKS', True) != None:
+ bb.fatal('%s: Use of ALTERNATIVE_LINKS/ALTERNATIVE_PATH/ALTERNATIVE_NAME is no longer supported, please convert to the updated syntax, see update-alternatives.bbclass for more info.' % pn)
+
+ # Do actual update alternatives processing
+ pkgdest = d.getVar('PKGD', True)
+ for pkg in (d.getVar('PACKAGES', True) or "").split():
+ # If the src == dest, we know we need to rename the dest by appending ${BPN}
+ link_rename = {}
+ for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
+ alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
+ if not alt_link:
+ alt_link = "%s/%s" % (d.getVar('bindir', True), alt_name)
+ d.setVarFlag('ALTERNATIVE_LINK_NAME', alt_name, alt_link)
+
+ alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
+ alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
+ # Sometimes alt_target is specified as relative to the link name.
+ alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
+
+ # If the link and target are the same name, we need to rename the target.
+ if alt_link == alt_target:
+ src = '%s/%s' % (pkgdest, alt_target)
+ alt_target_rename = '%s.%s' % (alt_target, pn)
+ dest = '%s/%s' % (pkgdest, alt_target_rename)
+ if os.path.lexists(dest):
+ bb.note('%s: Already renamed: %s' % (pn, alt_target_rename))
+ elif os.path.lexists(src):
+ if os.path.islink(src):
+ # Delay rename of links
+ link_rename[alt_target] = alt_target_rename
+ else:
+ bb.note('%s: Rename %s -> %s' % (pn, alt_target, alt_target_rename))
+ os.rename(src, dest)
+ else:
+ bb.warn("%s: alternative target (%s or %s) does not exist, skipping..." % (pn, alt_target, alt_target_rename))
+ continue
+ d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, alt_target_rename)
+
+ # Process delayed link names
+ # Do these after other renames so we can correct broken links
+ for alt_target in link_rename:
+ src = '%s/%s' % (pkgdest, alt_target)
+ dest = '%s/%s' % (pkgdest, link_rename[alt_target])
+ link = os.readlink(src)
+ link_target = oe.path.realpath(src, pkgdest, True)
+
+ if os.path.lexists(link_target):
+ # Ok, the link_target exists, we can rename
+ bb.note('%s: Rename (link) %s -> %s' % (pn, alt_target, link_rename[alt_target]))
+ os.rename(src, dest)
+ else:
+ # Try to resolve the broken link to link.${BPN}
+ link_maybe = '%s.%s' % (link, pn)
+ if os.path.lexists(os.path.join(os.path.dirname(src), link_maybe)):
+ # Ok, the renamed link target exists.. create a new link, and remove the original
+ bb.note('%s: Creating new link %s -> %s' % (pn, link_rename[alt_target], link_maybe))
+ os.symlink(link_maybe, dest)
+ os.unlink(src)
+ else:
+ bb.warn('%s: Unable to resolve dangling symlink: %s' % (pn, alt_target))
+}
+
+PACKAGESPLITFUNCS_prepend = "populate_packages_updatealternatives "
+
+python populate_packages_updatealternatives () {
+ pn = d.getVar('BPN', True)
+
+ # Do actual update alternatives processing
+ pkgdest = d.getVar('PKGD', True)
+ for pkg in (d.getVar('PACKAGES', True) or "").split():
+ # Create post install/removal scripts
+ alt_setup_links = ""
+ alt_remove_links = ""
+ for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
+ alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
+ alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
+ alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
+ # Sometimes alt_target is specified as relative to the link name.
+ alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
+
+ alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name, True)
+ alt_priority = alt_priority or d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg, True) or d.getVar('ALTERNATIVE_PRIORITY', True)
+
+ # This shouldn't trigger, as it should have been resolved earlier!
+ if alt_link == alt_target:
+ bb.note('alt_link == alt_target: %s == %s -- correcting, this should not happen!' % (alt_link, alt_target))
+ alt_target = '%s.%s' % (alt_target, pn)
+
+ if not os.path.lexists('%s/%s' % (pkgdest, alt_target)):
+ bb.warn('%s: NOT adding alternative provide %s: %s does not exist' % (pn, alt_link, alt_target))
+ continue
+
+ # Default to generating a shell script; eventually we may want to change this.
+ alt_target = os.path.normpath(alt_target)
+
+ alt_setup_links += '\tupdate-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority)
+ alt_remove_links += '\tupdate-alternatives --remove %s %s\n' % (alt_name, alt_target)
+
+ if alt_setup_links:
+ # RDEPENDS setup
+ provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives', True)
+ if provider:
+ #bb.note('adding runtime requirement for update-alternatives for %s' % pkg)
+ d.appendVar('RDEPENDS_%s' % pkg, ' ' + d.getVar('MLPREFIX', False) + provider)
+
+ bb.note('adding update-alternatives calls to postinst/prerm for %s' % pkg)
+ bb.note('%s' % alt_setup_links)
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True) or '#!/bin/sh\n'
+ postinst += alt_setup_links
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+
+ bb.note('%s' % alt_remove_links)
+ prerm = d.getVar('pkg_prerm_%s' % pkg, True) or '#!/bin/sh\n'
+ prerm += alt_remove_links
+ d.setVar('pkg_prerm_%s' % pkg, prerm)
+}
+
+python package_do_filedeps_append () {
+ pn = d.getVar('BPN', True)
+ pkgdest = d.getVar('PKGDEST', True)
+
+ # 'packages' comes from package_do_filedeps, which this function is appended to
+ for pkg in packages.split():
+ for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
+ alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
+ alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
+ alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
+
+ if alt_link == alt_target:
+ bb.warn('%s: alt_link == alt_target: %s == %s' % (pn, alt_link, alt_target))
+ alt_target = '%s.%s' % (alt_target, pn)
+
+ if not os.path.lexists('%s/%s/%s' % (pkgdest, pkg, alt_target)):
+ continue
+
+ # Add file provide
+ trans_target = oe.package.file_translate(alt_target)
+ d.appendVar('FILERPROVIDES_%s_%s' % (trans_target, pkg), " " + alt_link)
+ if not trans_target in (d.getVar('FILERPROVIDESFLIST_%s' % pkg, True) or ""):
+ d.appendVar('FILERPROVIDESFLIST_%s' % pkg, " " + trans_target)
+}
+
diff --git a/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass b/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass
new file mode 100644
index 000000000..2a0a74a5f
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass
@@ -0,0 +1,133 @@
+UPDATERCPN ?= "${PN}"
+
+DEPENDS_append_class-target = " update-rc.d-native update-rc.d initscripts"
+UPDATERCD = "update-rc.d"
+UPDATERCD_class-cross = ""
+UPDATERCD_class-native = ""
+UPDATERCD_class-nativesdk = ""
+
+INITSCRIPT_PARAMS ?= "defaults"
+
+INIT_D_DIR = "${sysconfdir}/init.d"
+
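+# Illustrative recipe usage (hypothetical names, using the variables above):
+#   inherit update-rc.d
+#   INITSCRIPT_NAME = "myservice"
+#   INITSCRIPT_PARAMS = "start 99 5 2 . stop 20 0 1 6 ."
+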
+updatercd_preinst() {
+if [ -z "$D" -a -f "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
+ ${INIT_D_DIR}/${INITSCRIPT_NAME} stop
+fi
+if type update-rc.d >/dev/null 2>/dev/null; then
+ if [ -n "$D" ]; then
+ OPT="-f -r $D"
+ else
+ OPT="-f"
+ fi
+ update-rc.d $OPT ${INITSCRIPT_NAME} remove
+fi
+}
+
+updatercd_postinst() {
+if type update-rc.d >/dev/null 2>/dev/null; then
+ if [ -n "$D" ]; then
+ OPT="-r $D"
+ else
+ OPT="-s"
+ fi
+ update-rc.d $OPT ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS}
+fi
+}
+
+updatercd_prerm() {
+if [ -z "$D" ]; then
+ ${INIT_D_DIR}/${INITSCRIPT_NAME} stop
+fi
+}
+
+updatercd_postrm() {
+if type update-rc.d >/dev/null 2>/dev/null; then
+ if [ -n "$D" ]; then
+ OPT="-f -r $D"
+ else
+ OPT="-f"
+ fi
+ update-rc.d $OPT ${INITSCRIPT_NAME} remove
+fi
+}
+
+
+def update_rc_after_parse(d):
+ if d.getVar('INITSCRIPT_PACKAGES', False) is None:
+ if d.getVar('INITSCRIPT_NAME', False) is None:
+ raise bb.build.FuncFailed("%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % d.getVar('FILE', False))
+ if d.getVar('INITSCRIPT_PARAMS', False) is None:
+ raise bb.build.FuncFailed("%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % d.getVar('FILE', False))
+
+python __anonymous() {
+ update_rc_after_parse(d)
+}
+
+PACKAGESPLITFUNCS_prepend = "populate_packages_updatercd "
+PACKAGESPLITFUNCS_remove_class-nativesdk = "populate_packages_updatercd "
+
+populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_preinst updatercd_postinst"
+populate_packages_updatercd[vardepsexclude] += "OVERRIDES"
+
+python populate_packages_updatercd () {
+ def update_rcd_auto_depend(pkg):
+ import subprocess
+ import os
+ path = d.expand("${D}${INIT_D_DIR}/${INITSCRIPT_NAME}")
+ if not os.path.exists(path):
+ return
+ statement = "grep -q -w '/etc/init.d/functions' %s" % path
+ if subprocess.call(statement, shell=True) == 0:
+ mlprefix = d.getVar('MLPREFIX', True) or ""
+ d.appendVar('RDEPENDS_' + pkg, ' %sinitscripts-functions' % (mlprefix))
+
+ def update_rcd_package(pkg):
+ bb.debug(1, 'adding update-rc.d calls to preinst/postinst/prerm/postrm for %s' % pkg)
+
+ localdata = bb.data.createCopy(d)
+ overrides = localdata.getVar("OVERRIDES", True)
+ localdata.setVar("OVERRIDES", "%s:%s" % (pkg, overrides))
+ bb.data.update_data(localdata)
+
+ update_rcd_auto_depend(pkg)
+
+ preinst = d.getVar('pkg_preinst_%s' % pkg, True)
+ if not preinst:
+ preinst = '#!/bin/sh\n'
+ preinst += localdata.getVar('updatercd_preinst', True)
+ d.setVar('pkg_preinst_%s' % pkg, preinst)
+
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += localdata.getVar('updatercd_postinst', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+
+ prerm = d.getVar('pkg_prerm_%s' % pkg, True)
+ if not prerm:
+ prerm = '#!/bin/sh\n'
+ prerm += localdata.getVar('updatercd_prerm', True)
+ d.setVar('pkg_prerm_%s' % pkg, prerm)
+
+ postrm = d.getVar('pkg_postrm_%s' % pkg, True)
+ if not postrm:
+ postrm = '#!/bin/sh\n'
+ postrm += localdata.getVar('updatercd_postrm', True)
+ d.setVar('pkg_postrm_%s' % pkg, postrm)
+
+ d.appendVar('RRECOMMENDS_' + pkg, " ${MLPREFIX}${UPDATERCD}")
+
+ # Check that this class isn't being inhibited (generally, by
+ # systemd.bbclass) before doing any work.
+ if bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) or \
+ not d.getVar("INHIBIT_UPDATERCD_BBCLASS", True):
+ pkgs = d.getVar('INITSCRIPT_PACKAGES', True)
+ if pkgs is None:
+ pkgs = d.getVar('UPDATERCPN', True)
+ packages = (d.getVar('PACKAGES', True) or "").split()
+ if pkgs not in packages and packages != []:
+ pkgs = packages[0]
+ for pkg in pkgs.split():
+ update_rcd_package(pkg)
+}
diff --git a/import-layers/yocto-poky/meta/classes/upstream-version-is-even.bbclass b/import-layers/yocto-poky/meta/classes/upstream-version-is-even.bbclass
new file mode 100644
index 000000000..89556ed7d
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/upstream-version-is-even.bbclass
@@ -0,0 +1,5 @@
+# This class ensures that the upstream version check only
+# accepts even minor versions (e.g. 3.0.x, 3.2.x, 3.4.x).
+# This scheme is used by GNOME and a number of other projects
+# to distinguish stable releases from development releases.
+UPSTREAM_CHECK_REGEX = "(?P<pver>\d+\.(\d*[02468])+(\.\d+)+)"
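+#
+# For example (illustrative): "3.20.1" is accepted (minor version 20 is even),
+# while "3.21.1" is rejected (minor version 21 is odd).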
diff --git a/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass b/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass
new file mode 100644
index 000000000..a9b506d05
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass
@@ -0,0 +1,302 @@
+# In order to support a deterministic set of 'dynamic' users/groups,
+# we need a function to reformat the params based on a static file
+def update_useradd_static_config(d):
+ import argparse
+ import itertools
+ import re
+
+ class myArgumentParser( argparse.ArgumentParser ):
+ def _print_message(self, message, file=None):
+ bb.warn("%s - %s: %s" % (d.getVar('PN', True), pkg, message))
+
+ # This should never be called...
+ def exit(self, status=0, message=None):
+ message = message or ("%s - %s: useradd.bbclass: Argument parsing exited" % (d.getVar('PN', True), pkg))
+ self.error(message)
+
+ def error(self, message):
+ raise bb.build.FuncFailed(message)
+
+ def list_extend(iterable, length, obj = None):
+ """Ensure that iterable is the specified length by extending with obj
+ and return it as a list"""
+ return list(itertools.islice(itertools.chain(iterable, itertools.repeat(obj)), length))
+
+ def merge_files(file_list, exp_fields):
+ """Read each passwd/group file in file_list, split each line and create
+ a dictionary with the user/group names as keys and the split lines as
+ values. If the user/group name already exists in the dictionary, then
+ update any fields in the list with the values from the new list (if they
+ are set)."""
+ id_table = dict()
+ for conf in file_list.split():
+ if os.path.exists(conf):
+ f = open(conf, "r")
+ for line in f:
+ if line.startswith('#'):
+ continue
+ # Make sure there always are at least exp_fields elements in
+ # the field list. This allows for leaving out trailing
+ # colons in the files.
+ fields = list_extend(line.rstrip().split(":"), exp_fields)
+ if fields[0] not in id_table:
+ id_table[fields[0]] = fields
+ else:
+ # use the built-in map (itertools.imap is Python 2 only)
+ id_table[fields[0]] = list(map(lambda x, y: x or y, fields, id_table[fields[0]]))
+
+ return id_table
+
+ # We parse and rewrite the useradd components
+ def rewrite_useradd(params):
+ # The following comes from --help on useradd from shadow
+ parser = myArgumentParser(prog='useradd')
+ parser.add_argument("-b", "--base-dir", metavar="BASE_DIR", help="base directory for the home directory of the new account")
+ parser.add_argument("-c", "--comment", metavar="COMMENT", help="GECOS field of the new account")
+ parser.add_argument("-d", "--home-dir", metavar="HOME_DIR", help="home directory of the new account")
+ parser.add_argument("-D", "--defaults", help="print or change default useradd configuration", action="store_true")
+ parser.add_argument("-e", "--expiredate", metavar="EXPIRE_DATE", help="expiration date of the new account")
+ parser.add_argument("-f", "--inactive", metavar="INACTIVE", help="password inactivity period of the new account")
+ parser.add_argument("-g", "--gid", metavar="GROUP", help="name or ID of the primary group of the new account")
+ parser.add_argument("-G", "--groups", metavar="GROUPS", help="list of supplementary groups of the new account")
+ parser.add_argument("-k", "--skel", metavar="SKEL_DIR", help="use this alternative skeleton directory")
+ parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
+ parser.add_argument("-l", "--no-log-init", help="do not add the user to the lastlog and faillog databases", action="store_true")
+ parser.add_argument("-m", "--create-home", help="create the user's home directory", action="store_const", const=True)
+ parser.add_argument("-M", "--no-create-home", dest="create_home", help="do not create the user's home directory", action="store_const", const=False)
+ parser.add_argument("-N", "--no-user-group", dest="user_group", help="do not create a group with the same name as the user", action="store_const", const=False)
+ parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true")
+ parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
+ parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
+ parser.add_argument("-r", "--system", help="create a system account", action="store_true")
+ parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
+ parser.add_argument("-u", "--uid", metavar="UID", help="user ID of the new account")
+ parser.add_argument("-U", "--user-group", help="create a group with the same name as the user", action="store_const", const=True)
+ parser.add_argument("LOGIN", help="Login name of the new user")
+
+ # Return a list of configuration files based on either the default
+ # files/passwd or the contents of USERADD_UID_TABLES
+ # paths are resolved via BBPATH
+ def get_passwd_list(d):
+ str = ""
+ bbpath = d.getVar('BBPATH', True)
+ passwd_tables = d.getVar('USERADD_UID_TABLES', True)
+ if not passwd_tables:
+ passwd_tables = 'files/passwd'
+ for conf_file in passwd_tables.split():
+ str += " %s" % bb.utils.which(bbpath, conf_file)
+ return str
+
+ newparams = []
+ users = None
+ for param in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params):
+ param = param.strip()
+ if not param:
+ continue
+ try:
+ uaargs = parser.parse_args(re.split('''[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
+ except Exception:
+ raise bb.build.FuncFailed("%s: Unable to parse arguments for USERADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param))
+
+ # Read all passwd files specified in USERADD_UID_TABLES or files/passwd
+ # Use the standard passwd layout:
+ # username:password:user_id:group_id:comment:home_directory:login_shell
+ #
+ # If a field is left blank, the original value will be used. The 'username'
+ # field is required.
+ #
+ # Note: we ignore the password field, as including even the hashed password
+ # in the useradd command may introduce a security hole. It's assumed that
+ # all new users get the default ('*' which prevents login) until the user is
+ # specifically configured by the system admin.
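+ # An illustrative files/passwd entry (hypothetical values):
+ #   myuser:*:1200:1200:My User:/home/myuser:/bin/sh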
+ if not users:
+ users = merge_files(get_passwd_list(d), 7)
+
+ if uaargs.LOGIN not in users:
+ continue
+
+ field = users[uaargs.LOGIN]
+
+ if uaargs.uid and field[2] and (uaargs.uid != field[2]):
+ bb.warn("%s: Changing username %s's uid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.uid, field[2]))
+ uaargs.uid = field[2] or uaargs.uid
+
+ # Determine the possible groupname
+ # Unless the group name (or gid) is specified, we assume that the LOGIN is the groupname
+ #
+ # By default the system has creation of the matching groups enabled
+ # So if the implicit username-group creation is on, then the implicit groupname (LOGIN)
+ # is used, and we disable the user_group option.
+ #
+ user_group = uaargs.user_group is None or uaargs.user_group is True
+ uaargs.groupname = uaargs.LOGIN if user_group else uaargs.gid
+ uaargs.groupid = field[3] or uaargs.gid or uaargs.groupname
+
+ if uaargs.groupid and uaargs.gid != uaargs.groupid:
+ newgroup = None
+ if not uaargs.groupid.isdigit():
+ # We don't have a group number, so we have to add a name
+ bb.debug(1, "Adding group %s!" % uaargs.groupid)
+ newgroup = "%s %s" % (' --system' if uaargs.system else '', uaargs.groupid)
+ elif uaargs.groupname and not uaargs.groupname.isdigit():
+ # We have a group name and a group number to assign it to
+ bb.debug(1, "Adding group %s (gid %s)!" % (uaargs.groupname, uaargs.groupid))
+ newgroup = "-g %s %s" % (uaargs.groupid, uaargs.groupname)
+ else:
+ # We want to add a group, but we don't know its name... so we can't add the group...
+ # We have to assume the group has previously been added or we'll fail on the adduser...
+ # Note: specifying the actual gid is very rare in OE, usually the group name is specified.
+ bb.warn("%s: Changing gid for login %s to %s, verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.groupid))
+
+ uaargs.gid = uaargs.groupid
+ uaargs.user_group = None
+ if newgroup:
+ groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg, True)
+ if groupadd:
+ d.setVar("GROUPADD_PARAM_%s" % pkg, "%s; %s" % (groupadd, newgroup))
+ else:
+ d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup)
+
+ uaargs.comment = "'%s'" % field[4] if field[4] else uaargs.comment
+ uaargs.home_dir = field[5] or uaargs.home_dir
+ uaargs.shell = field[6] or uaargs.shell
+
+ # If USERADD_ERROR_DYNAMIC is enabled, it is an error for a user not to have a static uid/gid
+ if d.getVar('USERADD_ERROR_DYNAMIC', True) == '1' and not ((uaargs.uid and uaargs.uid.isdigit()) and uaargs.gid):
+ #bb.error("Skipping recipe %s, package %s which adds username %s does not have a static uid defined." % (d.getVar('PN', True), pkg, uaargs.LOGIN))
+ raise bb.build.FuncFailed("%s - %s: Username %s does not have a static uid defined." % (d.getVar('PN', True), pkg, uaargs.LOGIN))
+
+ # Reconstruct the args...
+ newparam = ['', ' --defaults'][uaargs.defaults]
+ newparam += ['', ' --base-dir %s' % uaargs.base_dir][uaargs.base_dir != None]
+ newparam += ['', ' --comment %s' % uaargs.comment][uaargs.comment != None]
+ newparam += ['', ' --home-dir %s' % uaargs.home_dir][uaargs.home_dir != None]
+ newparam += ['', ' --expiredate %s' % uaargs.expiredate][uaargs.expiredate != None]
+ newparam += ['', ' --inactive %s' % uaargs.inactive][uaargs.inactive != None]
+ newparam += ['', ' --gid %s' % uaargs.gid][uaargs.gid != None]
+ newparam += ['', ' --groups %s' % uaargs.groups][uaargs.groups != None]
+ newparam += ['', ' --skel %s' % uaargs.skel][uaargs.skel != None]
+ newparam += ['', ' --key %s' % uaargs.key][uaargs.key != None]
+ newparam += ['', ' --no-log-init'][uaargs.no_log_init]
+ newparam += ['', ' --create-home'][uaargs.create_home is True]
+ newparam += ['', ' --no-create-home'][uaargs.create_home is False]
+ newparam += ['', ' --no-user-group'][uaargs.user_group is False]
+ newparam += ['', ' --non-unique'][uaargs.non_unique]
+ newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None]
+ newparam += ['', ' --root %s' % uaargs.root][uaargs.root != None]
+ newparam += ['', ' --system'][uaargs.system]
+ newparam += ['', ' --shell %s' % uaargs.shell][uaargs.shell != None]
+ newparam += ['', ' --uid %s' % uaargs.uid][uaargs.uid != None]
+ newparam += ['', ' --user-group'][uaargs.user_group is True]
+ newparam += ' %s' % uaargs.LOGIN
+
+ newparams.append(newparam)
+
+ return ";".join(newparams).strip()
+
+ # We parse and rewrite the groupadd components
+ def rewrite_groupadd(params):
+ # The following comes from --help on groupadd from shadow
+ parser = myArgumentParser(prog='groupadd')
+ parser.add_argument("-f", "--force", help="exit successfully if the group already exists, and cancel -g if the GID is already used", action="store_true")
+ parser.add_argument("-g", "--gid", metavar="GID", help="use GID for the new group")
+ parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
+ parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true")
+ parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group")
+ parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
+ parser.add_argument("-r", "--system", help="create a system account", action="store_true")
+ parser.add_argument("GROUP", help="Group name of the new group")
+
+ # Return a list of configuration files based on either the default
+ # files/group or the contents of USERADD_GID_TABLES
+ # paths are resolved via BBPATH
+ def get_group_list(d):
+ str = ""
+ bbpath = d.getVar('BBPATH', True)
+ group_tables = d.getVar('USERADD_GID_TABLES', True)
+ if not group_tables:
+ group_tables = 'files/group'
+ for conf_file in group_tables.split():
+ str += " %s" % bb.utils.which(bbpath, conf_file)
+ return str
+
+ newparams = []
+ groups = None
+ for param in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params):
+ param = param.strip()
+ if not param:
+ continue
+ try:
+ # If we're processing multiple lines, we could have left over values here...
+ gaargs = parser.parse_args(re.split('''[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
+ except Exception:
+ raise bb.build.FuncFailed("%s: Unable to parse arguments for GROUPADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param))
+
+ # Read all group files specified in USERADD_GID_TABLES or files/group
+ # Use the standard group layout:
+ # groupname:password:group_id:group_members
+ #
+ # If a field is left blank, the original value will be used. The 'groupname' field
+ # is required.
+ #
+ # Note: similar to the passwd file, the 'password' field is ignored
+ # Note: group_members is ignored, group members must be configured with the GROUPMEMS_PARAM
+ if not groups:
+ groups = merge_files(get_group_list(d), 4)
+
+ if gaargs.GROUP not in groups:
+ continue
+
+ field = groups[gaargs.GROUP]
+
+ if field[2]:
+ if gaargs.gid and (gaargs.gid != field[2]):
+ bb.warn("%s: Changing groupname %s's gid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), gaargs.GROUP, gaargs.gid, field[2]))
+ gaargs.gid = field[2]
+
+ if d.getVar('USERADD_ERROR_DYNAMIC', True) == '1' and not (gaargs.gid and gaargs.gid.isdigit()):
+ #bb.error("Skipping recipe %s, package %s which adds groupname %s does not have a static gid defined." % (d.getVar('PN', True), pkg, gaargs.GROUP))
+ raise bb.build.FuncFailed("%s - %s: Groupname %s does not have a static gid defined." % (d.getVar('PN', True), pkg, gaargs.GROUP))
+
+ # Reconstruct the args...
+ newparam = ['', ' --force'][gaargs.force]
+ newparam += ['', ' --gid %s' % gaargs.gid][gaargs.gid != None]
+ newparam += ['', ' --key %s' % gaargs.key][gaargs.key != None]
+ newparam += ['', ' --non-unique'][gaargs.non_unique]
+ newparam += ['', ' --password %s' % gaargs.password][gaargs.password != None]
+ newparam += ['', ' --root %s' % gaargs.root][gaargs.root != None]
+ newparam += ['', ' --system'][gaargs.system]
+ newparam += ' %s' % gaargs.GROUP
+
+ newparams.append(newparam)
+
+ return ";".join(newparams).strip()
+
+ # Load and process the users and groups, rewriting the adduser/addgroup params
+ useradd_packages = d.getVar('USERADD_PACKAGES', True)
+
+ for pkg in useradd_packages.split():
+ # Groupmems doesn't have anything we might want to change, so simply validating
+ # is a bit of a waste -- only process useradd/groupadd
+ useradd_param = d.getVar('USERADD_PARAM_%s' % pkg, True)
+ if useradd_param:
+ #bb.warn("Before: 'USERADD_PARAM_%s' - '%s'" % (pkg, useradd_param))
+ d.setVar('USERADD_PARAM_%s' % pkg, rewrite_useradd(useradd_param))
+ #bb.warn("After: 'USERADD_PARAM_%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM_%s' % pkg, True)))
+
+ groupadd_param = d.getVar('GROUPADD_PARAM_%s' % pkg, True)
+ if groupadd_param:
+ #bb.warn("Before: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, groupadd_param))
+ d.setVar('GROUPADD_PARAM_%s' % pkg, rewrite_groupadd(groupadd_param))
+ #bb.warn("After: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM_%s' % pkg, True)))
+
+
+
+python __anonymous() {
+ if not bb.data.inherits_class('nativesdk', d) \
+ and not bb.data.inherits_class('native', d):
+ try:
+ update_useradd_static_config(d)
+ except bb.build.FuncFailed as f:
+ bb.debug(1, "Skipping recipe %s: %s" % (d.getVar('PN', True), f))
+ raise bb.parse.SkipPackage(f)
+}
diff --git a/import-layers/yocto-poky/meta/classes/useradd.bbclass b/import-layers/yocto-poky/meta/classes/useradd.bbclass
new file mode 100644
index 000000000..ee402acef
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/useradd.bbclass
@@ -0,0 +1,252 @@
+inherit useradd_base
+
+# base-passwd-cross provides the default passwd and group files in the
+# target sysroot, and shadow -native and -sysroot provide the utilities
+# and support files needed to add and modify user and group accounts
+DEPENDS_append = "${USERADDDEPENDS}"
+USERADDDEPENDS = " base-files shadow-native shadow-sysroot shadow"
+USERADDDEPENDS_class-cross = ""
+USERADDDEPENDS_class-native = ""
+USERADDDEPENDS_class-nativesdk = ""
+
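+# Illustrative recipe usage (hypothetical names; see the parse-time checks below):
+#   inherit useradd
+#   USERADD_PACKAGES = "${PN}"
+#   USERADD_PARAM_${PN} = "--system --shell /bin/false --user-group myuser"
+#   GROUPADD_PARAM_${PN} = "--system mygroup"
+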
+# This preinstall function can be run in four different contexts:
+#
+# a) Before do_install
+# b) At do_populate_sysroot_setscene when installing from sstate packages
+# c) As the preinst script in the target package at do_rootfs time
+# d) As the preinst script in the target package on device as a package upgrade
+#
+useradd_preinst () {
+OPT=""
+SYSROOT=""
+
+if test "x$D" != "x"; then
+ # Installing into a sysroot
+ SYSROOT="$D"
+ OPT="--root $D"
+
+ # Make sure login.defs is there, this is to make debian package backend work
+ # correctly while doing rootfs.
+ # The problem here is that if /etc/login.defs is treated as a config file for
+ # shadow package, then while performing preinsts for packages that depend on
+ # shadow, there might only be /etc/login.def.dpkg-new there in root filesystem.
+ if [ ! -e $D${sysconfdir}/login.defs -a -e $D${sysconfdir}/login.defs.dpkg-new ]; then
+ cp $D${sysconfdir}/login.defs.dpkg-new $D${sysconfdir}/login.defs
+ fi
+
+ # user/group lookups should match useradd/groupadd --root
+ export PSEUDO_PASSWD="$SYSROOT:${STAGING_DIR_NATIVE}"
+fi
+
+# If we're not doing a special SSTATE/SYSROOT install
+# then set the values, otherwise use the environment
+if test "x$UA_SYSROOT" = "x"; then
+ # Installing onto a target
+ # Add groups and users defined only for this package
+ GROUPADD_PARAM="${GROUPADD_PARAM}"
+ USERADD_PARAM="${USERADD_PARAM}"
+ GROUPMEMS_PARAM="${GROUPMEMS_PARAM}"
+fi
+
+# Perform group additions first, since user additions may depend
+# on these groups existing
+if test "x`echo $GROUPADD_PARAM | tr -d '[:space:]'`" != "x"; then
+ echo "Running groupadd commands..."
+ # Invoke multiple instances of groupadd for parameter lists
+ # separated by ';'
+ opts=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1`
+ remaining=`echo "$GROUPADD_PARAM" | cut -d ';' -f 2-`
+ while test "x$opts" != "x"; do
+ perform_groupadd "$SYSROOT" "$OPT $opts"
+ if test "x$opts" = "x$remaining"; then
+ break
+ fi
+ opts=`echo "$remaining" | cut -d ';' -f 1`
+ remaining=`echo "$remaining" | cut -d ';' -f 2-`
+ done
+fi
+
+if test "x`echo $USERADD_PARAM | tr -d '[:space:]'`" != "x"; then
+ echo "Running useradd commands..."
+ # Invoke multiple instances of useradd for parameter lists
+ # separated by ';'
+ opts=`echo "$USERADD_PARAM" | cut -d ';' -f 1`
+ remaining=`echo "$USERADD_PARAM" | cut -d ';' -f 2-`
+ while test "x$opts" != "x"; do
+ perform_useradd "$SYSROOT" "$OPT $opts"
+ if test "x$opts" = "x$remaining"; then
+ break
+ fi
+ opts=`echo "$remaining" | cut -d ';' -f 1`
+ remaining=`echo "$remaining" | cut -d ';' -f 2-`
+ done
+fi
+
+if test "x`echo $GROUPMEMS_PARAM | tr -d '[:space:]'`" != "x"; then
+ echo "Running groupmems commands..."
+ # Invoke multiple instances of groupmems for parameter lists
+ # separated by ';'
+ opts=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 1`
+ remaining=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 2-`
+ while test "x$opts" != "x"; do
+ perform_groupmems "$SYSROOT" "$OPT $opts"
+ if test "x$opts" = "x$remaining"; then
+ break
+ fi
+ opts=`echo "$remaining" | cut -d ';' -f 1`
+ remaining=`echo "$remaining" | cut -d ';' -f 2-`
+ done
+fi
+}
+
+useradd_sysroot () {
+ # Pseudo may (do_install) or may not (do_populate_sysroot_setscene) be running
+ # at this point so we're explicit about the environment so pseudo can load if
+ # not already present.
+ export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir}/pseudo"
+
+ # Explicitly set $D since it isn't set to anything
+ # before do_install
+ D=${STAGING_DIR_TARGET}
+
+ # Add groups and users defined for all recipe packages
+ GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}"
+ USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}"
+ GROUPMEMS_PARAM="${@get_all_cmd_params(d, 'groupmems')}"
+
+ # Tell the system to use the environment vars
+ UA_SYSROOT=1
+
+ useradd_preinst
+}
+
+useradd_sysroot_sstate () {
+ if [ "${BB_CURRENTTASK}" = "package_setscene" -o "${BB_CURRENTTASK}" = "populate_sysroot_setscene" ]
+ then
+ useradd_sysroot
+ fi
+}
+
+userdel_sysroot_sstate () {
+if test "x${STAGING_DIR_TARGET}" != "x"; then
+ if [ "${BB_CURRENTTASK}" = "configure" -o "${BB_CURRENTTASK}" = "clean" ]; then
+ export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir}/pseudo"
+ OPT="--root ${STAGING_DIR_TARGET}"
+
+ # Remove groups and users defined for package
+ GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}"
+ USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}"
+
+ if test "x`echo $USERADD_PARAM | tr -d '[:space:]'`" != "x"; then
+ user=`echo "$USERADD_PARAM" | cut -d ';' -f 1 | awk '{ print $NF }'`
+ perform_userdel "${STAGING_DIR_TARGET}" "$OPT $user"
+ fi
+
+ if test "x`echo $GROUPADD_PARAM | tr -d '[:space:]'`" != "x"; then
+ group=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1 | awk '{ print $NF }'`
+ perform_groupdel "${STAGING_DIR_TARGET}" "$OPT $group"
+ fi
+
+ fi
+fi
+}
+
+SSTATECLEANFUNCS = "userdel_sysroot_sstate"
+SSTATECLEANFUNCS_class-cross = ""
+SSTATECLEANFUNCS_class-native = ""
+SSTATECLEANFUNCS_class-nativesdk = ""
+
+do_install[prefuncs] += "${SYSROOTFUNC}"
+SYSROOTFUNC = "useradd_sysroot"
+SYSROOTFUNC_class-cross = ""
+SYSROOTFUNC_class-native = ""
+SYSROOTFUNC_class-nativesdk = ""
+SSTATEPREINSTFUNCS += "${SYSROOTPOSTFUNC}"
+SYSROOTPOSTFUNC = "useradd_sysroot_sstate"
+SYSROOTPOSTFUNC_class-cross = ""
+SYSROOTPOSTFUNC_class-native = ""
+SYSROOTPOSTFUNC_class-nativesdk = ""
+
+USERADDSETSCENEDEPS = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene"
+USERADDSETSCENEDEPS_class-cross = ""
+USERADDSETSCENEDEPS_class-native = ""
+USERADDSETSCENEDEPS_class-nativesdk = ""
+do_package_setscene[depends] += "${USERADDSETSCENEDEPS}"
+do_populate_sysroot_setscene[depends] += "${USERADDSETSCENEDEPS}"
+
+# Recipe parse-time sanity checks
+def update_useradd_after_parse(d):
+ useradd_packages = d.getVar('USERADD_PACKAGES', True)
+
+ if not useradd_packages:
+ raise bb.build.FuncFailed("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE', False))
+
+ for pkg in useradd_packages.split():
+ if not d.getVar('USERADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPMEMS_PARAM_%s' % pkg, True):
+ bb.fatal("%s inherits useradd but doesn't set USERADD_PARAM, GROUPADD_PARAM or GROUPMEMS_PARAM for package %s" % (d.getVar('FILE', False), pkg))
+
+python __anonymous() {
+ if not bb.data.inherits_class('nativesdk', d) \
+ and not bb.data.inherits_class('native', d):
+ update_useradd_after_parse(d)
+}
+
+# Return a single [GROUP|USER]ADD_PARAM formatted string which includes the
+# [group|user]add parameters for all USERADD_PACKAGES in this recipe
+def get_all_cmd_params(d, cmd_type):
+ import string
+
+ param_type = cmd_type.upper() + "_PARAM_%s"
+ params = []
+
+ useradd_packages = d.getVar('USERADD_PACKAGES', True) or ""
+ for pkg in useradd_packages.split():
+ param = d.getVar(param_type % pkg, True)
+ if param:
+ params.append(param)
+
+ return "; ".join(params)
+
+# Adds the preinst script into generated packages
+fakeroot python populate_packages_prepend () {
+ def update_useradd_package(pkg):
+ bb.debug(1, 'adding user/group calls to preinst for %s' % pkg)
+
+ """
+ useradd preinst is appended here because pkg_preinst may be
+ required to execute on the target. Not doing so may cause
+ useradd preinst to be invoked twice, causing unwanted warnings.
+ """
+ preinst = d.getVar('pkg_preinst_%s' % pkg, True) or d.getVar('pkg_preinst', True)
+ if not preinst:
+ preinst = '#!/bin/sh\n'
+ preinst += 'bbnote () {\n\techo "NOTE: $*"\n}\n'
+ preinst += 'bbwarn () {\n\techo "WARNING: $*"\n}\n'
+ preinst += 'bbfatal () {\n\techo "ERROR: $*"\n\texit 1\n}\n'
+ preinst += 'perform_groupadd () {\n%s}\n' % d.getVar('perform_groupadd', True)
+ preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd', True)
+ preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems', True)
+ preinst += d.getVar('useradd_preinst', True)
+ d.setVar('pkg_preinst_%s' % pkg, preinst)
+
+ # RDEPENDS setup
+ rdepends = d.getVar("RDEPENDS_%s" % pkg, True) or ""
+ rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-passwd'
+ rdepends += ' ' + d.getVar('MLPREFIX', False) + 'shadow'
+ # base-files is where the default /etc/skel is packaged
+ rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-files'
+ d.setVar("RDEPENDS_%s" % pkg, rdepends)
+
+ # Add the user/group preinstall scripts and RDEPENDS requirements
+ # to packages specified by USERADD_PACKAGES
+ if not bb.data.inherits_class('nativesdk', d) \
+ and not bb.data.inherits_class('native', d):
+ useradd_packages = d.getVar('USERADD_PACKAGES', True) or ""
+ for pkg in useradd_packages.split():
+ update_useradd_package(pkg)
+}
+
+# Use the following to extend the useradd with custom functions
+USERADDEXTENSION ?= ""
+
+inherit ${USERADDEXTENSION}
diff --git a/import-layers/yocto-poky/meta/classes/useradd_base.bbclass b/import-layers/yocto-poky/meta/classes/useradd_base.bbclass
new file mode 100644
index 000000000..0d81accd1
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/useradd_base.bbclass
@@ -0,0 +1,151 @@
+# This bbclass provides basic functionality for user/group settings.
+# This bbclass is intended to be inherited by useradd.bbclass and
+# extrausers.bbclass.
+
+# The following functions basically have similar logic.
+# *) Perform necessary checks before invoking the actual command
+# *) Invoke the actual command with flock
+# *) Error out if an error occurs.
+
+# Note that before invoking these functions, make sure the global variable
+# PSEUDO is set up correctly.
+
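+# An illustrative invocation (hypothetical values), as useradd.bbclass would do it:
+#   perform_groupadd "$D" "--root $D --system mygroup"
+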
+perform_groupadd () {
+ local rootdir="$1"
+ local opts="$2"
+ bbnote "${PN}: Performing groupadd with [$opts]"
+ local groupname=`echo "$opts" | awk '{ print $NF }'`
+ local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
+ if test "x$group_exists" = "x"; then
+ opts=`echo $opts | sed s/\'/\"/g`
+ eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupadd \$opts\" || true
+ group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
+ if test "x$group_exists" = "x"; then
+ bbfatal "${PN}: groupadd command did not succeed."
+ fi
+ else
+ bbnote "${PN}: group $groupname already exists, not re-creating it"
+ fi
+}
+
+perform_useradd () {
+ local rootdir="$1"
+ local opts="$2"
+ bbnote "${PN}: Performing useradd with [$opts]"
+ local username=`echo "$opts" | awk '{ print $NF }'`
+ local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
+ if test "x$user_exists" = "x"; then
+ opts=`echo $opts | sed s/\'/\"/g`
+ eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO useradd \$opts\" || true
+ user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
+ if test "x$user_exists" = "x"; then
+ bbfatal "${PN}: useradd command did not succeed."
+ fi
+ else
+ bbnote "${PN}: user $username already exists, not re-creating it"
+ fi
+}
+
+perform_groupmems () {
+ local rootdir="$1"
+ local opts="$2"
+ bbnote "${PN}: Performing groupmems with [$opts]"
+ local groupname=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-g" || $i == "--group") print $(i+1) }'`
+ local username=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-a" || $i == "--add") print $(i+1) }'`
+ bbnote "${PN}: Running groupmems command with group $groupname and user $username"
+ # groupmems fails if /etc/gshadow does not exist
+ local gshadow=""
+ if [ -f $rootdir${sysconfdir}/gshadow ]; then
+ gshadow="yes"
+ else
+ gshadow="no"
+ touch $rootdir${sysconfdir}/gshadow
+ fi
+ local mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`"
+ if test "x$mem_exists" = "x"; then
+ eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupmems \$opts\" || true
+ mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`"
+ if test "x$mem_exists" = "x"; then
+ bbfatal "${PN}: groupmems command did not succeed."
+ fi
+ else
+ bbnote "${PN}: group $groupname already contains $username, not re-adding it"
+ fi
+ if test "x$gshadow" = "xno"; then
+ rm -f $rootdir${sysconfdir}/gshadow
+ rm -f $rootdir${sysconfdir}/gshadow-
+ fi
+}
+
+perform_groupdel () {
+ local rootdir="$1"
+ local opts="$2"
+ bbnote "${PN}: Performing groupdel with [$opts]"
+ local groupname=`echo "$opts" | awk '{ print $NF }'`
+ local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
+ if test "x$group_exists" != "x"; then
+ eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupdel \$opts\" || true
+ group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
+ if test "x$group_exists" != "x"; then
+ bbfatal "${PN}: groupdel command did not succeed."
+ fi
+ else
+ bbnote "${PN}: group $groupname doesn't exist, not removing it"
+ fi
+}
+
+perform_userdel () {
+ local rootdir="$1"
+ local opts="$2"
+ bbnote "${PN}: Performing userdel with [$opts]"
+ local username=`echo "$opts" | awk '{ print $NF }'`
+ local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
+ if test "x$user_exists" != "x"; then
+ eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO userdel \$opts\" || true
+ user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
+ if test "x$user_exists" != "x"; then
+ bbfatal "${PN}: userdel command did not succeed."
+ fi
+ else
+ bbnote "${PN}: user $username doesn't exist, not removing it"
+ fi
+}
+
+perform_groupmod () {
+ # Other than the return value of groupmod, there's no simple way to judge whether the command
+ # succeeds, so we disable -e option temporarily
+ set +e
+ local rootdir="$1"
+ local opts="$2"
+ bbnote "${PN}: Performing groupmod with [$opts]"
+ local groupname=`echo "$opts" | awk '{ print $NF }'`
+ local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
+ if test "x$group_exists" != "x"; then
+ eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupmod \$opts\"
+ if test $? != 0; then
+ bbwarn "${PN}: groupmod command did not succeed."
+ fi
+ else
+ bbwarn "${PN}: group $groupname doesn't exist, unable to modify it"
+ fi
+ set -e
+}
+
+perform_usermod () {
+ # Same reason with groupmod, temporarily disable -e option
+ set +e
+ local rootdir="$1"
+ local opts="$2"
+ bbnote "${PN}: Performing usermod with [$opts]"
+ local username=`echo "$opts" | awk '{ print $NF }'`
+ local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
+ if test "x$user_exists" != "x"; then
+ eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO usermod \$opts\"
+ if test $? != 0; then
+ bbfatal "${PN}: usermod command did not succeed."
+ fi
+ else
+ bbwarn "${PN}: user $username doesn't exist, unable to modify it"
+ fi
+ set -e
+}
diff --git a/import-layers/yocto-poky/meta/classes/utility-tasks.bbclass b/import-layers/yocto-poky/meta/classes/utility-tasks.bbclass
new file mode 100644
index 000000000..5bcfd0b72
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/utility-tasks.bbclass
@@ -0,0 +1,66 @@
+addtask listtasks
+do_listtasks[nostamp] = "1"
+python do_listtasks() {
+ taskdescs = {}
+ maxlen = 0
+ for e in d.keys():
+ if d.getVarFlag(e, 'task', True):
+ maxlen = max(maxlen, len(e))
+ if e.endswith('_setscene'):
+ desc = "%s (setscene version)" % (d.getVarFlag(e[:-9], 'doc', True) or '')
+ else:
+ desc = d.getVarFlag(e, 'doc', True) or ''
+ taskdescs[e] = desc
+
+ tasks = sorted(taskdescs.keys())
+ for taskname in tasks:
+ bb.plain("%s %s" % (taskname.ljust(maxlen), taskdescs[taskname]))
+}
+
+CLEANFUNCS ?= ""
+
+T_task-clean = "${LOG_DIR}/cleanlogs/${PN}"
+addtask clean
+do_clean[nostamp] = "1"
+python do_clean() {
+ """clear the build and temp directories"""
+ dir = d.expand("${WORKDIR}")
+ bb.note("Removing " + dir)
+ oe.path.remove(dir)
+
+ dir = "%s.*" % bb.data.expand(d.getVar('STAMP', False), d)
+ bb.note("Removing " + dir)
+ oe.path.remove(dir)
+
+ for f in (d.getVar('CLEANFUNCS', True) or '').split():
+ bb.build.exec_func(f, d)
+}
+
+addtask checkuri
+do_checkuri[nostamp] = "1"
+python do_checkuri() {
+ src_uri = (d.getVar('SRC_URI', True) or "").split()
+ if len(src_uri) == 0:
+ return
+
+ try:
+ fetcher = bb.fetch2.Fetch(src_uri, d)
+ fetcher.checkstatus()
+ except bb.fetch2.BBFetchException as e:
+ raise bb.build.FuncFailed(e)
+}
+
+addtask checkuriall after do_checkuri
+do_checkuriall[recrdeptask] = "do_checkuriall do_checkuri"
+do_checkuriall[recideptask] = "do_${BB_DEFAULT_TASK}"
+do_checkuriall[nostamp] = "1"
+do_checkuriall() {
+ :
+}
+
+addtask fetchall after do_fetch
+do_fetchall[recrdeptask] = "do_fetchall do_fetch"
+do_fetchall[recideptask] = "do_${BB_DEFAULT_TASK}"
+do_fetchall() {
+ :
+}
diff --git a/import-layers/yocto-poky/meta/classes/utils.bbclass b/import-layers/yocto-poky/meta/classes/utils.bbclass
new file mode 100644
index 000000000..81b92cb5e
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/utils.bbclass
@@ -0,0 +1,380 @@
+# For compatibility
+def base_path_join(a, *p):
+ return oe.path.join(a, *p)
+
+def base_path_relative(src, dest):
+ return oe.path.relative(src, dest)
+
+def base_path_out(path, d):
+ return oe.path.format_display(path, d)
+
+def base_read_file(filename):
+ return oe.utils.read_file(filename)
+
+def base_ifelse(condition, iftrue = True, iffalse = False):
+ return oe.utils.ifelse(condition, iftrue, iffalse)
+
+def base_conditional(variable, checkvalue, truevalue, falsevalue, d):
+ return oe.utils.conditional(variable, checkvalue, truevalue, falsevalue, d)
+
+def base_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
+ return oe.utils.less_or_equal(variable, checkvalue, truevalue, falsevalue, d)
+
+def base_version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
+ return oe.utils.version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d)
+
+def base_contains(variable, checkvalues, truevalue, falsevalue, d):
+ return bb.utils.contains(variable, checkvalues, truevalue, falsevalue, d)
+
+def base_both_contain(variable1, variable2, checkvalue, d):
+ return oe.utils.both_contain(variable1, variable2, checkvalue, d)
+
+def base_prune_suffix(var, suffixes, d):
+ return oe.utils.prune_suffix(var, suffixes, d)
+
+def oe_filter(f, str, d):
+ return oe.utils.str_filter(f, str, d)
+
+def oe_filter_out(f, str, d):
+ return oe.utils.str_filter_out(f, str, d)
+
+def machine_paths(d):
+ """List any existing machine specific filespath directories"""
+ machine = d.getVar("MACHINE", True)
+ filespathpkg = d.getVar("FILESPATHPKG", True).split(":")
+ for basepath in d.getVar("FILESPATHBASE", True).split(":"):
+ for pkgpath in filespathpkg:
+ machinepath = os.path.join(basepath, pkgpath, machine)
+ if os.path.isdir(machinepath):
+ yield machinepath
+
+def is_machine_specific(d):
+ """Determine whether the current recipe is machine specific"""
+ machinepaths = set(machine_paths(d))
+ srcuri = d.getVar("SRC_URI", True).split()
+ # Construct the fetcher once; srcuri is already a list of URLs
+ fetcher = bb.fetch2.Fetch(srcuri, d)
+ for url in srcuri:
+ if url.startswith("file://"):
+ if any(fetcher.localpath(url).startswith(mp + "/") for mp in machinepaths):
+ return True
+ return False
+
+oe_soinstall() {
+ # Purpose: Install shared library file and
+ # create the necessary links
+ # Example:
+ #
+ # oe_soinstall libfoo.so.1.2.3 ${D}${libdir}
+ #
+ #bbnote installing shared library $1 to $2
+ #
+ libname=`basename $1`
+ install -m 755 $1 $2/$libname
+ sonamelink=`${HOST_PREFIX}readelf -d $1 |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
+ solink=`echo $libname | sed -e 's/\.so\..*/.so/'`
+ ln -sf $libname $2/$sonamelink
+ ln -sf $libname $2/$solink
+}
+
+oe_libinstall() {
+ # Purpose: Install a library, in all its forms
+ # Example
+ #
+ # oe_libinstall libltdl ${STAGING_LIBDIR}/
+ # oe_libinstall -C src/libblah libblah ${D}/${libdir}/
+ dir=""
+ libtool=""
+ silent=""
+ require_static=""
+ require_shared=""
+ staging_install=""
+ while [ "$#" -gt 0 ]; do
+ case "$1" in
+ -C)
+ shift
+ dir="$1"
+ ;;
+ -s)
+ silent=1
+ ;;
+ -a)
+ require_static=1
+ ;;
+ -so)
+ require_shared=1
+ ;;
+ -*)
+ bbfatal "oe_libinstall: unknown option: $1"
+ ;;
+ *)
+ break;
+ ;;
+ esac
+ shift
+ done
+
+ libname="$1"
+ shift
+ destpath="$1"
+ if [ -z "$destpath" ]; then
+ bbfatal "oe_libinstall: no destination path specified"
+ fi
+ if echo "$destpath/" | egrep '^${STAGING_LIBDIR}/' >/dev/null
+ then
+ staging_install=1
+ fi
+
+ __runcmd () {
+ if [ -z "$silent" ]; then
+ echo >&2 "oe_libinstall: $*"
+ fi
+ $*
+ }
+
+ if [ -z "$dir" ]; then
+ dir=`pwd`
+ fi
+
+ dotlai=$libname.lai
+
+ # Sanity check that the libname.lai is unique
+ number_of_files=`(cd $dir; find . -name "$dotlai") | wc -l`
+ if [ $number_of_files -gt 1 ]; then
+ bbfatal "oe_libinstall: $dotlai is not unique in $dir"
+ fi
+
+
+ dir=$dir`(cd $dir;find . -name "$dotlai") | sed "s/^\.//;s/\/$dotlai\$//;q"`
+ olddir=`pwd`
+ __runcmd cd $dir
+
+ lafile=$libname.la
+
+ # If no such file exists, try cutting the version suffix
+ if [ ! -f "$lafile" ]; then
+ libname1=`echo "$libname" | sed 's/-[0-9.]*$//'`
+ lafile1=$libname1.la
+ if [ -f "$lafile1" ]; then
+ libname=$libname1
+ lafile=$lafile1
+ fi
+ fi
+
+ if [ -f "$lafile" ]; then
+ # libtool archive
+ eval `cat $lafile|grep "^library_names="`
+ libtool=1
+ else
+ library_names="$libname.so* $libname.dll.a $libname.*.dylib"
+ fi
+
+ __runcmd install -d $destpath/
+ dota=$libname.a
+ if [ -f "$dota" -o -n "$require_static" ]; then
+ rm -f $destpath/$dota
+ __runcmd install -m 0644 $dota $destpath/
+ fi
+ if [ -f "$dotlai" -a -n "$libtool" ]; then
+ rm -f $destpath/$libname.la
+ __runcmd install -m 0644 $dotlai $destpath/$libname.la
+ fi
+
+ for name in $library_names; do
+ files=`eval echo $name`
+ for f in $files; do
+ if [ ! -e "$f" ]; then
+ if [ -n "$libtool" ]; then
+ bbfatal "oe_libinstall: $dir/$f not found."
+ fi
+ elif [ -L "$f" ]; then
+ __runcmd cp -P "$f" $destpath/
+ elif [ ! -L "$f" ]; then
+ libfile="$f"
+ rm -f $destpath/$libfile
+ __runcmd install -m 0755 $libfile $destpath/
+ fi
+ done
+ done
+
+ if [ -z "$libfile" ]; then
+ if [ -n "$require_shared" ]; then
+ bbfatal "oe_libinstall: unable to locate shared library"
+ fi
+ elif [ -z "$libtool" ]; then
+ # special case hack for non-libtool .so.#.#.# links
+ baselibfile=`basename "$libfile"`
+ if (echo $baselibfile | grep -qE '^lib.*\.so\.[0-9.]*$'); then
+ sonamelink=`${HOST_PREFIX}readelf -d $libfile |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
+ solink=`echo $baselibfile | sed -e 's/\.so\..*/.so/'`
+ if [ -n "$sonamelink" -a x"$baselibfile" != x"$sonamelink" ]; then
+ __runcmd ln -sf $baselibfile $destpath/$sonamelink
+ fi
+ __runcmd ln -sf $baselibfile $destpath/$solink
+ fi
+ fi
+
+ __runcmd cd "$olddir"
+}
+
+oe_machinstall() {
+ # Purpose: Install machine dependent files, if available
+ # If not available, check if there is a default
+ # If no default, just touch the destination
+ # Example:
+ # $1 $2 $3 $4
+ # oe_machinstall -m 0644 fstab ${D}/etc/fstab
+ #
+ # TODO: Check argument number?
+ #
+ filename=`basename $3`
+ dirname=`dirname $3`
+
+ for o in `echo ${OVERRIDES} | tr ':' ' '`; do
+ if [ -e $dirname/$o/$filename ]; then
+ bbnote $dirname/$o/$filename present, installing to $4
+ install $1 $2 $dirname/$o/$filename $4
+ return
+ fi
+ done
+# bbnote overrides specific file NOT present, trying default=$3...
+ if [ -e $3 ]; then
+ bbnote $3 present, installing to $4
+ install $1 $2 $3 $4
+ else
+ bbnote $3 NOT present, touching empty $4
+ touch $4
+ fi
+}
+
+create_cmdline_wrapper () {
+ # Create a wrapper script where commandline options are needed
+ #
+ # These are useful to work around relocation issues, by passing extra options
+ # to a program
+ #
+ # Usage: create_cmdline_wrapper FILENAME <extra-options>
+
+ cmd=$1
+ shift
+
+ echo "Generating wrapper script for $cmd"
+
+ mv $cmd $cmd.real
+ cmdname=`basename $cmd`
+ cat <<END >$cmd
+#!/bin/bash
+realpath=\`readlink -fn \$0\`
+exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real $@ "\$@"
+END
+ chmod +x $cmd
+}
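+
+# e.g. (illustrative, hypothetical tool): create_cmdline_wrapper ${D}${bindir}/mytool --config=${sysconfdir}/mytool.cfg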
+
+create_wrapper () {
+ # Create a wrapper script where extra environment variables are needed
+ #
+ # These are useful to work around relocation issues, by setting environment
+ # variables which point to paths in the filesystem.
+ #
+ # Usage: create_wrapper FILENAME [[VAR=VALUE]..]
+
+ cmd=$1
+ shift
+
+ echo "Generating wrapper script for $cmd"
+
+ mv $cmd $cmd.real
+ cmdname=`basename $cmd`
+ cat <<END >$cmd
+#!/bin/bash
+realpath=\`readlink -fn \$0\`
+export $@
+exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real "\$@"
+END
+ chmod +x $cmd
+}
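+
+# e.g. (illustrative, hypothetical tool): create_wrapper ${D}${bindir}/mytool MYTOOL_DATA_DIR=${datadir}/mytool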
+
+# Copy files/directories from $1 to $2 but using hardlinks
+# (preserve symlinks)
+hardlinkdir () {
+ from=$1
+ to=$2
+ (cd $from; find . -print0 | cpio --null -pdlu $to)
+}
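+
+# e.g. (illustrative): hardlinkdir ${WORKDIR}/image ${D}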
+
+
+def check_app_exists(app, d):
+ app = d.expand(app)
+ path = d.getVar('PATH', True)
+ return bool(bb.utils.which(path, app))
+
+def explode_deps(s):
+ return bb.utils.explode_deps(s)
+
+def base_set_filespath(path, d):
+ filespath = []
+ extrapaths = (d.getVar("FILESEXTRAPATHS", True) or "")
+ # Remove default flag which was used for checking
+ extrapaths = extrapaths.replace("__default:", "")
+ # Don't prepend empty strings to the path list
+ if extrapaths != "":
+ path = extrapaths.split(":") + path
+ # The ":" ensures we have an 'empty' override
+ overrides = (":" + (d.getVar("FILESOVERRIDES", True) or "")).split(":")
+ overrides.reverse()
+ for o in overrides:
+ for p in path:
+ if p != "":
+ filespath.append(os.path.join(p, o))
+ return ":".join(filespath)
+
+def extend_variants(d, var, extend, delim=':'):
+ """Return a string of all bb class extend variants for the given extend"""
+ variants = []
+ whole = d.getVar(var, True) or ""
+ for ext in whole.split():
+ eext = ext.split(delim)
+ if len(eext) > 1 and eext[0] == extend:
+ variants.append(eext[1])
+ return " ".join(variants)
+
+def multilib_pkg_extend(d, pkg):
+ variants = (d.getVar("MULTILIB_VARIANTS", True) or "").split()
+ if not variants:
+ return pkg
+ pkgs = pkg
+ for v in variants:
+ pkgs = pkgs + " " + v + "-" + pkg
+ return pkgs
+
+def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
+ """Return a string of all ${var} in all multilib tune configuration"""
+ values = []
+ value = d.getVar(var, True) or ""
+ if value != "":
+ if need_split:
+ for item in value.split(delim):
+ values.append(item)
+ else:
+ values.append(value)
+ variants = d.getVar("MULTILIB_VARIANTS", True) or ""
+ for item in variants.split():
+ localdata = bb.data.createCopy(d)
+ overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
+ localdata.setVar("OVERRIDES", overrides)
+ localdata.setVar("MLPREFIX", item + "-")
+ bb.data.update_data(localdata)
+ value = localdata.getVar(var, True) or ""
+ if value != "":
+ if need_split:
+ for item in value.split(delim):
+ values.append(item)
+ else:
+ values.append(value)
+ if unique:
+ #we do this to keep order as much as possible
+ ret = []
+ for value in values:
+ if not value in ret:
+ ret.append(value)
+ else:
+ ret = values
+ return " ".join(ret)
diff --git a/import-layers/yocto-poky/meta/classes/vala.bbclass b/import-layers/yocto-poky/meta/classes/vala.bbclass
new file mode 100644
index 000000000..615eb379a
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/vala.bbclass
@@ -0,0 +1,24 @@
+# Everyone needs vala-native and targets need vala, too,
+# because that is where target builds look for .vapi files.
+#
+VALADEPENDS = ""
+VALADEPENDS_class-target = "vala"
+DEPENDS_append = " vala-native ${VALADEPENDS}"
+
+# Our patched version of Vala looks in STAGING_DATADIR for .vapi files
+export STAGING_DATADIR
+# Upstream Vala >= 0.11 looks in XDG_DATA_DIRS for .vapi files
+export XDG_DATA_DIRS = "${STAGING_DATADIR}"
+
+# Package additional files
+FILES_${PN}-dev += "\
+ ${datadir}/vala/vapi/*.vapi \
+ ${datadir}/vala/vapi/*.deps \
+ ${datadir}/gir-1.0 \
+"
+
+# Remove vapigen.m4 that is bundled with tarballs
+# because it does not yet have our cross-compile fixes
+do_configure_prepend() {
+ rm -f ${S}/m4/vapigen.m4
+}
diff --git a/import-layers/yocto-poky/meta/classes/waf.bbclass b/import-layers/yocto-poky/meta/classes/waf.bbclass
new file mode 100644
index 000000000..5e55833ca
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/waf.bbclass
@@ -0,0 +1,38 @@
+# avoids build breaks when using no-static-libs.inc
+DISABLE_STATIC = ""
+
+def get_waf_parallel_make(d):
+ pm = d.getVar('PARALLEL_MAKE', True)
+ if pm:
+ # look for '-j' and throw other options (e.g. '-l') away
+ # because they might have a different meaning to waf
+ pm = pm.split()
+ while pm:
+ v = None
+ opt = pm.pop(0)
+ if opt == '-j':
+ v = pm.pop(0)
+ elif opt.startswith('-j'):
+ v = opt[2:].strip()
+
+ if v:
+ v = min(64, int(v))
+ return '-j' + str(v)
+
+ return ""
+
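+# For illustration: PARALLEL_MAKE = "-j 8 -l 4" yields "-j8" here; the load
+# average option is dropped rather than passed to waf.
+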
+waf_do_configure() {
+ ${S}/waf configure --prefix=${prefix} ${EXTRA_OECONF}
+}
+
+waf_do_compile() {
+ ${S}/waf build ${@get_waf_parallel_make(d)}
+}
+
+waf_do_install() {
+ ${S}/waf install --destdir=${D}
+}
+
+EXPORT_FUNCTIONS do_configure do_compile do_install