Diffstat (limited to 'import-layers/yocto-poky/meta/classes')
-rw-r--r--  import-layers/yocto-poky/meta/classes/allarch.bbclass   13
-rw-r--r--  import-layers/yocto-poky/meta/classes/archiver.bbclass   169
-rw-r--r--  import-layers/yocto-poky/meta/classes/autotools.bbclass   145
-rw-r--r--  import-layers/yocto-poky/meta/classes/base.bbclass   155
-rw-r--r--  import-layers/yocto-poky/meta/classes/binconfig-disabled.bbclass   1
-rw-r--r--  import-layers/yocto-poky/meta/classes/binconfig.bbclass   8
-rw-r--r--  import-layers/yocto-poky/meta/classes/blacklist.bbclass   27
-rw-r--r--  import-layers/yocto-poky/meta/classes/bugzilla.bbclass   28
-rw-r--r--  import-layers/yocto-poky/meta/classes/buildhistory.bbclass   150
-rw-r--r--  import-layers/yocto-poky/meta/classes/buildstats-summary.bbclass   2
-rw-r--r--  import-layers/yocto-poky/meta/classes/buildstats.bbclass   45
-rw-r--r--  import-layers/yocto-poky/meta/classes/ccache.bbclass   13
-rw-r--r--  import-layers/yocto-poky/meta/classes/chrpath.bbclass   17
-rw-r--r--  import-layers/yocto-poky/meta/classes/cmake.bbclass   28
-rw-r--r--  import-layers/yocto-poky/meta/classes/cml1.bbclass   6
-rw-r--r--  import-layers/yocto-poky/meta/classes/compress_doc.bbclass   42
-rw-r--r--  import-layers/yocto-poky/meta/classes/copyleft_compliance.bbclass   10
-rw-r--r--  import-layers/yocto-poky/meta/classes/copyleft_filter.bbclass   40
-rw-r--r--  import-layers/yocto-poky/meta/classes/core-image.bbclass   4
-rw-r--r--  import-layers/yocto-poky/meta/classes/cpan-base.bbclass   24
-rw-r--r--  import-layers/yocto-poky/meta/classes/cross-canadian.bbclass   44
-rw-r--r--  import-layers/yocto-poky/meta/classes/cross.bbclass   13
-rw-r--r--  import-layers/yocto-poky/meta/classes/crosssdk.bbclass   12
-rw-r--r--  import-layers/yocto-poky/meta/classes/cve-check.bbclass   59
-rw-r--r--  import-layers/yocto-poky/meta/classes/debian.bbclass   24
-rw-r--r--  import-layers/yocto-poky/meta/classes/devshell.bbclass   10
-rw-r--r--  import-layers/yocto-poky/meta/classes/devupstream.bbclass   48
-rw-r--r--  import-layers/yocto-poky/meta/classes/distro_features_check.bbclass   8
-rw-r--r--  import-layers/yocto-poky/meta/classes/distrodata.bbclass   152
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils-base.bbclass   2
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils-tools.bbclass   2
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils.bbclass   10
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils3-base.bbclass   2
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils3.bbclass   6
-rw-r--r--  import-layers/yocto-poky/meta/classes/externalsrc.bbclass   79
-rw-r--r--  import-layers/yocto-poky/meta/classes/extrausers.bbclass   6
-rw-r--r--  import-layers/yocto-poky/meta/classes/fontcache.bbclass   16
-rw-r--r--  import-layers/yocto-poky/meta/classes/fs-uuid.bbclass   4
-rw-r--r--  import-layers/yocto-poky/meta/classes/gconf.bbclass   15
-rw-r--r--  import-layers/yocto-poky/meta/classes/gettext.bbclass   8
-rw-r--r--  import-layers/yocto-poky/meta/classes/gio-module-cache.bbclass   12
-rw-r--r--  import-layers/yocto-poky/meta/classes/go.bbclass   77
-rw-r--r--  import-layers/yocto-poky/meta/classes/goarch.bbclass   53
-rw-r--r--  import-layers/yocto-poky/meta/classes/gobject-introspection.bbclass   2
-rw-r--r--  import-layers/yocto-poky/meta/classes/grub-efi.bbclass   33
-rw-r--r--  import-layers/yocto-poky/meta/classes/gsettings.bbclass   13
-rw-r--r--  import-layers/yocto-poky/meta/classes/gtk-doc.bbclass   2
-rw-r--r--  import-layers/yocto-poky/meta/classes/gtk-icon-cache.bbclass   16
-rw-r--r--  import-layers/yocto-poky/meta/classes/gtk-immodules-cache.bbclass   12
-rw-r--r--  import-layers/yocto-poky/meta/classes/gummiboot.bbclass   121
-rw-r--r--  import-layers/yocto-poky/meta/classes/gzipnative.bbclass   5
-rw-r--r--  import-layers/yocto-poky/meta/classes/icecc.bbclass   4
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-buildinfo.bbclass   21
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-container.bbclass   21
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-live.bbclass   8
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-vm.bbclass   14
-rw-r--r--  import-layers/yocto-poky/meta/classes/image.bbclass   156
-rw-r--r--  import-layers/yocto-poky/meta/classes/image_types.bbclass   131
-rw-r--r--  import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass   23
-rw-r--r--  import-layers/yocto-poky/meta/classes/image_types_wic.bbclass   117
-rw-r--r--  import-layers/yocto-poky/meta/classes/insane.bbclass   258
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-arch.bbclass   9
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass   69
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-grub.bbclass   2
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass   51
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-uboot.bbclass   10
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-uimage.bbclass   34
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass   33
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel.bbclass   120
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernelsrc.bbclass   6
-rw-r--r--  import-layers/yocto-poky/meta/classes/libc-common.bbclass   10
-rw-r--r--  import-layers/yocto-poky/meta/classes/libc-package.bbclass   95
-rw-r--r--  import-layers/yocto-poky/meta/classes/license.bbclass   199
-rw-r--r--  import-layers/yocto-poky/meta/classes/live-vm-common.bbclass   8
-rw-r--r--  import-layers/yocto-poky/meta/classes/manpages.bbclass   5
-rw-r--r--  import-layers/yocto-poky/meta/classes/metadata_scm.bbclass   2
-rw-r--r--  import-layers/yocto-poky/meta/classes/migrate_localcount.bbclass   12
-rw-r--r--  import-layers/yocto-poky/meta/classes/mime.bbclass   15
-rw-r--r--  import-layers/yocto-poky/meta/classes/mirrors.bbclass   17
-rw-r--r--  import-layers/yocto-poky/meta/classes/module.bbclass   24
-rw-r--r--  import-layers/yocto-poky/meta/classes/multilib.bbclass   39
-rw-r--r--  import-layers/yocto-poky/meta/classes/multilib_global.bbclass   33
-rw-r--r--  import-layers/yocto-poky/meta/classes/multilib_header.bbclass   7
-rw-r--r--  import-layers/yocto-poky/meta/classes/native.bbclass   32
-rw-r--r--  import-layers/yocto-poky/meta/classes/nativesdk.bbclass   22
-rw-r--r--  import-layers/yocto-poky/meta/classes/npm.bbclass   26
-rw-r--r--  import-layers/yocto-poky/meta/classes/oelint.bbclass   4
-rw-r--r--  import-layers/yocto-poky/meta/classes/own-mirrors.bbclass   2
-rw-r--r--  import-layers/yocto-poky/meta/classes/package.bbclass   350
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_deb.bbclass   133
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_ipk.bbclass   132
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_rpm.bbclass   277
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_tar.bbclass   17
-rw-r--r--  import-layers/yocto-poky/meta/classes/packagedata.bbclass   10
-rw-r--r--  import-layers/yocto-poky/meta/classes/packagefeed-stability.bbclass   20
-rw-r--r--  import-layers/yocto-poky/meta/classes/packagegroup.bbclass   24
-rw-r--r--  import-layers/yocto-poky/meta/classes/patch.bbclass   157
-rw-r--r--  import-layers/yocto-poky/meta/classes/perl-version.bbclass   24
-rw-r--r--  import-layers/yocto-poky/meta/classes/pixbufcache.bbclass   33
-rw-r--r--  import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass   56
-rw-r--r--  import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass   187
-rw-r--r--  import-layers/yocto-poky/meta/classes/prexport.bbclass   4
-rw-r--r--  import-layers/yocto-poky/meta/classes/ptest.bbclass   4
-rw-r--r--  import-layers/yocto-poky/meta/classes/qemu.bbclass   10
-rw-r--r--  import-layers/yocto-poky/meta/classes/qemuboot.bbclass   79
-rw-r--r--  import-layers/yocto-poky/meta/classes/recipe_sanity.bbclass   37
-rw-r--r--  import-layers/yocto-poky/meta/classes/relative_symlinks.bbclass   5
-rw-r--r--  import-layers/yocto-poky/meta/classes/relocatable.bbclass   13
-rw-r--r--  import-layers/yocto-poky/meta/classes/report-error.bbclass   22
-rw-r--r--  import-layers/yocto-poky/meta/classes/rm_work.bbclass   67
-rw-r--r--  import-layers/yocto-poky/meta/classes/rm_work_and_downloads.bbclass   33
-rw-r--r--  import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass   92
-rw-r--r--  import-layers/yocto-poky/meta/classes/rootfs_deb.bbclass   6
-rw-r--r--  import-layers/yocto-poky/meta/classes/rootfs_ipk.bbclass   6
-rw-r--r--  import-layers/yocto-poky/meta/classes/rootfs_rpm.bbclass   28
-rw-r--r--  import-layers/yocto-poky/meta/classes/sanity.bbclass   199
-rw-r--r--  import-layers/yocto-poky/meta/classes/sign_ipk.bbclass   12
-rw-r--r--  import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass   4
-rw-r--r--  import-layers/yocto-poky/meta/classes/sign_rpm.bbclass   19
-rw-r--r--  import-layers/yocto-poky/meta/classes/siteconfig.bbclass   4
-rw-r--r--  import-layers/yocto-poky/meta/classes/siteinfo.bbclass   16
-rw-r--r--  import-layers/yocto-poky/meta/classes/spdx.bbclass   26
-rw-r--r--  import-layers/yocto-poky/meta/classes/sstate.bbclass   380
-rw-r--r--  import-layers/yocto-poky/meta/classes/staging.bbclass   531
-rw-r--r--  import-layers/yocto-poky/meta/classes/syslinux.bbclass   35
-rw-r--r--  import-layers/yocto-poky/meta/classes/systemd-boot.bbclass   25
-rw-r--r--  import-layers/yocto-poky/meta/classes/systemd.bbclass   46
-rw-r--r--  import-layers/yocto-poky/meta/classes/terminal.bbclass   23
-rw-r--r--  import-layers/yocto-poky/meta/classes/testexport.bbclass   233
-rw-r--r--  import-layers/yocto-poky/meta/classes/testimage.bbclass   302
-rw-r--r--  import-layers/yocto-poky/meta/classes/testsdk.bbclass   198
-rw-r--r--  import-layers/yocto-poky/meta/classes/texinfo.bbclass   11
-rw-r--r--  import-layers/yocto-poky/meta/classes/tinderclient.bbclass   82
-rw-r--r--  import-layers/yocto-poky/meta/classes/toaster.bbclass   76
-rw-r--r--  import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass   7
-rw-r--r--  import-layers/yocto-poky/meta/classes/typecheck.bbclass   2
-rw-r--r--  import-layers/yocto-poky/meta/classes/uboot-config.bbclass   14
-rw-r--r--  import-layers/yocto-poky/meta/classes/uboot-extlinux-config.bbclass   56
-rw-r--r--  import-layers/yocto-poky/meta/classes/uboot-sign.bbclass   8
-rw-r--r--  import-layers/yocto-poky/meta/classes/uninative.bbclass   40
-rw-r--r--  import-layers/yocto-poky/meta/classes/update-alternatives.bbclass   72
-rw-r--r--  import-layers/yocto-poky/meta/classes/update-rc.d.bbclass   56
-rw-r--r--  import-layers/yocto-poky/meta/classes/upstream-version-is-even.bbclass   2
-rw-r--r--  import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass   122
-rw-r--r--  import-layers/yocto-poky/meta/classes/useradd.bbclass   120
-rw-r--r--  import-layers/yocto-poky/meta/classes/useradd_base.bbclass   18
-rw-r--r--  import-layers/yocto-poky/meta/classes/utility-tasks.bbclass   12
-rw-r--r--  import-layers/yocto-poky/meta/classes/utils.bbclass   70
-rw-r--r--  import-layers/yocto-poky/meta/classes/waf.bbclass   4
149 files changed, 4633 insertions, 3450 deletions
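
The most common change throughout the diffs below is mechanical: bitbake's datastore now expands variables by default, so the trailing True argument to d.getVar()/d.getVarFlag() is dropped everywhere. A toy stand-in for the datastore, for illustration only and not the real bitbake DataSmart API, showing the before/after semantics:

import re

class MockDataStore:
    """Toy stand-in for bitbake's datastore, for illustration only."""
    def __init__(self):
        self._vars = {}

    def setVar(self, name, value):
        self._vars[name] = value

    def expand(self, s):
        # Substitute ${VAR} references until the string stops changing.
        def repl(m):
            return self._vars.get(m.group(1), m.group(0))
        prev = None
        while prev != s:
            prev, s = s, re.sub(r"\$\{(\w+)\}", repl, s)
        return s

    def getVar(self, name, expand=True):
        # Post-change default: expansion happens unless explicitly disabled.
        value = self._vars.get(name)
        if expand and value is not None:
            value = self.expand(value)
        return value

d = MockDataStore()
d.setVar("BPN", "zlib")
d.setVar("PN", "${BPN}-native")
assert d.getVar("PN") == d.getVar("PN", True) == "zlib-native"
assert d.getVar("PN", False) == "${BPN}-native"
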
diff --git a/import-layers/yocto-poky/meta/classes/allarch.bbclass b/import-layers/yocto-poky/meta/classes/allarch.bbclass
index ddc2a8505..a7ce02464 100644
--- a/import-layers/yocto-poky/meta/classes/allarch.bbclass
+++ b/import-layers/yocto-poky/meta/classes/allarch.bbclass
@@ -2,16 +2,12 @@
# This class is used for architecture independent recipes/data files (usually scripts)
#
-# Expand STAGING_DIR_HOST since for cross-canadian/native/nativesdk, this will
-# point elsewhere after these changes.
-STAGING_DIR_HOST := "${STAGING_DIR_HOST}"
-
PACKAGE_ARCH = "all"
python () {
# Allow this class to be included but overridden - only set
# the values if we're still "all" package arch.
- if d.getVar("PACKAGE_ARCH", True) == "all":
+ if d.getVar("PACKAGE_ARCH") == "all":
# No need for virtual/libc or a cross compiler
d.setVar("INHIBIT_DEFAULT_DEPS","1")
@@ -25,13 +21,16 @@ python () {
d.setVar("TARGET_AS_ARCH", "none")
d.setVar("TARGET_FPU", "")
d.setVar("TARGET_PREFIX", "")
- d.setVar("PACKAGE_EXTRA_ARCHS", "")
+ # Expand PACKAGE_EXTRA_ARCHS since the staging code needs this
+ # (this removes any dependencies from the hash perspective)
+ d.setVar("PACKAGE_EXTRA_ARCHS", d.getVar("PACKAGE_EXTRA_ARCHS"))
d.setVar("SDK_ARCH", "none")
d.setVar("SDK_CC_ARCH", "none")
d.setVar("TARGET_CPPFLAGS", "none")
d.setVar("TARGET_CFLAGS", "none")
d.setVar("TARGET_CXXFLAGS", "none")
d.setVar("TARGET_LDFLAGS", "none")
+ d.setVar("POPULATESYSROOTDEPS", "")
# Avoid this being unnecessarily different due to nuances of
# the target machine that aren't important for "all" arch
@@ -47,6 +46,6 @@ python () {
d.setVarFlag("emit_pkgdata", "vardepsexclude", "MULTILIB_VARIANTS")
d.setVarFlag("write_specfile", "vardepsexclude", "MULTILIBS")
elif bb.data.inherits_class('packagegroup', d) and not bb.data.inherits_class('nativesdk', d):
- bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE", True))
+ bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE"))
}
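
The PACKAGE_EXTRA_ARCHS hunk above uses an "expand and pin" idiom: assigning a variable its own expanded value freezes the result, so the variables it referenced drop out of the task hash. A minimal illustration with a hand-rolled expansion helper (made-up values, not the bitbake API):

store = {"TUNE_ARCH": "armv7a", "PACKAGE_EXTRA_ARCHS": "${TUNE_ARCH} all"}

def expand(value):
    # Single-pass ${VAR} substitution; enough for this flat example.
    for name, val in store.items():
        value = value.replace("${%s}" % name, val)
    return value

# Pin the expanded value, as d.setVar("PACKAGE_EXTRA_ARCHS", d.getVar("PACKAGE_EXTRA_ARCHS")) does.
store["PACKAGE_EXTRA_ARCHS"] = expand(store["PACKAGE_EXTRA_ARCHS"])
store["TUNE_ARCH"] = "armv8a"  # later changes no longer leak in
assert store["PACKAGE_EXTRA_ARCHS"] == "armv7a all"
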
diff --git a/import-layers/yocto-poky/meta/classes/archiver.bbclass b/import-layers/yocto-poky/meta/classes/archiver.bbclass
index 188f8c042..18c5b9668 100644
--- a/import-layers/yocto-poky/meta/classes/archiver.bbclass
+++ b/import-layers/yocto-poky/meta/classes/archiver.bbclass
@@ -52,10 +52,10 @@ do_deploy_all_archives[dirs] = "${WORKDIR}"
python () {
- pn = d.getVar('PN', True)
- assume_provided = (d.getVar("ASSUME_PROVIDED", True) or "").split()
+ pn = d.getVar('PN')
+ assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
if pn in assume_provided:
- for p in d.getVar("PROVIDES", True).split():
+ for p in d.getVar("PROVIDES").split():
if p != pn:
pn = p
break
@@ -67,18 +67,29 @@ python () {
else:
bb.debug(1, 'archiver: %s is included: %s' % (pn, reason))
+
+ # glibc-locale: do_fetch, do_unpack and do_patch tasks have been deleted,
+ # so avoid archiving source here.
+ if pn.startswith('glibc-locale'):
+ return
+
# We just archive gcc-source for all the gcc related recipes
- if d.getVar('BPN', True) in ['gcc', 'libgcc'] \
+ if d.getVar('BPN') in ['gcc', 'libgcc'] \
and not pn.startswith('gcc-source'):
bb.debug(1, 'archiver: %s is excluded, covered by gcc-source' % pn)
return
- ar_src = d.getVarFlag('ARCHIVER_MODE', 'src', True)
- ar_dumpdata = d.getVarFlag('ARCHIVER_MODE', 'dumpdata', True)
- ar_recipe = d.getVarFlag('ARCHIVER_MODE', 'recipe', True)
+ ar_src = d.getVarFlag('ARCHIVER_MODE', 'src')
+ ar_dumpdata = d.getVarFlag('ARCHIVER_MODE', 'dumpdata')
+ ar_recipe = d.getVarFlag('ARCHIVER_MODE', 'recipe')
if ar_src == "original":
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_original' % pn)
+ # 'patched' and 'configured' invoke do_unpack_and_patch because
+    # do_ar_patched and do_ar_configured, respectively, depend on it, but
+    # for 'original' we have to add it explicitly.
+ if d.getVarFlag('ARCHIVER_MODE', 'diff') == '1':
+ d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_unpack_and_patch' % pn)
elif ar_src == "patched":
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_patched' % pn)
elif ar_src == "configured":
@@ -104,9 +115,9 @@ python () {
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_recipe' % pn)
# Output the srpm package
- ar_srpm = d.getVarFlag('ARCHIVER_MODE', 'srpm', True)
+ ar_srpm = d.getVarFlag('ARCHIVER_MODE', 'srpm')
if ar_srpm == "1":
- if d.getVar('PACKAGES', True) != '' and d.getVar('IMAGE_PKGTYPE', True) == 'rpm':
+ if d.getVar('PACKAGES') != '' and d.getVar('IMAGE_PKGTYPE') == 'rpm':
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_package_write_rpm' % pn)
if ar_dumpdata == "1":
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_dumpdata' % pn)
@@ -127,12 +138,12 @@ python do_ar_original() {
import shutil, tempfile
- if d.getVarFlag('ARCHIVER_MODE', 'src', True) != "original":
+ if d.getVarFlag('ARCHIVER_MODE', 'src') != "original":
return
- ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
+ ar_outdir = d.getVar('ARCHIVER_OUTDIR')
bb.note('Archiving the original source...')
- urls = d.getVar("SRC_URI", True).split()
+ urls = d.getVar("SRC_URI").split()
# destsuffix (git fetcher) and subdir (everything else) are allowed to be
# absolute paths (for example, destsuffix=${S}/foobar).
# That messes with unpacking inside our tmpdir below, because the fetchers
@@ -157,7 +168,7 @@ python do_ar_original() {
if os.path.isfile(local):
shutil.copy(local, ar_outdir)
elif os.path.isdir(local):
- tmpdir = tempfile.mkdtemp(dir=d.getVar('ARCHIVER_WORKDIR', True))
+ tmpdir = tempfile.mkdtemp(dir=d.getVar('ARCHIVER_WORKDIR'))
fetch.unpack(tmpdir, (url,))
# To handle recipes with more than one source, we add the "name"
# URL parameter as suffix. We treat it as an error when
@@ -166,12 +177,18 @@ python do_ar_original() {
# to be set when using the git fetcher, otherwise SRCREV cannot
# be set separately for each URL.
params = bb.fetch2.decodeurl(url)[5]
+ type = bb.fetch2.decodeurl(url)[0]
+ location = bb.fetch2.decodeurl(url)[2]
name = params.get('name', '')
- if name in tarball_suffix:
- if not name:
- bb.fatal("Cannot determine archive names for original source because 'name' URL parameter is unset in more than one URL. Add it to at least one of these: %s %s" % (tarball_suffix[name], url))
- else:
- bb.fatal("Cannot determine archive names for original source because 'name=' URL parameter '%s' is used twice. Make it unique in: %s %s" % (tarball_suffix[name], url))
+ if type.lower() == 'file':
+ name_tmp = location.rstrip("*").rstrip("/")
+ name = os.path.basename(name_tmp)
+ else:
+ if name in tarball_suffix:
+ if not name:
+ bb.fatal("Cannot determine archive names for original source because 'name' URL parameter is unset in more than one URL. Add it to at least one of these: %s %s" % (tarball_suffix[name], url))
+ else:
+ bb.fatal("Cannot determine archive names for original source because 'name=' URL parameter '%s' is used twice. Make it unique in: %s %s" % (tarball_suffix[name], url))
tarball_suffix[name] = url
create_tarball(d, tmpdir + '/.', name, ar_outdir)
@@ -191,28 +208,32 @@ python do_ar_original() {
python do_ar_patched() {
- if d.getVarFlag('ARCHIVER_MODE', 'src', True) != 'patched':
+ if d.getVarFlag('ARCHIVER_MODE', 'src') != 'patched':
return
# Get the ARCHIVER_OUTDIR before we reset the WORKDIR
- ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
- ar_workdir = d.getVar('ARCHIVER_WORKDIR', True)
+ ar_outdir = d.getVar('ARCHIVER_OUTDIR')
+ ar_workdir = d.getVar('ARCHIVER_WORKDIR')
bb.note('Archiving the patched source...')
d.setVar('WORKDIR', ar_workdir)
- create_tarball(d, d.getVar('S', True), 'patched', ar_outdir)
+ create_tarball(d, d.getVar('S'), 'patched', ar_outdir)
}
python do_ar_configured() {
import shutil
- ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
- if d.getVarFlag('ARCHIVER_MODE', 'src', True) == 'configured':
+ # Forcibly expand the sysroot paths as we're about to change WORKDIR
+ d.setVar('RECIPE_SYSROOT', d.getVar('RECIPE_SYSROOT'))
+ d.setVar('RECIPE_SYSROOT_NATIVE', d.getVar('RECIPE_SYSROOT_NATIVE'))
+
+ ar_outdir = d.getVar('ARCHIVER_OUTDIR')
+ if d.getVarFlag('ARCHIVER_MODE', 'src') == 'configured':
bb.note('Archiving the configured source...')
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
# "gcc-source-${PV}" recipes don't have "do_configure"
# task, so we need to run "do_preconfigure" instead
if pn.startswith("gcc-source-"):
- d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
+ d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR'))
bb.build.exec_func('do_preconfigure', d)
# The libtool-native's do_configure will remove the
@@ -221,26 +242,26 @@ python do_ar_configured() {
# instead of.
elif pn != 'libtool-native':
# Change the WORKDIR to make do_configure run in another dir.
- d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
+ d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR'))
if bb.data.inherits_class('kernel-yocto', d):
bb.build.exec_func('do_kernel_configme', d)
if bb.data.inherits_class('cmake', d):
bb.build.exec_func('do_generate_toolchain_file', d)
- prefuncs = d.getVarFlag('do_configure', 'prefuncs', True)
+ prefuncs = d.getVarFlag('do_configure', 'prefuncs')
for func in (prefuncs or '').split():
if func != "sysroot_cleansstate":
bb.build.exec_func(func, d)
bb.build.exec_func('do_configure', d)
- postfuncs = d.getVarFlag('do_configure', 'postfuncs', True)
+ postfuncs = d.getVarFlag('do_configure', 'postfuncs')
for func in (postfuncs or '').split():
if func != "do_qa_configure":
bb.build.exec_func(func, d)
- srcdir = d.getVar('S', True)
- builddir = d.getVar('B', True)
+ srcdir = d.getVar('S')
+ builddir = d.getVar('B')
if srcdir != builddir:
if os.path.exists(builddir):
oe.path.copytree(builddir, os.path.join(srcdir, \
- 'build.%s.ar_configured' % d.getVar('PF', True)))
+ 'build.%s.ar_configured' % d.getVar('PF')))
create_tarball(d, srcdir, 'configured', ar_outdir)
}
@@ -251,14 +272,14 @@ def create_tarball(d, srcdir, suffix, ar_outdir):
import tarfile
# Make sure we are only creating a single tarball for gcc sources
- if (d.getVar('SRC_URI', True) == ""):
+ if (d.getVar('SRC_URI') == ""):
return
bb.utils.mkdirhier(ar_outdir)
if suffix:
- filename = '%s-%s.tar.gz' % (d.getVar('PF', True), suffix)
+ filename = '%s-%s.tar.gz' % (d.getVar('PF'), suffix)
else:
- filename = '%s.tar.gz' % d.getVar('PF', True)
+ filename = '%s.tar.gz' % d.getVar('PF')
tarname = os.path.join(ar_outdir, filename)
bb.note('Creating %s' % tarname)
@@ -279,57 +300,78 @@ def create_diff_gz(d, src_orig, src, ar_outdir):
# exclude.
src_patched = src + '.patched'
oe.path.copyhardlinktree(src, src_patched)
- for i in d.getVarFlag('ARCHIVER_MODE', 'diff-exclude', True).split():
+ for i in d.getVarFlag('ARCHIVER_MODE', 'diff-exclude').split():
bb.utils.remove(os.path.join(src_orig, i), recurse=True)
bb.utils.remove(os.path.join(src_patched, i), recurse=True)
dirname = os.path.dirname(src)
basename = os.path.basename(src)
- os.chdir(dirname)
- out_file = os.path.join(ar_outdir, '%s-diff.gz' % d.getVar('PF', True))
- diff_cmd = 'diff -Naur %s.orig %s.patched | gzip -c > %s' % (basename, basename, out_file)
- subprocess.call(diff_cmd, shell=True)
- bb.utils.remove(src_patched, recurse=True)
+ bb.utils.mkdirhier(ar_outdir)
+ cwd = os.getcwd()
+ try:
+ os.chdir(dirname)
+ out_file = os.path.join(ar_outdir, '%s-diff.gz' % d.getVar('PF'))
+ diff_cmd = 'diff -Naur %s.orig %s.patched | gzip -c > %s' % (basename, basename, out_file)
+ subprocess.check_call(diff_cmd, shell=True)
+ bb.utils.remove(src_patched, recurse=True)
+ finally:
+ os.chdir(cwd)
# Run do_unpack and do_patch
python do_unpack_and_patch() {
- if d.getVarFlag('ARCHIVER_MODE', 'src', True) not in \
+ if d.getVarFlag('ARCHIVER_MODE', 'src') not in \
[ 'patched', 'configured'] and \
- d.getVarFlag('ARCHIVER_MODE', 'diff', True) != '1':
+ d.getVarFlag('ARCHIVER_MODE', 'diff') != '1':
return
- ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
- ar_workdir = d.getVar('ARCHIVER_WORKDIR', True)
- pn = d.getVar('PN', True)
+ ar_outdir = d.getVar('ARCHIVER_OUTDIR')
+ ar_workdir = d.getVar('ARCHIVER_WORKDIR')
+ ar_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
+ pn = d.getVar('PN')
# The kernel class functions require it to be on work-shared, so we don't change WORKDIR
if not (bb.data.inherits_class('kernel-yocto', d) or pn.startswith('gcc-source')):
# Change the WORKDIR to make do_unpack do_patch run in another dir.
d.setVar('WORKDIR', ar_workdir)
+ # Restore the original path to recipe's native sysroot (it's relative to WORKDIR).
+ d.setVar('STAGING_DIR_NATIVE', ar_sysroot_native)
# Changing 'WORKDIR' also changed 'B', so create dir 'B' for the
# following tasks that may require it (such as some recipes'
# do_patch, which requires 'B' to exist).
- bb.utils.mkdirhier(d.getVar('B', True))
+ bb.utils.mkdirhier(d.getVar('B'))
bb.build.exec_func('do_unpack', d)
# Save the original source for creating the patches
- if d.getVarFlag('ARCHIVER_MODE', 'diff', True) == '1':
- src = d.getVar('S', True).rstrip('/')
+ if d.getVarFlag('ARCHIVER_MODE', 'diff') == '1':
+ src = d.getVar('S').rstrip('/')
src_orig = '%s.orig' % src
oe.path.copytree(src, src_orig)
# Make sure gcc and kernel sources are patched only once
- if not (d.getVar('SRC_URI', True) == "" or (bb.data.inherits_class('kernel-yocto', d) or pn.startswith('gcc-source'))):
+ if not (d.getVar('SRC_URI') == "" or (bb.data.inherits_class('kernel-yocto', d) or pn.startswith('gcc-source'))):
bb.build.exec_func('do_patch', d)
# Create the patches
- if d.getVarFlag('ARCHIVER_MODE', 'diff', True) == '1':
+ if d.getVarFlag('ARCHIVER_MODE', 'diff') == '1':
bb.note('Creating diff gz...')
create_diff_gz(d, src_orig, src, ar_outdir)
bb.utils.remove(src_orig, recurse=True)
}
+# BBINCLUDED is special (excluded from basehash signature
+# calculation). Using it in a task signature can cause "basehash
+# changed" errors.
+#
+# Depending on BBINCLUDED also causes do_ar_recipe to run again
+# for unrelated changes, like adding or removing buildhistory.bbclass.
+#
+# For these reasons we ignore the dependency completely. The versioning
+# of the output file ensures that we create it each time the recipe
+# gets rebuilt, at least as long as a PR server is used. We also rely
+# on that mechanism to catch changes in the file content, because the
+# file content is not part of the task signature either.
+do_ar_recipe[vardepsexclude] += "BBINCLUDED"
python do_ar_recipe () {
"""
archive the recipe, including .bb and .inc.
@@ -339,14 +381,14 @@ python do_ar_recipe () {
require_re = re.compile( r"require\s+(.+)" )
include_re = re.compile( r"include\s+(.+)" )
- bbfile = d.getVar('FILE', True)
- outdir = os.path.join(d.getVar('WORKDIR', True), \
- '%s-recipe' % d.getVar('PF', True))
+ bbfile = d.getVar('FILE')
+ outdir = os.path.join(d.getVar('WORKDIR'), \
+ '%s-recipe' % d.getVar('PF'))
bb.utils.mkdirhier(outdir)
shutil.copy(bbfile, outdir)
- pn = d.getVar('PN', True)
- bbappend_files = d.getVar('BBINCLUDED', True).split()
+ pn = d.getVar('PN')
+ bbappend_files = d.getVar('BBINCLUDED').split()
# If recipe name is aa, we need to match files like aa.bbappend and aa_1.1.bbappend
# Files like aa1.bbappend or aa1_1.1.bbappend must be excluded.
bbappend_re = re.compile( r".*/%s_[^/]*\.bbappend$" % re.escape(pn))
@@ -356,7 +398,7 @@ python do_ar_recipe () {
shutil.copy(file, outdir)
dirname = os.path.dirname(bbfile)
- bbpath = '%s:%s' % (dirname, d.getVar('BBPATH', True))
+ bbpath = '%s:%s' % (dirname, d.getVar('BBPATH'))
f = open(bbfile, 'r')
for line in f.readlines():
incfile = None
@@ -365,12 +407,12 @@ python do_ar_recipe () {
elif include_re.match(line):
incfile = include_re.match(line).group(1)
if incfile:
- incfile = bb.data.expand(incfile, d)
+ incfile = d.expand(incfile)
incfile = bb.utils.which(bbpath, incfile)
if incfile:
shutil.copy(incfile, outdir)
- create_tarball(d, outdir, 'recipe', d.getVar('ARCHIVER_OUTDIR', True))
+ create_tarball(d, outdir, 'recipe', d.getVar('ARCHIVER_OUTDIR'))
bb.utils.remove(outdir, recurse=True)
}
@@ -379,8 +421,8 @@ python do_dumpdata () {
dump environment data to ${PF}-showdata.dump
"""
- dumpfile = os.path.join(d.getVar('ARCHIVER_OUTDIR', True), \
- '%s-showdata.dump' % d.getVar('PF', True))
+ dumpfile = os.path.join(d.getVar('ARCHIVER_OUTDIR'), \
+ '%s-showdata.dump' % d.getVar('PF'))
bb.note('Dumping metadata into %s' % dumpfile)
with open(dumpfile, "w") as f:
# emit variables and shell functions
@@ -419,7 +461,10 @@ do_deploy_all_archives() {
}
python () {
- # Add tasks in the correct order, specifically for linux-yocto to avoid race condition
+    # Add tasks in the correct order, specifically for linux-yocto to avoid a race condition.
+ # sstatesig.py:sstate_rundepfilter has special support that excludes this dependency
+ # so that do_kernel_configme does not need to run again when do_unpack_and_patch
+ # gets added or removed (by adding or removing archiver.bbclass).
if bb.data.inherits_class('kernel-yocto', d):
bb.build.addtask('do_kernel_configme', 'do_configure', 'do_unpack_and_patch', d)
}
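
Beyond the getVar cleanups, the archiver changes harden create_diff_gz(): subprocess.check_call() replaces subprocess.call() so a failing diff pipeline aborts the task instead of being ignored, and the working directory is restored in a finally block. A minimal sketch of that pattern, with a made-up command:

import os
import subprocess

def run_in_dir(dirname, cmd):
    # Run a shell command in dirname, restoring the original cwd even on error.
    cwd = os.getcwd()
    try:
        os.chdir(dirname)
        subprocess.check_call(cmd, shell=True)  # raises CalledProcessError on non-zero exit
    finally:
        os.chdir(cwd)

run_in_dir("/tmp", "true")  # hypothetical usage
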
diff --git a/import-layers/yocto-poky/meta/classes/autotools.bbclass b/import-layers/yocto-poky/meta/classes/autotools.bbclass
index c43ea9a7e..ac04a07cb 100644
--- a/import-layers/yocto-poky/meta/classes/autotools.bbclass
+++ b/import-layers/yocto-poky/meta/classes/autotools.bbclass
@@ -1,8 +1,8 @@
def autotools_dep_prepend(d):
- if d.getVar('INHIBIT_AUTOTOOLS_DEPS', True):
+ if d.getVar('INHIBIT_AUTOTOOLS_DEPS'):
return ''
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
deps = ''
if pn in ['autoconf-native', 'automake-native', 'help2man-native']:
@@ -14,7 +14,7 @@ def autotools_dep_prepend(d):
if not bb.data.inherits_class('native', d) \
and not bb.data.inherits_class('nativesdk', d) \
and not bb.data.inherits_class('cross', d) \
- and not d.getVar('INHIBIT_DEFAULT_DEPS', True):
+ and not d.getVar('INHIBIT_DEFAULT_DEPS'):
deps += 'libtool-cross '
return deps + 'gnu-config-native '
@@ -27,7 +27,7 @@ inherit siteinfo
# results for autoconf tests we cannot run at build time.
export CONFIG_SITE = "${@siteinfo_get_files(d)}"
-acpaths = "default"
+acpaths ?= "default"
EXTRA_AUTORECONF = "--exclude=autopoint"
export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
@@ -131,133 +131,18 @@ EXTRACONFFUNCS ??= ""
EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
-do_configure[prefuncs] += "autotools_preconfigure autotools_copy_aclocals ${EXTRACONFFUNCS}"
+do_configure[prefuncs] += "autotools_preconfigure autotools_aclocals ${EXTRACONFFUNCS}"
do_configure[postfuncs] += "autotools_postconfigure"
-ACLOCALDIR = "${WORKDIR}/aclocal-copy"
-
-python autotools_copy_aclocals () {
- import copy
-
- s = d.getVar("AUTOTOOLS_SCRIPT_PATH", True)
- if not os.path.exists(s + "/configure.in") and not os.path.exists(s + "/configure.ac"):
- if not d.getVar("AUTOTOOLS_COPYACLOCAL", False):
- return
-
- taskdepdata = d.getVar("BB_TASKDEPDATA", False)
- #bb.warn(str(taskdepdata))
- pn = d.getVar("PN", True)
- aclocaldir = d.getVar("ACLOCALDIR", True)
- oe.path.remove(aclocaldir)
- bb.utils.mkdirhier(aclocaldir)
- start = None
- configuredeps = []
- # Detect bitbake -b usage
- # Everything but quilt-native would have dependencies
- nodeps = (pn != "quilt-native")
-
- for dep in taskdepdata:
- data = taskdepdata[dep]
- if data[1] == "do_configure" and data[0] == pn:
- start = dep
- if not nodeps and start:
- break
- if nodeps and data[0] != pn:
- nodeps = False
- if start is None:
- bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
-
- # We need to figure out which m4 files we need to expose to this do_configure task.
- # This needs to match what would get restored from sstate, which is controlled
- # ultimately by calls from bitbake to setscene_depvalid().
- # That function expects a setscene dependency tree. We build a dependency tree
- # condensed to do_populate_sysroot -> do_populate_sysroot dependencies, similar to
- # that used by setscene tasks. We can then call into setscene_depvalid() and decide
- # which dependencies we can "see" and should expose the m4 files for.
- setscenedeps = copy.deepcopy(taskdepdata)
-
- start = set([start])
-
- # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
- for dep in taskdepdata:
- data = setscenedeps[dep]
- if data[1] != "do_populate_sysroot":
- for dep2 in setscenedeps:
- data2 = setscenedeps[dep2]
- if dep in data2[3]:
- data2[3].update(setscenedeps[dep][3])
- data2[3].remove(dep)
- if dep in start:
- start.update(setscenedeps[dep][3])
- start.remove(dep)
- del setscenedeps[dep]
-
- # Remove circular references
- for dep in setscenedeps:
- if dep in setscenedeps[dep][3]:
- setscenedeps[dep][3].remove(dep)
-
- # Direct dependencies should be present and can be depended upon
- for dep in start:
- configuredeps.append(setscenedeps[dep][0])
-
- # Call into setscene_depvalid for each sub-dependency and only copy m4 files
- # for ones that would be restored from sstate.
- done = list(start)
- next = list(start)
- while next:
- new = []
- for dep in next:
- data = setscenedeps[dep]
- for datadep in data[3]:
- if datadep in done:
- continue
- taskdeps = {}
- taskdeps[dep] = setscenedeps[dep][:2]
- taskdeps[datadep] = setscenedeps[datadep][:2]
- retval = setscene_depvalid(datadep, taskdeps, [], d)
- if retval:
- bb.note("Skipping setscene dependency %s for m4 macro copying" % datadep)
- continue
- done.append(datadep)
- new.append(datadep)
- configuredeps.append(setscenedeps[datadep][0])
- next = new
-
- cp = []
- if nodeps:
- bb.warn("autotools: Unable to find task dependencies, -b being used? Pulling in all m4 files")
- for l in [d.expand("${STAGING_DATADIR_NATIVE}/aclocal/"), d.expand("${STAGING_DATADIR}/aclocal/")]:
- cp.extend(os.path.join(l, f) for f in os.listdir(l))
-
- for c in configuredeps:
- if c.endswith("-native"):
- manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${BUILD_ARCH}-%s.populate_sysroot" % c)
- elif c.startswith("nativesdk-"):
- manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${SDK_ARCH}_${SDK_OS}-%s.populate_sysroot" % c)
- elif "-cross-" in c or "-crosssdk" in c:
- continue
- else:
- manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${MACHINE}-%s.populate_sysroot" % c)
- try:
- f = open(manifest, "r")
- for l in f:
- if "/aclocal/" in l and l.strip().endswith(".m4"):
- cp.append(l.strip())
- elif "config_site.d/" in l:
- cp.append(l.strip())
- except:
- bb.warn("%s not found" % manifest)
-
- for c in cp:
- t = os.path.join(aclocaldir, os.path.basename(c))
- if not os.path.exists(t):
- os.symlink(c, t)
+ACLOCALDIR = "${STAGING_DATADIR}/aclocal"
+ACLOCALEXTRAPATH = ""
+ACLOCALEXTRAPATH_class-target = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
+ACLOCALEXTRAPATH_class-nativesdk = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
+python autotools_aclocals () {
# Refresh variable with cache files
d.setVar("CONFIG_SITE", siteinfo_get_files(d, aclocalcache=True))
}
-autotools_copy_aclocals[vardepsexclude] += "MACHINE SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
CONFIGURE_FILES = "${S}/configure.in ${S}/configure.ac ${S}/config.h.in ${S}/acinclude.m4 Makefile.am"
@@ -279,6 +164,7 @@ autotools_do_configure() {
if [ -e ${AUTOTOOLS_SCRIPT_PATH}/configure.in -o -e ${AUTOTOOLS_SCRIPT_PATH}/configure.ac ]; then
olddir=`pwd`
cd ${AUTOTOOLS_SCRIPT_PATH}
+ mkdir -p ${ACLOCALDIR}
ACLOCAL="aclocal --system-acdir=${ACLOCALDIR}/"
if [ x"${acpaths}" = xdefault ]; then
acpaths=
@@ -289,6 +175,7 @@ autotools_do_configure() {
else
acpaths="${acpaths}"
fi
+ acpaths="$acpaths ${ACLOCALEXTRAPATH}"
AUTOV=`automake --version | sed -e '1{s/.* //;s/\.[0-9]\+$//};q'`
automake --version
echo "AUTOV is $AUTOV"
@@ -306,14 +193,14 @@ autotools_do_configure() {
else
CONFIGURE_AC=configure.ac
fi
- if grep "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
- if grep "sed.*POTFILES" $CONFIGURE_AC >/dev/null; then
+ if grep -q "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC; then
+ if grep -q "sed.*POTFILES" $CONFIGURE_AC; then
: do nothing -- we still have an old unmodified configure.ac
else
bbnote Executing glib-gettextize --force --copy
echo "no" | glib-gettextize --force --copy
fi
- elif grep "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
+ elif grep -q "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC; then
# We'd call gettextize here if it wasn't so broken...
cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
if [ -d ${S}/po/ ]; then
@@ -325,7 +212,7 @@ autotools_do_configure() {
PRUNE_M4="$PRUNE_M4 gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4"
fi
mkdir -p m4
- if grep "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC >/dev/null; then
+ if grep -q "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC; then
if ! echo "${DEPENDS}" | grep -q intltool-native; then
bbwarn "Missing DEPENDS on intltool-native"
fi
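
The large deletion above removes autotools_copy_aclocals(), which walked BB_TASKDEPDATA to copy .m4 files from dependencies into a per-recipe aclocal-copy directory. With recipe-specific sysroots that copy is unnecessary: ACLOCALDIR now points into the recipe's staged ${STAGING_DATADIR}, and ACLOCALEXTRAPATH adds the native sysroot via -I. A sketch of the resulting aclocal invocation, using hypothetical paths:

# Hypothetical per-recipe sysroot locations; the real values come from
# STAGING_DATADIR and STAGING_DATADIR_NATIVE.
staging_datadir = "/build/tmp/work/foo/recipe-sysroot/usr/share"
staging_datadir_native = "/build/tmp/work/foo/recipe-sysroot-native/usr/share"

aclocaldir = staging_datadir + "/aclocal"
extra = ["-I", staging_datadir_native + "/aclocal/"]  # ACLOCALEXTRAPATH for target builds
cmd = ["aclocal", "--system-acdir=%s/" % aclocaldir] + extra
print(" ".join(cmd))
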
diff --git a/import-layers/yocto-poky/meta/classes/base.bbclass b/import-layers/yocto-poky/meta/classes/base.bbclass
index 024fe4331..d95afb7b9 100644
--- a/import-layers/yocto-poky/meta/classes/base.bbclass
+++ b/import-layers/yocto-poky/meta/classes/base.bbclass
@@ -10,13 +10,13 @@ inherit utility-tasks
inherit metadata_scm
inherit logging
-OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath"
+OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license"
OE_IMPORTS[type] = "list"
def oe_import(d):
import sys
- bbpath = d.getVar("BBPATH", True).split(":")
+ bbpath = d.getVar("BBPATH").split(":")
sys.path[0:0] = [os.path.join(dir, "lib") for dir in bbpath]
def inject(name, value):
@@ -37,7 +37,7 @@ def oe_import(d):
OE_IMPORTED := "${@oe_import(d)}"
def lsb_distro_identifier(d):
- adjust = d.getVar('LSB_DISTRO_ADJUST', True)
+ adjust = d.getVar('LSB_DISTRO_ADJUST')
adjust_func = None
if adjust:
try:
@@ -72,7 +72,7 @@ def base_dep_prepend(d):
# we need that built is the responsibility of the patch function / class, not
# the application.
if not d.getVar('INHIBIT_DEFAULT_DEPS', False):
- if (d.getVar('HOST_SYS', True) != d.getVar('BUILD_SYS', True)):
+ if (d.getVar('HOST_SYS') != d.getVar('BUILD_SYS')):
deps += " virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc "
return deps
@@ -83,11 +83,11 @@ DEPENDS_prepend="${BASEDEPENDS} "
FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
# THISDIR only works properly with immediate expansion as it has to run
# in the context of the location its used (:=)
-THISDIR = "${@os.path.dirname(d.getVar('FILE', True))}"
+THISDIR = "${@os.path.dirname(d.getVar('FILE'))}"
def extra_path_elements(d):
path = ""
- elements = (d.getVar('EXTRANATIVEPATH', True) or "").split()
+ elements = (d.getVar('EXTRANATIVEPATH') or "").split()
for e in elements:
path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
return path
@@ -96,8 +96,11 @@ PATH_prepend = "${@extra_path_elements(d)}"
def get_lic_checksum_file_list(d):
filelist = []
- lic_files = d.getVar("LIC_FILES_CHKSUM", True) or ''
- tmpdir = d.getVar("TMPDIR", True)
+ lic_files = d.getVar("LIC_FILES_CHKSUM") or ''
+ tmpdir = d.getVar("TMPDIR")
+ s = d.getVar("S")
+ b = d.getVar("B")
+ workdir = d.getVar("WORKDIR")
urls = lic_files.split()
for url in urls:
@@ -109,13 +112,32 @@ def get_lic_checksum_file_list(d):
raise bb.fetch.MalformedUrl(url)
if path[0] == '/':
- if path.startswith(tmpdir):
+ if path.startswith((tmpdir, s, b, workdir)):
continue
filelist.append(path + ":" + str(os.path.exists(path)))
except bb.fetch.MalformedUrl:
- bb.fatal(d.getVar('PN', True) + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
+ bb.fatal(d.getVar('PN') + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
return " ".join(filelist)
+def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
+ tools = d.getVar(toolsvar).split()
+ origbbenv = d.getVar("BB_ORIGENV", False)
+ path = origbbenv.getVar("PATH")
+ bb.utils.mkdirhier(dest)
+ notfound = []
+ for tool in tools:
+ desttool = os.path.join(dest, tool)
+ if not os.path.exists(desttool):
+ srctool = bb.utils.which(path, tool, executable=True)
+ if "ccache" in srctool:
+ srctool = bb.utils.which(path, tool, executable=True, direction=1)
+ if srctool:
+ os.symlink(srctool, desttool)
+ else:
+ notfound.append(tool)
+ if notfound and fatal:
+ bb.fatal("The following required tools (as specified by HOSTTOOLS) appear to be unavailable in PATH, please install them in order to proceed:\n %s" % " ".join(notfound))
+
addtask fetch
do_fetch[dirs] = "${DL_DIR}"
do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
@@ -123,7 +145,7 @@ do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
do_fetch[vardeps] += "SRCREV"
python base_do_fetch() {
- src_uri = (d.getVar('SRC_URI', True) or "").split()
+ src_uri = (d.getVar('SRC_URI') or "").split()
if len(src_uri) == 0:
return
@@ -138,31 +160,31 @@ addtask unpack after do_fetch
do_unpack[dirs] = "${WORKDIR}"
python () {
- if d.getVar('S', True) != d.getVar('WORKDIR', True):
+ if d.getVar('S') != d.getVar('WORKDIR'):
d.setVarFlag('do_unpack', 'cleandirs', '${S}')
else:
d.setVarFlag('do_unpack', 'cleandirs', os.path.join('${S}', 'patches'))
}
python base_do_unpack() {
- src_uri = (d.getVar('SRC_URI', True) or "").split()
+ src_uri = (d.getVar('SRC_URI') or "").split()
if len(src_uri) == 0:
return
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
- fetcher.unpack(d.getVar('WORKDIR', True))
+ fetcher.unpack(d.getVar('WORKDIR'))
except bb.fetch2.BBFetchException as e:
bb.fatal(str(e))
}
def pkgarch_mapping(d):
# Compatibility mappings of TUNE_PKGARCH (opt in)
- if d.getVar("PKGARCHCOMPAT_ARMV7A", True):
- if d.getVar("TUNE_PKGARCH", True) == "armv7a-vfp-neon":
+ if d.getVar("PKGARCHCOMPAT_ARMV7A"):
+ if d.getVar("TUNE_PKGARCH") == "armv7a-vfp-neon":
d.setVar("TUNE_PKGARCH", "armv7a")
def get_layers_branch_rev(d):
- layers = (d.getVar("BBLAYERS", True) or "").split()
+ layers = (d.getVar("BBLAYERS") or "").split()
layers_branch_rev = ["%-17s = \"%s:%s\"" % (os.path.basename(i), \
base_get_metadata_git_branch(i, None).strip(), \
base_get_metadata_git_revision(i, None)) \
@@ -189,7 +211,7 @@ BUILDCFG_FUNCS[type] = "list"
def buildcfg_vars(d):
statusvars = oe.data.typed_value('BUILDCFG_VARS', d)
for var in statusvars:
- value = d.getVar(var, True)
+ value = d.getVar(var)
if value is not None:
yield '%-17s = "%s"' % (var, value)
@@ -197,7 +219,7 @@ def buildcfg_neededvars(d):
needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d)
pesteruser = []
for v in needed_vars:
- val = d.getVar(v, True)
+ val = d.getVar(v)
if not val or val == 'INVALID':
pesteruser.append(v)
@@ -216,10 +238,12 @@ python base_eventhandler() {
pkgarch_mapping(e.data)
oe.utils.features_backfill("DISTRO_FEATURES", e.data)
oe.utils.features_backfill("MACHINE_FEATURES", e.data)
+ # Works with the line in layer.conf which changes PATH to point here
+ setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS', d)
+ setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS_NONFATAL', d, fatal=False)
if isinstance(e, bb.event.BuildStarted):
localdata = bb.data.createCopy(e.data)
- bb.data.update_data(localdata)
statuslines = []
for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
g = globals()
@@ -230,7 +254,7 @@ python base_eventhandler() {
if flines:
statuslines.extend(flines)
- statusheader = e.data.getVar('BUILDCFG_HEADER', True)
+ statusheader = e.data.getVar('BUILDCFG_HEADER')
if statusheader:
bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
@@ -238,7 +262,7 @@ python base_eventhandler() {
# target ones and we'd see duplicate key names overwriting each other
# for various PREFERRED_PROVIDERS
if isinstance(e, bb.event.RecipePreFinalise):
- if e.data.getVar("TARGET_PREFIX", True) == e.data.getVar("SDK_PREFIX", True):
+ if e.data.getVar("TARGET_PREFIX") == e.data.getVar("SDK_PREFIX"):
e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils")
e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc-initial")
e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc")
@@ -264,14 +288,14 @@ python base_eventhandler() {
# sysroot since they're now "unreachable". This makes switching virtual/kernel work in
# particular.
#
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
if not source_mirror_fetch:
- provs = (d.getVar("PROVIDES", True) or "").split()
- multiwhitelist = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
+ provs = (d.getVar("PROVIDES") or "").split()
+ multiwhitelist = (d.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
for p in provs:
if p.startswith("virtual/") and p not in multiwhitelist:
- profprov = d.getVar("PREFERRED_PROVIDER_" + p, True)
+ profprov = d.getVar("PREFERRED_PROVIDER_" + p)
if profprov and pn != profprov:
raise bb.parse.SkipPackage("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
}
@@ -281,7 +305,7 @@ CLEANBROKEN = "0"
addtask configure after do_patch
do_configure[dirs] = "${B}"
-do_configure[deptask] = "do_populate_sysroot"
+do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot"
base_do_configure() {
if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
@@ -333,9 +357,9 @@ def set_packagetriplet(d):
tos = []
tvs = []
- archs.append(d.getVar("PACKAGE_ARCHS", True).split())
- tos.append(d.getVar("TARGET_OS", True))
- tvs.append(d.getVar("TARGET_VENDOR", True))
+ archs.append(d.getVar("PACKAGE_ARCHS").split())
+ tos.append(d.getVar("TARGET_OS"))
+ tvs.append(d.getVar("TARGET_VENDOR"))
def settriplet(d, varname, archs, tos, tvs):
triplets = []
@@ -347,16 +371,15 @@ def set_packagetriplet(d):
settriplet(d, "PKGTRIPLETS", archs, tos, tvs)
- variants = d.getVar("MULTILIB_VARIANTS", True) or ""
+ variants = d.getVar("MULTILIB_VARIANTS") or ""
for item in variants.split():
localdata = bb.data.createCopy(d)
overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
localdata.setVar("OVERRIDES", overrides)
- bb.data.update_data(localdata)
- archs.append(localdata.getVar("PACKAGE_ARCHS", True).split())
- tos.append(localdata.getVar("TARGET_OS", True))
- tvs.append(localdata.getVar("TARGET_VENDOR", True))
+ archs.append(localdata.getVar("PACKAGE_ARCHS").split())
+ tos.append(localdata.getVar("TARGET_OS"))
+ tvs.append(localdata.getVar("TARGET_VENDOR"))
settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs)
@@ -371,10 +394,10 @@ python () {
# PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends"
pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
if pkgconfigflags:
- pkgconfig = (d.getVar('PACKAGECONFIG', True) or "").split()
- pn = d.getVar("PN", True)
+ pkgconfig = (d.getVar('PACKAGECONFIG') or "").split()
+ pn = d.getVar("PN")
- mlprefix = d.getVar("MLPREFIX", True)
+ mlprefix = d.getVar("MLPREFIX")
def expandFilter(appends, extension, prefix):
appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
@@ -416,7 +439,7 @@ python () {
num = len(items)
if num > 4:
bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend can be specified!"
- % (d.getVar('PN', True), flag))
+ % (d.getVar('PN'), flag))
if flag in pkgconfig:
if num >= 3 and items[2]:
@@ -431,8 +454,8 @@ python () {
appendVar('RDEPENDS_${PN}', extrardeps)
appendVar('PACKAGECONFIG_CONFARGS', extraconf)
- pn = d.getVar('PN', True)
- license = d.getVar('LICENSE', True)
+ pn = d.getVar('PN')
+ license = d.getVar('LICENSE')
if license == "INVALID":
bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)
@@ -462,26 +485,26 @@ python () {
d.setVarFlag('do_devshell', 'fakeroot', '1')
d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
- need_machine = d.getVar('COMPATIBLE_MACHINE', True)
+ need_machine = d.getVar('COMPATIBLE_MACHINE')
if need_machine:
import re
- compat_machines = (d.getVar('MACHINEOVERRIDES', True) or "").split(":")
+ compat_machines = (d.getVar('MACHINEOVERRIDES') or "").split(":")
for m in compat_machines:
if re.match(need_machine, m):
break
else:
- raise bb.parse.SkipPackage("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE', True))
+ raise bb.parse.SkipPackage("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE'))
source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
if not source_mirror_fetch:
- need_host = d.getVar('COMPATIBLE_HOST', True)
+ need_host = d.getVar('COMPATIBLE_HOST')
if need_host:
import re
- this_host = d.getVar('HOST_SYS', True)
+ this_host = d.getVar('HOST_SYS')
if not re.match(need_host, this_host):
raise bb.parse.SkipPackage("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)
- bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE', True) or "").split()
+ bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()
check_license = False if pn.startswith("nativesdk-") else True
for t in ["-native", "-cross-${TARGET_ARCH}", "-cross-initial-${TARGET_ARCH}",
@@ -500,21 +523,21 @@ python () {
for lic in bad_licenses:
spdx_license = return_spdx(d, lic)
for w in ["LGPLv2_WHITELIST_", "WHITELIST_"]:
- whitelist.extend((d.getVar(w + lic, True) or "").split())
+ whitelist.extend((d.getVar(w + lic) or "").split())
if spdx_license:
- whitelist.extend((d.getVar(w + spdx_license, True) or "").split())
+ whitelist.extend((d.getVar(w + spdx_license) or "").split())
'''
We need to track what we are whitelisting and why. If pn is
incompatible we need to be able to note that the image that
is created may in fact contain incompatible licenses despite
INCOMPATIBLE_LICENSE being set.
'''
- incompatwl.extend((d.getVar(w + lic, True) or "").split())
+ incompatwl.extend((d.getVar(w + lic) or "").split())
if spdx_license:
- incompatwl.extend((d.getVar(w + spdx_license, True) or "").split())
+ incompatwl.extend((d.getVar(w + spdx_license) or "").split())
if not pn in whitelist:
- pkgs = d.getVar('PACKAGES', True).split()
+ pkgs = d.getVar('PACKAGES').split()
skipped_pkgs = []
unskipped_pkgs = []
for pkg in pkgs:
@@ -526,13 +549,13 @@ python () {
if unskipped_pkgs:
for pkg in skipped_pkgs:
bb.debug(1, "SKIPPING the package " + pkg + " at do_rootfs because it's " + license)
- mlprefix = d.getVar('MLPREFIX', True)
+ mlprefix = d.getVar('MLPREFIX')
d.setVar('LICENSE_EXCLUSION-' + mlprefix + pkg, 1)
for pkg in unskipped_pkgs:
bb.debug(1, "INCLUDING the package " + pkg)
elif all_skipped or incompatible_license(d, bad_licenses):
bb.debug(1, "SKIPPING recipe %s because it's %s" % (pn, license))
- raise bb.parse.SkipPackage("incompatible with license %s" % license)
+ raise bb.parse.SkipPackage("it has an incompatible license: %s" % license)
elif pn in whitelist:
if pn in incompatwl:
bb.note("INCLUDING " + pn + " as buildable despite INCOMPATIBLE_LICENSE because it has been whitelisted")
@@ -542,8 +565,8 @@ python () {
# matching of license expressions - just check that all license strings
# in LICENSE_<pkg> are found in LICENSE.
license_set = oe.license.list_licenses(license)
- for pkg in d.getVar('PACKAGES', True).split():
- pkg_license = d.getVar('LICENSE_' + pkg, True)
+ for pkg in d.getVar('PACKAGES').split():
+ pkg_license = d.getVar('LICENSE_' + pkg)
if pkg_license:
unlisted = oe.license.list_licenses(pkg_license) - license_set
if unlisted:
@@ -551,7 +574,7 @@ python () {
"listed in LICENSE" % (pkg, ' '.join(unlisted)))
needsrcrev = False
- srcuri = d.getVar('SRC_URI', True)
+ srcuri = d.getVar('SRC_URI')
for uri in srcuri.split():
(scheme, _ , path) = bb.fetch.decodeurl(uri)[:3]
@@ -611,8 +634,8 @@ python () {
set_packagetriplet(d)
# 'multimachine' handling
- mach_arch = d.getVar('MACHINE_ARCH', True)
- pkg_arch = d.getVar('PACKAGE_ARCH', True)
+ mach_arch = d.getVar('MACHINE_ARCH')
+ pkg_arch = d.getVar('PACKAGE_ARCH')
if (pkg_arch == mach_arch):
# Already machine specific - nothing further to do
@@ -622,11 +645,11 @@ python () {
# We always try to scan SRC_URI for urls with machine overrides
# unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
#
- override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH', True)
+ override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH')
if override != '0':
paths = []
- fpaths = (d.getVar('FILESPATH', True) or '').split(':')
- machine = d.getVar('MACHINE', True)
+ fpaths = (d.getVar('FILESPATH') or '').split(':')
+ machine = d.getVar('MACHINE')
for p in fpaths:
if os.path.basename(p) == machine and os.path.isdir(p):
paths.append(p)
@@ -643,16 +666,16 @@ python () {
d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
return
- packages = d.getVar('PACKAGES', True).split()
+ packages = d.getVar('PACKAGES').split()
for pkg in packages:
- pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg, True)
+ pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg)
# We could look for != PACKAGE_ARCH here but how to choose
# if multiple differences are present?
# Look through PACKAGE_ARCHS for the priority order?
if pkgarch and pkgarch == mach_arch:
d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
- bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN", True))
+ bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN"))
}
addtask cleansstate after do_clean
@@ -663,7 +686,7 @@ addtask cleanall after do_cleansstate
do_cleansstate[nostamp] = "1"
python do_cleanall() {
- src_uri = (d.getVar('SRC_URI', True) or "").split()
+ src_uri = (d.getVar('SRC_URI') or "").split()
if len(src_uri) == 0:
return
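
The other notable addition to base.bbclass is setup_hosttools_dir(), backing the new HOSTTOOLS mechanism: a directory is populated with symlinks to whitelisted host tools, and layer.conf points PATH at that directory so builds only see explicitly allowed tools. A self-contained sketch of the same idea; the helper name and paths here are assumptions, not the bitbake API:

import os
import shutil

def populate_hosttools(dest, tools):
    # Symlink each tool found on the host PATH into dest; report the rest.
    os.makedirs(dest, exist_ok=True)
    missing = []
    for tool in tools:
        link = os.path.join(dest, tool)
        if os.path.exists(link):
            continue
        src = shutil.which(tool)
        if src:
            os.symlink(src, link)
        else:
            missing.append(tool)
    return missing

missing = populate_hosttools("/tmp/hosttools-demo", ["sh", "sed", "no-such-tool"])
print("missing:", missing)  # the real class calls bb.fatal() for required tools
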
diff --git a/import-layers/yocto-poky/meta/classes/binconfig-disabled.bbclass b/import-layers/yocto-poky/meta/classes/binconfig-disabled.bbclass
index 602a669aa..096b670e1 100644
--- a/import-layers/yocto-poky/meta/classes/binconfig-disabled.bbclass
+++ b/import-layers/yocto-poky/meta/classes/binconfig-disabled.bbclass
@@ -15,6 +15,7 @@ do_install_append () {
echo "echo 'ERROR: $x should not be used, use an alternative such as pkg-config' >&2" >> ${D}$x
echo "echo '--should-not-have-used-$x'" >> ${D}$x
echo "exit 1" >> ${D}$x
+ chmod +x ${D}$x
done
}
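
The one-line chmod above matters because do_install_append writes each disabled *-config stub from scratch; without the execute bit, a configure script probing the stub would fail with "permission denied" instead of hitting the intended error message. A sketch of the generated stub, recreated from Python for illustration:

import os
import stat

stub = "/tmp/demo-foo-config"  # hypothetical ${D}$x path
with open(stub, "w") as f:
    f.write("#!/bin/sh\n")
    f.write("echo 'ERROR: foo-config should not be used, use an alternative such as pkg-config' >&2\n")
    f.write("echo '--should-not-have-used-foo-config'\n")
    f.write("exit 1\n")
# The new chmod +x step, without which the stub cannot run at all.
os.chmod(stub, os.stat(stub).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
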
diff --git a/import-layers/yocto-poky/meta/classes/binconfig.bbclass b/import-layers/yocto-poky/meta/classes/binconfig.bbclass
index cbc417360..39c3e2b17 100644
--- a/import-layers/yocto-poky/meta/classes/binconfig.bbclass
+++ b/import-layers/yocto-poky/meta/classes/binconfig.bbclass
@@ -13,16 +13,16 @@ def get_binconfig_mangle(d):
s += " -e 's:=%s${exec_prefix}/:=\\1OEEXECPREFIX/:'" % optional_quote
s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
+ s += " -e 's:-L${WORKDIR}:-LOELIBDIR:'"
+ s += " -e 's:-I${WORKDIR}:-IOEINCDIR:'"
s += " -e 's:OEBASELIBDIR:${STAGING_BASELIBDIR}:;'"
s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${prefix}:'"
s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${exec_prefix}:'"
- s += " -e 's:-I${WORKDIR}:-I${STAGING_INCDIR}:'"
- s += " -e 's:-L${WORKDIR}:-L${STAGING_LIBDIR}:'"
- if bb.data.getVar("OE_BINCONFIG_EXTRA_MANGLE", d):
- s += bb.data.getVar("OE_BINCONFIG_EXTRA_MANGLE", d)
+ if d.getVar("OE_BINCONFIG_EXTRA_MANGLE", False):
+ s += d.getVar("OE_BINCONFIG_EXTRA_MANGLE")
return s
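
The reordering in get_binconfig_mangle() is about substitution order: -L${WORKDIR} and -I${WORKDIR} are now mapped to the OELIBDIR/OEINCDIR placeholders alongside the libdir/includedir rules, before the placeholders are expanded to staging paths, so every path gets rewritten exactly once. A toy two-phase rewrite with made-up paths:

# Phase 1 maps real paths to placeholders; phase 2 expands each placeholder once.
rules = [
    ("-L/work/build", "-LOELIBDIR"),   # WORKDIR rule, now before expansion
    ("OELIBDIR", "/sysroot/usr/lib"),  # placeholder expanded last
]

line = "Libs: -L/work/build -lfoo"
for old, new in rules:
    line = line.replace(old, new)
assert line == "Libs: -L/sysroot/usr/lib -lfoo"
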
diff --git a/import-layers/yocto-poky/meta/classes/blacklist.bbclass b/import-layers/yocto-poky/meta/classes/blacklist.bbclass
index a0141a82c..e58564c34 100644
--- a/import-layers/yocto-poky/meta/classes/blacklist.bbclass
+++ b/import-layers/yocto-poky/meta/classes/blacklist.bbclass
@@ -12,33 +12,8 @@
# PNBLACKLIST[pn] = "message"
#
-# Cope with PNBLACKLIST flags for multilib case
-addhandler blacklist_multilib_eventhandler
-blacklist_multilib_eventhandler[eventmask] = "bb.event.ConfigParsed"
-python blacklist_multilib_eventhandler() {
- multilibs = e.data.getVar('MULTILIBS', True)
- if not multilibs:
- return
-
- # this block has been copied from base.bbclass so keep it in sync
- prefixes = []
- for ext in multilibs.split():
- eext = ext.split(':')
- if len(eext) > 1 and eext[0] == 'multilib':
- prefixes.append(eext[1])
-
- blacklists = e.data.getVarFlags('PNBLACKLIST') or {}
- for pkg, reason in blacklists.items():
- if pkg.endswith(("-native", "-crosssdk")) or pkg.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in pkg:
- continue
- for p in prefixes:
- newpkg = p + "-" + pkg
- if not e.data.getVarFlag('PNBLACKLIST', newpkg, True):
- e.data.setVarFlag('PNBLACKLIST', newpkg, reason)
-}
-
python () {
- blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN', True), True)
+ blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN'))
if blacklist:
raise bb.parse.SkipPackage("Recipe is blacklisted: %s" % (blacklist))
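
With the multilib event handler gone (multilib variants now pick up PNBLACKLIST without per-class copying), the class reduces to one anonymous function: look up PNBLACKLIST[<pn>] and skip the recipe if a reason is recorded. A minimal stand-in using a plain dict in place of the varflags:

pnblacklist = {"foo": "does not build with the new toolchain"}  # hypothetical entry

def check_blacklist(pn):
    reason = pnblacklist.get(pn)
    if reason:
        raise RuntimeError("Recipe is blacklisted: %s" % reason)

check_blacklist("bar")  # no-op
try:
    check_blacklist("foo")
except RuntimeError as e:
    print(e)
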
diff --git a/import-layers/yocto-poky/meta/classes/bugzilla.bbclass b/import-layers/yocto-poky/meta/classes/bugzilla.bbclass
index 3fc895642..8909c2734 100644
--- a/import-layers/yocto-poky/meta/classes/bugzilla.bbclass
+++ b/import-layers/yocto-poky/meta/classes/bugzilla.bbclass
@@ -110,14 +110,14 @@ python bugzilla_eventhandler() {
return
if name == "TaskFailed":
- xmlrpc = data.getVar("BUGZILLA_XMLRPC", True)
- user = data.getVar("BUGZILLA_USER", True)
- passw = data.getVar("BUGZILLA_PASS", True)
- product = data.getVar("BUGZILLA_PRODUCT", True)
- compon = data.getVar("BUGZILLA_COMPONENT", True)
- version = data.getVar("BUGZILLA_VERSION", True)
-
- proxy = data.getVar('http_proxy', True )
+ xmlrpc = data.getVar("BUGZILLA_XMLRPC")
+ user = data.getVar("BUGZILLA_USER")
+ passw = data.getVar("BUGZILLA_PASS")
+ product = data.getVar("BUGZILLA_PRODUCT")
+ compon = data.getVar("BUGZILLA_COMPONENT")
+ version = data.getVar("BUGZILLA_VERSION")
+
+ proxy = data.getVar('http_proxy')
if (proxy):
import urllib2
s, u, p, hostport = urllib2._parse_proxy(proxy)
@@ -133,14 +133,14 @@ python bugzilla_eventhandler() {
'component': compon}
# evil hack to figure out what is going on
- debug_file = open(os.path.join(data.getVar("TMPDIR", True),"..","bugzilla-log"),"a")
+ debug_file = open(os.path.join(data.getVar("TMPDIR"),"..","bugzilla-log"),"a")
file = None
- bugname = "%(package)s-%(pv)s-autobuild" % { "package" : data.getVar("PN", True),
- "pv" : data.getVar("PV", True),
+ bugname = "%(package)s-%(pv)s-autobuild" % { "package" : data.getVar("PN"),
+ "pv" : data.getVar("PV"),
}
- log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task))
- text = "The %s step in %s failed at %s for machine %s" % (e.task, data.getVar("PN", True), data.getVar('DATETIME', True), data.getVar( 'MACHINE', True ) )
+ log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T'), event.task))
+ text = "The %s step in %s failed at %s for machine %s" % (e.task, data.getVar("PN"), data.getVar('DATETIME'), data.getVar('MACHINE') )
if len(log_file) != 0:
print >> debug_file, "Adding log file %s" % log_file[0]
file = open(log_file[0], 'r')
@@ -168,7 +168,7 @@ python bugzilla_eventhandler() {
if bug_number and log:
print >> debug_file, "The bug is known as '%s'" % bug_number
- desc = "Build log for machine %s" % (data.getVar('MACHINE', True))
+ desc = "Build log for machine %s" % (data.getVar('MACHINE'))
if not bugzilla_create_attachment(debug_file, server, args.copy(), bug_number, text, log_file[0], log, desc):
print >> debug_file, "Failed to attach the build log for bug #%s" % bug_number
else:
diff --git a/import-layers/yocto-poky/meta/classes/buildhistory.bbclass b/import-layers/yocto-poky/meta/classes/buildhistory.bbclass
index 3a5bc2c3e..3823c664a 100644
--- a/import-layers/yocto-poky/meta/classes/buildhistory.bbclass
+++ b/import-layers/yocto-poky/meta/classes/buildhistory.bbclass
@@ -47,6 +47,11 @@ sstate_install[vardepsexclude] += "buildhistory_emit_pkghistory"
# then the value added to SSTATEPOSTINSTFUNCS:
SSTATEPOSTINSTFUNCS[vardepvalueexclude] .= "| buildhistory_emit_pkghistory"
+# Similarly for our function that gets the output signatures
+SSTATEPOSTUNPACKFUNCS_append = " buildhistory_emit_outputsigs"
+sstate_installpkgdir[vardepsexclude] += "buildhistory_emit_outputsigs"
+SSTATEPOSTUNPACKFUNCS[vardepvalueexclude] .= "| buildhistory_emit_outputsigs"
+
# All items except those listed here will be removed from a recipe's
# build history directory by buildhistory_emit_pkghistory(). This is
# necessary because some of these items (package directories, files that
@@ -64,18 +69,18 @@ PATCH_GIT_USER_NAME ?= "OpenEmbedded"
# Write out metadata about this package for comparison when writing future packages
#
python buildhistory_emit_pkghistory() {
- if not d.getVar('BB_CURRENTTASK', True) in ['packagedata', 'packagedata_setscene']:
+ if not d.getVar('BB_CURRENTTASK') in ['packagedata', 'packagedata_setscene']:
return 0
- if not "package" in (d.getVar('BUILDHISTORY_FEATURES', True) or "").split():
+ if not "package" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
return 0
import re
import json
import errno
- pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
- oldpkghistdir = d.getVar('BUILDHISTORY_OLD_DIR_PACKAGE', True)
+ pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE')
+ oldpkghistdir = d.getVar('BUILDHISTORY_OLD_DIR_PACKAGE')
class RecipeInfo:
def __init__(self, name):
@@ -86,6 +91,7 @@ python buildhistory_emit_pkghistory() {
self.depends = ""
self.packages = ""
self.srcrev = ""
+ self.layer = ""
class PackageInfo:
@@ -182,12 +188,13 @@ python buildhistory_emit_pkghistory() {
items.sort()
return ' '.join(items)
- pn = d.getVar('PN', True)
- pe = d.getVar('PE', True) or "0"
- pv = d.getVar('PV', True)
- pr = d.getVar('PR', True)
+ pn = d.getVar('PN')
+ pe = d.getVar('PE') or "0"
+ pv = d.getVar('PV')
+ pr = d.getVar('PR')
+ layer = bb.utils.get_file_layer(d.getVar('FILE'), d)
- pkgdata_dir = d.getVar('PKGDATA_DIR', True)
+ pkgdata_dir = d.getVar('PKGDATA_DIR')
packages = ""
try:
with open(os.path.join(pkgdata_dir, pn)) as f:
@@ -203,7 +210,7 @@ python buildhistory_emit_pkghistory() {
raise
packagelist = packages.split()
- preserve = d.getVar('BUILDHISTORY_PRESERVE', True).split()
+ preserve = d.getVar('BUILDHISTORY_PRESERVE').split()
if not os.path.exists(pkghistdir):
bb.utils.mkdirhier(pkghistdir)
else:
@@ -223,11 +230,12 @@ python buildhistory_emit_pkghistory() {
rcpinfo.pe = pe
rcpinfo.pv = pv
rcpinfo.pr = pr
- rcpinfo.depends = sortlist(oe.utils.squashspaces(d.getVar('DEPENDS', True) or ""))
+ rcpinfo.depends = sortlist(oe.utils.squashspaces(d.getVar('DEPENDS') or ""))
rcpinfo.packages = packages
+ rcpinfo.layer = layer
write_recipehistory(rcpinfo, d)
- pkgdest = d.getVar('PKGDEST', True)
+ pkgdest = d.getVar('PKGDEST')
for pkg in packagelist:
pkgdata = {}
with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
@@ -289,11 +297,46 @@ python buildhistory_emit_pkghistory() {
bb.build.exec_func("buildhistory_list_pkg_files", d)
}
+python buildhistory_emit_outputsigs() {
+ if not "task" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
+ return
+
+ import hashlib
+
+ taskoutdir = os.path.join(d.getVar('BUILDHISTORY_DIR'), 'task', 'output')
+ bb.utils.mkdirhier(taskoutdir)
+ currenttask = d.getVar('BB_CURRENTTASK')
+ pn = d.getVar('PN')
+ taskfile = os.path.join(taskoutdir, '%s.%s' % (pn, currenttask))
+
+ cwd = os.getcwd()
+ filesigs = {}
+ for root, _, files in os.walk(cwd):
+ for fname in files:
+ if fname == 'fixmepath':
+ continue
+ fullpath = os.path.join(root, fname)
+ try:
+ if os.path.islink(fullpath):
+ sha256 = hashlib.sha256(os.readlink(fullpath).encode('utf-8')).hexdigest()
+ elif os.path.isfile(fullpath):
+ sha256 = bb.utils.sha256_file(fullpath)
+ else:
+ continue
+ except OSError:
+ bb.warn('buildhistory: unable to read %s to get output signature' % fullpath)
+ continue
+ filesigs[os.path.relpath(fullpath, cwd)] = sha256
+ with open(taskfile, 'w') as f:
+ for fpath, fsig in sorted(filesigs.items(), key=lambda item: item[0]):
+ f.write('%s %s\n' % (fpath, fsig))
+}
+
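
The new output-signature dump only runs when "task" is listed in BUILDHISTORY_FEATURES; an illustrative local.conf fragment enabling it (the values are examples, not class defaults):

    INHERIT += "buildhistory"
    BUILDHISTORY_FEATURES = "image package task"
    BUILDHISTORY_COMMIT = "1"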
def write_recipehistory(rcpinfo, d):
bb.debug(2, "Writing recipe history")
- pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
+ pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE')
infofile = os.path.join(pkghistdir, "latest")
with open(infofile, "w") as f:
@@ -303,12 +346,13 @@ def write_recipehistory(rcpinfo, d):
f.write(u"PR = %s\n" % rcpinfo.pr)
f.write(u"DEPENDS = %s\n" % rcpinfo.depends)
f.write(u"PACKAGES = %s\n" % rcpinfo.packages)
+ f.write(u"LAYER = %s\n" % rcpinfo.layer)
def write_pkghistory(pkginfo, d):
bb.debug(2, "Writing package history for package %s" % pkginfo.name)
- pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
+ pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE')
pkgpath = os.path.join(pkghistdir, pkginfo.name)
if not os.path.exists(pkgpath):
@@ -369,7 +413,7 @@ def buildhistory_list_installed(d, rootfs_type="image"):
pkgs = sdk_list_installed_packages(d, rootfs_type == "sdk_target")
for output_type, output_file in process_list:
- output_file_full = os.path.join(d.getVar('WORKDIR', True), output_file)
+ output_file_full = os.path.join(d.getVar('WORKDIR'), output_file)
with open(output_file_full, 'w') as output:
output.write(format_pkg_list(pkgs, output_type))
@@ -402,19 +446,26 @@ buildhistory_get_installed() {
# Produce dependency graph
# First, quote each name to handle characters that cause issues for dot
- sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps.txt > $1/depends.tmp && \
+ sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps.txt > $1/depends.tmp &&
rm ${WORKDIR}/bh_installed_pkgs_deps.txt
- # Change delimiter from pipe to -> and set style for recommend lines
- sed -i -e 's:|: -> :' -e 's:"\[REC\]":[style=dotted]:' -e 's:$:;:' $1/depends.tmp
+ # Remove lines with rpmlib(...) and config(...) dependencies, change the
+ # delimiter from pipe to "->", set the style for recommend lines and
+ # turn versioned dependencies into edge labels.
+ sed -i -e '/rpmlib(/d' \
+ -e '/config(/d' \
+ -e 's:|: -> :' \
+ -e 's:"\[REC\]":[style=dotted]:' \
+ -e 's:"\([<>=]\+\)" "\([^"]*\)":[label="\1 \2"]:' \
+ $1/depends.tmp
# Add header, sorted and de-duped contents and footer and then delete the temp file
printf "digraph depends {\n node [shape=plaintext]\n" > $1/depends.dot
- cat $1/depends.tmp | sort | uniq >> $1/depends.dot
+ cat $1/depends.tmp | sort -u >> $1/depends.dot
echo "}" >> $1/depends.dot
rm $1/depends.tmp
# Produce installed package sizes list
oe-pkgdata-util -p ${PKGDATA_DIR} read-value "PKGSIZE" -n -f $pkgcache > $1/installed-package-sizes.tmp
- cat $1/installed-package-sizes.tmp | awk '{print $2 "\tKiB " $1}' | sort -n -r > $1/installed-package-sizes.txt
+ cat $1/installed-package-sizes.tmp | awk '{print $2 "\tKiB\t" $1}' | sort -n -r > $1/installed-package-sizes.txt
rm $1/installed-package-sizes.tmp
# We're now done with the cache, delete it
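
To make the sed mangling concrete, an illustrative before/after for one dependency line (the exact layout of bh_installed_pkgs_deps.txt is assumed here):

    # after the quoting pass into depends.tmp:
    "busybox"|"libc6" ">=" "2.25"
    # after the in-place edits, as appended to depends.dot:
    "busybox" -> "libc6" [label=">= 2.25"]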
@@ -550,7 +601,9 @@ END
python buildhistory_get_extra_sdkinfo() {
import operator
import math
- if d.getVar('BB_CURRENTTASK', True) == 'populate_sdk_ext':
+
+ if d.getVar('BB_CURRENTTASK') == 'populate_sdk_ext' and \
+ "sdk" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
tasksizes = {}
filesizes = {}
for root, _, files in os.walk(d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')):
@@ -573,10 +626,14 @@ python buildhistory_get_extra_sdkinfo() {
# By using ROOTFS_POSTUNINSTALL_COMMAND we get in after uninstallation of
# unneeded packages but before the removal of packaging files
-ROOTFS_POSTUNINSTALL_COMMAND += " buildhistory_list_installed_image ;\
- buildhistory_get_image_installed ; "
+ROOTFS_POSTUNINSTALL_COMMAND += "buildhistory_list_installed_image ;"
+ROOTFS_POSTUNINSTALL_COMMAND += "buildhistory_get_image_installed ;"
+ROOTFS_POSTUNINSTALL_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_image ;| buildhistory_get_image_installed ;"
+ROOTFS_POSTUNINSTALL_COMMAND[vardepsexclude] += "buildhistory_list_installed_image buildhistory_get_image_installed"
-IMAGE_POSTPROCESS_COMMAND += " buildhistory_get_imageinfo ; "
+IMAGE_POSTPROCESS_COMMAND += "buildhistory_get_imageinfo ;"
+IMAGE_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_imageinfo ;"
+IMAGE_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_imageinfo"
# We want these to be the last run so that we get called after complementary package installation
POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_target;"
@@ -590,11 +647,21 @@ POPULATE_SDK_POST_HOST_COMMAND[vardepvalueexclude] .= "| buildhistory_list_insta
SDK_POSTPROCESS_COMMAND_append = " buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
SDK_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
+python buildhistory_write_sigs() {
+ if not "task" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
+ return
+
+ # Create sigs file
+ if hasattr(bb.parse.siggen, 'dump_siglist'):
+ taskoutdir = os.path.join(d.getVar('BUILDHISTORY_DIR'), 'task')
+ bb.utils.mkdirhier(taskoutdir)
+ bb.parse.siggen.dump_siglist(os.path.join(taskoutdir, 'tasksigs.txt'))
+}
+
def buildhistory_get_build_id(d):
- if d.getVar('BB_WORKERCONTEXT', True) != '1':
+ if d.getVar('BB_WORKERCONTEXT') != '1':
return ""
localdata = bb.data.createCopy(d)
- bb.data.update_data(localdata)
statuslines = []
for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
g = globals()
@@ -605,12 +672,12 @@ def buildhistory_get_build_id(d):
if flines:
statuslines.extend(flines)
- statusheader = d.getVar('BUILDCFG_HEADER', True)
+ statusheader = d.getVar('BUILDCFG_HEADER')
return('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
def buildhistory_get_metadata_revs(d):
# We want an easily machine-readable format here, so get_layers_branch_rev isn't quite what we want
- layers = (d.getVar("BBLAYERS", True) or "").split()
+ layers = (d.getVar("BBLAYERS") or "").split()
medadata_revs = ["%-17s = %s:%s" % (os.path.basename(i), \
base_get_metadata_git_branch(i, None).strip(), \
base_get_metadata_git_revision(i, None)) \
@@ -622,7 +689,7 @@ def outputvars(vars, listvars, d):
listvars = listvars.split()
ret = ""
for var in vars:
- value = d.getVar(var, True) or ""
+ value = d.getVar(var) or ""
if var in listvars:
# Squash out spaces
value = oe.utils.squashspaces(value)
@@ -630,17 +697,17 @@ def outputvars(vars, listvars, d):
return ret.rstrip('\n')
def buildhistory_get_imagevars(d):
- if d.getVar('BB_WORKERCONTEXT', True) != '1':
+ if d.getVar('BB_WORKERCONTEXT') != '1':
return ""
imagevars = "DISTRO DISTRO_VERSION USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE ROOTFS_POSTPROCESS_COMMAND IMAGE_POSTPROCESS_COMMAND"
listvars = "USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS PACKAGE_EXCLUDE"
return outputvars(imagevars, listvars, d)
def buildhistory_get_sdkvars(d):
- if d.getVar('BB_WORKERCONTEXT', True) != '1':
+ if d.getVar('BB_WORKERCONTEXT') != '1':
return ""
sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
- if d.getVar('BB_CURRENTTASK', True) == 'populate_sdk_ext':
+ if d.getVar('BB_CURRENTTASK') == 'populate_sdk_ext':
# Extensible SDK uses some additional variables
sdkvars += " SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS SDK_INCLUDE_PKGDATA SDK_INCLUDE_TOOLCHAIN"
listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST"
@@ -735,16 +802,16 @@ END
}
python buildhistory_eventhandler() {
- if e.data.getVar('BUILDHISTORY_FEATURES', True).strip():
- reset = e.data.getVar("BUILDHISTORY_RESET", True)
- olddir = e.data.getVar("BUILDHISTORY_OLD_DIR", True)
+ if e.data.getVar('BUILDHISTORY_FEATURES').strip():
+ reset = e.data.getVar("BUILDHISTORY_RESET")
+ olddir = e.data.getVar("BUILDHISTORY_OLD_DIR")
if isinstance(e, bb.event.BuildStarted):
if reset:
import shutil
# Clean up after potentially interrupted build.
if os.path.isdir(olddir):
shutil.rmtree(olddir)
- rootdir = e.data.getVar("BUILDHISTORY_DIR", True)
+ rootdir = e.data.getVar("BUILDHISTORY_DIR")
entries = [ x for x in os.listdir(rootdir) if not x.startswith('.') ]
bb.utils.mkdirhier(olddir)
for entry in entries:
@@ -754,8 +821,9 @@ python buildhistory_eventhandler() {
if reset:
import shutil
shutil.rmtree(olddir)
- if e.data.getVar("BUILDHISTORY_COMMIT", True) == "1":
+ if e.data.getVar("BUILDHISTORY_COMMIT") == "1":
bb.note("Writing buildhistory")
+ bb.build.exec_func("buildhistory_write_sigs", d)
localdata = bb.data.createCopy(e.data)
localdata.setVar('BUILDHISTORY_BUILD_FAILURES', str(e._failures))
interrupted = getattr(e, '_interrupted', 0)
@@ -774,7 +842,7 @@ def _get_srcrev_values(d):
"""
scms = []
- fetcher = bb.fetch.Fetch(d.getVar('SRC_URI', True).split(), d)
+ fetcher = bb.fetch.Fetch(d.getVar('SRC_URI').split(), d)
urldata = fetcher.ud
for u in urldata:
if urldata[u].method.supports_srcrev():
@@ -806,7 +874,7 @@ def _get_srcrev_values(d):
do_fetch[postfuncs] += "write_srcrev"
do_fetch[vardepsexclude] += "write_srcrev"
python write_srcrev() {
- pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
+ pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE')
srcrevfile = os.path.join(pkghistdir, 'latest_srcrev')
srcrevs, tag_srcrevs = _get_srcrev_values(d)
@@ -833,12 +901,12 @@ python write_srcrev() {
f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev))
f.write('SRCREV_%s = "%s"\n' % (name, srcrev))
else:
- f.write('SRCREV = "%s"\n' % srcrevs.values())
+ f.write('SRCREV = "%s"\n' % next(iter(srcrevs.values())))
if len(tag_srcrevs) > 0:
for name, srcrev in tag_srcrevs.items():
f.write('# tag_%s = "%s"\n' % (name, srcrev))
if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
- pkg = d.getVar('PN', True)
+ pkg = d.getVar('PN')
bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
else:
diff --git a/import-layers/yocto-poky/meta/classes/buildstats-summary.bbclass b/import-layers/yocto-poky/meta/classes/buildstats-summary.bbclass
index b86abcc3f..f9b241b6c 100644
--- a/import-layers/yocto-poky/meta/classes/buildstats-summary.bbclass
+++ b/import-layers/yocto-poky/meta/classes/buildstats-summary.bbclass
@@ -7,7 +7,7 @@ python buildstats_summary () {
if not os.path.exists(bsdir):
return
- sstatetasks = (e.data.getVar('SSTATETASKS', True) or '').split()
+ sstatetasks = (e.data.getVar('SSTATETASKS') or '').split()
built = collections.defaultdict(lambda: [set(), set()])
for pf in os.listdir(bsdir):
taskdir = os.path.join(bsdir, pf)
diff --git a/import-layers/yocto-poky/meta/classes/buildstats.bbclass b/import-layers/yocto-poky/meta/classes/buildstats.bbclass
index 599a21998..960653c70 100644
--- a/import-layers/yocto-poky/meta/classes/buildstats.bbclass
+++ b/import-layers/yocto-poky/meta/classes/buildstats.bbclass
@@ -31,6 +31,11 @@ def get_process_cputime(pid):
i = f.readline().strip()
if not i:
break
+ if not ":" in i:
+ # one more extra line is appended (empty or containing "0")
+ # most probably due to race condition in kernel while
+ # updating IO stats
+ break
i = i.split(": ")
iostats[i[0]] = i[1]
resources = resource.getrusage(resource.RUSAGE_SELF)
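
As a standalone sketch of the defensive parsing added above, assuming the usual "key: value" layout of /proc/<pid>/io on Linux:

    def parse_proc_io(pid):
        # Collect "key: value" pairs, tolerating the occasional trailing
        # junk line the kernel may append while updating the stats.
        iostats = {}
        with open("/proc/%d/io" % pid) as f:
            for line in f:
                line = line.strip()
                if not line or ":" not in line:
                    break
                key, value = line.split(": ", 1)
                iostats[key] = value
        return iostats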
@@ -75,13 +80,13 @@ def get_buildtimedata(var, d):
return timediff, cpuperc
def write_task_data(status, logfile, e, d):
- bn = d.getVar('BUILDNAME', True)
- bsdir = os.path.join(d.getVar('BUILDSTATS_BASE', True), bn)
+ bn = d.getVar('BUILDNAME')
+ bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
with open(os.path.join(logfile), "a") as f:
elapsedtime = get_timedata("__timedata_task", d, e.time)
if elapsedtime:
- f.write(d.expand("${PF}: %s: Elapsed time: %0.2f seconds \n" %
- (e.task, elapsedtime)))
+ f.write(d.expand("${PF}: %s\n" % e.task))
+ f.write(d.expand("Elapsed time: %0.2f seconds\n" % elapsedtime))
cpu, iostats, resources, childres = get_process_cputime(os.getpid())
if cpu:
f.write("utime: %s\n" % cpu['utime'])
@@ -106,9 +111,9 @@ python run_buildstats () {
import bb.event
import time, subprocess, platform
- bn = d.getVar('BUILDNAME', True)
- bsdir = os.path.join(d.getVar('BUILDSTATS_BASE', True), bn)
- taskdir = os.path.join(bsdir, d.getVar('PF', True))
+ bn = d.getVar('BUILDNAME')
+ bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
+ taskdir = os.path.join(bsdir, d.getVar('PF'))
if isinstance(e, bb.event.BuildStarted):
########################################################################
@@ -162,7 +167,7 @@ python run_buildstats () {
if e.task == "do_rootfs":
bs = os.path.join(bsdir, "build_stats")
with open(bs, "a") as f:
- rootfs = d.getVar('IMAGE_ROOTFS', True)
+ rootfs = d.getVar('IMAGE_ROOTFS')
if os.path.isdir(rootfs):
try:
rootfs_size = subprocess.check_output(["du", "-sh", rootfs],
@@ -188,3 +193,27 @@ python run_buildstats () {
addhandler run_buildstats
run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
+python runqueue_stats () {
+ import buildstats
+ from bb import event, runqueue
+ # We should not record any samples before the first task has started,
+ # because that's the first activity shown in the process chart.
+ # Besides, by then we can be sure that the build variables we
+ # need to find the output directory are available.
+ # The persistent SystemStats is stored in the datastore and
+ # closed when the build is done.
+ system_stats = d.getVar('_buildstats_system_stats', False)
+ if not system_stats and isinstance(e, (bb.runqueue.sceneQueueTaskStarted, bb.runqueue.runQueueTaskStarted)):
+ system_stats = buildstats.SystemStats(d)
+ d.setVar('_buildstats_system_stats', system_stats)
+ if system_stats:
+ # Ensure that we sample at important events.
+ done = isinstance(e, bb.event.BuildCompleted)
+ system_stats.sample(e, force=done)
+ if done:
+ system_stats.close()
+ d.delVar('_buildstats_system_stats')
+}
+
+addhandler runqueue_stats
+runqueue_stats[eventmask] = "bb.runqueue.sceneQueueTaskStarted bb.runqueue.runQueueTaskStarted bb.event.HeartbeatEvent bb.event.BuildCompleted bb.event.MonitorDiskEvent"
diff --git a/import-layers/yocto-poky/meta/classes/ccache.bbclass b/import-layers/yocto-poky/meta/classes/ccache.bbclass
index 2e9837cf0..d58c8f6e5 100644
--- a/import-layers/yocto-poky/meta/classes/ccache.bbclass
+++ b/import-layers/yocto-poky/meta/classes/ccache.bbclass
@@ -1,6 +1,15 @@
-CCACHE = "${@bb.utils.which(d.getVar('PATH', True), 'ccache') and 'ccache '}"
-export CCACHE_DIR ?= "${TMPDIR}/ccache/${MULTIMACH_HOST_SYS}/${PN}"
+CCACHE = "${@bb.utils.which(d.getVar('PATH'), 'ccache') and 'ccache '}"
+export CCACHE_DIR ?= "${TMPDIR}/ccache/${MULTIMACH_TARGET_SYS}/${PN}"
CCACHE_DISABLE[unexport] = "1"
+# We need to stop ccache considering the current directory or the
+# debug-prefix-map target directory to be significant when calculating
+# its hash. Without this the cache would be invalidated every time
+# ${PV} or ${PR} change.
+export CCACHE_NOHASHDIR ?= "1"
+
+DEPENDS_append_class-target = " ccache-native"
+DEPENDS[vardepvalueexclude] = " ccache-native"
+
do_configure[dirs] =+ "${CCACHE_DIR}"
do_kernel_configme[dirs] =+ "${CCACHE_DIR}"
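
The class is typically enabled globally rather than inherited per recipe; an illustrative local.conf fragment (the CCACHE_DIR override is optional and shown only as an example):

    INHERIT += "ccache"
    #CCACHE_DIR = "${TMPDIR}/ccache/${MULTIMACH_TARGET_SYS}"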
diff --git a/import-layers/yocto-poky/meta/classes/chrpath.bbclass b/import-layers/yocto-poky/meta/classes/chrpath.bbclass
index 3b5cd37f7..ad3c3975a 100644
--- a/import-layers/yocto-poky/meta/classes/chrpath.bbclass
+++ b/import-layers/yocto-poky/meta/classes/chrpath.bbclass
@@ -17,19 +17,24 @@ def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d):
# Throw away everything other than the rpath list
curr_rpath = out.partition("RPATH=")[2]
#bb.note("Current rpath for %s is %s" % (fpath, curr_rpath.strip()))
- rpaths = curr_rpath.split(":")
+ rpaths = curr_rpath.strip().split(":")
new_rpaths = []
modified = False
for rpath in rpaths:
# If rpath is already dynamic copy it to new_rpath and continue
if rpath.find("$ORIGIN") != -1:
- new_rpaths.append(rpath.strip())
+ new_rpaths.append(rpath)
continue
rpath = os.path.normpath(rpath)
if baseprefix not in rpath and tmpdir not in rpath:
- new_rpaths.append(rpath.strip())
+ # Skip standard search paths
+ if rpath in ['/lib', '/usr/lib', '/lib64', '/usr/lib64']:
+ bb.warn("Skipping RPATH %s as it is a standard search path for %s" % (rpath, fpath))
+ modified = True
+ continue
+ new_rpaths.append(rpath)
continue
- new_rpaths.append("$ORIGIN/" + os.path.relpath(rpath.strip(), os.path.dirname(fpath.replace(rootdir, "/"))))
+ new_rpaths.append("$ORIGIN/" + os.path.relpath(rpath, os.path.dirname(fpath.replace(rootdir, "/"))))
modified = True
# if we have modified some rpaths call chrpath to update the binary
@@ -39,7 +44,7 @@ def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d):
p = sub.Popen([cmd, '-r', args, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
out, err = p.communicate()
if p.returncode != 0:
- bb.fatal("%s: chrpath command failed with exit code %d:\n%s%s" % (d.getVar('PN', True), p.returncode, out, err))
+ bb.fatal("%s: chrpath command failed with exit code %d:\n%s%s" % (d.getVar('PN'), p.returncode, out, err))
def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d):
import subprocess as sub
@@ -67,7 +72,7 @@ def process_dir (rootdir, directory, d):
cmd = d.expand('${CHRPATH_BIN}')
tmpdir = os.path.normpath(d.getVar('TMPDIR', False))
baseprefix = os.path.normpath(d.expand('${base_prefix}'))
- hostos = d.getVar("HOST_OS", True)
+ hostos = d.getVar("HOST_OS")
#bb.debug("Checking %s for binaries to process" % directory)
if not os.path.exists(directory):
diff --git a/import-layers/yocto-poky/meta/classes/cmake.bbclass b/import-layers/yocto-poky/meta/classes/cmake.bbclass
index fad0baa51..12df617ad 100644
--- a/import-layers/yocto-poky/meta/classes/cmake.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cmake.bbclass
@@ -1,5 +1,5 @@
# Path to the CMake file to process.
-OECMAKE_SOURCEPATH ?= "${S}"
+OECMAKE_SOURCEPATH ??= "${S}"
DEPENDS_prepend = "cmake-native "
B = "${WORKDIR}/build"
@@ -42,11 +42,15 @@ def map_target_arch_to_uname_arch(target_arch):
return target_arch
cmake_do_generate_toolchain_file() {
+ if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
+ cmake_crosscompiling="set( CMAKE_CROSSCOMPILING FALSE )"
+ fi
cat > ${WORKDIR}/toolchain.cmake <<EOF
# CMake system name must be something like "Linux".
# This is important for cross-compiling.
+$cmake_crosscompiling
set( CMAKE_SYSTEM_NAME `echo ${TARGET_OS} | sed -e 's/^./\u&/' -e 's/^\(Linux\).*/\1/'` )
-set( CMAKE_SYSTEM_PROCESSOR ${@map_target_arch_to_uname_arch(d.getVar('TARGET_ARCH', True))} )
+set( CMAKE_SYSTEM_PROCESSOR ${@map_target_arch_to_uname_arch(d.getVar('TARGET_ARCH'))} )
set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} )
@@ -103,24 +107,24 @@ cmake_do_configure() {
# Just like autotools cmake can use a site file to cache result that need generated binaries to run
if [ -e ${WORKDIR}/site-file.cmake ] ; then
- OECMAKE_SITEFILE=" -C ${WORKDIR}/site-file.cmake"
+ oecmake_sitefile="-C ${WORKDIR}/site-file.cmake"
else
- OECMAKE_SITEFILE=""
+ oecmake_sitefile=
fi
cmake \
- ${OECMAKE_SITEFILE} \
+ $oecmake_sitefile \
${OECMAKE_SOURCEPATH} \
-DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
- -DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir', True), d.getVar('prefix', True))} \
- -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir', True), d.getVar('prefix', True))} \
- -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir', True), d.getVar('prefix', True))} \
+ -DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir'), d.getVar('prefix'))} \
+ -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir'), d.getVar('prefix'))} \
+ -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir'), d.getVar('prefix'))} \
-DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
- -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir', True), d. getVar('prefix', True))} \
+ -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir'), d.getVar('prefix'))} \
-DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
- -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir', True), d.getVar('prefix', True))} \
- -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir', True), d.getVar('prefix', True))} \
- -DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir', True), d.getVar('prefix', True))} \
+ -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir'), d.getVar('prefix'))} \
+ -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir'), d.getVar('prefix'))} \
+ -DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir'), d.getVar('prefix'))} \
-DCMAKE_INSTALL_SO_NO_EXE=0 \
-DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
-DCMAKE_VERBOSE_MAKEFILE=1 \
diff --git a/import-layers/yocto-poky/meta/classes/cml1.bbclass b/import-layers/yocto-poky/meta/classes/cml1.bbclass
index 583480626..38e6613c4 100644
--- a/import-layers/yocto-poky/meta/classes/cml1.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cml1.bbclass
@@ -26,8 +26,8 @@ python do_menuconfig() {
except OSError:
mtime = 0
- oe_terminal("${SHELL} -c \"make %s; if [ \$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND', True),
- d.getVar('PN', True ) + ' Configuration', d)
+ oe_terminal("${SHELL} -c \"make %s; if [ \$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
+ d.getVar('PN') + ' Configuration', d)
# FIXME this check can be removed when the minimum bitbake version has been bumped
if hasattr(bb.build, 'write_taint'):
@@ -49,7 +49,7 @@ python do_diffconfig() {
import shutil
import subprocess
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
fragment = workdir + '/fragment.cfg'
configorig = '.config.orig'
config = '.config'
diff --git a/import-layers/yocto-poky/meta/classes/compress_doc.bbclass b/import-layers/yocto-poky/meta/classes/compress_doc.bbclass
index 8073c173e..069d53492 100644
--- a/import-layers/yocto-poky/meta/classes/compress_doc.bbclass
+++ b/import-layers/yocto-poky/meta/classes/compress_doc.bbclass
@@ -31,25 +31,25 @@ DOC_DECOMPRESS_CMD[xz] ?= "unxz -v"
PACKAGE_PREPROCESS_FUNCS += "package_do_compress_doc compress_doc_updatealternatives"
python package_do_compress_doc() {
- compress_mode = d.getVar('DOC_COMPRESS', True)
- compress_list = (d.getVar('DOC_COMPRESS_LIST', True) or '').split()
+ compress_mode = d.getVar('DOC_COMPRESS')
+ compress_list = (d.getVar('DOC_COMPRESS_LIST') or '').split()
if compress_mode not in compress_list:
bb.fatal('Compression policy %s not supported (not listed in %s)\n' % (compress_mode, compress_list))
- dvar = d.getVar('PKGD', True)
+ dvar = d.getVar('PKGD')
compress_cmds = {}
decompress_cmds = {}
for mode in compress_list:
- compress_cmds[mode] = d.getVarFlag('DOC_COMPRESS_CMD', mode, True)
- decompress_cmds[mode] = d.getVarFlag('DOC_DECOMPRESS_CMD', mode, True)
+ compress_cmds[mode] = d.getVarFlag('DOC_COMPRESS_CMD', mode)
+ decompress_cmds[mode] = d.getVarFlag('DOC_DECOMPRESS_CMD', mode)
- mandir = os.path.abspath(dvar + os.sep + d.getVar("mandir", True))
+ mandir = os.path.abspath(dvar + os.sep + d.getVar("mandir"))
if os.path.exists(mandir):
# Decompress doc files whose format is not compress_mode
decompress_doc(mandir, compress_mode, decompress_cmds)
compress_doc(mandir, compress_mode, compress_cmds)
- infodir = os.path.abspath(dvar + os.sep + d.getVar("infodir", True))
+ infodir = os.path.abspath(dvar + os.sep + d.getVar("infodir"))
if os.path.exists(infodir):
# Decompress doc files whose format is not compress_mode
decompress_doc(infodir, compress_mode, decompress_cmds)
@@ -218,18 +218,18 @@ python compress_doc_updatealternatives () {
if not bb.data.inherits_class('update-alternatives', d):
return
- mandir = d.getVar("mandir", True)
- infodir = d.getVar("infodir", True)
- compress_mode = d.getVar('DOC_COMPRESS', True)
- for pkg in (d.getVar('PACKAGES', True) or "").split():
- old_names = (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split()
+ mandir = d.getVar("mandir")
+ infodir = d.getVar("infodir")
+ compress_mode = d.getVar('DOC_COMPRESS')
+ for pkg in (d.getVar('PACKAGES') or "").split():
+ old_names = (d.getVar('ALTERNATIVE_%s' % pkg) or "").split()
new_names = []
for old_name in old_names:
- old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name, True)
- old_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name, True) or \
- d.getVarFlag('ALTERNATIVE_TARGET', old_name, True) or \
- d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or \
- d.getVar('ALTERNATIVE_TARGET', True) or \
+ old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name)
+ old_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name) or \
+ d.getVarFlag('ALTERNATIVE_TARGET', old_name) or \
+ d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or \
+ d.getVar('ALTERNATIVE_TARGET') or \
old_link
# Sometimes old_target is specified as relative to the link name.
old_target = os.path.join(os.path.dirname(old_link), old_target)
@@ -241,15 +241,15 @@ python compress_doc_updatealternatives () {
new_target = old_target + '.' + compress_mode
d.delVarFlag('ALTERNATIVE_LINK_NAME', old_name)
d.setVarFlag('ALTERNATIVE_LINK_NAME', new_name, new_link)
- if d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name, True):
+ if d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name):
d.delVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name)
d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, new_name, new_target)
- elif d.getVarFlag('ALTERNATIVE_TARGET', old_name, True):
+ elif d.getVarFlag('ALTERNATIVE_TARGET', old_name):
d.delVarFlag('ALTERNATIVE_TARGET', old_name)
d.setVarFlag('ALTERNATIVE_TARGET', new_name, new_target)
- elif d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True):
+ elif d.getVar('ALTERNATIVE_TARGET_%s' % pkg):
d.setVar('ALTERNATIVE_TARGET_%s' % pkg, new_target)
- elif d.getVar('ALTERNATIVE_TARGET', old_name, True):
+ elif d.getVar('ALTERNATIVE_TARGET'):
d.setVar('ALTERNATIVE_TARGET', new_target)
new_names.append(new_name)
diff --git a/import-layers/yocto-poky/meta/classes/copyleft_compliance.bbclass b/import-layers/yocto-poky/meta/classes/copyleft_compliance.bbclass
index 907c1836b..eabf12ce7 100644
--- a/import-layers/yocto-poky/meta/classes/copyleft_compliance.bbclass
+++ b/import-layers/yocto-poky/meta/classes/copyleft_compliance.bbclass
@@ -13,7 +13,7 @@ python do_prepare_copyleft_sources () {
import os.path
import shutil
- p = d.getVar('P', True)
+ p = d.getVar('P')
included, reason = copyleft_should_include(d)
if not included:
bb.debug(1, 'copyleft: %s is excluded: %s' % (p, reason))
@@ -21,13 +21,13 @@ python do_prepare_copyleft_sources () {
else:
bb.debug(1, 'copyleft: %s is included: %s' % (p, reason))
- sources_dir = d.getVar('COPYLEFT_SOURCES_DIR', True)
- dl_dir = d.getVar('DL_DIR', True)
- src_uri = d.getVar('SRC_URI', True).split()
+ sources_dir = d.getVar('COPYLEFT_SOURCES_DIR')
+ dl_dir = d.getVar('DL_DIR')
+ src_uri = d.getVar('SRC_URI').split()
fetch = bb.fetch2.Fetch(src_uri, d)
ud = fetch.ud
- pf = d.getVar('PF', True)
+ pf = d.getVar('PF')
dest = os.path.join(sources_dir, pf)
shutil.rmtree(dest, ignore_errors=True)
bb.utils.mkdirhier(dest)
diff --git a/import-layers/yocto-poky/meta/classes/copyleft_filter.bbclass b/import-layers/yocto-poky/meta/classes/copyleft_filter.bbclass
index 46be7f7d2..c36bce431 100644
--- a/import-layers/yocto-poky/meta/classes/copyleft_filter.bbclass
+++ b/import-layers/yocto-poky/meta/classes/copyleft_filter.bbclass
@@ -6,7 +6,7 @@
#
# vi:sts=4:sw=4:et
-COPYLEFT_LICENSE_INCLUDE ?= 'GPL* LGPL*'
+COPYLEFT_LICENSE_INCLUDE ?= 'GPL* LGPL* AGPL*'
COPYLEFT_LICENSE_INCLUDE[type] = 'list'
COPYLEFT_LICENSE_INCLUDE[doc] = 'Space separated list of globs which include licenses'
@@ -47,32 +47,32 @@ def copyleft_should_include(d):
import oe.license
from fnmatch import fnmatchcase as fnmatch
- included, motive = False, 'recipe did not match anything'
-
- recipe_type = d.getVar('COPYLEFT_RECIPE_TYPE', True)
+ recipe_type = d.getVar('COPYLEFT_RECIPE_TYPE')
if recipe_type not in oe.data.typed_value('COPYLEFT_RECIPE_TYPES', d):
- include, motive = False, 'recipe type "%s" is excluded' % recipe_type
+ included, motive = False, 'recipe type "%s" is excluded' % recipe_type
+ else:
+ included, motive = False, 'recipe did not match anything'
- include = oe.data.typed_value('COPYLEFT_LICENSE_INCLUDE', d)
- exclude = oe.data.typed_value('COPYLEFT_LICENSE_EXCLUDE', d)
+ include = oe.data.typed_value('COPYLEFT_LICENSE_INCLUDE', d)
+ exclude = oe.data.typed_value('COPYLEFT_LICENSE_EXCLUDE', d)
- try:
- is_included, reason = oe.license.is_included(d.getVar('LICENSE', True), include, exclude)
- except oe.license.LicenseError as exc:
- bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
- else:
- if is_included:
- if reason:
- included, motive = True, 'recipe has included licenses: %s' % ', '.join(reason)
- else:
- included, motive = False, 'recipe does not include a copyleft license'
+ try:
+ is_included, reason = oe.license.is_included(d.getVar('LICENSE'), include, exclude)
+ except oe.license.LicenseError as exc:
+ bb.fatal('%s: %s' % (d.getVar('PF'), exc))
else:
- included, motive = False, 'recipe has excluded licenses: %s' % ', '.join(reason)
+ if is_included:
+ if reason:
+ included, motive = True, 'recipe has included licenses: %s' % ', '.join(reason)
+ else:
+ included, motive = False, 'recipe does not include a copyleft license'
+ else:
+ included, motive = False, 'recipe has excluded licenses: %s' % ', '.join(reason)
- if any(fnmatch(d.getVar('PN', True), name) \
+ if any(fnmatch(d.getVar('PN'), name) \
for name in oe.data.typed_value('COPYLEFT_PN_INCLUDE', d)):
included, motive = True, 'recipe included by name'
- if any(fnmatch(d.getVar('PN', True), name) \
+ if any(fnmatch(d.getVar('PN'), name) \
for name in oe.data.typed_value('COPYLEFT_PN_EXCLUDE', d)):
included, motive = False, 'recipe excluded by name'
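
An illustrative configuration for the filter (the PN globs are placeholders; the license values mirror the class defaults shown above):

    COPYLEFT_LICENSE_INCLUDE = 'GPL* LGPL* AGPL*'
    COPYLEFT_LICENSE_EXCLUDE = 'CLOSED Proprietary'
    COPYLEFT_PN_INCLUDE = 'foo'
    COPYLEFT_PN_EXCLUDE = 'bar-*'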
diff --git a/import-layers/yocto-poky/meta/classes/core-image.bbclass b/import-layers/yocto-poky/meta/classes/core-image.bbclass
index 8431440db..a9a2cec68 100644
--- a/import-layers/yocto-poky/meta/classes/core-image.bbclass
+++ b/import-layers/yocto-poky/meta/classes/core-image.bbclass
@@ -24,11 +24,15 @@
# - hwcodecs - Install hardware acceleration codecs
# - package-management - installs package management tools and preserves the package manager database
# - debug-tweaks - makes an image suitable for development, e.g. allowing passwordless root logins
+# - empty-root-password - sets an empty root password (implied by debug-tweaks)
+# - allow-empty-password - allows logins from accounts with an empty password (implied by debug-tweaks)
+# - post-install-logging - logs postinstall script output to /var/log/postinstall.log on first boot (implied by debug-tweaks)
# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
# - doc-pkgs - documentation packages for all installed packages in the rootfs
# - ptest-pkgs - ptest packages for all ptest-enabled recipes
# - read-only-rootfs - tweaks an image to support read-only rootfs
+# - splash - bootup splash screen
#
FEATURE_PACKAGES_x11 = "packagegroup-core-x11"
FEATURE_PACKAGES_x11-base = "packagegroup-core-x11-base"
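
An illustrative image recipe fragment drawing on the feature list above (the particular selection is an example only):

    IMAGE_FEATURES += "splash ssh-server-dropbear debug-tweaks"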
diff --git a/import-layers/yocto-poky/meta/classes/cpan-base.bbclass b/import-layers/yocto-poky/meta/classes/cpan-base.bbclass
index 55ac05269..577fcd63c 100644
--- a/import-layers/yocto-poky/meta/classes/cpan-base.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cpan-base.bbclass
@@ -7,27 +7,7 @@ FILES_${PN} += "${libdir}/perl ${datadir}/perl"
DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
RDEPENDS_${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
-PERL_OWN_DIR = "${@["", "/perl-native"][(bb.data.inherits_class('native', d))]}"
-
-# Determine the staged version of perl from the perl configuration file
-# Assign vardepvalue, because otherwise signature is changed before and after
-# perl is built (from None to real version in config.sh).
-get_perl_version[vardepvalue] = "${PERL_OWN_DIR}"
-def get_perl_version(d):
- import re
- cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh')
- try:
- f = open(cfg, 'r')
- except IOError:
- return None
- l = f.readlines();
- f.close();
- r = re.compile("^version='(\d*\.\d*\.\d*)'")
- for s in l:
- m = r.match(s)
- if m:
- return m.group(1)
- return None
+inherit perl-version
def is_target(d):
if not bb.data.inherits_class('native', d):
@@ -36,5 +16,3 @@ def is_target(d):
PERLLIBDIRS = "${libdir}/perl"
PERLLIBDIRS_class-native = "${libdir}/perl-native"
-PERLVERSION := "${@get_perl_version(d)}"
-PERLVERSION[vardepvalue] = ""
diff --git a/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass b/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass
index 21921b3dd..49388d4cf 100644
--- a/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass
@@ -20,25 +20,25 @@ CANADIANEXTRAOS = "${BASECANADIANEXTRAOS}"
CANADIANEXTRAVENDOR = ""
MODIFYTOS ??= "1"
python () {
- archs = d.getVar('PACKAGE_ARCHS', True).split()
+ archs = d.getVar('PACKAGE_ARCHS').split()
sdkarchs = []
for arch in archs:
sdkarchs.append(arch + '-${SDKPKGSUFFIX}')
d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs))
# Allow the following code segment to be disabled, e.g. meta-environment
- if d.getVar("MODIFYTOS", True) != "1":
+ if d.getVar("MODIFYTOS") != "1":
return
- if d.getVar("TCLIBC", True) == "baremetal":
+ if d.getVar("TCLIBC") == "baremetal":
return
- tos = d.getVar("TARGET_OS", True)
+ tos = d.getVar("TARGET_OS")
whitelist = []
extralibcs = [""]
- if "uclibc" in d.getVar("BASECANADIANEXTRAOS", True):
+ if "uclibc" in d.getVar("BASECANADIANEXTRAOS"):
extralibcs.append("uclibc")
- if "musl" in d.getVar("BASECANADIANEXTRAOS", True):
+ if "musl" in d.getVar("BASECANADIANEXTRAOS"):
extralibcs.append("musl")
for variant in ["", "spe", "x32", "eabi", "n32"]:
for libc in extralibcs:
@@ -51,33 +51,33 @@ python () {
entry = entry + "-" + libc
whitelist.append(entry)
if tos not in whitelist:
- bb.fatal("Building cross-candian for an unknown TARGET_SYS (%s), please update cross-canadian.bbclass" % d.getVar("TARGET_SYS", True))
+ bb.fatal("Building cross-candian for an unknown TARGET_SYS (%s), please update cross-canadian.bbclass" % d.getVar("TARGET_SYS"))
for n in ["PROVIDES", "DEPENDS"]:
- d.setVar(n, d.getVar(n, True))
- d.setVar("STAGING_BINDIR_TOOLCHAIN", d.getVar("STAGING_BINDIR_TOOLCHAIN", True))
+ d.setVar(n, d.getVar(n))
+ d.setVar("STAGING_BINDIR_TOOLCHAIN", d.getVar("STAGING_BINDIR_TOOLCHAIN"))
for prefix in ["AR", "AS", "DLLTOOL", "CC", "CXX", "GCC", "LD", "LIPO", "NM", "OBJDUMP", "RANLIB", "STRIP", "WINDRES"]:
n = prefix + "_FOR_TARGET"
- d.setVar(n, d.getVar(n, True))
+ d.setVar(n, d.getVar(n))
# This is a bit ugly. We need to zero LIBC/ABI extension which will change TARGET_OS
# however we need the old value in some variables. We expand those here first.
- tarch = d.getVar("TARGET_ARCH", True)
+ tarch = d.getVar("TARGET_ARCH")
if tarch == "x86_64":
d.setVar("LIBCEXTENSION", "")
d.setVar("ABIEXTENSION", "")
d.appendVar("CANADIANEXTRAOS", " linux-gnux32")
- for extraos in d.getVar("BASECANADIANEXTRAOS", True).split():
+ for extraos in d.getVar("BASECANADIANEXTRAOS").split():
d.appendVar("CANADIANEXTRAOS", " " + extraos + "x32")
elif tarch == "powerpc":
# PowerPC can build "linux" and "linux-gnuspe"
d.setVar("LIBCEXTENSION", "")
d.setVar("ABIEXTENSION", "")
d.appendVar("CANADIANEXTRAOS", " linux-gnuspe")
- for extraos in d.getVar("BASECANADIANEXTRAOS", True).split():
+ for extraos in d.getVar("BASECANADIANEXTRAOS").split():
d.appendVar("CANADIANEXTRAOS", " " + extraos + "spe")
elif tarch == "mips64":
d.appendVar("CANADIANEXTRAOS", " linux-gnun32")
- for extraos in d.getVar("BASECANADIANEXTRAOS", True).split():
+ for extraos in d.getVar("BASECANADIANEXTRAOS").split():
d.appendVar("CANADIANEXTRAOS", " " + extraos + "n32")
if tarch == "arm" or tarch == "armeb":
d.appendVar("CANADIANEXTRAOS", " linux-gnueabi linux-musleabi linux-uclibceabi")
@@ -86,10 +86,10 @@ python () {
d.setVar("TARGET_OS", "linux")
# Also need to handle multilib target vendors
- vendors = d.getVar("CANADIANEXTRAVENDOR", True)
+ vendors = d.getVar("CANADIANEXTRAVENDOR")
if not vendors:
vendors = all_multilib_tune_values(d, 'TARGET_VENDOR')
- origvendor = d.getVar("TARGET_VENDOR_MULTILIB_ORIGINAL", True)
+ origvendor = d.getVar("TARGET_VENDOR_MULTILIB_ORIGINAL")
if origvendor:
d.setVar("TARGET_VENDOR", origvendor)
if origvendor not in vendors.split():
@@ -100,9 +100,9 @@ MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${HOST_VENDOR}-${HOST_OS}"
INHIBIT_DEFAULT_DEPS = "1"
-STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}"
+STAGING_DIR_HOST = "${RECIPE_SYSROOT}"
-TOOLCHAIN_OPTIONS = " --sysroot=${STAGING_DIR}/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}"
+TOOLCHAIN_OPTIONS = " --sysroot=${RECIPE_SYSROOT}"
PATH_append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/"
@@ -115,8 +115,13 @@ HOST_CC_ARCH = "${SDK_CC_ARCH}"
HOST_LD_ARCH = "${SDK_LD_ARCH}"
HOST_AS_ARCH = "${SDK_AS_ARCH}"
+TARGET_CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
+TARGET_CFLAGS = "${BUILDSDK_CFLAGS}"
+TARGET_CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
+TARGET_LDFLAGS = "${BUILDSDK_LDFLAGS}"
+
# assign DPKG_ARCH
-DPKG_ARCH = "${@debian_arch_map(d.getVar('SDK_ARCH', True), '')}"
+DPKG_ARCH = "${@debian_arch_map(d.getVar('SDK_ARCH'), '')}"
CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
CFLAGS = "${BUILDSDK_CFLAGS}"
@@ -169,6 +174,7 @@ USE_NLS = "${SDKUSE_NLS}"
# and not any particular tune that is enabled.
TARGET_ARCH[vardepsexclude] = "TUNE_ARCH"
+PKGDATA_DIR = "${TMPDIR}/pkgdata/${SDK_SYS}"
# If MLPREFIX is set by multilib code, shlibs
# points to the wrong place so force it
SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs2"
diff --git a/import-layers/yocto-poky/meta/classes/cross.bbclass b/import-layers/yocto-poky/meta/classes/cross.bbclass
index 01b09337a..4feb01ecc 100644
--- a/import-layers/yocto-poky/meta/classes/cross.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cross.bbclass
@@ -19,15 +19,20 @@ HOST_AS_ARCH = "${BUILD_AS_ARCH}"
export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
-STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}${HOST_VENDOR}-${HOST_OS}"
+STAGING_DIR_HOST = "${RECIPE_SYSROOT_NATIVE}"
PACKAGE_ARCH = "${BUILD_ARCH}"
-MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${BUILD_VENDOR}-${BUILD_OS}"
+MULTIMACH_TARGET_SYS = "${BUILD_ARCH}${BUILD_VENDOR}-${BUILD_OS}"
export PKG_CONFIG_DIR = "${exec_prefix}/lib/pkgconfig"
export PKG_CONFIG_SYSROOT_DIR = ""
+TARGET_CPPFLAGS = ""
+TARGET_CFLAGS = ""
+TARGET_CXXFLAGS = ""
+TARGET_LDFLAGS = ""
+
CPPFLAGS = "${BUILD_CPPFLAGS}"
CFLAGS = "${BUILD_CFLAGS}"
CXXFLAGS = "${BUILD_CFLAGS}"
@@ -38,6 +43,10 @@ TOOLCHAIN_OPTIONS = ""
DEPENDS_GETTEXT = "gettext-native"
+# This class encodes staging paths into its scripts data so can only be
+# reused if we manipulate the paths.
+SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
+
# Path mangling needed by the cross packaging
# Note that we use := here to ensure that libdir and includedir are
# target paths.
diff --git a/import-layers/yocto-poky/meta/classes/crosssdk.bbclass b/import-layers/yocto-poky/meta/classes/crosssdk.bbclass
index 7315c38f1..ddb98d22b 100644
--- a/import-layers/yocto-poky/meta/classes/crosssdk.bbclass
+++ b/import-layers/yocto-poky/meta/classes/crosssdk.bbclass
@@ -5,12 +5,15 @@ MACHINEOVERRIDES = ""
PACKAGE_ARCH = "${SDK_ARCH}"
python () {
# set TUNE_PKGARCH to SDK_ARCH
- d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH', True))
+ d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH'))
}
-STAGING_DIR_TARGET = "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}"
STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
+# This class encodes staging paths into its scripts data so can only be
+# reused if we manipulate the paths.
+SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
+
TARGET_ARCH = "${SDK_ARCH}"
TARGET_VENDOR = "${SDK_VENDOR}"
TARGET_OS = "${SDK_OS}"
@@ -18,8 +21,13 @@ TARGET_PREFIX = "${SDK_PREFIX}"
TARGET_CC_ARCH = "${SDK_CC_ARCH}"
TARGET_LD_ARCH = "${SDK_LD_ARCH}"
TARGET_AS_ARCH = "${SDK_AS_ARCH}"
+TARGET_CPPFLAGS = "${BUILD_CPPFLAGS}"
+TARGET_CFLAGS = "${BUILD_CFLAGS}"
+TARGET_CXXFLAGS = "${BUILD_CXXFLAGS}"
+TARGET_LDFLAGS = "${BUILD_LDFLAGS}"
TARGET_FPU = ""
+
target_libdir = "${SDKPATHNATIVE}${libdir_nativesdk}"
target_includedir = "${SDKPATHNATIVE}${includedir_nativesdk}"
target_base_libdir = "${SDKPATHNATIVE}${base_libdir_nativesdk}"
diff --git a/import-layers/yocto-poky/meta/classes/cve-check.bbclass b/import-layers/yocto-poky/meta/classes/cve-check.bbclass
index 75b8fa9ab..13ec62ec9 100644
--- a/import-layers/yocto-poky/meta/classes/cve-check.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cve-check.bbclass
@@ -22,7 +22,7 @@
# The product name that the CVE database uses. Defaults to BPN, but may need to
# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
-CVE_PRODUCT ?= "${BPN}"
+CVE_PRODUCT ??= "${BPN}"
CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK"
CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvd.db"
@@ -51,7 +51,7 @@ python do_cve_check () {
Check recipe for patched and unpatched CVEs
"""
- if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE", True)):
+ if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE")):
patched_cves = get_patches_cves(d)
patched, unpatched = check_cves(d, patched_cves)
if patched or unpatched:
@@ -62,7 +62,7 @@ python do_cve_check () {
}
addtask cve_check after do_unpack before do_build
-do_cve_check[depends] = "cve-check-tool-native:do_populate_cve_db"
+do_cve_check[depends] = "cve-check-tool-native:do_populate_sysroot cve-check-tool-native:do_populate_cve_db"
do_cve_check[nostamp] = "1"
python cve_check_cleanup () {
@@ -70,7 +70,7 @@ python cve_check_cleanup () {
Delete the file used to gather all the CVE information.
"""
- bb.utils.remove(e.data.getVar("CVE_CHECK_TMP_FILE", True))
+ bb.utils.remove(e.data.getVar("CVE_CHECK_TMP_FILE"))
}
addhandler cve_check_cleanup
@@ -83,12 +83,12 @@ python cve_check_write_rootfs_manifest () {
import shutil
- if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE", True)):
+ if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE")):
bb.note("Writing rootfs CVE manifest")
- deploy_dir = d.getVar("DEPLOY_DIR_IMAGE", True)
- link_name = d.getVar("IMAGE_LINK_NAME", True)
- manifest_name = d.getVar("CVE_CHECK_MANIFEST", True)
- cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE", True)
+ deploy_dir = d.getVar("DEPLOY_DIR_IMAGE")
+ link_name = d.getVar("IMAGE_LINK_NAME")
+ manifest_name = d.getVar("CVE_CHECK_MANIFEST")
+ cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
shutil.copyfile(cve_tmp_file, manifest_name)
@@ -101,7 +101,7 @@ python cve_check_write_rootfs_manifest () {
bb.plain("Image CVE report stored in: %s" % manifest_name)
}
-ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST', True) == '1' else ''}"
+ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
def get_patches_cves(d):
"""
@@ -110,7 +110,7 @@ def get_patches_cves(d):
import re
- pn = d.getVar("PN", True)
+ pn = d.getVar("PN")
cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
patched_cves = set()
bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
@@ -149,15 +149,15 @@ def check_cves(d, patched_cves):
cves_patched = []
cves_unpatched = []
bpn = d.getVar("CVE_PRODUCT")
- pv = d.getVar("PV", True).split("git+")[0]
+ pv = d.getVar("PV").split("git+")[0]
cves = " ".join(patched_cves)
- cve_db_dir = d.getVar("CVE_CHECK_DB_DIR", True)
- cve_whitelist = ast.literal_eval(d.getVar("CVE_CHECK_CVE_WHITELIST", True))
+ cve_db_dir = d.getVar("CVE_CHECK_DB_DIR")
+ cve_whitelist = ast.literal_eval(d.getVar("CVE_CHECK_CVE_WHITELIST"))
cve_cmd = "cve-check-tool"
cmd = [cve_cmd, "--no-html", "--csv", "--not-affected", "-t", "faux", "-d", cve_db_dir]
# If the recipe has been whitelisted we return empty lists
- if d.getVar("PN", True) in d.getVar("CVE_CHECK_PN_WHITELIST", True).split():
+ if d.getVar("PN") in d.getVar("CVE_CHECK_PN_WHITELIST").split():
bb.note("Recipe has been whitelisted, skipping check")
return ([], [])
@@ -210,7 +210,7 @@ def get_cve_info(d, cves):
from pysqlite2 import dbapi2 as sqlite3
cve_data = {}
- db_file = d.getVar("CVE_CHECK_DB_FILE", True)
+ db_file = d.getVar("CVE_CHECK_DB_FILE")
placeholder = ",".join("?" * len(cves))
query = "SELECT * FROM NVD WHERE id IN (%s)" % placeholder
conn = sqlite3.connect(db_file)
@@ -231,39 +231,40 @@ def cve_write_data(d, patched, unpatched, cve_data):
CVE manifest if enabled.
"""
- cve_file = d.getVar("CVE_CHECK_LOCAL_FILE", True)
+ cve_file = d.getVar("CVE_CHECK_LOCAL_FILE")
nvd_link = "https://web.nvd.nist.gov/view/vuln/detail?vulnId="
write_string = ""
- first_alert = True
- bb.utils.mkdirhier(d.getVar("CVE_CHECK_LOCAL_DIR", True))
+ unpatched_cves = []
+ bb.utils.mkdirhier(d.getVar("CVE_CHECK_LOCAL_DIR"))
for cve in sorted(cve_data):
- write_string += "PACKAGE NAME: %s\n" % d.getVar("PN", True)
- write_string += "PACKAGE VERSION: %s\n" % d.getVar("PV", True)
+ write_string += "PACKAGE NAME: %s\n" % d.getVar("PN")
+ write_string += "PACKAGE VERSION: %s\n" % d.getVar("PV")
write_string += "CVE: %s\n" % cve
if cve in patched:
write_string += "CVE STATUS: Patched\n"
else:
+ unpatched_cves.append(cve)
write_string += "CVE STATUS: Unpatched\n"
- if first_alert:
- bb.warn("Found unpatched CVE, for more information check %s" % cve_file)
- first_alert = False
write_string += "CVE SUMMARY: %s\n" % cve_data[cve]["summary"]
write_string += "CVSS v2 BASE SCORE: %s\n" % cve_data[cve]["score"]
write_string += "VECTOR: %s\n" % cve_data[cve]["vector"]
write_string += "MORE INFORMATION: %s%s\n\n" % (nvd_link, cve)
+ if unpatched_cves:
+ bb.warn("Found unpatched CVE (%s), for more information check %s" % (" ".join(unpatched_cves),cve_file))
+
with open(cve_file, "w") as f:
bb.note("Writing file %s with CVE information" % cve_file)
f.write(write_string)
- if d.getVar("CVE_CHECK_COPY_FILES", True) == "1":
- cve_dir = d.getVar("CVE_CHECK_DIR", True)
+ if d.getVar("CVE_CHECK_COPY_FILES") == "1":
+ cve_dir = d.getVar("CVE_CHECK_DIR")
bb.utils.mkdirhier(cve_dir)
- deploy_file = os.path.join(cve_dir, d.getVar("PN", True))
+ deploy_file = os.path.join(cve_dir, d.getVar("PN"))
with open(deploy_file, "w") as f:
f.write(write_string)
- if d.getVar("CVE_CHECK_CREATE_MANIFEST", True) == "1":
- with open(d.getVar("CVE_CHECK_TMP_FILE", True), "a") as f:
+ if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
+ with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f:
f.write("%s" % write_string)
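
Illustrative configuration for the whitelists consumed above; the dict string must satisfy ast.literal_eval, and the recipe name is a placeholder:

    INHERIT += "cve-check"
    CVE_CHECK_PN_WHITELIST = "foo"
    CVE_CHECK_CVE_WHITELIST = "{'CVE-2014-2524': ('6.3','6.4'),}"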
diff --git a/import-layers/yocto-poky/meta/classes/debian.bbclass b/import-layers/yocto-poky/meta/classes/debian.bbclass
index be7cacca9..8124558b8 100644
--- a/import-layers/yocto-poky/meta/classes/debian.bbclass
+++ b/import-layers/yocto-poky/meta/classes/debian.bbclass
@@ -20,17 +20,17 @@ do_package_write_tar[rdeptask] = "${DEBIANRDEP}"
do_package_write_rpm[rdeptask] = "${DEBIANRDEP}"
python () {
- if not d.getVar("PACKAGES", True):
+ if not d.getVar("PACKAGES"):
d.setVar("DEBIANRDEP", "")
}
python debian_package_name_hook () {
import glob, copy, stat, errno, re
- pkgdest = d.getVar('PKGDEST', True)
- packages = d.getVar('PACKAGES', True)
- bin_re = re.compile(".*/s?" + os.path.basename(d.getVar("bindir", True)) + "$")
- lib_re = re.compile(".*/" + os.path.basename(d.getVar("libdir", True)) + "$")
+ pkgdest = d.getVar('PKGDEST')
+ packages = d.getVar('PACKAGES')
+ bin_re = re.compile(".*/s?" + os.path.basename(d.getVar("bindir")) + "$")
+ lib_re = re.compile(".*/" + os.path.basename(d.getVar("libdir")) + "$")
so_re = re.compile("lib.*\.so")
def socrunch(s):
@@ -53,11 +53,11 @@ python debian_package_name_hook () {
return (s[stat.ST_MODE] & stat.S_IEXEC)
def add_rprovides(pkg, d):
- newpkg = d.getVar('PKG_' + pkg, True)
+ newpkg = d.getVar('PKG_' + pkg)
if newpkg and newpkg != pkg:
- provs = (d.getVar('RPROVIDES_' + pkg, True) or "").split()
+ provs = (d.getVar('RPROVIDES_' + pkg) or "").split()
if pkg not in provs:
- d.appendVar('RPROVIDES_' + pkg, " " + pkg + " (=" + d.getVar("PKGV", True) + ")")
+ d.appendVar('RPROVIDES_' + pkg, " " + pkg + " (=" + d.getVar("PKGV") + ")")
def auto_libname(packages, orig_pkg):
sonames = []
@@ -70,7 +70,7 @@ python debian_package_name_hook () {
if lib_re.match(root):
has_libs = 1
if so_re.match(os.path.basename(file)):
- cmd = (d.getVar('TARGET_PREFIX', True) or "") + "objdump -p " + file + " 2>/dev/null"
+ cmd = (d.getVar('TARGET_PREFIX') or "") + "objdump -p " + file + " 2>/dev/null"
fd = os.popen(cmd)
lines = fd.readlines()
fd.close()
@@ -84,7 +84,7 @@ python debian_package_name_hook () {
if len(sonames) == 1:
soname = sonames[0]
elif len(sonames) > 1:
- lead = d.getVar('LEAD_SONAME', True)
+ lead = d.getVar('LEAD_SONAME')
if lead:
r = re.compile(lead)
filtered = []
@@ -115,7 +115,7 @@ python debian_package_name_hook () {
newpkg = pkgname
else:
newpkg = pkg.replace(orig_pkg, devname, 1)
- mlpre=d.getVar('MLPREFIX', True)
+ mlpre=d.getVar('MLPREFIX')
if mlpre:
if not newpkg.find(mlpre) == 0:
newpkg = mlpre + newpkg
@@ -131,7 +131,7 @@ python debian_package_name_hook () {
# and later
# DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libticw orig_pkg ncurses-libtic debian_pn None newpkg libticw
# so we need to handle ncurses-libticw->libticw5 before ncurses-libtic->libtic5
- for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS', True) or "").split(), reverse=True):
+ for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS') or "").split(), reverse=True):
auto_libname(packages, pkg)
}
diff --git a/import-layers/yocto-poky/meta/classes/devshell.bbclass b/import-layers/yocto-poky/meta/classes/devshell.bbclass
index be71aff35..4de7ea6fc 100644
--- a/import-layers/yocto-poky/meta/classes/devshell.bbclass
+++ b/import-layers/yocto-poky/meta/classes/devshell.bbclass
@@ -3,16 +3,16 @@ inherit terminal
DEVSHELL = "${SHELL}"
python do_devshell () {
- if d.getVarFlag("do_devshell", "manualfakeroot", True):
+ if d.getVarFlag("do_devshell", "manualfakeroot"):
d.prependVar("DEVSHELL", "pseudo ")
- fakeenv = d.getVar("FAKEROOTENV", True).split()
+ fakeenv = d.getVar("FAKEROOTENV").split()
for f in fakeenv:
k = f.split("=")
d.setVar(k[0], k[1])
d.appendVar("OE_TERMINAL_EXPORTS", " " + k[0])
d.delVarFlag("do_devshell", "fakeroot")
- oe_terminal(d.getVar('DEVSHELL', True), 'OpenEmbedded Developer Shell', d)
+ oe_terminal(d.getVar('DEVSHELL'), 'OpenEmbedded Developer Shell', d)
}
addtask devshell after do_patch
@@ -27,7 +27,7 @@ do_devshell[nostamp] = "1"
# be done as the normal user. We therefore carefully construct the environment
# manually
python () {
- if d.getVarFlag("do_devshell", "fakeroot", True):
+ if d.getVarFlag("do_devshell", "fakeroot"):
# We need to signal our code that we want fakeroot however we
# can't manipulate the environment and variables here yet (see YOCTO #4795)
d.setVarFlag("do_devshell", "manualfakeroot", "1")
@@ -82,7 +82,7 @@ def devpyshell(d):
more = False
i = code.InteractiveInterpreter(locals=_context)
- print("OE PyShell (PN = %s)\n" % d.getVar("PN", True))
+ print("OE PyShell (PN = %s)\n" % d.getVar("PN"))
def prompt(more):
if more:
diff --git a/import-layers/yocto-poky/meta/classes/devupstream.bbclass b/import-layers/yocto-poky/meta/classes/devupstream.bbclass
new file mode 100644
index 000000000..7780c5482
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/devupstream.bbclass
@@ -0,0 +1,48 @@
+# Class for use in BBCLASSEXTEND to make it easier to have a single recipe that
+# can build both stable tarballs and snapshots from upstream source
+# repositories.
+#
+# Usage:
+# BBCLASSEXTEND = "devupstream:target"
+# SRC_URI_class-devupstream = "git://git.example.com/example"
+# SRCREV_class-devupstream = "abcdef"
+#
+# If the first entry in SRC_URI is a git: URL then S is rewritten to
+# WORKDIR/git.
+#
+# There are a few caveats that remain to be solved:
+# - You can't build native or nativesdk recipes using for example
+# devupstream:native; you can only build target recipes.
+# - If the fetcher requires native tools (such as subversion-native) then
+# bitbake won't be able to add them automatically.
+
+CLASSOVERRIDE .= ":class-devupstream"
+
+python devupstream_virtclass_handler () {
+ # Do nothing if this is inherited, as it's for BBCLASSEXTEND
+ if "devupstream" not in (d.getVar('BBCLASSEXTEND') or ""):
+ bb.error("Don't inherit devupstream, use BBCLASSEXTEND")
+ return
+
+ variant = d.getVar("BBEXTENDVARIANT")
+ if variant not in ("target",):
+ bb.error("Pass the variant when using devupstream, for example devupstream:target")
+ return
+
+ # Development releases are never preferred by default
+ d.setVar("DEFAULT_PREFERENCE", "-1")
+
+ uri = bb.fetch2.URI(d.getVar("SRC_URI").split()[0])
+
+ if uri.scheme == "git":
+ d.setVar("S", "${WORKDIR}/git")
+
+ # Modify the PV if the recipe hasn't already overridden it
+ pv = d.getVar("PV")
+ proto_marker = "+" + uri.scheme
+ if proto_marker not in pv:
+ d.setVar("PV", pv + proto_marker + "${SRCPV}")
+}
+
+addhandler devupstream_virtclass_handler
+devupstream_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
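
For illustration, a hypothetical recipe wiring itself up to the new class would follow the header comment above; every value here is a placeholder:

    # example_1.0.bb (sketch)
    BBCLASSEXTEND = "devupstream:target"
    SRC_URI_class-devupstream = "git://git.example.com/example"
    SRCREV_class-devupstream = "abcdef"
    # The class sets DEFAULT_PREFERENCE = "-1", so the snapshot variant is
    # only selected when explicitly preferred, e.g. in local.conf:
    # PREFERRED_VERSION_example = "1.0+git%"
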
diff --git a/import-layers/yocto-poky/meta/classes/distro_features_check.bbclass b/import-layers/yocto-poky/meta/classes/distro_features_check.bbclass
index 7e91dbcf4..e74d3c04b 100644
--- a/import-layers/yocto-poky/meta/classes/distro_features_check.bbclass
+++ b/import-layers/yocto-poky/meta/classes/distro_features_check.bbclass
@@ -11,15 +11,15 @@
python () {
# Assume at least one var is set.
- distro_features = (d.getVar('DISTRO_FEATURES', True) or "").split()
+ distro_features = (d.getVar('DISTRO_FEATURES') or "").split()
- any_of_distro_features = d.getVar('ANY_OF_DISTRO_FEATURES', True)
+ any_of_distro_features = d.getVar('ANY_OF_DISTRO_FEATURES')
if any_of_distro_features:
any_of_distro_features = any_of_distro_features.split()
if set.isdisjoint(set(any_of_distro_features),set(distro_features)):
raise bb.parse.SkipPackage("one of '%s' needs to be in DISTRO_FEATURES" % any_of_distro_features)
- required_distro_features = d.getVar('REQUIRED_DISTRO_FEATURES', True)
+ required_distro_features = d.getVar('REQUIRED_DISTRO_FEATURES')
if required_distro_features:
required_distro_features = required_distro_features.split()
for f in required_distro_features:
@@ -28,7 +28,7 @@ python () {
else:
raise bb.parse.SkipPackage("missing required distro feature '%s' (not in DISTRO_FEATURES)" % f)
- conflict_distro_features = d.getVar('CONFLICT_DISTRO_FEATURES', True)
+ conflict_distro_features = d.getVar('CONFLICT_DISTRO_FEATURES')
if conflict_distro_features:
conflict_distro_features = conflict_distro_features.split()
for f in conflict_distro_features:
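
A recipe inheriting this class declares its constraints through the three variables checked above; a short sketch with illustrative feature names:

    inherit distro_features_check
    # Skip the recipe unless at least one of these is enabled:
    ANY_OF_DISTRO_FEATURES = "x11 wayland"
    # Hard requirement and conflict checks work the same way:
    REQUIRED_DISTRO_FEATURES = "opengl"
    CONFLICT_DISTRO_FEATURES = "directfb"
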
diff --git a/import-layers/yocto-poky/meta/classes/distrodata.bbclass b/import-layers/yocto-poky/meta/classes/distrodata.bbclass
index fbb7402e0..5e3444161 100644
--- a/import-layers/yocto-poky/meta/classes/distrodata.bbclass
+++ b/import-layers/yocto-poky/meta/classes/distrodata.bbclass
@@ -25,75 +25,70 @@ addtask distrodata_np
do_distrodata_np[nostamp] = "1"
python do_distrodata_np() {
localdata = bb.data.createCopy(d)
- pn = d.getVar("PN", True)
+ pn = d.getVar("PN")
bb.note("Package Name: %s" % pn)
import oe.distro_check as dist_check
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
distro_check_dir = os.path.join(tmpdir, "distro_check")
- datetime = localdata.getVar('DATETIME', True)
+ datetime = localdata.getVar('DATETIME')
dist_check.update_distro_data(distro_check_dir, datetime, localdata)
if pn.find("-native") != -1:
pnstripped = pn.split("-native")
bb.note("Native Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
if pn.find("-cross") != -1:
pnstripped = pn.split("-cross")
bb.note("cross Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
if pn.find("-crosssdk") != -1:
pnstripped = pn.split("-crosssdk")
bb.note("cross Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
if pn.startswith("nativesdk-"):
pnstripped = pn.replace("nativesdk-", "")
bb.note("NativeSDK Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES'))
if pn.find("-initial") != -1:
pnstripped = pn.split("-initial")
bb.note("initial Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
"""generate package information from .bb file"""
- pname = localdata.getVar('PN', True)
- pcurver = localdata.getVar('PV', True)
- pdesc = localdata.getVar('DESCRIPTION', True)
+ pname = localdata.getVar('PN')
+ pcurver = localdata.getVar('PV')
+ pdesc = localdata.getVar('DESCRIPTION')
if pdesc is not None:
pdesc = pdesc.replace(',','')
pdesc = pdesc.replace('\n','')
- pgrp = localdata.getVar('SECTION', True)
- plicense = localdata.getVar('LICENSE', True).replace(',','_')
+ pgrp = localdata.getVar('SECTION')
+ plicense = localdata.getVar('LICENSE').replace(',','_')
- rstatus = localdata.getVar('RECIPE_COLOR', True)
+ rstatus = localdata.getVar('RECIPE_COLOR')
if rstatus is not None:
rstatus = rstatus.replace(',','')
- pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION', True)
+ pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION')
if pcurver == pupver:
vermatch="1"
else:
vermatch="0"
- noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True)
+ noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON')
if noupdate_reason is None:
noupdate="0"
else:
noupdate="1"
noupdate_reason = noupdate_reason.replace(',','')
- maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
- rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE', True)
+ maintainer = localdata.getVar('RECIPE_MAINTAINER')
+ rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE')
result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
bb.note("DISTRO: %s,%s,%s,%s,%s,%s,%s,%s,%s\n" % \
@@ -109,80 +104,75 @@ addtask distrodata
do_distrodata[nostamp] = "1"
python do_distrodata() {
import csv
- logpath = d.getVar('LOG_DIR', True)
+ logpath = d.getVar('LOG_DIR')
bb.utils.mkdirhier(logpath)
logfile = os.path.join(logpath, "distrodata.csv")
import oe.distro_check as dist_check
localdata = bb.data.createCopy(d)
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
distro_check_dir = os.path.join(tmpdir, "distro_check")
- datetime = localdata.getVar('DATETIME', True)
+ datetime = localdata.getVar('DATETIME')
dist_check.update_distro_data(distro_check_dir, datetime, localdata)
- pn = d.getVar("PN", True)
+ pn = d.getVar("PN")
bb.note("Package Name: %s" % pn)
if pn.find("-native") != -1:
pnstripped = pn.split("-native")
bb.note("Native Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
if pn.startswith("nativesdk-"):
pnstripped = pn.replace("nativesdk-", "")
bb.note("NativeSDK Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES'))
if pn.find("-cross") != -1:
pnstripped = pn.split("-cross")
bb.note("cross Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
if pn.find("-crosssdk") != -1:
pnstripped = pn.split("-crosssdk")
bb.note("cross Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
if pn.find("-initial") != -1:
pnstripped = pn.split("-initial")
bb.note("initial Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
"""generate package information from .bb file"""
- pname = localdata.getVar('PN', True)
- pcurver = localdata.getVar('PV', True)
- pdesc = localdata.getVar('DESCRIPTION', True)
+ pname = localdata.getVar('PN')
+ pcurver = localdata.getVar('PV')
+ pdesc = localdata.getVar('DESCRIPTION')
if pdesc is not None:
pdesc = pdesc.replace(',','')
pdesc = pdesc.replace('\n','')
- pgrp = localdata.getVar('SECTION', True)
- plicense = localdata.getVar('LICENSE', True).replace(',','_')
+ pgrp = localdata.getVar('SECTION')
+ plicense = localdata.getVar('LICENSE').replace(',','_')
- rstatus = localdata.getVar('RECIPE_COLOR', True)
+ rstatus = localdata.getVar('RECIPE_COLOR')
if rstatus is not None:
rstatus = rstatus.replace(',','')
- pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION', True)
+ pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION')
if pcurver == pupver:
vermatch="1"
else:
vermatch="0"
- noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True)
+ noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON')
if noupdate_reason is None:
noupdate="0"
else:
noupdate="1"
noupdate_reason = noupdate_reason.replace(',','')
- maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
- rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE', True)
+ maintainer = localdata.getVar('RECIPE_MAINTAINER')
+ rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE')
# do the comparison
result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
@@ -272,60 +262,56 @@ python do_checkpkg() {
from bb.fetch2 import FetchError, NoMethodError, decodeurl
"""first check whether a uri is provided"""
- src_uri = (d.getVar('SRC_URI', True) or '').split()
+ src_uri = (d.getVar('SRC_URI') or '').split()
if src_uri:
uri_type, _, _, _, _, _ = decodeurl(src_uri[0])
else:
uri_type = "none"
"""initialize log files."""
- logpath = d.getVar('LOG_DIR', True)
+ logpath = d.getVar('LOG_DIR')
bb.utils.mkdirhier(logpath)
logfile = os.path.join(logpath, "checkpkg.csv")
"""generate package information from .bb file"""
- pname = d.getVar('PN', True)
+ pname = d.getVar('PN')
if pname.find("-native") != -1:
- if d.getVar('BBCLASSEXTEND', True):
+ if d.getVar('BBCLASSEXTEND'):
return
pnstripped = pname.split("-native")
bb.note("Native Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
if pname.startswith("nativesdk-"):
- if d.getVar('BBCLASSEXTEND', True):
+ if d.getVar('BBCLASSEXTEND'):
return
pnstripped = pname.replace("nativesdk-", "")
bb.note("NativeSDK Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES'))
if pname.find("-cross") != -1:
pnstripped = pname.split("-cross")
bb.note("cross Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
if pname.find("-initial") != -1:
pnstripped = pname.split("-initial")
bb.note("initial Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
- bb.data.update_data(localdata)
-
- pdesc = localdata.getVar('DESCRIPTION', True)
- pgrp = localdata.getVar('SECTION', True)
- pversion = localdata.getVar('PV', True)
- plicense = localdata.getVar('LICENSE', True)
- psection = localdata.getVar('SECTION', True)
- phome = localdata.getVar('HOMEPAGE', True)
- prelease = localdata.getVar('PR', True)
- pdepends = localdata.getVar('DEPENDS', True)
- pbugtracker = localdata.getVar('BUGTRACKER', True)
- ppe = localdata.getVar('PE', True)
- psrcuri = localdata.getVar('SRC_URI', True)
- maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
+
+ pdesc = localdata.getVar('DESCRIPTION')
+ pgrp = localdata.getVar('SECTION')
+ pversion = localdata.getVar('PV')
+ plicense = localdata.getVar('LICENSE')
+ psection = localdata.getVar('SECTION')
+ phome = localdata.getVar('HOMEPAGE')
+ prelease = localdata.getVar('PR')
+ pdepends = localdata.getVar('DEPENDS')
+ pbugtracker = localdata.getVar('BUGTRACKER')
+ ppe = localdata.getVar('PE')
+ psrcuri = localdata.getVar('SRC_URI')
+ maintainer = localdata.getVar('RECIPE_MAINTAINER')
""" Get upstream version version """
pupver = ""
@@ -362,7 +348,7 @@ python do_checkpkg() {
psrcuri = "none"
pdepends = "".join(pdepends.split("\t"))
pdesc = "".join(pdesc.split("\t"))
- no_upgr_reason = d.getVar('RECIPE_NO_UPDATE_REASON', True)
+ no_upgr_reason = d.getVar('RECIPE_NO_UPDATE_REASON')
lf = bb.utils.lockfile("%s.lock" % logfile)
with open(logfile, "a") as f:
writer = csv.writer(f, delimiter='\t')
@@ -392,6 +378,7 @@ python distro_check_eventhandler() {
addtask distro_check
do_distro_check[nostamp] = "1"
+do_distro_check[vardepsexclude] += "DATETIME"
python do_distro_check() {
"""checks if the package is present in other public Linux distros"""
import oe.distro_check as dc
@@ -400,13 +387,12 @@ python do_distro_check() {
return
localdata = bb.data.createCopy(d)
- bb.data.update_data(localdata)
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
distro_check_dir = os.path.join(tmpdir, "distro_check")
- logpath = d.getVar('LOG_DIR', True)
+ logpath = d.getVar('LOG_DIR')
bb.utils.mkdirhier(logpath)
result_file = os.path.join(logpath, "distrocheck.csv")
- datetime = localdata.getVar('DATETIME', True)
+ datetime = localdata.getVar('DATETIME')
dc.update_distro_data(distro_check_dir, datetime, localdata)
# do the comparison
@@ -449,12 +435,12 @@ do_checklicense[nostamp] = "1"
python do_checklicense() {
import csv
import shutil
- logpath = d.getVar('LOG_DIR', True)
+ logpath = d.getVar('LOG_DIR')
bb.utils.mkdirhier(logpath)
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
logfile = os.path.join(logpath, "missinglicense.csv")
- generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
- license_types = d.getVar('LICENSE', True)
+ generic_directory = d.getVar('COMMON_LICENSE_DIR')
+ license_types = d.getVar('LICENSE')
for license_type in ((license_types.replace('+', '').replace('|', '&')
.replace('(', '').replace(')', '').replace(';', '')
.replace(',', '').replace(" ", "").split("&"))):
@@ -475,5 +461,3 @@ do_checklicenseall[nostamp] = "1"
do_checklicenseall() {
:
}
-
-
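
The vardepsexclude addition above is the one notable functional change in this file: do_distro_check reads ${DATETIME}, which differs on every build, so without the exclusion the task signature would never be stable and the task would always re-run. The general pattern for any task with a volatile input:

    # Keep a volatile variable out of the task checksum so the task is
    # not re-triggered when only the timestamp changes.
    do_distro_check[vardepsexclude] += "DATETIME"
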
diff --git a/import-layers/yocto-poky/meta/classes/distutils-base.bbclass b/import-layers/yocto-poky/meta/classes/distutils-base.bbclass
index aa18e8b29..9f398d705 100644
--- a/import-layers/yocto-poky/meta/classes/distutils-base.bbclass
+++ b/import-layers/yocto-poky/meta/classes/distutils-base.bbclass
@@ -1,4 +1,4 @@
-DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES', True) == '')]}"
+DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES') == '')]}"
RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
inherit distutils-common-base pythonnative
diff --git a/import-layers/yocto-poky/meta/classes/distutils-tools.bbclass b/import-layers/yocto-poky/meta/classes/distutils-tools.bbclass
index 3ef9cc5a7..6f2880ea0 100644
--- a/import-layers/yocto-poky/meta/classes/distutils-tools.bbclass
+++ b/import-layers/yocto-poky/meta/classes/distutils-tools.bbclass
@@ -63,7 +63,7 @@ distutils_do_install() {
#
# FIXME: Bandaid against wrong datadir computation
#
- if test -e ${D}${datadir}/share; then
+ if [ -e ${D}${datadir}/share ]; then
mv -f ${D}${datadir}/share/* ${D}${datadir}/
fi
}
diff --git a/import-layers/yocto-poky/meta/classes/distutils.bbclass b/import-layers/yocto-poky/meta/classes/distutils.bbclass
index 857572d75..1930c3529 100644
--- a/import-layers/yocto-poky/meta/classes/distutils.bbclass
+++ b/import-layers/yocto-poky/meta/classes/distutils.bbclass
@@ -44,16 +44,16 @@ distutils_do_install() {
if test -e ${D}${bindir} ; then
for i in ${D}${bindir}/* ; do \
if [ ${PN} != "${BPN}-native" ]; then
- sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${bindir}/env\ python:g $i
+ sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${USRBINPATH}/env\ python:g $i
fi
sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
done
fi
- if test -e ${D}${sbindir}; then
+ if [ -e ${D}${sbindir} ]; then
for i in ${D}${sbindir}/* ; do \
if [ ${PN} != "${BPN}-native" ]; then
- sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${bindir}/env\ python:g $i
+ sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${USRBINPATH}/env\ python:g $i
fi
sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
done
@@ -65,13 +65,13 @@ distutils_do_install() {
#
# FIXME: Bandaid against wrong datadir computation
#
- if test -e ${D}${datadir}/share; then
+ if [ -e ${D}${datadir}/share ]; then
mv -f ${D}${datadir}/share/* ${D}${datadir}/
rmdir ${D}${datadir}/share
fi
# Fix backport modules
- if test -e ${STAGING_LIBDIR}/${PYTHON_DIR}/site-packages/backports/__init__.py && test -e ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.py; then
+ if [ -e ${STAGING_LIBDIR}/${PYTHON_DIR}/site-packages/backports/__init__.py ] && [ -e ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.py ]; then
rm ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.py;
rm ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.pyc;
fi
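
Two kinds of edits are mixed into this function: the test -e to [ -e ... ] conversions are purely stylistic (the two spellings are equivalent in POSIX sh), while switching ${bindir}/env to ${USRBINPATH}/env is a real fix, pinning the interpreter lookup to /usr/bin/env regardless of how bindir is configured. The resulting shebang rewrite, sketched:

    # Before: #!<staging python path from the build host>
    # After:  #!/usr/bin/env python
    sed -i -e 's:${STAGING_BINDIR_NATIVE}/python-native/python:${USRBINPATH}/env python:g' $i
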
diff --git a/import-layers/yocto-poky/meta/classes/distutils3-base.bbclass b/import-layers/yocto-poky/meta/classes/distutils3-base.bbclass
index 82ab6a3d1..7dbf07ac4 100644
--- a/import-layers/yocto-poky/meta/classes/distutils3-base.bbclass
+++ b/import-layers/yocto-poky/meta/classes/distutils3-base.bbclass
@@ -1,4 +1,4 @@
-DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES', True) == '')]}"
+DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES') == '')]}"
RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
inherit distutils-common-base python3native
diff --git a/import-layers/yocto-poky/meta/classes/distutils3.bbclass b/import-layers/yocto-poky/meta/classes/distutils3.bbclass
index a6720c5b6..6c3030688 100644
--- a/import-layers/yocto-poky/meta/classes/distutils3.bbclass
+++ b/import-layers/yocto-poky/meta/classes/distutils3.bbclass
@@ -47,14 +47,14 @@ distutils3_do_install() {
if test -e ${D}${bindir} ; then
for i in ${D}${bindir}/* ; do \
- sed -i -e s:${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}:${bindir}/env\ ${PYTHON_PN}:g $i
+ sed -i -e s:${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}:${USRBINPATH}/env\ ${PYTHON_PN}:g $i
sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
done
fi
if test -e ${D}${sbindir}; then
for i in ${D}${sbindir}/* ; do \
- sed -i -e s:${STAGING_BINDIR_NATIVE}/python-${PYTHON_PN}/${PYTHON_PN}:${bindir}/env\ ${PYTHON_PN}:g $i
+ sed -i -e s:${STAGING_BINDIR_NATIVE}/python-${PYTHON_PN}/${PYTHON_PN}:${USRBINPATH}/env\ ${PYTHON_PN}:g $i
sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
done
fi
@@ -64,7 +64,7 @@ distutils3_do_install() {
#
# FIXME: Bandaid against wrong datadir computation
#
- if test -e ${D}${datadir}/share; then
+ if [ -e ${D}${datadir}/share ]; then
mv -f ${D}${datadir}/share/* ${D}${datadir}/
rmdir ${D}${datadir}/share
fi
diff --git a/import-layers/yocto-poky/meta/classes/externalsrc.bbclass b/import-layers/yocto-poky/meta/classes/externalsrc.bbclass
index 31908c3ca..d64af6a9c 100644
--- a/import-layers/yocto-poky/meta/classes/externalsrc.bbclass
+++ b/import-layers/yocto-poky/meta/classes/externalsrc.bbclass
@@ -4,7 +4,7 @@
# Copyright (C) 2009 Chris Larson <clarson@kergoth.com>
# Released under the MIT license (see COPYING.MIT for the terms)
#
-# externalsrc.bbclass enables use of an existing source tree, usually external to
+# externalsrc.bbclass enables use of an existing source tree, usually external to
# the build system to build a piece of software rather than the usual fetch/unpack/patch
# process.
#
@@ -28,34 +28,34 @@ SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
EXTERNALSRC_SYMLINKS ?= "oe-workdir:${WORKDIR} oe-logs:${T}"
python () {
- externalsrc = d.getVar('EXTERNALSRC', True)
+ externalsrc = d.getVar('EXTERNALSRC')
# If this is the base recipe and EXTERNALSRC is set for it or any of its
# derivatives, then enable BB_DONT_CACHE to force the recipe to always be
# re-parsed so that the file-checksums function for do_compile is run every
# time.
- bpn = d.getVar('BPN', True)
- if bpn == d.getVar('PN', True):
- classextend = (d.getVar('BBCLASSEXTEND', True) or '').split()
+ bpn = d.getVar('BPN')
+ if bpn == d.getVar('PN'):
+ classextend = (d.getVar('BBCLASSEXTEND') or '').split()
if (externalsrc or
('native' in classextend and
- d.getVar('EXTERNALSRC_pn-%s-native' % bpn, True)) or
+ d.getVar('EXTERNALSRC_pn-%s-native' % bpn)) or
('nativesdk' in classextend and
- d.getVar('EXTERNALSRC_pn-nativesdk-%s' % bpn, True)) or
+ d.getVar('EXTERNALSRC_pn-nativesdk-%s' % bpn)) or
('cross' in classextend and
- d.getVar('EXTERNALSRC_pn-%s-cross' % bpn, True))):
+ d.getVar('EXTERNALSRC_pn-%s-cross' % bpn))):
d.setVar('BB_DONT_CACHE', '1')
if externalsrc:
d.setVar('S', externalsrc)
- externalsrcbuild = d.getVar('EXTERNALSRC_BUILD', True)
+ externalsrcbuild = d.getVar('EXTERNALSRC_BUILD')
if externalsrcbuild:
d.setVar('B', externalsrcbuild)
else:
d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
local_srcuri = []
- fetch = bb.fetch2.Fetch((d.getVar('SRC_URI', True) or '').split(), d)
+ fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d)
for url in fetch.urls:
url_data = fetch.ud[url]
parm = url_data.parm
@@ -69,7 +69,7 @@ python () {
# Dummy value because the default function can't be called with blank SRC_URI
d.setVar('SRCPV', '999')
- tasks = filter(lambda k: d.getVarFlag(k, "task", True), d.keys())
+ tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
for task in tasks:
if task.endswith("_setscene"):
@@ -94,7 +94,7 @@ python () {
# Note that we cannot use d.appendVarFlag() here because deps is expected to be a list object, not a string
d.setVarFlag('do_configure', 'deps', (d.getVarFlag('do_configure', 'deps', False) or []) + ['do_unpack'])
- for task in d.getVar("SRCTREECOVEREDTASKS", True).split():
+ for task in d.getVar("SRCTREECOVEREDTASKS").split():
if local_srcuri and task in fetch_tasks:
continue
bb.build.deltask(task, d)
@@ -106,24 +106,31 @@ python () {
d.setVarFlag('do_configure', 'file-checksums', '${@srctree_configure_hash_files(d)}')
# We don't want the workdir to go away
- d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN', True))
+ d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN'))
+
+ bb.build.addtask('do_buildclean',
+ 'do_clean' if d.getVar('S') == d.getVar('B') else None,
+ None, d)
# If B=S the same builddir is used even for different architectures.
# Thus, use a shared CONFIGURESTAMPFILE and STAMP directory so that
# change of do_configure task hash is correctly detected and stamps are
# invalidated if e.g. MACHINE changes.
- if d.getVar('S', True) == d.getVar('B', True):
+ if d.getVar('S') == d.getVar('B'):
configstamp = '${TMPDIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}/configure.sstate'
d.setVar('CONFIGURESTAMPFILE', configstamp)
d.setVar('STAMP', '${STAMPS_DIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}')
+ d.setVar('STAMPCLEAN', '${STAMPS_DIR}/work-shared/${PN}/*-*')
}
python externalsrc_configure_prefunc() {
+ s_dir = d.getVar('S')
# Create desired symlinks
- symlinks = (d.getVar('EXTERNALSRC_SYMLINKS', True) or '').split()
+ symlinks = (d.getVar('EXTERNALSRC_SYMLINKS') or '').split()
+ newlinks = []
for symlink in symlinks:
symsplit = symlink.split(':', 1)
- lnkfile = os.path.join(d.getVar('S', True), symsplit[0])
+ lnkfile = os.path.join(s_dir, symsplit[0])
target = d.expand(symsplit[1])
if len(symsplit) > 1:
if os.path.islink(lnkfile):
@@ -135,19 +142,43 @@ python externalsrc_configure_prefunc() {
# File/dir exists with same name as link, just leave it alone
continue
os.symlink(target, lnkfile)
+ newlinks.append(symsplit[0])
+ # Hide the symlinks from git
+ try:
+ git_exclude_file = os.path.join(s_dir, '.git/info/exclude')
+ if os.path.exists(git_exclude_file):
+ with open(git_exclude_file, 'r+') as efile:
+ elines = efile.readlines()
+ for link in newlinks:
+ if link + '\n' in elines or '/' + link + '\n' in elines:
+ continue
+ efile.write('/' + link + '\n')
+ except IOError as ioe:
+ bb.note('Failed to hide EXTERNALSRC_SYMLINKS from git')
}
python externalsrc_compile_prefunc() {
# Make it obvious that this is happening, since forgetting about it could lead to much confusion
- bb.plain('NOTE: %s: compiling from external source tree %s' % (d.getVar('PN', True), d.getVar('EXTERNALSRC', True)))
+ bb.plain('NOTE: %s: compiling from external source tree %s' % (d.getVar('PN'), d.getVar('EXTERNALSRC')))
+}
+
+do_buildclean[dirs] = "${S} ${B}"
+do_buildclean[nostamp] = "1"
+do_buildclean[doc] = "Call 'make clean' or equivalent in ${B}"
+externalsrc_do_buildclean() {
+ if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
+ oe_runmake clean || die "make failed"
+ else
+ bbnote "nothing to do - no makefile found"
+ fi
}
-def srctree_hash_files(d):
+def srctree_hash_files(d, srcdir=None):
import shutil
import subprocess
import tempfile
- s_dir = d.getVar('EXTERNALSRC', True)
+ s_dir = srcdir or d.getVar('EXTERNALSRC')
git_dir = os.path.join(s_dir, '.git')
oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1')
@@ -159,13 +190,13 @@ def srctree_hash_files(d):
# Update our custom index
env = os.environ.copy()
env['GIT_INDEX_FILE'] = tmp_index.name
- subprocess.check_output(['git', 'add', '.'], cwd=s_dir, env=env)
+ subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
with open(oe_hash_file, 'w') as fobj:
fobj.write(sha1)
ret = oe_hash_file + ':True'
else:
- ret = d.getVar('EXTERNALSRC', True) + '/*:True'
+ ret = s_dir + '/*:True'
return ret
def srctree_configure_hash_files(d):
@@ -173,7 +204,7 @@ def srctree_configure_hash_files(d):
Get the list of files that should trigger do_configure to re-execute,
based on the value of CONFIGURE_FILES
"""
- in_files = (d.getVar('CONFIGURE_FILES', True) or '').split()
+ in_files = (d.getVar('CONFIGURE_FILES') or '').split()
out_items = []
search_files = []
for entry in in_files:
@@ -182,9 +213,11 @@ def srctree_configure_hash_files(d):
else:
search_files.append(entry)
if search_files:
- s_dir = d.getVar('EXTERNALSRC', True)
+ s_dir = d.getVar('EXTERNALSRC')
for root, _, files in os.walk(s_dir):
for f in files:
if f in search_files:
out_items.append('%s:True' % os.path.join(root, f))
return ' '.join(out_items)
+
+EXPORT_FUNCTIONS do_buildclean
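
To exercise the updated class, including the new do_buildclean task, an external tree is typically wired up from local.conf; a minimal sketch with hypothetical paths:

    INHERIT += "externalsrc"
    EXTERNALSRC_pn-example = "/home/user/src/example"
    # Optional out-of-tree build dir; when unset and B == S, the shared
    # stamp and CONFIGURESTAMPFILE handling above takes effect.
    EXTERNALSRC_BUILD_pn-example = "/home/user/build/example"
    # Then: bitbake -c buildclean example   (runs 'make clean' in ${B})
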
diff --git a/import-layers/yocto-poky/meta/classes/extrausers.bbclass b/import-layers/yocto-poky/meta/classes/extrausers.bbclass
index 43900f359..7709407b6 100644
--- a/import-layers/yocto-poky/meta/classes/extrausers.bbclass
+++ b/import-layers/yocto-poky/meta/classes/extrausers.bbclass
@@ -15,7 +15,7 @@
inherit useradd_base
-IMAGE_INSTALL_append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS', True))]}"
+PACKAGE_INSTALL_append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS'))]}"
# Image level user / group settings
ROOTFS_POSTPROCESS_COMMAND_append = " set_user_group;"
@@ -63,3 +63,7 @@ set_user_group () {
remaining=`echo $remaining | cut -d ';' -f2-`
done
}
+
+USERADDEXTENSION ?= ""
+
+inherit ${USERADDEXTENSION}
diff --git a/import-layers/yocto-poky/meta/classes/fontcache.bbclass b/import-layers/yocto-poky/meta/classes/fontcache.bbclass
index 8ebdfc4f5..e76331131 100644
--- a/import-layers/yocto-poky/meta/classes/fontcache.bbclass
+++ b/import-layers/yocto-poky/meta/classes/fontcache.bbclass
@@ -3,7 +3,7 @@
# packages.
#
-DEPENDS += "qemu-native"
+PACKAGE_WRITE_DEPS += "qemu-native"
inherit qemu
FONT_PACKAGES ??= "${PN}"
@@ -30,26 +30,26 @@ fi
}
python () {
- font_pkgs = d.getVar('FONT_PACKAGES', True).split()
- deps = d.getVar("FONT_EXTRA_RDEPENDS", True)
+ font_pkgs = d.getVar('FONT_PACKAGES').split()
+ deps = d.getVar("FONT_EXTRA_RDEPENDS")
for pkg in font_pkgs:
if deps: d.appendVar('RDEPENDS_' + pkg, ' '+deps)
}
python add_fontcache_postinsts() {
- for pkg in d.getVar('FONT_PACKAGES', True).split():
+ for pkg in d.getVar('FONT_PACKAGES').split():
bb.note("adding fonts postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('fontcache_common', True)
+ postinst += d.getVar('fontcache_common')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
+ postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += d.getVar('fontcache_common', True)
+ postrm += d.getVar('fontcache_common')
d.setVar('pkg_postrm_%s' % pkg, postrm)
}
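
Several classes in this series (fontcache, gconf, gio-module-cache, gsettings, gtk-icon-cache, gtk-immodules-cache) get the same treatment: tools that only run from postinst intercepts at rootfs or package-writing time move out of DEPENDS into PACKAGE_WRITE_DEPS, so building the recipe itself no longer pulls in qemu-native and friends. gconf below shows both halves of the split:

    DEPENDS += "gconf"                      # needed at build time
    PACKAGE_WRITE_DEPS += "gconf-native"    # run by postinst intercepts only
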
diff --git a/import-layers/yocto-poky/meta/classes/fs-uuid.bbclass b/import-layers/yocto-poky/meta/classes/fs-uuid.bbclass
index bd2613cf1..9b53dfba7 100644
--- a/import-layers/yocto-poky/meta/classes/fs-uuid.bbclass
+++ b/import-layers/yocto-poky/meta/classes/fs-uuid.bbclass
@@ -3,7 +3,7 @@
# on ext file systems and depends on tune2fs.
def get_rootfs_uuid(d):
import subprocess
- rootfs = d.getVar('ROOTFS', True)
+ rootfs = d.getVar('ROOTFS')
output = subprocess.check_output(['tune2fs', '-l', rootfs])
for line in output.split('\n'):
if line.startswith('Filesystem UUID:'):
@@ -13,7 +13,7 @@ def get_rootfs_uuid(d):
bb.fatal('Could not determine filesystem UUID of %s' % rootfs)
# Replace the special <<uuid-of-rootfs>> inside a string (like the
-# root= APPEND string in a syslinux.cfg or gummiboot entry) with the
+# root= APPEND string in a syslinux.cfg or systemd-boot entry) with the
# actual UUID of the rootfs. Does nothing if the special string
# is not used.
def replace_rootfs_uuid(d, string):
diff --git a/import-layers/yocto-poky/meta/classes/gconf.bbclass b/import-layers/yocto-poky/meta/classes/gconf.bbclass
index d7afa7282..4e0ee2e7d 100644
--- a/import-layers/yocto-poky/meta/classes/gconf.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gconf.bbclass
@@ -1,4 +1,5 @@
-DEPENDS += "gconf gconf-native"
+DEPENDS += "gconf"
+PACKAGE_WRITE_DEPS += "gconf-native"
# These are for when gconftool is used natively and the prefix isn't necessarily
# the sysroot. TODO: replicate the postinst logic for -native packages going
@@ -42,8 +43,8 @@ done
python populate_packages_append () {
import re
- packages = d.getVar('PACKAGES', True).split()
- pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar('PACKAGES').split()
+ pkgdest = d.getVar('PKGDEST')
for pkg in packages:
schema_dir = '%s/%s/etc/gconf/schemas' % (pkgdest, pkg)
@@ -56,15 +57,15 @@ python populate_packages_append () {
if schemas != []:
bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
d.setVar('SCHEMA_FILES', " ".join(schemas))
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('gconf_postinst', True)
+ postinst += d.getVar('gconf_postinst')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- prerm = d.getVar('pkg_prerm_%s' % pkg, True)
+ prerm = d.getVar('pkg_prerm_%s' % pkg)
if not prerm:
prerm = '#!/bin/sh\n'
- prerm += d.getVar('gconf_prerm', True)
+ prerm += d.getVar('gconf_prerm')
d.setVar('pkg_prerm_%s' % pkg, prerm)
d.appendVar("RDEPENDS_%s" % pkg, ' ' + d.getVar('MLPREFIX', False) + 'gconf')
}
diff --git a/import-layers/yocto-poky/meta/classes/gettext.bbclass b/import-layers/yocto-poky/meta/classes/gettext.bbclass
index 03b89b245..0be14246b 100644
--- a/import-layers/yocto-poky/meta/classes/gettext.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gettext.bbclass
@@ -1,15 +1,15 @@
def gettext_dependencies(d):
- if d.getVar('INHIBIT_DEFAULT_DEPS', True) and not oe.utils.inherits(d, 'cross-canadian'):
+ if d.getVar('INHIBIT_DEFAULT_DEPS') and not oe.utils.inherits(d, 'cross-canadian'):
return ""
- if d.getVar('USE_NLS', True) == 'no':
+ if d.getVar('USE_NLS') == 'no':
return "gettext-minimal-native"
return d.getVar('DEPENDS_GETTEXT', False)
def gettext_oeconf(d):
- if d.getVar('USE_NLS', True) == 'no':
+ if d.getVar('USE_NLS') == 'no':
return '--disable-nls'
# Remove the NLS bits if USE_NLS is no or INHIBIT_DEFAULT_DEPS is set
- if d.getVar('INHIBIT_DEFAULT_DEPS', True) and not oe.utils.inherits(d, 'cross-canadian'):
+ if d.getVar('INHIBIT_DEFAULT_DEPS') and not oe.utils.inherits(d, 'cross-canadian'):
return '--disable-nls'
return "--enable-nls"
diff --git a/import-layers/yocto-poky/meta/classes/gio-module-cache.bbclass b/import-layers/yocto-poky/meta/classes/gio-module-cache.bbclass
index 91461b11e..a8190b7b8 100644
--- a/import-layers/yocto-poky/meta/classes/gio-module-cache.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gio-module-cache.bbclass
@@ -1,4 +1,4 @@
-DEPENDS += "qemu-native"
+PACKAGE_WRITE_DEPS += "qemu-native"
inherit qemu
GIO_MODULE_PACKAGES ??= "${PN}"
@@ -17,21 +17,21 @@ fi
}
python populate_packages_append () {
- packages = d.getVar('GIO_MODULE_PACKAGES', True).split()
+ packages = d.getVar('GIO_MODULE_PACKAGES').split()
for pkg in packages:
bb.note("adding gio-module-cache postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('gio_module_cache_common', True)
+ postinst += d.getVar('gio_module_cache_common')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg, True)
+ postrm = d.getVar('pkg_postrm_%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += d.getVar('gio_module_cache_common', True)
+ postrm += d.getVar('gio_module_cache_common')
d.setVar('pkg_postrm_%s' % pkg, postrm)
}
diff --git a/import-layers/yocto-poky/meta/classes/go.bbclass b/import-layers/yocto-poky/meta/classes/go.bbclass
new file mode 100644
index 000000000..85f71a2e9
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/go.bbclass
@@ -0,0 +1,77 @@
+inherit goarch
+
+# The x32 ABI is not supported by the go compiler so far
+COMPATIBLE_HOST_linux-gnux32 = "null"
+# ppc32 is not supported by the go compilers either
+COMPATIBLE_HOST_powerpc = "null"
+
+GOROOT_class-native = "${STAGING_LIBDIR_NATIVE}/go"
+GOROOT = "${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go"
+GOBIN_FINAL_class-native = "${GOROOT_FINAL}/bin"
+GOBIN_FINAL = "${GOROOT_FINAL}/bin/${GOOS}_${GOARCH}"
+
+export GOOS = "${TARGET_GOOS}"
+export GOARCH = "${TARGET_GOARCH}"
+export GOARM = "${TARGET_GOARM}"
+export CGO_ENABLED = "1"
+export GOROOT
+export GOROOT_FINAL = "${libdir}/${TARGET_SYS}/go"
+export GOBIN_FINAL
+export GOPKG_FINAL = "${GOROOT_FINAL}/pkg/${GOOS}_${GOARCH}"
+export GOSRC_FINAL = "${GOROOT_FINAL}/src"
+export GO_GCFLAGS = "${TARGET_CFLAGS}"
+export GO_LDFLAGS = "${TARGET_LDFLAGS}"
+export CGO_CFLAGS = "${TARGET_CC_ARCH}${TOOLCHAIN_OPTIONS} ${TARGET_CFLAGS}"
+export CGO_CPPFLAGS = "${TARGET_CPPFLAGS}"
+export CGO_CXXFLAGS = "${TARGET_CC_ARCH}${TOOLCHAIN_OPTIONS} ${TARGET_CXXFLAGS}"
+export CGO_LDFLAGS = "${TARGET_CC_ARCH}${TOOLCHAIN_OPTIONS} ${TARGET_LDFLAGS}"
+
+DEPENDS += "go-cross-${TARGET_ARCH}"
+DEPENDS_class-native += "go-native"
+
+FILES_${PN}-staticdev += "${GOSRC_FINAL}/${GO_IMPORT}"
+FILES_${PN}-staticdev += "${GOPKG_FINAL}/${GO_IMPORT}*"
+
+GO_INSTALL ?= "${GO_IMPORT}/..."
+
+do_go_compile() {
+ GOPATH=${S}:${STAGING_LIBDIR}/${TARGET_SYS}/go go env
+ if [ -n "${GO_INSTALL}" ]; then
+ GOPATH=${S}:${STAGING_LIBDIR}/${TARGET_SYS}/go go install -v ${GO_INSTALL}
+ fi
+}
+
+do_go_install() {
+ rm -rf ${WORKDIR}/staging
+ install -d ${WORKDIR}/staging${GOROOT_FINAL} ${D}${GOROOT_FINAL}
+ tar -C ${S} -cf - . | tar -C ${WORKDIR}/staging${GOROOT_FINAL} -xpvf -
+
+ find ${WORKDIR}/staging${GOROOT_FINAL} \( \
+ -name \*.indirectionsymlink -o \
+ -name .git\* -o \
+ -name .hg -o \
+ -name .svn -o \
+ -name .pc\* -o \
+ -name patches\* \
+ \) -print0 | \
+ xargs -r0 rm -rf
+
+ tar -C ${WORKDIR}/staging${GOROOT_FINAL} -cf - . | \
+ tar -C ${D}${GOROOT_FINAL} -xpvf -
+
+ chown -R root:root "${D}${GOROOT_FINAL}"
+
+ if [ -e "${D}${GOBIN_FINAL}" ]; then
+ install -d -m 0755 "${D}${bindir}"
+ find "${D}${GOBIN_FINAL}" ! -type d -print0 | xargs -r0 mv --target-directory="${D}${bindir}"
+ rmdir -p "${D}${GOBIN_FINAL}" || true
+ fi
+}
+
+do_compile() {
+ do_go_compile
+}
+
+do_install() {
+ do_go_install
+}
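
A hypothetical recipe consuming the new class sets GO_IMPORT (used by the FILES and GO_INSTALL defaults above) and arranges for the source to land under ${S}/src/${GO_IMPORT}, since do_go_compile puts ${S} on GOPATH; all names below are placeholders:

    GO_IMPORT = "github.com/example/project"
    SRC_URI = "git://${GO_IMPORT};protocol=https;destsuffix=git/src/${GO_IMPORT}"
    SRCREV = "${AUTOREV}"
    S = "${WORKDIR}/git"
    inherit go
    # do_compile runs 'go install -v ${GO_INSTALL}', which defaults to
    # ${GO_IMPORT}/..., i.e. every package below the import path.
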
diff --git a/import-layers/yocto-poky/meta/classes/goarch.bbclass b/import-layers/yocto-poky/meta/classes/goarch.bbclass
new file mode 100644
index 000000000..12df88f8c
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/goarch.bbclass
@@ -0,0 +1,53 @@
+BUILD_GOOS = "${@go_map_os(d.getVar('BUILD_OS', True), d)}"
+BUILD_GOARCH = "${@go_map_arch(d.getVar('BUILD_ARCH', True), d)}"
+BUILD_GOTUPLE = "${BUILD_GOOS}_${BUILD_GOARCH}"
+HOST_GOOS = "${@go_map_os(d.getVar('HOST_OS', True), d)}"
+HOST_GOARCH = "${@go_map_arch(d.getVar('HOST_ARCH', True), d)}"
+HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH', True), d.getVar('TUNE_FEATURES', True), d)}"
+HOST_GOTUPLE = "${HOST_GOOS}_${HOST_GOARCH}"
+TARGET_GOOS = "${@go_map_os(d.getVar('TARGET_OS', True), d)}"
+TARGET_GOARCH = "${@go_map_arch(d.getVar('TARGET_ARCH', True), d)}"
+TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH', True), d.getVar('TUNE_FEATURES', True), d)}"
+TARGET_GOTUPLE = "${TARGET_GOOS}_${TARGET_GOARCH}"
+GO_BUILD_BINDIR = "${@['bin/${HOST_GOTUPLE}','bin'][d.getVar('BUILD_GOTUPLE',True) == d.getVar('HOST_GOTUPLE',True)]}"
+
+def go_map_arch(a, d):
+ import re
+ if re.match('i.86', a):
+ return '386'
+ elif a == 'x86_64':
+ return 'amd64'
+ elif re.match('arm.*', a):
+ return 'arm'
+ elif re.match('aarch64.*', a):
+ return 'arm64'
+ elif re.match('mips64el*', a):
+ return 'mips64le'
+ elif re.match('mips64*', a):
+ return 'mips64'
+ elif re.match('mipsel*', a):
+ return 'mipsle'
+ elif re.match('mips*', a):
+ return 'mips'
+ elif re.match('p(pc|owerpc)(64le)', a):
+ return 'ppc64le'
+ elif re.match('p(pc|owerpc)(64)', a):
+ return 'ppc64'
+ else:
+ raise bb.parse.SkipPackage("Unsupported CPU architecture: %s" % a)
+
+def go_map_arm(a, f, d):
+ import re
+ if re.match('arm.*', a):
+ if 'armv7' in f:
+ return '7'
+ elif 'armv6' in f:
+ return '6'
+ return ''
+
+def go_map_os(o, d):
+ if o.startswith('linux'):
+ return 'linux'
+ return o
+
+
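
These mappers run at parse time through the inline python above; their effect for common values (assuming the ppc64le ordering fix above):

    # go_map_arch('i686')        -> '386'
    # go_map_arch('x86_64')      -> 'amd64'
    # go_map_arch('aarch64')     -> 'arm64'
    # go_map_arch('mips64el')    -> 'mips64le'
    # go_map_arm('arm', 'armv7ve neon ...') -> '7'
    # go_map_os('linux-gnueabi') -> 'linux'
    # Anything unrecognised raises bb.parse.SkipPackage, masking the recipe.
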
diff --git a/import-layers/yocto-poky/meta/classes/gobject-introspection.bbclass b/import-layers/yocto-poky/meta/classes/gobject-introspection.bbclass
index 37389cbc8..b6160b88b 100644
--- a/import-layers/yocto-poky/meta/classes/gobject-introspection.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gobject-introspection.bbclass
@@ -17,7 +17,7 @@ UNKNOWN_CONFIGURE_WHITELIST_append = " --enable-introspection --disable-introspe
# Generating introspection data depends on a combination of native and target
# introspection tools, and qemu to run the target tools.
-DEPENDS_append_class-target = " gobject-introspection gobject-introspection-native qemu-native"
+DEPENDS_append_class-target = " gobject-introspection gobject-introspection-native qemu-native prelink-native"
# Even though introspection is disabled on -native, gobject-introspection package is still
# needed for m4 macros.
diff --git a/import-layers/yocto-poky/meta/classes/grub-efi.bbclass b/import-layers/yocto-poky/meta/classes/grub-efi.bbclass
index 17417ba5d..df7fe18a7 100644
--- a/import-layers/yocto-poky/meta/classes/grub-efi.bbclass
+++ b/import-layers/yocto-poky/meta/classes/grub-efi.bbclass
@@ -40,13 +40,15 @@ efi_populate() {
install -d ${DEST}${EFIDIR}
- GRUB_IMAGE="bootia32.efi"
+ GRUB_IMAGE="grub-efi-bootia32.efi"
+ DEST_IMAGE="bootia32.efi"
if [ "${TARGET_ARCH}" = "x86_64" ]; then
- GRUB_IMAGE="bootx64.efi"
+ GRUB_IMAGE="grub-efi-bootx64.efi"
+ DEST_IMAGE="bootx64.efi"
fi
- install -m 0644 ${DEPLOY_DIR_IMAGE}/${GRUB_IMAGE} ${DEST}${EFIDIR}
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/${GRUB_IMAGE} ${DEST}${EFIDIR}/${DEST_IMAGE}
EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- printf 'fs0:%s\%s\n' "$EFIPATH" "$GRUB_IMAGE" >${DEST}/startup.nsh
+ printf 'fs0:%s\%s\n' "$EFIPATH" "$DEST_IMAGE" >${DEST}/startup.nsh
install -m 0644 ${GRUB_CFG} ${DEST}${EFIDIR}/grub.cfg
}
@@ -72,14 +74,14 @@ efi_hddimg_populate() {
python build_efi_cfg() {
import sys
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
if not workdir:
bb.error("WORKDIR not defined, unable to package")
return
- gfxserial = d.getVar('GRUB_GFXSERIAL', True) or ""
+ gfxserial = d.getVar('GRUB_GFXSERIAL') or ""
- labels = d.getVar('LABELS', True)
+ labels = d.getVar('LABELS')
if not labels:
bb.debug(1, "LABELS not defined, nothing to do")
return
@@ -88,7 +90,7 @@ python build_efi_cfg() {
bb.debug(1, "No labels, nothing to do")
return
- cfile = d.getVar('GRUB_CFG', True)
+ cfile = d.getVar('GRUB_CFG')
if not cfile:
bb.fatal('Unable to read GRUB_CFG')
@@ -99,39 +101,38 @@ python build_efi_cfg() {
cfgfile.write('# Automatically created by OE\n')
- opts = d.getVar('GRUB_OPTS', True)
+ opts = d.getVar('GRUB_OPTS')
if opts:
for opt in opts.split(';'):
cfgfile.write('%s\n' % opt)
cfgfile.write('default=%s\n' % (labels.split()[0]))
- timeout = d.getVar('GRUB_TIMEOUT', True)
+ timeout = d.getVar('GRUB_TIMEOUT')
if timeout:
cfgfile.write('timeout=%s\n' % timeout)
else:
cfgfile.write('timeout=50\n')
- root = d.getVar('GRUB_ROOT', True)
+ root = d.getVar('GRUB_ROOT')
if not root:
bb.fatal('GRUB_ROOT not defined')
if gfxserial == "1":
btypes = [ [ " graphics console", "" ],
- [ " serial console", d.getVar('GRUB_SERIAL', True) or "" ] ]
+ [ " serial console", d.getVar('GRUB_SERIAL') or "" ] ]
else:
btypes = [ [ "", "" ] ]
for label in labels.split():
localdata = d.createCopy()
- overrides = localdata.getVar('OVERRIDES', True)
+ overrides = localdata.getVar('OVERRIDES')
if not overrides:
bb.fatal('OVERRIDES not defined')
for btype in btypes:
localdata.setVar('OVERRIDES', label + ':' + overrides)
- bb.data.update_data(localdata)
cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0]))
lb = label
@@ -141,8 +142,8 @@ python build_efi_cfg() {
cfgfile.write(' %s' % replace_rootfs_uuid(d, root))
- append = localdata.getVar('APPEND', True)
- initrd = localdata.getVar('INITRD', True)
+ append = localdata.getVar('APPEND')
+ initrd = localdata.getVar('INITRD')
if append:
append = replace_rootfs_uuid(d, append)
diff --git a/import-layers/yocto-poky/meta/classes/gsettings.bbclass b/import-layers/yocto-poky/meta/classes/gsettings.bbclass
index dec5abc02..eae3dc799 100644
--- a/import-layers/yocto-poky/meta/classes/gsettings.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gsettings.bbclass
@@ -7,31 +7,32 @@
# TODO use a trigger so that this runs once per package operation run
-DEPENDS += "glib-2.0-native"
RDEPENDS_${PN} += "glib-2.0-utils"
FILES_${PN} += "${datadir}/glib-2.0/schemas"
+PACKAGE_WRITE_DEPS += "glib-2.0-native"
+
gsettings_postinstrm () {
glib-compile-schemas $D${datadir}/glib-2.0/schemas
}
python populate_packages_append () {
- pkg = d.getVar('PN', True)
+ pkg = d.getVar('PN')
bb.note("adding gsettings postinst scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('gsettings_postinstrm', True)
+ postinst += d.getVar('gsettings_postinstrm')
d.setVar('pkg_postinst_%s' % pkg, postinst)
bb.note("adding gsettings postrm scripts to %s" % pkg)
- postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
+ postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += d.getVar('gsettings_postinstrm', True)
+ postrm += d.getVar('gsettings_postinstrm')
d.setVar('pkg_postrm_%s' % pkg, postrm)
}
diff --git a/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass b/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass
index 297eac63b..0ae2729c0 100644
--- a/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass
@@ -50,7 +50,7 @@ export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
GIR_EXTRA_LIBS_PATH=\`find ${B} -name .libs| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
-if test -d ".libs"; then
+if [ -d ".libs" ]; then
$qemu_binary ".libs/\$@"
else
$qemu_binary "\$@"
diff --git a/import-layers/yocto-poky/meta/classes/gtk-icon-cache.bbclass b/import-layers/yocto-poky/meta/classes/gtk-icon-cache.bbclass
index 0f1052b08..d87167aec 100644
--- a/import-layers/yocto-poky/meta/classes/gtk-icon-cache.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gtk-icon-cache.bbclass
@@ -2,6 +2,8 @@ FILES_${PN} += "${datadir}/icons/hicolor"
DEPENDS += "${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} gtk-icon-utils-native"
+PACKAGE_WRITE_DEPS += "gtk-icon-utils-native gdk-pixbuf-native"
+
gtk_icon_cache_postinst() {
if [ "x$D" != "x" ]; then
$INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} \
@@ -35,11 +37,11 @@ fi
}
python populate_packages_append () {
- packages = d.getVar('PACKAGES', True).split()
- pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar('PACKAGES').split()
+ pkgdest = d.getVar('PKGDEST')
for pkg in packages:
- icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir', True))
+ icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir'))
if not os.path.exists(icon_dir):
continue
@@ -49,16 +51,16 @@ python populate_packages_append () {
bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('gtk_icon_cache_postinst', True)
+ postinst += d.getVar('gtk_icon_cache_postinst')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg, True)
+ postrm = d.getVar('pkg_postrm_%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += d.getVar('gtk_icon_cache_postrm', True)
+ postrm += d.getVar('gtk_icon_cache_postrm')
d.setVar('pkg_postrm_%s' % pkg, postrm)
}
diff --git a/import-layers/yocto-poky/meta/classes/gtk-immodules-cache.bbclass b/import-layers/yocto-poky/meta/classes/gtk-immodules-cache.bbclass
index ebbc9dea8..3d82dbe9e 100644
--- a/import-layers/yocto-poky/meta/classes/gtk-immodules-cache.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gtk-immodules-cache.bbclass
@@ -2,7 +2,7 @@
#
# Usage: Set GTKIMMODULES_PACKAGES to the packages that needs to update the inputmethod modules
-DEPENDS =+ "qemu-native"
+PACKAGE_WRITE_DEPS += "qemu-native"
inherit qemu
@@ -61,21 +61,21 @@ fi
}
python populate_packages_append () {
- gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES', True).split()
+ gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES').split()
for pkg in gtkimmodules_pkgs:
bb.note("adding gtk-immodule-cache postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('gtk_immodule_cache_postinst', True)
+ postinst += d.getVar('gtk_immodule_cache_postinst')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg, True)
+ postrm = d.getVar('pkg_postrm_%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += d.getVar('gtk_immodule_cache_postrm', True)
+ postrm += d.getVar('gtk_immodule_cache_postrm')
d.setVar('pkg_postrm_%s' % pkg, postrm)
}
diff --git a/import-layers/yocto-poky/meta/classes/gummiboot.bbclass b/import-layers/yocto-poky/meta/classes/gummiboot.bbclass
deleted file mode 100644
index 4f2dea6c3..000000000
--- a/import-layers/yocto-poky/meta/classes/gummiboot.bbclass
+++ /dev/null
@@ -1,121 +0,0 @@
-# Copyright (C) 2014 Intel Corporation
-#
-# Released under the MIT license (see COPYING.MIT)
-
-# gummiboot.bbclass - equivalent of grub-efi.bbclass
-# Set EFI_PROVIDER = "gummiboot" to use gummiboot on your live images instead of grub-efi
-# (images built by image-live.bbclass or image-vm.bbclass)
-
-do_bootimg[depends] += "${MLPREFIX}gummiboot:do_deploy"
-do_bootdirectdisk[depends] += "${MLPREFIX}gummiboot:do_deploy"
-
-EFIDIR = "/EFI/BOOT"
-
-GUMMIBOOT_CFG ?= "${S}/loader.conf"
-GUMMIBOOT_ENTRIES ?= ""
-GUMMIBOOT_TIMEOUT ?= "10"
-
-# Need UUID utility code.
-inherit fs-uuid
-
-efi_populate() {
- DEST=$1
-
- EFI_IMAGE="gummibootia32.efi"
- DEST_EFI_IMAGE="bootia32.efi"
- if [ "${TARGET_ARCH}" = "x86_64" ]; then
- EFI_IMAGE="gummibootx64.efi"
- DEST_EFI_IMAGE="bootx64.efi"
- fi
-
- install -d ${DEST}${EFIDIR}
- # gummiboot requires these paths for configuration files
- # they are not customizable so no point in new vars
- install -d ${DEST}/loader
- install -d ${DEST}/loader/entries
- install -m 0644 ${DEPLOY_DIR_IMAGE}/${EFI_IMAGE} ${DEST}${EFIDIR}/${DEST_EFI_IMAGE}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- printf 'fs0:%s\%s\n' "$EFIPATH" "$DEST_EFI_IMAGE" >${DEST}/startup.nsh
- install -m 0644 ${GUMMIBOOT_CFG} ${DEST}/loader/loader.conf
- for i in ${GUMMIBOOT_ENTRIES}; do
- install -m 0644 ${i} ${DEST}/loader/entries
- done
-}
-
-efi_iso_populate() {
- iso_dir=$1
- efi_populate $iso_dir
- mkdir -p ${EFIIMGDIR}/${EFIDIR}
- cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
- cp $iso_dir/vmlinuz ${EFIIMGDIR}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- echo "fs0:${EFIPATH}\\${DEST_EFI_IMAGE}" > ${EFIIMGDIR}/startup.nsh
- if [ -f "$iso_dir/initrd" ] ; then
- cp $iso_dir/initrd ${EFIIMGDIR}
- fi
-}
-
-efi_hddimg_populate() {
- efi_populate $1
-}
-
-python build_efi_cfg() {
- s = d.getVar("S", True)
- labels = d.getVar('LABELS', True)
- if not labels:
- bb.debug(1, "LABELS not defined, nothing to do")
- return
-
- if labels == []:
- bb.debug(1, "No labels, nothing to do")
- return
-
- cfile = d.getVar('GUMMIBOOT_CFG', True)
- try:
- cfgfile = open(cfile, 'w')
- except OSError:
- bb.fatal('Unable to open %s' % cfile)
-
- cfgfile.write('# Automatically created by OE\n')
- cfgfile.write('default %s\n' % (labels.split()[0]))
- timeout = d.getVar('GUMMIBOOT_TIMEOUT', True)
- if timeout:
- cfgfile.write('timeout %s\n' % timeout)
- else:
- cfgfile.write('timeout 10\n')
- cfgfile.close()
-
- for label in labels.split():
- localdata = d.createCopy()
-
- overrides = localdata.getVar('OVERRIDES', True)
- if not overrides:
- bb.fatal('OVERRIDES not defined')
-
- entryfile = "%s/%s.conf" % (s, label)
- d.appendVar("GUMMIBOOT_ENTRIES", " " + entryfile)
- try:
- entrycfg = open(entryfile, "w")
- except OSError:
- bb.fatal('Unable to open %s' % entryfile)
- localdata.setVar('OVERRIDES', label + ':' + overrides)
- bb.data.update_data(localdata)
-
- entrycfg.write('title %s\n' % label)
- entrycfg.write('linux /vmlinuz\n')
-
- append = localdata.getVar('APPEND', True)
- initrd = localdata.getVar('INITRD', True)
-
- if initrd:
- entrycfg.write('initrd /initrd\n')
- lb = label
- if label == "install":
- lb = "install-efi"
- entrycfg.write('options LABEL=%s ' % lb)
- if append:
- append = replace_rootfs_uuid(d, append)
- entrycfg.write('%s' % append)
- entrycfg.write('\n')
- entrycfg.close()
-}
diff --git a/import-layers/yocto-poky/meta/classes/gzipnative.bbclass b/import-layers/yocto-poky/meta/classes/gzipnative.bbclass
deleted file mode 100644
index 326cbbb6f..000000000
--- a/import-layers/yocto-poky/meta/classes/gzipnative.bbclass
+++ /dev/null
@@ -1,5 +0,0 @@
-EXTRANATIVEPATH += "pigz-native gzip-native"
-DEPENDS += "gzip-native"
-
-# tar may get run by do_unpack or do_populate_lic which could call gzip
-do_unpack[depends] += "gzip-native:do_populate_sysroot"
diff --git a/import-layers/yocto-poky/meta/classes/icecc.bbclass b/import-layers/yocto-poky/meta/classes/icecc.bbclass
index c57257151..77bf61133 100644
--- a/import-layers/yocto-poky/meta/classes/icecc.bbclass
+++ b/import-layers/yocto-poky/meta/classes/icecc.bbclass
@@ -101,7 +101,7 @@ def use_icecc(bb,d):
if icecc_is_allarch(bb, d):
return "no"
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
system_class_blacklist = []
user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL', False) or "none").split()
@@ -140,7 +140,7 @@ def use_icecc(bb,d):
return "yes"
def icecc_is_allarch(bb, d):
- return d.getVar("PACKAGE_ARCH", True) == "all" or bb.data.inherits_class('allarch', d)
+ return d.getVar("PACKAGE_ARCH") == "all" or bb.data.inherits_class('allarch', d)
def icecc_is_kernel(bb, d):
return \
diff --git a/import-layers/yocto-poky/meta/classes/image-buildinfo.bbclass b/import-layers/yocto-poky/meta/classes/image-buildinfo.bbclass
index 3003f5d25..213fb9cf9 100644
--- a/import-layers/yocto-poky/meta/classes/image-buildinfo.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image-buildinfo.bbclass
@@ -12,14 +12,17 @@
# Desired variables to display
IMAGE_BUILDINFO_VARS ?= "DISTRO DISTRO_VERSION"
+# Desired location of the output file in the image.
+IMAGE_BUILDINFO_FILE ??= "${sysconfdir}/build"
+
# From buildhistory.bbclass
def image_buildinfo_outputvars(vars, listvars, d):
vars = vars.split()
listvars = listvars.split()
ret = ""
for var in vars:
- value = d.getVar(var, True) or ""
- if (d.getVarFlag(var, 'type', True) == "list"):
+ value = d.getVar(var) or ""
+ if (d.getVarFlag(var, 'type') == "list"):
value = oe.utils.squashspaces(value)
ret += "%s = %s\n" % (var, value)
return ret.rstrip('\n')
@@ -28,7 +31,9 @@ def image_buildinfo_outputvars(vars, listvars, d):
def get_layer_git_status(path):
import subprocess
try:
- subprocess.check_output("cd %s; PSEUDO_UNLOAD=1 git diff --quiet --no-ext-diff" % path,
+ subprocess.check_output("""cd %s; export PSEUDO_UNLOAD=1; set -e;
+ git diff --quiet --no-ext-diff
+ git diff --quiet --no-ext-diff --cached""" % path,
shell=True,
stderr=subprocess.STDOUT)
return ""
@@ -40,7 +45,7 @@ def get_layer_git_status(path):
# Returns layer revisions along with their respective status
def get_layer_revs(d):
- layers = (d.getVar("BBLAYERS", True) or "").split()
+ layers = (d.getVar("BBLAYERS") or "").split()
medadata_revs = ["%-17s = %s:%s %s" % (os.path.basename(i), \
base_get_metadata_git_branch(i, None).strip(), \
base_get_metadata_git_revision(i, None), \
@@ -50,16 +55,16 @@ def get_layer_revs(d):
def buildinfo_target(d):
# Get context
- if d.getVar('BB_WORKERCONTEXT', True) != '1':
+ if d.getVar('BB_WORKERCONTEXT') != '1':
return ""
# Single and list variables to be read
- vars = (d.getVar("IMAGE_BUILDINFO_VARS", True) or "")
- listvars = (d.getVar("IMAGE_BUILDINFO_LVARS", True) or "")
+ vars = (d.getVar("IMAGE_BUILDINFO_VARS") or "")
+ listvars = (d.getVar("IMAGE_BUILDINFO_LVARS") or "")
return image_buildinfo_outputvars(vars, listvars, d)
# Write build information to target filesystem
python buildinfo () {
- with open(d.expand('${IMAGE_ROOTFS}${sysconfdir}/build'), 'w') as build:
+ with open(d.expand('${IMAGE_ROOTFS}${IMAGE_BUILDINFO_FILE}'), 'w') as build:
build.writelines((
'''-----------------------
Build Configuration: |
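
The rewritten get_layer_git_status() above fixes a blind spot: git diff --quiet only inspects unstaged changes, so a layer with staged-but-uncommitted edits used to report as clean. With set -e, the second (--cached) diff now fails the command for index changes too. The same check as a standalone sketch (the return strings are abbreviated; the class emits its own marker text):

import subprocess

def layer_git_status(path):
    # --quiet makes git diff exit non-zero when differences exist;
    # set -e aborts the script on the first dirty result.
    cmd = ("cd %s; export PSEUDO_UNLOAD=1; set -e; "
           "git diff --quiet --no-ext-diff; "
           "git diff --quiet --no-ext-diff --cached" % path)
    try:
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        return ""            # clean working tree and clean index
    except subprocess.CalledProcessError:
        return "-- modified"
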
diff --git a/import-layers/yocto-poky/meta/classes/image-container.bbclass b/import-layers/yocto-poky/meta/classes/image-container.bbclass
new file mode 100644
index 000000000..f002858bd
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/image-container.bbclass
@@ -0,0 +1,21 @@
+ROOTFS_BOOTSTRAP_INSTALL = ""
+IMAGE_TYPES_MASKED += "container"
+IMAGE_TYPEDEP_container = "tar.bz2"
+
+python __anonymous() {
+ if "container" in d.getVar("IMAGE_FSTYPES") and \
+ d.getVar("IMAGE_CONTAINER_NO_DUMMY") != "1" and \
+ "linux-dummy" not in d.getVar("PREFERRED_PROVIDER_virtual/kernel"):
+ msg = '"container" is in IMAGE_FSTYPES, but ' \
+ 'PREFERRED_PROVIDER_virtual/kernel is not "linux-dummy". ' \
+ 'Unless a particular kernel is needed, using linux-dummy will ' \
+ 'prevent a kernel from being built, which can reduce ' \
+ 'build times. If you don\'t want to use "linux-dummy", set ' \
+ '"IMAGE_CONTAINER_NO_DUMMY" to "1".'
+
+ # Raising skip recipe was Paul's clever idea. It causes the error to
+ # only be shown for the recipes actually requested to build, rather
+ # than bb.fatal which would appear for all recipes inheriting the
+ # class.
+ raise bb.parse.SkipRecipe(msg)
+}
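
Raising bb.parse.SkipRecipe from anonymous python marks the recipe as skipped during parsing, so the message only surfaces when a container image is actually requested; bb.fatal here would instead abort parsing for every recipe inheriting the class. A hedged sketch of the same guard as a named function (it assumes the BitBake datastore d, like the anonymous block above):

import bb

def check_container_kernel(d):
    # Complain only when "container" is requested without linux-dummy
    # and without the explicit IMAGE_CONTAINER_NO_DUMMY opt-out.
    if "container" in (d.getVar("IMAGE_FSTYPES") or "") and \
       d.getVar("IMAGE_CONTAINER_NO_DUMMY") != "1" and \
       "linux-dummy" not in (d.getVar("PREFERRED_PROVIDER_virtual/kernel") or ""):
        raise bb.parse.SkipRecipe("container image requested without linux-dummy")
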
diff --git a/import-layers/yocto-poky/meta/classes/image-live.bbclass b/import-layers/yocto-poky/meta/classes/image-live.bbclass
index 4a634dca9..a3d1b4e56 100644
--- a/import-layers/yocto-poky/meta/classes/image-live.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image-live.bbclass
@@ -51,8 +51,8 @@ IMAGE_TYPEDEP_hddimg = "ext4"
IMAGE_TYPES_MASKED += "live hddimg iso"
python() {
- image_b = d.getVar('IMAGE_BASENAME', True)
- initrd_i = d.getVar('INITRD_IMAGE_LIVE', True)
+ image_b = d.getVar('IMAGE_BASENAME')
+ initrd_i = d.getVar('INITRD_IMAGE_LIVE')
if image_b == initrd_i:
bb.error('INITRD_IMAGE_LIVE %s cannot use image live, hddimg or iso.' % initrd_i)
bb.fatal('Check IMAGE_FSTYPES and INITRAMFS_FSTYPES settings.')
@@ -264,9 +264,9 @@ build_hddimg() {
python do_bootimg() {
set_live_vm_vars(d, 'LIVE')
- if d.getVar("PCBIOS", True) == "1":
+ if d.getVar("PCBIOS") == "1":
bb.build.exec_func('build_syslinux_cfg', d)
- if d.getVar("EFI", True) == "1":
+ if d.getVar("EFI") == "1":
bb.build.exec_func('build_efi_cfg', d)
bb.build.exec_func('build_hddimg', d)
bb.build.exec_func('build_iso', d)
diff --git a/import-layers/yocto-poky/meta/classes/image-vm.bbclass b/import-layers/yocto-poky/meta/classes/image-vm.bbclass
index 2f35d6b4d..98bd92000 100644
--- a/import-layers/yocto-poky/meta/classes/image-vm.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image-vm.bbclass
@@ -93,7 +93,7 @@ build_boot_dd() {
parted $IMAGE print
- awk "BEGIN { printf \"$(echo ${DISK_SIGNATURE} | fold -w 2 | tac | paste -sd '' | sed 's/\(..\)/\\x&/g')\" }" | \
+ awk "BEGIN { printf \"$(echo ${DISK_SIGNATURE} | sed 's/\(..\)\(..\)\(..\)\(..\)/\\x\4\\x\3\\x\2\\x\1/')\" }" | \
dd of=$IMAGE bs=1 seek=440 conv=notrunc
OFFSET=`expr $END2 / 512`
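
Both the old fold/tac/paste pipeline and the new sed expression turn the 8-hex-digit DISK_SIGNATURE into the little-endian byte sequence that belongs at MBR offset 440; the sed capture groups simply do it with one process instead of three. The computation in isolation:

def disk_signature_le(sig):
    # "8e3a9f01" -> b"\x01\x9f\x3a\x8e": split into byte pairs and
    # reverse them, since the MBR stores the signature little-endian.
    assert len(sig) == 8
    pairs = [sig[i:i + 2] for i in range(0, 8, 2)]
    return bytes(int(p, 16) for p in reversed(pairs))

assert disk_signature_le("8e3a9f01") == b"\x01\x9f\x3a\x8e"
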
@@ -112,9 +112,9 @@ build_boot_dd() {
python do_bootdirectdisk() {
validate_disk_signature(d)
set_live_vm_vars(d, 'VM')
- if d.getVar("PCBIOS", True) == "1":
+ if d.getVar("PCBIOS") == "1":
bb.build.exec_func('build_syslinux_cfg', d)
- if d.getVar("EFI", True) == "1":
+ if d.getVar("EFI") == "1":
bb.build.exec_func('build_efi_cfg', d)
bb.build.exec_func('build_boot_dd', d)
}
@@ -132,7 +132,7 @@ def generate_disk_signature():
def validate_disk_signature(d):
import re
- disk_signature = d.getVar("DISK_SIGNATURE", True)
+ disk_signature = d.getVar("DISK_SIGNATURE")
if not re.match(r'^[0-9a-fA-F]{8}$', disk_signature):
bb.fatal("DISK_SIGNATURE '%s' must be an 8 digit hex string" % disk_signature)
@@ -158,11 +158,11 @@ create_qcow2_image () {
}
python do_vmimg() {
- if 'vmdk' in d.getVar('IMAGE_FSTYPES', True):
+ if 'vmdk' in d.getVar('IMAGE_FSTYPES'):
bb.build.exec_func('create_vmdk_image', d)
- if 'vdi' in d.getVar('IMAGE_FSTYPES', True):
+ if 'vdi' in d.getVar('IMAGE_FSTYPES'):
bb.build.exec_func('create_vdi_image', d)
- if 'qcow2' in d.getVar('IMAGE_FSTYPES', True):
+ if 'qcow2' in d.getVar('IMAGE_FSTYPES'):
bb.build.exec_func('create_qcow2_image', d)
}
diff --git a/import-layers/yocto-poky/meta/classes/image.bbclass b/import-layers/yocto-poky/meta/classes/image.bbclass
index a9ab2fac1..4bcfb87c9 100644
--- a/import-layers/yocto-poky/meta/classes/image.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image.bbclass
@@ -1,19 +1,17 @@
inherit rootfs_${IMAGE_PKGTYPE}
-# Only Linux SDKs support populate_sdk_ext, fall back to populate_sdk
+# Only Linux SDKs support populate_sdk_ext; fall back to populate_sdk_base
# in the non-Linux SDK_OS case, such as mingw32
-SDKEXTCLASS ?= "${@['populate_sdk', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS", True)]}"
+SDKEXTCLASS ?= "${@['populate_sdk_base', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS")]}"
inherit ${SDKEXTCLASS}
TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks; "
-inherit gzipnative
-
LICENSE = "MIT"
PACKAGES = ""
-DEPENDS += "${MLPREFIX}qemuwrapper-cross ${MLPREFIX}depmodwrapper-cross"
+DEPENDS += "${MLPREFIX}qemuwrapper-cross depmodwrapper-cross"
RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL}"
RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
@@ -31,7 +29,7 @@ IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs empty-root-password
IMAGE_GEN_DEBUGFS ?= "0"
# rootfs bootstrap install
-ROOTFS_BOOTSTRAP_INSTALL = "${@bb.utils.contains("IMAGE_FEATURES", "package-management", "", "${ROOTFS_PKGMANAGE_BOOTSTRAP}",d)}"
+ROOTFS_BOOTSTRAP_INSTALL = "run-postinsts"
# These packages will be removed from a read-only rootfs after all other
# packages have been installed
@@ -51,7 +49,7 @@ FEATURE_PACKAGES_splash = "${SPLASH}"
IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}'
def check_image_features(d):
- valid_features = (d.getVarFlag('IMAGE_FEATURES', 'validitems', True) or "").split()
+ valid_features = (d.getVarFlag('IMAGE_FEATURES', 'validitems') or "").split()
valid_features += d.getVarFlags('COMPLEMENTARY_GLOB').keys()
for var in d:
if var.startswith("PACKAGE_GROUP_"):
@@ -133,7 +131,7 @@ def build_live(d):
if bb.utils.contains("IMAGE_FSTYPES", "live", "live", "0", d) == "0": # live is not set but hob might set iso or hddimg
d.setVar('NOISO', bb.utils.contains('IMAGE_FSTYPES', "iso", "0", "1", d))
d.setVar('NOHDD', bb.utils.contains('IMAGE_FSTYPES', "hddimg", "0", "1", d))
- if d.getVar('NOISO', True) == "0" or d.getVar('NOHDD', True) == "0":
+ if d.getVar('NOISO') == "0" or d.getVar('NOHDD') == "0":
return "image-live"
return ""
return "image-live"
@@ -144,37 +142,39 @@ inherit ${IMAGE_TYPE_live}
IMAGE_TYPE_vm = '${@bb.utils.contains_any("IMAGE_FSTYPES", ["vmdk", "vdi", "qcow2", "hdddirect"], "image-vm", "", d)}'
inherit ${IMAGE_TYPE_vm}
+IMAGE_TYPE_container = '${@bb.utils.contains("IMAGE_FSTYPES", "container", "image-container", "", d)}'
+inherit ${IMAGE_TYPE_container}
+
+IMAGE_TYPE_wic = "image_types_wic"
+inherit ${IMAGE_TYPE_wic}
+
python () {
deps = " " + imagetypes_getdepends(d)
d.appendVarFlag('do_rootfs', 'depends', deps)
deps = ""
- for dep in (d.getVar('EXTRA_IMAGEDEPENDS', True) or "").split():
+ for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
deps += " %s:do_populate_sysroot" % dep
- d.appendVarFlag('do_build', 'depends', deps)
+ d.appendVarFlag('do_image_complete', 'depends', deps)
# Process IMAGE_FEATURES; we must do this before runtime_mapping_rename
# Check for replaced image features
features = set(oe.data.typed_value('IMAGE_FEATURES', d))
remain_features = features.copy()
for feature in features:
- replaces = set((d.getVar("IMAGE_FEATURES_REPLACES_%s" % feature, True) or "").split())
+ replaces = set((d.getVar("IMAGE_FEATURES_REPLACES_%s" % feature) or "").split())
remain_features -= replaces
# Check for conflicting image features
for feature in remain_features:
- conflicts = set((d.getVar("IMAGE_FEATURES_CONFLICTS_%s" % feature, True) or "").split())
+ conflicts = set((d.getVar("IMAGE_FEATURES_CONFLICTS_%s" % feature) or "").split())
temp = conflicts & remain_features
if temp:
- bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN', True), feature, ' '.join(list(temp))))
+ bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN'), feature, ' '.join(list(temp))))
d.setVar('IMAGE_FEATURES', ' '.join(sorted(list(remain_features))))
check_image_features(d)
- initramfs_image = d.getVar('INITRAMFS_IMAGE', True) or ""
- if initramfs_image != "":
- d.appendVarFlag('do_build', 'depends', " %s:do_bundle_initramfs" % d.getVar('PN', True))
- d.appendVarFlag('do_bundle_initramfs', 'depends', " %s:do_image_complete" % initramfs_image)
}
IMAGE_CLASSES += "image_types"
@@ -185,7 +185,7 @@ IMAGE_POSTPROCESS_COMMAND ?= ""
# some default locales
IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
-LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS', True).split()))}"
+LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}"
# Prefer image, but use the fallback files for lookups if the image ones
# aren't yet available.
@@ -199,6 +199,14 @@ PACKAGE_EXCLUDE[type] = "list"
fakeroot python do_rootfs () {
from oe.rootfs import create_rootfs
from oe.manifest import create_manifest
+ import logging
+
+ logger = d.getVar('BB_TASK_LOGGER', False)
+ if logger:
+ logcatcher = bb.utils.LogCatcher()
+ logger.addHandler(logcatcher)
+ else:
+ logcatcher = None
# NOTE: if you add, remove or significantly refactor the stages of this
# process then you should recalculate the weightings here. This is quite
@@ -212,20 +220,20 @@ fakeroot python do_rootfs () {
progress_reporter.next_stage()
# Handle package exclusions
- excl_pkgs = d.getVar("PACKAGE_EXCLUDE", True).split()
- inst_pkgs = d.getVar("PACKAGE_INSTALL", True).split()
- inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY", True).split()
+ excl_pkgs = d.getVar("PACKAGE_EXCLUDE").split()
+ inst_pkgs = d.getVar("PACKAGE_INSTALL").split()
+ inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY").split()
d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))
for pkg in excl_pkgs:
if pkg in inst_pkgs:
- bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN', True), inst_pkgs))
+ bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
inst_pkgs.remove(pkg)
if pkg in inst_attempt_pkgs:
- bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN', True), inst_pkgs))
+ bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
inst_attempt_pkgs.remove(pkg)
d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
@@ -235,7 +243,7 @@ fakeroot python do_rootfs () {
# We have to delay the runtime_mapping_rename until just before rootfs runs
# otherwise, the multilib renaming could step in and squash any fixups that
# may have occurred.
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
runtime_mapping_rename("PACKAGE_INSTALL", pn, d)
runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d)
runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d)
@@ -246,19 +254,19 @@ fakeroot python do_rootfs () {
progress_reporter.next_stage()
# generate rootfs
- create_rootfs(d, progress_reporter=progress_reporter)
+ create_rootfs(d, progress_reporter=progress_reporter, logcatcher=logcatcher)
progress_reporter.finish()
}
do_rootfs[dirs] = "${TOPDIR}"
do_rootfs[cleandirs] += "${S} ${IMGDEPLOYDIR}"
do_rootfs[umask] = "022"
-addtask rootfs before do_build
+addtask rootfs before do_build after do_prepare_recipe_sysroot
fakeroot python do_image () {
from oe.utils import execute_pre_post_process
- pre_process_cmds = d.getVar("IMAGE_PREPROCESS_COMMAND", True)
+ pre_process_cmds = d.getVar("IMAGE_PREPROCESS_COMMAND")
execute_pre_post_process(d, pre_process_cmds)
}
@@ -269,7 +277,7 @@ addtask do_image after do_rootfs before do_build
fakeroot python do_image_complete () {
from oe.utils import execute_pre_post_process
- post_process_cmds = d.getVar("IMAGE_POSTPROCESS_COMMAND", True)
+ post_process_cmds = d.getVar("IMAGE_POSTPROCESS_COMMAND")
execute_pre_post_process(d, post_process_cmds)
}
@@ -292,7 +300,7 @@ addtask do_image_complete after do_image before do_build
fakeroot python do_image_qa () {
from oe.utils import ImageQAFailed
- qa_cmds = (d.getVar('IMAGE_QA_COMMANDS', True) or '').split()
+ qa_cmds = (d.getVar('IMAGE_QA_COMMANDS') or '').split()
qamsg = ""
for cmd in qa_cmds:
@@ -307,40 +315,17 @@ fakeroot python do_image_qa () {
qamsg = qamsg + '\n'
if qamsg:
- imgname = d.getVar('IMAGE_NAME', True)
+ imgname = d.getVar('IMAGE_NAME')
bb.fatal("QA errors found whilst validating image: %s\n%s" % (imgname, qamsg))
}
addtask do_image_qa after do_image_complete before do_build
-#
-# Write environment variables used by wic
-# to tmp/sysroots/<machine>/imgdata/<image>.env
-#
-python do_rootfs_wicenv () {
- wicvars = d.getVar('WICVARS', True)
- if not wicvars:
- return
-
- stdir = d.getVar('STAGING_DIR_TARGET', True)
- outdir = os.path.join(stdir, 'imgdata')
- bb.utils.mkdirhier(outdir)
- basename = d.getVar('IMAGE_BASENAME', True)
- with open(os.path.join(outdir, basename) + '.env', 'w') as envf:
- for var in wicvars.split():
- value = d.getVar(var, True)
- if value:
- envf.write('%s="%s"\n' % (var, value.strip()))
-}
-addtask do_rootfs_wicenv after do_image before do_image_wic
-do_rootfs_wicenv[vardeps] += "${WICVARS}"
-do_rootfs_wicenv[prefuncs] = 'set_image_size'
-
def setup_debugfs_variables(d):
d.appendVar('IMAGE_ROOTFS', '-dbg')
d.appendVar('IMAGE_LINK_NAME', '-dbg')
d.appendVar('IMAGE_NAME','-dbg')
d.setVar('IMAGE_BUILDING_DEBUGFS', 'true')
- debugfs_image_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS', True)
+ debugfs_image_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS')
if debugfs_image_fstypes:
d.setVar('IMAGE_FSTYPES', debugfs_image_fstypes)
@@ -358,7 +343,7 @@ python () {
#
# Without de-duplication, gen_conversion_cmds() below
# would create the same compression command multiple times.
- ctypes = set(d.getVar('CONVERSIONTYPES', True).split())
+ ctypes = set(d.getVar('CONVERSIONTYPES').split())
old_overrides = d.getVar('OVERRIDES', False)
def _image_base_type(type):
@@ -375,11 +360,11 @@ python () {
return basetype
basetypes = {}
- alltypes = d.getVar('IMAGE_FSTYPES', True).split()
+ alltypes = d.getVar('IMAGE_FSTYPES').split()
typedeps = {}
- if d.getVar('IMAGE_GEN_DEBUGFS', True) == "1":
- debugfs_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS', True).split()
+ if d.getVar('IMAGE_GEN_DEBUGFS') == "1":
+ debugfs_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS').split()
for t in debugfs_fstypes:
alltypes.append("debugfs_" + t)
@@ -394,7 +379,7 @@ python () {
if t.startswith("debugfs_"):
t = t[8:]
debug = "debugfs_"
- deps = (d.getVar('IMAGE_TYPEDEP_' + t, True) or "").split()
+ deps = (d.getVar('IMAGE_TYPEDEP_' + t) or "").split()
vardeps.add('IMAGE_TYPEDEP_' + t)
if baset not in typedeps:
typedeps[baset] = set()
@@ -414,7 +399,7 @@ python () {
d.appendVarFlag('do_image', 'vardeps', ' '.join(vardeps))
- maskedtypes = (d.getVar('IMAGE_TYPES_MASKED', True) or "").split()
+ maskedtypes = (d.getVar('IMAGE_TYPES_MASKED') or "").split()
maskedtypes = [dbg + t for t in maskedtypes for dbg in ("", "debugfs_")]
for t in basetypes:
@@ -433,16 +418,17 @@ python () {
debug = "setup_debugfs "
realt = t[8:]
localdata.setVar('OVERRIDES', '%s:%s' % (realt, old_overrides))
- bb.data.update_data(localdata)
localdata.setVar('type', realt)
# Delete DATETIME so we don't expand any references to it now
# This means the task's hash can be stable rather than having hardcoded
# date/time values. It will get expanded at execution time.
# Similarly TMPDIR since otherwise we see QA stamp comparison problems
+ # Expand PV, else it can trigger get_srcrev, which can fail when these variables are unset
+ localdata.setVar('PV', d.getVar('PV'))
localdata.delVar('DATETIME')
localdata.delVar('TMPDIR')
- image_cmd = localdata.getVar("IMAGE_CMD", True)
+ image_cmd = localdata.getVar("IMAGE_CMD")
vardeps.add('IMAGE_CMD_' + realt)
if image_cmd:
cmds.append("\t" + image_cmd)
@@ -464,7 +450,7 @@ python () {
# Create input image first.
gen_conversion_cmds(type)
localdata.setVar('type', type)
- cmd = "\t" + (localdata.getVar("CONVERSION_CMD_" + ctype, True) or localdata.getVar("COMPRESS_CMD_" + ctype, True))
+ cmd = "\t" + (localdata.getVar("CONVERSION_CMD_" + ctype) or localdata.getVar("COMPRESS_CMD_" + ctype))
if cmd not in cmds:
cmds.append(cmd)
vardeps.add('CONVERSION_CMD_' + ctype)
@@ -515,17 +501,17 @@ python () {
def get_rootfs_size(d):
import subprocess
- rootfs_alignment = int(d.getVar('IMAGE_ROOTFS_ALIGNMENT', True))
- overhead_factor = float(d.getVar('IMAGE_OVERHEAD_FACTOR', True))
- rootfs_req_size = int(d.getVar('IMAGE_ROOTFS_SIZE', True))
- rootfs_extra_space = eval(d.getVar('IMAGE_ROOTFS_EXTRA_SPACE', True))
- rootfs_maxsize = d.getVar('IMAGE_ROOTFS_MAXSIZE', True)
- image_fstypes = d.getVar('IMAGE_FSTYPES', True) or ''
- initramfs_fstypes = d.getVar('INITRAMFS_FSTYPES', True) or ''
- initramfs_maxsize = d.getVar('INITRAMFS_MAXSIZE', True)
+ rootfs_alignment = int(d.getVar('IMAGE_ROOTFS_ALIGNMENT'))
+ overhead_factor = float(d.getVar('IMAGE_OVERHEAD_FACTOR'))
+ rootfs_req_size = int(d.getVar('IMAGE_ROOTFS_SIZE'))
+ rootfs_extra_space = eval(d.getVar('IMAGE_ROOTFS_EXTRA_SPACE'))
+ rootfs_maxsize = d.getVar('IMAGE_ROOTFS_MAXSIZE')
+ image_fstypes = d.getVar('IMAGE_FSTYPES') or ''
+ initramfs_fstypes = d.getVar('INITRAMFS_FSTYPES') or ''
+ initramfs_maxsize = d.getVar('INITRAMFS_MAXSIZE')
output = subprocess.check_output(['du', '-ks',
- d.getVar('IMAGE_ROOTFS', True)])
+ d.getVar('IMAGE_ROOTFS')])
size_kb = int(output.split()[0])
base_size = size_kb * overhead_factor
base_size = max(base_size, rootfs_req_size) + rootfs_extra_space
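
get_rootfs_size() measures the populated rootfs with du -ks, inflates it by IMAGE_OVERHEAD_FACTOR, enforces the IMAGE_ROOTFS_SIZE floor plus IMAGE_ROOTFS_EXTRA_SPACE, and rounds up to IMAGE_ROOTFS_ALIGNMENT; the clamping against IMAGE_ROOTFS_MAXSIZE continues in the next hunk. A simplified sketch of that arithmetic with made-up example values:

def rootfs_size_kb(du_kb, overhead=1.3, req_kb=65536, extra_kb=0, align_kb=4096):
    size = du_kb * overhead                # IMAGE_OVERHEAD_FACTOR
    size = max(size, req_kb) + extra_kb    # floor + IMAGE_ROOTFS_EXTRA_SPACE
    size = int(size + align_kb - 1)
    return size - (size % align_kb)        # round up to the alignment

# 100 MB of files, 30% overhead, 64 MB floor, 4 MB alignment:
print(rootfs_size_kb(102400))              # -> 135168 KB
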
@@ -541,7 +527,7 @@ def get_rootfs_size(d):
# Do not check image size of the debugfs image. This is not supposed
# to be deployed, etc., so it doesn't make sense to limit the size
# of the debugfs image.
- if (d.getVar('IMAGE_BUILDING_DEBUGFS', True) or "") == "true":
+ if (d.getVar('IMAGE_BUILDING_DEBUGFS') or "") == "true":
return base_size
# Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set)
@@ -572,13 +558,13 @@ python set_image_size () {
#
python create_symlinks() {
- deploy_dir = d.getVar('IMGDEPLOYDIR', True)
- img_name = d.getVar('IMAGE_NAME', True)
- link_name = d.getVar('IMAGE_LINK_NAME', True)
- manifest_name = d.getVar('IMAGE_MANIFEST', True)
- taskname = d.getVar("BB_CURRENTTASK", True)
+ deploy_dir = d.getVar('IMGDEPLOYDIR')
+ img_name = d.getVar('IMAGE_NAME')
+ link_name = d.getVar('IMAGE_LINK_NAME')
+ manifest_name = d.getVar('IMAGE_MANIFEST')
+ taskname = d.getVar("BB_CURRENTTASK")
subimages = (d.getVarFlag("do_" + taskname, 'subimages', False) or "").split()
- imgsuffix = d.getVarFlag("do_" + taskname, 'imgsuffix', True) or d.expand("${IMAGE_NAME_SUFFIX}.")
+ imgsuffix = d.getVarFlag("do_" + taskname, 'imgsuffix') or d.expand("${IMAGE_NAME_SUFFIX}.")
if not link_name:
return
@@ -604,19 +590,11 @@ do_patch[noexec] = "1"
do_configure[noexec] = "1"
do_compile[noexec] = "1"
do_install[noexec] = "1"
-do_populate_sysroot[noexec] = "1"
+deltask do_populate_sysroot
do_package[noexec] = "1"
-do_package_qa[noexec] = "1"
+deltask do_package_qa
do_packagedata[noexec] = "1"
do_package_write_ipk[noexec] = "1"
do_package_write_deb[noexec] = "1"
do_package_write_rpm[noexec] = "1"
-# Allow the kernel to be repacked with the initramfs and boot image file as a single file
-do_bundle_initramfs[depends] += "virtual/kernel:do_bundle_initramfs"
-do_bundle_initramfs[nostamp] = "1"
-do_bundle_initramfs[noexec] = "1"
-do_bundle_initramfs () {
- :
-}
-addtask bundle_initramfs after do_image_complete
diff --git a/import-layers/yocto-poky/meta/classes/image_types.bbclass b/import-layers/yocto-poky/meta/classes/image_types.bbclass
index 3bfa60ba2..8db18ac5a 100644
--- a/import-layers/yocto-poky/meta/classes/image_types.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image_types.bbclass
@@ -23,22 +23,22 @@ def imagetypes_getdepends(d):
types = typestring.split(".")
return types[0], types[1:]
- fstypes = set((d.getVar('IMAGE_FSTYPES', True) or "").split())
- fstypes |= set((d.getVar('IMAGE_FSTYPES_DEBUGFS', True) or "").split())
+ fstypes = set((d.getVar('IMAGE_FSTYPES') or "").split())
+ fstypes |= set((d.getVar('IMAGE_FSTYPES_DEBUGFS') or "").split())
deps = set()
for typestring in fstypes:
basetype, resttypes = split_types(typestring)
- adddep(d.getVar('IMAGE_DEPENDS_%s' % basetype, True) , deps)
+ adddep(d.getVar('IMAGE_DEPENDS_%s' % basetype) , deps)
- for typedepends in (d.getVar("IMAGE_TYPEDEP_%s" % basetype, True) or "").split():
+ for typedepends in (d.getVar("IMAGE_TYPEDEP_%s" % basetype) or "").split():
base, rest = split_types(typedepends)
- adddep(d.getVar('IMAGE_DEPENDS_%s' % base, True) , deps)
+ adddep(d.getVar('IMAGE_DEPENDS_%s' % base) , deps)
resttypes += rest
for ctype in resttypes:
- adddep(d.getVar("CONVERSION_DEPENDS_%s" % ctype, True), deps)
- adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype, True), deps)
+ adddep(d.getVar("CONVERSION_DEPENDS_%s" % ctype), deps)
+ adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype), deps)
# Sort the set so that ordering is consistent
return " ".join(sorted(deps))
@@ -74,6 +74,8 @@ oe_mkext234fs () {
# Create a sparse image block
dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}
+ # Error codes 0-3 indicate successful operation of fsck (no errors or errors corrected)
+ fsck.$fstype -pvfD ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype || [ $? -le 3 ]
}
IMAGE_CMD_ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
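
The fsck call added to oe_mkext234fs verifies the image right after mkfs, and the || [ $? -le 3 ] guard tolerates the "errors corrected" exit codes while still failing on real breakage. The same tolerance as a sketch; the -pvfD option string is copied from the class, everything else is illustrative:

import subprocess

def fsck_image(fstype, image):
    # e2fsck exit codes: 0 = clean, 1 = errors corrected, 2 = corrected
    # and reboot suggested, 3 = 1|2 combined; anything above 3 is a failure.
    rc = subprocess.call(["fsck.%s" % fstype, "-pvfD", image])
    if rc > 3:
        raise RuntimeError("fsck.%s on %s failed (exit %d)" % (fstype, image, rc))
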
@@ -82,12 +84,13 @@ IMAGE_CMD_ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
MIN_BTRFS_SIZE ?= "16384"
IMAGE_CMD_btrfs () {
- if [ ${ROOTFS_SIZE} -gt ${MIN_BTRFS_SIZE} ]; then
- dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs count=${ROOTFS_SIZE} bs=1024
- mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
- else
- bbfatal "Rootfs is too small for BTRFS (Rootfs Actual Size: ${ROOTFS_SIZE}, BTRFS Minimum Size: ${MIN_BTRFS_SIZE})"
+ size=${ROOTFS_SIZE}
+ if [ ${size} -lt ${MIN_BTRFS_SIZE} ] ; then
+ size=${MIN_BTRFS_SIZE}
+ bbwarn "Rootfs size is too small for BTRFS. Filesystem will be extended to ${size}K"
fi
+ dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs count=${size} bs=1024
+ mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
}
IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
@@ -192,95 +195,9 @@ IMAGE_CMD_ubi () {
IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
-WKS_FILE ??= "${IMAGE_BASENAME}.${MACHINE}.wks"
-WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks"
-WKS_SEARCH_PATH ?= "${THISDIR}:${@':'.join('%s/scripts/lib/wic/canned-wks' % l for l in '${BBPATH}:${COREBASE}'.split(':'))}"
-WKS_FULL_PATH = "${@wks_search('${WKS_FILES}'.split(), '${WKS_SEARCH_PATH}') or ''}"
-
-def wks_search(files, search_path):
- for f in files:
- if os.path.isabs(f):
- if os.path.exists(f):
- return f
- else:
- searched = bb.utils.which(search_path, f)
- if searched:
- return searched
-
-WIC_CREATE_EXTRA_ARGS ?= ""
-
-IMAGE_CMD_wic () {
- out="${IMGDEPLOYDIR}/${IMAGE_NAME}"
- wks="${WKS_FULL_PATH}"
- if [ -z "$wks" ]; then
- bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
- fi
-
- BUILDDIR="${TOPDIR}" wic create "$wks" --vars "${STAGING_DIR_TARGET}/imgdata/" -e "${IMAGE_BASENAME}" -o "$out/" ${WIC_CREATE_EXTRA_ARGS}
- mv "$out/build/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
- rm -rf "$out/"
-}
-IMAGE_CMD_wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES"
-
-# Rebuild when the wks file or vars in WICVARS change
-USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}"
-WKS_FILE_CHECKSUM = "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}"
-do_image_wic[file-checksums] += "${WKS_FILE_CHECKSUM}"
-
-python () {
- if d.getVar('USING_WIC', True) and 'do_bootimg' in d:
- bb.build.addtask('do_image_wic', '', 'do_bootimg', d)
-}
-
-python do_write_wks_template () {
- """Write out expanded template contents to WKS_FULL_PATH."""
- import re
-
- template_body = d.getVar('_WKS_TEMPLATE', True)
-
- # Remove any remnant variable references left behind by the expansion
- # due to undefined variables
- expand_var_regexp = re.compile(r"\${[^{}@\n\t :]+}")
- while True:
- new_body = re.sub(expand_var_regexp, '', template_body)
- if new_body == template_body:
- break
- else:
- template_body = new_body
-
- wks_file = d.getVar('WKS_FULL_PATH', True)
- with open(wks_file, 'w') as f:
- f.write(template_body)
-}
-
-python () {
- if d.getVar('USING_WIC', True):
- wks_file_u = d.getVar('WKS_FULL_PATH', False)
- wks_file = d.expand(wks_file_u)
- base, ext = os.path.splitext(wks_file)
- if ext == '.in' and os.path.exists(wks_file):
- wks_out_file = os.path.join(d.getVar('WORKDIR', True), os.path.basename(base))
- d.setVar('WKS_FULL_PATH', wks_out_file)
- d.setVar('WKS_TEMPLATE_PATH', wks_file_u)
- d.setVar('WKS_FILE_CHECKSUM', '${WKS_TEMPLATE_PATH}:True')
-
- try:
- with open(wks_file, 'r') as f:
- body = f.read()
- except (IOError, OSError) as exc:
- pass
- else:
- # Previously, I used expandWithRefs to get the dependency list
- # and add it to WICVARS, but there's no point re-parsing the
- # file in process_wks_template as well, so just put it in
- # a variable and let the metadata deal with the deps.
- d.setVar('_WKS_TEMPLATE', body)
- bb.build.addtask('do_write_wks_template', 'do_image_wic', None, d)
-}
-
EXTRA_IMAGECMD = ""
-inherit siteinfo
+inherit siteinfo kernel-arch
JFFS2_ENDIANNESS ?= "${@base_conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
JFFS2_ERASEBLOCK ?= "0x40000"
EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
@@ -328,6 +245,7 @@ IMAGE_TYPES = " \
hdddirect \
elf \
wic wic.gz wic.bz2 wic.lzma \
+ container \
"
# Compression is a special case of conversion. The old variable
@@ -336,12 +254,14 @@ IMAGE_TYPES = " \
# CONVERSION_CMD/DEPENDS.
COMPRESSIONTYPES ?= ""
-CONVERSIONTYPES = "gz bz2 lzma xz lz4 zip sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap ${COMPRESSIONTYPES}"
+CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot ${COMPRESSIONTYPES}"
CONVERSION_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
CONVERSION_CMD_gz = "gzip -f -9 -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
CONVERSION_CMD_bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
CONVERSION_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_THREADS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
-CONVERSION_CMD_lz4 = "lz4c -9 -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
+CONVERSION_CMD_lz4 = "lz4 -9 -z ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
+CONVERSION_CMD_lz4_legacy = "lz4 -9 -z -l ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
+CONVERSION_CMD_lzo = "lzop -9 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
CONVERSION_CMD_zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
CONVERSION_CMD_sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
CONVERSION_CMD_md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
@@ -351,14 +271,17 @@ CONVERSION_CMD_sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}
CONVERSION_CMD_sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
CONVERSION_CMD_sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
CONVERSION_CMD_bmap = "bmaptool create ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.bmap"
+CONVERSION_CMD_u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.u-boot"
CONVERSION_DEPENDS_lzma = "xz-native"
-CONVERSION_DEPENDS_gz = ""
+CONVERSION_DEPENDS_gz = "pigz-native"
CONVERSION_DEPENDS_bz2 = "pbzip2-native"
CONVERSION_DEPENDS_xz = "xz-native"
CONVERSION_DEPENDS_lz4 = "lz4-native"
+CONVERSION_DEPENDS_lzo = "lzop-native"
CONVERSION_DEPENDS_zip = "zip-native"
CONVERSION_DEPENDS_sum = "mtd-utils-native"
CONVERSION_DEPENDS_bmap = "bmap-tools-native"
+CONVERSION_DEPENDS_u-boot = "u-boot-mkimage-native"
RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4"
RUNNABLE_MACHINE_PATTERNS ?= "qemu"
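
With lzo and u-boot folded into CONVERSIONTYPES, chained types such as ext4.gz.u-boot no longer need the dedicated image_types_uboot class (deleted below): the first component selects IMAGE_CMD_*, and each later component applies its CONVERSION_CMD_* to the artifact produced so far. A toy resolver showing the chaining idea (the command strings are abbreviated stand-ins, not the exact variables above):

CONVERSION_CMDS = {
    "gz":     "gzip -f -9 -c {a} > {a}.gz",
    "u-boot": "mkimage -O linux -T ramdisk -C none -d {a} {a}.u-boot",
}

def plan(fstype, name="core-image-minimal.rootfs"):
    base, *conversions = fstype.split(".")
    artifact = "%s.%s" % (name, base)
    steps = ["IMAGE_CMD_%s -> %s" % (base, artifact)]
    for ctype in conversions:              # conversions apply left to right
        steps.append(CONVERSION_CMDS[ctype].format(a=artifact))
        artifact += "." + ctype
    return steps

for step in plan("ext4.gz.u-boot"):
    print(step)
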
@@ -371,7 +294,3 @@ IMAGE_EXTENSION_live = "hddimg iso"
# The IMAGE_TYPES_MASKED variable is used to mask out from IMAGE_FSTYPES
# images that will not be built at do_rootfs time: vmdk, vdi, qcow2, hdddirect, hddimg, iso, etc.
IMAGE_TYPES_MASKED ?= ""
-
-# The WICVARS variable is used to define list of bitbake variables used in wic code
-# variables from this list is written to <image>.env file
-WICVARS ?= "BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE HDDDIR IMAGE_BASENAME IMAGE_BOOT_FILES IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD ISODIR MACHINE_ARCH ROOTFS_SIZE STAGING_DATADIR STAGING_DIR_NATIVE STAGING_LIBDIR TARGET_SYS"
diff --git a/import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass b/import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass
deleted file mode 100644
index 933fa4d9c..000000000
--- a/import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass
+++ /dev/null
@@ -1,23 +0,0 @@
-inherit image_types kernel-arch
-
-oe_mkimage () {
- mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C $2 -n ${IMAGE_NAME} \
- -d ${IMGDEPLOYDIR}/$1 ${IMGDEPLOYDIR}/$1.u-boot
-}
-
-CONVERSIONTYPES += "gz.u-boot bz2.u-boot lzma.u-boot u-boot"
-
-CONVERSION_DEPENDS_u-boot = "u-boot-mkimage-native"
-CONVERSION_CMD_u-boot = "oe_mkimage ${IMAGE_NAME}.rootfs.${type} none"
-
-CONVERSION_DEPENDS_gz.u-boot = "u-boot-mkimage-native"
-CONVERSION_CMD_gz.u-boot = "${CONVERSION_CMD_gz}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.gz gzip"
-
-CONVERSION_DEPENDS_bz2.u-boot = "u-boot-mkimage-native"
-CONVERSION_CMD_bz2.u-boot = "${CONVERSION_CMD_bz2}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.bz2 bzip2"
-
-CONVERSION_DEPENDS_lzma.u-boot = "u-boot-mkimage-native"
-CONVERSION_CMD_lzma.u-boot = "${CONVERSION_CMD_lzma}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.lzma lzma"
-
-IMAGE_TYPES += "ext2.u-boot ext2.gz.u-boot ext2.bz2.u-boot ext2.lzma.u-boot ext3.gz.u-boot ext4.gz.u-boot cpio.gz.u-boot"
-
diff --git a/import-layers/yocto-poky/meta/classes/image_types_wic.bbclass b/import-layers/yocto-poky/meta/classes/image_types_wic.bbclass
new file mode 100644
index 000000000..68f251cfd
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/image_types_wic.bbclass
@@ -0,0 +1,117 @@
+# The WICVARS variable defines the list of bitbake variables used in wic code;
+# variables from this list are written to the <image>.env file
+WICVARS ?= "\
+ BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE FAKEROOTCMD IMAGE_BASENAME IMAGE_BOOT_FILES \
+ IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD INITRD_LIVE ISODIR RECIPE_SYSROOT_NATIVE \
+ ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS TRANSLATED_TARGET_ARCH"
+
+WKS_FILE ??= "${IMAGE_BASENAME}.${MACHINE}.wks"
+WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks"
+WKS_SEARCH_PATH ?= "${THISDIR}:${@':'.join('%s/wic' % p for p in '${BBPATH}'.split(':'))}:${@':'.join('%s/scripts/lib/wic/canned-wks' % l for l in '${BBPATH}:${COREBASE}'.split(':'))}"
+WKS_FULL_PATH = "${@wks_search(d.getVar('WKS_FILES').split(), d.getVar('WKS_SEARCH_PATH')) or ''}"
+
+def wks_search(files, search_path):
+ for f in files:
+ if os.path.isabs(f):
+ if os.path.exists(f):
+ return f
+ else:
+ searched = bb.utils.which(search_path, f)
+ if searched:
+ return searched
+
+WIC_CREATE_EXTRA_ARGS ?= ""
+
+IMAGE_CMD_wic () {
+ out="${IMGDEPLOYDIR}/${IMAGE_NAME}"
+ wks="${WKS_FULL_PATH}"
+ if [ -z "$wks" ]; then
+ bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
+ fi
+
+ BUILDDIR="${TOPDIR}" wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$out/" ${WIC_CREATE_EXTRA_ARGS}
+ mv "$out/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
+ rm -rf "$out/"
+}
+IMAGE_CMD_wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
+
+# Rebuild when the wks file or vars in WICVARS change
+USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}"
+WKS_FILE_CHECKSUM = "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}"
+do_image_wic[file-checksums] += "${WKS_FILE_CHECKSUM}"
+do_image_wic[depends] += "wic-tools:do_populate_sysroot"
+WKS_FILE_DEPENDS ??= ''
+DEPENDS += "${@ '${WKS_FILE_DEPENDS}' if d.getVar('USING_WIC') else '' }"
+
+python do_write_wks_template () {
+ """Write out expanded template contents to WKS_FULL_PATH."""
+ import re
+
+ template_body = d.getVar('_WKS_TEMPLATE')
+
+ # Remove any remnant variable references left behind by the expansion
+ # due to undefined variables
+ expand_var_regexp = re.compile(r"\${[^{}@\n\t :]+}")
+ while True:
+ new_body = re.sub(expand_var_regexp, '', template_body)
+ if new_body == template_body:
+ break
+ else:
+ template_body = new_body
+
+ wks_file = d.getVar('WKS_FULL_PATH')
+ with open(wks_file, 'w') as f:
+ f.write(template_body)
+}
+
+python () {
+ if d.getVar('USING_WIC'):
+ wks_file_u = d.getVar('WKS_FULL_PATH', False)
+ wks_file = d.expand(wks_file_u)
+ base, ext = os.path.splitext(wks_file)
+ if ext == '.in' and os.path.exists(wks_file):
+ wks_out_file = os.path.join(d.getVar('WORKDIR'), os.path.basename(base))
+ d.setVar('WKS_FULL_PATH', wks_out_file)
+ d.setVar('WKS_TEMPLATE_PATH', wks_file_u)
+ d.setVar('WKS_FILE_CHECKSUM', '${WKS_TEMPLATE_PATH}:True')
+
+ # We need to re-parse each time the file changes, and bitbake
+ # needs to be told about that explicitly.
+ bb.parse.mark_dependency(d, wks_file)
+
+ try:
+ with open(wks_file, 'r') as f:
+ body = f.read()
+ except (IOError, OSError) as exc:
+ pass
+ else:
+ # Previously, I used expandWithRefs to get the dependency list
+ # and add it to WICVARS, but there's no point re-parsing the
+ # file in process_wks_template as well, so just put it in
+ # a variable and let the metadata deal with the deps.
+ d.setVar('_WKS_TEMPLATE', body)
+ bb.build.addtask('do_write_wks_template', 'do_image_wic', None, d)
+}
+
+#
+# Write environment variables used by wic
+# to tmp/sysroots/<machine>/imgdata/<image>.env
+#
+python do_rootfs_wicenv () {
+ wicvars = d.getVar('WICVARS')
+ if not wicvars:
+ return
+
+ stdir = d.getVar('STAGING_DIR')
+ outdir = os.path.join(stdir, d.getVar('MACHINE'), 'imgdata')
+ bb.utils.mkdirhier(outdir)
+ basename = d.getVar('IMAGE_BASENAME')
+ with open(os.path.join(outdir, basename) + '.env', 'w') as envf:
+ for var in wicvars.split():
+ value = d.getVar(var)
+ if value:
+ envf.write('%s="%s"\n' % (var, value.strip()))
+}
+addtask do_rootfs_wicenv after do_image before do_image_wic
+do_rootfs_wicenv[vardeps] += "${WICVARS}"
+do_rootfs_wicenv[prefuncs] = 'set_image_size'
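
wks_search() above returns the first kickstart candidate that exists: absolute paths are taken as-is, relative names are resolved with bb.utils.which() along the ':'-separated WKS_SEARCH_PATH, which now also scans a wic/ directory in every BBPATH entry. A usage sketch (it assumes BitBake's lib/ is on sys.path; the layer paths are invented):

import os
import bb.utils

def wks_search(files, search_path):
    # First match wins; absolute candidates bypass the search path.
    for f in files:
        if os.path.isabs(f):
            if os.path.exists(f):
                return f
        else:
            searched = bb.utils.which(search_path, f)
            if searched:
                return searched

print(wks_search(
    ["core-image-minimal.qemux86.wks", "core-image-minimal.wks"],
    "/srv/layers/meta-bsp/wic:/srv/poky/scripts/lib/wic/canned-wks"))
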
diff --git a/import-layers/yocto-poky/meta/classes/insane.bbclass b/import-layers/yocto-poky/meta/classes/insane.bbclass
index 7bbe8b63a..0c11c3658 100644
--- a/import-layers/yocto-poky/meta/classes/insane.bbclass
+++ b/import-layers/yocto-poky/meta/classes/insane.bbclass
@@ -30,7 +30,7 @@ QA_SANE = "True"
WARN_QA ?= "ldflags useless-rpaths rpaths staticdev libdir xorg-driver-abi \
textrel already-stripped incompatible-license files-invalid \
installed-vs-shipped compile-host-path install-host-path \
- pn-overrides infodir build-deps file-rdeps \
+ pn-overrides infodir build-deps \
unknown-configure-option symlink-to-sysroot multilib \
invalid-packageconfig host-user-contaminated \
"
@@ -38,7 +38,7 @@ ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
split-strip packages-list pkgv-undefined var-undefined \
version-going-backwards expanded-d invalid-chars \
- license-checksum dev-elf \
+ license-checksum dev-elf file-rdeps \
"
FAKEROOT_QA = "host-user-contaminated"
FAKEROOT_QA[doc] = "QA tests which need to run under fakeroot. If any \
@@ -138,6 +138,7 @@ def package_qa_get_machine_dict(d):
"microblaze": (189, 0, 0, False, 32),
"microblazeeb":(189, 0, 0, False, 32),
"microblazeel":(189, 0, 0, True, 32),
+ "sh4": ( 42, 0, 0, True, 32),
},
"uclinux-uclibc" : {
"bfin": ( 106, 0, 0, True, 32),
@@ -173,12 +174,14 @@ def package_qa_get_machine_dict(d):
"linux-gnun32" : {
"mips64": ( 8, 0, 0, False, 32),
"mips64el": ( 8, 0, 0, True, 32),
+ "mipsisa64r6": ( 8, 0, 0, False, 32),
+ "mipsisa64r6el":( 8, 0, 0, True, 32),
},
}
# Add in any extra user supplied data which may come from a BSP layer, removing the
# need to always change this class directly
- extra_machdata = (d.getVar("PACKAGEQA_EXTRA_MACHDEFFUNCS", True) or "").split()
+ extra_machdata = (d.getVar("PACKAGEQA_EXTRA_MACHDEFFUNCS") or "").split()
for m in extra_machdata:
call = m + "(machdata, d)"
locs = { "machdata" : machdata, "d" : d}
@@ -193,23 +196,23 @@ def package_qa_clean_path(path, d, pkg=None):
TMPDIR is stripped, otherwise PKGDEST/pkg is stripped.
"""
if pkg:
- path = path.replace(os.path.join(d.getVar("PKGDEST", True), pkg), "/")
- return path.replace(d.getVar("TMPDIR", True), "/").replace("//", "/")
+ path = path.replace(os.path.join(d.getVar("PKGDEST"), pkg), "/")
+ return path.replace(d.getVar("TMPDIR"), "/").replace("//", "/")
def package_qa_write_error(type, error, d):
- logfile = d.getVar('QA_LOGFILE', True)
+ logfile = d.getVar('QA_LOGFILE')
if logfile:
- p = d.getVar('P', True)
+ p = d.getVar('P')
with open(logfile, "a+") as f:
f.write("%s: %s [%s]\n" % (p, error, type))
def package_qa_handle_error(error_class, error_msg, d):
package_qa_write_error(error_class, error_msg, d)
- if error_class in (d.getVar("ERROR_QA", True) or "").split():
+ if error_class in (d.getVar("ERROR_QA") or "").split():
bb.error("QA Issue: %s [%s]" % (error_msg, error_class))
d.setVar("QA_SANE", False)
return False
- elif error_class in (d.getVar("WARN_QA", True) or "").split():
+ elif error_class in (d.getVar("WARN_QA") or "").split():
bb.warn("QA Issue: %s [%s]" % (error_msg, error_class))
else:
bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
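
package_qa_handle_error() is the single dispatch point that decides whether a QA check is fatal, a warning, or a note, based purely on which of ERROR_QA/WARN_QA names it; promoting file-rdeps from WARN_QA to ERROR_QA at the top of this file therefore changes its severity without touching the check itself. A minimal sketch of the dispatch (the lists mirror the patched defaults, trimmed):

ERROR_QA = "license-checksum dev-elf file-rdeps".split()
WARN_QA = "ldflags build-deps infodir".split()

def handle_error(error_class, error_msg):
    if error_class in ERROR_QA:
        print("ERROR: QA Issue: %s [%s]" % (error_msg, error_class))
        return False   # caller records QA_SANE = False; the build fails later
    elif error_class in WARN_QA:
        print("WARNING: QA Issue: %s [%s]" % (error_msg, error_class))
    else:
        print("NOTE: QA Issue: %s [%s]" % (error_msg, error_class))
    return True

handle_error("file-rdeps", "/usr/bin/foo requires libbar, but no providers found")
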
@@ -225,7 +228,7 @@ QAPATHTEST[libexec] = "package_qa_check_libexec"
def package_qa_check_libexec(path,name, d, elf, messages):
# Skip the case where the default is explicitly /usr/libexec
- libexec = d.getVar('libexecdir', True)
+ libexec = d.getVar('libexecdir')
if libexec == "/usr/libexec":
return True
@@ -246,7 +249,7 @@ def package_qa_check_rpath(file,name, d, elf, messages):
if os.path.islink(file):
return
- bad_dirs = [d.getVar('BASE_WORKDIR', True), d.getVar('STAGING_DIR_TARGET', True)]
+ bad_dirs = [d.getVar('BASE_WORKDIR'), d.getVar('STAGING_DIR_TARGET')]
phdrs = elf.run_objdump("-p", d)
@@ -274,8 +277,8 @@ def package_qa_check_useless_rpaths(file, name, d, elf, messages):
if os.path.islink(file):
return
- libdir = d.getVar("libdir", True)
- base_libdir = d.getVar("base_libdir", True)
+ libdir = d.getVar("libdir")
+ base_libdir = d.getVar("base_libdir")
phdrs = elf.run_objdump("-p", d)
@@ -332,11 +335,11 @@ def package_qa_check_libdir(d):
"""
import re
- pkgdest = d.getVar('PKGDEST', True)
- base_libdir = d.getVar("base_libdir",True) + os.sep
- libdir = d.getVar("libdir", True) + os.sep
- libexecdir = d.getVar("libexecdir", True) + os.sep
- exec_prefix = d.getVar("exec_prefix", True) + os.sep
+ pkgdest = d.getVar('PKGDEST')
+ base_libdir = d.getVar("base_libdir") + os.sep
+ libdir = d.getVar("libdir") + os.sep
+ libexecdir = d.getVar("libexecdir") + os.sep
+ exec_prefix = d.getVar("exec_prefix") + os.sep
messages = []
@@ -351,10 +354,10 @@ def package_qa_check_libdir(d):
# Skip subdirectories for any packages with libdir in INSANE_SKIP
skippackages = []
for package in dirs:
- if 'libdir' in (d.getVar('INSANE_SKIP_' + package, True) or "").split():
+ if 'libdir' in (d.getVar('INSANE_SKIP_' + package) or "").split():
bb.note("Package %s skipping libdir QA test" % (package))
skippackages.append(package)
- elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-file-directory' and package.endswith("-dbg"):
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory' and package.endswith("-dbg"):
bb.note("Package %s skipping libdir QA test for PACKAGE_DEBUG_SPLIT_STYLE equals debug-file-directory" % (package))
skippackages.append(package)
for package in skippackages:
@@ -395,7 +398,7 @@ def package_qa_check_dbg(path, name, d, elf, messages):
if not "-dbg" in name and not "-ptest" in name:
if '.debug' in path.split(os.path.sep):
- messages("debug-files", "non debug package contains .debug directory: %s path %s" % \
+ package_qa_add_message(messages, "debug-files", "non debug package contains .debug directory: %s path %s" % \
(name, package_qa_clean_path(path,d)))
QAPATHTEST[perms] = "package_qa_check_perm"
@@ -405,7 +408,6 @@ def package_qa_check_perm(path,name,d, elf, messages):
"""
return
-
QAPATHTEST[unsafe-references-in-scripts] = "package_qa_check_unsafe_references_in_scripts"
def package_qa_check_unsafe_references_in_scripts(path, name, d, elf, messages):
"""
@@ -417,13 +419,13 @@ def package_qa_check_unsafe_references_in_scripts(path, name, d, elf, messages):
if not elf:
import stat
import subprocess
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
# Ensure we're checking an executable script
statinfo = os.stat(path)
if bool(statinfo.st_mode & stat.S_IXUSR):
# grep shell scripts for possible references to /exec_prefix/
- exec_prefix = d.getVar('exec_prefix', True)
+ exec_prefix = d.getVar('exec_prefix')
statement = "grep -e '%s/[^ :]\{1,\}/[^ :]\{1,\}' %s > /dev/null" % (exec_prefix, path)
if subprocess.call(statement, shell=True) == 0:
error_msg = pn + ": Found a reference to %s/ in %s" % (exec_prefix, path)
@@ -447,19 +449,19 @@ def unsafe_references_skippable(path, name, d):
return True
# Skip unusual rootfs layouts which make these tests irrelevant
- exec_prefix = d.getVar('exec_prefix', True)
+ exec_prefix = d.getVar('exec_prefix')
if exec_prefix == "":
return True
- pkgdest = d.getVar('PKGDEST', True)
+ pkgdest = d.getVar('PKGDEST')
pkgdest = pkgdest + "/" + name
pkgdest = os.path.abspath(pkgdest)
- base_bindir = pkgdest + d.getVar('base_bindir', True)
- base_sbindir = pkgdest + d.getVar('base_sbindir', True)
- base_libdir = pkgdest + d.getVar('base_libdir', True)
- bindir = pkgdest + d.getVar('bindir', True)
- sbindir = pkgdest + d.getVar('sbindir', True)
- libdir = pkgdest + d.getVar('libdir', True)
+ base_bindir = pkgdest + d.getVar('base_bindir')
+ base_sbindir = pkgdest + d.getVar('base_sbindir')
+ base_libdir = pkgdest + d.getVar('base_libdir')
+ bindir = pkgdest + d.getVar('bindir')
+ sbindir = pkgdest + d.getVar('sbindir')
+ libdir = pkgdest + d.getVar('libdir')
if base_bindir == bindir and base_sbindir == sbindir and base_libdir == libdir:
return True
@@ -481,13 +483,13 @@ def package_qa_check_arch(path,name,d, elf, messages):
if not elf:
return
- target_os = d.getVar('TARGET_OS', True)
- target_arch = d.getVar('TARGET_ARCH', True)
- provides = d.getVar('PROVIDES', True)
- bpn = d.getVar('BPN', True)
+ target_os = d.getVar('TARGET_OS')
+ target_arch = d.getVar('TARGET_ARCH')
+ provides = d.getVar('PROVIDES')
+ bpn = d.getVar('BPN')
if target_arch == "allarch":
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
package_qa_add_message(messages, "arch", pn + ": Recipe inherits the allarch class, but has packaged architecture-specific binaries")
return
@@ -507,7 +509,7 @@ def package_qa_check_arch(path,name,d, elf, messages):
# Check the architecture and endianness of the binary
is_32 = (("virtual/kernel" in provides) or bb.data.inherits_class("module", d)) and \
- (target_os == "linux-gnux32" or re.match('mips64.*32', d.getVar('DEFAULTTUNE', True)))
+ (target_os == "linux-gnux32" or re.match('mips64.*32', d.getVar('DEFAULTTUNE')))
if not ((machine == elf.machine()) or is_32):
package_qa_add_message(messages, "arch", "Architecture did not match (%s, expected %s) on %s" % \
(oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path,d)))
@@ -524,7 +526,7 @@ def package_qa_check_desktop(path, name, d, elf, messages):
Run all desktop files through desktop-file-validate.
"""
if path.endswith(".desktop"):
- desktop_file_validate = os.path.join(d.getVar('STAGING_BINDIR_NATIVE',True),'desktop-file-validate')
+ desktop_file_validate = os.path.join(d.getVar('STAGING_BINDIR_NATIVE'),'desktop-file-validate')
output = os.popen("%s %s" % (desktop_file_validate, path))
# This only produces output on errors
for l in output:
@@ -566,9 +568,9 @@ def package_qa_hash_style(path, name, d, elf, messages):
if os.path.islink(path):
return
- gnu_hash = "--hash-style=gnu" in d.getVar('LDFLAGS', True)
+ gnu_hash = "--hash-style=gnu" in d.getVar('LDFLAGS')
if not gnu_hash:
- gnu_hash = "--hash-style=both" in d.getVar('LDFLAGS', True)
+ gnu_hash = "--hash-style=both" in d.getVar('LDFLAGS')
if not gnu_hash:
return
@@ -607,7 +609,7 @@ def package_qa_check_buildpaths(path, name, d, elf, messages):
if path.find(name + "/CONTROL/") != -1 or path.find(name + "/DEBIAN/") != -1:
return
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
with open(path, 'rb') as f:
file_content = f.read().decode('utf-8', errors='ignore')
if tmpdir in file_content:
@@ -626,8 +628,8 @@ def package_qa_check_xorg_driver_abi(path, name, d, elf, messages):
driverdir = d.expand("${libdir}/xorg/modules/drivers/")
if driverdir in path and path.endswith(".so"):
- mlprefix = d.getVar('MLPREFIX', True) or ''
- for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + name, True) or ""):
+ mlprefix = d.getVar('MLPREFIX') or ''
+ for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + name) or ""):
if rdep.startswith("%sxorg-abi-" % mlprefix):
return
package_qa_add_message(messages, "xorg-driver-abi", "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
@@ -650,9 +652,9 @@ def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
if os.path.islink(path):
target = os.readlink(path)
if os.path.isabs(target):
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
if target.startswith(tmpdir):
- trimmed = path.replace(os.path.join (d.getVar("PKGDEST", True), name), "")
+ trimmed = path.replace(os.path.join (d.getVar("PKGDEST"), name), "")
package_qa_add_message(messages, "symlink-to-sysroot", "Symlink %s in %s points to TMPDIR" % (trimmed, name))
# Check license variables
@@ -664,17 +666,17 @@ python populate_lic_qa_checksum() {
import tempfile
sane = True
- lic_files = d.getVar('LIC_FILES_CHKSUM', True) or ''
- lic = d.getVar('LICENSE', True)
- pn = d.getVar('PN', True)
+ lic_files = d.getVar('LIC_FILES_CHKSUM') or ''
+ lic = d.getVar('LICENSE')
+ pn = d.getVar('PN')
if lic == "CLOSED":
return
- if not lic_files and d.getVar('SRC_URI', True):
+ if not lic_files and d.getVar('SRC_URI'):
sane = package_qa_handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
- srcdir = d.getVar('S', True)
+ srcdir = d.getVar('S')
for url in lic_files.split():
try:
@@ -696,17 +698,21 @@ python populate_lic_qa_checksum() {
if (not beginline) and (not endline):
md5chksum = bb.utils.md5_file(srclicfile)
+ with open(srclicfile, 'rb') as f:
+ license = f.read()
else:
fi = open(srclicfile, 'rb')
fo = tempfile.NamedTemporaryFile(mode='wb', prefix='poky.', suffix='.tmp', delete=False)
tmplicfile = fo.name;
lineno = 0
linesout = 0
+ license = []
for line in fi:
lineno += 1
if (lineno >= beginline):
if ((lineno <= endline) or not endline):
fo.write(line)
+ license.append(line)
linesout += 1
else:
break
@@ -714,6 +720,7 @@ python populate_lic_qa_checksum() {
fo.close()
fi.close()
md5chksum = bb.utils.md5_file(tmplicfile)
+ license = b''.join(license)
os.unlink(tmplicfile)
if recipemd5 == md5chksum:
@@ -722,6 +729,30 @@ python populate_lic_qa_checksum() {
if recipemd5:
msg = pn + ": The LIC_FILES_CHKSUM does not match for " + url
msg = msg + "\n" + pn + ": The new md5 checksum is " + md5chksum
+ try:
+ license_lines = license.decode('utf-8').split('\n')
+ except:
+ # License text might not be valid UTF-8, in which
+ # case we don't know how to include it in our output
+ # and have to skip it.
+ pass
+ else:
+ max_lines = int(d.getVar('QA_MAX_LICENSE_LINES') or 20)
+ if not license_lines or license_lines[-1] != '':
+ # Ensure that our license text ends with a line break
+ # (will be added with join() below).
+ license_lines.append('')
+ remove = len(license_lines) - max_lines
+ if remove > 0:
+ start = max_lines // 2
+ end = start + remove - 1
+ del license_lines[start:end]
+ license_lines.insert(start, '...')
+ msg = msg + "\n" + pn + ": Here is the selected license text:" + \
+ "\n" + \
+ "{:v^70}".format(" beginline=%d " % beginline if beginline else "") + \
+ "\n" + "\n".join(license_lines) + \
+ "{:^^70}".format(" endline=%d " % endline if endline else "")
if beginline:
if endline:
srcfiledesc = "%s (lines %d through to %d)" % (srclicfile, beginline, endline)
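
The new diagnostic prints the license text whose checksum mismatched, capped at QA_MAX_LICENSE_LINES (default 20) by cutting out the middle and splicing in a literal '...', so both the head and the tail of the file stay visible. The trimming step in isolation:

def trim_license(lines, max_lines=20):
    if not lines or lines[-1] != '':
        lines.append('')           # guarantee a trailing line break on join
    remove = len(lines) - max_lines
    if remove > 0:
        start = max_lines // 2
        del lines[start:start + remove - 1]
        lines.insert(start, '...')
    return '\n'.join(lines)

print(trim_license(["line %d" % i for i in range(1, 31)], max_lines=10))
# keeps lines 1-5, then '...', then lines 26-30
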
@@ -752,8 +783,9 @@ def package_qa_check_staged(path,d):
"""
sane = True
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
workdir = os.path.join(tmpdir, "work")
+ recipesysroot = d.getVar("RECIPE_SYSROOT")
if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
pkgconfigcheck = workdir
@@ -769,12 +801,14 @@ def package_qa_check_staged(path,d):
if file.endswith(".la"):
with open(path) as f:
file_content = f.read()
+ file_content = file_content.replace(recipesysroot, "")
if workdir in file_content:
error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
sane = package_qa_handle_error("la", error_msg, d)
elif file.endswith(".pc"):
with open(path) as f:
file_content = f.read()
+ file_content = file_content.replace(recipesysroot, "")
if pkgconfigcheck in file_content:
error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
sane = package_qa_handle_error("pkgconfig", error_msg, d)
@@ -803,8 +837,8 @@ def package_qa_walk(warnfuncs, errorfuncs, skip, package, d):
import oe.qa
# if this throws an exception, fix the dict above
- target_os = d.getVar('TARGET_OS', True)
- target_arch = d.getVar('TARGET_ARCH', True)
+ target_os = d.getVar('TARGET_OS')
+ target_arch = d.getVar('TARGET_ARCH')
warnings = {}
errors = {}
@@ -833,11 +867,10 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
if not "-dbg" in pkg and not "packagegroup-" in pkg and not "-image" in pkg:
localdata = bb.data.createCopy(d)
- localdata.setVar('OVERRIDES', localdata.getVar('OVERRIDES', True) + ':' + pkg)
- bb.data.update_data(localdata)
+ localdata.setVar('OVERRIDES', localdata.getVar('OVERRIDES') + ':' + pkg)
# Now check the RDEPENDS
- rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS', True) or "")
+ rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS') or "")
# Now do the sanity check!!!
if "build-deps" not in skip:
@@ -853,7 +886,7 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
continue
if not rdep_data or not 'PN' in rdep_data:
- pkgdata_dir = d.getVar("PKGDATA_DIR", True)
+ pkgdata_dir = d.getVar("PKGDATA_DIR")
try:
possibles = os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdepend))
except OSError:
@@ -873,14 +906,15 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
if "file-rdeps" not in skip:
ignored_file_rdeps = set(['/bin/sh', '/usr/bin/env', 'rtld(GNU_HASH)'])
if bb.data.inherits_class('nativesdk', d):
- ignored_file_rdeps |= set(['/bin/bash', '/usr/bin/perl'])
+ ignored_file_rdeps |= set(['/bin/bash', '/usr/bin/perl', 'perl'])
# For Saving the FILERDEPENDS
filerdepends = {}
rdep_data = oe.packagedata.read_subpkgdata(pkg, d)
for key in rdep_data:
if key.startswith("FILERDEPENDS_"):
- for subkey in rdep_data[key].split():
- if subkey not in ignored_file_rdeps:
+ for subkey in bb.utils.explode_deps(rdep_data[key]):
+ if subkey not in ignored_file_rdeps and \
+ not subkey.startswith('perl('):
# We already know it starts with FILERDEPENDS_
filerdepends[subkey] = key[13:]
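
The switch from .split() to bb.utils.explode_deps() matters because FILERDEPENDS_* values can carry version constraints, and naive whitespace splitting turns the version syntax into bogus "dependencies". An illustration (assumes BitBake's lib/ is on sys.path; expected output shown as comments):

import bb.utils

deps = "perl (>= 5.8.0) update-rc.d bash"
print(deps.split())
# ['perl', '(>=', '5.8.0)', 'update-rc.d', 'bash']
print(bb.utils.explode_deps(deps))
# ['perl', 'update-rc.d', 'bash']
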
@@ -895,11 +929,10 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
sub_rdeps = rdep_data.get("RDEPENDS_" + rdep)
if not sub_rdeps:
continue
- for sub_rdep in sub_rdeps.split():
+ for sub_rdep in bb.utils.explode_deps(sub_rdeps):
if sub_rdep in done:
continue
- if not sub_rdep.startswith('(') and \
- oe.packagedata.has_subpkgdata(sub_rdep, d):
+ if oe.packagedata.has_subpkgdata(sub_rdep, d):
# It's a new rdep
done.append(sub_rdep)
new.append(sub_rdep)
@@ -912,16 +945,20 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
# python itself is not a package, but python-core provides it, so
# skip checking /usr/bin/python if python is in the rdeps, in
# case there is a RDEPENDS_pkg = "python" in the recipe.
- for py in [ d.getVar('MLPREFIX', True) + "python", "python" ]:
+ for py in [ d.getVar('MLPREFIX') + "python", "python" ]:
if py in done:
filerdepends.pop("/usr/bin/python",None)
done.remove(py)
for rdep in done:
+ # The file dependencies may contain package names, e.g.,
+ # perl
+ filerdepends.pop(rdep,None)
+
# For Saving the FILERPROVIDES, RPROVIDES and FILES_INFO
rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
for key in rdep_data:
if key.startswith("FILERPROVIDES_") or key.startswith("RPROVIDES_"):
- for subkey in rdep_data[key].split():
+ for subkey in bb.utils.explode_deps(rdep_data[key]):
filerdepends.pop(subkey,None)
# Add the files list to the rprovides
if key == "FILES_INFO":
@@ -935,17 +972,16 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
for key in filerdepends:
error_msg = "%s contained in package %s requires %s, but no providers found in RDEPENDS_%s?" % \
(filerdepends[key].replace("_%s" % pkg, "").replace("@underscore@", "_"), pkg, key, pkg)
- package_qa_handle_error("file-rdeps", error_msg, d)
+ package_qa_handle_error("file-rdeps", error_msg, d)
def package_qa_check_deps(pkg, pkgdest, skip, d):
localdata = bb.data.createCopy(d)
localdata.setVar('OVERRIDES', pkg)
- bb.data.update_data(localdata)
def check_valid_deps(var):
try:
- rvar = bb.utils.explode_dep_versions2(localdata.getVar(var, True) or "")
+ rvar = bb.utils.explode_dep_versions2(localdata.getVar(var) or "")
except ValueError as e:
bb.fatal("%s_%s: %s" % (var, pkg, e))
for dep in rvar:
@@ -968,10 +1004,10 @@ def package_qa_check_expanded_d(package, d, messages):
variables, warn the user to use it correctly.
"""
sane = True
- expanded_d = d.getVar('D', True)
+ expanded_d = d.getVar('D')
for var in 'FILES','pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm':
- bbvar = d.getVar(var + "_" + package, True) or ""
+ bbvar = d.getVar(var + "_" + package) or ""
if expanded_d in bbvar:
if var == 'FILES':
package_qa_add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % package)
@@ -984,7 +1020,7 @@ def package_qa_check_expanded_d(package, d, messages):
def package_qa_check_encoding(keys, encode, d):
def check_encoding(key, enc):
sane = True
- value = d.getVar(key, True)
+ value = d.getVar(key)
if value:
try:
s = value.encode(enc)
@@ -1009,8 +1045,8 @@ def package_qa_check_host_user(path, name, d, elf, messages):
if not os.path.lexists(path):
return
- dest = d.getVar('PKGDEST', True)
- pn = d.getVar('PN', True)
+ dest = d.getVar('PKGDEST')
+ pn = d.getVar('PN')
home = os.path.join(dest, 'home')
if path == home or path.startswith(home + os.sep):
return
@@ -1023,12 +1059,12 @@ def package_qa_check_host_user(path, name, d, elf, messages):
raise
else:
rootfs_path = path[len(dest):]
- check_uid = int(d.getVar('HOST_USER_UID', True))
+ check_uid = int(d.getVar('HOST_USER_UID'))
if stat.st_uid == check_uid:
package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, rootfs_path, check_uid))
return False
- check_gid = int(d.getVar('HOST_USER_GID', True))
+ check_gid = int(d.getVar('HOST_USER_GID'))
if stat.st_gid == check_gid:
package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, rootfs_path, check_gid))
return False
@@ -1046,8 +1082,8 @@ python do_package_qa () {
# Check non UTF-8 characters on recipe's metadata
package_qa_check_encoding(['DESCRIPTION', 'SUMMARY', 'LICENSE', 'SECTION'], 'utf-8', d)
- logdir = d.getVar('T', True)
- pkg = d.getVar('PN', True)
+ logdir = d.getVar('T')
+ pkg = d.getVar('PN')
# Check the compile log for host contamination
compilelog = os.path.join(logdir,"log.do_compile")
@@ -1070,8 +1106,8 @@ python do_package_qa () {
package_qa_handle_error("install-host-path", msg, d)
# Scan the packages...
- pkgdest = d.getVar('PKGDEST', True)
- packages = set((d.getVar('PACKAGES', True) or '').split())
+ pkgdest = d.getVar('PKGDEST')
+ packages = set((d.getVar('PACKAGES') or '').split())
cpath = oe.cachedpath.CachedPath()
global pkgfiles
@@ -1100,7 +1136,7 @@ python do_package_qa () {
testmatrix = d.getVarFlags(matrix_name) or {}
g = globals()
warnchecks = []
- for w in (d.getVar("WARN_QA", True) or "").split():
+ for w in (d.getVar("WARN_QA") or "").split():
if w in skip:
continue
if w in testmatrix and testmatrix[w] in g:
@@ -1109,7 +1145,7 @@ python do_package_qa () {
oe.utils.write_ld_so_conf(d)
errorchecks = []
- for e in (d.getVar("ERROR_QA", True) or "").split():
+ for e in (d.getVar("ERROR_QA") or "").split():
if e in skip:
continue
if e in testmatrix and testmatrix[e] in g:
@@ -1118,7 +1154,8 @@ python do_package_qa () {
oe.utils.write_ld_so_conf(d)
return warnchecks, errorchecks
- skip = (d.getVar('INSANE_SKIP_' + package, True) or "").split()
+ skip = set((d.getVar('INSANE_SKIP') or "").split() +
+ (d.getVar('INSANE_SKIP_' + package) or "").split())
if skip:
bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
@@ -1138,15 +1175,18 @@ python do_package_qa () {
package_qa_check_rdepends(package, pkgdest, skip, taskdeps, packages, d)
package_qa_check_deps(package, pkgdest, skip, d)
- if 'libdir' in d.getVar("ALL_QA", True).split():
+ if 'libdir' in d.getVar("ALL_QA").split():
package_qa_check_libdir(d)
- qa_sane = d.getVar("QA_SANE", True)
+ qa_sane = d.getVar("QA_SANE")
if not qa_sane:
bb.fatal("QA run found fatal errors. Please consider fixing them.")
bb.note("DONE with PACKAGE QA")
}
+# binutils is used for most checks, so it needs to be set as a dependency.
+# POPULATESYSROOTDEPS is defined in the staging class.
+do_package_qa[depends] += "${POPULATESYSROOTDEPS}"
do_package_qa[vardepsexclude] = "BB_TASKDEPDATA"
do_package_qa[rdeptask] = "do_packagedata"
addtask do_package_qa after do_packagedata do_package before do_build
@@ -1174,7 +1214,7 @@ python do_qa_configure() {
###########################################################################
configs = []
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
if bb.data.inherits_class('autotools', d):
bb.note("Checking autotools environment for common misconfiguration")
@@ -1195,16 +1235,16 @@ Rerun configure task after fixing this.""")
# Check gettext configuration and dependencies are correct
###########################################################################
- cnf = d.getVar('EXTRA_OECONF', True) or ""
- if "gettext" not in d.getVar('P', True) and "gcc-runtime" not in d.getVar('P', True) and "--disable-nls" not in cnf:
- ml = d.getVar("MLPREFIX", True) or ""
+ cnf = d.getVar('EXTRA_OECONF') or ""
+ if "gettext" not in d.getVar('P') and "gcc-runtime" not in d.getVar('P') and "--disable-nls" not in cnf:
+ ml = d.getVar("MLPREFIX") or ""
if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk', d):
gt = "gettext-native"
elif bb.data.inherits_class('cross-canadian', d):
gt = "nativesdk-gettext"
else:
gt = "virtual/" + ml + "gettext"
- deps = bb.utils.explode_deps(d.getVar('DEPENDS', True) or "")
+ deps = bb.utils.explode_deps(d.getVar('DEPENDS') or "")
if gt not in deps:
for config in configs:
gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
@@ -1219,40 +1259,40 @@ Missing inherit gettext?""" % (gt, config))
bb.note("Checking configure output for unrecognised options")
try:
flag = "WARNING: unrecognized options:"
- log = os.path.join(d.getVar('B', True), 'config.log')
+ log = os.path.join(d.getVar('B'), 'config.log')
output = subprocess.check_output(['grep', '-F', flag, log]).decode("utf-8").replace(', ', ' ')
options = set()
for line in output.splitlines():
options |= set(line.partition(flag)[2].split())
- whitelist = set(d.getVar("UNKNOWN_CONFIGURE_WHITELIST", True).split())
+ whitelist = set(d.getVar("UNKNOWN_CONFIGURE_WHITELIST").split())
options -= whitelist
if options:
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
error_msg = pn + ": configure was passed unrecognised options: " + " ".join(options)
package_qa_handle_error("unknown-configure-option", error_msg, d)
except subprocess.CalledProcessError:
pass
# Check invalid PACKAGECONFIG
- pkgconfig = (d.getVar("PACKAGECONFIG", True) or "").split()
+ pkgconfig = (d.getVar("PACKAGECONFIG") or "").split()
if pkgconfig:
pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
for pconfig in pkgconfig:
if pconfig not in pkgconfigflags:
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
error_msg = "%s: invalid PACKAGECONFIG: %s" % (pn, pconfig)
package_qa_handle_error("invalid-packageconfig", error_msg, d)
- qa_sane = d.getVar("QA_SANE", True)
+ qa_sane = d.getVar("QA_SANE")
if not qa_sane:
bb.fatal("Fatal QA errors found, failing task.")
}
python do_qa_unpack() {
- src_uri = d.getVar('SRC_URI', True)
- s_dir = d.getVar('S', True)
+ src_uri = d.getVar('SRC_URI')
+ s_dir = d.getVar('S')
if src_uri and not os.path.exists(s_dir):
- bb.warn('%s: the directory %s (%s) pointed to by the S variable doesn\'t exist - please set S within the recipe to point to where the source has been unpacked to' % (d.getVar('PN', True), d.getVar('S', False), s_dir))
+ bb.warn('%s: the directory %s (%s) pointed to by the S variable doesn\'t exist - please set S within the recipe to point to where the source has been unpacked to' % (d.getVar('PN'), d.getVar('S', False), s_dir))
}
# The Staging Func, to check all staging
@@ -1268,7 +1308,7 @@ do_configure[postfuncs] += "do_qa_configure "
do_unpack[postfuncs] += "do_qa_unpack"
python () {
- tests = d.getVar('ALL_QA', True).split()
+ tests = d.getVar('ALL_QA').split()
if "desktop" in tests:
d.appendVar("PACKAGE_DEPENDS", " desktop-file-utils-native")
@@ -1277,7 +1317,7 @@ python () {
###########################################################################
# Checking ${FILESEXTRAPATHS}
- extrapaths = (d.getVar("FILESEXTRAPATHS", True) or "")
+ extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
if '__default' not in extrapaths.split(":"):
msg = "FILESEXTRAPATHS-variable, must always use _prepend (or _append)\n"
msg += "type of assignment, and don't forget the colon.\n"
@@ -1289,29 +1329,29 @@ python () {
msg += "%s\n" % extrapaths
bb.warn(msg)
- overrides = d.getVar('OVERRIDES', True).split(':')
- pn = d.getVar('PN', True)
+ overrides = d.getVar('OVERRIDES').split(':')
+ pn = d.getVar('PN')
if pn in overrides:
- msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE", True), pn)
+ msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE"), pn)
package_qa_handle_error("pn-overrides", msg, d)
issues = []
- if (d.getVar('PACKAGES', True) or "").split():
- for dep in (d.getVar('QADEPENDS', True) or "").split():
+ if (d.getVar('PACKAGES') or "").split():
+ for dep in (d.getVar('QADEPENDS') or "").split():
d.appendVarFlag('do_package_qa', 'depends', " %s:do_populate_sysroot" % dep)
for var in 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RCONFLICTS', 'RPROVIDES', 'RREPLACES', 'FILES', 'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm', 'ALLOW_EMPTY':
if d.getVar(var, False):
issues.append(var)
- fakeroot_tests = d.getVar('FAKEROOT_QA', True).split()
+ fakeroot_tests = d.getVar('FAKEROOT_QA').split()
if set(tests) & set(fakeroot_tests):
d.setVarFlag('do_package_qa', 'fakeroot', '1')
d.appendVarFlag('do_package_qa', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
else:
d.setVarFlag('do_package_qa', 'rdeptask', '')
for i in issues:
- package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE", True), i), d)
- qa_sane = d.getVar("QA_SANE", True)
+ package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE"), i), d)
+ qa_sane = d.getVar("QA_SANE")
if not qa_sane:
bb.fatal("Fatal QA errors found, failing task.")
}
diff --git a/import-layers/yocto-poky/meta/classes/kernel-arch.bbclass b/import-layers/yocto-poky/meta/classes/kernel-arch.bbclass
index ea976c66b..d036fcf20 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-arch.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-arch.bbclass
@@ -19,7 +19,7 @@ valid_archs = "alpha cris ia64 \
def map_kernel_arch(a, d):
import re
- valid_archs = d.getVar('valid_archs', True).split()
+ valid_archs = d.getVar('valid_archs').split()
if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
elif re.match('armeb$', a): return 'arm'
@@ -32,9 +32,11 @@ def map_kernel_arch(a, d):
elif re.match('microblazee[bl]', a): return 'microblaze'
elif a in valid_archs: return a
else:
+ if not d.getVar("TARGET_OS").startswith("linux"):
+ return a
bb.error("cannot map '%s' to a linux kernel architecture" % a)
-export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH', True), d)}"
+export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH'), d)}"
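
Editor's note: `map_kernel_arch()` translates an OE `TARGET_ARCH` into the directory name the kernel tree uses under `arch/`; the new guard passes non-Linux `TARGET_OS` targets through unmapped instead of erroring out. A rough standalone illustration of the regex mapping:

```python
import re

# Rough standalone illustration of the TARGET_ARCH -> kernel ARCH mapping
# done by map_kernel_arch() above (valid_archs lookup and error handling
# omitted).
def map_kernel_arch(a):
    if re.match(r'(i.86|athlon|x86.64)$', a):
        return 'x86'
    if re.match(r'armeb$', a):
        return 'arm'
    return a          # assume it is already a valid kernel arch name

print(map_kernel_arch('x86_64'))   # -> x86
print(map_kernel_arch('armeb'))    # -> arm
```
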
def map_uboot_arch(a, d):
import re
@@ -43,7 +45,7 @@ def map_uboot_arch(a, d):
elif re.match('i.86$', a): return 'x86'
return a
-export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH', True), d)}"
+export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH'), d)}"
# Set TARGET_??_KERNEL_ARCH in the machine .conf to set architecture
# specific options necessary for building the kernel and modules.
@@ -57,4 +59,5 @@ HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd"
KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
+TOOLCHAIN = "gcc"
diff --git a/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass b/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass
index 05be1f070..179185b6b 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass
@@ -1,13 +1,15 @@
inherit kernel-uboot uboot-sign
python __anonymous () {
- kerneltypes = d.getVar('KERNEL_IMAGETYPES', True) or ""
+ kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
if 'fitImage' in kerneltypes.split():
- depends = d.getVar("DEPENDS", True)
+ depends = d.getVar("DEPENDS")
depends = "%s u-boot-mkimage-native dtc-native" % depends
d.setVar("DEPENDS", depends)
- if d.getVar("UBOOT_ARCH", True) == "x86":
+ if d.getVar("UBOOT_ARCH") == "mips":
+ replacementtype = "vmlinuz.bin"
+ elif d.getVar("UBOOT_ARCH") == "x86":
replacementtype = "bzImage"
else:
replacementtype = "zImage"
@@ -15,19 +17,19 @@ python __anonymous () {
# Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
# to kernel.bbclass. We have to override it, since we pack zImage
# (at least for now) into the fitImage.
- typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE", True) or ""
+ typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or ""
if 'fitImage' in typeformake.split():
d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('fitImage', replacementtype))
- image = d.getVar('INITRAMFS_IMAGE', True)
+ image = d.getVar('INITRAMFS_IMAGE')
if image:
d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
# Verified boot will sign the fitImage and append the public key to
- # U-boot dtb. We ensure the U-Boot dtb is deployed before assembling
+ # U-Boot dtb. We ensure the U-Boot dtb is deployed before assembling
# the fitImage:
- if d.getVar('UBOOT_SIGN_ENABLE', True):
- uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot', True) or 'u-boot'
+ if d.getVar('UBOOT_SIGN_ENABLE') == "1":
+ uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'
d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_deploy' % uboot_pn)
}
@@ -96,9 +98,9 @@ fitimage_emit_section_kernel() {
kernel_csum="sha1"
ENTRYPOINT=${UBOOT_ENTRYPOINT}
- if test -n "${UBOOT_ENTRYSYMBOL}"; then
- ENTRYPOINT=`${HOST_PREFIX}nm ${S}/vmlinux | \
- awk '$4=="${UBOOT_ENTRYSYMBOL}" {print $2}'`
+ if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
+ ENTRYPOINT=`${HOST_PREFIX}nm vmlinux | \
+ awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
fi
cat << EOF >> ${1}
@@ -229,9 +231,10 @@ EOF
#
# $1 ... .its filename
# $2 ... Linux kernel ID
-# $3 ... DTB image ID
+# $3 ... DTB image name
# $4 ... ramdisk ID
# $5 ... config ID
+# $6 ... default flag
fitimage_emit_section_config() {
conf_csum="sha1"
@@ -244,6 +247,8 @@ fitimage_emit_section_config() {
kernel_line="kernel = \"kernel@${2}\";"
fdt_line=""
ramdisk_line=""
+ setup_line=""
+ default_line=""
if [ -n "${3}" ]; then
conf_desc="${conf_desc}, FDT blob"
@@ -260,10 +265,14 @@ fitimage_emit_section_config() {
setup_line="setup = \"setup@${5}\";"
fi
+ if [ "${6}" = "1" ]; then
+ default_line="default = \"conf@${3}\";"
+ fi
+
cat << EOF >> ${1}
- default = "conf@1";
- conf@1 {
- description = "${conf_desc}";
+ ${default_line}
+ conf@${3} {
+ description = "${6} ${conf_desc}";
${kernel_line}
${fdt_line}
${ramdisk_line}
@@ -314,6 +323,7 @@ EOF
fitimage_assemble() {
kernelcount=1
dtbcount=""
+ DTBS=""
ramdiskcount=${3}
setupcount=""
rm -f ${1} arch/${ARCH}/boot/${2}
@@ -331,7 +341,7 @@ fitimage_assemble() {
#
# Step 2: Prepare a DTB image section
#
- if test -n "${KERNEL_DEVICETREE}"; then
+ if [ -n "${KERNEL_DEVICETREE}" ]; then
dtbcount=1
for DTB in ${KERNEL_DEVICETREE}; do
if echo ${DTB} | grep -q '/dts/'; then
@@ -343,15 +353,16 @@ fitimage_assemble() {
DTB_PATH="arch/${ARCH}/boot/${DTB}"
fi
- fitimage_emit_section_dtb ${1} ${dtbcount} ${DTB_PATH}
- dtbcount=`expr ${dtbcount} + 1`
+ DTB=$(echo "${DTB}" | tr '/' '_')
+ DTBS="${DTBS} ${DTB}"
+ fitimage_emit_section_dtb ${1} ${DTB} ${DTB_PATH}
done
fi
#
# Step 3: Prepare a setup section. (For x86)
#
- if test -e arch/${ARCH}/boot/setup.bin ; then
+ if [ -e arch/${ARCH}/boot/setup.bin ]; then
setupcount=1
fitimage_emit_section_setup ${1} "${setupcount}" arch/${ARCH}/boot/setup.bin
fi
@@ -362,7 +373,7 @@ fitimage_assemble() {
if [ "x${ramdiskcount}" = "x1" ] ; then
# Find and use the first initramfs image archive type we find
for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.gz cpio; do
- initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.${img}"
+ initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.${img}"
echo "Using $initramfs_path"
if [ -e "${initramfs_path}" ]; then
fitimage_emit_section_ramdisk ${1} "${ramdiskcount}" "${initramfs_path}"
@@ -375,7 +386,7 @@ fitimage_assemble() {
# Force the first Kernel and DTB in the default config
kernelcount=1
- if test -n "${dtbcount}"; then
+ if [ -n "${dtbcount}" ]; then
dtbcount=1
fi
@@ -384,7 +395,13 @@ fitimage_assemble() {
#
fitimage_emit_section_maint ${1} confstart
- fitimage_emit_section_config ${1} "${kernelcount}" "${dtbcount}" "${ramdiskcount}" "${setupcount}"
+ if [ -n "${DTBS}" ]; then
+ i=1
+ for DTB in ${DTBS}; do
+ fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
+ i=`expr ${i} + 1`
+ done
+ fi
fitimage_emit_section_maint ${1} sectend
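
Editor's note: the rewritten loop above emits one `conf@<dtb-name>` section per device tree and marks only the first one as the default configuration. A hypothetical sketch, in Python for brevity, of the structure that ends up in the .its file (DTB names are illustrative):

```python
# Hypothetical sketch of the per-DTB configuration sections the loop above
# writes into the .its file; node names follow the pattern used by
# fitimage_emit_section_config, DTB names are illustrative.
dtbs = ['am335x-bone.dtb', 'am335x-boneblack.dtb']
dtbcount = 1   # index of the DTB that becomes the default config

for i, dtb in enumerate(dtbs, start=1):
    name = dtb.replace('/', '_')             # mirrors the tr '/' '_' above
    if i == dtbcount:
        print('default = "conf@%s";' % name)
    print('conf@%s {' % name)
    print('\tkernel = "kernel@1";')
    print('\tfdt = "fdt@%s";' % name)
    print('};')
```
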
@@ -445,11 +462,11 @@ kernel_do_deploy_append() {
if [ -n "${INITRAMFS_IMAGE}" ]; then
echo "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
- its_initramfs_base_name="fitImage-its-${INITRAMFS_IMAGE}-${PV}-${PR}-${MACHINE}-${DATETIME}"
- its_initramfs_symlink_name=fitImage-its-${INITRAMFS_IMAGE}-${MACHINE}
+ its_initramfs_base_name="fitImage-its-${INITRAMFS_IMAGE_NAME}-${PV}-${PR}-${DATETIME}"
+ its_initramfs_symlink_name=fitImage-its-${INITRAMFS_IMAGE_NAME}
install -m 0644 fit-image-${INITRAMFS_IMAGE}.its ${DEPLOYDIR}/${its_initramfs_base_name}.its
- fit_initramfs_base_name="fitImage-${INITRAMFS_IMAGE}-${PV}-${PR}-${MACHINE}-${DATETIME}"
- fit_initramfs_symlink_name=fitImage-${INITRAMFS_IMAGE}-${MACHINE}
+ fit_initramfs_base_name="fitImage-${INITRAMFS_IMAGE_NAME}-${PV}-${PR}-${DATETIME}"
+ fit_initramfs_symlink_name=fitImage-${INITRAMFS_IMAGE_NAME}
install -m 0644 arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} ${DEPLOYDIR}/${fit_initramfs_base_name}.bin
fi
diff --git a/import-layers/yocto-poky/meta/classes/kernel-grub.bbclass b/import-layers/yocto-poky/meta/classes/kernel-grub.bbclass
index f7dcc0715..5d92f3b63 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-grub.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-grub.bbclass
@@ -92,7 +92,7 @@ python __anonymous () {
fi
'''
- imagetypes = d.getVar('KERNEL_IMAGETYPES', True)
+ imagetypes = d.getVar('KERNEL_IMAGETYPES')
imagetypes = re.sub(r'\.gz$', '', imagetypes)
for type in imagetypes.split():
diff --git a/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass b/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass
index 08d226276..5e10dcf73 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass
@@ -22,6 +22,8 @@ if [ x"$D" = "x" ]; then
fi
}
+PACKAGE_WRITE_DEPS += "kmod-native depmodwrapper-cross"
+
do_install_append() {
install -d ${D}${sysconfdir}/modules-load.d/ ${D}${sysconfdir}/modprobe.d/
}
@@ -31,6 +33,8 @@ PACKAGESPLITFUNCS_prepend = "split_kernel_module_packages "
KERNEL_MODULES_META_PACKAGE ?= "kernel-modules"
KERNEL_MODULE_PACKAGE_PREFIX ?= ""
+KERNEL_MODULE_PACKAGE_SUFFIX ?= "-${KERNEL_VERSION}"
+KERNEL_MODULE_PROVIDE_VIRTUAL ?= "1"
python split_kernel_module_packages () {
import re
@@ -39,10 +43,10 @@ python split_kernel_module_packages () {
def extract_modinfo(file):
import tempfile, subprocess
- tempfile.tempdir = d.getVar("WORKDIR", True)
+ tempfile.tempdir = d.getVar("WORKDIR")
tf = tempfile.mkstemp()
tmpfile = tf[1]
- cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX", True) or "", file, tmpfile)
+ cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", file, tmpfile)
subprocess.call(cmd, shell=True)
f = open(tmpfile)
l = f.read().split("\000")
@@ -60,12 +64,12 @@ python split_kernel_module_packages () {
def frob_metadata(file, pkg, pattern, format, basename):
vals = extract_modinfo(file)
- dvar = d.getVar('PKGD', True)
+ dvar = d.getVar('PKGD')
# If autoloading is requested, output /etc/modules-load.d/<name>.conf and append
# appropriate modprobe commands to the postinst
- autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD", True) or "").split()
- autoload = d.getVar('module_autoload_%s' % basename, True)
+ autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD") or "").split()
+ autoload = d.getVar('module_autoload_%s' % basename)
if autoload and autoload == basename:
bb.warn("module_autoload_%s was replaced by KERNEL_MODULE_AUTOLOAD for cases where basename == module name, please drop it" % basename)
if autoload and basename not in autoloadlist:
@@ -79,15 +83,15 @@ python split_kernel_module_packages () {
else:
f.write('%s\n' % basename)
f.close()
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
bb.fatal("pkg_postinst_%s not defined" % pkg)
- postinst += d.getVar('autoload_postinst_fragment', True) % (autoload or basename)
+ postinst += d.getVar('autoload_postinst_fragment') % (autoload or basename)
d.setVar('pkg_postinst_%s' % pkg, postinst)
# Write out any modconf fragment
- modconflist = (d.getVar("KERNEL_MODULE_PROBECONF", True) or "").split()
- modconf = d.getVar('module_conf_%s' % basename, True)
+ modconflist = (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()
+ modconf = d.getVar('module_conf_%s' % basename)
if modconf and basename in modconflist:
name = '%s/etc/modprobe.d/%s.conf' % (dvar, basename)
f = open(name, 'w')
@@ -96,15 +100,15 @@ python split_kernel_module_packages () {
elif modconf:
bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename))
- files = d.getVar('FILES_%s' % pkg, True)
+ files = d.getVar('FILES_%s' % pkg)
files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename)
d.setVar('FILES_%s' % pkg, files)
if "description" in vals:
- old_desc = d.getVar('DESCRIPTION_' + pkg, True) or ""
+ old_desc = d.getVar('DESCRIPTION_' + pkg) or ""
d.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"])
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
modinfo_deps = []
if "depends" in vals and vals["depends"] != "":
for dep in vals["depends"].split(","):
@@ -119,26 +123,33 @@ python split_kernel_module_packages () {
# Avoid automatic -dev recommendations for modules ending with -dev.
d.setVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs', 1)
+ # Provide a virtual package without the version suffix
+ providevirt = d.getVar('KERNEL_MODULE_PROVIDE_VIRTUAL')
+ if providevirt == "1":
+ postfix = format.split('%s')[1]
+ d.setVar('RPROVIDES_' + pkg, pkg.replace(postfix, ''))
+
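
Editor's note: because `KERNEL_MODULE_PACKAGE_SUFFIX` now defaults to `-${KERNEL_VERSION}`, module packages are named e.g. `kernel-module-foo-<version>`, and the block above adds an unversioned `RPROVIDES` so existing runtime dependencies on the plain name keep resolving. Roughly (values hypothetical):

```python
# Rough illustration of the RPROVIDES computation above: strip the
# version suffix from the split package name. All values hypothetical.
fmt = 'kernel-module-%s-4.12.14'          # module_pattern with suffix
pkg = 'kernel-module-snd-usb-audio-4.12.14'

postfix = fmt.split('%s')[1]              # '-4.12.14'
print(pkg.replace(postfix, ''))           # kernel-module-snd-usb-audio
```
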
module_regex = '^(.*)\.k?o$'
- module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX', True)
- module_pattern = module_pattern_prefix + 'kernel-module-%s'
+ module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX')
+ module_pattern_suffix = d.getVar('KERNEL_MODULE_PACKAGE_SUFFIX')
+ module_pattern = module_pattern_prefix + 'kernel-module-%s' + module_pattern_suffix
- postinst = d.getVar('pkg_postinst_modules', True)
- postrm = d.getVar('pkg_postrm_modules', True)
+ postinst = d.getVar('pkg_postinst_modules')
+ postrm = d.getVar('pkg_postrm_modules')
- modules = do_split_packages(d, root='/lib/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='kernel-%s' % (d.getVar("KERNEL_VERSION", True)))
+ modules = do_split_packages(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='kernel-%s' % (d.getVar("KERNEL_VERSION")))
if modules:
- metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE', True)
+ metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE')
d.appendVar('RDEPENDS_' + metapkg, ' '+' '.join(modules))
# If modules-load.d and modprobe.d are empty at this point, remove them to
# avoid warnings. removedirs only raises an OSError if an empty
# directory cannot be removed.
- dvar = d.getVar('PKGD', True)
+ dvar = d.getVar('PKGD')
for dir in ["%s/etc/modprobe.d" % (dvar), "%s/etc/modules-load.d" % (dvar), "%s/etc" % (dvar)]:
if len(os.listdir(dir)) == 0:
os.rmdir(dir)
}
-do_package[vardeps] += '${@" ".join(map(lambda s: "module_conf_" + s, (d.getVar("KERNEL_MODULE_PROBECONF", True) or "").split()))}'
+do_package[vardeps] += '${@" ".join(map(lambda s: "module_conf_" + s, (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()))}'
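
Editor's note: the `do_package[vardeps]` flag uses an inline-Python expansion so the task signature tracks every `module_conf_<name>` variable listed in `KERNEL_MODULE_PROBECONF`. Its plain-Python equivalent, with a hypothetical value:

```python
# Plain-Python evaluation of the inline expression in the vardeps flag
# above; the KERNEL_MODULE_PROBECONF value is hypothetical.
probeconf = 'snd-usb-audio uvcvideo'
vardeps = ' '.join(map(lambda s: 'module_conf_' + s, probeconf.split()))
print(vardeps)   # module_conf_snd-usb-audio module_conf_uvcvideo
```
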
diff --git a/import-layers/yocto-poky/meta/classes/kernel-uboot.bbclass b/import-layers/yocto-poky/meta/classes/kernel-uboot.bbclass
index 345e7f5f3..87f02654f 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-uboot.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-uboot.bbclass
@@ -1,15 +1,21 @@
uboot_prep_kimage() {
- if test -e arch/${ARCH}/boot/compressed/vmlinux ; then
+ if [ -e arch/${ARCH}/boot/compressed/vmlinux ]; then
vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux"
linux_suffix=""
linux_comp="none"
+ elif [ -e arch/${ARCH}/boot/vmlinuz.bin ]; then
+ rm -f linux.bin
+ cp -l arch/${ARCH}/boot/vmlinuz.bin linux.bin
+ vmlinux_path=""
+ linux_suffix=""
+ linux_comp="none"
else
vmlinux_path="vmlinux"
linux_suffix=".gz"
linux_comp="gzip"
fi
- ${OBJCOPY} -O binary -R .note -R .comment -S "${vmlinux_path}" linux.bin
+ [ -n "${vmlinux_path}" ] && ${OBJCOPY} -O binary -R .note -R .comment -S "${vmlinux_path}" linux.bin
if [ "${linux_comp}" != "none" ] ; then
gzip -9 linux.bin
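
Editor's note: `uboot_prep_kimage` now has a three-way choice of kernel payload. A rough sketch of the selection logic, assuming the same file layout as the shell above:

```python
import os

# Rough sketch (not the class's actual shell) of the payload selection in
# uboot_prep_kimage: prefer the self-extracting compressed vmlinux, then a
# prebuilt vmlinuz.bin copied as-is, else gzip-compress plain vmlinux.
def pick_kernel_payload(arch):
    if os.path.exists('arch/%s/boot/compressed/vmlinux' % arch):
        return 'arch/%s/boot/compressed/vmlinux' % arch, '', 'none'
    if os.path.exists('arch/%s/boot/vmlinuz.bin' % arch):
        return '', '', 'none'                 # already linux.bin material
    return 'vmlinux', '.gz', 'gzip'

print(pick_kernel_payload('arm'))             # (path, suffix, compression)
```
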
diff --git a/import-layers/yocto-poky/meta/classes/kernel-uimage.bbclass b/import-layers/yocto-poky/meta/classes/kernel-uimage.bbclass
index 340503a2d..1d8656e76 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-uimage.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-uimage.bbclass
@@ -1,8 +1,8 @@
inherit kernel-uboot
python __anonymous () {
- if "uImage" in (d.getVar('KERNEL_IMAGETYPES', True) or "").split():
- depends = d.getVar("DEPENDS", True)
+ if "uImage" in (d.getVar('KERNEL_IMAGETYPES') or "").split():
+ depends = d.getVar("DEPENDS")
depends = "%s u-boot-mkimage-native" % depends
d.setVar("DEPENDS", depends)
@@ -11,27 +11,25 @@ python __anonymous () {
# to build uImage using the kernel build system if and only if
# KEEPUIMAGE == yes. Otherwise, we pack compressed vmlinux into
# the uImage.
- if d.getVar("KEEPUIMAGE", True) != 'yes':
- typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE", True) or ""
+ if d.getVar("KEEPUIMAGE") != 'yes':
+ typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or ""
if "uImage" in typeformake.split():
d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('uImage', 'vmlinux'))
+
+ # Enable building of uImage with mkimage
+ bb.build.addtask('do_uboot_mkimage', 'do_install', 'do_kernel_link_images', d)
}
+do_uboot_mkimage[dirs] += "${B}"
do_uboot_mkimage() {
- if echo "${KERNEL_IMAGETYPES}" | grep -wq "uImage"; then
- if test "x${KEEPUIMAGE}" != "xyes" ; then
- uboot_prep_kimage
-
- ENTRYPOINT=${UBOOT_ENTRYPOINT}
- if test -n "${UBOOT_ENTRYSYMBOL}"; then
- ENTRYPOINT=`${HOST_PREFIX}nm ${S}/vmlinux | \
- awk '$3=="${UBOOT_ENTRYSYMBOL}" {print $1}'`
- fi
+ uboot_prep_kimage
- uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C "${linux_comp}" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin arch/${ARCH}/boot/uImage
- rm -f linux.bin
- fi
+ ENTRYPOINT=${UBOOT_ENTRYPOINT}
+ if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
+ ENTRYPOINT=`${HOST_PREFIX}nm ${B}/vmlinux | \
+ awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
fi
-}
-addtask uboot_mkimage before do_install after do_compile
+ uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C "${linux_comp}" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${B}/arch/${ARCH}/boot/uImage
+ rm -f linux.bin
+}
diff --git a/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass b/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass
index a60327a07..1ca0756c4 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass
@@ -148,7 +148,8 @@ do_kernel_metadata() {
# run1: pull all the configuration fragments, no matter where they come from
elements="`echo -n ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}`"
if [ -n "${elements}" ]; then
- scc --force -o ${S}/${meta_dir}:cfg,meta ${includes} ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}
+ echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition
+ scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}
if [ $? -ne 0 ]; then
bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
fi
@@ -165,6 +166,7 @@ do_kernel_metadata() {
}
do_patch() {
+ set +e
cd ${S}
check_git_config
@@ -177,6 +179,19 @@ do_patch() {
bbfatal_log "Patch failures can be resolved in the linux source directory ${S})"
fi
fi
+
+ if [ -f "${meta_dir}/merge.queue" ]; then
+ # we need to merge all these branches
+ for b in $(cat ${meta_dir}/merge.queue); do
+ git show-ref --verify --quiet refs/heads/${b}
+ if [ $? -eq 0 ]; then
+ bbnote "Merging branch ${b}"
+ git merge -q --no-ff -m "Merge branch ${b}" ${b}
+ else
+ bbfatal "branch ${b} does not exist, cannot merge"
+ fi
+ done
+ fi
}
do_kernel_checkout() {
@@ -240,6 +255,7 @@ do_kernel_checkout[dirs] = "${S}"
addtask kernel_checkout before do_kernel_metadata after do_unpack
addtask kernel_metadata after do_validate_branches do_unpack before do_patch
do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot"
+do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot"
do_kernel_configme[dirs] += "${S} ${B}"
do_kernel_configme() {
@@ -265,7 +281,8 @@ do_kernel_configme() {
meta_dir=$(kgit --meta)
configs="$(scc --configs -o ${meta_dir})"
- if [ -z "${configs}" ]; then
+ if [ $? -ne 0 ]; then
+ bberror "${configs}"
bbfatal_log "Could not find configuration queue (${meta_dir}/config.queue)"
fi
@@ -286,11 +303,11 @@ python do_kernel_configcheck() {
# if KMETA isn't set globally by a recipe using this routine, we need to
# set the default to 'meta'. Otherwise, kconf_check is not passed a valid
# meta-series for processing
- kmeta = d.getVar( "KMETA", True ) or "meta"
+ kmeta = d.getVar("KMETA") or "meta"
if not os.path.exists(kmeta):
kmeta = "." + kmeta
- pathprefix = "export PATH=%s:%s; " % (d.getVar('PATH', True), "${S}/scripts/util/")
+ pathprefix = "export PATH=%s:%s; " % (d.getVar('PATH'), "${S}/scripts/util/")
cmd = d.expand("scc --configs -o ${S}/.kernel-meta")
ret, configs = oe.utils.getstatusoutput("%s%s" % (pathprefix, cmd))
@@ -298,8 +315,8 @@ python do_kernel_configcheck() {
cmd = d.expand("cd ${S}; kconf_check --report -o ${S}/%s/cfg/ ${B}/.config ${S} %s" % (kmeta,configs))
ret, result = oe.utils.getstatusoutput("%s%s" % (pathprefix, cmd))
- config_check_visibility = int(d.getVar( "KCONF_AUDIT_LEVEL", True ) or 0)
- bsp_check_visibility = int(d.getVar( "KCONF_BSP_AUDIT_LEVEL", True ) or 0)
+ config_check_visibility = int(d.getVar("KCONF_AUDIT_LEVEL") or 0)
+ bsp_check_visibility = int(d.getVar("KCONF_BSP_AUDIT_LEVEL") or 0)
# if config check visibility is non-zero, report dropped configuration values
mismatch_file = d.expand("${S}/%s/cfg/mismatch.txt" % kmeta)
@@ -350,6 +367,10 @@ do_validate_branches() {
current_branch=`git rev-parse --abbrev-ref HEAD`
git branch "$current_branch-orig"
git reset --hard ${force_srcrev}
+ # We've checked out HEAD; make sure we clean up the kgit-s2q fence-post
+ # check so the patches are applied as expected, otherwise no patching
+ # would be done in some corner cases.
+ kgit-s2q --clean
fi
fi
}
diff --git a/import-layers/yocto-poky/meta/classes/kernel.bbclass b/import-layers/yocto-poky/meta/classes/kernel.bbclass
index eefe574a6..ce2cab65a 100644
--- a/import-layers/yocto-poky/meta/classes/kernel.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel.bbclass
@@ -1,7 +1,12 @@
inherit linux-kernel-base kernel-module-split
PROVIDES += "virtual/kernel"
-DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native depmodwrapper-cross bc-native lzop-native"
+DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native lzop-native"
+PACKAGE_WRITE_DEPS += "depmodwrapper-cross virtual/update-alternatives-native"
+
+do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot"
+
+CVE_PRODUCT ?= "linux_kernel"
S = "${STAGING_KERNEL_DIR}"
B = "${WORKDIR}/build"
@@ -13,6 +18,7 @@ INHIBIT_DEFAULT_DEPS = "1"
KERNEL_IMAGETYPE ?= "zImage"
INITRAMFS_IMAGE ?= ""
+INITRAMFS_IMAGE_NAME ?= "${@['${INITRAMFS_IMAGE}-${MACHINE}', ''][d.getVar('INITRAMFS_IMAGE') == '']}"
INITRAMFS_TASK ?= ""
INITRAMFS_IMAGE_BUNDLE ?= ""
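
Editor's note: the new `INITRAMFS_IMAGE_NAME` default uses BitBake's inline-Python list-indexing idiom: indexing a two-element list with a boolean yields `${INITRAMFS_IMAGE}-${MACHINE}` when `INITRAMFS_IMAGE` is set and an empty string otherwise. In plain Python (the image name is illustrative):

```python
# Plain-Python equivalent of the list-indexing conditional used in the
# INITRAMFS_IMAGE_NAME default above; False indexes element 0, True
# indexes element 1.
def initramfs_image_name(initramfs_image, machine):
    return ['%s-%s' % (initramfs_image, machine), ''][initramfs_image == '']

print(initramfs_image_name('core-image-minimal-initramfs', 'qemux86'))
# -> core-image-minimal-initramfs-qemux86
print(repr(initramfs_image_name('', 'qemux86')))   # -> ''
```
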
@@ -22,33 +28,36 @@ INITRAMFS_IMAGE_BUNDLE ?= ""
# number and cause kernel to be rebuilt. To avoid this, make
# KERNEL_VERSION_NAME and KERNEL_VERSION_PKG_NAME depend on
# LINUX_VERSION which is a constant.
-KERNEL_VERSION_NAME = "${@d.getVar('KERNEL_VERSION', True) or ""}"
+KERNEL_VERSION_NAME = "${@d.getVar('KERNEL_VERSION') or ""}"
KERNEL_VERSION_NAME[vardepvalue] = "${LINUX_VERSION}"
-KERNEL_VERSION_PKG_NAME = "${@legitimize_package_name(d.getVar('KERNEL_VERSION', True))}"
+KERNEL_VERSION_PKG_NAME = "${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
KERNEL_VERSION_PKG_NAME[vardepvalue] = "${LINUX_VERSION}"
python __anonymous () {
- import re
# Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES
- type = d.getVar('KERNEL_IMAGETYPE', True) or ""
- alttype = d.getVar('KERNEL_ALT_IMAGETYPE', True) or ""
- types = d.getVar('KERNEL_IMAGETYPES', True) or ""
+ type = d.getVar('KERNEL_IMAGETYPE') or ""
+ alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or ""
+ types = d.getVar('KERNEL_IMAGETYPES') or ""
if type not in types.split():
types = (type + ' ' + types).strip()
if alttype not in types.split():
types = (alttype + ' ' + types).strip()
d.setVar('KERNEL_IMAGETYPES', types)
- typeformake = re.sub(r'\.gz', '', types)
+ # Some commonly used kernel images, such as vmlinux.gz, aren't generated
+ # by the kernel build system. typeformake lists only valid kernel make
+ # targets, and post-processing (such as gzip-compressing vmlinux) can be
+ # done after the kernel is built.
+ typeformake = types.replace('vmlinux.gz', 'vmlinux')
d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake)
for type in types.split():
typelower = type.lower()
+ imagedest = d.getVar('KERNEL_IMAGEDEST')
d.appendVar('PACKAGES', ' ' + 'kernel-image-' + typelower)
- d.setVar('FILES_kernel-image-' + typelower, '/boot/' + type + '-${KERNEL_VERSION_NAME}')
+ d.setVar('FILES_kernel-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}')
d.appendVar('RDEPENDS_kernel-image', ' ' + 'kernel-image-' + typelower)
@@ -56,15 +65,14 @@ python __anonymous () {
d.setVar('ALLOW_EMPTY_kernel-image-' + typelower, '1')
- imagedest = d.getVar('KERNEL_IMAGEDEST', True)
- priority = d.getVar('KERNEL_PRIORITY', True)
- postinst = '#!/bin/sh\n' + 'update-alternatives --install /' + imagedest + '/' + type + ' ' + type + ' ' + '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME} ' + priority + ' || true' + '\n'
+ priority = d.getVar('KERNEL_PRIORITY')
+ postinst = '#!/bin/sh\n' + 'update-alternatives --install /' + imagedest + '/' + type + ' ' + type + ' ' + type + '-${KERNEL_VERSION_NAME} ' + priority + ' || true' + '\n'
d.setVar('pkg_postinst_kernel-image-' + typelower, postinst)
postrm = '#!/bin/sh\n' + 'update-alternatives --remove' + ' ' + type + ' ' + type + '-${KERNEL_VERSION_NAME} || true' + '\n'
d.setVar('pkg_postrm_kernel-image-' + typelower, postrm)
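
Editor's note: the reworked postinst registers each image with `update-alternatives` using a target path relative to `KERNEL_IMAGEDEST` instead of a hard-coded `/boot` one. A sketch of the scripts the anonymous Python assembles, with hypothetical values:

```python
# Sketch of the update-alternatives postinst/postrm scripts assembled by
# the anonymous Python above; type, imagedest, version and priority are
# hypothetical example values.
type_, imagedest, version, priority = 'zImage', 'boot', '4.12.14-yocto', '41214'

postinst = ('#!/bin/sh\n'
            'update-alternatives --install /%s/%s %s %s-%s %s || true\n'
            % (imagedest, type_, type_, type_, version, priority))
postrm = ('#!/bin/sh\n'
          'update-alternatives --remove %s %s-%s || true\n'
          % (type_, type_, version))
print(postinst)
```
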
- image = d.getVar('INITRAMFS_IMAGE', True)
+ image = d.getVar('INITRAMFS_IMAGE')
if image:
d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
@@ -72,7 +80,7 @@ python __anonymous () {
# The preferred method is to set INITRAMFS_IMAGE, because
# this INITRAMFS_TASK has circular dependency problems
# if the initramfs requires kernel modules
- image_task = d.getVar('INITRAMFS_TASK', True)
+ image_task = d.getVar('INITRAMFS_TASK')
if image_task:
d.appendVarFlag('do_configure', 'depends', ' ${INITRAMFS_TASK}')
}
@@ -101,15 +109,15 @@ inherit ${KERNEL_CLASSES}
do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
base_do_unpack_append () {
- s = d.getVar("S", True)
+ s = d.getVar("S")
if s[-1] == '/':
# drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
s=s[:-1]
- kernsrc = d.getVar("STAGING_KERNEL_DIR", True)
+ kernsrc = d.getVar("STAGING_KERNEL_DIR")
if s != kernsrc:
bb.utils.mkdirhier(kernsrc)
bb.utils.remove(kernsrc, recurse=True)
- if d.getVar("EXTERNALSRC", True):
+ if d.getVar("EXTERNALSRC"):
# With EXTERNALSRC S will not be wiped so we can symlink to it
os.symlink(s, kernsrc)
else:
@@ -126,10 +134,12 @@ PACKAGES_DYNAMIC += "^kernel-firmware-.*"
export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"
+export KBUILD_BUILD_USER = "oe-user"
+export KBUILD_BUILD_HOST = "oe-host"
-KERNEL_PRIORITY ?= "${@int(d.getVar('PV', True).split('-')[0].split('+')[0].split('.')[0]) * 10000 + \
- int(d.getVar('PV', True).split('-')[0].split('+')[0].split('.')[1]) * 100 + \
- int(d.getVar('PV', True).split('-')[0].split('+')[0].split('.')[-1])}"
+KERNEL_PRIORITY ?= "${@int(d.getVar('PV').split('-')[0].split('+')[0].split('.')[0]) * 10000 + \
+ int(d.getVar('PV').split('-')[0].split('+')[0].split('.')[1]) * 100 + \
+ int(d.getVar('PV').split('-')[0].split('+')[0].split('.')[-1])}"
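
Editor's note: `KERNEL_PRIORITY` packs the version triple from `PV` (after stripping any `-` or `+` suffix) into one comparable integer. A worked example with a hypothetical `PV`:

```python
# Worked example of the KERNEL_PRIORITY computation above.
pv = '4.12.14+git999'                     # hypothetical PV
base = pv.split('-')[0].split('+')[0]     # '4.12.14'
parts = base.split('.')
priority = int(parts[0]) * 10000 + int(parts[1]) * 100 + int(parts[-1])
print(priority)  # 4*10000 + 12*100 + 14 = 41214
```
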
KERNEL_RELEASE ?= "${KERNEL_VERSION}"
@@ -140,7 +150,7 @@ KERNEL_IMAGEDEST = "boot"
#
# configuration
#
-export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE", True) or "ttyS0"}"
+export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE") or "ttyS0"}"
KERNEL_VERSION = "${@get_kernelversion_headers('${B}')}"
@@ -164,34 +174,34 @@ copy_initramfs() {
# In case the directory is not created yet from the first pass compile:
mkdir -p ${B}/usr
# Find and use the first initramfs image archive type we find
- rm -f ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
+ rm -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
for img in cpio cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz; do
- if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img" ]; then
- cp ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img ${B}/usr/.
+ if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img" ]; then
+ cp ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/.
case $img in
*gz)
echo "gzip decompressing image"
- gunzip -f ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
+ gunzip -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
break
;;
*lz4)
echo "lz4 decompressing image"
- lz4 -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
+ lz4 -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
break
;;
*lzo)
echo "lzo decompressing image"
- lzop -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
+ lzop -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
break
;;
*lzma)
echo "lzma decompressing image"
- lzma -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
+ lzma -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
break
;;
*xz)
echo "xz decompressing image"
- xz -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
+ xz -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
break
;;
esac
@@ -219,7 +229,7 @@ do_bundle_initramfs () {
tmp_path=$tmp_path" "$type"##"
fi
done
- use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
+ use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
kernel_do_compile
# Restoring kernel image
for tp in $tmp_path ; do
@@ -258,18 +268,16 @@ kernel_do_compile() {
# The old style way of copying a prebuilt image and building it
# is turned on via INITRAMFS_TASK != ""
copy_initramfs
- use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
+ use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
fi
for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
oe_runmake ${typeformake} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
- for type in ${KERNEL_IMAGETYPES} ; do
- if test "${typeformake}.gz" = "${type}"; then
- mkdir -p "${KERNEL_OUTPUT_DIR}"
- gzip -9c < "${typeformake}" > "${KERNEL_OUTPUT_DIR}/${type}"
- break;
- fi
- done
done
+ # vmlinux.gz is not built by the kernel build system
+ if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
+ mkdir -p "${KERNEL_OUTPUT_DIR}"
+ gzip -9c < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz"
+ fi
}
do_compile_kernelmodules() {
@@ -296,11 +304,11 @@ kernel_do_install() {
#
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
- oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" modules_install
- rm "${D}/lib/modules/${KERNEL_VERSION}/build"
- rm "${D}/lib/modules/${KERNEL_VERSION}/source"
+ oe_runmake DEPMOD=echo MODLIB=${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION} INSTALL_FW_PATH=${D}${nonarch_base_libdir}/firmware modules_install
+ rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
+ rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
# If the kernel/ directory is empty remove it to prevent QA issues
- rmdir --ignore-fail-on-non-empty "${D}/lib/modules/${KERNEL_VERSION}/kernel"
+ rmdir --ignore-fail-on-non-empty "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel"
else
bbnote "no modules to install"
fi
@@ -324,6 +332,10 @@ do_install[prefuncs] += "package_get_auto_pr"
# Must be run no earlier than after do_kernel_checkout or else the Makefile won't be in ${S}/Makefile
do_kernel_version_sanity_check() {
+ if [ "x${KERNEL_VERSION_SANITY_SKIP}" = "x1" ]; then
+ exit 0
+ fi
+
# The Makefile determines the kernel version shown at runtime
# Don't use KERNEL_VERSION because the headers it grabs the version from aren't generated until do_compile
VERSION=$(grep "^VERSION =" ${S}/Makefile | sed s/.*=\ *//)
@@ -347,7 +359,7 @@ do_kernel_version_sanity_check() {
reg="${reg}${EXTRAVERSION}"
if [ -z `echo ${PV} | grep -E "${reg}"` ]; then
- bbfatal "Package Version (${PV}) does not match of kernel being built (${vers}). Please update the PV variable to match the kernel source."
+ bbfatal "Package Version (${PV}) does not match of kernel being built (${vers}). Please update the PV variable to match the kernel source or set KERNEL_VERSION_SANITY_SKIP=\"1\" in your recipe."
fi
exit 0
}
@@ -430,14 +442,14 @@ sysroot_stage_all () {
KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} O=${B} oldnoconfig || yes '' | oe_runmake -C ${S} O=${B} oldconfig"
python check_oldest_kernel() {
- oldest_kernel = d.getVar('OLDEST_KERNEL', True)
- kernel_version = d.getVar('KERNEL_VERSION', True)
- tclibc = d.getVar('TCLIBC', True)
+ oldest_kernel = d.getVar('OLDEST_KERNEL')
+ kernel_version = d.getVar('KERNEL_VERSION')
+ tclibc = d.getVar('TCLIBC')
if tclibc == 'glibc':
kernel_version = kernel_version.split('-', 1)[0]
if oldest_kernel and kernel_version:
if bb.utils.vercmp_string(kernel_version, oldest_kernel) < 0:
- bb.warn('%s: OLDEST_KERNEL is "%s" but the version of the kernel you are building is "%s" - therefore %s as built may not be compatible with this kernel. Either set OLDEST_KERNEL to an older version, or build a newer kernel.' % (d.getVar('PN', True), oldest_kernel, kernel_version, tclibc))
+ bb.warn('%s: OLDEST_KERNEL is "%s" but the version of the kernel you are building is "%s" - therefore %s as built may not be compatible with this kernel. Either set OLDEST_KERNEL to an older version, or build a newer kernel.' % (d.getVar('PN'), oldest_kernel, kernel_version, tclibc))
}
check_oldest_kernel[vardepsexclude] += "OLDEST_KERNEL KERNEL_VERSION"
@@ -478,9 +490,9 @@ EXPORT_FUNCTIONS do_compile do_install do_configure
# kernel-image becomes kernel-image-${KERNEL_VERSION}
PACKAGES = "kernel kernel-base kernel-vmlinux kernel-image kernel-dev kernel-modules"
FILES_${PN} = ""
-FILES_kernel-base = "/lib/modules/${KERNEL_VERSION}/modules.order /lib/modules/${KERNEL_VERSION}/modules.builtin"
+FILES_kernel-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin"
FILES_kernel-image = ""
-FILES_kernel-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} /lib/modules/${KERNEL_VERSION}/build"
+FILES_kernel-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
FILES_kernel-vmlinux = "/boot/vmlinux-${KERNEL_VERSION_NAME}"
FILES_kernel-modules = ""
RDEPENDS_kernel = "kernel-base"
@@ -511,7 +523,7 @@ pkg_postinst_kernel-base () {
PACKAGESPLITFUNCS_prepend = "split_kernel_packages "
python split_kernel_packages () {
- do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
+ do_split_packages(d, root='${nonarch_base_libdir}/firmware', file_regex='^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
}
# Many scripts want to look in arch/$arch/boot for the bootable
@@ -527,7 +539,11 @@ do_kernel_link_images() {
if [ -f ../../../vmlinuz ]; then
ln -sf ../../../vmlinuz
fi
+ if [ -f ../../../vmlinuz.bin ]; then
+ ln -sf ../../../vmlinuz.bin
+ fi
}
+addtask kernel_link_images after do_compile before do_strip
do_strip() {
if [ -n "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}" ]; then
@@ -556,7 +572,7 @@ do_strip() {
}
do_strip[dirs] = "${B}"
-addtask do_strip before do_sizecheck after do_kernel_link_images
+addtask strip before do_sizecheck after do_kernel_link_images
# Support checking the kernel size since some kernels need to reside in partitions
# with a fixed length or there is a limit in transferring the kernel to memory
@@ -623,6 +639,6 @@ do_deploy[cleandirs] = "${DEPLOYDIR}"
do_deploy[dirs] = "${DEPLOYDIR} ${B}"
do_deploy[prefuncs] += "package_get_auto_pr"
-addtask deploy after do_populate_sysroot
+addtask deploy after do_populate_sysroot do_packagedata
EXPORT_FUNCTIONS do_deploy
diff --git a/import-layers/yocto-poky/meta/classes/kernelsrc.bbclass b/import-layers/yocto-poky/meta/classes/kernelsrc.bbclass
index 9efd46a92..675d40ec9 100644
--- a/import-layers/yocto-poky/meta/classes/kernelsrc.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernelsrc.bbclass
@@ -1,7 +1,7 @@
S = "${STAGING_KERNEL_DIR}"
-do_fetch[noexec] = "1"
-do_unpack[depends] += "virtual/kernel:do_patch"
-do_unpack[noexec] = "1"
+deltask do_fetch
+deltask do_unpack
+do_patch[depends] += "virtual/kernel:do_patch"
do_patch[noexec] = "1"
do_package[depends] += "virtual/kernel:do_populate_sysroot"
KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}"
diff --git a/import-layers/yocto-poky/meta/classes/libc-common.bbclass b/import-layers/yocto-poky/meta/classes/libc-common.bbclass
index 11b0065a6..9ea2c0374 100644
--- a/import-layers/yocto-poky/meta/classes/libc-common.bbclass
+++ b/import-layers/yocto-poky/meta/classes/libc-common.bbclass
@@ -17,15 +17,15 @@ do_install() {
}
def get_libc_fpu_setting(bb, d):
- if d.getVar('TARGET_FPU', True) in [ 'soft', 'ppc-efd' ]:
+ if d.getVar('TARGET_FPU') in [ 'soft', 'ppc-efd' ]:
return "--without-fp"
return ""
python populate_packages_prepend () {
- if d.getVar('DEBIAN_NAMES', True):
- pkgs = d.getVar('PACKAGES', True).split()
- bpn = d.getVar('BPN', True)
- prefix = d.getVar('MLPREFIX', True) or ""
+ if d.getVar('DEBIAN_NAMES'):
+ pkgs = d.getVar('PACKAGES').split()
+ bpn = d.getVar('BPN')
+ prefix = d.getVar('MLPREFIX') or ""
# Set the base package...
d.setVar('PKG_' + prefix + bpn, prefix + 'libc6')
libcprefix = prefix + bpn + '-'
diff --git a/import-layers/yocto-poky/meta/classes/libc-package.bbclass b/import-layers/yocto-poky/meta/classes/libc-package.bbclass
index 2dc90c44d..739adce69 100644
--- a/import-layers/yocto-poky/meta/classes/libc-package.bbclass
+++ b/import-layers/yocto-poky/meta/classes/libc-package.bbclass
@@ -9,25 +9,27 @@
GLIBC_INTERNAL_USE_BINARY_LOCALE ?= "ondevice"
+GLIBC_SPLIT_LC_PACKAGES ?= "0"
+
python __anonymous () {
- enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION", True)
+ enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION")
- pn = d.getVar("PN", True)
+ pn = d.getVar("PN")
if pn.endswith("-initial"):
enabled = False
if enabled and int(enabled):
import re
- target_arch = d.getVar("TARGET_ARCH", True)
- binary_arches = d.getVar("BINARY_LOCALE_ARCHES", True) or ""
- use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or ""
+ target_arch = d.getVar("TARGET_ARCH")
+ binary_arches = d.getVar("BINARY_LOCALE_ARCHES") or ""
+ use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or ""
for regexp in binary_arches.split(" "):
r = re.compile(regexp)
if r.match(target_arch):
- depends = d.getVar("DEPENDS", True)
+ depends = d.getVar("DEPENDS")
if use_cross_localedef == "1" :
depends = "%s cross-localedef-native" % depends
else:
@@ -92,21 +94,21 @@ inherit qemu
python package_do_split_gconvs () {
import re
- if (d.getVar('PACKAGE_NO_GCONV', True) == '1'):
+ if (d.getVar('PACKAGE_NO_GCONV') == '1'):
bb.note("package requested not splitting gconvs")
return
- if not d.getVar('PACKAGES', True):
+ if not d.getVar('PACKAGES'):
return
- mlprefix = d.getVar("MLPREFIX", True) or ""
+ mlprefix = d.getVar("MLPREFIX") or ""
- bpn = d.getVar('BPN', True)
- libdir = d.getVar('libdir', True)
+ bpn = d.getVar('BPN')
+ libdir = d.getVar('libdir')
if not libdir:
bb.error("libdir not defined")
return
- datadir = d.getVar('datadir', True)
+ datadir = d.getVar('datadir')
if not datadir:
bb.error("datadir not defined")
return
@@ -114,7 +116,7 @@ python package_do_split_gconvs () {
gconv_libdir = base_path_join(libdir, "gconv")
charmap_dir = base_path_join(datadir, "i18n", "charmaps")
locales_dir = base_path_join(datadir, "i18n", "locales")
- binary_locales_dir = d.getVar('localedir', True)
+ binary_locales_dir = d.getVar('localedir')
def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group):
deps = []
@@ -181,13 +183,13 @@ python package_do_split_gconvs () {
description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
d.setVar('PACKAGES', d.getVar('PACKAGES', False) + ' ' + d.getVar('MLPREFIX', False) + bpn + '-gconv')
- use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", True)
+ use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE")
dot_re = re.compile("(.*)\.(.*)")
# Read in supported locales and associated encodings
supported = {}
- with open(base_path_join(d.getVar('WORKDIR', True), "SUPPORTED")) as f:
+ with open(base_path_join(d.getVar('WORKDIR'), "SUPPORTED")) as f:
for line in f.readlines():
try:
locale, charset = line.rstrip().split()
@@ -196,7 +198,7 @@ python package_do_split_gconvs () {
supported[locale] = charset
# GLIBC_GENERATE_LOCALES var specifies which locales to be generated. empty or "all" means all locales
- to_generate = d.getVar('GLIBC_GENERATE_LOCALES', True)
+ to_generate = d.getVar('GLIBC_GENERATE_LOCALES')
if not to_generate or to_generate == 'all':
to_generate = sorted(supported.keys())
else:
@@ -213,33 +215,32 @@ python package_do_split_gconvs () {
def output_locale_source(name, pkgname, locale, encoding):
d.setVar('RDEPENDS_%s' % pkgname, '%slocaledef %s-localedata-%s %s-charmap-%s' % \
(mlprefix, mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
- d.setVar('pkg_postinst_%s' % pkgname, d.getVar('locale_base_postinst', True) \
+ d.setVar('pkg_postinst_%s' % pkgname, d.getVar('locale_base_postinst') \
% (locale, encoding, locale))
- d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm', True) % \
+ d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm') % \
(locale, encoding, locale))
def output_locale_binary_rdepends(name, pkgname, locale, encoding):
- m = re.match("(.*)\.(.*)", name)
- if m:
- libc_name = "%s.%s" % (m.group(1), m.group(2).lower())
- else:
- libc_name = name
- d.setVar('RDEPENDS_%s' % pkgname, legitimize_package_name('%s-binary-localedata-%s' \
- % (mlprefix+bpn, libc_name)))
+ dep = legitimize_package_name('%s-binary-localedata-%s' % (bpn, name))
+ lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
+ if lcsplit and int(lcsplit):
+ d.appendVar('PACKAGES', ' ' + dep)
+ d.setVar('ALLOW_EMPTY_%s' % dep, '1')
+ d.setVar('RDEPENDS_%s' % pkgname, mlprefix + dep)
commands = {}
def output_locale_binary(name, pkgname, locale, encoding):
- treedir = base_path_join(d.getVar("WORKDIR", True), "locale-tree")
- ldlibdir = base_path_join(treedir, d.getVar("base_libdir", True))
- path = d.getVar("PATH", True)
+ treedir = base_path_join(d.getVar("WORKDIR"), "locale-tree")
+ ldlibdir = base_path_join(treedir, d.getVar("base_libdir"))
+ path = d.getVar("PATH")
i18npath = base_path_join(treedir, datadir, "i18n")
gconvpath = base_path_join(treedir, "iconvdata")
outputpath = base_path_join(treedir, binary_locales_dir)
- use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or "0"
+ use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or "0"
if use_cross_localedef == "1":
- target_arch = d.getVar('TARGET_ARCH', True)
+ target_arch = d.getVar('TARGET_ARCH')
locale_arch_options = { \
"arm": " --uint32-align=4 --little-endian ", \
"armeb": " --uint32-align=4 --big-endian ", \
@@ -278,7 +279,7 @@ python package_do_split_gconvs () {
--inputfile=%s/i18n/locales/%s --charmap=%s %s" \
% (treedir, datadir, locale, encoding, name)
- qemu_options = d.getVar('QEMU_OPTIONS', True)
+ qemu_options = d.getVar('QEMU_OPTIONS')
cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
-E LD_LIBRARY_PATH=%s %s %s/bin/localedef %s" % \
@@ -291,7 +292,7 @@ python package_do_split_gconvs () {
def output_locale(name, locale, encoding):
pkgname = d.getVar('MLPREFIX', False) + 'locale-base-' + legitimize_package_name(name)
d.setVar('ALLOW_EMPTY_%s' % pkgname, '1')
- d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES', True)))
+ d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES')))
rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name))
m = re.match("(.*)_(.*)", name)
if m:
@@ -310,8 +311,8 @@ python package_do_split_gconvs () {
bb.note("preparing tree for binary locale generation")
bb.build.exec_func("do_prep_locale_tree", d)
- utf8_only = int(d.getVar('LOCALE_UTF8_ONLY', True) or 0)
- utf8_is_default = int(d.getVar('LOCALE_UTF8_IS_DEFAULT', True) or 0)
+ utf8_only = int(d.getVar('LOCALE_UTF8_ONLY') or 0)
+ utf8_is_default = int(d.getVar('LOCALE_UTF8_IS_DEFAULT') or 0)
encodings = {}
for locale in to_generate:
@@ -337,8 +338,13 @@ python package_do_split_gconvs () {
else:
output_locale('%s.%s' % (base, charset), base, charset)
+ def metapkg_hook(file, pkg, pattern, format, basename):
+ name = basename.split('/', 1)[0]
+ metapkg = legitimize_package_name('%s-binary-localedata-%s' % (mlprefix+bpn, name))
+ d.appendVar('RDEPENDS_%s' % metapkg, ' ' + pkg)
+
if use_bin == "compile":
- makefile = base_path_join(d.getVar("WORKDIR", True), "locale-tree", "Makefile")
+ makefile = base_path_join(d.getVar("WORKDIR"), "locale-tree", "Makefile")
m = open(makefile, "w")
m.write("all: %s\n\n" % " ".join(commands.keys()))
for cmd in commands:
@@ -350,13 +356,18 @@ python package_do_split_gconvs () {
bb.build.exec_func("oe_runmake", d)
bb.note("collecting binary locales from locale tree")
bb.build.exec_func("do_collect_bins_from_locale_tree", d)
- do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
- output_pattern=bpn+'-binary-localedata-%s', \
- description='binary locale definition for %s', extra_depends='', allow_dirs=True)
- elif use_bin == "precompiled":
- do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
- output_pattern=bpn+'-binary-localedata-%s', \
- description='binary locale definition for %s', extra_depends='', allow_dirs=True)
+
+ if use_bin in ('compile', 'precompiled'):
+ lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
+ if lcsplit and int(lcsplit):
+ do_split_packages(d, binary_locales_dir, file_regex='^(.*/LC_\w+)', \
+ output_pattern=bpn+'-binary-localedata-%s', \
+ description='binary locale definition for %s', recursive=True,
+ hook=metapkg_hook, extra_depends='', allow_dirs=True, match_path=True)
+ else:
+ do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
+ output_pattern=bpn+'-binary-localedata-%s', \
+ description='binary locale definition for %s', extra_depends='', allow_dirs=True)
else:
bb.note("generation of binary locales disabled. this may break i18n!")
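
    The new metapkg_hook regroups the per-category packages created when
    GLIBC_SPLIT_LC_PACKAGES is set. A rough sketch of the effect, assuming a
    hypothetical en_GB locale, bpn "glibc" and no multilib prefix:

        # do_split_packages() matches '^(.*/LC_\w+)' against the locale tree, so
        # 'en_GB/LC_TIME' becomes package 'glibc-binary-localedata-en-gb-lc-time'.
        # For each such package the hook derives the locale from the first path
        # component and chains the package into a per-locale metapackage:
        basename = 'en_GB/LC_TIME'
        name = basename.split('/', 1)[0]        # 'en_GB'
        metapkg = legitimize_package_name('glibc-binary-localedata-%s' % name)
        # i.e. RDEPENDS_glibc-binary-localedata-en-gb gains the LC_TIME package
        d.appendVar('RDEPENDS_%s' % metapkg, ' glibc-binary-localedata-en-gb-lc-time')
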
diff --git a/import-layers/yocto-poky/meta/classes/license.bbclass b/import-layers/yocto-poky/meta/classes/license.bbclass
index 721343d0f..b1fffe70f 100644
--- a/import-layers/yocto-poky/meta/classes/license.bbclass
+++ b/import-layers/yocto-poky/meta/classes/license.bbclass
@@ -37,13 +37,13 @@ python license_create_manifest() {
import oe.packagedata
from oe.rootfs import image_list_installed_packages
- build_images_from_feeds = d.getVar('BUILD_IMAGES_FROM_FEEDS', True)
+ build_images_from_feeds = d.getVar('BUILD_IMAGES_FROM_FEEDS')
if build_images_from_feeds == "1":
return 0
pkg_dic = {}
for pkg in sorted(image_list_installed_packages(d)):
- pkg_info = os.path.join(d.getVar('PKGDATA_DIR', True),
+ pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
'runtime-reverse', pkg)
pkg_name = os.path.basename(os.readlink(pkg_info))
@@ -52,15 +52,15 @@ python license_create_manifest() {
pkg_lic_name = "LICENSE_" + pkg_name
pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name]
- rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
- d.getVar('IMAGE_NAME', True), 'license.manifest')
+ rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+ d.getVar('IMAGE_NAME'), 'license.manifest')
write_license_files(d, rootfs_license_manifest, pkg_dic)
}
def write_license_files(d, license_manifest, pkg_dic):
import re
- bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE", True) or "").split()
+ bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split()
bad_licenses = map(lambda l: canonical_license(d, l), bad_licenses)
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
@@ -72,7 +72,7 @@ def write_license_files(d, license_manifest, pkg_dic):
oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
bad_licenses, canonical_license, d)
except oe.license.LicenseError as exc:
- bb.fatal('%s: %s' % (d.getVar('P', True), exc))
+ bb.fatal('%s: %s' % (d.getVar('P'), exc))
else:
pkg_dic[pkg]["LICENSES"] = re.sub('[|&()*]', ' ', pkg_dic[pkg]["LICENSE"])
pkg_dic[pkg]["LICENSES"] = re.sub(' *', ' ', pkg_dic[pkg]["LICENSES"])
@@ -98,7 +98,7 @@ def write_license_files(d, license_manifest, pkg_dic):
license_file.write("FILES: %s\n\n" % pkg_dic[pkg]["FILES"])
for lic in pkg_dic[pkg]["LICENSES"]:
- lic_file = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
+ lic_file = os.path.join(d.getVar('LICENSE_DIRECTORY'),
pkg_dic[pkg]["PN"], "generic_%s" %
re.sub('\+', '', lic))
            # explicitly avoid the CLOSED license because it isn't generic
@@ -114,10 +114,10 @@ def write_license_files(d, license_manifest, pkg_dic):
# - Just copy the manifest
# - Copy the manifest and the license directories
# With both options set we see a .5 M increase in core-image-minimal
- copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST', True)
- copy_lic_dirs = d.getVar('COPY_LIC_DIRS', True)
+ copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST')
+ copy_lic_dirs = d.getVar('COPY_LIC_DIRS')
if copy_lic_manifest == "1":
- rootfs_license_dir = os.path.join(d.getVar('IMAGE_ROOTFS', 'True'),
+ rootfs_license_dir = os.path.join(d.getVar('IMAGE_ROOTFS'),
'usr', 'share', 'common-licenses')
bb.utils.mkdirhier(rootfs_license_dir)
rootfs_license_manifest = os.path.join(rootfs_license_dir,
@@ -129,8 +129,12 @@ def write_license_files(d, license_manifest, pkg_dic):
for pkg in sorted(pkg_dic):
pkg_rootfs_license_dir = os.path.join(rootfs_license_dir, pkg)
bb.utils.mkdirhier(pkg_rootfs_license_dir)
- pkg_license_dir = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
+ pkg_license_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
pkg_dic[pkg]["PN"])
+
+ pkg_manifest_licenses = [canonical_license(d, lic) \
+ for lic in pkg_dic[pkg]["LICENSES"]]
+
licenses = os.listdir(pkg_license_dir)
for lic in licenses:
rootfs_license = os.path.join(rootfs_license_dir, lic)
@@ -138,9 +142,18 @@ def write_license_files(d, license_manifest, pkg_dic):
pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic)
if re.match("^generic_.*$", lic):
- generic_lic = re.search("^generic_(.*)$", lic).group(1)
- if oe.license.license_ok(canonical_license(d,
- generic_lic), bad_licenses) == False:
+ generic_lic = canonical_license(d,
+ re.search("^generic_(.*)$", lic).group(1))
+
+                        # Do not copy the generic license into the package
+                        # if it isn't declared in the package's LICENSES.
+ if not re.sub('\+$', '', generic_lic) in \
+ [re.sub('\+', '', lic) for lic in \
+ pkg_manifest_licenses]:
+ continue
+
+ if oe.license.license_ok(generic_lic,
+ bad_licenses) == False:
continue
if not os.path.exists(rootfs_license):
@@ -166,7 +179,7 @@ def license_deployed_manifest(d):
dep_dic = {}
man_dic = {}
- lic_dir = d.getVar("LICENSE_DIRECTORY", True)
+ lic_dir = d.getVar("LICENSE_DIRECTORY")
dep_dic = get_deployed_dependencies(d)
for dep in dep_dic.keys():
@@ -181,8 +194,8 @@ def license_deployed_manifest(d):
key,val = line.split(": ", 1)
man_dic[dep][key] = val[:-1]
- lic_manifest_dir = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
- d.getVar('IMAGE_NAME', True))
+ lic_manifest_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+ d.getVar('IMAGE_NAME'))
bb.utils.mkdirhier(lic_manifest_dir)
image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest')
write_license_files(d, image_license_manifest, man_dic)
@@ -202,7 +215,7 @@ def get_deployed_dependencies(d):
depends = list(set([dep[0] for dep
in list(taskdata.values())
if not dep[0].endswith("-native")]))
- extra_depends = d.getVar("EXTRA_IMAGEDEPENDS", True)
+ extra_depends = d.getVar("EXTRA_IMAGEDEPENDS")
boot_depends = get_boot_dependencies(d)
depends.extend(extra_depends.split())
depends.extend(boot_depends)
@@ -212,13 +225,13 @@ def get_deployed_dependencies(d):
# the SSTATE_MANIFESTS for "deploy" task.
# The manifest file name contains the arch. Because we are not running
# in the recipe context it is necessary to check every arch used.
- sstate_manifest_dir = d.getVar("SSTATE_MANIFESTS", True)
- sstate_archs = d.getVar("SSTATE_ARCHS", True)
- extra_archs = d.getVar("PACKAGE_EXTRA_ARCHS", True)
+ sstate_manifest_dir = d.getVar("SSTATE_MANIFESTS")
+ sstate_archs = d.getVar("SSTATE_ARCHS")
+ extra_archs = d.getVar("PACKAGE_EXTRA_ARCHS")
archs = list(set(("%s %s" % (sstate_archs, extra_archs)).split()))
for dep in depends:
# Some recipes have an arch on their own, so we try that first.
- special_arch = d.getVar("PACKAGE_ARCH_pn-%s" % dep, True)
+ special_arch = d.getVar("PACKAGE_ARCH_pn-%s" % dep)
if special_arch:
sstate_manifest_file = os.path.join(sstate_manifest_dir,
"manifest-%s-%s.deploy" % (special_arch, dep))
@@ -249,12 +262,12 @@ def get_boot_dependencies(d):
for task in boot_tasks:
boot_depends_string = "%s %s" % (boot_depends_string,
- d.getVarFlag(task, "depends", True) or "")
+ d.getVarFlag(task, "depends") or "")
boot_depends = [dep.split(":")[0] for dep
in boot_depends_string.split()
if not dep.split(":")[0].endswith("-native")]
for dep in boot_depends:
- info_file = os.path.join(d.getVar("LICENSE_DIRECTORY", True),
+ info_file = os.path.join(d.getVar("LICENSE_DIRECTORY"),
dep, "recipeinfo")
# If the recipe and dependency name is the same
if os.path.exists(info_file):
@@ -265,7 +278,7 @@ def get_boot_dependencies(d):
# The fifth field contains what the task provides
if dep in taskdep[4]:
info_file = os.path.join(
- d.getVar("LICENSE_DIRECTORY", True),
+ d.getVar("LICENSE_DIRECTORY"),
taskdep[0], "recipeinfo")
if os.path.exists(info_file):
depends.append(taskdep[0])
@@ -295,7 +308,7 @@ python do_populate_lic() {
lic_files_paths = find_license_files(d)
# The base directory we wrangle licenses to
- destdir = os.path.join(d.getVar('LICSSTATEDIR', True), d.getVar('PN', True))
+ destdir = os.path.join(d.getVar('LICSSTATEDIR'), d.getVar('PN'))
copy_license_files(lic_files_paths, destdir)
info = get_recipe_info(d)
with open(os.path.join(destdir, "recipeinfo"), "w") as f:
@@ -306,11 +319,11 @@ python do_populate_lic() {
# it would be better to copy them in do_install_append, but find_license_files is python
python perform_packagecopy_prepend () {
enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
- if d.getVar('CLASSOVERRIDE', True) == 'class-target' and enabled:
+ if d.getVar('CLASSOVERRIDE') == 'class-target' and enabled:
lic_files_paths = find_license_files(d)
# LICENSE_FILES_DIRECTORY starts with '/' so os.path.join cannot be used to join D and LICENSE_FILES_DIRECTORY
- destdir = d.getVar('D', True) + os.path.join(d.getVar('LICENSE_FILES_DIRECTORY', True), d.getVar('PN', True))
+ destdir = d.getVar('D') + os.path.join(d.getVar('LICENSE_FILES_DIRECTORY'), d.getVar('PN'))
copy_license_files(lic_files_paths, destdir)
add_package_and_files(d)
}
@@ -318,15 +331,15 @@ perform_packagecopy[vardeps] += "LICENSE_CREATE_PACKAGE"
def get_recipe_info(d):
info = {}
- info["PV"] = d.getVar("PV", True)
- info["PR"] = d.getVar("PR", True)
- info["LICENSE"] = d.getVar("LICENSE", True)
+ info["PV"] = d.getVar("PV")
+ info["PR"] = d.getVar("PR")
+ info["LICENSE"] = d.getVar("LICENSE")
return info
def add_package_and_files(d):
- packages = d.getVar('PACKAGES', True)
- files = d.getVar('LICENSE_FILES_DIRECTORY', True)
- pn = d.getVar('PN', True)
+ packages = d.getVar('PACKAGES')
+ files = d.getVar('LICENSE_FILES_DIRECTORY')
+ pn = d.getVar('PN')
pn_lic = "%s%s" % (pn, d.getVar('LICENSE_PACKAGE_SUFFIX', False))
if pn_lic in packages:
bb.warn("%s package already existed in %s." % (pn_lic, pn))
@@ -334,7 +347,7 @@ def add_package_and_files(d):
# first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
d.setVar('PACKAGES', "%s %s" % (pn_lic, packages))
d.setVar('FILES_' + pn_lic, files)
- rrecommends_pn = d.getVar('RRECOMMENDS_' + pn, True)
+ rrecommends_pn = d.getVar('RRECOMMENDS_' + pn)
if rrecommends_pn:
d.setVar('RRECOMMENDS_' + pn, "%s %s" % (pn_lic, rrecommends_pn))
else:
@@ -345,7 +358,7 @@ def copy_license_files(lic_files_paths, destdir):
import errno
bb.utils.mkdirhier(destdir)
- for (basename, path) in lic_files_paths:
+ for (basename, path, beginline, endline) in lic_files_paths:
try:
src = path
dst = os.path.join(destdir, basename)
@@ -353,7 +366,7 @@ def copy_license_files(lic_files_paths, destdir):
os.remove(dst)
if os.path.islink(src):
src = os.path.realpath(src)
- canlink = os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev)
+ canlink = os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev) and beginline is None and endline is None
if canlink:
try:
os.link(src, dst)
@@ -364,20 +377,19 @@ def copy_license_files(lic_files_paths, destdir):
canlink = False
else:
raise
- try:
- if canlink:
- os.chown(dst,0,0)
- except OSError as err:
- if err.errno in (errno.EPERM, errno.EINVAL):
- # Suppress "Operation not permitted" error, as
- # sometimes this function is not executed under pseudo.
- # Also ignore "Invalid argument" errors that happen in
- # some (unprivileged) container environments (no root).
- pass
- else:
- raise
+            # Only chown if we hardlinked and we're running under pseudo
+ if canlink and os.environ.get('PSEUDO_DISABLED') == '0':
+ os.chown(dst,0,0)
if not canlink:
- shutil.copyfile(src, dst)
+ begin_idx = int(beginline)-1 if beginline is not None else None
+ end_idx = int(endline) if endline is not None else None
+ if begin_idx is None and end_idx is None:
+ shutil.copyfile(src, dst)
+ else:
+ with open(src, 'rb') as src_f:
+ with open(dst, 'wb') as dst_f:
+ dst_f.write(b''.join(src_f.readlines()[begin_idx:end_idx]))
+
except Exception as e:
bb.warn("Could not copy license file %s to %s: %s" % (src, dst, e))
@@ -390,20 +402,22 @@ def find_license_files(d):
from collections import defaultdict, OrderedDict
# All the license files for the package
- lic_files = d.getVar('LIC_FILES_CHKSUM', True)
- pn = d.getVar('PN', True)
+ lic_files = d.getVar('LIC_FILES_CHKSUM') or ""
+ pn = d.getVar('PN')
# The license files are located in S/LIC_FILE_CHECKSUM.
- srcdir = d.getVar('S', True)
+ srcdir = d.getVar('S')
# Directory we store the generic licenses as set in the distro configuration
- generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
+ generic_directory = d.getVar('COMMON_LICENSE_DIR')
# List of basename, path tuples
lic_files_paths = []
+    # dict to keep track of license files already handled as non-generic
+ non_generic_lics = {}
# Entries from LIC_FILES_CHKSUM
lic_chksums = {}
license_source_dirs = []
license_source_dirs.append(generic_directory)
try:
- additional_lic_dirs = d.getVar('LICENSE_PATH', True).split()
+ additional_lic_dirs = d.getVar('LICENSE_PATH').split()
for lic_dir in additional_lic_dirs:
license_source_dirs.append(lic_dir)
except:
@@ -431,10 +445,10 @@ def find_license_files(d):
# unless NO_GENERIC_LICENSE is set.
for lic_dir in license_source_dirs:
if not os.path.isfile(os.path.join(lic_dir, license_type)):
- if d.getVarFlag('SPDXLICENSEMAP', license_type, True) != None:
+ if d.getVarFlag('SPDXLICENSEMAP', license_type) != None:
# Great, there is an SPDXLICENSEMAP. We can copy!
bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type))
- spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type, True)
+ spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type)
license_source = lic_dir
break
elif os.path.isfile(os.path.join(lic_dir, license_type)):
@@ -442,23 +456,25 @@ def find_license_files(d):
license_source = lic_dir
break
- non_generic_lic = d.getVarFlag('NO_GENERIC_LICENSE', license_type, True)
+ non_generic_lic = d.getVarFlag('NO_GENERIC_LICENSE', license_type)
if spdx_generic and license_source:
        # we really should copy to generic_ + spdx_generic; however, that ends up messing up
        # the manifest audit. This should be fixed in emit_pkgdata (or we actually go and fix all the recipes)
- lic_files_paths.append(("generic_" + license_type, os.path.join(license_source, spdx_generic)))
+ lic_files_paths.append(("generic_" + license_type, os.path.join(license_source, spdx_generic),
+ None, None))
# The user may attempt to use NO_GENERIC_LICENSE for a generic license which doesn't make sense
# and should not be allowed, warn the user in this case.
- if d.getVarFlag('NO_GENERIC_LICENSE', license_type, True):
+ if d.getVarFlag('NO_GENERIC_LICENSE', license_type):
bb.warn("%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type))
elif non_generic_lic and non_generic_lic in lic_chksums:
# if NO_GENERIC_LICENSE is set, we copy the license files from the fetched source
# of the package rather than the license_source_dirs.
lic_files_paths.append(("generic_" + license_type,
- os.path.join(srcdir, non_generic_lic)))
+ os.path.join(srcdir, non_generic_lic), None, None))
+ non_generic_lics[non_generic_lic] = license_type
else:
            # Explicitly avoid the CLOSED license because it isn't generic
if license_type != 'CLOSED':
@@ -469,41 +485,40 @@ def find_license_files(d):
if not generic_directory:
bb.fatal("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
- if not lic_files:
- # No recipe should have an invalid license file. This is checked else
- # where, but let's be pedantic
- bb.note(pn + ": Recipe file does not have license file information.")
- return lic_files_paths
-
for url in lic_files.split():
try:
(type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
except bb.fetch.MalformedUrl:
- bb.fatal("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF', True), url))
+ bb.fatal("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF'), url))
# We want the license filename and path
- chksum = parm['md5'] if 'md5' in parm else parm['sha256']
- lic_chksums[path] = chksum
+ chksum = parm.get('md5', None)
+ beginline = parm.get('beginline')
+ endline = parm.get('endline')
+ lic_chksums[path] = (chksum, beginline, endline)
v = FindVisitor()
try:
- v.visit_string(d.getVar('LICENSE', True))
+ v.visit_string(d.getVar('LICENSE'))
except oe.license.InvalidLicense as exc:
- bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
+ bb.fatal('%s: %s' % (d.getVar('PF'), exc))
except SyntaxError:
- bb.warn("%s: Failed to parse it's LICENSE field." % (d.getVar('PF', True)))
-
+        bb.warn("%s: Failed to parse its LICENSE field." % (d.getVar('PF')))
# Add files from LIC_FILES_CHKSUM to list of license files
lic_chksum_paths = defaultdict(OrderedDict)
- for path, chksum in lic_chksums.items():
- lic_chksum_paths[os.path.basename(path)][chksum] = os.path.join(srcdir, path)
+ for path, data in sorted(lic_chksums.items()):
+ lic_chksum_paths[os.path.basename(path)][data] = (os.path.join(srcdir, path), data[1], data[2])
for basename, files in lic_chksum_paths.items():
if len(files) == 1:
- lic_files_paths.append((basename, list(files.values())[0]))
+ # Don't copy again a LICENSE already handled as non-generic
+ if basename in non_generic_lics:
+ continue
+ data = list(files.values())[0]
+ lic_files_paths.append(tuple([basename] + list(data)))
else:
# If there are multiple different license files with identical
# basenames we rename them to <file>.0, <file>.1, ...
- for i, path in enumerate(files.values()):
- lic_files_paths.append(("%s.%d" % (basename, i), path))
+ for i, data in enumerate(files.values()):
+ lic_files_paths.append(tuple(["%s.%d" % (basename, i)] + list(data)))
return lic_files_paths
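
    For reference, the beginline/endline values threaded through lic_chksums come
    from LIC_FILES_CHKSUM entries of this shape (hypothetical recipe fragment; the
    md5 value is a placeholder for the checksum of the selected lines):

        LIC_FILES_CHKSUM = "file://src/main.c;beginline=3;endline=20;md5=<md5-of-those-lines>"
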
@@ -511,7 +526,7 @@ def return_spdx(d, license):
"""
This function returns the spdx mapping of a license if it exists.
"""
- return d.getVarFlag('SPDXLICENSEMAP', license, True)
+ return d.getVarFlag('SPDXLICENSEMAP', license)
def canonical_license(d, license):
"""
@@ -520,9 +535,9 @@ def canonical_license(d, license):
    'X' if available and the trailing '+' (so GPLv3+ becomes GPL-3.0+),
or the passed license if there is no canonical form.
"""
- lic = d.getVarFlag('SPDXLICENSEMAP', license, True) or ""
+ lic = d.getVarFlag('SPDXLICENSEMAP', license) or ""
if not lic and license.endswith('+'):
- lic = d.getVarFlag('SPDXLICENSEMAP', license.rstrip('+'), True)
+ lic = d.getVarFlag('SPDXLICENSEMAP', license.rstrip('+'))
if lic:
lic += '+'
return lic or license
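
    The '+' handling above preserves the "or later" marker across the SPDX mapping.
    Illustrative results, assuming the stock SPDXLICENSEMAP entry mapping GPLv3 to
    GPL-3.0:

        canonical_license(d, 'GPLv3')       # -> 'GPL-3.0'
        canonical_license(d, 'GPLv3+')      # -> 'GPL-3.0+' (via the direct flag,
                                            #    or mapped minus '+' then '+' restored)
        canonical_license(d, 'FooLicense')  # -> 'FooLicense' (no mapping: unchanged)
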
@@ -537,7 +552,7 @@ def expand_wildcard_licenses(d, wildcard_licenses):
spdxmapkeys = d.getVarFlags('SPDXLICENSEMAP').keys()
for wld_lic in wildcard_licenses:
spdxflags = fnmatch.filter(spdxmapkeys, wld_lic)
- licenses += [d.getVarFlag('SPDXLICENSEMAP', flag, True) for flag in spdxflags]
+ licenses += [d.getVarFlag('SPDXLICENSEMAP', flag) for flag in spdxflags]
spdx_lics = (d.getVar('SRC_DISTRIBUTE_LICENSES', False) or '').split()
for wld_lic in wildcard_licenses:
@@ -548,7 +563,7 @@ def expand_wildcard_licenses(d, wildcard_licenses):
def incompatible_license_contains(license, truevalue, falsevalue, d):
license = canonical_license(d, license)
- bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE', True) or "").split()
+ bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
return truevalue if license in bad_licenses else falsevalue
@@ -559,9 +574,9 @@ def incompatible_license(d, dont_want_licenses, package=None):
as canonical (SPDX) names.
"""
import oe.license
- license = d.getVar("LICENSE_%s" % package, True) if package else None
+ license = d.getVar("LICENSE_%s" % package) if package else None
if not license:
- license = d.getVar('LICENSE', True)
+ license = d.getVar('LICENSE')
# Handles an "or" or two license sets provided by
# flattened_licenses(), pick one that works if possible.
@@ -572,7 +587,7 @@ def incompatible_license(d, dont_want_licenses, package=None):
try:
licenses = oe.license.flattened_licenses(license, choose_lic_set)
except oe.license.LicenseError as exc:
- bb.fatal('%s: %s' % (d.getVar('P', True), exc))
+ bb.fatal('%s: %s' % (d.getVar('P'), exc))
return any(not oe.license.license_ok(canonical_license(d, l), \
dont_want_licenses) for l in licenses)
@@ -620,16 +635,16 @@ def check_license_flags(d):
def all_license_flags_match(license_flags, whitelist):
""" Return first unmatched flag, None if all flags match """
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
split_whitelist = whitelist.split()
for flag in license_flags.split():
if not license_flag_matches(flag, split_whitelist, pn):
return flag
return None
- license_flags = d.getVar('LICENSE_FLAGS', True)
+ license_flags = d.getVar('LICENSE_FLAGS')
if license_flags:
- whitelist = d.getVar('LICENSE_FLAGS_WHITELIST', True)
+ whitelist = d.getVar('LICENSE_FLAGS_WHITELIST')
if not whitelist:
return license_flags
unmatched_flag = all_license_flags_match(license_flags, whitelist)
@@ -643,8 +658,8 @@ def check_license_format(d):
Validate operators in LICENSES.
No spaces are allowed between LICENSES.
"""
- pn = d.getVar('PN', True)
- licenses = d.getVar('LICENSE', True)
+ pn = d.getVar('PN')
+ licenses = d.getVar('LICENSE')
from oe.license import license_operator, license_operator_chars, license_pattern
elements = list(filter(lambda x: x.strip(), license_operator.split(licenses)))
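
    In practice this check means license names must be joined by explicit operators;
    illustrative values (recipe fragment, hypothetical):

        LICENSE = "GPLv2 & MIT"     # accepted: '&' between the two names
        LICENSE = "GPLv2 | MIT"     # accepted: alternative licensing
        LICENSE = "GPLv2 MIT"       # flagged: bare space between names, no operator
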
diff --git a/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass b/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass
index 734697f9e..27b137dec 100644
--- a/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass
+++ b/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass
@@ -4,11 +4,11 @@ def set_live_vm_vars(d, suffix):
vars = ['GRUB_CFG', 'SYSLINUX_CFG', 'ROOT', 'LABELS', 'INITRD']
for var in vars:
var_with_suffix = var + '_' + suffix
- if d.getVar(var, True):
+ if d.getVar(var):
bb.warn('Found potential conflicted var %s, please use %s rather than %s' % \
(var, var_with_suffix, var))
- elif d.getVar(var_with_suffix, True):
- d.setVar(var, d.getVar(var_with_suffix, True))
+ elif d.getVar(var_with_suffix):
+ d.setVar(var, d.getVar(var_with_suffix))
EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
@@ -25,7 +25,7 @@ def pcbios(d):
return pcbios
PCBIOS = "${@pcbios(d)}"
-PCBIOS_CLASS = "${@['','syslinux'][d.getVar('PCBIOS', True) == '1']}"
+PCBIOS_CLASS = "${@['','syslinux'][d.getVar('PCBIOS') == '1']}"
inherit ${EFI_CLASS}
inherit ${PCBIOS_CLASS}
diff --git a/import-layers/yocto-poky/meta/classes/manpages.bbclass b/import-layers/yocto-poky/meta/classes/manpages.bbclass
new file mode 100644
index 000000000..d16237b89
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/manpages.bbclass
@@ -0,0 +1,5 @@
+# Inherit this class to enable or disable building and installation of manpages
+# depending on whether 'api-documentation' is in DISTRO_FEATURES. Such building
+# tends to pull in the entire XML stack and other tools, so it's not enabled
+# by default.
+PACKAGECONFIG_append_class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}"
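
    A recipe opting in would pair the inherit with a PACKAGECONFIG definition along
    these lines (hypothetical fragment; the configure flags and native tools vary
    per recipe):

        inherit manpages
        # The class only adds 'manpages' to PACKAGECONFIG when DISTRO_FEATURES
        # contains 'api-documentation'; the recipe defines what the flag does:
        PACKAGECONFIG[manpages] = "--enable-man,--disable-man,libxslt-native docbook-xsl-stylesheets-native"
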
diff --git a/import-layers/yocto-poky/meta/classes/metadata_scm.bbclass b/import-layers/yocto-poky/meta/classes/metadata_scm.bbclass
index 2e6fac209..fa791f04c 100644
--- a/import-layers/yocto-poky/meta/classes/metadata_scm.bbclass
+++ b/import-layers/yocto-poky/meta/classes/metadata_scm.bbclass
@@ -26,7 +26,7 @@ def base_detect_branch(d):
return "<unknown>"
def base_get_scmbasepath(d):
- return os.path.join(d.getVar('COREBASE', True), 'meta')
+ return os.path.join(d.getVar('COREBASE'), 'meta')
def base_get_metadata_monotone_branch(path, d):
monotone_branch = "<unknown>"
diff --git a/import-layers/yocto-poky/meta/classes/migrate_localcount.bbclass b/import-layers/yocto-poky/meta/classes/migrate_localcount.bbclass
index aa0df8bb7..810a54131 100644
--- a/import-layers/yocto-poky/meta/classes/migrate_localcount.bbclass
+++ b/import-layers/yocto-poky/meta/classes/migrate_localcount.bbclass
@@ -6,12 +6,12 @@ python migrate_localcount_handler () {
if not e.data:
return
- pv = e.data.getVar('PV', True)
+ pv = e.data.getVar('PV')
if not 'AUTOINC' in pv:
return
localcounts = bb.persist_data.persist('BB_URI_LOCALCOUNT', e.data)
- pn = e.data.getVar('PN', True)
+ pn = e.data.getVar('PN')
revs = localcounts.get_by_pattern('%%-%s_rev' % pn)
counts = localcounts.get_by_pattern('%%-%s_count' % pn)
if not revs or not counts:
@@ -21,10 +21,10 @@ python migrate_localcount_handler () {
bb.warn("The number of revs and localcounts don't match in %s" % pn)
return
- version = e.data.getVar('PRAUTOINX', True)
+ version = e.data.getVar('PRAUTOINX')
srcrev = bb.fetch2.get_srcrev(e.data)
base_ver = 'AUTOINC-%s' % version[:version.find(srcrev)]
- pkgarch = e.data.getVar('PACKAGE_ARCH', True)
+ pkgarch = e.data.getVar('PACKAGE_ARCH')
value = max(int(count) for count in counts)
if len(revs) == 1:
@@ -33,8 +33,8 @@ python migrate_localcount_handler () {
else:
value += 1
- bb.utils.mkdirhier(e.data.getVar('PRSERV_DUMPDIR', True))
- df = e.data.getVar('LOCALCOUNT_DUMPFILE', True)
+ bb.utils.mkdirhier(e.data.getVar('PRSERV_DUMPDIR'))
+ df = e.data.getVar('LOCALCOUNT_DUMPFILE')
flock = bb.utils.lockfile("%s.lock" % df)
with open(df, 'a') as fd:
fd.write('PRAUTO$%s$%s$%s = "%s"\n' %
diff --git a/import-layers/yocto-poky/meta/classes/mime.bbclass b/import-layers/yocto-poky/meta/classes/mime.bbclass
index 721c73fcf..0df15831c 100644
--- a/import-layers/yocto-poky/meta/classes/mime.bbclass
+++ b/import-layers/yocto-poky/meta/classes/mime.bbclass
@@ -1,4 +1,5 @@
-DEPENDS += "shared-mime-info-native shared-mime-info"
+DEPENDS += "shared-mime-info"
+PACKAGE_WRITE_DEPS += "shared-mime-info-native"
mime_postinst() {
if [ "$1" = configure ]; then
@@ -28,8 +29,8 @@ fi
python populate_packages_append () {
import re
- packages = d.getVar('PACKAGES', True).split()
- pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar('PACKAGES').split()
+ pkgdest = d.getVar('PKGDEST')
for pkg in packages:
mime_dir = '%s/%s/usr/share/mime/packages' % (pkgdest, pkg)
@@ -41,15 +42,15 @@ python populate_packages_append () {
mimes.append(f)
if mimes:
bb.note("adding mime postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('mime_postinst', True)
+ postinst += d.getVar('mime_postinst')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg, True)
+ postrm = d.getVar('pkg_postrm_%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += d.getVar('mime_postrm', True)
+ postrm += d.getVar('mime_postrm')
d.setVar('pkg_postrm_%s' % pkg, postrm)
bb.note("adding shared-mime-info-data dependency to %s" % pkg)
d.appendVar('RDEPENDS_' + pkg, " shared-mime-info-data")
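
    The DEPENDS split at the top of this hunk follows the PACKAGE_WRITE_DEPS
    mechanism documented in package.bbclass further down: update-mime-database runs
    on the target normally, but when the postinst executes at rootfs-creation time
    the native tool must be present. The general shape for any recipe in this
    situation (helper name hypothetical):

        PACKAGE_WRITE_DEPS += "my-helper-native"
        pkg_postinst_${PN}() {
            if [ -n "$D" ]; then
                # rootfs-creation time: run the native helper staged via
                # PACKAGE_WRITE_DEPS against the image root
                my-helper --root=$D
            else
                my-helper
            fi
        }
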
diff --git a/import-layers/yocto-poky/meta/classes/mirrors.bbclass b/import-layers/yocto-poky/meta/classes/mirrors.bbclass
index 2cdc71b6e..4ad814ff2 100644
--- a/import-layers/yocto-poky/meta/classes/mirrors.bbclass
+++ b/import-layers/yocto-poky/meta/classes/mirrors.bbclass
@@ -27,7 +27,7 @@ ${GNUPG_MIRROR} ftp://mirrors.dotsrc.org/gcrypt \n \
ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \
ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \
ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
-ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR} \n \
+ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \n \
http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cerias.purdue.edu/pub/tools/unix/sysutils/lsof/ \n \
@@ -54,7 +54,7 @@ p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
https?$://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-npm://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \n \
cvs://.*/.* http://sources.openembedded.org/ \n \
svn://.*/.* http://sources.openembedded.org/ \n \
git://.*/.* http://sources.openembedded.org/ \n \
@@ -64,7 +64,18 @@ p4://.*/.* http://sources.openembedded.org/ \n \
osc://.*/.* http://sources.openembedded.org/ \n \
https?$://.*/.* http://sources.openembedded.org/ \n \
ftp://.*/.* http://sources.openembedded.org/ \n \
-npm://.*/.* http://sources.openembedded.org/ \n \
+npm://.*/?.* http://sources.openembedded.org/ \n \
${CPAN_MIRROR} http://cpan.metacpan.org/ \n \
${CPAN_MIRROR} http://search.cpan.org/CPAN/ \n \
"
+
+# Use MIRRORS to provide git repo fallbacks using the https protocol, for cases
+# where git native protocol fetches may fail due to local firewall rules, etc.
+
+MIRRORS += "\
+git://anonscm.debian.org/.* git://anonscm.debian.org/git/PATH;protocol=https \n \
+git://git.gnome.org/.* git://git.gnome.org/browse/PATH;protocol=https \n \
+git://git.savannah.gnu.org/.* git://git.savannah.gnu.org/git/PATH;protocol=https \n \
+git://git.yoctoproject.org/.* git://git.yoctoproject.org/git/PATH;protocol=https \n \
+git://.*/.* git://HOST/PATH;protocol=https \n \
+"
diff --git a/import-layers/yocto-poky/meta/classes/module.bbclass b/import-layers/yocto-poky/meta/classes/module.bbclass
index 68e3d341a..802476bc7 100644
--- a/import-layers/yocto-poky/meta/classes/module.bbclass
+++ b/import-layers/yocto-poky/meta/classes/module.bbclass
@@ -1,15 +1,16 @@
-inherit module-base kernel-module-split
+inherit module-base kernel-module-split pkgconfig
-addtask make_scripts after do_patch before do_compile
+addtask make_scripts after do_prepare_recipe_sysroot before do_compile
do_make_scripts[lockfiles] = "${TMPDIR}/kernel-scripts.lock"
do_make_scripts[depends] += "virtual/kernel:do_shared_workdir"
EXTRA_OEMAKE += "KERNEL_SRC=${STAGING_KERNEL_DIR}"
MODULES_INSTALL_TARGET ?= "modules_install"
+MODULES_MODULE_SYMVERS_LOCATION ?= ""
python __anonymous () {
- depends = d.getVar('DEPENDS', True)
+ depends = d.getVar('DEPENDS')
extra_symbols = []
for dep in depends.split():
if dep.startswith("kernel-module-"):
@@ -30,15 +31,22 @@ module_do_compile() {
module_do_install() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
- oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" \
+ oe_runmake DEPMOD=echo MODLIB="${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}" \
CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
O=${STAGING_KERNEL_BUILDDIR} \
${MODULES_INSTALL_TARGET}
- install -d -m0755 ${D}${includedir}/${BPN}
- cp -a --no-preserve=ownership ${B}/Module.symvers ${D}${includedir}/${BPN}
- # it doesn't actually seem to matter which path is specified here
- sed -e 's:${B}/::g' -i ${D}${includedir}/${BPN}/Module.symvers
+ if [ ! -e "${B}/${MODULES_MODULE_SYMVERS_LOCATION}/Module.symvers" ] ; then
+ bbwarn "Module.symvers not found in ${B}/${MODULES_MODULE_SYMVERS_LOCATION}"
+ bbwarn "Please consider setting MODULES_MODULE_SYMVERS_LOCATION to a"
+ bbwarn "directory below B to get correct inter-module dependencies"
+ else
+ install -Dm0644 "${B}/${MODULES_MODULE_SYMVERS_LOCATION}"/Module.symvers ${D}${includedir}/${BPN}/Module.symvers
+		# Module.symvers contains the absolute path to the build directory.
+		# While it doesn't actually seem to matter which path is specified,
+		# clear it out to avoid confusion
+ sed -e 's:${B}/::g' -i ${D}${includedir}/${BPN}/Module.symvers
+ fi
}
EXPORT_FUNCTIONS do_compile do_install
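
    An out-of-tree module recipe whose kbuild emits Module.symvers in a
    subdirectory of ${B} can now point the install step at it (hypothetical
    fragment):

        # Module.symvers is generated under ${B}/driver rather than ${B}
        MODULES_MODULE_SYMVERS_LOCATION = "driver"
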
diff --git a/import-layers/yocto-poky/meta/classes/multilib.bbclass b/import-layers/yocto-poky/meta/classes/multilib.bbclass
index d5a31287a..ab04597f9 100644
--- a/import-layers/yocto-poky/meta/classes/multilib.bbclass
+++ b/import-layers/yocto-poky/meta/classes/multilib.bbclass
@@ -1,20 +1,20 @@
python multilib_virtclass_handler () {
- cls = e.data.getVar("BBEXTENDCURR", True)
- variant = e.data.getVar("BBEXTENDVARIANT", True)
+ cls = e.data.getVar("BBEXTENDCURR")
+ variant = e.data.getVar("BBEXTENDVARIANT")
if cls != "multilib" or not variant:
return
- e.data.setVar('STAGING_KERNEL_DIR', e.data.getVar('STAGING_KERNEL_DIR', True))
+ e.data.setVar('STAGING_KERNEL_DIR', e.data.getVar('STAGING_KERNEL_DIR'))
# There should only be one kernel in multilib configs
# We also skip multilib setup for module packages.
- provides = (e.data.getVar("PROVIDES", True) or "").split()
+ provides = (e.data.getVar("PROVIDES") or "").split()
if "virtual/kernel" in provides or bb.data.inherits_class('module-base', e.data):
raise bb.parse.SkipPackage("We shouldn't have multilib variants for the kernel")
- save_var_name=e.data.getVar("MULTILIB_SAVE_VARNAME", True) or ""
+ save_var_name=e.data.getVar("MULTILIB_SAVE_VARNAME") or ""
for name in save_var_name.split():
- val=e.data.getVar(name, True)
+ val=e.data.getVar(name)
if val:
e.data.setVar(name + "_MULTILIB_ORIGINAL", val)
@@ -26,7 +26,7 @@ python multilib_virtclass_handler () {
if bb.data.inherits_class('image', e.data):
e.data.setVar("MLPREFIX", variant + "-")
e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
- e.data.setVar('SDKTARGETSYSROOT', e.data.getVar('SDKTARGETSYSROOT', True))
+ e.data.setVar('SDKTARGETSYSROOT', e.data.getVar('SDKTARGETSYSROOT'))
target_vendor = e.data.getVar("TARGET_VENDOR_" + "virtclass-multilib-" + variant, False)
if target_vendor:
e.data.setVar("TARGET_VENDOR", target_vendor)
@@ -36,7 +36,6 @@ python multilib_virtclass_handler () {
e.data.setVar("MLPREFIX", variant + "-")
override = ":virtclass-multilib-" + variant
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
- bb.data.update_data(e.data)
return
if bb.data.inherits_class('native', e.data):
@@ -50,17 +49,23 @@ python multilib_virtclass_handler () {
# Expand this since this won't work correctly once we set a multilib into place
- e.data.setVar("ALL_MULTILIB_PACKAGE_ARCHS", e.data.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))
+ e.data.setVar("ALL_MULTILIB_PACKAGE_ARCHS", e.data.getVar("ALL_MULTILIB_PACKAGE_ARCHS"))
override = ":virtclass-multilib-" + variant
+ blacklist = e.data.getVarFlag('PNBLACKLIST', e.data.getVar('PN'))
+ if blacklist:
+ pn_new = variant + "-" + e.data.getVar('PN')
+ if not e.data.getVarFlag('PNBLACKLIST', pn_new):
+ e.data.setVarFlag('PNBLACKLIST', pn_new, blacklist)
+
e.data.setVar("MLPREFIX", variant + "-")
e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
# Expand the WHITELISTs with multilib prefix
for whitelist in ["WHITELIST_GPL-3.0", "LGPLv2_WHITELIST_GPL-3.0"]:
- pkgs = e.data.getVar(whitelist, True)
+ pkgs = e.data.getVar(whitelist)
for pkg in pkgs.split():
pkgs += " " + variant + "-" + pkg
e.data.setVar(whitelist, pkgs)
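
    The new PNBLACKLIST block copies a base recipe's blacklist entry to each
    multilib variant unless the variant already has its own. Illustrative
    configuration (recipe name hypothetical):

        PNBLACKLIST[foo] = "depends on a BSP we do not ship"
        # With a 'lib32' multilib variant, the handler now implies:
        # PNBLACKLIST[lib32-foo] = "depends on a BSP we do not ship"
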
@@ -78,7 +83,7 @@ multilib_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
STAGINGCC_prepend = "${BBEXTENDVARIANT}-"
python __anonymous () {
- variant = d.getVar("BBEXTENDVARIANT", True)
+ variant = d.getVar("BBEXTENDVARIANT")
import oe.classextend
@@ -88,7 +93,7 @@ python __anonymous () {
clsextend.map_depends_variable("PACKAGE_INSTALL")
clsextend.map_depends_variable("LINGUAS_INSTALL")
clsextend.map_depends_variable("RDEPENDS")
- pinstall = d.getVar("LINGUAS_INSTALL", True) + " " + d.getVar("PACKAGE_INSTALL", True)
+ pinstall = d.getVar("LINGUAS_INSTALL") + " " + d.getVar("PACKAGE_INSTALL")
d.setVar("PACKAGE_INSTALL", pinstall)
d.setVar("LINGUAS_INSTALL", "")
# FIXME, we need to map this to something, not delete it!
@@ -104,7 +109,7 @@ python __anonymous () {
return
clsextend.rename_packages()
- clsextend.rename_package_variables((d.getVar("PACKAGEVARS", True) or "").split())
+ clsextend.rename_package_variables((d.getVar("PACKAGEVARS") or "").split())
clsextend.map_packagevars()
clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
@@ -119,7 +124,7 @@ PACKAGEFUNCS_append = " do_package_qa_multilib"
python do_package_qa_multilib() {
def check_mlprefix(pkg, var, mlprefix):
- values = bb.utils.explode_deps(d.getVar('%s_%s' % (var, pkg), True) or d.getVar(var, True) or "")
+ values = bb.utils.explode_deps(d.getVar('%s_%s' % (var, pkg)) or d.getVar(var) or "")
candidates = []
for i in values:
if i.startswith('virtual/'):
@@ -130,14 +135,14 @@ python do_package_qa_multilib() {
candidates.append(i)
if len(candidates) > 0:
msg = "%s package %s - suspicious values '%s' in %s" \
- % (d.getVar('PN', True), pkg, ' '.join(candidates), var)
+ % (d.getVar('PN'), pkg, ' '.join(candidates), var)
package_qa_handle_error("multilib", msg, d)
- ml = d.getVar('MLPREFIX', True)
+ ml = d.getVar('MLPREFIX')
if not ml:
return
- packages = d.getVar('PACKAGES', True)
+ packages = d.getVar('PACKAGES')
for pkg in packages.split():
check_mlprefix(pkg, 'RDEPENDS', ml)
check_mlprefix(pkg, 'RPROVIDES', ml)
diff --git a/import-layers/yocto-poky/meta/classes/multilib_global.bbclass b/import-layers/yocto-poky/meta/classes/multilib_global.bbclass
index 11ae2681f..fd0bfe127 100644
--- a/import-layers/yocto-poky/meta/classes/multilib_global.bbclass
+++ b/import-layers/yocto-poky/meta/classes/multilib_global.bbclass
@@ -1,7 +1,7 @@
def preferred_ml_updates(d):
# If any PREFERRED_PROVIDER or PREFERRED_VERSION are set,
# we need to mirror these variables in the multilib case;
- multilibs = d.getVar('MULTILIBS', True) or ""
+ multilibs = d.getVar('MULTILIBS') or ""
if not multilibs:
return
@@ -29,7 +29,6 @@ def preferred_ml_updates(d):
localdata = bb.data.createCopy(d)
override = ":virtclass-multilib-" + p
localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
- bb.data.update_data(localdata)
if "-canadian-" in pkg:
newname = localdata.expand(v)
else:
@@ -57,7 +56,6 @@ def preferred_ml_updates(d):
localdata = bb.data.createCopy(d)
override = ":virtclass-multilib-" + p
localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
- bb.data.update_data(localdata)
newname = localdata.expand(prov)
if newname != prov:
newval = localdata.expand(val)
@@ -80,7 +78,6 @@ def preferred_ml_updates(d):
localdata = bb.data.createCopy(d)
override = ":virtclass-multilib-" + p
localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
- bb.data.update_data(localdata)
newname = localdata.expand(prov)
if newname != prov and not d.getVar(newname, False):
d.setVar(newname, localdata.expand(newval))
@@ -102,7 +99,7 @@ def preferred_ml_updates(d):
prov = prov.replace("virtual/", "")
return "virtual/" + prefix + "-" + prov
- mp = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
+ mp = (d.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
extramp = []
for p in mp:
if p.endswith("-native") or "-crosssdk-" in p or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
@@ -111,14 +108,14 @@ def preferred_ml_updates(d):
extramp.append(translate_provide(pref, p))
d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))
- abisafe = (d.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
+ abisafe = (d.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
extras = []
for p in prefixes:
for a in abisafe:
extras.append(p + "-" + a)
d.appendVar("SIGGEN_EXCLUDERECIPES_ABISAFE", " " + " ".join(extras))
- siggen_exclude = (d.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
+ siggen_exclude = (d.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
extras = []
for p in prefixes:
for a in siggen_exclude:
@@ -128,7 +125,7 @@ def preferred_ml_updates(d):
python multilib_virtclass_handler_vendor () {
if isinstance(e, bb.event.ConfigParsed):
- for v in e.data.getVar("MULTILIB_VARIANTS", True).split():
+ for v in e.data.getVar("MULTILIB_VARIANTS").split():
if e.data.getVar("TARGET_VENDOR_virtclass-multilib-" + v, False) is None:
e.data.setVar("TARGET_VENDOR_virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
preferred_ml_updates(e.data)
@@ -137,17 +134,15 @@ addhandler multilib_virtclass_handler_vendor
multilib_virtclass_handler_vendor[eventmask] = "bb.event.ConfigParsed"
python multilib_virtclass_handler_global () {
- if not e.data:
+ variant = e.data.getVar("BBEXTENDVARIANT")
+ if variant:
return
- variant = e.data.getVar("BBEXTENDVARIANT", True)
-
- if isinstance(e, bb.event.RecipeParsed) and not variant:
- if bb.data.inherits_class('kernel', e.data) or \
+ if bb.data.inherits_class('kernel', e.data) or \
bb.data.inherits_class('module-base', e.data) or \
(bb.data.inherits_class('allarch', e.data) and\
not bb.data.inherits_class('packagegroup', e.data)):
- variants = (e.data.getVar("MULTILIB_VARIANTS", True) or "").split()
+ variants = (e.data.getVar("MULTILIB_VARIANTS") or "").split()
import oe.classextend
clsextends = []
@@ -155,21 +150,21 @@ python multilib_virtclass_handler_global () {
clsextends.append(oe.classextend.ClassExtender(variant, e.data))
# Process PROVIDES
- origprovs = provs = e.data.getVar("PROVIDES", True) or ""
+ origprovs = provs = e.data.getVar("PROVIDES") or ""
for clsextend in clsextends:
provs = provs + " " + clsextend.map_variable("PROVIDES", setvar=False)
e.data.setVar("PROVIDES", provs)
# Process RPROVIDES
- origrprovs = rprovs = e.data.getVar("RPROVIDES", True) or ""
+ origrprovs = rprovs = e.data.getVar("RPROVIDES") or ""
for clsextend in clsextends:
rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES", setvar=False)
if rprovs.strip():
e.data.setVar("RPROVIDES", rprovs)
# Process RPROVIDES_${PN}...
- for pkg in (e.data.getVar("PACKAGES", True) or "").split():
- origrprovs = rprovs = e.data.getVar("RPROVIDES_%s" % pkg, True) or ""
+ for pkg in (e.data.getVar("PACKAGES") or "").split():
+ origrprovs = rprovs = e.data.getVar("RPROVIDES_%s" % pkg) or ""
for clsextend in clsextends:
rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES_%s" % pkg, setvar=False)
rprovs = rprovs + " " + clsextend.extname + "-" + pkg
@@ -177,5 +172,5 @@ python multilib_virtclass_handler_global () {
}
addhandler multilib_virtclass_handler_global
-multilib_virtclass_handler_global[eventmask] = "bb.event.RecipePreFinalise bb.event.RecipeParsed"
+multilib_virtclass_handler_global[eventmask] = "bb.event.RecipeParsed"
diff --git a/import-layers/yocto-poky/meta/classes/multilib_header.bbclass b/import-layers/yocto-poky/meta/classes/multilib_header.bbclass
index 304c28e77..e03f5b13b 100644
--- a/import-layers/yocto-poky/meta/classes/multilib_header.bbclass
+++ b/import-layers/yocto-poky/meta/classes/multilib_header.bbclass
@@ -13,13 +13,9 @@ oe_multilib_header() {
;;
*)
esac
- # We use
- # For ARM: We don't support multilib builds.
# For MIPS: "n32" is a special case, which needs to be
# distinct from both 64-bit and 32-bit.
case ${TARGET_ARCH} in
- arm*) return
- ;;
mips*) case "${MIPSPKGSFX_ABI}" in
"-n32")
ident=n32
@@ -31,9 +27,6 @@ oe_multilib_header() {
;;
*) ident=${SITEINFO_BITS}
esac
- if echo ${TARGET_ARCH} | grep -q arm; then
- return
- fi
for each_header in "$@" ; do
if [ ! -f "${D}/${includedir}/$each_header" ]; then
bberror "oe_multilib_header: Unable to find header $each_header."
diff --git a/import-layers/yocto-poky/meta/classes/native.bbclass b/import-layers/yocto-poky/meta/classes/native.bbclass
index 143f8a914..6b7f3dd76 100644
--- a/import-layers/yocto-poky/meta/classes/native.bbclass
+++ b/import-layers/yocto-poky/meta/classes/native.bbclass
@@ -95,7 +95,7 @@ libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
-do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_NATIVE}/"
+do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
# Since we actually install these into situ there is no staging prefix
STAGING_DIR_HOST = ""
@@ -112,22 +112,33 @@ PKG_CONFIG_SYSTEM_INCLUDE_PATH[unexport] = "1"
LIBCOVERRIDE = ""
CLASSOVERRIDE = "class-native"
MACHINEOVERRIDES = ""
+MACHINE_FEATURES = ""
PATH_prepend = "${COREBASE}/scripts/native-intercept:"
+# This class encodes staging paths into its scripts data so it can only be
+# reused if we manipulate the paths.
+SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
+
python native_virtclass_handler () {
- classextend = e.data.getVar('BBCLASSEXTEND', True) or ""
- if "native" not in classextend:
+ pn = e.data.getVar("PN")
+ if not pn.endswith("-native"):
return
- pn = e.data.getVar("PN", True)
- if not pn.endswith("-native"):
+ # Set features here to prevent appends and distro features backfill
+ # from modifying native distro features
+ features = set(d.getVar("DISTRO_FEATURES_NATIVE").split())
+ filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVE"), d).split())
+ d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
+
+ classextend = e.data.getVar('BBCLASSEXTEND') or ""
+ if "native" not in classextend:
return
def map_dependencies(varname, d, suffix = ""):
if suffix:
varname = varname + "_" + suffix
- deps = d.getVar(varname, True)
+ deps = d.getVar(varname)
if not deps:
return
deps = bb.utils.explode_deps(deps)
@@ -146,14 +157,14 @@ python native_virtclass_handler () {
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-native")
map_dependencies("DEPENDS", e.data)
- for pkg in [e.data.getVar("PN", True), "", "${PN}"]:
+ for pkg in [e.data.getVar("PN"), "", "${PN}"]:
map_dependencies("RDEPENDS", e.data, pkg)
map_dependencies("RRECOMMENDS", e.data, pkg)
map_dependencies("RSUGGESTS", e.data, pkg)
map_dependencies("RPROVIDES", e.data, pkg)
map_dependencies("RREPLACES", e.data, pkg)
- provides = e.data.getVar("PROVIDES", True)
+ provides = e.data.getVar("PROVIDES")
nprovides = []
for prov in provides.split():
if prov.find(pn) != -1:
@@ -170,6 +181,11 @@ python native_virtclass_handler () {
addhandler native_virtclass_handler
native_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
+python do_addto_recipe_sysroot () {
+ bb.build.exec_func("extend_recipe_sysroot", d)
+}
+addtask addto_recipe_sysroot after do_populate_sysroot
+
inherit nopackages
do_packagedata[stamp-extra-info] = ""
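
    The DISTRO_FEATURES computation in the handler above unions a fixed native
    feature set with whatever target features survive the filter. A sketch with
    hypothetical values:

        # DISTRO_FEATURES               = "alsa ipv6 systemd x11"
        # DISTRO_FEATURES_FILTER_NATIVE = "api-documentation x11"
        # DISTRO_FEATURES_NATIVE        = "ipv6 xattr"
        features = set("ipv6 xattr".split())   # DISTRO_FEATURES_NATIVE
        filtered = set("x11".split())          # bb.utils.filter() keeps only the
                                               # listed items present in DISTRO_FEATURES
        assert " ".join(sorted(features | filtered)) == "ipv6 x11 xattr"
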
diff --git a/import-layers/yocto-poky/meta/classes/nativesdk.bbclass b/import-layers/yocto-poky/meta/classes/nativesdk.bbclass
index 31dde4a90..69fb45c8a 100644
--- a/import-layers/yocto-poky/meta/classes/nativesdk.bbclass
+++ b/import-layers/yocto-poky/meta/classes/nativesdk.bbclass
@@ -25,9 +25,7 @@ PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}"
DEPENDS_append = " chrpath-replacement-native"
EXTRANATIVEPATH += "chrpath-native"
-STAGING_DIR_HOST = "${STAGING_DIR}/${MULTIMACH_HOST_SYS}"
-STAGING_DIR_TARGET = "${STAGING_DIR}/${MULTIMACH_TARGET_SYS}"
-PKGDATA_DIR = "${STAGING_DIR_HOST}/pkgdata"
+PKGDATA_DIR = "${TMPDIR}/pkgdata/${SDK_SYS}"
HOST_ARCH = "${SDK_ARCH}"
HOST_VENDOR = "${SDK_VENDOR}"
@@ -45,6 +43,10 @@ TARGET_PREFIX = "${SDK_PREFIX}"
TARGET_CC_ARCH = "${SDK_CC_ARCH}"
TARGET_LD_ARCH = "${SDK_LD_ARCH}"
TARGET_AS_ARCH = "${SDK_AS_ARCH}"
+TARGET_CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
+TARGET_CFLAGS = "${BUILDSDK_CFLAGS}"
+TARGET_CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
+TARGET_LDFLAGS = "${BUILDSDK_LDFLAGS}"
TARGET_FPU = ""
EXTRA_OECONF_GCC_FLOAT = ""
@@ -64,17 +66,23 @@ export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${libdir}/pkgconfig"
export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
python nativesdk_virtclass_handler () {
- pn = e.data.getVar("PN", True)
+ pn = e.data.getVar("PN")
if not (pn.endswith("-nativesdk") or pn.startswith("nativesdk-")):
return
+ # Set features here to prevent appends and distro features backfill
+ # from modifying nativesdk distro features
+ features = set(d.getVar("DISTRO_FEATURES_NATIVESDK").split())
+ filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVESDK"), d).split())
+ d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
+
e.data.setVar("MLPREFIX", "nativesdk-")
- e.data.setVar("PN", "nativesdk-" + e.data.getVar("PN", True).replace("-nativesdk", "").replace("nativesdk-", ""))
+ e.data.setVar("PN", "nativesdk-" + e.data.getVar("PN").replace("-nativesdk", "").replace("nativesdk-", ""))
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-nativesdk")
}
python () {
- pn = d.getVar("PN", True)
+ pn = d.getVar("PN")
if not pn.startswith("nativesdk-"):
return
@@ -82,7 +90,7 @@ python () {
clsextend = oe.classextend.NativesdkClassExtender("nativesdk", d)
clsextend.rename_packages()
- clsextend.rename_package_variables((d.getVar("PACKAGEVARS", True) or "").split())
+ clsextend.rename_package_variables((d.getVar("PACKAGEVARS") or "").split())
clsextend.map_depends_variable("DEPENDS")
clsextend.map_packagevars()
diff --git a/import-layers/yocto-poky/meta/classes/npm.bbclass b/import-layers/yocto-poky/meta/classes/npm.bbclass
index fce4c1146..a69bedbb2 100644
--- a/import-layers/yocto-poky/meta/classes/npm.bbclass
+++ b/import-layers/yocto-poky/meta/classes/npm.bbclass
@@ -13,7 +13,8 @@ def npm_oe_arch_map(target_arch, d):
elif re.match('arm64$', target_arch): return 'arm'
return target_arch
-NPM_ARCH ?= "${@npm_oe_arch_map(d.getVar('TARGET_ARCH', True), d)}"
+NPM_ARCH ?= "${@npm_oe_arch_map(d.getVar('TARGET_ARCH'), d)}"
+NPM_INSTALL_DEV = "0"
npm_do_compile() {
# Copy in any additionally fetched modules
@@ -23,17 +24,32 @@ npm_do_compile() {
# changing the home directory to the working directory, the .npmrc will
# be created in this directory
export HOME=${WORKDIR}
- npm config set dev false
+ if [ "${NPM_INSTALL_DEV}" = "1" ]; then
+ npm config set dev true
+ else
+ npm config set dev false
+ fi
npm set cache ${WORKDIR}/npm_cache
# clear cache before every build
npm cache clear
# Install pkg into ${S} without going to the registry
- npm --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --production --no-registry install
+ if [ "${NPM_INSTALL_DEV}" = "1" ]; then
+ npm --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --no-registry install
+ else
+ npm --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --production --no-registry install
+ fi
}
npm_do_install() {
+ # changing the home directory to the working directory, the .npmrc will
+ # be created in this directory
+ export HOME=${WORKDIR}
mkdir -p ${NPM_INSTALLDIR}/
- cp -a ${S}/* ${NPM_INSTALLDIR}/ --no-preserve=ownership
+ npm install --prefix ${D}${prefix} -g --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --production --no-registry
+ if [ -d ${D}${prefix}/etc ] ; then
+ # This will be empty
+ rmdir ${D}${prefix}/etc
+ fi
}
python populate_packages_prepend () {
@@ -55,7 +71,7 @@ python populate_packages_prepend () {
description = pdata.get('description', None)
if description:
d.setVar('SUMMARY_%s' % expanded_pkgname, description.replace(u"\u2018", "'").replace(u"\u2019", "'"))
- d.appendVar('RDEPENDS_%s' % d.getVar('PN', True), ' %s' % ' '.join(pkgnames).replace('_', '-'))
+ d.appendVar('RDEPENDS_%s' % d.getVar('PN'), ' %s' % ' '.join(pkgnames).replace('_', '-'))
}
FILES_${PN} += " \
diff --git a/import-layers/yocto-poky/meta/classes/oelint.bbclass b/import-layers/yocto-poky/meta/classes/oelint.bbclass
index c4febc2cf..2589d3405 100644
--- a/import-layers/yocto-poky/meta/classes/oelint.bbclass
+++ b/import-layers/yocto-poky/meta/classes/oelint.bbclass
@@ -1,7 +1,7 @@
addtask lint before do_build
do_lint[nostamp] = "1"
python do_lint() {
- pkgname = d.getVar("PN", True)
+ pkgname = d.getVar("PN")
##############################
# Test that DESCRIPTION exists
@@ -35,7 +35,7 @@ python do_lint() {
# Check that all patches have Signed-off-by and Upstream-Status
#
srcuri = d.getVar("SRC_URI", False).split()
- fpaths = (d.getVar('FILESPATH', True) or '').split(':')
+ fpaths = (d.getVar('FILESPATH') or '').split(':')
def findPatch(patchname):
for dir in fpaths:
diff --git a/import-layers/yocto-poky/meta/classes/own-mirrors.bbclass b/import-layers/yocto-poky/meta/classes/own-mirrors.bbclass
index 12b42675b..0296d545b 100644
--- a/import-layers/yocto-poky/meta/classes/own-mirrors.bbclass
+++ b/import-layers/yocto-poky/meta/classes/own-mirrors.bbclass
@@ -9,5 +9,5 @@ p4://.*/.* ${SOURCE_MIRROR_URL}
osc://.*/.* ${SOURCE_MIRROR_URL}
https?$://.*/.* ${SOURCE_MIRROR_URL}
ftp://.*/.* ${SOURCE_MIRROR_URL}
-npm://.*/.* ${SOURCE_MIRROR_URL}
+npm://.*/?.* ${SOURCE_MIRROR_URL}
}
diff --git a/import-layers/yocto-poky/meta/classes/package.bbclass b/import-layers/yocto-poky/meta/classes/package.bbclass
index a6f0a7a63..a03c05b9f 100644
--- a/import-layers/yocto-poky/meta/classes/package.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package.bbclass
@@ -54,6 +54,14 @@ ALL_MULTILIB_PACKAGE_ARCHS = "${@all_multilib_tune_values(d, 'PACKAGE_ARCHS')}"
# rpm is used for the per-file dependency identification
PACKAGE_DEPENDS += "rpm-native"
+
+# If your postinstall can execute at rootfs creation time rather than on
+# target but depends on a native/cross tool in order to execute, you need to
+# list that tool in PACKAGE_WRITE_DEPS. Target package dependencies belong
+# in the package dependencies as normal, this is just for native/cross support
+# tools at rootfs build time.
+PACKAGE_WRITE_DEPS ??= ""
+
def legitimize_package_name(s):
"""
Make sure package names are legitimate strings
@@ -120,7 +128,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
"""
- dvar = d.getVar('PKGD', True)
+ dvar = d.getVar('PKGD')
root = d.expand(root)
output_pattern = d.expand(output_pattern)
extra_depends = d.expand(extra_depends)
@@ -130,7 +138,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
if not os.path.exists(dvar + root):
return []
- ml = d.getVar("MLPREFIX", True)
+ ml = d.getVar("MLPREFIX")
if ml:
if not output_pattern.startswith(ml):
output_pattern = ml + output_pattern
@@ -145,7 +153,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
extra_depends = " ".join(newdeps)
- packages = d.getVar('PACKAGES', True).split()
+ packages = d.getVar('PACKAGES').split()
split_packages = set()
if postinst:
@@ -163,7 +171,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
objs.append(relpath)
if extra_depends == None:
- extra_depends = d.getVar("PN", True)
+ extra_depends = d.getVar("PN")
if not summary:
summary = description
@@ -189,7 +197,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
packages = [pkg] + packages
else:
packages.append(pkg)
- oldfiles = d.getVar('FILES_' + pkg, True)
+ oldfiles = d.getVar('FILES_' + pkg)
newfile = os.path.join(root, o)
# These names will be passed through glob() so if the filename actually
# contains * or ? (rare, but possible) we need to handle that specially
@@ -214,9 +222,9 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
d.setVar('FILES_' + pkg, oldfiles + " " + newfile)
if extra_depends != '':
d.appendVar('RDEPENDS_' + pkg, ' ' + extra_depends)
- if not d.getVar('DESCRIPTION_' + pkg, True):
+ if not d.getVar('DESCRIPTION_' + pkg):
d.setVar('DESCRIPTION_' + pkg, description % on)
- if not d.getVar('SUMMARY_' + pkg, True):
+ if not d.getVar('SUMMARY_' + pkg):
d.setVar('SUMMARY_' + pkg, summary % on)
if postinst:
d.setVar('pkg_postinst_' + pkg, postinst)
@@ -231,9 +239,9 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
PACKAGE_DEPENDS += "file-native"
python () {
- if d.getVar('PACKAGES', True) != '':
+ if d.getVar('PACKAGES') != '':
deps = ""
- for dep in (d.getVar('PACKAGE_DEPENDS', True) or "").split():
+ for dep in (d.getVar('PACKAGE_DEPENDS') or "").split():
deps += " %s:do_populate_sysroot" % dep
d.appendVarFlag('do_package', 'depends', deps)
@@ -286,14 +294,14 @@ def files_from_filevars(filevars):
# Called in package_<rpm,ipk,deb>.bbclass to get the correct list of configuration files
def get_conffiles(pkg, d):
- pkgdest = d.getVar('PKGDEST', True)
+ pkgdest = d.getVar('PKGDEST')
root = os.path.join(pkgdest, pkg)
cwd = os.getcwd()
os.chdir(root)
- conffiles = d.getVar('CONFFILES_%s' % pkg, True);
+ conffiles = d.getVar('CONFFILES_%s' % pkg);
if conffiles == None:
- conffiles = d.getVar('CONFFILES', True)
+ conffiles = d.getVar('CONFFILES')
if conffiles == None:
conffiles = ""
conffiles = conffiles.split()
@@ -318,7 +326,7 @@ def get_conffiles(pkg, d):
return conf_list
def checkbuildpath(file, d):
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
with open(file) as f:
file_content = f.read()
if tmpdir in file_content:
@@ -335,9 +343,9 @@ def splitdebuginfo(file, debugfile, debugsrcdir, sourcefile, d):
import stat
- dvar = d.getVar('PKGD', True)
- objcopy = d.getVar("OBJCOPY", True)
- debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit")
+ dvar = d.getVar('PKGD')
+ objcopy = d.getVar("OBJCOPY")
+ debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/debugedit")
# We ignore kernel modules, we don't generate debug info files.
if file.find("/lib/modules/") != -1 and file.endswith(".ko"):
@@ -382,11 +390,11 @@ def copydebugsources(debugsrcdir, d):
sourcefile = d.expand("${WORKDIR}/debugsources.list")
if debugsrcdir and os.path.isfile(sourcefile):
- dvar = d.getVar('PKGD', True)
- strip = d.getVar("STRIP", True)
- objcopy = d.getVar("OBJCOPY", True)
+ dvar = d.getVar('PKGD')
+ strip = d.getVar("STRIP")
+ objcopy = d.getVar("OBJCOPY")
debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit")
- workdir = d.getVar("WORKDIR", True)
+ workdir = d.getVar("WORKDIR")
workparentdir = os.path.dirname(os.path.dirname(workdir))
workbasedir = os.path.basename(os.path.dirname(workdir)) + "/" + os.path.basename(workdir)
@@ -406,7 +414,8 @@ def copydebugsources(debugsrcdir, d):
bb.utils.mkdirhier(basepath)
cpath.updatecache(basepath)
- processdebugsrc = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '(<internal>|<built-in>)$' | "
+ # Ignore files from the recipe sysroots (target and native)
+ processdebugsrc = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '((<internal>|<built-in>)$|/.*recipe-sysroot.*/)' | "
# We need to ignore files that are not actually ours
# we do this by only paying attention to items from this package
processdebugsrc += "fgrep -zw '%s' | "
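The tightened egrep expression now also discards any path containing a recipe-sysroot component. A rough Python equivalent of the exclusion, with hypothetical sample paths:

    import re
    # Mirrors the egrep -v pattern: compiler pseudo-files and sysroot paths are dropped
    drop = re.compile(r'((<internal>|<built-in>)$|/.*recipe-sysroot.*/)')
    paths = ['<built-in>',
             '/work/foo/recipe-sysroot-native/usr/include/stdio.h',
             '/work/foo/src/main.c']
    kept = [p for p in paths if not drop.search(p)]
    # kept == ['/work/foo/src/main.c']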
@@ -462,26 +471,23 @@ def get_package_additional_metadata (pkg_type, d):
if d.getVar(key, False) is None:
continue
d.setVarFlag(key, "type", "list")
- if d.getVarFlag(key, "separator", True) is None:
+ if d.getVarFlag(key, "separator") is None:
d.setVarFlag(key, "separator", "\\n")
metadata_fields = [field.strip() for field in oe.data.typed_value(key, d)]
return "\n".join(metadata_fields).strip()
def runtime_mapping_rename (varname, pkg, d):
- #bb.note("%s before: %s" % (varname, d.getVar(varname, True)))
-
- if bb.data.inherits_class('packagegroup', d):
- return
+ #bb.note("%s before: %s" % (varname, d.getVar(varname)))
new_depends = {}
- deps = bb.utils.explode_dep_versions2(d.getVar(varname, True) or "")
+ deps = bb.utils.explode_dep_versions2(d.getVar(varname) or "")
for depend in deps:
new_depend = get_package_mapping(depend, pkg, d)
new_depends[new_depend] = deps[depend]
d.setVar(varname, bb.utils.join_deps(new_depends, commasep=False))
- #bb.note("%s after: %s" % (varname, d.getVar(varname, True)))
+ #bb.note("%s after: %s" % (varname, d.getVar(varname)))
#
# Package functions suitable for inclusion in PACKAGEFUNCS
@@ -492,34 +498,34 @@ python package_get_auto_pr() {
import re
# Support per recipe PRSERV_HOST
- pn = d.getVar('PN', True)
- host = d.getVar("PRSERV_HOST_" + pn, True)
+ pn = d.getVar('PN')
+ host = d.getVar("PRSERV_HOST_" + pn)
if not (host is None):
d.setVar("PRSERV_HOST", host)
- pkgv = d.getVar("PKGV", True)
+ pkgv = d.getVar("PKGV")
# PR Server not active, handle AUTOINC
- if not d.getVar('PRSERV_HOST', True):
+ if not d.getVar('PRSERV_HOST'):
if 'AUTOINC' in pkgv:
d.setVar("PKGV", pkgv.replace("AUTOINC", "0"))
return
auto_pr = None
- pv = d.getVar("PV", True)
- version = d.getVar("PRAUTOINX", True)
- pkgarch = d.getVar("PACKAGE_ARCH", True)
- checksum = d.getVar("BB_TASKHASH", True)
+ pv = d.getVar("PV")
+ version = d.getVar("PRAUTOINX")
+ pkgarch = d.getVar("PACKAGE_ARCH")
+ checksum = d.getVar("BB_TASKHASH")
- if d.getVar('PRSERV_LOCKDOWN', True):
- auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch, True) or d.getVar('PRAUTO_' + version, True) or None
+ if d.getVar('PRSERV_LOCKDOWN'):
+ auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch) or d.getVar('PRAUTO_' + version) or None
if auto_pr is None:
bb.fatal("Can NOT get PRAUTO from lockdown exported file")
d.setVar('PRAUTO',str(auto_pr))
return
try:
- conn = d.getVar("__PRSERV_CONN", True)
+ conn = d.getVar("__PRSERV_CONN")
if conn is None:
conn = oe.prservice.prserv_make_conn(d)
if conn is not None:
@@ -540,19 +546,19 @@ python package_get_auto_pr() {
LOCALEBASEPN ??= "${PN}"
python package_do_split_locales() {
- if (d.getVar('PACKAGE_NO_LOCALE', True) == '1'):
+ if (d.getVar('PACKAGE_NO_LOCALE') == '1'):
bb.debug(1, "package requested not splitting locales")
return
- packages = (d.getVar('PACKAGES', True) or "").split()
+ packages = (d.getVar('PACKAGES') or "").split()
- datadir = d.getVar('datadir', True)
+ datadir = d.getVar('datadir')
if not datadir:
bb.note("datadir not defined")
return
- dvar = d.getVar('PKGD', True)
- pn = d.getVar('LOCALEBASEPN', True)
+ dvar = d.getVar('PKGD')
+ pn = d.getVar('LOCALEBASEPN')
if pn + '-locale' in packages:
packages.remove(pn + '-locale')
@@ -565,10 +571,10 @@ python package_do_split_locales() {
locales = os.listdir(localedir)
- summary = d.getVar('SUMMARY', True) or pn
- description = d.getVar('DESCRIPTION', True) or ""
- locale_section = d.getVar('LOCALE_SECTION', True)
- mlprefix = d.getVar('MLPREFIX', True) or ""
+ summary = d.getVar('SUMMARY') or pn
+ description = d.getVar('DESCRIPTION') or ""
+ locale_section = d.getVar('LOCALE_SECTION')
+ mlprefix = d.getVar('MLPREFIX') or ""
for l in sorted(locales):
ln = legitimize_package_name(l)
pkg = pn + '-locale-' + ln
@@ -589,14 +595,14 @@ python package_do_split_locales() {
# glibc-localedata-translit* won't install as a dependency
# for some other package which breaks meta-toolchain
# Probably breaks since virtual-locale- isn't provided anywhere
- #rdep = (d.getVar('RDEPENDS_%s' % pn, True) or "").split()
+ #rdep = (d.getVar('RDEPENDS_%s' % pn) or "").split()
#rdep.append('%s-locale*' % pn)
#d.setVar('RDEPENDS_%s' % pn, ' '.join(rdep))
}
python perform_packagecopy () {
- dest = d.getVar('D', True)
- dvar = d.getVar('PKGD', True)
+ dest = d.getVar('D')
+ dvar = d.getVar('PKGD')
# Start package population by taking a copy of the installed
# files to operate on
@@ -730,8 +736,8 @@ python fixup_perms () {
# paths are resolved via BBPATH
def get_fs_perms_list(d):
str = ""
- bbpath = d.getVar('BBPATH', True)
- fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES', True)
+ bbpath = d.getVar('BBPATH')
+ fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES')
if not fs_perms_tables:
fs_perms_tables = 'files/fs-perms.txt'
for conf_file in fs_perms_tables.split():
@@ -740,7 +746,7 @@ python fixup_perms () {
- dvar = d.getVar('PKGD', True)
+ dvar = d.getVar('PKGD')
fs_perms_table = {}
fs_link_table = {}
@@ -769,10 +775,10 @@ python fixup_perms () {
'oldincludedir' ]
for path in target_path_vars:
- dir = d.getVar(path, True) or ""
+ dir = d.getVar(path) or ""
if dir == "":
continue
- fs_perms_table[dir] = fs_perms_entry(bb.data.expand("%s 0755 root root false - - -" % (dir), d))
+ fs_perms_table[dir] = fs_perms_entry(d.expand("%s 0755 root root false - - -" % (dir)))
# Now we actually load from the configuration files
for conf in get_fs_perms_list(d).split():
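The switch from bb.data.expand() to the datastore's own method is mechanical; both spellings yield the same string. A minimal sketch, assuming a datastore d and a dir value:

    # Deprecated module-level helper
    entry = bb.data.expand("%s 0755 root root false - - -" % dir, d)
    # Preferred datastore method, as used in the new code
    entry = d.expand("%s 0755 root root false - - -" % dir)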
@@ -854,20 +860,20 @@ python fixup_perms () {
python split_and_strip_files () {
import stat, errno
- dvar = d.getVar('PKGD', True)
- pn = d.getVar('PN', True)
+ dvar = d.getVar('PKGD')
+ pn = d.getVar('PN')
oldcwd = os.getcwd()
os.chdir(dvar)
# We default to '.debug' style
- if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-file-directory':
+ if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory':
# Single debug-file-directory style debug info
debugappend = ".debug"
debugdir = ""
debuglibdir = "/usr/lib/debug"
debugsrcdir = "/usr/src/debug"
- elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-without-src':
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-without-src':
# Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
debugappend = ""
debugdir = "/.debug"
@@ -918,10 +924,10 @@ python split_and_strip_files () {
symlinks = {}
kernmods = []
inodes = {}
- libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir", True))
- baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir", True))
- if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1' or \
- d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT', True) != '1'):
+ libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir"))
+ baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir"))
+ if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1' or \
+ d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
for root, dirs, files in cpath.walk(dvar):
for f in files:
file = os.path.join(root, f)
@@ -962,7 +968,7 @@ python split_and_strip_files () {
elf_file = isELF(file)
if elf_file & 1:
if elf_file & 2:
- if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn, True) or "").split():
+ if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split():
bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
else:
msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
@@ -991,7 +997,7 @@ python split_and_strip_files () {
#
# First lets process debug splitting
#
- if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT', True) != '1'):
+ if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
for file in elffiles:
src = file[len(dvar):]
dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
@@ -1054,8 +1060,8 @@ python split_and_strip_files () {
#
# Now lets go back over things and strip them
#
- if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1'):
- strip = d.getVar("STRIP", True)
+ if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1'):
+ strip = d.getVar("STRIP")
sfiles = []
for file in elffiles:
elf_file = int(elffiles[file])
@@ -1075,16 +1081,16 @@ python split_and_strip_files () {
python populate_packages () {
import glob, re
- workdir = d.getVar('WORKDIR', True)
- outdir = d.getVar('DEPLOY_DIR', True)
- dvar = d.getVar('PKGD', True)
- packages = d.getVar('PACKAGES', True)
- pn = d.getVar('PN', True)
+ workdir = d.getVar('WORKDIR')
+ outdir = d.getVar('DEPLOY_DIR')
+ dvar = d.getVar('PKGD')
+ packages = d.getVar('PACKAGES')
+ pn = d.getVar('PN')
bb.utils.mkdirhier(outdir)
os.chdir(dvar)
- autodebug = not (d.getVar("NOAUTOPACKAGEDEBUG", True) or False)
+ autodebug = not (d.getVar("NOAUTOPACKAGEDEBUG") or False)
# Sanity check PACKAGES for duplicates
# Sanity should be moved to sanity.bbclass once we have the infrastructure
@@ -1099,7 +1105,7 @@ python populate_packages () {
else:
package_list.append(pkg)
d.setVar('PACKAGES', ' '.join(package_list))
- pkgdest = d.getVar('PKGDEST', True)
+ pkgdest = d.getVar('PKGDEST')
seen = []
@@ -1120,7 +1126,7 @@ python populate_packages () {
root = os.path.join(pkgdest, pkg)
bb.utils.mkdirhier(root)
- filesvar = d.getVar('FILES_%s' % pkg, True) or ""
+ filesvar = d.getVar('FILES_%s' % pkg) or ""
if "//" in filesvar:
msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
package_qa_handle_error("files-invalid", msg, d)
@@ -1188,7 +1194,7 @@ python populate_packages () {
# Handle LICENSE_EXCLUSION
package_list = []
for pkg in packages.split():
- if d.getVar('LICENSE_EXCLUSION-' + pkg, True):
+ if d.getVar('LICENSE_EXCLUSION-' + pkg):
msg = "%s has an incompatible license. Excluding from packaging." % pkg
package_qa_handle_error("incompatible-license", msg, d)
else:
@@ -1207,7 +1213,7 @@ python populate_packages () {
if unshipped != []:
msg = pn + ": Files/directories were installed but not shipped in any package:"
- if "installed-vs-shipped" in (d.getVar('INSANE_SKIP_' + pn, True) or "").split():
+ if "installed-vs-shipped" in (d.getVar('INSANE_SKIP_' + pn) or "").split():
bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
else:
for f in unshipped:
@@ -1220,7 +1226,7 @@ populate_packages[dirs] = "${D}"
python package_fixsymlinks () {
import errno
- pkgdest = d.getVar('PKGDEST', True)
+ pkgdest = d.getVar('PKGDEST')
packages = d.getVar("PACKAGES", False).split()
dangling_links = {}
@@ -1255,7 +1261,7 @@ python package_fixsymlinks () {
bb.note("%s contains dangling symlink to %s" % (pkg, l))
for pkg in newrdepends:
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
for p in newrdepends[pkg]:
if p not in rdepends:
rdepends[p] = []
@@ -1286,11 +1292,11 @@ python emit_pkgdata() {
c = codecs.getencoder("unicode_escape")
return c(str)[0].decode("latin1")
- val = d.getVar('%s_%s' % (var, pkg), True)
+ val = d.getVar('%s_%s' % (var, pkg))
if val:
f.write('%s_%s: %s\n' % (var, pkg, encode(val)))
return val
- val = d.getVar('%s' % (var), True)
+ val = d.getVar('%s' % (var))
if val:
f.write('%s: %s\n' % (var, encode(val)))
return val
@@ -1309,9 +1315,9 @@ python emit_pkgdata() {
with open(subdata_file, 'w') as fd:
fd.write("PKG_%s: %s" % (ml_pkg, pkg))
- packages = d.getVar('PACKAGES', True)
- pkgdest = d.getVar('PKGDEST', True)
- pkgdatadir = d.getVar('PKGDESTWORK', True)
+ packages = d.getVar('PACKAGES')
+ pkgdest = d.getVar('PKGDEST')
+ pkgdatadir = d.getVar('PKGDESTWORK')
# Take shared lock since we're only reading, not writing
lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
@@ -1321,9 +1327,9 @@ python emit_pkgdata() {
f.write("PACKAGES: %s\n" % packages)
f.close()
- pn = d.getVar('PN', True)
- global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS', True) or "").split()
- variants = (d.getVar('MULTILIB_VARIANTS', True) or "").split()
+ pn = d.getVar('PN')
+ global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
+ variants = (d.getVar('MULTILIB_VARIANTS') or "").split()
if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
write_extra_pkgs(variants, pn, packages, pkgdatadir)
@@ -1331,10 +1337,10 @@ python emit_pkgdata() {
if (bb.data.inherits_class('allarch', d) and not bb.data.inherits_class('packagegroup', d)):
write_extra_pkgs(global_variants, pn, packages, pkgdatadir)
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
for pkg in packages.split():
- pkgval = d.getVar('PKG_%s' % pkg, True)
+ pkgval = d.getVar('PKG_%s' % pkg)
if pkgval is None:
pkgval = pkg
d.setVar('PKG_%s' % pkg, pkg)
@@ -1342,11 +1348,14 @@ python emit_pkgdata() {
pkgdestpkg = os.path.join(pkgdest, pkg)
files = {}
total_size = 0
+ seen = set()
for f in pkgfiles[pkg]:
relpth = os.path.relpath(f, pkgdestpkg)
fstat = os.lstat(f)
- total_size += fstat.st_size
files[os.sep + relpth] = fstat.st_size
+ if fstat.st_ino not in seen:
+ seen.add(fstat.st_ino)
+ total_size += fstat.st_size
d.setVar('FILES_INFO', json.dumps(files))
subdata_file = pkgdatadir + "/runtime/%s" % pkg
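The new seen set makes the PKGSIZE total count hardlinked files only once: every path still lands in FILES_INFO, but st_size is added a single time per inode. A self-contained sketch of the same accounting:

    import os

    def package_size(paths):
        # Sum file sizes, counting each inode (hence each hardlink group) once
        seen = set()
        total = 0
        for p in paths:
            st = os.lstat(p)
            if st.st_ino not in seen:
                seen.add(st.st_ino)
                total += st.st_size
        return total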
@@ -1371,17 +1380,18 @@ python emit_pkgdata() {
write_if_exists(sf, pkg, 'PKG')
write_if_exists(sf, pkg, 'ALLOW_EMPTY')
write_if_exists(sf, pkg, 'FILES')
+ write_if_exists(sf, pkg, 'CONFFILES')
write_if_exists(sf, pkg, 'pkg_postinst')
write_if_exists(sf, pkg, 'pkg_postrm')
write_if_exists(sf, pkg, 'pkg_preinst')
write_if_exists(sf, pkg, 'pkg_prerm')
write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
write_if_exists(sf, pkg, 'FILES_INFO')
- for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg, True) or "").split():
+ for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg) or "").split():
write_if_exists(sf, pkg, 'FILERPROVIDES_' + dfile)
write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
- for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg, True) or "").split():
+ for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg) or "").split():
write_if_exists(sf, pkg, 'FILERDEPENDS_' + dfile)
sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
@@ -1394,9 +1404,9 @@ python emit_pkgdata() {
bb.utils.mkdirhier(os.path.dirname(subdata_sym))
oe.path.symlink("../../runtime/%s" % pkg, subdata_sym, True)
- allow_empty = d.getVar('ALLOW_EMPTY_%s' % pkg, True)
+ allow_empty = d.getVar('ALLOW_EMPTY_%s' % pkg)
if not allow_empty:
- allow_empty = d.getVar('ALLOW_EMPTY', True)
+ allow_empty = d.getVar('ALLOW_EMPTY')
root = "%s/%s" % (pkgdest, pkg)
os.chdir(root)
g = glob('*')
@@ -1424,7 +1434,13 @@ if [ x"$D" = "x" ]; then
fi
}
-RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/bin/rpmdeps-oecore --macros ${STAGING_LIBDIR_NATIVE}/rpm/macros --define '_rpmfc_magic_path ${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc' --rpmpopt ${STAGING_LIBDIR_NATIVE}/rpm/rpmpopt"
+# In Morty and earlier releases, and on master (Rocko), the RPM file
+# dependencies are always enabled. However, since they were broken with the
+# release of Pyro and enabling them may cause build problems for some packages,
+# they are not enabled by default in Pyro. Setting ENABLE_RPM_FILEDEPS_FOR_PYRO
+# to "1" will enable them again.
+ENABLE_RPM_FILEDEPS_FOR_PYRO ??= "0"
+RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps${@' --alldeps' if d.getVar('ENABLE_RPM_FILEDEPS_FOR_PYRO') == '1' else ''}"
# Collect perfile run-time dependency metadata
# Output:
@@ -1435,19 +1451,19 @@ RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/bin/rpmdeps-oecore --macros ${STAGING_LI
# FILERDEPENDS_filepath_pkg - per file dep
python package_do_filedeps() {
- if d.getVar('SKIP_FILEDEPS', True) == '1':
+ if d.getVar('SKIP_FILEDEPS') == '1':
return
- pkgdest = d.getVar('PKGDEST', True)
- packages = d.getVar('PACKAGES', True)
- rpmdeps = d.getVar('RPMDEPS', True)
+ pkgdest = d.getVar('PKGDEST')
+ packages = d.getVar('PACKAGES')
+ rpmdeps = d.getVar('RPMDEPS')
def chunks(files, n):
return [files[i:i+n] for i in range(0, len(files), n)]
pkglist = []
for pkg in packages.split():
- if d.getVar('SKIP_FILEDEPS_' + pkg, True) == '1':
+ if d.getVar('SKIP_FILEDEPS_' + pkg) == '1':
continue
if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-'):
continue
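RPMDEPS, redefined above with an inline ${@...} fragment, is fetched here through d.getVar('RPMDEPS'). The fragment's effect is equivalent to the following sketch, assuming a populated datastore d:

    base = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps")
    extra = ' --alldeps' if d.getVar('ENABLE_RPM_FILEDEPS_FOR_PYRO') == '1' else ''
    rpmdeps = base + extra
    # Setting ENABLE_RPM_FILEDEPS_FOR_PYRO = "1" (e.g. in local.conf) restores file deps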
@@ -1496,22 +1512,22 @@ python package_do_shlibs() {
return
lib_re = re.compile("^.*\.so")
- libdir_re = re.compile(".*/%s$" % d.getVar('baselib', True))
+ libdir_re = re.compile(".*/%s$" % d.getVar('baselib'))
- packages = d.getVar('PACKAGES', True)
- targetos = d.getVar('TARGET_OS', True)
+ packages = d.getVar('PACKAGES')
+ targetos = d.getVar('TARGET_OS')
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
- ver = d.getVar('PKGV', True)
+ ver = d.getVar('PKGV')
if not ver:
msg = "PKGV not defined"
package_qa_handle_error("pkgv-undefined", msg, d)
return
- pkgdest = d.getVar('PKGDEST', True)
+ pkgdest = d.getVar('PKGDEST')
- shlibswork_dir = d.getVar('SHLIBSWORKDIR', True)
+ shlibswork_dir = d.getVar('SHLIBSWORKDIR')
# Take shared lock since we're only reading, not writing
lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"))
@@ -1519,7 +1535,7 @@ python package_do_shlibs() {
def linux_so(file, needed, sonames, renames, pkgver):
needs_ldconfig = False
ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
- cmd = d.getVar('OBJDUMP', True) + " -p " + pipes.quote(file) + " 2>/dev/null"
+ cmd = d.getVar('OBJDUMP') + " -p " + pipes.quote(file) + " 2>/dev/null"
fd = os.popen(cmd)
lines = fd.readlines()
fd.close()
@@ -1601,28 +1617,44 @@ python package_do_shlibs() {
if name and name not in needed[pkg]:
needed[pkg].append((name, file, []))
- if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS', True) == "1":
+ def mingw_dll(file, needed, sonames, renames, pkgver):
+ if not os.path.exists(file):
+ return
+
+ if file.endswith(".dll"):
+ # assume all dlls are shared objects provided by the package
+ sonames.append((os.path.basename(file), os.path.dirname(file).replace(pkgdest + "/" + pkg, ''), pkgver))
+
+ if (file.endswith(".dll") or file.endswith(".exe")):
+ # use objdump to search for "DLL Name: .*\.dll"
+ p = sub.Popen([d.expand("${HOST_PREFIX}objdump"), "-p", file], stdout = sub.PIPE, stderr= sub.PIPE)
+ out, err = p.communicate()
+ # process the output, grabbing all .dll names
+ if p.returncode == 0:
+ for m in re.finditer("DLL Name: (.*?\.dll)$", out.decode(), re.MULTILINE | re.IGNORECASE):
+ dllname = m.group(1)
+ if dllname:
+ needed[pkg].append((dllname, file, []))
+
+ if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS') == "1":
snap_symlinks = True
else:
snap_symlinks = False
- if (d.getVar('USE_LDCONFIG', True) or "1") == "1":
- use_ldconfig = True
- else:
- use_ldconfig = False
+ use_ldconfig = bb.utils.contains('DISTRO_FEATURES', 'ldconfig', True, False, d)
needed = {}
shlib_provider = oe.package.read_shlib_providers(d)
for pkg in packages.split():
- private_libs = d.getVar('PRIVATE_LIBS_' + pkg, True) or d.getVar('PRIVATE_LIBS', True) or ""
+ private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
private_libs = private_libs.split()
needs_ldconfig = False
bb.debug(2, "calculating shlib provides for %s" % pkg)
- pkgver = d.getVar('PKGV_' + pkg, True)
+ pkgver = d.getVar('PKGV_' + pkg)
if not pkgver:
- pkgver = d.getVar('PV_' + pkg, True)
+ pkgver = d.getVar('PV_' + pkg)
if not pkgver:
pkgver = ver
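The new mingw_dll() helper walks objdump -p output looking for import entries. The regular expression can be exercised in isolation; the sample output below is illustrative rather than taken from a real build:

    import re
    sample = """The Import Tables
      DLL Name: KERNEL32.dll
      DLL Name: msvcrt.dll
    """
    dlls = [m.group(1) for m in re.finditer(r"DLL Name: (.*?\.dll)$",
                                            sample, re.MULTILINE | re.IGNORECASE)]
    # dlls == ['KERNEL32.dll', 'msvcrt.dll']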
@@ -1635,6 +1667,8 @@ python package_do_shlibs() {
continue
if targetos == "darwin" or targetos == "darwin8":
darwin_so(file, needed, sonames, renames, pkgver)
+ elif targetos.startswith("mingw"):
+ mingw_dll(file, needed, sonames, renames, pkgver)
elif os.access(file, os.X_OK) or lib_re.match(file):
ldconfig = linux_so(file, needed, sonames, renames, pkgver)
needs_ldconfig = needs_ldconfig or ldconfig
@@ -1659,18 +1693,18 @@ python package_do_shlibs() {
fd.close()
if needs_ldconfig and use_ldconfig:
bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('ldconfig_postinst_fragment', True)
+ postinst += d.getVar('ldconfig_postinst_fragment')
d.setVar('pkg_postinst_%s' % pkg, postinst)
bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames))
bb.utils.unlockfile(lf)
- assumed_libs = d.getVar('ASSUME_SHLIBS', True)
+ assumed_libs = d.getVar('ASSUME_SHLIBS')
if assumed_libs:
- libdir = d.getVar("libdir", True)
+ libdir = d.getVar("libdir")
for e in assumed_libs.split():
l, dep_pkg = e.split(":")
lib_ver = None
@@ -1682,7 +1716,7 @@ python package_do_shlibs() {
shlib_provider[l] = {}
shlib_provider[l][libdir] = (dep_pkg, lib_ver)
- libsearchpath = [d.getVar('libdir', True), d.getVar('base_libdir', True)]
+ libsearchpath = [d.getVar('libdir'), d.getVar('base_libdir')]
for pkg in packages.split():
bb.debug(2, "calculating shlib requirements for %s" % pkg)
@@ -1736,12 +1770,12 @@ python package_do_shlibs() {
python package_do_pkgconfig () {
import re
- packages = d.getVar('PACKAGES', True)
- workdir = d.getVar('WORKDIR', True)
- pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar('PACKAGES')
+ workdir = d.getVar('WORKDIR')
+ pkgdest = d.getVar('PKGDEST')
- shlibs_dirs = d.getVar('SHLIBSDIRS', True).split()
- shlibswork_dir = d.getVar('SHLIBSWORKDIR', True)
+ shlibs_dirs = d.getVar('SHLIBSDIRS').split()
+ shlibswork_dir = d.getVar('SHLIBSWORKDIR')
pc_re = re.compile('(.*)\.pc$')
var_re = re.compile('(.*)=(.*)')
@@ -1773,7 +1807,7 @@ python package_do_pkgconfig () {
m = field_re.match(l)
if m:
hdr = m.group(1)
- exp = bb.data.expand(m.group(2), pd)
+ exp = pd.expand(m.group(2))
if hdr == 'Requires':
pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
@@ -1826,7 +1860,7 @@ python package_do_pkgconfig () {
def read_libdep_files(d):
pkglibdeps = {}
- packages = d.getVar('PACKAGES', True).split()
+ packages = d.getVar('PACKAGES').split()
for pkg in packages:
pkglibdeps[pkg] = {}
for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
@@ -1846,9 +1880,9 @@ def read_libdep_files(d):
python read_shlibdeps () {
pkglibdeps = read_libdep_files(d)
- packages = d.getVar('PACKAGES', True).split()
+ packages = d.getVar('PACKAGES').split()
for pkg in packages:
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
for dep in pkglibdeps[pkg]:
# Add the dep if it's not already there, or if no comparison is set
if dep not in rdepends:
@@ -1873,14 +1907,14 @@ python package_depchains() {
package.
"""
- packages = d.getVar('PACKAGES', True)
- postfixes = (d.getVar('DEPCHAIN_POST', True) or '').split()
- prefixes = (d.getVar('DEPCHAIN_PRE', True) or '').split()
+ packages = d.getVar('PACKAGES')
+ postfixes = (d.getVar('DEPCHAIN_POST') or '').split()
+ prefixes = (d.getVar('DEPCHAIN_PRE') or '').split()
def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
#bb.note('depends for %s is %s' % (base, depends))
- rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg, True) or "")
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg) or "")
for depend in depends:
if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
@@ -1901,7 +1935,7 @@ python package_depchains() {
def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
#bb.note('rdepends for %s is %s' % (base, rdepends))
- rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg, True) or "")
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg) or "")
for depend in rdepends:
if depend.find('virtual-locale-') != -1:
@@ -1924,12 +1958,12 @@ python package_depchains() {
list.append(dep)
depends = []
- for dep in bb.utils.explode_deps(d.getVar('DEPENDS', True) or ""):
+ for dep in bb.utils.explode_deps(d.getVar('DEPENDS') or ""):
add_dep(depends, dep)
rdepends = []
for pkg in packages.split():
- for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + pkg, True) or ""):
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + pkg) or ""):
add_dep(rdepends, dep)
#bb.note('rdepends is %s' % rdepends)
@@ -1959,11 +1993,11 @@ python package_depchains() {
for pkg in pkglibdeps:
for k in pkglibdeps[pkg]:
add_dep(pkglibdeplist, k)
- dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS', True) == '1') or (bb.data.inherits_class('packagegroup', d)))
+ dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS') == '1') or (bb.data.inherits_class('packagegroup', d)))
for suffix in pkgs:
for pkg in pkgs[suffix]:
- if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs', True):
+ if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs'):
continue
(base, func) = pkgs[suffix][pkg]
if suffix == "-dev":
@@ -1976,19 +2010,19 @@ python package_depchains() {
pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
else:
rdeps = []
- for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + base, True) or ""):
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + base) or ""):
add_dep(rdeps, dep)
pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
}
# Since bitbake can't determine which variables are accessed during package
# iteration, we need to list them here:
-PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES SYSTEMD_SERVICE LICENSE SECTION pkg_preinst pkg_prerm RREPLACES GROUPMEMS_PARAM SYSTEMD_AUTO_ENABLE"
+PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES SYSTEMD_SERVICE LICENSE SECTION pkg_preinst pkg_prerm RREPLACES GROUPMEMS_PARAM SYSTEMD_AUTO_ENABLE SKIP_FILEDEPS PRIVATE_LIBS"
def gen_packagevar(d):
ret = []
- pkgs = (d.getVar("PACKAGES", True) or "").split()
- vars = (d.getVar("PACKAGEVARS", True) or "").split()
+ pkgs = (d.getVar("PACKAGES") or "").split()
+ vars = (d.getVar("PACKAGEVARS") or "").split()
for p in pkgs:
for v in vars:
ret.append(v + "_" + p)
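gen_packagevar() simply crosses PACKAGES with the (now extended) PACKAGEVARS list to produce every per-package variable name BitBake must track. With hypothetical values:

    pkgs = ["foo", "foo-dev"]
    vars = ["FILES", "RDEPENDS", "PRIVATE_LIBS"]
    names = [v + "_" + p for p in pkgs for v in vars]
    # names == ['FILES_foo', 'RDEPENDS_foo', 'PRIVATE_LIBS_foo',
    #           'FILES_foo-dev', 'RDEPENDS_foo-dev', 'PRIVATE_LIBS_foo-dev']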
@@ -2036,16 +2070,16 @@ python do_package () {
# Sanity test the setup
###########################################################################
- packages = (d.getVar('PACKAGES', True) or "").split()
+ packages = (d.getVar('PACKAGES') or "").split()
if len(packages) < 1:
bb.debug(1, "No packages to build, skipping do_package")
return
- workdir = d.getVar('WORKDIR', True)
- outdir = d.getVar('DEPLOY_DIR', True)
- dest = d.getVar('D', True)
- dvar = d.getVar('PKGD', True)
- pn = d.getVar('PN', True)
+ workdir = d.getVar('WORKDIR')
+ outdir = d.getVar('DEPLOY_DIR')
+ dest = d.getVar('D')
+ dvar = d.getVar('PKGD')
+ pn = d.getVar('PN')
if not workdir or not outdir or not dest or not dvar or not pn:
msg = "WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package"
@@ -2063,7 +2097,7 @@ python do_package () {
# code pre-expands some frequently used variables
def expandVar(x, d):
- d.setVar(x, d.getVar(x, True))
+ d.setVar(x, d.getVar(x))
for x in 'PN', 'PV', 'BPN', 'TARGET_SYS', 'EXTENDPRAUTO':
expandVar(x, d)
@@ -2072,7 +2106,7 @@ python do_package () {
# Setup PKGD (from D)
###########################################################################
- for f in (d.getVar('PACKAGEBUILDPKGD', True) or '').split():
+ for f in (d.getVar('PACKAGEBUILDPKGD') or '').split():
bb.build.exec_func(f, d)
###########################################################################
@@ -2081,7 +2115,7 @@ python do_package () {
cpath = oe.cachedpath.CachedPath()
- for f in (d.getVar('PACKAGESPLITFUNCS', True) or '').split():
+ for f in (d.getVar('PACKAGESPLITFUNCS') or '').split():
bb.build.exec_func(f, d)
###########################################################################
@@ -2091,18 +2125,18 @@ python do_package () {
# Build global list of files in each split package
global pkgfiles
pkgfiles = {}
- packages = d.getVar('PACKAGES', True).split()
- pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar('PACKAGES').split()
+ pkgdest = d.getVar('PKGDEST')
for pkg in packages:
pkgfiles[pkg] = []
for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
for file in files:
pkgfiles[pkg].append(walkroot + os.sep + file)
- for f in (d.getVar('PACKAGEFUNCS', True) or '').split():
+ for f in (d.getVar('PACKAGEFUNCS') or '').split():
bb.build.exec_func(f, d)
- qa_sane = d.getVar("QA_SANE", True)
+ qa_sane = d.getVar("QA_SANE")
if not qa_sane:
bb.fatal("Fatal QA errors found, failing task.")
}
@@ -2149,7 +2183,7 @@ def mapping_rename_hook(d):
Rewrite variables to account for package renaming in things
like debian.bbclass or manual PKG variable name changes
"""
- pkg = d.getVar("PKG", True)
+ pkg = d.getVar("PKG")
runtime_mapping_rename("RDEPENDS", pkg, d)
runtime_mapping_rename("RRECOMMENDS", pkg, d)
runtime_mapping_rename("RSUGGESTS", pkg, d)
diff --git a/import-layers/yocto-poky/meta/classes/package_deb.bbclass b/import-layers/yocto-poky/meta/classes/package_deb.bbclass
index fb6034cab..eacabcdb6 100644
--- a/import-layers/yocto-poky/meta/classes/package_deb.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package_deb.bbclass
@@ -6,17 +6,19 @@ inherit package
IMAGE_PKGTYPE ?= "deb"
-DPKG_ARCH ?= "${@debian_arch_map(d.getVar('TARGET_ARCH', True), d.getVar('TUNE_FEATURES', True))}"
+DPKG_ARCH ?= "${@debian_arch_map(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'))}"
DPKG_ARCH[vardepvalue] = "${DPKG_ARCH}"
PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs"
APTCONF_TARGET = "${WORKDIR}"
-APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS", True) == "1"]}"
+APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
def debian_arch_map(arch, tune):
tune_features = tune.split()
+ if arch == "allarch":
+ return "all"
if arch in ["i586", "i686"]:
return "i386"
if arch == "x86_64":
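With the new allarch case, the portion of the mapping visible in this hunk behaves as follows (illustrative calls; the function continues beyond the context shown):

    debian_arch_map("allarch", "")  # -> "all", new in this patch
    debian_arch_map("i586", "")     # -> "i386"
    debian_arch_map("i686", "")     # -> "i386"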
@@ -53,25 +55,26 @@ python do_package_deb () {
import textwrap
import subprocess
import collections
+ import codecs
oldcwd = os.getcwd()
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
if not workdir:
bb.error("WORKDIR not defined, unable to package")
return
- outdir = d.getVar('PKGWRITEDIRDEB', True)
+ outdir = d.getVar('PKGWRITEDIRDEB')
if not outdir:
bb.error("PKGWRITEDIRDEB not defined, unable to package")
return
- packages = d.getVar('PACKAGES', True)
+ packages = d.getVar('PACKAGES')
if not packages:
bb.debug(1, "PACKAGES not defined, nothing to package")
return
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
@@ -80,7 +83,7 @@ python do_package_deb () {
bb.debug(1, "No packages; nothing to do")
return
- pkgdest = d.getVar('PKGDEST', True)
+ pkgdest = d.getVar('PKGDEST')
def cleanupcontrol(root):
for p in ['CONTROL', 'DEBIAN']:
@@ -96,17 +99,16 @@ python do_package_deb () {
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg, True)
+ pkgname = localdata.getVar('PKG_%s' % pkg)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
- bb.data.update_data(localdata)
basedir = os.path.join(os.path.dirname(root))
- pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH', True))
+ pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH'))
bb.utils.mkdirhier(pkgoutdir)
os.chdir(root)
@@ -114,22 +116,18 @@ python do_package_deb () {
from glob import glob
g = glob('*')
if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
- bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
+ bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
bb.utils.unlockfile(lf)
continue
controldir = os.path.join(root, 'DEBIAN')
bb.utils.mkdirhier(controldir)
os.chmod(controldir, 0o755)
- try:
- import codecs
- ctrlfile = codecs.open(os.path.join(controldir, 'control'), 'w', 'utf-8')
- except OSError:
- bb.utils.unlockfile(lf)
- bb.fatal("unable to open control file for writing")
+
+ ctrlfile = codecs.open(os.path.join(controldir, 'control'), 'w', 'utf-8')
fields = []
- pe = d.getVar('PKGE', True)
+ pe = d.getVar('PKGE')
if pe and int(pe) > 0:
fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
else:
@@ -141,7 +139,7 @@ python do_package_deb () {
fields.append(["Architecture: %s\n", ['DPKG_ARCH']])
fields.append(["OE: %s\n", ['PN']])
fields.append(["PackageArch: %s\n", ['PACKAGE_ARCH']])
- if d.getVar('HOMEPAGE', True):
+ if d.getVar('HOMEPAGE'):
fields.append(["Homepage: %s\n", ['HOMEPAGE']])
# Package, Version, Maintainer, Description - mandatory
@@ -151,10 +149,10 @@ python do_package_deb () {
def pullData(l, d):
l2 = []
for i in l:
- data = d.getVar(i, True)
+ data = d.getVar(i)
if data is None:
- raise KeyError(f)
- if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH', True) == 'all':
+ raise KeyError(i)
+ if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH') == 'all':
data = 'all'
elif i == 'PACKAGE_ARCH' or i == 'DPKG_ARCH':
# The params in deb package control don't allow character
@@ -165,44 +163,31 @@ python do_package_deb () {
return l2
ctrlfile.write("Package: %s\n" % pkgname)
- if d.getVar('PACKAGE_ARCH', True) == "all":
+ if d.getVar('PACKAGE_ARCH') == "all":
ctrlfile.write("Multi-Arch: foreign\n")
# check for required fields
- try:
- for (c, fs) in fields:
- for f in fs:
- if localdata.getVar(f, False) is None:
- raise KeyError(f)
- # Special behavior for description...
- if 'DESCRIPTION' in fs:
- summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
- ctrlfile.write('Description: %s\n' % summary)
- description = localdata.getVar('DESCRIPTION', True) or "."
- description = textwrap.dedent(description).strip()
- if '\\n' in description:
- # Manually indent
- for t in description.split('\\n'):
- # We don't limit the width when manually indent, but we do
- # need the textwrap.fill() to set the initial_indent and
- # subsequent_indent, so set a large width
- ctrlfile.write('%s\n' % textwrap.fill(t, width=100000, initial_indent=' ', subsequent_indent=' '))
- else:
- # Auto indent
- ctrlfile.write('%s\n' % textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' '))
-
- else:
- ctrlfile.write(c % tuple(pullData(fs, localdata)))
- except KeyError:
- import sys
- (type, value, traceback) = sys.exc_info()
- bb.utils.unlockfile(lf)
- ctrlfile.close()
- bb.fatal("Missing field for deb generation: %s" % value)
+ for (c, fs) in fields:
+ # Special behavior for description...
+ if 'DESCRIPTION' in fs:
+ summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
+ ctrlfile.write('Description: %s\n' % summary)
+ description = localdata.getVar('DESCRIPTION') or "."
+ description = textwrap.dedent(description).strip()
+ if '\\n' in description:
+ # Manually indent
+ for t in description.split('\\n'):
+ ctrlfile.write(' %s\n' % (t.strip() or '.'))
+ else:
+ # Auto indent
+ ctrlfile.write('%s\n' % textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' '))
+
+ else:
+ ctrlfile.write(c % tuple(pullData(fs, localdata)))
# more fields
custom_fields_chunk = get_package_additional_metadata("deb", localdata)
- if custom_fields_chunk is not None:
+ if custom_fields_chunk:
ctrlfile.write(custom_fields_chunk)
ctrlfile.write("\n")
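The rewritten manual-indent branch emits Debian control continuation lines directly: each line gains a leading space, and a blank line becomes ' .', as the control file format requires. A sketch of the formatting in isolation (DESCRIPTION values carry literal \n sequences, hence the split on '\\n'):

    description = "First paragraph\\nSecond line\\n\\nAfter a blank line"
    for t in description.split('\\n'):
        print(' %s' % (t.strip() or '.'))
    # Output:
    #  First paragraph
    #  Second line
    #  .
    #  After a blank line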
@@ -231,7 +216,7 @@ python do_package_deb () {
elif (v or "").startswith("> "):
var[dep][i] = var[dep][i].replace("> ", ">> ")
- rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS", True) or "")
+ rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
debian_cmp_remap(rdepends)
for dep in list(rdepends.keys()):
if dep == pkg:
@@ -239,20 +224,24 @@ python do_package_deb () {
continue
if '*' in dep:
del rdepends[dep]
- rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS", True) or "")
+ rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
debian_cmp_remap(rrecommends)
for dep in list(rrecommends.keys()):
if '*' in dep:
del rrecommends[dep]
- rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS", True) or "")
+ rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
debian_cmp_remap(rsuggests)
# Deliberately drop version information here, not wanted/supported by deb
- rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES", True) or ""), [])
+ rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
+ # Remove any file paths from rprovides; Debian does not support custom providers
+ for key in list(rprovides.keys()):
+ if key.startswith('/'):
+ del rprovides[key]
rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
debian_cmp_remap(rprovides)
- rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES", True) or "")
+ rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
debian_cmp_remap(rreplaces)
- rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS", True) or "")
+ rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
debian_cmp_remap(rconflicts)
if rdepends:
ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
@@ -269,15 +258,11 @@ python do_package_deb () {
ctrlfile.close()
for script in ["preinst", "postinst", "prerm", "postrm"]:
- scriptvar = localdata.getVar('pkg_%s' % script, True)
+ scriptvar = localdata.getVar('pkg_%s' % script)
if not scriptvar:
continue
scriptvar = scriptvar.strip()
- try:
- scriptfile = open(os.path.join(controldir, script), 'w')
- except OSError:
- bb.utils.unlockfile(lf)
- bb.fatal("unable to open %s script file for writing" % script)
+ scriptfile = open(os.path.join(controldir, script), 'w')
if scriptvar.startswith("#!"):
pos = scriptvar.find("\n") + 1
@@ -297,21 +282,14 @@ python do_package_deb () {
conffiles_str = ' '.join(get_conffiles(pkg, d))
if conffiles_str:
- try:
- conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
- except OSError:
- bb.utils.unlockfile(lf)
- bb.fatal("unable to open conffiles for writing")
+ conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
for f in conffiles_str.split():
if os.path.exists(oe.path.join(root, f)):
conffiles.write('%s\n' % f)
conffiles.close()
os.chdir(basedir)
- ret = subprocess.call("PATH=\"%s\" dpkg-deb -b %s %s" % (localdata.getVar("PATH", True), root, pkgoutdir), shell=True)
- if ret != 0:
- bb.utils.unlockfile(lf)
- bb.fatal("dpkg-deb execution failed")
+ subprocess.check_output("PATH=\"%s\" dpkg-deb -b %s %s" % (localdata.getVar("PATH"), root, pkgoutdir), shell=True)
cleanupcontrol(root)
bb.utils.unlockfile(lf)
@@ -328,7 +306,7 @@ do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[sstate-outputdirs] = "${DEPLOY_DIR_DEB}"
python do_package_write_deb_setscene () {
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
@@ -338,7 +316,7 @@ python do_package_write_deb_setscene () {
addtask do_package_write_deb_setscene
python () {
- if d.getVar('PACKAGES', True) != '':
+ if d.getVar('PACKAGES') != '':
deps = ' dpkg-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
d.appendVarFlag('do_package_write_deb', 'depends', deps)
d.setVarFlag('do_package_write_deb', 'fakeroot', "1")
@@ -351,6 +329,7 @@ python do_package_write_deb () {
do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[umask] = "022"
+do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
addtask package_write_deb after do_packagedata do_package
diff --git a/import-layers/yocto-poky/meta/classes/package_ipk.bbclass b/import-layers/yocto-poky/meta/classes/package_ipk.bbclass
index e7e7d4929..a1e51ee69 100644
--- a/import-layers/yocto-poky/meta/classes/package_ipk.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package_ipk.bbclass
@@ -11,8 +11,8 @@ PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
OPKGBUILDCMD ??= "opkg-build"
OPKG_ARGS += "--force_postinstall --prefer-arch-to-version"
-OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS", True) == "1"]}"
-OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE', True) or "").split())][(d.getVar("PACKAGE_EXCLUDE", True) or "") != ""]}"
+OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
+OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE') or "").split())][(d.getVar("PACKAGE_EXCLUDE") or "") != ""]}"
OPKGLIBDIR = "${localstatedir}/lib"
@@ -24,15 +24,15 @@ python do_package_ipk () {
oldcwd = os.getcwd()
- workdir = d.getVar('WORKDIR', True)
- outdir = d.getVar('PKGWRITEDIRIPK', True)
- tmpdir = d.getVar('TMPDIR', True)
- pkgdest = d.getVar('PKGDEST', True)
+ workdir = d.getVar('WORKDIR')
+ outdir = d.getVar('PKGWRITEDIRIPK')
+ tmpdir = d.getVar('TMPDIR')
+ pkgdest = d.getVar('PKGDEST')
if not workdir or not outdir or not tmpdir:
bb.error("Variables incorrectly set, unable to package")
return
- packages = d.getVar('PACKAGES', True)
+ packages = d.getVar('PACKAGES')
if not packages or packages == '':
bb.debug(1, "No packages; nothing to do")
return
@@ -48,7 +48,7 @@ python do_package_ipk () {
if os.path.exists(p):
bb.utils.prunedir(p)
- recipesource = os.path.basename(d.getVar('FILE', True))
+ recipesource = os.path.basename(d.getVar('FILE'))
for pkg in packages.split():
localdata = bb.data.createCopy(d)
@@ -58,16 +58,15 @@ python do_package_ipk () {
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg, True)
+ pkgname = localdata.getVar('PKG_%s' % pkg)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
- bb.data.update_data(localdata)
basedir = os.path.join(os.path.dirname(root))
- arch = localdata.getVar('PACKAGE_ARCH', True)
+ arch = localdata.getVar('PACKAGE_ARCH')
if localdata.getVar('IPK_HIERARCHICAL_FEED', False) == "1":
# Spread packages across subdirectories so each isn't too crowded
@@ -100,20 +99,16 @@ python do_package_ipk () {
from glob import glob
g = glob('*')
if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
- bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
+ bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
bb.utils.unlockfile(lf)
continue
controldir = os.path.join(root, 'CONTROL')
bb.utils.mkdirhier(controldir)
- try:
- ctrlfile = open(os.path.join(controldir, 'control'), 'w')
- except OSError:
- bb.utils.unlockfile(lf)
- bb.fatal("unable to open control file for writing")
+ ctrlfile = open(os.path.join(controldir, 'control'), 'w')
fields = []
- pe = d.getVar('PKGE', True)
+ pe = d.getVar('PKGE')
if pe and int(pe) > 0:
fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
else:
@@ -125,46 +120,43 @@ python do_package_ipk () {
fields.append(["License: %s\n", ['LICENSE']])
fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']])
fields.append(["OE: %s\n", ['PN']])
- if d.getVar('HOMEPAGE', True):
+ if d.getVar('HOMEPAGE'):
fields.append(["Homepage: %s\n", ['HOMEPAGE']])
def pullData(l, d):
l2 = []
for i in l:
- l2.append(d.getVar(i, True))
+ l2.append(d.getVar(i))
return l2
ctrlfile.write("Package: %s\n" % pkgname)
# check for required fields
- try:
- for (c, fs) in fields:
- for f in fs:
- if localdata.getVar(f, False) is None:
- raise KeyError(f)
- # Special behavior for description...
- if 'DESCRIPTION' in fs:
- summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
- ctrlfile.write('Description: %s\n' % summary)
- description = localdata.getVar('DESCRIPTION', True) or "."
- description = textwrap.dedent(description).strip()
- if '\\n' in description:
- # Manually indent
- for t in description.split('\\n'):
- # We don't limit the width when manually indent, but we do
- # need the textwrap.fill() to set the initial_indent and
- # subsequent_indent, so set a large width
- ctrlfile.write('%s\n' % textwrap.fill(t.strip(), width=100000, initial_indent=' ', subsequent_indent=' '))
- else:
- # Auto indent
- ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
+ for (c, fs) in fields:
+ for f in fs:
+ if localdata.getVar(f, False) is None:
+ raise KeyError(f)
+ # Special behavior for description...
+ if 'DESCRIPTION' in fs:
+ summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
+ ctrlfile.write('Description: %s\n' % summary)
+ description = localdata.getVar('DESCRIPTION') or "."
+ description = textwrap.dedent(description).strip()
+ if '\\n' in description:
+ # Manually indent
+ for t in description.split('\\n'):
+ # We don't limit the width when manually indent, but we do
+ # need the textwrap.fill() to set the initial_indent and
+ # subsequent_indent, so set a large width
+ line = textwrap.fill(t.strip(),
+ width=100000,
+ initial_indent=' ',
+ subsequent_indent=' ') or '.'
+ ctrlfile.write('%s\n' % line)
else:
- ctrlfile.write(c % tuple(pullData(fs, localdata)))
- except KeyError:
- import sys
- (type, value, traceback) = sys.exc_info()
- ctrlfile.close()
- bb.utils.unlockfile(lf)
- bb.fatal("Missing field for ipk generation: %s" % value)
+ # Auto indent
+ ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
+ else:
+ ctrlfile.write(c % tuple(pullData(fs, localdata)))
# more fields
custom_fields_chunk = get_package_additional_metadata("ipk", localdata)
@@ -187,19 +179,19 @@ python do_package_ipk () {
elif (v or "").startswith("> "):
var[dep][i] = var[dep][i].replace("> ", ">> ")
- rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS", True) or "")
+ rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
debian_cmp_remap(rdepends)
- rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS", True) or "")
+ rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
debian_cmp_remap(rrecommends)
- rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS", True) or "")
+ rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
debian_cmp_remap(rsuggests)
# Deliberately drop version information here, not wanted/supported by ipk
- rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES", True) or ""), [])
+ rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
debian_cmp_remap(rprovides)
- rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES", True) or "")
+ rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
debian_cmp_remap(rreplaces)
- rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS", True) or "")
+ rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
debian_cmp_remap(rconflicts)
if rdepends:
@@ -218,40 +210,29 @@ python do_package_ipk () {
ctrlfile.close()
for script in ["preinst", "postinst", "prerm", "postrm"]:
- scriptvar = localdata.getVar('pkg_%s' % script, True)
+ scriptvar = localdata.getVar('pkg_%s' % script)
if not scriptvar:
continue
- try:
- scriptfile = open(os.path.join(controldir, script), 'w')
- except OSError:
- bb.utils.unlockfile(lf)
- bb.fatal("unable to open %s script file for writing" % script)
+ scriptfile = open(os.path.join(controldir, script), 'w')
scriptfile.write(scriptvar)
scriptfile.close()
os.chmod(os.path.join(controldir, script), 0o755)
conffiles_str = ' '.join(get_conffiles(pkg, d))
if conffiles_str:
- try:
- conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
- except OSError:
- bb.utils.unlockfile(lf)
- bb.fatal("unable to open conffiles for writing")
+ conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
for f in conffiles_str.split():
if os.path.exists(oe.path.join(root, f)):
conffiles.write('%s\n' % f)
conffiles.close()
os.chdir(basedir)
- ret = subprocess.call("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH", True),
- d.getVar("OPKGBUILDCMD", True), pkg, pkgoutdir), shell=True)
- if ret != 0:
- bb.utils.unlockfile(lf)
- bb.fatal("opkg-build execution failed")
+ subprocess.check_output("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH"),
+ d.getVar("OPKGBUILDCMD"), pkg, pkgoutdir), shell=True)
- if d.getVar('IPK_SIGN_PACKAGES', True) == '1':
- ipkver = "%s-%s" % (d.getVar('PKGV', True), d.getVar('PKGR', True))
- ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, d.getVar('PACKAGE_ARCH', True))
+ if d.getVar('IPK_SIGN_PACKAGES') == '1':
+ ipkver = "%s-%s" % (d.getVar('PKGV'), d.getVar('PKGR'))
+ ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, d.getVar('PACKAGE_ARCH'))
sign_ipk(d, ipk_to_sign)
cleanupcontrol(root)
@@ -267,7 +248,7 @@ do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[sstate-outputdirs] = "${DEPLOY_DIR_IPK}"
python do_package_write_ipk_setscene () {
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
@@ -277,7 +258,7 @@ python do_package_write_ipk_setscene () {
addtask do_package_write_ipk_setscene
python () {
- if d.getVar('PACKAGES', True) != '':
+ if d.getVar('PACKAGES') != '':
deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
d.appendVarFlag('do_package_write_ipk', 'depends', deps)
d.setVarFlag('do_package_write_ipk', 'fakeroot', "1")
@@ -290,6 +271,7 @@ python do_package_write_ipk () {
do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[umask] = "022"
+do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
addtask package_write_ipk after do_packagedata do_package
PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
diff --git a/import-layers/yocto-poky/meta/classes/package_rpm.bbclass b/import-layers/yocto-poky/meta/classes/package_rpm.bbclass
index c431545f7..1deaf832d 100644
--- a/import-layers/yocto-poky/meta/classes/package_rpm.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package_rpm.bbclass
@@ -7,15 +7,33 @@ RPMBUILD="rpmbuild"
PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms"
-# Maintaining the perfile dependencies has significant overhead when writing the
+# Maintaining the perfile dependencies has significant overhead when writing the
# packages. When set, this value merges them for efficiency.
MERGEPERFILEDEPS = "1"
+# Filter dependencies based on a provided function.
+def filter_deps(var, f):
+ import collections
+
+ depends_dict = bb.utils.explode_dep_versions2(var)
+ newdeps_dict = collections.OrderedDict()
+ for dep in depends_dict:
+ if f(dep):
+ newdeps_dict[dep] = depends_dict[dep]
+ return bb.utils.join_deps(newdeps_dict, commasep=False)
+
+# Filter out absolute paths (typically /bin/sh and /usr/bin/env) and any perl
+# dependencies for nativesdk packages.
+def filter_nativesdk_deps(srcname, var):
+ if var and srcname.startswith("nativesdk-"):
+ var = filter_deps(var, lambda dep: not dep.startswith('/') and dep != 'perl' and not dep.startswith('perl('))
+ return var
+
# Construct per file dependencies file
def write_rpm_perfiledata(srcname, d):
- workdir = d.getVar('WORKDIR', True)
- packages = d.getVar('PACKAGES', True)
- pkgd = d.getVar('PKGD', True)
+ workdir = d.getVar('WORKDIR')
+ packages = d.getVar('PACKAGES')
+ pkgd = d.getVar('PKGD')
def dump_filerdeps(varname, outfile, d):
outfile.write("#!/usr/bin/env python\n\n")
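The new filter_nativesdk_deps() helper only rewrites dependency strings for nativesdk- packages; anything else passes through untouched. Hypothetical usage, with invented dependency values and bb.utils assumed importable:

    deps = "/usr/bin/env perl libfoo (>= 1.0)"
    filter_nativesdk_deps("nativesdk-bar", deps)
    # -> "libfoo (>= 1.0)", absolute paths and perl deps dropped
    filter_nativesdk_deps("bar", deps)
    # -> returned unchanged, the filter applies only to nativesdk- packages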
@@ -23,10 +41,11 @@ def write_rpm_perfiledata(srcname, d):
outfile.write('deps = {\n')
for pkg in packages.split():
dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
- dependsflist = (d.getVar(dependsflist_key, True) or "")
+ dependsflist = (d.getVar(dependsflist_key) or "")
for dfile in dependsflist.split():
key = "FILE" + varname + "_" + dfile + "_" + pkg
- depends_dict = bb.utils.explode_dep_versions(d.getVar(key, True) or "")
+ deps = filter_nativesdk_deps(srcname, d.getVar(key) or "")
+ depends_dict = bb.utils.explode_dep_versions(deps)
file = dfile.replace("@underscore@", "_")
file = file.replace("@closebrace@", "]")
file = file.replace("@openbrace@", "[")
@@ -55,10 +74,7 @@ def write_rpm_perfiledata(srcname, d):
# OE-core dependencies a.k.a. RPM requires
outdepends = workdir + "/" + srcname + ".requires"
- try:
- dependsfile = open(outdepends, 'w')
- except OSError:
- bb.fatal("unable to open spec file for writing")
+ dependsfile = open(outdepends, 'w')
dump_filerdeps('RDEPENDS', dependsfile, d)
@@ -68,10 +84,7 @@ def write_rpm_perfiledata(srcname, d):
# OE-core / RPM Provides
outprovides = workdir + "/" + srcname + ".provides"
- try:
- providesfile = open(outprovides, 'w')
- except OSError:
- bb.fatal("unable to open spec file for writing")
+ providesfile = open(outprovides, 'w')
dump_filerdeps('RPROVIDES', providesfile, d)
@@ -86,15 +99,15 @@ python write_specfile () {
# append information for logs and patches to %prep
def add_prep(d,spec_files_bottom):
- if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d):
- spec_files_bottom.append('%%prep -n %s' % d.getVar('PN', True) )
+ if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
+ spec_files_bottom.append('%%prep -n %s' % d.getVar('PN') )
spec_files_bottom.append('%s' % "echo \"include logs and patches, Please check them in SOURCES\"")
spec_files_bottom.append('')
# append the name of tarball to key word 'SOURCE' in xxx.spec.
def tail_source(d):
- if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d):
- ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
+ if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
+ ar_outdir = d.getVar('ARCHIVER_OUTDIR')
if not os.path.exists(ar_outdir):
return
source_list = os.listdir(ar_outdir)
@@ -107,27 +120,6 @@ python write_specfile () {
os.chown(f, 0, 0)
spec_preamble_top.append('Source%s: %s' % (source_number, source))
source_number += 1
- # We need a simple way to remove the MLPREFIX from the package name,
- # and dependency information...
- def strip_multilib(name, d):
- multilibs = d.getVar('MULTILIBS', True) or ""
- for ext in multilibs.split():
- eext = ext.split(':')
- if len(eext) > 1 and eext[0] == 'multilib' and name and name.find(eext[1] + '-') >= 0:
- name = "".join(name.split(eext[1] + '-'))
- return name
-
- def strip_multilib_deps(deps, d):
- depends = bb.utils.explode_dep_versions2(deps or "")
- newdeps = {}
- for dep in depends:
- newdeps[strip_multilib(dep, d)] = depends[dep]
- return bb.utils.join_deps(newdeps)
-
-# ml = d.getVar("MLPREFIX", True)
-# if ml and name and len(ml) != 0 and name.find(ml) == 0:
-# return ml.join(name.split(ml, 1)[1:])
-# return name
# In RPM, dependencies are of the format: pkg <>= Epoch:Version-Release
# This format is similar to OE, however there are restrictions on the
@@ -144,7 +136,7 @@ python write_specfile () {
# after renaming we cannot look up the dependencies in the packagedata
# store.
def translate_vers(varname, d):
- depends = d.getVar(varname, True)
+ depends = d.getVar(varname)
if depends:
depends_dict = bb.utils.explode_dep_versions2(depends)
newdeps_dict = {}
@@ -197,6 +189,8 @@ python write_specfile () {
if path.endswith("DEBIAN") or path.endswith("CONTROL"):
continue
path = path.replace("%", "%%%%%%%%")
+ path = path.replace("[", "?")
+ path = path.replace("]", "?")
# Treat all symlinks to directories as normal files.
# os.walk() lists them as directories.
@@ -216,6 +210,8 @@ python write_specfile () {
if dir == "CONTROL" or dir == "DEBIAN":
continue
dir = dir.replace("%", "%%%%%%%%")
+ dir = dir.replace("[", "?")
+ dir = dir.replace("]", "?")
# All packages own the directories their files are in...
target.append('%dir "' + path + '/' + dir + '"')
else:
@@ -230,6 +226,8 @@ python write_specfile () {
if file == "CONTROL" or file == "DEBIAN":
continue
file = file.replace("%", "%%%%%%%%")
+ file = file.replace("[", "?")
+ file = file.replace("]", "?")
if conffiles.count(path + '/' + file):
target.append('%config "' + path + '/' + file + '"')
else:
@@ -248,10 +246,10 @@ python write_specfile () {
def get_perfile(varname, pkg, d):
deps = []
dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
- dependsflist = (d.getVar(dependsflist_key, True) or "")
+ dependsflist = (d.getVar(dependsflist_key) or "")
for dfile in dependsflist.split():
key = "FILE" + varname + "_" + dfile + "_" + pkg
- depends = d.getVar(key, True)
+ depends = d.getVar(key)
if depends:
deps.append(depends)
return " ".join(deps)
@@ -269,33 +267,33 @@ python write_specfile () {
else:
spec_preamble.append('%s' % textwrap.fill(dedent_text, width=75))
- packages = d.getVar('PACKAGES', True)
+ packages = d.getVar('PACKAGES')
if not packages or packages == '':
bb.debug(1, "No packages; nothing to do")
return
- pkgdest = d.getVar('PKGDEST', True)
+ pkgdest = d.getVar('PKGDEST')
if not pkgdest:
bb.fatal("No PKGDEST")
- outspecfile = d.getVar('OUTSPECFILE', True)
+ outspecfile = d.getVar('OUTSPECFILE')
if not outspecfile:
bb.fatal("No OUTSPECFILE")
# Construct the SPEC file...
- srcname = strip_multilib(d.getVar('PN', True), d)
- srcsummary = (d.getVar('SUMMARY', True) or d.getVar('DESCRIPTION', True) or ".")
- srcversion = d.getVar('PKGV', True).replace('-', '+')
- srcrelease = d.getVar('PKGR', True)
- srcepoch = (d.getVar('PKGE', True) or "")
- srclicense = d.getVar('LICENSE', True)
- srcsection = d.getVar('SECTION', True)
- srcmaintainer = d.getVar('MAINTAINER', True)
- srchomepage = d.getVar('HOMEPAGE', True)
- srcdescription = d.getVar('DESCRIPTION', True) or "."
+ srcname = d.getVar('PN')
+ srcsummary = (d.getVar('SUMMARY') or d.getVar('DESCRIPTION') or ".")
+ srcversion = d.getVar('PKGV').replace('-', '+')
+ srcrelease = d.getVar('PKGR')
+ srcepoch = (d.getVar('PKGE') or "")
+ srclicense = d.getVar('LICENSE')
+ srcsection = d.getVar('SECTION')
+ srcmaintainer = d.getVar('MAINTAINER')
+ srchomepage = d.getVar('HOMEPAGE')
+ srcdescription = d.getVar('DESCRIPTION') or "."
srccustomtagschunk = get_package_additional_metadata("rpm", d)
- srcdepends = strip_multilib_deps(d.getVar('DEPENDS', True), d)
+ srcdepends = d.getVar('DEPENDS')
srcrdepends = []
srcrrecommends = []
srcrsuggests = []
@@ -318,8 +316,8 @@ python write_specfile () {
spec_files_top = []
spec_files_bottom = []
- perfiledeps = (d.getVar("MERGEPERFILEDEPS", True) or "0") == "0"
- extra_pkgdata = (d.getVar("RPM_EXTRA_PKGDATA", True) or "0") == "1"
+ perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
+ extra_pkgdata = (d.getVar("RPM_EXTRA_PKGDATA") or "0") == "1"
for pkg in packages.split():
localdata = bb.data.createCopy(d)
@@ -328,29 +326,27 @@ python write_specfile () {
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg, True)
+ pkgname = localdata.getVar('PKG_%s' % pkg)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
- bb.data.update_data(localdata)
-
conffiles = get_conffiles(pkg, d)
- dirfiles = localdata.getVar('DIRFILES', True)
+ dirfiles = localdata.getVar('DIRFILES')
if dirfiles is not None:
dirfiles = dirfiles.split()
- splitname = strip_multilib(pkgname, d)
+ splitname = pkgname
- splitsummary = (localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or ".")
- splitversion = (localdata.getVar('PKGV', True) or "").replace('-', '+')
- splitrelease = (localdata.getVar('PKGR', True) or "")
- splitepoch = (localdata.getVar('PKGE', True) or "")
- splitlicense = (localdata.getVar('LICENSE', True) or "")
- splitsection = (localdata.getVar('SECTION', True) or "")
- splitdescription = (localdata.getVar('DESCRIPTION', True) or ".")
+ splitsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
+ splitversion = (localdata.getVar('PKGV') or "").replace('-', '+')
+ splitrelease = (localdata.getVar('PKGR') or "")
+ splitepoch = (localdata.getVar('PKGE') or "")
+ splitlicense = (localdata.getVar('LICENSE') or "")
+ splitsection = (localdata.getVar('SECTION') or "")
+ splitdescription = (localdata.getVar('DESCRIPTION') or ".")
splitcustomtagschunk = get_package_additional_metadata("rpm", localdata)
translate_vers('RDEPENDS', localdata)
@@ -363,18 +359,18 @@ python write_specfile () {
# Map the dependencies into their final form
mapping_rename_hook(localdata)
- splitrdepends = strip_multilib_deps(localdata.getVar('RDEPENDS', True), d)
- splitrrecommends = strip_multilib_deps(localdata.getVar('RRECOMMENDS', True), d)
- splitrsuggests = strip_multilib_deps(localdata.getVar('RSUGGESTS', True), d)
- splitrprovides = strip_multilib_deps(localdata.getVar('RPROVIDES', True), d)
- splitrreplaces = strip_multilib_deps(localdata.getVar('RREPLACES', True), d)
- splitrconflicts = strip_multilib_deps(localdata.getVar('RCONFLICTS', True), d)
+ splitrdepends = localdata.getVar('RDEPENDS')
+ splitrrecommends = localdata.getVar('RRECOMMENDS')
+ splitrsuggests = localdata.getVar('RSUGGESTS')
+ splitrprovides = localdata.getVar('RPROVIDES')
+ splitrreplaces = localdata.getVar('RREPLACES')
+ splitrconflicts = localdata.getVar('RCONFLICTS')
splitrobsoletes = []
- splitrpreinst = localdata.getVar('pkg_preinst', True)
- splitrpostinst = localdata.getVar('pkg_postinst', True)
- splitrprerm = localdata.getVar('pkg_prerm', True)
- splitrpostrm = localdata.getVar('pkg_postrm', True)
+ splitrpreinst = localdata.getVar('pkg_preinst')
+ splitrpostinst = localdata.getVar('pkg_postinst')
+ splitrprerm = localdata.getVar('pkg_prerm')
+ splitrpostrm = localdata.getVar('pkg_postrm')
if not perfiledeps:
@@ -382,6 +378,8 @@ python write_specfile () {
splitrdepends = splitrdepends + " " + get_perfile('RDEPENDS', pkg, d)
splitrprovides = splitrprovides + " " + get_perfile('RPROVIDES', pkg, d)
+ splitrdepends = filter_nativesdk_deps(srcname, splitrdepends)
+
# Gather special src/first package data
if srcname == splitname:
srcrdepends = splitrdepends
@@ -452,25 +450,10 @@ python write_specfile () {
if splitrpostrm:
print_deps(splitrdepends, "Requires(postun)", spec_preamble_bottom, d)
- # Suggests in RPM are like recommends in OE-core!
- print_deps(splitrrecommends, "Suggests", spec_preamble_bottom, d)
- # While there is no analog for suggests... (So call them recommends for now)
- print_deps(splitrsuggests, "Recommends", spec_preamble_bottom, d)
+ print_deps(splitrrecommends, "Recommends", spec_preamble_bottom, d)
+ print_deps(splitrsuggests, "Suggests", spec_preamble_bottom, d)
print_deps(splitrprovides, "Provides", spec_preamble_bottom, d)
print_deps(splitrobsoletes, "Obsoletes", spec_preamble_bottom, d)
-
- # conflicts can not be in a provide! We will need to filter it.
- if splitrconflicts:
- depends_dict = bb.utils.explode_dep_versions2(splitrconflicts)
- newdeps_dict = {}
- for dep in depends_dict:
- if dep not in splitrprovides:
- newdeps_dict[dep] = depends_dict[dep]
- if newdeps_dict:
- splitrconflicts = bb.utils.join_deps(newdeps_dict)
- else:
- splitrconflicts = ""
-
print_deps(splitrconflicts, "Conflicts", spec_preamble_bottom, d)
spec_preamble_bottom.append('')
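The removed comments above documented the old cross-mapping (OE RRECOMMENDS emitted as the rpm "Suggests" tag and RSUGGESTS as "Recommends"); with rpm 4's native weak-dependency tags the mapping is now direct. An assumed illustration of the resulting spec preamble:

    # RRECOMMENDS_${PN} = "foo" and RSUGGESTS_${PN} = "bar" now yield:
    #   Recommends: foo
    #   Suggests: bar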
@@ -562,25 +545,10 @@ python write_specfile () {
if srcrpostrm:
print_deps(srcrdepends, "Requires(postun)", spec_preamble_top, d)
- # Suggests in RPM are like recommends in OE-core!
- print_deps(srcrrecommends, "Suggests", spec_preamble_top, d)
- # While there is no analog for suggests... (So call them recommends for now)
- print_deps(srcrsuggests, "Recommends", spec_preamble_top, d)
- print_deps(srcrprovides, "Provides", spec_preamble_top, d)
+ print_deps(srcrrecommends, "Recommends", spec_preamble_top, d)
+ print_deps(srcrsuggests, "Suggests", spec_preamble_top, d)
+ print_deps(srcrprovides + (" /bin/sh" if srcname.startswith("nativesdk-") else ""), "Provides", spec_preamble_top, d)
print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
-
- # conflicts can not be in a provide! We will need to filter it.
- if srcrconflicts:
- depends_dict = bb.utils.explode_dep_versions2(srcrconflicts)
- newdeps_dict = {}
- for dep in depends_dict:
- if dep not in srcrprovides:
- newdeps_dict[dep] = depends_dict[dep]
- if newdeps_dict:
- srcrconflicts = bb.utils.join_deps(newdeps_dict)
- else:
- srcrconflicts = ""
-
print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
spec_preamble_top.append('')
@@ -614,14 +582,11 @@ python write_specfile () {
spec_scriptlets_top.append('')
# Write the SPEC file
- try:
- specfile = open(outspecfile, 'w')
- except OSError:
- bb.fatal("unable to open spec file for writing")
+ specfile = open(outspecfile, 'w')
# RPMSPEC_PREAMBLE is a way to add arbitrary text to the top
# of the generated spec file
- external_preamble = d.getVar("RPMSPEC_PREAMBLE", True)
+ external_preamble = d.getVar("RPMSPEC_PREAMBLE")
if external_preamble:
specfile.write(external_preamble + "\n")
@@ -649,23 +614,15 @@ python write_specfile () {
write_specfile[vardepsexclude] = "OVERRIDES"
python do_package_rpm () {
- # We need a simple way to remove the MLPREFIX from the package name,
- # and dependency information...
- def strip_multilib(name, d):
- ml = d.getVar("MLPREFIX", True)
- if ml and name and len(ml) != 0 and name.find(ml) >= 0:
- return "".join(name.split(ml))
- return name
-
- workdir = d.getVar('WORKDIR', True)
- tmpdir = d.getVar('TMPDIR', True)
- pkgd = d.getVar('PKGD', True)
- pkgdest = d.getVar('PKGDEST', True)
+ workdir = d.getVar('WORKDIR')
+ tmpdir = d.getVar('TMPDIR')
+ pkgd = d.getVar('PKGD')
+ pkgdest = d.getVar('PKGDEST')
if not workdir or not pkgd or not tmpdir:
bb.error("Variables incorrectly set, unable to package")
return
- packages = d.getVar('PACKAGES', True)
+ packages = d.getVar('PACKAGES')
if not packages or packages == '':
bb.debug(1, "No packages; nothing to do")
return
@@ -674,42 +631,43 @@ python do_package_rpm () {
# If the spec file already exist, and has not been stored into
# pseudo's files.db, it may cause rpmbuild of the src.rpm to fail,
# so remove it before doing rpmbuild src.rpm.
- srcname = strip_multilib(d.getVar('PN', True), d)
+ srcname = d.getVar('PN')
outspecfile = workdir + "/" + srcname + ".spec"
if os.path.isfile(outspecfile):
os.remove(outspecfile)
d.setVar('OUTSPECFILE', outspecfile)
bb.build.exec_func('write_specfile', d)
- perfiledeps = (d.getVar("MERGEPERFILEDEPS", True) or "0") == "0"
+ perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
if perfiledeps:
outdepends, outprovides = write_rpm_perfiledata(srcname, d)
# Setup the rpmbuild arguments...
- rpmbuild = d.getVar('RPMBUILD', True)
- targetsys = d.getVar('TARGET_SYS', True)
- targetvendor = d.getVar('HOST_VENDOR', True)
- package_arch = (d.getVar('PACKAGE_ARCH', True) or "").replace("-", "_")
- sdkpkgsuffix = (d.getVar('SDKPKGSUFFIX', True) or "nativesdk").replace("-", "_")
- if package_arch not in "all any noarch".split() and not package_arch.endswith(sdkpkgsuffix):
- ml_prefix = (d.getVar('MLPREFIX', True) or "").replace("-", "_")
- d.setVar('PACKAGE_ARCH_EXTEND', ml_prefix + package_arch)
- else:
- d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
+ rpmbuild = d.getVar('RPMBUILD')
+ targetsys = d.getVar('TARGET_SYS')
+ targetvendor = d.getVar('HOST_VENDOR')
+ # Too many places in the dnf stack assume that arch-independent packages are "noarch".
+ # Let's not fight against this.
+ package_arch = (d.getVar('PACKAGE_ARCH') or "").replace("-", "_").replace("all", "noarch")
+ sdkpkgsuffix = (d.getVar('SDKPKGSUFFIX') or "nativesdk").replace("-", "_")
+ d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}')
d.setVar('RPM_PKGWRITEDIR', pkgwritedir)
- bb.debug(1, 'PKGWRITEDIR: %s' % d.getVar('RPM_PKGWRITEDIR', True))
- pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${HOST_VENDOR}-${HOST_OS}')
- magicfile = d.expand('${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc')
+ bb.debug(1, 'PKGWRITEDIR: %s' % d.getVar('RPM_PKGWRITEDIR'))
+ pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${HOST_VENDOR}-linux')
bb.utils.mkdirhier(pkgwritedir)
os.chmod(pkgwritedir, 0o755)
cmd = rpmbuild
- cmd = cmd + " --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
+ cmd = cmd + " --noclean --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'"
- cmd = cmd + " --define '_builddir " + d.getVar('S', True) + "'"
+ cmd = cmd + " --define '_builddir " + d.getVar('S') + "'"
cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'"
cmd = cmd + " --define '_use_internal_dependency_generator 0'"
+ cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'"
+ cmd = cmd + " --define '_build_id_links none'"
+ cmd = cmd + " --define '_binary_payload w6T.xzdio'"
+ cmd = cmd + " --define '_source_payload w6T.xzdio'"
if perfiledeps:
cmd = cmd + " --define '__find_requires " + outdepends + "'"
cmd = cmd + " --define '__find_provides " + outprovides + "'"
@@ -718,11 +676,10 @@ python do_package_rpm () {
cmd = cmd + " --define '__find_provides %{nil}'"
cmd = cmd + " --define '_unpackaged_files_terminate_build 0'"
cmd = cmd + " --define 'debug_package %{nil}'"
- cmd = cmd + " --define '_rpmfc_magic_path " + magicfile + "'"
cmd = cmd + " --define '_tmppath " + workdir + "'"
- if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d):
- cmd = cmd + " --define '_sourcedir " + d.getVar('ARCHIVER_OUTDIR', True) + "'"
- cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_OUTDIR', True) + "'"
+ if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
+ cmd = cmd + " --define '_sourcedir " + d.getVar('ARCHIVER_OUTDIR') + "'"
+ cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_OUTDIR') + "'"
cmdsrpm = cmdsrpm + " -bs " + outspecfile
# Build the .src.rpm
d.setVar('SBUILDSPEC', cmdsrpm + "\n")
@@ -730,17 +687,20 @@ python do_package_rpm () {
bb.build.exec_func('SBUILDSPEC', d)
cmd = cmd + " -bb " + outspecfile
+ # rpm 4 creates various empty directories in _topdir, let's clean them up
+ cleanupcmd = "rm -rf %s/BUILDROOT %s/SOURCES %s/SPECS %s/SRPMS" % (workdir, workdir, workdir, workdir)
+
# Build the rpm package!
- d.setVar('BUILDSPEC', cmd + "\n")
+ d.setVar('BUILDSPEC', cmd + "\n" + cleanupcmd + "\n")
d.setVarFlag('BUILDSPEC', 'func', '1')
bb.build.exec_func('BUILDSPEC', d)
- if d.getVar('RPM_SIGN_PACKAGES', True) == '1':
+ if d.getVar('RPM_SIGN_PACKAGES') == '1':
bb.build.exec_func("sign_rpm", d)
}
python () {
- if d.getVar('PACKAGES', True) != '':
+ if d.getVar('PACKAGES') != '':
deps = ' rpm-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
d.appendVarFlag('do_package_write_rpm', 'depends', deps)
d.setVarFlag('do_package_write_rpm', 'fakeroot', '1')
@@ -766,9 +726,10 @@ python do_package_write_rpm () {
do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[umask] = "022"
+do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
addtask package_write_rpm after do_packagedata do_package
PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
-PACKAGEINDEXDEPS += "createrepo-native:do_populate_sysroot"
+PACKAGEINDEXDEPS += "createrepo-c-native:do_populate_sysroot"
do_build[recrdeptask] += "do_package_write_rpm"
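The new PACKAGE_WRITE_DEPS line above relies on oe.utils.build_depends_string; a hedged sketch of what it is assumed to do (turn a space-separated recipe list into task-qualified [depends] entries):

    def build_depends_string(depends, task):
        # "qemu-native gdk-pixbuf-native" + "do_populate_sysroot" ->
        # "qemu-native:do_populate_sysroot gdk-pixbuf-native:do_populate_sysroot"
        if not depends:
            return ""
        return " ".join(dep + ":" + task for dep in depends.split())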
diff --git a/import-layers/yocto-poky/meta/classes/package_tar.bbclass b/import-layers/yocto-poky/meta/classes/package_tar.bbclass
index e217814af..ce3ab4c8e 100644
--- a/import-layers/yocto-poky/meta/classes/package_tar.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package_tar.bbclass
@@ -7,27 +7,27 @@ python do_package_tar () {
oldcwd = os.getcwd()
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
if not workdir:
bb.error("WORKDIR not defined, unable to package")
return
- outdir = d.getVar('DEPLOY_DIR_TAR', True)
+ outdir = d.getVar('DEPLOY_DIR_TAR')
if not outdir:
bb.error("DEPLOY_DIR_TAR not defined, unable to package")
return
- dvar = d.getVar('D', True)
+ dvar = d.getVar('D')
if not dvar:
bb.error("D not defined, unable to package")
return
- packages = d.getVar('PACKAGES', True)
+ packages = d.getVar('PACKAGES')
if not packages:
bb.debug(1, "PACKAGES not defined, nothing to package")
return
- pkgdest = d.getVar('PKGDEST', True)
+ pkgdest = d.getVar('PKGDEST')
bb.utils.mkdirhier(outdir)
bb.utils.mkdirhier(dvar)
@@ -38,7 +38,6 @@ python do_package_tar () {
overrides = localdata.getVar('OVERRIDES', False)
localdata.setVar('OVERRIDES', '%s:%s' % (overrides, pkg))
- bb.data.update_data(localdata)
bb.utils.mkdirhier(root)
basedir = os.path.dirname(root)
@@ -46,7 +45,7 @@ python do_package_tar () {
os.chdir(root)
dlist = os.listdir(root)
if not dlist:
- bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
+ bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
continue
args = "tar -cz --exclude=CONTROL --exclude=DEBIAN -f".split()
ret = subprocess.call(args + [tarfn] + dlist)
@@ -57,8 +56,8 @@ python do_package_tar () {
}
python () {
- if d.getVar('PACKAGES', True) != '':
- deps = (d.getVarFlag('do_package_write_tar', 'depends', True) or "").split()
+ if d.getVar('PACKAGES') != '':
+ deps = (d.getVarFlag('do_package_write_tar', 'depends') or "").split()
deps.append('tar-native:do_populate_sysroot')
deps.append('virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_package_write_tar', 'depends', " ".join(deps))
diff --git a/import-layers/yocto-poky/meta/classes/packagedata.bbclass b/import-layers/yocto-poky/meta/classes/packagedata.bbclass
index 3397f1e36..a903e5cfd 100644
--- a/import-layers/yocto-poky/meta/classes/packagedata.bbclass
+++ b/import-layers/yocto-poky/meta/classes/packagedata.bbclass
@@ -2,10 +2,10 @@ python read_subpackage_metadata () {
import oe.packagedata
vars = {
- "PN" : d.getVar('PN', True),
- "PE" : d.getVar('PE', True),
- "PV" : d.getVar('PV', True),
- "PR" : d.getVar('PR', True),
+ "PN" : d.getVar('PN'),
+ "PE" : d.getVar('PE'),
+ "PV" : d.getVar('PV'),
+ "PR" : d.getVar('PR'),
}
data = oe.packagedata.read_pkgdata(vars["PN"], d)
@@ -13,7 +13,7 @@ python read_subpackage_metadata () {
for key in data.keys():
d.setVar(key, data[key])
- for pkg in d.getVar('PACKAGES', True).split():
+ for pkg in d.getVar('PACKAGES').split():
sdata = oe.packagedata.read_subpkgdata(pkg, d)
for key in sdata.keys():
if key in vars:
diff --git a/import-layers/yocto-poky/meta/classes/packagefeed-stability.bbclass b/import-layers/yocto-poky/meta/classes/packagefeed-stability.bbclass
index aa01def74..c0e9be549 100644
--- a/import-layers/yocto-poky/meta/classes/packagefeed-stability.bbclass
+++ b/import-layers/yocto-poky/meta/classes/packagefeed-stability.bbclass
@@ -31,7 +31,7 @@ python() {
# This assumes that the package_write task is called package_write_<pkgtype>
# and that the directory in which packages should be written is
# pointed to by the variable DEPLOY_DIR_<PKGTYPE>
- for pkgclass in (d.getVar('PACKAGE_CLASSES', True) or '').split():
+ for pkgclass in (d.getVar('PACKAGE_CLASSES') or '').split():
if pkgclass.startswith('package_'):
pkgtype = pkgclass.split('_', 1)[1]
pkgwritefunc = 'do_package_write_%s' % pkgtype
@@ -51,7 +51,7 @@ python() {
d.appendVarFlag('do_build', 'recrdeptask', ' ' + pkgcomparefunc)
- if d.getVarFlag(pkgwritefunc, 'noexec', True) or not d.getVarFlag(pkgwritefunc, 'task', True):
+ if d.getVarFlag(pkgwritefunc, 'noexec') or not d.getVarFlag(pkgwritefunc, 'task'):
# Packaging is disabled for this recipe, so we shouldn't do anything
continue
@@ -71,7 +71,7 @@ python() {
# This isn't the real task function - it's a template that we use in the
# anonymous python code above
fakeroot python do_package_compare () {
- currenttask = d.getVar('BB_CURRENTTASK', True)
+ currenttask = d.getVar('BB_CURRENTTASK')
pkgtype = currenttask.rsplit('_', 1)[1]
package_compare_impl(pkgtype, d)
}
@@ -83,12 +83,12 @@ def package_compare_impl(pkgtype, d):
import subprocess
import oe.sstatesig
- pn = d.getVar('PN', True)
- deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper(), True)
+ pn = d.getVar('PN')
+ deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper())
prepath = deploydir + '-prediff/'
# Find out what the PKGR values are
- pkgdatadir = d.getVar('PKGDATA_DIR', True)
+ pkgdatadir = d.getVar('PKGDATA_DIR')
packages = []
try:
with open(os.path.join(pkgdatadir, pn), 'r') as f:
@@ -138,7 +138,7 @@ def package_compare_impl(pkgtype, d):
files = []
docopy = False
manifest, _ = oe.sstatesig.sstate_get_manifest_filename(pkgwritetask, d)
- mlprefix = d.getVar('MLPREFIX', True)
+ mlprefix = d.getVar('MLPREFIX')
# Copy all of the recipe's packages if any one of them differs, so that
# they keep the same PR.
with open(manifest, 'r') as f:
@@ -215,7 +215,7 @@ def package_compare_impl(pkgtype, d):
# multilib), they're identical in theory, but sstate.bbclass
# copies it again, so keep aligned with that.
if os.path.exists(destpath) and pkgtype == 'rpm' \
- and d.getVar('PACKAGE_ARCH', True) == 'all':
+ and d.getVar('PACKAGE_ARCH') == 'all':
os.unlink(destpath)
if (os.stat(srcpath).st_dev == os.stat(destdir).st_dev):
# Use a hard link to save space
@@ -229,10 +229,10 @@ def package_compare_impl(pkgtype, d):
do_cleansstate[postfuncs] += "pfs_cleanpkgs"
python pfs_cleanpkgs () {
import errno
- for pkgclass in (d.getVar('PACKAGE_CLASSES', True) or '').split():
+ for pkgclass in (d.getVar('PACKAGE_CLASSES') or '').split():
if pkgclass.startswith('package_'):
pkgtype = pkgclass.split('_', 1)[1]
- deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper(), True)
+ deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper())
prepath = deploydir + '-prediff'
pcmanifest = os.path.join(prepath, d.expand('pkg-compare-manifest-${MULTIMACH_TARGET_SYS}-${PN}'))
try:
diff --git a/import-layers/yocto-poky/meta/classes/packagegroup.bbclass b/import-layers/yocto-poky/meta/classes/packagegroup.bbclass
index 3928c8a4a..eea2e5b9f 100644
--- a/import-layers/yocto-poky/meta/classes/packagegroup.bbclass
+++ b/import-layers/yocto-poky/meta/classes/packagegroup.bbclass
@@ -16,15 +16,15 @@ PACKAGE_ARCH_EXPANDED := "${PACKAGE_ARCH}"
LICENSE ?= "MIT"
-inherit ${@oe.utils.ifelse(d.getVar('PACKAGE_ARCH_EXPANDED', True) == 'all', 'allarch', '')}
+inherit ${@oe.utils.ifelse(d.getVar('PACKAGE_ARCH_EXPANDED') == 'all', 'allarch', '')}
# This automatically adds -dbg and -dev flavours of all PACKAGES
# to the list. Their dependencies (RRECOMMENDS) are handled as usual
# by package_depchains in a following step.
# Also mark all packages as ALLOW_EMPTY
python () {
- packages = d.getVar('PACKAGES', True).split()
- if d.getVar('PACKAGEGROUP_DISABLE_COMPLEMENTARY', True) != '1':
+ packages = d.getVar('PACKAGES').split()
+ if d.getVar('PACKAGEGROUP_DISABLE_COMPLEMENTARY') != '1':
types = ['', '-dbg', '-dev']
if bb.utils.contains('DISTRO_FEATURES', 'ptest', True, False, d):
types.append('-ptest')
@@ -40,16 +40,18 @@ python () {
DEPCHAIN_DBGDEFAULTDEPS = "1"
# We only need the packaging tasks - disable the rest
-do_fetch[noexec] = "1"
-do_unpack[noexec] = "1"
-do_patch[noexec] = "1"
-do_configure[noexec] = "1"
-do_compile[noexec] = "1"
-do_install[noexec] = "1"
-do_populate_sysroot[noexec] = "1"
+deltask do_fetch
+deltask do_unpack
+deltask do_patch
+deltask do_configure
+deltask do_compile
+deltask do_install
+deltask do_populate_sysroot
python () {
- initman = d.getVar("VIRTUAL-RUNTIME_init_manager", True)
+ if bb.data.inherits_class('nativesdk', d):
+ return
+ initman = d.getVar("VIRTUAL-RUNTIME_init_manager")
if initman and initman in ['sysvinit', 'systemd'] and not bb.utils.contains('DISTRO_FEATURES', initman, True, False, d):
bb.fatal("Please ensure that your setting of VIRTUAL-RUNTIME_init_manager (%s) matches the entries enabled in DISTRO_FEATURES" % initman)
}
diff --git a/import-layers/yocto-poky/meta/classes/patch.bbclass b/import-layers/yocto-poky/meta/classes/patch.bbclass
index 1f6927be0..8f35cb4f9 100644
--- a/import-layers/yocto-poky/meta/classes/patch.bbclass
+++ b/import-layers/yocto-poky/meta/classes/patch.bbclass
@@ -10,110 +10,65 @@ PATCH_GIT_USER_EMAIL ?= "oe.patch@oe"
inherit terminal
-def src_patches(d, all = False ):
- workdir = d.getVar('WORKDIR', True)
- fetch = bb.fetch2.Fetch([], d)
- patches = []
- sources = []
- for url in fetch.urls:
- local = patch_path(url, fetch, workdir)
- if not local:
- if all:
- local = fetch.localpath(url)
- sources.append(local)
- continue
-
- urldata = fetch.ud[url]
- parm = urldata.parm
- patchname = parm.get('pname') or os.path.basename(local)
-
- apply, reason = should_apply(parm, d)
- if not apply:
- if reason:
- bb.note("Patch %s %s" % (patchname, reason))
- continue
-
- patchparm = {'patchname': patchname}
- if "striplevel" in parm:
- striplevel = parm["striplevel"]
- elif "pnum" in parm:
- #bb.msg.warn(None, "Deprecated usage of 'pnum' url parameter in '%s', please use 'striplevel'" % url)
- striplevel = parm["pnum"]
- else:
- striplevel = '1'
- patchparm['striplevel'] = striplevel
-
- patchdir = parm.get('patchdir')
- if patchdir:
- patchparm['patchdir'] = patchdir
-
- localurl = bb.fetch.encodeurl(('file', '', local, '', '', patchparm))
- patches.append(localurl)
-
- if all:
- return sources
-
- return patches
+python () {
+ if d.getVar('PATCHTOOL') == 'git' and d.getVar('PATCH_COMMIT_FUNCTIONS') == '1':
+ extratasks = bb.build.tasksbetween('do_unpack', 'do_patch', d)
+ try:
+ extratasks.remove('do_unpack')
+ except ValueError:
+ # For some recipes do_unpack doesn't exist, so ignore it
+ pass
+
+ d.appendVarFlag('do_patch', 'prefuncs', ' patch_task_patch_prefunc')
+ for task in extratasks:
+ d.appendVarFlag(task, 'postfuncs', ' patch_task_postfunc')
+}
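For reference, the machinery above only activates when both variables are set; an assumed usage sketch, e.g. in local.conf:

    PATCHTOOL = "git"
    PATCH_COMMIT_FUNCTIONS = "1"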
-def patch_path(url, fetch, workdir):
- """Return the local path of a patch, or None if this isn't a patch"""
+python patch_task_patch_prefunc() {
+ # Prefunc for do_patch
+ func = d.getVar('BB_RUNTASK')
+ srcsubdir = d.getVar('S')
- local = fetch.localpath(url)
- base, ext = os.path.splitext(os.path.basename(local))
- if ext in ('.gz', '.bz2', '.Z'):
- local = os.path.join(workdir, base)
- ext = os.path.splitext(base)[1]
+ patchdir = os.path.join(srcsubdir, 'patches')
+ if os.path.exists(patchdir):
+ if os.listdir(patchdir):
+ d.setVar('PATCH_HAS_PATCHES_DIR', '1')
+ else:
+ os.rmdir(patchdir)
+}
- urldata = fetch.ud[url]
- if "apply" in urldata.parm:
- apply = oe.types.boolean(urldata.parm["apply"])
- if not apply:
- return
- elif ext not in (".diff", ".patch"):
- return
+python patch_task_postfunc() {
+ # Postfunc for task functions between do_unpack and do_patch
+ import oe.patch
+ import shutil
+ func = d.getVar('BB_RUNTASK')
+ srcsubdir = d.getVar('S')
+
+ if os.path.exists(srcsubdir):
+ if func == 'do_patch':
+ haspatches = (d.getVar('PATCH_HAS_PATCHES_DIR') == '1')
+ patchdir = os.path.join(srcsubdir, 'patches')
+ if os.path.exists(patchdir):
+ shutil.rmtree(patchdir)
+ if haspatches:
+ stdout, _ = bb.process.run('git status --porcelain patches', cwd=srcsubdir)
+ if stdout:
+ bb.process.run('git checkout patches', cwd=srcsubdir)
+ stdout, _ = bb.process.run('git status --porcelain .', cwd=srcsubdir)
+ if stdout:
+ useroptions = []
+ oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=d)
+ bb.process.run('git add .; git %s commit -a -m "Committing changes from %s\n\n%s"' % (' '.join(useroptions), func, oe.patch.GitApplyTree.ignore_commit_prefix + ' - from %s' % func), cwd=srcsubdir)
+}
- return local
+def src_patches(d, all=False, expand=True):
+ import oe.patch
+ return oe.patch.src_patches(d, all, expand)
def should_apply(parm, d):
"""Determine if we should apply the given patch"""
-
- if "mindate" in parm or "maxdate" in parm:
- pn = d.getVar('PN', True)
- srcdate = d.getVar('SRCDATE_%s' % pn, True)
- if not srcdate:
- srcdate = d.getVar('SRCDATE', True)
-
- if srcdate == "now":
- srcdate = d.getVar('DATE', True)
-
- if "maxdate" in parm and parm["maxdate"] < srcdate:
- return False, 'is outdated'
-
- if "mindate" in parm and parm["mindate"] > srcdate:
- return False, 'is predated'
-
-
- if "minrev" in parm:
- srcrev = d.getVar('SRCREV', True)
- if srcrev and srcrev < parm["minrev"]:
- return False, 'applies to later revisions'
-
- if "maxrev" in parm:
- srcrev = d.getVar('SRCREV', True)
- if srcrev and srcrev > parm["maxrev"]:
- return False, 'applies to earlier revisions'
-
- if "rev" in parm:
- srcrev = d.getVar('SRCREV', True)
- if srcrev and parm["rev"] not in srcrev:
- return False, "doesn't apply to revision"
-
- if "notrev" in parm:
- srcrev = d.getVar('SRCREV', True)
- if srcrev and parm["notrev"] in srcrev:
- return False, "doesn't apply to revision"
-
- return True, None
+ import oe.patch
+ return oe.patch.should_apply(parm, d)
should_apply[vardepsexclude] = "DATE SRCDATE"
@@ -126,20 +81,20 @@ python patch_do_patch() {
"git": oe.patch.GitApplyTree,
}
- cls = patchsetmap[d.getVar('PATCHTOOL', True) or 'quilt']
+ cls = patchsetmap[d.getVar('PATCHTOOL') or 'quilt']
resolvermap = {
"noop": oe.patch.NOOPResolver,
"user": oe.patch.UserResolver,
}
- rcls = resolvermap[d.getVar('PATCHRESOLVE', True) or 'user']
+ rcls = resolvermap[d.getVar('PATCHRESOLVE') or 'user']
classes = {}
- s = d.getVar('S', True)
+ s = d.getVar('S')
- os.putenv('PATH', d.getVar('PATH', True))
+ os.putenv('PATH', d.getVar('PATH'))
# We must use one TMPDIR per process so that the "patch" processes
# don't generate the same temp file name.
diff --git a/import-layers/yocto-poky/meta/classes/perl-version.bbclass b/import-layers/yocto-poky/meta/classes/perl-version.bbclass
new file mode 100644
index 000000000..fafe68a77
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/perl-version.bbclass
@@ -0,0 +1,24 @@
+PERL_OWN_DIR = "${@["", "/perl-native"][(bb.data.inherits_class('native', d))]}"
+
+# Determine the staged version of perl from the perl configuration file
+# Assign vardepvalue, because otherwise signature is changed before and after
+# perl is built (from None to real version in config.sh).
+get_perl_version[vardepvalue] = "${PERL_OWN_DIR}"
+def get_perl_version(d):
+ import re
+ cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh')
+ try:
+ f = open(cfg, 'r')
+ except IOError:
+ return None
+ l = f.readlines()
+ f.close()
+ r = re.compile(r"^version='(\d*\.\d*\.\d*)'")
+ for s in l:
+ m = r.match(s)
+ if m:
+ return m.group(1)
+ return None
+
+PERLVERSION := "${@get_perl_version(d)}"
+PERLVERSION[vardepvalue] = ""
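An assumed example of the config.sh line the regex above matches, as a standalone check:

    import re

    line = "version='5.24.1'"  # assumed shape of perl's config.sh version line
    m = re.match(r"^version='(\d*\.\d*\.\d*)'", line)
    assert m is not None and m.group(1) == "5.24.1"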
diff --git a/import-layers/yocto-poky/meta/classes/pixbufcache.bbclass b/import-layers/yocto-poky/meta/classes/pixbufcache.bbclass
index 3f48a0f34..b3e507f61 100644
--- a/import-layers/yocto-poky/meta/classes/pixbufcache.bbclass
+++ b/import-layers/yocto-poky/meta/classes/pixbufcache.bbclass
@@ -8,6 +8,8 @@ inherit qemu
PIXBUF_PACKAGES ??= "${PN}"
+PACKAGE_WRITE_DEPS += "qemu-native gdk-pixbuf-native"
+
pixbufcache_common() {
if [ "x$D" != "x" ]; then
$INTERCEPT_DIR/postinst_intercept update_pixbuf_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \
@@ -28,42 +30,35 @@ fi
}
python populate_packages_append() {
- pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES', True).split()
+ pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES').split()
for pkg in pixbuf_pkgs:
bb.note("adding pixbuf postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += d.getVar('pixbufcache_common', True)
+ postinst += d.getVar('pixbufcache_common')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
+ postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += d.getVar('pixbufcache_common', True)
+ postrm += d.getVar('pixbufcache_common')
d.setVar('pkg_postrm_%s' % pkg, postrm)
}
gdkpixbuf_complete() {
- GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache || exit 1
+GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache || exit 1
}
-#
-# Add an sstate postinst hook to update the cache for native packages.
-# An error exit during populate_sysroot_setscene allows bitbake to
-# try to recover by re-building the package.
-#
DEPENDS_append_class-native = " gdk-pixbuf-native"
-SSTATEPOSTINSTFUNCS_append_class-native = " pixbufcache_sstate_postinst"
+SYSROOT_PREPROCESS_FUNCS_append_class-native = " pixbufcache_sstate_postinst"
# See base.bbclass for the other half of this
pixbufcache_sstate_postinst() {
- if [ "${BB_CURRENTTASK}" = "populate_sysroot" ]; then
- ${gdkpixbuf_complete}
- elif [ "${BB_CURRENTTASK}" = "populate_sysroot_setscene" ]; then
- if [ -x ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders ]; then
- echo "${gdkpixbuf_complete}" >> ${STAGING_DIR}/sstatecompletions
- fi
- fi
+ mkdir -p ${SYSROOT_DESTDIR}${bindir}
+ dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN}
+ echo '#!/bin/sh' > $dest
+ echo "${gdkpixbuf_complete}" >> $dest
+ chmod 0755 $dest
}
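Instead of appending to sstatecompletions, the function above now stages a small script into the sysroot (assumed to be run when the recipe's sysroot is assembled for a consumer, under the new staging mechanism). Its generated content, with BitBake variables shown unexpanded, is roughly:

    #!/bin/sh
    GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache || exit 1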
diff --git a/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass b/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass
index 69aae2644..563582e0a 100644
--- a/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass
+++ b/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass
@@ -11,13 +11,13 @@ COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest'
def complementary_globs(featurevar, d):
all_globs = d.getVarFlags('COMPLEMENTARY_GLOB')
globs = []
- features = set((d.getVar(featurevar, True) or '').split())
+ features = set((d.getVar(featurevar) or '').split())
for name, glob in all_globs.items():
if name in features:
globs.append(glob)
return ' '.join(globs)
-SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs"
+SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'doc-pkgs', '', d)}"
SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}'
inherit rootfs_${IMAGE_PKGTYPE}
@@ -34,10 +34,7 @@ SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
TOOLCHAIN_HOST_TASK ?= "nativesdk-packagegroup-sdk-host packagegroup-cross-canadian-${MACHINE}"
TOOLCHAIN_HOST_TASK_ATTEMPTONLY ?= ""
-TOOLCHAIN_TARGET_TASK ?= " \
- ${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')} \
- ${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target-dbg')} \
- "
+TOOLCHAIN_TARGET_TASK ?= "${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')}"
TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= ""
TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
@@ -57,34 +54,41 @@ SDK_PRE_INSTALL_COMMAND ?= ""
SDK_POST_INSTALL_COMMAND ?= ""
SDK_RELOCATE_AFTER_INSTALL ?= "1"
-SDKEXTPATH ?= "~/${@d.getVar('DISTRO', True)}_sdk"
-SDK_TITLE ?= "${@d.getVar('DISTRO_NAME', True) or d.getVar('DISTRO', True)} SDK"
+SDKEXTPATH ?= "~/${@d.getVar('DISTRO')}_sdk"
+SDK_TITLE ?= "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} SDK"
SDK_TARGET_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.target.manifest"
SDK_HOST_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"
python write_target_sdk_manifest () {
from oe.sdk import sdk_list_installed_packages
from oe.utils import format_pkg_list
- sdkmanifestdir = os.path.dirname(d.getVar("SDK_TARGET_MANIFEST", True))
+ sdkmanifestdir = os.path.dirname(d.getVar("SDK_TARGET_MANIFEST"))
pkgs = sdk_list_installed_packages(d, True)
if not os.path.exists(sdkmanifestdir):
bb.utils.mkdirhier(sdkmanifestdir)
- with open(d.getVar('SDK_TARGET_MANIFEST', True), 'w') as output:
+ with open(d.getVar('SDK_TARGET_MANIFEST'), 'w') as output:
output.write(format_pkg_list(pkgs, 'ver'))
}
+python write_sdk_test_data() {
+ from oe.data import export2json
+ testdata = "%s/%s.testdata.json" % (d.getVar('SDKDEPLOYDIR'), d.getVar('TOOLCHAIN_OUTPUTNAME'))
+ bb.utils.mkdirhier(os.path.dirname(testdata))
+ export2json(d, testdata)
+}
+
python write_host_sdk_manifest () {
from oe.sdk import sdk_list_installed_packages
from oe.utils import format_pkg_list
- sdkmanifestdir = os.path.dirname(d.getVar("SDK_HOST_MANIFEST", True))
+ sdkmanifestdir = os.path.dirname(d.getVar("SDK_HOST_MANIFEST"))
pkgs = sdk_list_installed_packages(d, False)
if not os.path.exists(sdkmanifestdir):
bb.utils.mkdirhier(sdkmanifestdir)
- with open(d.getVar('SDK_HOST_MANIFEST', True), 'w') as output:
+ with open(d.getVar('SDK_HOST_MANIFEST'), 'w') as output:
output.write(format_pkg_list(pkgs, 'ver'))
}
-POPULATE_SDK_POST_TARGET_COMMAND_append = " write_target_sdk_manifest ; "
+POPULATE_SDK_POST_TARGET_COMMAND_append = " write_target_sdk_manifest ; write_sdk_test_data ; "
POPULATE_SDK_POST_HOST_COMMAND_append = " write_host_sdk_manifest; "
SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC};' if '${SDK_PACKAGING_FUNC}' else ''}"
SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; tar_sdk; ${SDK_PACKAGING_COMMAND} "
@@ -93,7 +97,7 @@ def populate_sdk_common(d):
from oe.sdk import populate_sdk
from oe.manifest import create_manifest, Manifest
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", pn, d)
runtime_mapping_rename("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY", pn, d)
@@ -101,13 +105,13 @@ def populate_sdk_common(d):
ld.setVar("PKGDATA_DIR", "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}/pkgdata")
runtime_mapping_rename("TOOLCHAIN_HOST_TASK", pn, ld)
runtime_mapping_rename("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", pn, ld)
- d.setVar("TOOLCHAIN_HOST_TASK", ld.getVar("TOOLCHAIN_HOST_TASK", True))
- d.setVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", ld.getVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", True))
+ d.setVar("TOOLCHAIN_HOST_TASK", ld.getVar("TOOLCHAIN_HOST_TASK"))
+ d.setVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", ld.getVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY"))
# create target/host SDK manifests
- create_manifest(d, manifest_dir=d.getVar('SDK_DIR', True),
+ create_manifest(d, manifest_dir=d.getVar('SDK_DIR'),
manifest_type=Manifest.MANIFEST_TYPE_SDK_HOST)
- create_manifest(d, manifest_dir=d.getVar('SDK_DIR', True),
+ create_manifest(d, manifest_dir=d.getVar('SDK_DIR'),
manifest_type=Manifest.MANIFEST_TYPE_SDK_TARGET)
populate_sdk(d)
@@ -134,7 +138,7 @@ fakeroot create_sdk_files() {
python check_sdk_sysroots() {
# Fails build if there are broken or dangling symlinks in SDK sysroots
- if d.getVar('CHECK_SDK_SYSROOTS', True) != '1':
+ if d.getVar('CHECK_SDK_SYSROOTS') != '1':
# disabled, bail out
return
@@ -142,8 +146,8 @@ python check_sdk_sysroots() {
return os.path.abspath(path)
# Get scan root
- SCAN_ROOT = norm_path("%s/%s/sysroots/" % (d.getVar('SDK_OUTPUT', True),
- d.getVar('SDKPATH', True)))
+ SCAN_ROOT = norm_path("%s/%s/sysroots/" % (d.getVar('SDK_OUTPUT'),
+ d.getVar('SDKPATH')))
bb.note('Checking SDK sysroots at ' + SCAN_ROOT)
@@ -218,10 +222,11 @@ EOF
-e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \
-e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \
-e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \
- -e 's#@SDK_TITLE@#${@d.getVar("SDK_TITLE", True).replace('&', '\&')}#g' \
+ -e 's#@SDK_TITLE@#${@d.getVar("SDK_TITLE").replace('&', '\&')}#g' \
-e 's#@SDK_VERSION@#${SDK_VERSION}#g' \
-e '/@SDK_PRE_INSTALL_COMMAND@/d' \
-e '/@SDK_POST_INSTALL_COMMAND@/d' \
+ -e 's#@SDK_GCC_VER@#${@oe.utils.host_gcc_version(d)}#g' \
${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
# add execution permission
@@ -241,8 +246,7 @@ populate_sdk_log_check() {
echo "log_check: Using $lf_path as logfile"
- if test -e "$lf_path"
- then
+ if [ -e "$lf_path" ]; then
${IMAGE_PKGTYPE}_log_check $target $lf_path
else
echo "Cannot find logfile [$lf_path]"
@@ -268,7 +272,7 @@ do_populate_sdk[file-checksums] += "${COREBASE}/meta/files/toolchain-shar-reloca
${COREBASE}/meta/files/toolchain-shar-extract.sh:True"
do_populate_sdk[dirs] = "${PKGDATA_DIR} ${TOPDIR}"
-do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS', True).split()])} ${@d.getVarFlag('do_rootfs', 'depends', False)}"
-do_populate_sdk[rdepends] = "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_RDEPENDS', True).split()])}"
+do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS').split()])} ${@d.getVarFlag('do_rootfs', 'depends', False)}"
+do_populate_sdk[rdepends] = "${@' '.join([x + ':do_package_write_${IMAGE_PKGTYPE} ' + x + ':do_packagedata' for x in d.getVar('SDK_RDEPENDS').split()])}"
do_populate_sdk[recrdeptask] += "do_packagedata do_package_write_rpm do_package_write_ipk do_package_write_deb"
addtask populate_sdk
diff --git a/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass b/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass
index 39f614274..8b8a341e3 100644
--- a/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass
+++ b/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass
@@ -11,8 +11,6 @@ TOOLCHAIN_HOST_TASK_task-populate-sdk-ext = " \
TOOLCHAIN_TARGET_TASK_task-populate-sdk-ext = ""
-SDK_RDEPENDS_append_task-populate-sdk-ext = " ${SDK_TARGETS}"
-
SDK_RELOCATE_AFTER_INSTALL_task-populate-sdk-ext = "0"
SDK_EXT = ""
@@ -21,7 +19,7 @@ SDK_EXT_task-populate-sdk-ext = "-ext"
# Options are full or minimal
SDK_EXT_TYPE ?= "full"
SDK_INCLUDE_PKGDATA ?= "0"
-SDK_INCLUDE_TOOLCHAIN ?= "${@'1' if d.getVar('SDK_EXT_TYPE', True) == 'full' else '0'}"
+SDK_INCLUDE_TOOLCHAIN ?= "${@'1' if d.getVar('SDK_EXT_TYPE') == 'full' else '0'}"
SDK_RECRDEP_TASKS ?= ""
@@ -43,19 +41,21 @@ SDK_TARGETS ?= "${PN}"
def get_sdk_install_targets(d, images_only=False):
sdk_install_targets = ''
- if images_only or d.getVar('SDK_EXT_TYPE', True) != 'minimal':
- sdk_install_targets = d.getVar('SDK_TARGETS', True)
+ if images_only or d.getVar('SDK_EXT_TYPE') != 'minimal':
+ sdk_install_targets = d.getVar('SDK_TARGETS')
depd = d.getVar('BB_TASKDEPDATA', False)
+ tasklist = bb.build.tasksbetween('do_image_complete', 'do_build', d)
+ tasklist.remove('do_build')
for v in depd.values():
- if v[1] == 'do_image_complete':
+ if v[1] in tasklist:
if v[0] not in sdk_install_targets:
sdk_install_targets += ' {}'.format(v[0])
if not images_only:
- if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1':
+ if d.getVar('SDK_INCLUDE_PKGDATA') == '1':
sdk_install_targets += ' meta-world-pkgdata:do_allpackagedata'
- if d.getVar('SDK_INCLUDE_TOOLCHAIN', True) == '1':
+ if d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1':
sdk_install_targets += ' meta-extsdk-toolchain:do_populate_sysroot'
return sdk_install_targets
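An assumed illustration of the return value for a full eSDK of an image recipe where PN (and therefore SDK_TARGETS) is "core-image-minimal", with the default SDK_INCLUDE_* settings:

    # get_sdk_install_targets(d)
    #   -> "core-image-minimal meta-extsdk-toolchain:do_populate_sysroot"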
@@ -77,13 +77,13 @@ COREBASE_FILES ?= " \
SDK_DIR_task-populate-sdk-ext = "${WORKDIR}/sdk-ext"
B_task-populate-sdk-ext = "${SDK_DIR}"
-TOOLCHAINEXT_OUTPUTNAME = "${SDK_NAME}-toolchain-ext-${SDK_VERSION}"
+TOOLCHAINEXT_OUTPUTNAME ?= "${SDK_NAME}-toolchain-ext-${SDK_VERSION}"
TOOLCHAIN_OUTPUTNAME_task-populate-sdk-ext = "${TOOLCHAINEXT_OUTPUTNAME}"
SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
-SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME', True) or d.getVar('DISTRO', True)} Extensible SDK"
+SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
def clean_esdk_builddir(d, sdkbasepath):
"""Clean up traces of the fake build for create_filtered_tasklist()"""
@@ -110,10 +110,11 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
try:
with open(sdkbasepath + '/conf/local.conf', 'a') as f:
# Force the use of sstate from the build system
- f.write('\nSSTATE_DIR_forcevariable = "%s"\n' % d.getVar('SSTATE_DIR', True))
+ f.write('\nSSTATE_DIR_forcevariable = "%s"\n' % d.getVar('SSTATE_DIR'))
f.write('SSTATE_MIRRORS_forcevariable = ""\n')
# Ensure TMPDIR is the default so that clean_esdk_builddir() can delete it
f.write('TMPDIR_forcevariable = "${TOPDIR}/tmp"\n')
+ f.write('TCLIBCAPPEND_forcevariable = ""\n')
# Drop uninative if the build isn't using it (or else NATIVELSBSTRING will
# be different and we won't be able to find our native sstate)
if not bb.data.inherits_class('uninative', d):
@@ -121,7 +122,7 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
# Unfortunately the default SDKPATH (or even a custom value) may contain characters that bitbake
# will not allow in its COREBASE path, so we need to rename the directory temporarily
- temp_sdkbasepath = d.getVar('SDK_OUTPUT', True) + '/tmp-renamed-sdk'
+ temp_sdkbasepath = d.getVar('SDK_OUTPUT') + '/tmp-renamed-sdk'
# Delete any existing temp dir
try:
shutil.rmtree(temp_sdkbasepath)
@@ -130,7 +131,7 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
os.rename(sdkbasepath, temp_sdkbasepath)
try:
cmdprefix = '. %s .; ' % conf_initpath
- logfile = d.getVar('WORKDIR', True) + '/tasklist_bb_log.txt'
+ logfile = d.getVar('WORKDIR') + '/tasklist_bb_log.txt'
try:
oe.copy_buildsystem.check_sstate_task_list(d, get_sdk_install_targets(d), tasklistfile, cmdprefix=cmdprefix, cwd=temp_sdkbasepath, logfile=logfile)
except bb.process.ExecutionError as e:
@@ -152,7 +153,7 @@ python copy_buildsystem () {
import glob
import oe.copy_buildsystem
- oe_init_env_script = d.getVar('OE_INIT_ENV_SCRIPT', True)
+ oe_init_env_script = d.getVar('OE_INIT_ENV_SCRIPT')
conf_bbpath = ''
conf_initpath = ''
@@ -160,10 +161,10 @@ python copy_buildsystem () {
# Copy in all metadata layers + bitbake (as repositories)
buildsystem = oe.copy_buildsystem.BuildSystem('extensible SDK', d)
- baseoutpath = d.getVar('SDK_OUTPUT', True) + '/' + d.getVar('SDKPATH', True)
+ baseoutpath = d.getVar('SDK_OUTPUT') + '/' + d.getVar('SDKPATH')
# Determine if we're building a derivative extensible SDK (from devtool build-sdk)
- derivative = (d.getVar('SDK_DERIVATIVE', True) or '') == '1'
+ derivative = (d.getVar('SDK_DERIVATIVE') or '') == '1'
if derivative:
workspace_name = 'orig-workspace'
else:
@@ -171,7 +172,7 @@ python copy_buildsystem () {
layers_copied = buildsystem.copy_bitbake_and_layers(baseoutpath + '/layers', workspace_name)
sdkbblayers = []
- corebase = os.path.basename(d.getVar('COREBASE', True))
+ corebase = os.path.basename(d.getVar('COREBASE'))
for layer in layers_copied:
if corebase == os.path.basename(layer):
conf_bbpath = os.path.join('layers', layer, 'bitbake')
@@ -202,8 +203,8 @@ python copy_buildsystem () {
config.set('General', 'init_path', conf_initpath)
config.set('General', 'core_meta_subdir', core_meta_subdir)
config.add_section('SDK')
- config.set('SDK', 'sdk_targets', d.getVar('SDK_TARGETS', True))
- updateurl = d.getVar('SDK_UPDATE_URL', True)
+ config.set('SDK', 'sdk_targets', d.getVar('SDK_TARGETS'))
+ updateurl = d.getVar('SDK_UPDATE_URL')
if updateurl:
config.set('SDK', 'updateserver', updateurl)
bb.utils.mkdirhier(os.path.join(baseoutpath, 'conf'))
@@ -215,7 +216,7 @@ python copy_buildsystem () {
pass
# Create a layer for new recipes / appends
- bbpath = d.getVar('BBPATH', True)
+ bbpath = d.getVar('BBPATH')
bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')])
# Create bblayers.conf
@@ -242,22 +243,25 @@ python copy_buildsystem () {
# Copy uninative tarball
# For now this is where uninative.bbclass expects the tarball
- uninative_file = d.expand('${SDK_DEPLOY}/${BUILD_ARCH}-nativesdk-libc.tar.bz2')
- uninative_checksum = bb.utils.sha256_file(uninative_file)
- uninative_outdir = '%s/downloads/uninative/%s' % (baseoutpath, uninative_checksum)
- bb.utils.mkdirhier(uninative_outdir)
- shutil.copy(uninative_file, uninative_outdir)
-
- env_whitelist = (d.getVar('BB_ENV_EXTRAWHITE', True) or '').split()
+ if bb.data.inherits_class('uninative', d):
+ uninative_file = d.expand('${UNINATIVE_DLDIR}/' + d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH")) + '/${UNINATIVE_TARBALL}')
+ uninative_checksum = bb.utils.sha256_file(uninative_file)
+ uninative_outdir = '%s/downloads/uninative/%s' % (baseoutpath, uninative_checksum)
+ bb.utils.mkdirhier(uninative_outdir)
+ shutil.copy(uninative_file, uninative_outdir)
+
+ env_whitelist = (d.getVar('BB_ENV_EXTRAWHITE') or '').split()
env_whitelist_values = {}
# Create local.conf
- builddir = d.getVar('TOPDIR', True)
+ builddir = d.getVar('TOPDIR')
+ if derivative and os.path.exists(builddir + '/conf/auto.conf'):
+ shutil.copyfile(builddir + '/conf/auto.conf', baseoutpath + '/conf/auto.conf')
if derivative:
shutil.copyfile(builddir + '/conf/local.conf', baseoutpath + '/conf/local.conf')
else:
- local_conf_whitelist = (d.getVar('SDK_LOCAL_CONF_WHITELIST', True) or '').split()
- local_conf_blacklist = (d.getVar('SDK_LOCAL_CONF_BLACKLIST', True) or '').split()
+ local_conf_whitelist = (d.getVar('SDK_LOCAL_CONF_WHITELIST') or '').split()
+ local_conf_blacklist = (d.getVar('SDK_LOCAL_CONF_BLACKLIST') or '').split()
def handle_var(varname, origvalue, op, newlines):
if varname in local_conf_blacklist or (origvalue.strip().startswith('/') and not varname in local_conf_whitelist):
newlines.append('# Removed original setting of %s\n' % varname)
@@ -267,8 +271,12 @@ python copy_buildsystem () {
env_whitelist_values[varname] = origvalue
return origvalue, op, 0, True
varlist = ['[^#=+ ]*']
+ oldlines = []
+ if os.path.exists(builddir + '/conf/auto.conf'):
+ with open(builddir + '/conf/auto.conf', 'r') as f:
+ oldlines += f.readlines()
with open(builddir + '/conf/local.conf', 'r') as f:
- oldlines = f.readlines()
+ oldlines += f.readlines()
(updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var)
with open(baseoutpath + '/conf/local.conf', 'w') as f:
@@ -282,10 +290,12 @@ python copy_buildsystem () {
# Write a newline just in case there's none at the end of the original
f.write('\n')
+ f.write('TMPDIR = "${TOPDIR}/tmp"\n')
+ f.write('TCLIBCAPPEND = ""\n')
f.write('DL_DIR = "${TOPDIR}/downloads"\n')
f.write('INHERIT += "%s"\n' % 'uninative')
- f.write('UNINATIVE_CHECKSUM[%s] = "%s"\n\n' % (d.getVar('BUILD_ARCH', True), uninative_checksum))
+ f.write('UNINATIVE_CHECKSUM[%s] = "%s"\n\n' % (d.getVar('BUILD_ARCH'), uninative_checksum))
f.write('CONF_VERSION = "%s"\n\n' % d.getVar('CONF_VERSION', False))
# Some classes are not suitable for SDK, remove them from INHERIT
@@ -305,13 +315,13 @@ python copy_buildsystem () {
f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n\n')
# Set up whitelist for run on install
- f.write('BB_SETSCENE_ENFORCE_WHITELIST = "%:* *:do_shared_workdir *:do_rm_work *:do_package"\n\n')
+ f.write('BB_SETSCENE_ENFORCE_WHITELIST = "%:* *:do_shared_workdir *:do_rm_work wic-tools:* *:do_addto_recipe_sysroot"\n\n')
# Hide the config information from bitbake output (since it's fixed within the SDK)
f.write('BUILDCFG_HEADER = ""\n\n')
# Map gcc-dependent uninative sstate cache for installer usage
- f.write('SSTATE_MIRRORS = "file://universal/(.*) file://universal-4.9/\\1\\nfile://universal-4.9/(.*) file://universal-4.8/\\1"\n\n')
+ f.write('SSTATE_MIRRORS += " file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n\n')
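The switch from a replacing assignment to += keeps any user-provided mirrors; the pairs are assumed to give the installer a fallback chain across host gcc versions for the uninative sstate:

    # file://universal/<path>     -> file://universal-4.9/<path>
    # file://universal-4.9/<path> -> file://universal-4.8/<path>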
# Allow additional config through sdk-extra.conf
fn = bb.cookerdata.findConfigFile('sdk-extra.conf', d)
@@ -322,7 +332,7 @@ python copy_buildsystem () {
# If you define a sdk_extraconf() function then it can contain additional config
# (Though this is awkward; sdk-extra.conf should probably be used instead)
- extraconf = (d.getVar('sdk_extraconf', True) or '').strip()
+ extraconf = (d.getVar('sdk_extraconf') or '').strip()
if extraconf:
# Strip off any leading / trailing spaces
for line in extraconf.splitlines():
@@ -331,22 +341,6 @@ python copy_buildsystem () {
f.write('require conf/locked-sigs.inc\n')
f.write('require conf/unlocked-sigs.inc\n')
- if os.path.exists(builddir + '/conf/auto.conf'):
- if derivative:
- shutil.copyfile(builddir + '/conf/auto.conf', baseoutpath + '/conf/auto.conf')
- else:
- with open(builddir + '/conf/auto.conf', 'r') as f:
- oldlines = f.readlines()
- (updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var)
- with open(baseoutpath + '/conf/auto.conf', 'w') as f:
- f.write('# WARNING: this configuration has been automatically generated and in\n')
- f.write('# most cases should not be edited. If you need more flexibility than\n')
- f.write('# this configuration provides, it is strongly suggested that you set\n')
- f.write('# up a proper instance of the full build system and use that instead.\n\n')
- for line in newlines:
- if line.strip() and not line.startswith('#'):
- f.write(line)
-
# Write a templateconf.cfg
with open(baseoutpath + '/conf/templateconf.cfg', 'w') as f:
f.write('meta/conf\n')
@@ -355,7 +349,7 @@ python copy_buildsystem () {
# BB_ENV_EXTRAWHITE) are set in the SDK's configuration
extralines = []
for name, value in env_whitelist_values.items():
- actualvalue = d.getVar(name, True) or ''
+ actualvalue = d.getVar(name) or ''
if value != actualvalue:
extralines.append('%s = "%s"\n' % (name, actualvalue))
if extralines:
@@ -368,7 +362,7 @@ python copy_buildsystem () {
# Filter the locked signatures file to just the sstate tasks we are interested in
excluded_targets = get_sdk_install_targets(d, images_only=True)
- sigfile = d.getVar('WORKDIR', True) + '/locked-sigs.inc'
+ sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc'
lockedsigs_pruned = baseoutpath + '/conf/locked-sigs.inc'
oe.copy_buildsystem.prune_lockedsigs([],
excluded_targets.split(),
@@ -381,36 +375,36 @@ python copy_buildsystem () {
# uninative.bbclass sets NATIVELSBSTRING to 'universal%s' % oe.utils.host_gcc_version(d)
fixedlsbstring = "universal%s" % oe.utils.host_gcc_version(d)
- sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN', True) == '1')
- sdk_ext_type = d.getVar('SDK_EXT_TYPE', True)
+ sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1')
+ sdk_ext_type = d.getVar('SDK_EXT_TYPE')
if sdk_ext_type != 'minimal' or sdk_include_toolchain or derivative:
# Create the filtered task list used to generate the sstate cache shipped with the SDK
- tasklistfn = d.getVar('WORKDIR', True) + '/tasklist.txt'
+ tasklistfn = d.getVar('WORKDIR') + '/tasklist.txt'
create_filtered_tasklist(d, baseoutpath, tasklistfn, conf_initpath)
else:
tasklistfn = None
# Add packagedata if enabled
- if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1':
- lockedsigs_base = d.getVar('WORKDIR', True) + '/locked-sigs-base.inc'
- lockedsigs_copy = d.getVar('WORKDIR', True) + '/locked-sigs-copy.inc'
+ if d.getVar('SDK_INCLUDE_PKGDATA') == '1':
+ lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base.inc'
+ lockedsigs_copy = d.getVar('WORKDIR') + '/locked-sigs-copy.inc'
shutil.move(lockedsigs_pruned, lockedsigs_base)
oe.copy_buildsystem.merge_lockedsigs(['do_packagedata'],
lockedsigs_base,
- d.getVar('STAGING_DIR_HOST', True) + '/world-pkgdata/locked-sigs-pkgdata.inc',
+ d.getVar('STAGING_DIR_HOST') + '/world-pkgdata/locked-sigs-pkgdata.inc',
lockedsigs_pruned,
lockedsigs_copy)
if sdk_include_toolchain:
- lockedsigs_base = d.getVar('WORKDIR', True) + '/locked-sigs-base2.inc'
- lockedsigs_toolchain = d.getVar('STAGING_DIR_HOST', True) + '/locked-sigs/locked-sigs-extsdk-toolchain.inc'
+ lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base2.inc'
+ lockedsigs_toolchain = d.expand("${STAGING_DIR}/${TUNE_PKGARCH}/meta-extsdk-toolchain/locked-sigs/locked-sigs-extsdk-toolchain.inc")
shutil.move(lockedsigs_pruned, lockedsigs_base)
oe.copy_buildsystem.merge_lockedsigs([],
lockedsigs_base,
lockedsigs_toolchain,
lockedsigs_pruned)
oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_toolchain,
- d.getVar('SSTATE_DIR', True),
+ d.getVar('SSTATE_DIR'),
sstate_out, d,
fixedlsbstring,
filterfile=tasklistfn)
@@ -420,22 +414,22 @@ python copy_buildsystem () {
# Assume the user is not going to set up an additional sstate
# mirror, thus we need to copy the additional artifacts (from
# workspace recipes) into the derivative SDK
- lockedsigs_orig = d.getVar('TOPDIR', True) + '/conf/locked-sigs.inc'
+ lockedsigs_orig = d.getVar('TOPDIR') + '/conf/locked-sigs.inc'
if os.path.exists(lockedsigs_orig):
- lockedsigs_extra = d.getVar('WORKDIR', True) + '/locked-sigs-extra.inc'
+ lockedsigs_extra = d.getVar('WORKDIR') + '/locked-sigs-extra.inc'
oe.copy_buildsystem.merge_lockedsigs(None,
lockedsigs_orig,
lockedsigs_pruned,
None,
lockedsigs_extra)
oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_extra,
- d.getVar('SSTATE_DIR', True),
+ d.getVar('SSTATE_DIR'),
sstate_out, d,
fixedlsbstring,
filterfile=tasklistfn)
else:
oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_pruned,
- d.getVar('SSTATE_DIR', True),
+ d.getVar('SSTATE_DIR'),
sstate_out, d,
fixedlsbstring,
filterfile=tasklistfn)
@@ -466,24 +460,24 @@ python copy_buildsystem () {
def get_current_buildtools(d):
"""Get the file name of the current buildtools installer"""
import glob
- btfiles = glob.glob(os.path.join(d.getVar('SDK_DEPLOY', True), '*-buildtools-nativesdk-standalone-*.sh'))
+ btfiles = glob.glob(os.path.join(d.getVar('SDK_DEPLOY'), '*-buildtools-nativesdk-standalone-*.sh'))
btfiles.sort(key=os.path.getctime)
return os.path.basename(btfiles[-1])
def get_sdk_required_utilities(buildtools_fn, d):
"""Find required utilities that aren't provided by the buildtools"""
- sanity_required_utilities = (d.getVar('SANITY_REQUIRED_UTILITIES', True) or '').split()
+ sanity_required_utilities = (d.getVar('SANITY_REQUIRED_UTILITIES') or '').split()
sanity_required_utilities.append(d.expand('${BUILD_PREFIX}gcc'))
sanity_required_utilities.append(d.expand('${BUILD_PREFIX}g++'))
- buildtools_installer = os.path.join(d.getVar('SDK_DEPLOY', True), buildtools_fn)
+ buildtools_installer = os.path.join(d.getVar('SDK_DEPLOY'), buildtools_fn)
filelist, _ = bb.process.run('%s -l' % buildtools_installer)
localdata = bb.data.createCopy(d)
localdata.setVar('SDKPATH', '.')
- sdkpathnative = localdata.getVar('SDKPATHNATIVE', True)
- sdkbindirs = [localdata.getVar('bindir_nativesdk', True),
- localdata.getVar('sbindir_nativesdk', True),
- localdata.getVar('base_bindir_nativesdk', True),
- localdata.getVar('base_sbindir_nativesdk', True)]
+ sdkpathnative = localdata.getVar('SDKPATHNATIVE')
+ sdkbindirs = [localdata.getVar('bindir_nativesdk'),
+ localdata.getVar('sbindir_nativesdk'),
+ localdata.getVar('base_bindir_nativesdk'),
+ localdata.getVar('base_sbindir_nativesdk')]
for line in filelist.splitlines():
splitline = line.split()
if len(splitline) > 5:
@@ -510,9 +504,10 @@ install_tools() {
done
# We can't use the same method as above because files in the sysroot won't exist at this point
# (they get populated from sstate on installation)
- if [ "${SDK_INCLUDE_TOOLCHAIN}" == "1" ] ; then
- binrelpath=${@os.path.relpath(d.getVar('STAGING_BINDIR_NATIVE',True), d.getVar('TOPDIR', True))}
- lnr ${SDK_OUTPUT}/${SDKPATH}/$binrelpath/unfsd ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/unfsd
+ unfsd_path="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/unfsd"
+ if [ "${SDK_INCLUDE_TOOLCHAIN}" = "1" -a ! -e $unfsd_path ] ; then
+ binrelpath=${@os.path.relpath(d.getVar('STAGING_BINDIR_NATIVE'), d.getVar('TMPDIR'))}
+ lnr ${SDK_OUTPUT}/${SDKPATH}/tmp/$binrelpath/unfsd $unfsd_path
fi
touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase
@@ -583,6 +578,8 @@ sdk_ext_postinst() {
	# Allow bitbake environment setup to be run as part of this sdk.
echo "export OE_SKIP_SDK_CHECK=1" >> $env_setup_script
+ # Work around runqemu not knowing how to get this information within the eSDK
+ echo "export DEPLOY_DIR_IMAGE=$target_sdk_dir/tmp/${@os.path.relpath(d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('TMPDIR'))}" >> $env_setup_script
	# Another slight hack: we need this in the path only for devtool,
	# so put it at the end of $PATH.
@@ -613,8 +610,8 @@ SDK_INSTALL_TARGETS = ""
fakeroot python do_populate_sdk_ext() {
# FIXME hopefully we can remove this restriction at some point, but uninative
# currently forces this upon us
- if d.getVar('SDK_ARCH', True) != d.getVar('BUILD_ARCH', True):
- bb.fatal('The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is set to %s (likely via setting SDKMACHINE) which is different from the architecture of the build machine (%s). Unable to continue.' % (d.getVar('SDK_ARCH', True), d.getVar('BUILD_ARCH', True)))
+ if d.getVar('SDK_ARCH') != d.getVar('BUILD_ARCH'):
+ bb.fatal('The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is set to %s (likely via setting SDKMACHINE) which is different from the architecture of the build machine (%s). Unable to continue.' % (d.getVar('SDK_ARCH'), d.getVar('BUILD_ARCH')))
d.setVar('SDK_INSTALL_TARGETS', get_sdk_install_targets(d))
buildtools_fn = get_current_buildtools(d)
@@ -628,10 +625,12 @@ fakeroot python do_populate_sdk_ext() {
def get_ext_sdk_depends(d):
# Note: the deps varflag is a list not a string, so we need to specify expand=False
deps = d.getVarFlag('do_image_complete', 'deps', False)
- pn = d.getVar('PN', True)
+ pn = d.getVar('PN')
deplist = ['%s:%s' % (pn, dep) for dep in deps]
- for task in ['do_image_complete', 'do_rootfs', 'do_build']:
- deplist.extend((d.getVarFlag(task, 'depends', True) or '').split())
+ tasklist = bb.build.tasksbetween('do_image_complete', 'do_build', d)
+ tasklist.append('do_rootfs')
+ for task in tasklist:
+ deplist.extend((d.getVarFlag(task, 'depends') or '').split())
return ' '.join(deplist)
python do_sdk_depends() {
@@ -639,13 +638,13 @@ python do_sdk_depends() {
# dependencies we don't need to (e.g. buildtools-tarball) and bringing those
# into the SDK's sstate-cache
import oe.copy_buildsystem
- sigfile = d.getVar('WORKDIR', True) + '/locked-sigs.inc'
+ sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc'
oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
}
addtask sdk_depends
do_sdk_depends[dirs] = "${WORKDIR}"
-do_sdk_depends[depends] = "${@get_ext_sdk_depends(d)}"
+do_sdk_depends[depends] = "${@get_ext_sdk_depends(d)} meta-extsdk-toolchain:do_populate_sysroot"
do_sdk_depends[recrdeptask] = "${@d.getVarFlag('do_populate_sdk', 'recrdeptask', False)}"
do_sdk_depends[recrdeptask] += "do_populate_lic do_package_qa do_populate_sysroot do_deploy ${SDK_RECRDEP_TASKS}"
do_sdk_depends[rdepends] = "${@get_sdk_ext_rdepends(d)}"
@@ -653,17 +652,21 @@ do_sdk_depends[rdepends] = "${@get_sdk_ext_rdepends(d)}"
def get_sdk_ext_rdepends(d):
localdata = d.createCopy()
localdata.appendVar('OVERRIDES', ':task-populate-sdk-ext')
- bb.data.update_data(localdata)
- return localdata.getVarFlag('do_populate_sdk', 'rdepends', True)
+ return localdata.getVarFlag('do_populate_sdk', 'rdepends')
do_populate_sdk_ext[dirs] = "${@d.getVarFlag('do_populate_sdk', 'dirs', False)}"
do_populate_sdk_ext[depends] = "${@d.getVarFlag('do_populate_sdk', 'depends', False)} \
- buildtools-tarball:do_populate_sdk uninative-tarball:do_populate_sdk \
- ${@'meta-world-pkgdata:do_collect_packagedata' if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1' else ''} \
- ${@'meta-extsdk-toolchain:do_locked_sigs' if d.getVar('SDK_INCLUDE_TOOLCHAIN', True) == '1' else ''}"
+ buildtools-tarball:do_populate_sdk \
+ ${@'meta-world-pkgdata:do_collect_packagedata' if d.getVar('SDK_INCLUDE_PKGDATA') == '1' else ''} \
+ ${@'meta-extsdk-toolchain:do_locked_sigs' if d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1' else ''}"
-do_populate_sdk_ext[rdepends] += "${@' '.join([x + ':do_build' for x in d.getVar('SDK_TARGETS', True).split()])}"
+# We must avoid depending on do_build here if rm_work.bbclass is active,
+# because otherwise do_rm_work may run before do_populate_sdk_ext itself.
+# We can't mark do_populate_sdk_ext and do_sdk_depends as having to
+# run before do_rm_work, because then they would also run as part
+# of normal builds.
+do_populate_sdk_ext[rdepends] += "${@' '.join([x + ':' + (d.getVar('RM_WORK_BUILD_WITHOUT') or 'do_build') for x in d.getVar('SDK_TARGETS').split()])}"
# Make sure code changes can result in rebuild
do_populate_sdk_ext[vardeps] += "copy_buildsystem \
@@ -678,7 +681,7 @@ SDKEXTDEPLOYDIR = "${WORKDIR}/deploy-${PN}-populate-sdk-ext"
SSTATETASKS += "do_populate_sdk_ext"
SSTATE_SKIP_CREATION_task-populate-sdk-ext = '1'
-do_populate_sdk_ext[cleandirs] = "${SDKDEPLOYDIR}"
+do_populate_sdk_ext[cleandirs] = "${SDKEXTDEPLOYDIR}"
do_populate_sdk_ext[sstate-inputdirs] = "${SDKEXTDEPLOYDIR}"
do_populate_sdk_ext[sstate-outputdirs] = "${SDK_DEPLOY}"
do_populate_sdk_ext[stamp-extra-info] = "${MACHINE}"
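
Most hunks in this file are mechanical: bitbake's datastore now expands variables by default, so the explicit True argument to getVar()/getVarFlag() is dropped. A minimal sketch of the semantics these hunks assume (the FakeDataSmart class below is an illustrative stand-in, not the real bb.data implementation):

    import re

    # Illustrative stand-in for the bitbake datastore; real code uses bb.data.
    class FakeDataSmart:
        def __init__(self):
            self.vars = {}

        def setVar(self, name, value):
            self.vars[name] = value

        def expand(self, s):
            # Recursively expand ${VAR} references that we know about.
            def repl(m):
                name = m.group(1)
                return self.expand(self.vars[name]) if name in self.vars else m.group(0)
            return re.sub(r'\$\{(\w+)\}', repl, s)

        def getVar(self, name, expand=True):
            # Newer bitbake defaults to expand=True, which is why the
            # explicit second argument disappears throughout this patch.
            value = self.vars.get(name)
            return self.expand(value) if expand and value is not None else value

    d = FakeDataSmart()
    d.setVar('WORKDIR', '/build/tmp/work/demo')
    d.setVar('SIGFILE', '${WORKDIR}/locked-sigs.inc')
    assert d.getVar('SIGFILE') == '/build/tmp/work/demo/locked-sigs.inc'
    assert d.getVar('SIGFILE', False) == '${WORKDIR}/locked-sigs.inc'
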
diff --git a/import-layers/yocto-poky/meta/classes/prexport.bbclass b/import-layers/yocto-poky/meta/classes/prexport.bbclass
index 809ec1034..6dcf99e29 100644
--- a/import-layers/yocto-poky/meta/classes/prexport.bbclass
+++ b/import-layers/yocto-poky/meta/classes/prexport.bbclass
@@ -15,7 +15,7 @@ python prexport_handler () {
if isinstance(e, bb.event.RecipeParsed):
import oe.prservice
#get all PR values for the current PRAUTOINX
- ver = e.data.getVar('PRSERV_DUMPOPT_VERSION', True)
+ ver = e.data.getVar('PRSERV_DUMPOPT_VERSION')
ver = ver.replace('%','-')
retval = oe.prservice.prserv_dump_db(e.data)
if not retval:
@@ -40,7 +40,7 @@ python prexport_handler () {
import oe.prservice
oe.prservice.prserv_check_avail(e.data)
#remove dumpfile
- bb.utils.remove(e.data.getVar('PRSERV_DUMPFILE', True))
+ bb.utils.remove(e.data.getVar('PRSERV_DUMPFILE'))
elif isinstance(e, bb.event.ParseCompleted):
import oe.prservice
#dump meta info of tables
diff --git a/import-layers/yocto-poky/meta/classes/ptest.bbclass b/import-layers/yocto-poky/meta/classes/ptest.bbclass
index fa3561e62..c19f65b9b 100644
--- a/import-layers/yocto-poky/meta/classes/ptest.bbclass
+++ b/import-layers/yocto-poky/meta/classes/ptest.bbclass
@@ -2,7 +2,7 @@ SUMMARY_${PN}-ptest ?= "${SUMMARY} - Package test files"
DESCRIPTION_${PN}-ptest ?= "${DESCRIPTION} \
This package contains a test directory ${PTEST_PATH} for package test purposes."
-PTEST_PATH ?= "${libdir}/${PN}/ptest"
+PTEST_PATH ?= "${libdir}/${BPN}/ptest"
FILES_${PN}-ptest = "${PTEST_PATH}"
SECTION_${PN}-ptest = "devel"
ALLOW_EMPTY_${PN}-ptest = "1"
@@ -61,7 +61,7 @@ python () {
d.setVarFlag('do_install_ptest_base', 'fakeroot', '1')
# Remove all '*ptest_base' tasks when ptest is not enabled
- if not(d.getVar('PTEST_ENABLED', True) == "1"):
+ if not(d.getVar('PTEST_ENABLED') == "1"):
for i in ['do_configure_ptest_base', 'do_compile_ptest_base', 'do_install_ptest_base']:
bb.build.deltask(i, d)
}
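
Using ${BPN} instead of ${PN} in PTEST_PATH means variants of a recipe resolve to one shared ptest path. Roughly, BPN is PN with the class prefixes and suffixes stripped; a simplified sketch of that relationship (the prefix/suffix lists here are abbreviated, the real ones live in bitbake.conf):

    # Simplified PN -> BPN mapping; illustrates why lib32-busybox and
    # busybox now both use ${libdir}/busybox/ptest.
    PREFIXES = ('lib32-', 'lib64-', 'nativesdk-')
    SUFFIXES = ('-native', '-cross', '-initial')

    def base_pn(pn):
        for prefix in PREFIXES:
            if pn.startswith(prefix):
                pn = pn[len(prefix):]
        for suffix in SUFFIXES:
            if pn.endswith(suffix):
                pn = pn[:-len(suffix)]
        return pn

    assert base_pn('lib32-busybox') == 'busybox'
    assert base_pn('zlib-native') == 'zlib'
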
diff --git a/import-layers/yocto-poky/meta/classes/qemu.bbclass b/import-layers/yocto-poky/meta/classes/qemu.bbclass
index f2d4d1c9e..f5c578012 100644
--- a/import-layers/yocto-poky/meta/classes/qemu.bbclass
+++ b/import-layers/yocto-poky/meta/classes/qemu.bbclass
@@ -4,12 +4,12 @@
#
def qemu_target_binary(data):
- package_arch = data.getVar("PACKAGE_ARCH", True)
- qemu_target_binary = (data.getVar("QEMU_TARGET_BINARY_%s" % package_arch, True) or "")
+ package_arch = data.getVar("PACKAGE_ARCH")
+ qemu_target_binary = (data.getVar("QEMU_TARGET_BINARY_%s" % package_arch) or "")
if qemu_target_binary:
return qemu_target_binary
- target_arch = data.getVar("TARGET_ARCH", True)
+ target_arch = data.getVar("TARGET_ARCH")
if target_arch in ("i486", "i586", "i686"):
target_arch = "i386"
elif target_arch == "powerpc":
@@ -26,7 +26,7 @@ def qemu_wrapper_cmdline(data, rootfs_path, library_paths):
if qemu_binary == "qemu-allarch":
qemu_binary = "qemuwrapper"
- qemu_options = data.getVar("QEMU_OPTIONS", True)
+ qemu_options = data.getVar("QEMU_OPTIONS")
return "PSEUDO_UNLOAD=1 " + qemu_binary + " " + qemu_options + " -L " + rootfs_path\
+ " -E LD_LIBRARY_PATH=" + ":".join(library_paths) + " "
@@ -52,7 +52,7 @@ def qemu_run_binary(data, rootfs_path, binary):
# this dance). For others (e.g. arm) a -cpu option is not necessary, since the
# qemu-arm default CPU supports all required architecture levels.
-QEMU_OPTIONS = "-r ${OLDEST_KERNEL} ${@d.getVar("QEMU_EXTRAOPTIONS_%s" % d.getVar('PACKAGE_ARCH', True), True) or ""}"
+QEMU_OPTIONS = "-r ${OLDEST_KERNEL} ${@d.getVar("QEMU_EXTRAOPTIONS_%s" % d.getVar('PACKAGE_ARCH')) or ""}"
QEMU_OPTIONS[vardeps] += "QEMU_EXTRAOPTIONS_${PACKAGE_ARCH}"
QEMU_EXTRAOPTIONS_ppce500v2 = " -cpu e500v2"
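
For reference, the fallback in qemu_target_binary() above reduces a target architecture to the matching user-mode QEMU binary; a standalone sketch of just the mapping visible in this hunk (the override parameter stands in for QEMU_TARGET_BINARY_<PACKAGE_ARCH>):

    def qemu_target_binary(target_arch, override=None):
        # override plays the role of QEMU_TARGET_BINARY_<PACKAGE_ARCH>
        if override:
            return override
        if target_arch in ('i486', 'i586', 'i686'):
            target_arch = 'i386'
        elif target_arch == 'powerpc':
            target_arch = 'ppc'
        return 'qemu-%s' % target_arch

    assert qemu_target_binary('i586') == 'qemu-i386'
    assert qemu_target_binary('powerpc') == 'qemu-ppc'
    assert qemu_target_binary('arm', override='qemu-armeb') == 'qemu-armeb'
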
diff --git a/import-layers/yocto-poky/meta/classes/qemuboot.bbclass b/import-layers/yocto-poky/meta/classes/qemuboot.bbclass
index b5cc93dc9..3468d1c67 100644
--- a/import-layers/yocto-poky/meta/classes/qemuboot.bbclass
+++ b/import-layers/yocto-poky/meta/classes/qemuboot.bbclass
@@ -3,30 +3,52 @@
# boot by runqemu:
#
# QB_SYSTEM_NAME: qemu name, e.g., "qemu-system-i386"
+#
# QB_OPT_APPEND: options to append to qemu, e.g., "-show-cursor"
+#
# QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage"
+#
# QB_DEFAULT_FSTYPE: default FSTYPE to boot, e.g., "ext4"
+#
# QB_MEM: memory, e.g., "-m 512"
+#
# QB_MACHINE: qemu machine, e.g., "-machine virt"
+#
# QB_CPU: qemu cpu, e.g., "-cpu qemu32"
+#
# QB_CPU_KVM: similar to QB_CPU, but used when KVM is enabled, e.g., '-cpu kvm64';
# set it when the host supports KVM.
+#
# QB_KERNEL_CMDLINE_APPEND: options to append to kernel's -append
# option, e.g., "console=ttyS0 console=tty"
+#
# QB_DTB: qemu dtb name
+#
# QB_AUDIO_DRV: qemu audio driver, e.g., "alsa"; set it when audio support is needed
+#
# QB_AUDIO_OPT: qemu audio option, e.g., "-soundhw ac97,es1370", used
# when QB_AUDIO_DRV is set.
+#
# QB_KERNEL_ROOT: kernel's root, e.g., /dev/vda
+#
+# QB_NETWORK_DEVICE: network device, e.g., "-device virtio-net-pci,netdev=net0,mac=@MAC@",
+# it needs to work together with QB_TAP_OPT and QB_SLIRP_OPT.
+# Note: runqemu will replace @MAC@ with a predefined MAC; you can set
+# a custom one, but that may cause conflicts when multiple QEMU instances
+# are running on the same host.
+#
# QB_TAP_OPT: network option for 'tap' mode, e.g.,
-# "-netdev tap,id=net0,ifname=@TAP@,script=no,downscript=no -device virtio-net-device,netdev=net0"
+# "-netdev tap,id=net0,ifname=@TAP@,script=no,downscript=no"
# Note: runqemu will replace "@TAP@" with the device actually used, such as tap0, tap1 ...
-# QB_SLIRP_OPT: network option for SLIRP mode, e.g.,
-# "-netdev user,id=net0 -device virtio-net-device,netdev=net0"
+#
+# QB_SLIRP_OPT: network option for SLIRP mode, e.g., "-netdev user,id=net0"
+#
# QB_ROOTFS_OPT: used as rootfs, e.g.,
# "-drive id=disk0,file=@ROOTFS@,if=none,format=raw -device virtio-blk-device,drive=disk0"
# Note: runqemu will replace "@ROOTFS@" with the image actually used, such as core-image-minimal-qemuarm64.ext4.
+#
# QB_SERIAL_OPT: serial port, e.g., "-serial mon:stdio"
+#
# QB_TCPSERIAL_OPT: tcp serial port option, e.g.,
# " -device virtio-serial-device -chardev socket,id=virtcon,port=@PORT@,host=127.0.0.1 -device virtconsole,chardev=virtcon"
# Note: runqemu will replace "@PORT@" with the port number that is used.
@@ -40,36 +62,53 @@ QB_SERIAL_OPT ?= "-serial mon:stdio -serial null"
QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
QB_DEFAULT_FSTYPE ?= "ext4"
QB_OPT_APPEND ?= "-show-cursor"
+QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@"
-# Create qemuboot.conf
-ROOTFS_POSTPROCESS_COMMAND += "write_qemuboot_conf; "
+# This should be kept aligned with ROOT_VM
+QB_DRIVE_TYPE ?= "/dev/sd"
-python write_qemuboot_conf() {
- import configparser
+# Create qemuboot.conf
+addtask do_write_qemuboot_conf after do_rootfs before do_image
+IMGDEPLOYDIR ?= "${WORKDIR}/deploy-${PN}-image-complete"
- build_vars = ['MACHINE', 'TUNE_ARCH', 'DEPLOY_DIR_IMAGE', \
- 'KERNEL_IMAGETYPE', 'IMAGE_NAME', 'IMAGE_LINK_NAME', \
- 'STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE', \
+def qemuboot_vars(d):
+ build_vars = ['MACHINE', 'TUNE_ARCH', 'DEPLOY_DIR_IMAGE',
+ 'KERNEL_IMAGETYPE', 'IMAGE_NAME', 'IMAGE_LINK_NAME',
+ 'STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE',
'STAGING_DIR_HOST']
+ return build_vars + [k for k in d.keys() if k.startswith('QB_')]
- # Vars from bsp
- qb_vars = []
- for k in d.keys():
- if k.startswith('QB_'):
- qb_vars.append(k)
+do_write_qemuboot_conf[vardeps] += "${@' '.join(qemuboot_vars(d))}"
+do_write_qemuboot_conf[vardepsexclude] += "TOPDIR"
+python do_write_qemuboot_conf() {
+ import configparser
- qemuboot = "%s/%s.qemuboot.conf" % (d.getVar('DEPLOY_DIR_IMAGE', True), d.getVar('IMAGE_NAME', True))
- qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('DEPLOY_DIR_IMAGE', True), d.getVar('IMAGE_LINK_NAME', True))
+ qemuboot = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_NAME'))
+ qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_LINK_NAME'))
+    topdir = "%s/" % d.getVar('TOPDIR').replace("//", "/")
cf = configparser.ConfigParser()
cf.add_section('config_bsp')
- for k in build_vars + qb_vars:
- cf.set('config_bsp', k, '%s' % d.getVar(k, True))
+ for k in qemuboot_vars(d):
+ # qemu-helper-native sysroot is not removed by rm_work and
+ # contains all tools required by runqemu
+ if k == 'STAGING_BINDIR_NATIVE':
+ val = os.path.join(d.getVar('BASE_WORKDIR'), d.getVar('BUILD_SYS'),
+ 'qemu-helper-native/1.0-r1/recipe-sysroot-native/usr/bin/')
+ else:
+ val = d.getVar(k)
+ # we only want to write out relative paths so that we can relocate images
+ # and still run them
+        val = val.replace(topdir, "")
+ cf.set('config_bsp', k, '%s' % val)
# QB_DEFAULT_KERNEL's value of KERNEL_IMAGETYPE is the name of a symlink
# to the kernel file, which hinders relocatability of the qb conf.
# Read the link and replace it with the full filename of the target.
- kernel_link = os.path.join(d.getVar('DEPLOY_DIR_IMAGE', True), d.getVar('QB_DEFAULT_KERNEL', True))
+ kernel_link = os.path.join(d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('QB_DEFAULT_KERNEL'))
kernel = os.path.realpath(kernel_link)
+ # we only want to write out relative paths so that we can relocate images
+ # and still run them
+ kernel=kernel.replace(topdir,"")
cf.set('config_bsp', 'QB_DEFAULT_KERNEL', kernel)
bb.utils.mkdirhier(os.path.dirname(qemuboot))
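
The rewritten do_write_qemuboot_conf stores only TOPDIR-relative values so qemuboot.conf keeps working after the deploy directory is relocated. A self-contained sketch of that write-out (variable names, values, and paths below are illustrative):

    import configparser

    topdir = '/build/'  # ${TOPDIR} plus a trailing slash, as computed above
    values = {
        'MACHINE': 'qemux86',
        'DEPLOY_DIR_IMAGE': '/build/tmp/deploy/images/qemux86',
        'QB_DEFAULT_FSTYPE': 'ext4',
    }

    cf = configparser.ConfigParser()
    cf.add_section('config_bsp')
    for key, val in values.items():
        # Strip TOPDIR so the config survives moving the build tree.
        cf.set('config_bsp', key, val.replace(topdir, ''))

    with open('demo.qemuboot.conf', 'w') as f:
        cf.write(f)
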
diff --git a/import-layers/yocto-poky/meta/classes/recipe_sanity.bbclass b/import-layers/yocto-poky/meta/classes/recipe_sanity.bbclass
index add34df9d..7fa4a849e 100644
--- a/import-layers/yocto-poky/meta/classes/recipe_sanity.bbclass
+++ b/import-layers/yocto-poky/meta/classes/recipe_sanity.bbclass
@@ -1,5 +1,5 @@
def __note(msg, d):
- bb.note("%s: recipe_sanity: %s" % (d.getVar("P", True), msg))
+ bb.note("%s: recipe_sanity: %s" % (d.getVar("P"), msg))
__recipe_sanity_badruntimevars = "RDEPENDS RPROVIDES RRECOMMENDS RCONFLICTS"
def bad_runtime_vars(cfgdata, d):
@@ -7,7 +7,7 @@ def bad_runtime_vars(cfgdata, d):
bb.data.inherits_class("cross", d):
return
- for var in d.getVar("__recipe_sanity_badruntimevars", True).split():
+ for var in d.getVar("__recipe_sanity_badruntimevars").split():
val = d.getVar(var, False)
if val and val != cfgdata.get(var):
__note("%s should be %s_${PN}" % (var, var), d)
@@ -15,11 +15,11 @@ def bad_runtime_vars(cfgdata, d):
__recipe_sanity_reqvars = "DESCRIPTION"
__recipe_sanity_reqdiffvars = ""
def req_vars(cfgdata, d):
- for var in d.getVar("__recipe_sanity_reqvars", True).split():
+ for var in d.getVar("__recipe_sanity_reqvars").split():
if not d.getVar(var, False):
__note("%s should be set" % var, d)
- for var in d.getVar("__recipe_sanity_reqdiffvars", True).split():
+ for var in d.getVar("__recipe_sanity_reqdiffvars").split():
val = d.getVar(var, False)
cfgval = cfgdata.get(var)
@@ -38,11 +38,11 @@ def var_renames_overwrite(cfgdata, d):
def incorrect_nonempty_PACKAGES(cfgdata, d):
if bb.data.inherits_class("native", d) or \
bb.data.inherits_class("cross", d):
- if d.getVar("PACKAGES", True):
+ if d.getVar("PACKAGES"):
return True
def can_use_autotools_base(cfgdata, d):
- cfg = d.getVar("do_configure", True)
+ cfg = d.getVar("do_configure")
if not bb.data.inherits_class("autotools", d):
return False
@@ -61,7 +61,7 @@ def can_delete_FILESPATH(cfgdata, d):
expected = cfgdata.get("FILESPATH")
expectedpaths = d.expand(expected)
unexpanded = d.getVar("FILESPATH", False)
- filespath = d.getVar("FILESPATH", True).split(":")
+ filespath = d.getVar("FILESPATH").split(":")
filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
for fp in filespath:
if not fp in expectedpaths:
@@ -70,22 +70,6 @@ def can_delete_FILESPATH(cfgdata, d):
return False
return expected != unexpanded
-def can_delete_FILESDIR(cfgdata, d):
- expected = cfgdata.get("FILESDIR")
- #expected = "${@bb.utils.which(d.getVar('FILESPATH', True), '.')}"
- unexpanded = d.getVar("FILESDIR", False)
- if unexpanded is None:
- return False
-
- expanded = os.path.normpath(d.getVar("FILESDIR", True))
- filespath = d.getVar("FILESPATH", True).split(":")
- filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
-
- return unexpanded != expected and \
- os.path.exists(expanded) and \
- (expanded in filespath or
- expanded == d.expand(expected))
-
def can_delete_others(p, cfgdata, d):
for k in ["S", "PV", "PN", "DESCRIPTION", "DEPENDS",
"SECTION", "PACKAGES", "EXTRA_OECONF", "EXTRA_OEMAKE"]:
@@ -96,7 +80,7 @@ def can_delete_others(p, cfgdata, d):
continue
try:
- expanded = d.getVar(k, True)
+ expanded = d.getVar(k)
cfgexpanded = d.expand(cfgunexpanded)
except bb.fetch.ParameterError:
continue
@@ -108,11 +92,10 @@ def can_delete_others(p, cfgdata, d):
(p, cfgunexpanded, unexpanded, expanded))
python do_recipe_sanity () {
- p = d.getVar("P", True)
- p = "%s %s %s" % (d.getVar("PN", True), d.getVar("PV", True), d.getVar("PR", True))
+ p = d.getVar("P")
+ p = "%s %s %s" % (d.getVar("PN"), d.getVar("PV"), d.getVar("PR"))
sanitychecks = [
- (can_delete_FILESDIR, "candidate for removal of FILESDIR"),
(can_delete_FILESPATH, "candidate for removal of FILESPATH"),
#(can_use_autotools_base, "candidate for use of autotools_base"),
(incorrect_nonempty_PACKAGES, "native or cross recipe with non-empty PACKAGES"),
diff --git a/import-layers/yocto-poky/meta/classes/relative_symlinks.bbclass b/import-layers/yocto-poky/meta/classes/relative_symlinks.bbclass
new file mode 100644
index 000000000..315773734
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/relative_symlinks.bbclass
@@ -0,0 +1,5 @@
+do_install[postfuncs] += "install_relative_symlinks"
+
+python install_relative_symlinks () {
+ oe.path.replace_absolute_symlinks(d.getVar('D'), d)
+}
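
oe.path.replace_absolute_symlinks rewrites absolute symlink targets under ${D} as relative ones so the installed tree can be staged at any prefix; a minimal standard-library equivalent, simplified under the assumption that absolute targets are rooted inside rootdir:

    import os

    def make_symlinks_relative(rootdir):
        # Rewrite absolute symlink targets under rootdir as relative paths.
        for dirpath, dirnames, filenames in os.walk(rootdir):
            for name in dirnames + filenames:
                path = os.path.join(dirpath, name)
                if os.path.islink(path):
                    target = os.readlink(path)
                    if os.path.isabs(target):
                        # Treat the absolute target as rooted inside rootdir.
                        rel = os.path.relpath(rootdir + target, dirpath)
                        os.remove(path)
                        os.symlink(rel, path)
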
diff --git a/import-layers/yocto-poky/meta/classes/relocatable.bbclass b/import-layers/yocto-poky/meta/classes/relocatable.bbclass
index 4ca9981f4..582812c1c 100644
--- a/import-layers/yocto-poky/meta/classes/relocatable.bbclass
+++ b/import-layers/yocto-poky/meta/classes/relocatable.bbclass
@@ -1,7 +1,18 @@
inherit chrpath
-SYSROOT_PREPROCESS_FUNCS += "relocatable_binaries_preprocess"
+SYSROOT_PREPROCESS_FUNCS += "relocatable_binaries_preprocess relocatable_native_pcfiles"
python relocatable_binaries_preprocess() {
rpath_replace(d.expand('${SYSROOT_DESTDIR}'), d)
}
+
+relocatable_native_pcfiles () {
+ if [ -d ${SYSROOT_DESTDIR}${libdir}/pkgconfig ]; then
+ rel=${@os.path.relpath(d.getVar('base_prefix'), d.getVar('libdir') + "/pkgconfig")}
+ sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" ${SYSROOT_DESTDIR}${libdir}/pkgconfig/*.pc
+ fi
+ if [ -d ${SYSROOT_DESTDIR}${datadir}/pkgconfig ]; then
+ rel=${@os.path.relpath(d.getVar('base_prefix'), d.getVar('datadir') + "/pkgconfig")}
+ sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" ${SYSROOT_DESTDIR}${datadir}/pkgconfig/*.pc
+ fi
+}
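
relocatable_native_pcfiles makes staged .pc files position-independent by replacing the build-time prefix with a path relative to ${pcfiledir}, which pkg-config expands to the directory containing the .pc file. A small sketch of the substitution (directory layout assumed to match the libdir case above):

    import os

    base_prefix = '/usr'
    pcdir = '/usr/lib/pkgconfig'

    # Mirrors: os.path.relpath(d.getVar('base_prefix'), d.getVar('libdir') + "/pkgconfig")
    rel = os.path.relpath(base_prefix, pcdir)  # '../..'

    line = 'prefix=/usr'
    rewritten = line.replace(base_prefix, '${pcfiledir}/%s' % rel)
    assert rewritten == 'prefix=${pcfiledir}/../..'
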
diff --git a/import-layers/yocto-poky/meta/classes/report-error.bbclass b/import-layers/yocto-poky/meta/classes/report-error.bbclass
index 5bb231efc..d6fdd364a 100644
--- a/import-layers/yocto-poky/meta/classes/report-error.bbclass
+++ b/import-layers/yocto-poky/meta/classes/report-error.bbclass
@@ -10,7 +10,7 @@ ERR_REPORT_DIR ?= "${LOG_DIR}/error-report"
def errorreport_getdata(e):
import codecs
- logpath = e.data.getVar('ERR_REPORT_DIR', True)
+ logpath = e.data.getVar('ERR_REPORT_DIR')
datafile = os.path.join(logpath, "error-report.txt")
with codecs.open(datafile, 'r', 'utf-8') as f:
data = f.read()
@@ -19,7 +19,7 @@ def errorreport_getdata(e):
def errorreport_savedata(e, newdata, file):
import json
import codecs
- logpath = e.data.getVar('ERR_REPORT_DIR', True)
+ logpath = e.data.getVar('ERR_REPORT_DIR')
datafile = os.path.join(logpath, file)
with codecs.open(datafile, 'w', 'utf-8') as f:
json.dump(newdata, f, indent=4, sort_keys=True)
@@ -29,18 +29,18 @@ python errorreport_handler () {
import json
import codecs
- logpath = e.data.getVar('ERR_REPORT_DIR', True)
+ logpath = e.data.getVar('ERR_REPORT_DIR')
datafile = os.path.join(logpath, "error-report.txt")
if isinstance(e, bb.event.BuildStarted):
bb.utils.mkdirhier(logpath)
data = {}
- machine = e.data.getVar("MACHINE", True)
+ machine = e.data.getVar("MACHINE")
data['machine'] = machine
- data['build_sys'] = e.data.getVar("BUILD_SYS", True)
- data['nativelsb'] = e.data.getVar("NATIVELSBSTRING", True)
- data['distro'] = e.data.getVar("DISTRO", True)
- data['target_sys'] = e.data.getVar("TARGET_SYS", True)
+ data['build_sys'] = e.data.getVar("BUILD_SYS")
+ data['nativelsb'] = e.data.getVar("NATIVELSBSTRING")
+ data['distro'] = e.data.getVar("DISTRO")
+ data['target_sys'] = e.data.getVar("TARGET_SYS")
data['failures'] = []
data['component'] = " ".join(e.getPkgs())
data['branch_commit'] = str(base_detect_branch(e.data)) + ": " + str(base_detect_revision(e.data))
@@ -51,7 +51,7 @@ python errorreport_handler () {
elif isinstance(e, bb.build.TaskFailed):
task = e.task
taskdata={}
- log = e.data.getVar('BB_LOGFILE', True)
+ log = e.data.getVar('BB_LOGFILE')
taskdata['package'] = e.data.expand("${PF}")
taskdata['task'] = task
if log:
@@ -61,7 +61,7 @@ python errorreport_handler () {
# Replace host-specific paths so the logs are cleaner
for d in ("TOPDIR", "TMPDIR"):
- s = e.data.getVar(d, True)
+ s = e.data.getVar(d)
if s:
logdata = logdata.replace(s, d)
@@ -92,7 +92,7 @@ python errorreport_handler () {
bb.utils.unlockfile(lock)
failures = jsondata['failures']
if(len(failures) > 0):
- filename = "error_report_" + e.data.getVar("BUILDNAME", True)+".txt"
+ filename = "error_report_" + e.data.getVar("BUILDNAME")+".txt"
datafile = errorreport_savedata(e, jsondata, filename)
bb.note("The errors for this build are stored in %s\nYou can send the errors to a reports server by running:\n send-error-report %s [-s server]" % (datafile, datafile))
bb.note("The contents of these logs will be posted in public if you use the above command with the default server. Please ensure you remove any identifying or proprietary information when prompted before sending.")
diff --git a/import-layers/yocto-poky/meta/classes/rm_work.bbclass b/import-layers/yocto-poky/meta/classes/rm_work.bbclass
index 64b6981a4..badeaeba0 100644
--- a/import-layers/yocto-poky/meta/classes/rm_work.bbclass
+++ b/import-layers/yocto-poky/meta/classes/rm_work.bbclass
@@ -10,6 +10,14 @@
#
# RM_WORK_EXCLUDE += "icu-native icu busybox"
#
+# Recipes can also configure which entries in their ${WORKDIR}
+# are preserved besides temp, which already gets excluded by default
+# because it contains logs:
+# do_install_append () {
+# echo "bar" >${WORKDIR}/foo
+# }
+# RM_WORK_EXCLUDE_ITEMS += "foo"
+RM_WORK_EXCLUDE_ITEMS = "temp"
# Use the completion scheduler by default when rm_work is active
# to try and reduce disk usage
@@ -18,9 +26,6 @@ BB_SCHEDULER ?= "completion"
# Run the rm_work task in the idle scheduling class
BB_TASK_IONICE_LEVEL_task-rm_work = "3.0"
-RMWORK_ORIG_TASK := "${BB_DEFAULT_TASK}"
-BB_DEFAULT_TASK = "rm_work_all"
-
do_rm_work () {
# If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
for p in ${RM_WORK_EXCLUDE}; do
@@ -37,7 +42,7 @@ do_rm_work () {
    # failures of removing pseudo folders on NFS2/3 servers.
if [ $dir = 'pseudo' ]; then
rm -rf $dir 2> /dev/null || true
- elif [ $dir != 'temp' ]; then
+ elif ! echo '${RM_WORK_EXCLUDE_ITEMS}' | grep -q -w "$dir"; then
rm -rf $dir
fi
done
@@ -66,7 +71,7 @@ do_rm_work () {
i=dummy
break
;;
- *do_rootfs*|*do_image*|*do_bootimg*|*do_bootdirectdisk*|*do_vmimg*)
+ *do_rootfs*|*do_image*|*do_bootimg*|*do_bootdirectdisk*|*do_vmimg*|*do_write_qemuboot_conf*)
i=dummy
break
;;
@@ -97,13 +102,12 @@ do_rm_work () {
rm -f $i
done
}
-addtask rm_work after do_${RMWORK_ORIG_TASK}
-
do_rm_work_all () {
:
}
do_rm_work_all[recrdeptask] = "do_rm_work"
-addtask rm_work_all after do_rm_work
+do_rm_work_all[noexec] = "1"
+addtask rm_work_all after before do_build
do_populate_sdk[postfuncs] += "rm_work_populatesdk"
rm_work_populatesdk () {
@@ -117,13 +121,52 @@ rm_work_rootfs () {
}
rm_work_rootfs[cleandirs] = "${WORKDIR}/rootfs"
-python () {
+# This task can be used instead of do_build to trigger building
+# without also invoking do_rm_work. It only exists when rm_work.bbclass
+# is active, otherwise do_build needs to be used.
+#
+# The intended usage is
+# ${@ d.getVar('RM_WORK_BUILD_WITHOUT') or 'do_build'}
+# in places that previously used just 'do_build'.
+RM_WORK_BUILD_WITHOUT = "do_build_without_rm_work"
+do_build_without_rm_work () {
+ :
+}
+do_build_without_rm_work[noexec] = "1"
+
+# We have to add these tasks already now, because all tasks are
+# meant to be defined before the RecipeTaskPreProcess event triggers.
+# The inject_rm_work event handler then merely changes task dependencies.
+addtask do_rm_work
+addtask do_build_without_rm_work
+addhandler inject_rm_work
+inject_rm_work[eventmask] = "bb.event.RecipeTaskPreProcess"
+python inject_rm_work() {
if bb.data.inherits_class('kernel', d):
- d.appendVar("RM_WORK_EXCLUDE", ' ' + d.getVar("PN", True))
+ d.appendVar("RM_WORK_EXCLUDE", ' ' + d.getVar("PN"))
# If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
- excludes = (d.getVar("RM_WORK_EXCLUDE", True) or "").split()
- pn = d.getVar("PN", True)
+ excludes = (d.getVar("RM_WORK_EXCLUDE") or "").split()
+ pn = d.getVar("PN")
+
+ # Determine what do_build depends upon, without including do_build
+ # itself or our own special do_rm_work_all.
+ deps = set(bb.build.preceedtask('do_build', True, d))
+ deps.difference_update(('do_build', 'do_rm_work_all'))
+
if pn in excludes:
d.delVarFlag('rm_work_rootfs', 'cleandirs')
d.delVarFlag('rm_work_populatesdk', 'cleandirs')
+ else:
+ # Inject do_rm_work into the tasks of the current recipe such that do_build
+ # depends on it and that it runs after all other tasks that block do_build,
+ # i.e. after all work on the current recipe is done. The reason for taking
+ # this approach instead of making do_rm_work depend on do_build is that
+ # do_build inherits additional runtime dependencies on
+ # other recipes and thus will typically run much later than completion of
+ # work in the recipe itself.
+ # In practice, addtask() here merely updates the dependencies.
+ bb.build.addtask('do_rm_work', 'do_build', ' '.join(deps), d)
+
+ # Always update do_build_without_rm_work dependencies.
+ bb.build.addtask('do_build_without_rm_work', '', ' '.join(deps), d)
}
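
The rewiring done by inject_rm_work is easier to see outside bitbake: do_rm_work is inserted after everything do_build previously waited on, so cleanup runs as soon as a recipe's own work finishes rather than after the whole dependency closure. A toy model of that dependency edit (the task graph here is illustrative; the real one comes from bb.build.preceedtask):

    # Toy model: 'deps' plays the role of bb.build.preceedtask('do_build', True, d).
    task_deps = {
        'do_build': {'do_package_qa', 'do_populate_sysroot', 'do_rm_work_all'},
    }

    deps = set(task_deps['do_build'])
    deps.difference_update(('do_build', 'do_rm_work_all'))

    # do_rm_work now runs after all real work, and do_build after do_rm_work.
    task_deps['do_rm_work'] = deps
    task_deps['do_build'] = {'do_rm_work', 'do_rm_work_all'}
    # do_build_without_rm_work gives the same trigger point minus the cleanup.
    task_deps['do_build_without_rm_work'] = set(deps)

    assert 'do_rm_work' in task_deps['do_build']
    assert 'do_package_qa' in task_deps['do_rm_work']
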
diff --git a/import-layers/yocto-poky/meta/classes/rm_work_and_downloads.bbclass b/import-layers/yocto-poky/meta/classes/rm_work_and_downloads.bbclass
new file mode 100644
index 000000000..7c00bea59
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/rm_work_and_downloads.bbclass
@@ -0,0 +1,33 @@
+# Author: Patrick Ohly <patrick.ohly@intel.com>
+# Copyright: Copyright (C) 2015 Intel Corporation
+#
+# This file is licensed under the MIT license, see COPYING.MIT in
+# this source distribution for the terms.
+
+# This class is used like rm_work:
+# INHERIT += "rm_work_and_downloads"
+#
+# In addition to removing local build directories of a recipe, it also
+# removes the downloaded source. This is achieved by making the DL_DIR
+# recipe-specific. While reducing disk usage, it increases network usage (for
+# example, compiling the same source for target and host implies downloading
+# the source twice).
+#
+# Because the "do_fetch" task does not get re-run after removing the downloaded
+# sources, this class is also not suitable for incremental builds.
+#
+# Where it works well is in well-connected build environments with limited
+# disk space (like TravisCI).
+
+inherit rm_work
+
+# This would ensure that the existing do_rm_work() removes the downloads,
+# but does not work because some recipes have a circular dependency between
+# WORKDIR and DL_DIR (via ${SRCPV}?).
+# DL_DIR = "${WORKDIR}/downloads"
+
+# Instead, go up one level and remove the directory ourselves.
+DL_DIR = "${BASE_WORKDIR}/${MULTIMACH_TARGET_SYS}/${PN}/downloads"
+do_rm_work_append () {
+ rm -rf ${DL_DIR}
+}
diff --git a/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass b/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass
index 0c7ceea54..c19ff8738 100644
--- a/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass
+++ b/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass
@@ -14,6 +14,9 @@ ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
+# Generates a test data file with data store variables expanded, in JSON format
+ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data ; "
+
# Write manifest
IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}.rootfs.manifest"
ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
@@ -30,6 +33,23 @@ ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
SSH_DISABLE_DNS_LOOKUP ?= " ssh_disable_dns_lookup ; "
ROOTFS_POSTPROCESS_COMMAND_append_qemuall = "${SSH_DISABLE_DNS_LOOKUP}"
+# Sort the user and group entries in /etc by ID in order to make the content
+# deterministic. Package installs are not deterministic, causing the ordering
+# of entries to change between builds. If this isn't desired,
+# the command can be overridden.
+#
+# Note that useradd-staticids.bbclass has to be used to ensure that
+# the numeric IDs of dynamically created entries remain stable.
+#
+# We want this to run as late as possible, in particular after
+# systemd_sysusers_create and set_user_group. Using _append is not
+# enough for that; set_user_group is added that way and would end
+# up running after us.
+SORT_PASSWD_POSTPROCESS_COMMAND ??= " sort_passwd; "
+python () {
+ d.appendVar('ROOTFS_POSTPROCESS_COMMAND', '${SORT_PASSWD_POSTPROCESS_COMMAND}')
+}
+
systemd_create_users () {
for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do
[ -e $conffile ] || continue
@@ -71,10 +91,10 @@ read_only_rootfs_hook () {
# and the keys under /var/run/ssh.
if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
- echo "SYSCONFDIR=/etc/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ echo "SYSCONFDIR=\${SYSCONFDIR:-/etc/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
else
- echo "SYSCONFDIR=/var/run/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ echo "SYSCONFDIR=\${SYSCONFDIR:-/var/run/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
fi
fi
@@ -112,7 +132,7 @@ zap_empty_root_password () {
if [ -e ${IMAGE_ROOTFS}/etc/passwd ]; then
sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/passwd
fi
-}
+}
#
# allow dropbear/openssh to accept root logins and logins from accounts with an empty password string
@@ -136,7 +156,10 @@ ssh_allow_empty_password () {
fi
if [ -d ${IMAGE_ROOTFS}${sysconfdir}/pam.d ] ; then
- sed -i 's/nullok_secure/nullok/' ${IMAGE_ROOTFS}${sysconfdir}/pam.d/*
+ for f in `find ${IMAGE_ROOTFS}${sysconfdir}/pam.d/* -type f -exec test -e {} \; -print`
+ do
+ sed -i 's/nullok_secure/nullok/' $f
+ done
fi
}
@@ -146,6 +169,11 @@ ssh_disable_dns_lookup () {
fi
}
+python sort_passwd () {
+ import rootfspostcommands
+ rootfspostcommands.sort_passwd(d.expand('${IMAGE_ROOTFS}${sysconfdir}'))
+}
+
#
# Enable postinst logging if debug-tweaks is enabled
#
@@ -195,31 +223,13 @@ make_zimage_symlink_relative () {
fi
}
-insert_feed_uris () {
-
- echo "Building feeds for [${DISTRO}].."
-
- for line in ${FEED_URIS}
- do
- # strip leading and trailing spaces/tabs, then split into name and uri
- line_clean="`echo "$line"|sed 's/^[ \t]*//;s/[ \t]*$//'`"
- feed_name="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\1/p'`"
- feed_uri="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\2/p'`"
-
- echo "Added $feed_name feed with URL $feed_uri"
-
- # insert new feed-sources
- echo "src/gz $feed_name $feed_uri" >> ${IMAGE_ROOTFS}/etc/opkg/${feed_name}-feed.conf
- done
-}
-
python write_image_manifest () {
from oe.rootfs import image_list_installed_packages
from oe.utils import format_pkg_list
- deploy_dir = d.getVar('IMGDEPLOYDIR', True)
- link_name = d.getVar('IMAGE_LINK_NAME', True)
- manifest_name = d.getVar('IMAGE_MANIFEST', True)
+ deploy_dir = d.getVar('IMGDEPLOYDIR')
+ link_name = d.getVar('IMAGE_LINK_NAME')
+ manifest_name = d.getVar('IMAGE_MANIFEST')
if not manifest_name:
return
@@ -236,7 +246,7 @@ python write_image_manifest () {
os.symlink(os.path.basename(manifest_name), manifest_link)
}
-# Can be use to create /etc/timestamp during image construction to give a reasonably
+# Can be used to create /etc/timestamp during image construction to give a reasonably
# sane default time setting
rootfs_update_timestamp () {
date -u +%4Y%2m%2d%2H%2M%2S >${IMAGE_ROOTFS}/etc/timestamp
@@ -278,3 +288,33 @@ rootfs_check_host_user_contaminated () {
rootfs_sysroot_relativelinks () {
sysroot-relativelinks.py ${SDK_OUTPUT}/${SDKTARGETSYSROOT}
}
+
+# Generates the test data JSON file
+python write_image_test_data() {
+ from oe.data import export2json
+
+ testdata = "%s/%s.testdata.json" % (d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('IMAGE_NAME'))
+ testdata_link = "%s/%s.testdata.json" % (d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('IMAGE_LINK_NAME'))
+
+ bb.utils.mkdirhier(os.path.dirname(testdata))
+    searchString = "%s/" % d.getVar("TOPDIR").replace("//", "/")
+    export2json(d, testdata, searchString=searchString, replaceString="")
+
+ if testdata_link != testdata:
+ if os.path.lexists(testdata_link):
+ os.remove(testdata_link)
+ os.symlink(os.path.basename(testdata), testdata_link)
+}
+write_image_test_data[vardepsexclude] += "TOPDIR"
+
+# Check for unsatisfied recommendations (RRECOMMENDS)
+python rootfs_log_check_recommends() {
+ log_path = d.expand("${T}/log.do_rootfs")
+ with open(log_path, 'r') as log:
+ for line in log:
+ if 'log_check' in line:
+ continue
+
+ if 'unsatisfied recommendation for' in line:
+ bb.warn('[log_check] %s: %s' % (d.getVar('PN', True), line))
+}
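
rootfspostcommands.sort_passwd reorders the passwd/group files (and their shadow counterparts) so image content is reproducible regardless of package install order; the core operation amounts to a numeric sort on the ID field, roughly (field index and entries below are illustrative):

    # Rough equivalent of the reordering applied by sort_passwd: sort
    # entries by their numeric ID (field 3 of /etc/passwd and /etc/group).
    def sort_entries(lines, id_field=2):
        return sorted(lines, key=lambda line: int(line.split(':')[id_field]))

    passwd = [
        'messagebus:x:990:990::/var/lib/dbus:/bin/false\n',
        'root:x:0:0:root:/root:/bin/sh\n',
        'daemon:x:1:1:daemon:/usr/sbin:/bin/sh\n',
    ]
    assert [l.split(':')[0] for l in sort_entries(passwd)] == ['root', 'daemon', 'messagebus']
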
diff --git a/import-layers/yocto-poky/meta/classes/rootfs_deb.bbclass b/import-layers/yocto-poky/meta/classes/rootfs_deb.bbclass
index f79fca608..262e3d555 100644
--- a/import-layers/yocto-poky/meta/classes/rootfs_deb.bbclass
+++ b/import-layers/yocto-poky/meta/classes/rootfs_deb.bbclass
@@ -3,7 +3,6 @@
#
ROOTFS_PKGMANAGE = "dpkg apt"
-ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
@@ -12,9 +11,10 @@ do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
+do_populate_sdk_ext[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
python rootfs_deb_bad_recommendations() {
- if d.getVar("BAD_RECOMMENDATIONS", True):
+ if d.getVar("BAD_RECOMMENDATIONS"):
bb.warn("Debian package install does not support BAD_RECOMMENDATIONS")
}
do_rootfs[prefuncs] += "rootfs_deb_bad_recommendations"
@@ -25,7 +25,7 @@ opkglibdir = "${localstatedir}/lib/opkg"
python () {
# Map TARGET_ARCH to Debian's ideas about architectures
- darch = d.getVar('SDK_ARCH', True)
+ darch = d.getVar('SDK_ARCH')
if darch in ["x86", "i486", "i586", "i686", "pentium"]:
d.setVar('DEB_SDK_ARCH', 'i386')
elif darch == "x86_64":
diff --git a/import-layers/yocto-poky/meta/classes/rootfs_ipk.bbclass b/import-layers/yocto-poky/meta/classes/rootfs_ipk.bbclass
index d5c38fef7..52b468d85 100644
--- a/import-layers/yocto-poky/meta/classes/rootfs_ipk.bbclass
+++ b/import-layers/yocto-poky/meta/classes/rootfs_ipk.bbclass
@@ -7,7 +7,6 @@
EXTRAOPKGCONFIG ?= ""
ROOTFS_PKGMANAGE = "opkg ${EXTRAOPKGCONFIG}"
-ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
@@ -16,6 +15,7 @@ do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
do_populate_sdk[lockfiles] += "${WORKDIR}/ipk.lock"
+do_populate_sdk_ext[lockfiles] += "${WORKDIR}/ipk.lock"
OPKG_PREPROCESS_COMMANDS = ""
@@ -27,8 +27,8 @@ MULTILIBRE_ALLOW_REP = "${OPKGLIBDIR}/opkg|/usr/lib/opkg"
python () {
- if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
- flags = d.getVarFlag('do_rootfs', 'recrdeptask', True)
+ if d.getVar('BUILD_IMAGES_FROM_FEEDS'):
+ flags = d.getVarFlag('do_rootfs', 'recrdeptask')
flags = flags.replace("do_package_write_ipk", "")
flags = flags.replace("do_deploy", "")
flags = flags.replace("do_populate_sysroot", "")
diff --git a/import-layers/yocto-poky/meta/classes/rootfs_rpm.bbclass b/import-layers/yocto-poky/meta/classes/rootfs_rpm.bbclass
index 37730a710..7f305f51c 100644
--- a/import-layers/yocto-poky/meta/classes/rootfs_rpm.bbclass
+++ b/import-layers/yocto-poky/meta/classes/rootfs_rpm.bbclass
@@ -2,20 +2,22 @@
# Creates a root filesystem out of rpm packages
#
-ROOTFS_PKGMANAGE = "rpm smartpm"
-ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
+ROOTFS_PKGMANAGE = "rpm dnf"
-# Add 100Meg of extra space for Smart
-IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "smartpm", " + 102400", "" ,d)}"
+# dnf is using our custom distutils, and so will fail without these
+export STAGING_INCDIR
+export STAGING_LIBDIR
-# Smart is python based, so be sure python-native is available to us.
-EXTRANATIVEPATH += "python-native"
+# Add 100Meg of extra space for dnf
+IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "dnf", " + 102400", "" ,d)}"
+
+# Dnf is Python-based, so be sure python3-native is available to us.
+EXTRANATIVEPATH += "python3-native"
# opkg is needed for update-alternatives
RPMROOTFSDEPENDS = "rpm-native:do_populate_sysroot \
- rpmresolve-native:do_populate_sysroot \
- python-smartpm-native:do_populate_sysroot \
- createrepo-native:do_populate_sysroot \
+ dnf-native:do_populate_sysroot \
+ createrepo-c-native:do_populate_sysroot \
opkg-native:do_populate_sysroot"
do_rootfs[depends] += "${RPMROOTFSDEPENDS}"
@@ -25,8 +27,8 @@ do_rootfs[recrdeptask] += "do_package_write_rpm"
do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
python () {
- if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
- flags = d.getVarFlag('do_rootfs', 'recrdeptask', True)
+ if d.getVar('BUILD_IMAGES_FROM_FEEDS'):
+ flags = d.getVarFlag('do_rootfs', 'recrdeptask')
flags = flags.replace("do_package_write_rpm", "")
flags = flags.replace("do_deploy", "")
flags = flags.replace("do_populate_sysroot", "")
@@ -35,7 +37,3 @@ python () {
d.setVar('RPM_POSTPROCESS_COMMANDS', '')
}
-# Smart is python based, so be sure python-native is available to us.
-EXTRANATIVEPATH += "python-native"
-
-rpmlibdir = "/var/lib/rpm"
diff --git a/import-layers/yocto-poky/meta/classes/sanity.bbclass b/import-layers/yocto-poky/meta/classes/sanity.bbclass
index a11b581a0..e8064ac48 100644
--- a/import-layers/yocto-poky/meta/classes/sanity.bbclass
+++ b/import-layers/yocto-poky/meta/classes/sanity.bbclass
@@ -3,10 +3,10 @@
#
SANITY_REQUIRED_UTILITIES ?= "patch diffstat makeinfo git bzip2 tar \
- gzip gawk chrpath wget cpio perl file"
+ gzip gawk chrpath wget cpio perl file which"
def bblayers_conf_file(d):
- return os.path.join(d.getVar('TOPDIR', True), 'conf/bblayers.conf')
+ return os.path.join(d.getVar('TOPDIR'), 'conf/bblayers.conf')
def sanity_conf_read(fn):
with open(fn, 'r') as f:
@@ -39,8 +39,8 @@ SANITY_DIFF_TOOL ?= "meld"
SANITY_LOCALCONF_SAMPLE ?= "${COREBASE}/meta*/conf/local.conf.sample"
python oecore_update_localconf() {
# Check we are using a valid local.conf
- current_conf = d.getVar('CONF_VERSION', True)
- conf_version = d.getVar('LOCALCONF_VERSION', True)
+ current_conf = d.getVar('CONF_VERSION')
+ conf_version = d.getVar('LOCALCONF_VERSION')
failmsg = """Your version of local.conf was generated from an older/newer version of
local.conf.sample and there have been updates made to this file. Please compare the two
@@ -59,8 +59,8 @@ is a good way to visualise the changes."""
SANITY_SITECONF_SAMPLE ?= "${COREBASE}/meta*/conf/site.conf.sample"
python oecore_update_siteconf() {
# If we have a site.conf, check it's valid
- current_sconf = d.getVar('SCONF_VERSION', True)
- sconf_version = d.getVar('SITE_CONF_VERSION', True)
+ current_sconf = d.getVar('SCONF_VERSION')
+ sconf_version = d.getVar('SITE_CONF_VERSION')
failmsg = """Your version of site.conf was generated from an older version of
site.conf.sample and there have been updates made to this file. Please compare the two
@@ -80,8 +80,8 @@ SANITY_BBLAYERCONF_SAMPLE ?= "${COREBASE}/meta*/conf/bblayers.conf.sample"
python oecore_update_bblayers() {
# bblayers.conf is out of date, so see if we can resolve that
- current_lconf = int(d.getVar('LCONF_VERSION', True))
- lconf_version = int(d.getVar('LAYER_CONF_VERSION', True))
+ current_lconf = int(d.getVar('LCONF_VERSION'))
+ lconf_version = int(d.getVar('LAYER_CONF_VERSION'))
failmsg = """Your version of bblayers.conf has the wrong LCONF_VERSION (has ${LCONF_VERSION}, expecting ${LAYER_CONF_VERSION}).
Please compare your file against bblayers.conf.sample and merge any changes before continuing.
@@ -141,7 +141,7 @@ is a good way to visualise the changes."""
# Handle rename of meta-yocto -> meta-poky
# This marks the start of separate version numbers but code is needed in OE-Core
# for the migration, one last time.
- layers = d.getVar('BBLAYERS', True).split()
+ layers = d.getVar('BBLAYERS').split()
layers = [ os.path.basename(path) for path in layers ]
if 'meta-yocto' in layers:
found = False
@@ -172,7 +172,7 @@ is a good way to visualise the changes."""
}
def raise_sanity_error(msg, d, network_error=False):
- if d.getVar("SANITY_USE_EVENTS", True) == "1":
+ if d.getVar("SANITY_USE_EVENTS") == "1":
try:
bb.event.fire(bb.event.SanityCheckFailed(msg, network_error), d)
except TypeError:
@@ -198,8 +198,8 @@ def check_toolchain_tune_args(data, tune, multilib, errs):
return found_errors
def check_toolchain_args_present(data, tune, multilib, tune_errors, which):
- args_set = (data.getVar("TUNE_%s" % which, True) or "").split()
- args_wanted = (data.getVar("TUNEABI_REQUIRED_%s_tune-%s" % (which, tune), True) or "").split()
+ args_set = (data.getVar("TUNE_%s" % which) or "").split()
+ args_wanted = (data.getVar("TUNEABI_REQUIRED_%s_tune-%s" % (which, tune)) or "").split()
args_missing = []
# If no args are listed/required, we are done.
@@ -226,9 +226,8 @@ def check_toolchain_tune(data, tune, multilib):
# Apply the overrides so we can look at the details.
overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + multilib
localdata.setVar("OVERRIDES", overrides)
- bb.data.update_data(localdata)
bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib))
- features = (localdata.getVar("TUNE_FEATURES_tune-%s" % tune, True) or "").split()
+ features = (localdata.getVar("TUNE_FEATURES_tune-%s" % tune) or "").split()
if not features:
return "Tuning '%s' has no defined features, and cannot be used." % tune
valid_tunes = localdata.getVarFlags('TUNEVALID') or {}
@@ -248,9 +247,9 @@ def check_toolchain_tune(data, tune, multilib):
bb.debug(2, " %s: %s" % (feature, valid_tunes[feature]))
else:
tune_errors.append("Feature '%s' is not defined." % feature)
- whitelist = localdata.getVar("TUNEABI_WHITELIST", True)
+ whitelist = localdata.getVar("TUNEABI_WHITELIST")
if whitelist:
- tuneabi = localdata.getVar("TUNEABI_tune-%s" % tune, True)
+ tuneabi = localdata.getVar("TUNEABI_tune-%s" % tune)
if not tuneabi:
tuneabi = tune
if True not in [x in whitelist.split() for x in tuneabi.split()]:
@@ -264,13 +263,13 @@ def check_toolchain_tune(data, tune, multilib):
def check_toolchain(data):
tune_error_set = []
- deftune = data.getVar("DEFAULTTUNE", True)
+ deftune = data.getVar("DEFAULTTUNE")
tune_errors = check_toolchain_tune(data, deftune, 'default')
if tune_errors:
tune_error_set.append(tune_errors)
- multilibs = (data.getVar("MULTILIB_VARIANTS", True) or "").split()
- global_multilibs = (data.getVar("MULTILIB_GLOBAL_VARIANTS", True) or "").split()
+ multilibs = (data.getVar("MULTILIB_VARIANTS") or "").split()
+ global_multilibs = (data.getVar("MULTILIB_GLOBAL_VARIANTS") or "").split()
if multilibs:
seen_libs = []
@@ -282,7 +281,7 @@ def check_toolchain(data):
seen_libs.append(lib)
if not lib in global_multilibs:
tune_error_set.append("Multilib %s is not present in MULTILIB_GLOBAL_VARIANTS" % lib)
- tune = data.getVar("DEFAULTTUNE_virtclass-multilib-%s" % lib, True)
+ tune = data.getVar("DEFAULTTUNE_virtclass-multilib-%s" % lib)
if tune in seen_tunes:
tune_error_set.append("The tuning '%s' appears in more than one multilib." % tune)
else:
@@ -360,27 +359,34 @@ def check_connectivity(d):
    # URIs to check can be set in the CONNECTIVITY_CHECK_URIS variable
# using the same syntax as for SRC_URI. If the variable is not set
# the check is skipped
- test_uris = (d.getVar('CONNECTIVITY_CHECK_URIS', True) or "").split()
+ test_uris = (d.getVar('CONNECTIVITY_CHECK_URIS') or "").split()
retval = ""
+ bbn = d.getVar('BB_NO_NETWORK')
+ if bbn not in (None, '0', '1'):
+ return 'BB_NO_NETWORK should be "0" or "1", but it is "%s"' % bbn
+
    # Only check connectivity if the network is enabled and the
# CONNECTIVITY_CHECK_URIS are set
- network_enabled = not d.getVar('BB_NO_NETWORK', True)
+ network_enabled = not (bbn == '1')
check_enabled = len(test_uris)
- # Take a copy of the data store and unset MIRRORS and PREMIRRORS
- data = bb.data.createCopy(d)
- data.delVar('PREMIRRORS')
- data.delVar('MIRRORS')
if check_enabled and network_enabled:
+ # Take a copy of the data store and unset MIRRORS and PREMIRRORS
+ data = bb.data.createCopy(d)
+ data.delVar('PREMIRRORS')
+ data.delVar('MIRRORS')
try:
fetcher = bb.fetch2.Fetch(test_uris, data)
fetcher.checkstatus()
except Exception as err:
# Allow the message to be configured so that users can be
# pointed to a support mechanism.
- msg = data.getVar('CONNECTIVITY_CHECK_MSG', True) or ""
+ msg = data.getVar('CONNECTIVITY_CHECK_MSG') or ""
if len(msg) == 0:
- msg = "%s. Please ensure your network is configured correctly.\n" % err
+ msg = "%s.\n" % err
+ msg += " Please ensure your host's network is configured correctly,\n"
+ msg += " or set BB_NO_NETWORK = \"1\" to disable network access if\n"
+ msg += " all required sources are on local disk.\n"
retval = msg
return retval
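
The new guard above rejects malformed BB_NO_NETWORK values before deciding whether to run the fetch test; condensed into a standalone check (the function name is illustrative):

    def network_enabled(bbn):
        # bbn is the raw BB_NO_NETWORK value; None means unset.
        if bbn not in (None, '0', '1'):
            raise ValueError('BB_NO_NETWORK should be "0" or "1", but it is "%s"' % bbn)
        return bbn != '1'

    assert network_enabled(None) and network_enabled('0')
    assert not network_enabled('1')
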
@@ -388,7 +394,7 @@ def check_connectivity(d):
def check_supported_distro(sanity_data):
from fnmatch import fnmatch
- tested_distros = sanity_data.getVar('SANITY_TESTED_DISTROS', True)
+ tested_distros = sanity_data.getVar('SANITY_TESTED_DISTROS')
if not tested_distros:
return
@@ -411,17 +417,17 @@ def check_sanity_validmachine(sanity_data):
messages = ""
# Check TUNE_ARCH is set
- if sanity_data.getVar('TUNE_ARCH', True) == 'INVALID':
+ if sanity_data.getVar('TUNE_ARCH') == 'INVALID':
messages = messages + 'TUNE_ARCH is unset. Please ensure your MACHINE configuration includes a valid tune configuration file which will set this correctly.\n'
# Check TARGET_OS is set
- if sanity_data.getVar('TARGET_OS', True) == 'INVALID':
+ if sanity_data.getVar('TARGET_OS') == 'INVALID':
messages = messages + 'Please set TARGET_OS directly, or choose a MACHINE or DISTRO that does so.\n'
# Check that we don't have duplicate entries in PACKAGE_ARCHS & that TUNE_PKGARCH is in PACKAGE_ARCHS
- pkgarchs = sanity_data.getVar('PACKAGE_ARCHS', True)
- tunepkg = sanity_data.getVar('TUNE_PKGARCH', True)
- defaulttune = sanity_data.getVar('DEFAULTTUNE', True)
+ pkgarchs = sanity_data.getVar('PACKAGE_ARCHS')
+ tunepkg = sanity_data.getVar('TUNE_PKGARCH')
+ defaulttune = sanity_data.getVar('DEFAULTTUNE')
tunefound = False
seen = {}
dups = []
@@ -448,7 +454,7 @@ def check_gcc_march(sanity_data):
message = ""
# Check if -march not in BUILD_CFLAGS
- if sanity_data.getVar("BUILD_CFLAGS",True).find("-march") < 0:
+ if sanity_data.getVar("BUILD_CFLAGS").find("-march") < 0:
result = False
# Construct a test file
@@ -469,7 +475,7 @@ def check_gcc_march(sanity_data):
result = True;
if not result:
- build_arch = sanity_data.getVar('BUILD_ARCH', True)
+ build_arch = sanity_data.getVar('BUILD_ARCH')
status,res = oe.utils.getstatusoutput(sanity_data.expand("${BUILD_CC} -march=%s gcc_test.c -o gcc_test" % build_arch))
if status == 0:
message = "BUILD_CFLAGS_append = \" -march=%s\"" % build_arch
@@ -557,15 +563,15 @@ def check_perl_modules(sanity_data):
return None
def sanity_check_conffiles(d):
- funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS', True).split()
+ funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS').split()
for func in funcs:
conffile, current_version, required_version, func = func.split(":")
- if check_conf_exists(conffile, d) and d.getVar(current_version, True) is not None and \
- d.getVar(current_version, True) != d.getVar(required_version, True):
+ if check_conf_exists(conffile, d) and d.getVar(current_version) is not None and \
+ d.getVar(current_version) != d.getVar(required_version):
try:
bb.build.exec_func(func, d, pythonexception=True)
except NotImplementedError as e:
- bb.fatal(e)
+ bb.fatal(str(e))
d.setVar("BB_INVALIDCONF", True)
def sanity_handle_abichanges(status, d):
@@ -574,55 +580,16 @@ def sanity_handle_abichanges(status, d):
#
import subprocess
- current_abi = d.getVar('OELAYOUT_ABI', True)
- abifile = d.getVar('SANITY_ABIFILE', True)
+ current_abi = d.getVar('OELAYOUT_ABI')
+ abifile = d.getVar('SANITY_ABIFILE')
if os.path.exists(abifile):
with open(abifile, "r") as f:
abi = f.read().strip()
if not abi.isdigit():
with open(abifile, "w") as f:
f.write(current_abi)
- elif abi == "2" and current_abi == "3":
- bb.note("Converting staging from layout version 2 to layout version 3")
- subprocess.call(d.expand("mv ${TMPDIR}/staging ${TMPDIR}/sysroots"), shell=True)
- subprocess.call(d.expand("ln -s sysroots ${TMPDIR}/staging"), shell=True)
- subprocess.call(d.expand("cd ${TMPDIR}/stamps; for i in */*do_populate_staging; do new=`echo $i | sed -e 's/do_populate_staging/do_populate_sysroot/'`; mv $i $new; done"), shell=True)
- with open(abifile, "w") as f:
- f.write(current_abi)
- elif abi == "3" and current_abi == "4":
- bb.note("Converting staging layout from version 3 to layout version 4")
- if os.path.exists(d.expand("${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS}")):
- subprocess.call(d.expand("mv ${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS} ${STAGING_BINDIR_CROSS}"), shell=True)
- subprocess.call(d.expand("ln -s ${STAGING_BINDIR_CROSS} ${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS}"), shell=True)
- with open(abifile, "w") as f:
- f.write(current_abi)
- elif abi == "4":
- status.addresult("Staging layout has changed. The cross directory has been deprecated and cross packages are now built under the native sysroot.\nThis requires a rebuild.\n")
- elif abi == "5" and current_abi == "6":
- bb.note("Converting staging layout from version 5 to layout version 6")
- subprocess.call(d.expand("mv ${TMPDIR}/pstagelogs ${SSTATE_MANIFESTS}"), shell=True)
- with open(abifile, "w") as f:
- f.write(current_abi)
- elif abi == "7" and current_abi == "8":
- status.addresult("Your configuration is using stamp files including the sstate hash but your build directory was built with stamp files that do not include this.\nTo continue, either rebuild or switch back to the OEBasic signature handler with BB_SIGNATURE_HANDLER = 'OEBasic'.\n")
- elif (abi != current_abi and current_abi == "9"):
- status.addresult("The layout of the TMPDIR STAMPS directory has changed. Please clean out TMPDIR and rebuild (sstate will be still be valid and reused)\n")
- elif (abi != current_abi and current_abi == "10" and (abi == "8" or abi == "9")):
- bb.note("Converting staging layout from version 8/9 to layout version 10")
- cmd = d.expand("grep -r -l sysroot-providers/virtual_kernel ${SSTATE_MANIFESTS}")
- ret, result = oe.utils.getstatusoutput(cmd)
- result = result.split()
- for f in result:
- bb.note("Uninstalling manifest file %s" % f)
- sstate_clean_manifest(f, d)
- with open(abifile, "w") as f:
- f.write(current_abi)
- elif abi == "10" and current_abi == "11":
- bb.note("Converting staging layout from version 10 to layout version 11")
- # Files in xf86-video-modesetting moved to xserver-xorg and bitbake can't currently handle that:
- subprocess.call(d.expand("rm ${TMPDIR}/sysroots/*/usr/lib/xorg/modules/drivers/modesetting_drv.so ${TMPDIR}/sysroots/*/pkgdata/runtime/xf86-video-modesetting* ${TMPDIR}/sysroots/*/pkgdata/runtime-reverse/xf86-video-modesetting* ${TMPDIR}/sysroots/*/pkgdata/shlibs2/xf86-video-modesetting*"), shell=True)
- with open(abifile, "w") as f:
- f.write(current_abi)
+ elif int(abi) <= 11 and current_abi == "12":
+ status.addresult("The layout of TMPDIR changed for Recipe Specific Sysroots.\nConversion doesn't make sense and this change will rebuild everything so please delete TMPDIR (%s).\n" % d.getVar("TMPDIR"))
elif (abi != current_abi):
# Code to convert from one ABI to another could go here if possible.
status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
@@ -670,12 +637,12 @@ def check_sanity_version_change(status, d):
missing = missing + "GNU make,"
if not check_app_exists('${BUILD_CC}', d):
- missing = missing + "C Compiler (%s)," % d.getVar("BUILD_CC", True)
+ missing = missing + "C Compiler (%s)," % d.getVar("BUILD_CC")
if not check_app_exists('${BUILD_CXX}', d):
- missing = missing + "C++ Compiler (%s)," % d.getVar("BUILD_CXX", True)
+ missing = missing + "C++ Compiler (%s)," % d.getVar("BUILD_CXX")
- required_utilities = d.getVar('SANITY_REQUIRED_UTILITIES', True)
+ required_utilities = d.getVar('SANITY_REQUIRED_UTILITIES')
for util in required_utilities.split():
if not check_app_exists(util, d):
@@ -685,7 +652,7 @@ def check_sanity_version_change(status, d):
missing = missing.rstrip(',')
status.addresult("Please install the following missing utilities: %s\n" % missing)
- assume_provided = d.getVar('ASSUME_PROVIDED', True).split()
+ assume_provided = d.getVar('ASSUME_PROVIDED').split()
# Check user doesn't have ASSUME_PROVIDED = instead of += in local.conf
if "diffstat-native" not in assume_provided:
status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n')
@@ -708,7 +675,7 @@ def check_sanity_version_change(status, d):
status.addresult(" __sync_bool_compare_and_swap (&atomic, 2, 3);\n")
# Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
tmpdirmode = os.stat(tmpdir).st_mode
if (tmpdirmode & stat.S_ISGID):
@@ -732,7 +699,7 @@ def check_sanity_version_change(status, d):
if netcheck:
status.network_error = True
- nolibs = d.getVar('NO32LIBS', True)
+ nolibs = d.getVar('NO32LIBS')
if not nolibs:
lib32path = '/lib'
if os.path.exists('/lib64') and ( os.path.islink('/lib64') or os.path.islink('/lib') ):
@@ -741,7 +708,7 @@ def check_sanity_version_change(status, d):
if os.path.exists('%s/libc.so.6' % lib32path) and not os.path.exists('/usr/include/gnu/stubs-32.h'):
status.addresult("You have a 32-bit libc, but no 32-bit headers. You must install the 32-bit libc headers.\n")
- bbpaths = d.getVar('BBPATH', True).split(":")
+ bbpaths = d.getVar('BBPATH').split(":")
if ("." in bbpaths or "./" in bbpaths or "" in bbpaths):
status.addresult("BBPATH references the current directory, either through " \
"an empty entry, a './' or a '.'.\n\t This is unsafe and means your "\
@@ -751,7 +718,7 @@ def check_sanity_version_change(status, d):
"references.\n" \
"Parsed BBPATH is" + str(bbpaths));
- oes_bb_conf = d.getVar( 'OES_BITBAKE_CONF', True)
+ oes_bb_conf = d.getVar('OES_BITBAKE_CONF')
if not oes_bb_conf:
status.addresult('You are not using the OpenEmbedded version of conf/bitbake.conf. This means your environment is misconfigured, in particular check BBPATH.\n')
@@ -786,26 +753,26 @@ def check_sanity_everybuild(status, d):
# Check the bitbake version meets minimum requirements
from distutils.version import LooseVersion
- minversion = d.getVar('BB_MIN_VERSION', True)
+ minversion = d.getVar('BB_MIN_VERSION')
if (LooseVersion(bb.__version__) < LooseVersion(minversion)):
status.addresult('Bitbake version %s is required and version %s was found\n' % (minversion, bb.__version__))
sanity_check_locale(d)
- paths = d.getVar('PATH', True).split(":")
+ paths = d.getVar('PATH').split(":")
if "." in paths or "./" in paths or "" in paths:
status.addresult("PATH contains '.', './' or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n")
# Check that the DISTRO is valid, if set
# need to take into account DISTRO renaming DISTRO
- distro = d.getVar('DISTRO', True)
+ distro = d.getVar('DISTRO')
if distro and distro != "nodistro":
if not ( check_conf_exists("conf/distro/${DISTRO}.conf", d) or check_conf_exists("conf/distro/include/${DISTRO}.inc", d) ):
- status.addresult("DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % d.getVar("DISTRO", True))
+ status.addresult("DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % d.getVar("DISTRO"))
# Check that DL_DIR is set, exists and is writable. In theory, we should never even hit the check if DL_DIR isn't
# set, since so much relies on it being set.
- dldir = d.getVar('DL_DIR', True)
+ dldir = d.getVar('DL_DIR')
if not dldir:
status.addresult("DL_DIR is not set. Your environment is misconfigured, check that DL_DIR is set, and if the directory exists, that it is writable. \n")
if os.path.exists(dldir) and not os.access(dldir, os.W_OK):
@@ -814,9 +781,9 @@ def check_sanity_everybuild(status, d):
# Check that the MACHINE is valid, if it is set
machinevalid = True
- if d.getVar('MACHINE', True):
+ if d.getVar('MACHINE'):
if not check_conf_exists("conf/machine/${MACHINE}.conf", d):
- status.addresult('Please set a valid MACHINE in your local.conf or environment\n')
+ status.addresult('MACHINE=%s is invalid. Please set a valid MACHINE in your local.conf, environment or other configuration file.\n' % (d.getVar('MACHINE')))
machinevalid = False
else:
status.addresult(check_sanity_validmachine(d))
@@ -827,7 +794,7 @@ def check_sanity_everybuild(status, d):
status.addresult(check_toolchain(d))
# Check that the SDKMACHINE is valid, if it is set
- if d.getVar('SDKMACHINE', True):
+ if d.getVar('SDKMACHINE'):
if not check_conf_exists("conf/machine-sdk/${SDKMACHINE}.conf", d):
status.addresult('Specified SDKMACHINE value is not valid\n')
elif d.getVar('SDK_ARCH', False) == "${BUILD_ARCH}":
@@ -840,7 +807,7 @@ def check_sanity_everybuild(status, d):
status.addresult("Please use a umask which allows a+rx and u+rwx\n")
os.umask(omask)
- if d.getVar('TARGET_ARCH', True) == "arm":
+ if d.getVar('TARGET_ARCH') == "arm":
# This path is no longer user-readable in modern (very recent) Linux
try:
if os.path.exists("/proc/sys/vm/mmap_min_addr"):
@@ -853,7 +820,7 @@ def check_sanity_everybuild(status, d):
except:
pass
- oeroot = d.getVar('COREBASE', True)
+ oeroot = d.getVar('COREBASE')
if oeroot.find('+') != -1:
status.addresult("Error, you have an invalid character (+) in your COREBASE directory path. Please move the installation to a directory which doesn't include any + characters.")
if oeroot.find('@') != -1:
@@ -866,20 +833,18 @@ def check_sanity_everybuild(status, d):
mirror_vars = ['MIRRORS', 'PREMIRRORS', 'SSTATE_MIRRORS']
protocols = ['http', 'ftp', 'file', 'https', \
'git', 'gitsm', 'hg', 'osc', 'p4', 'svn', \
- 'bzr', 'cvs', 'npm', 'sftp', 'ssh']
+ 'bzr', 'cvs', 'npm', 'sftp', 'ssh', 's3' ]
for mirror_var in mirror_vars:
- mirrors = (d.getVar(mirror_var, True) or '').replace('\\n', '\n').split('\n')
- for mirror_entry in mirrors:
- mirror_entry = mirror_entry.strip()
- if not mirror_entry:
- # ignore blank lines
- continue
+ mirrors = (d.getVar(mirror_var) or '').replace('\\n', ' ').split()
- try:
- pattern, mirror = mirror_entry.split()
- except ValueError:
- bb.warn('Invalid %s: %s, should be 2 members.' % (mirror_var, mirror_entry.strip()))
- continue
+ # Split into pairs
+ if len(mirrors) % 2 != 0:
+ bb.warn('Invalid mirror variable value for %s: %s, should contain paired members.' % (mirror_var, ' '.join(mirrors)))
+ continue
+ mirrors = list(zip(*[iter(mirrors)]*2))
+
+ for mirror_entry in mirrors:
+ pattern, mirror = mirror_entry
decoded = bb.fetch2.decodeurl(pattern)
try:
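The rewritten parsing flattens the mirror variable into whitespace-separated tokens and groups them into (pattern, mirror) pairs with the zip(*[iter(seq)]*2) idiom. As a standalone sketch (the mirror URL is illustrative):

    def pair_up(tokens):
        # zip(*[iter(tokens)]*2) pulls two items at a time from one shared
        # iterator and silently drops a trailing odd element, hence the
        # explicit length check before grouping.
        if len(tokens) % 2 != 0:
            raise ValueError("expected paired members: %s" % " ".join(tokens))
        return list(zip(*[iter(tokens)] * 2))

    mirrors = "git://.*/.* http://mirror.example/ ftp://.*/.* http://mirror.example/".split()
    print(pair_up(mirrors))
    # [('git://.*/.*', 'http://mirror.example/'), ('ftp://.*/.*', 'http://mirror.example/')]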
@@ -907,7 +872,7 @@ def check_sanity_everybuild(status, d):
check_symlink(mirror_base, d)
# Check that TMPDIR hasn't changed location since the last time we were run
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
checkfile = os.path.join(tmpdir, "saved_tmpdir")
if os.path.exists(checkfile):
with open(checkfile, "r") as f:
@@ -946,8 +911,8 @@ def check_sanity(sanity_data):
status = SanityStatus()
- tmpdir = sanity_data.getVar('TMPDIR', True)
- sstate_dir = sanity_data.getVar('SSTATE_DIR', True)
+ tmpdir = sanity_data.getVar('TMPDIR')
+ sstate_dir = sanity_data.getVar('SSTATE_DIR')
check_symlink(sstate_dir, sanity_data)
@@ -971,7 +936,7 @@ def check_sanity(sanity_data):
check_sanity_everybuild(status, sanity_data)
- sanity_version = int(sanity_data.getVar('SANITY_VERSION', True) or 1)
+ sanity_version = int(sanity_data.getVar('SANITY_VERSION') or 1)
network_error = False
# NATIVELSBSTRING var may have been overridden with "universal", so
# get actual host distribution id and version
diff --git a/import-layers/yocto-poky/meta/classes/sign_ipk.bbclass b/import-layers/yocto-poky/meta/classes/sign_ipk.bbclass
index a481f6d9a..e5057b779 100644
--- a/import-layers/yocto-poky/meta/classes/sign_ipk.bbclass
+++ b/import-layers/yocto-poky/meta/classes/sign_ipk.bbclass
@@ -29,10 +29,10 @@ IPK_GPG_SIGNATURE_TYPE ?= 'ASC'
python () {
# Check configuration
for var in ('IPK_GPG_NAME', 'IPK_GPG_PASSPHRASE_FILE'):
- if not d.getVar(var, True):
+ if not d.getVar(var):
raise_sanity_error("You need to define %s in the config" % var, d)
- sigtype = d.getVar("IPK_GPG_SIGNATURE_TYPE", True)
+ sigtype = d.getVar("IPK_GPG_SIGNATURE_TYPE")
if sigtype.upper() != "ASC" and sigtype.upper() != "BIN":
raise_sanity_error("Bad value for IPK_GPG_SIGNATURE_TYPE (%s), use either ASC or BIN" % sigtype)
}
@@ -42,11 +42,11 @@ def sign_ipk(d, ipk_to_sign):
bb.debug(1, 'Signing ipk: %s' % ipk_to_sign)
- signer = get_signer(d, d.getVar('IPK_GPG_BACKEND', True))
- sig_type = d.getVar('IPK_GPG_SIGNATURE_TYPE', True)
+ signer = get_signer(d, d.getVar('IPK_GPG_BACKEND'))
+ sig_type = d.getVar('IPK_GPG_SIGNATURE_TYPE')
is_ascii_sig = (sig_type.upper() != "BIN")
signer.detach_sign(ipk_to_sign,
- d.getVar('IPK_GPG_NAME', True),
- d.getVar('IPK_GPG_PASSPHRASE_FILE', True),
+ d.getVar('IPK_GPG_NAME'),
+ d.getVar('IPK_GPG_PASSPHRASE_FILE'),
armor=is_ascii_sig)
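The same validate-then-sign shape repeats in the feed and rpm classes below: required variables are checked at parse time and the signature type is constrained to ASC or BIN before any signing happens. A reduced sketch of the check with a plain dict standing in for the datastore (get_signer and the GPG backends live in oe.gpg_sign and are not reproduced here):

    def check_signing_config(conf, prefix):
        # Required keys first, then the constrained signature-type enum.
        for var in (prefix + "_GPG_NAME", prefix + "_GPG_PASSPHRASE_FILE"):
            if not conf.get(var):
                raise ValueError("You need to define %s in the config" % var)
        sigtype = conf.get(prefix + "_GPG_SIGNATURE_TYPE", "ASC")
        if sigtype.upper() not in ("ASC", "BIN"):
            raise ValueError("Bad value for %s_GPG_SIGNATURE_TYPE (%s), use either ASC or BIN"
                             % (prefix, sigtype))
        # ASC produces ASCII-armored detached signatures, BIN binary ones.
        return sigtype.upper() == "ASC"

    armor = check_signing_config({"IPK_GPG_NAME": "feedkey",
                                  "IPK_GPG_PASSPHRASE_FILE": "/path/to/passfile"}, "IPK")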
diff --git a/import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass b/import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass
index 31a6e9b04..71df03bab 100644
--- a/import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass
+++ b/import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass
@@ -31,10 +31,10 @@ PACKAGE_FEED_GPG_SIGNATURE_TYPE ?= 'ASC'
python () {
# Check sanity of configuration
for var in ('PACKAGE_FEED_GPG_NAME', 'PACKAGE_FEED_GPG_PASSPHRASE_FILE'):
- if not d.getVar(var, True):
+ if not d.getVar(var):
raise_sanity_error("You need to define %s in the config" % var, d)
- sigtype = d.getVar("PACKAGE_FEED_GPG_SIGNATURE_TYPE", True)
+ sigtype = d.getVar("PACKAGE_FEED_GPG_SIGNATURE_TYPE")
if sigtype.upper() != "ASC" and sigtype.upper() != "BIN":
raise_sanity_error("Bad value for PACKAGE_FEED_GPG_SIGNATURE_TYPE (%s), use either ASC or BIN" % sigtype)
}
diff --git a/import-layers/yocto-poky/meta/classes/sign_rpm.bbclass b/import-layers/yocto-poky/meta/classes/sign_rpm.bbclass
index a8ea75faa..bc2e94710 100644
--- a/import-layers/yocto-poky/meta/classes/sign_rpm.bbclass
+++ b/import-layers/yocto-poky/meta/classes/sign_rpm.bbclass
@@ -22,31 +22,24 @@ RPM_GPG_BACKEND ?= 'local'
python () {
- if d.getVar('RPM_GPG_PASSPHRASE_FILE', True):
+ if d.getVar('RPM_GPG_PASSPHRASE_FILE'):
raise_sanity_error('RPM_GPG_PASSPHRASE_FILE is replaced by RPM_GPG_PASSPHRASE', d)
# Check configuration
for var in ('RPM_GPG_NAME', 'RPM_GPG_PASSPHRASE'):
- if not d.getVar(var, True):
+ if not d.getVar(var):
raise_sanity_error("You need to define %s in the config" % var, d)
-
- # Set the expected location of the public key
- d.setVar('RPM_GPG_PUBKEY', os.path.join(d.getVar('STAGING_DIR_TARGET', False),
- d.getVar('sysconfdir', False),
- 'pki',
- 'rpm-gpg',
- 'RPM-GPG-KEY-${DISTRO_VERSION}'))
}
python sign_rpm () {
import glob
from oe.gpg_sign import get_signer
- signer = get_signer(d, d.getVar('RPM_GPG_BACKEND', True))
- rpms = glob.glob(d.getVar('RPM_PKGWRITEDIR', True) + '/*')
+ signer = get_signer(d, d.getVar('RPM_GPG_BACKEND'))
+ rpms = glob.glob(d.getVar('RPM_PKGWRITEDIR') + '/*')
signer.sign_rpms(rpms,
- d.getVar('RPM_GPG_NAME', True),
- d.getVar('RPM_GPG_PASSPHRASE', True))
+ d.getVar('RPM_GPG_NAME'),
+ d.getVar('RPM_GPG_PASSPHRASE'))
}
do_package_index[depends] += "signing-keys:do_deploy"
diff --git a/import-layers/yocto-poky/meta/classes/siteconfig.bbclass b/import-layers/yocto-poky/meta/classes/siteconfig.bbclass
index 45dce489d..bb491d299 100644
--- a/import-layers/yocto-poky/meta/classes/siteconfig.bbclass
+++ b/import-layers/yocto-poky/meta/classes/siteconfig.bbclass
@@ -2,12 +2,12 @@ python siteconfig_do_siteconfig () {
shared_state = sstate_state_fromvars(d)
if shared_state['task'] != 'populate_sysroot':
return
- if not os.path.isdir(os.path.join(d.getVar('FILE_DIRNAME', True), 'site_config')):
+ if not os.path.isdir(os.path.join(d.getVar('FILE_DIRNAME'), 'site_config')):
bb.debug(1, "No site_config directory, skipping do_siteconfig")
return
+ sstate_install(shared_state, d)
bb.build.exec_func('do_siteconfig_gencache', d)
sstate_clean(shared_state, d)
- sstate_install(shared_state, d)
}
EXTRASITECONFIG ?= ""
diff --git a/import-layers/yocto-poky/meta/classes/siteinfo.bbclass b/import-layers/yocto-poky/meta/classes/siteinfo.bbclass
index 6eca004c5..2c33732be 100644
--- a/import-layers/yocto-poky/meta/classes/siteinfo.bbclass
+++ b/import-layers/yocto-poky/meta/classes/siteinfo.bbclass
@@ -89,6 +89,8 @@ def siteinfo_data(d):
"mips64el-linux-musl": "mips64el-linux",
"mips64-linux-gnun32": "mips-linux bit-32",
"mips64el-linux-gnun32": "mipsel-linux bit-32",
+ "mipsisa64r6-linux-gnun32": "mipsisa32r6-linux bit-32",
+ "mipsisa64r6el-linux-gnun32": "mipsisa32r6el-linux bit-32",
"powerpc-linux": "powerpc32-linux",
"powerpc-linux-musl": "powerpc-linux powerpc32-linux",
"powerpc-linux-uclibc": "powerpc-linux powerpc32-linux",
@@ -113,14 +115,14 @@ def siteinfo_data(d):
# Add in any extra user supplied data which may come from a BSP layer, removing the
# need to always change this class directly
- extra_siteinfo = (d.getVar("SITEINFO_EXTRA_DATAFUNCS", True) or "").split()
+ extra_siteinfo = (d.getVar("SITEINFO_EXTRA_DATAFUNCS") or "").split()
for m in extra_siteinfo:
call = m + "(archinfo, osinfo, targetinfo, d)"
locs = { "archinfo" : archinfo, "osinfo" : osinfo, "targetinfo" : targetinfo, "d" : d}
archinfo, osinfo, targetinfo = bb.utils.better_eval(call, locs)
- hostarch = d.getVar("HOST_ARCH", True)
- hostos = d.getVar("HOST_OS", True)
+ hostarch = d.getVar("HOST_ARCH")
+ hostos = d.getVar("HOST_OS")
target = "%s-%s" % (hostarch, hostos)
sitedata = []
@@ -144,7 +146,7 @@ python () {
d.setVar("SITEINFO_ENDIANNESS", "be")
else:
bb.error("Unable to determine endianness for architecture '%s'" %
- d.getVar("HOST_ARCH", True))
+ d.getVar("HOST_ARCH"))
bb.fatal("Please add your architecture to siteinfo.bbclass")
if "bit-32" in sitedata:
@@ -153,14 +155,14 @@ python () {
d.setVar("SITEINFO_BITS", "64")
else:
bb.error("Unable to determine bit size for architecture '%s'" %
- d.getVar("HOST_ARCH", True))
+ d.getVar("HOST_ARCH"))
bb.fatal("Please add your architecture to siteinfo.bbclass")
}
def siteinfo_get_files(d, aclocalcache = False):
sitedata = siteinfo_data(d)
sitefiles = ""
- for path in d.getVar("BBPATH", True).split(":"):
+ for path in d.getVar("BBPATH").split(":"):
for element in sitedata:
filename = os.path.join(path, "site", element)
if os.path.exists(filename):
@@ -177,7 +179,7 @@ def siteinfo_get_files(d, aclocalcache = False):
# issues and the directory being created/removed whilst this code executes. This can happen
# when a multilib recipe is parsed along with its base variant which may be running at the time
# causing rare but nasty failures
- path_siteconfig = d.getVar('ACLOCALDIR', True)
+ path_siteconfig = d.getVar('ACLOCALDIR')
if path_siteconfig and os.path.isdir(path_siteconfig):
for i in os.listdir(path_siteconfig):
if not i.endswith("_config"):
diff --git a/import-layers/yocto-poky/meta/classes/spdx.bbclass b/import-layers/yocto-poky/meta/classes/spdx.bbclass
index 89394d3a9..c5f544d2a 100644
--- a/import-layers/yocto-poky/meta/classes/spdx.bbclass
+++ b/import-layers/yocto-poky/meta/classes/spdx.bbclass
@@ -26,20 +26,20 @@ python do_spdx () {
import json, shutil
info = {}
- info['workdir'] = d.getVar('WORKDIR', True)
- info['sourcedir'] = d.getVar('SPDX_S', True)
- info['pn'] = d.getVar('PN', True)
- info['pv'] = d.getVar('PV', True)
- info['spdx_version'] = d.getVar('SPDX_VERSION', True)
- info['data_license'] = d.getVar('DATA_LICENSE', True)
-
- sstatedir = d.getVar('SPDXSSTATEDIR', True)
+ info['workdir'] = d.getVar('WORKDIR')
+ info['sourcedir'] = d.getVar('SPDX_S')
+ info['pn'] = d.getVar('PN')
+ info['pv'] = d.getVar('PV')
+ info['spdx_version'] = d.getVar('SPDX_VERSION')
+ info['data_license'] = d.getVar('DATA_LICENSE')
+
+ sstatedir = d.getVar('SPDXSSTATEDIR')
sstatefile = os.path.join(sstatedir, info['pn'] + info['pv'] + ".spdx")
- manifest_dir = d.getVar('SPDX_MANIFEST_DIR', True)
+ manifest_dir = d.getVar('SPDX_MANIFEST_DIR')
info['outfile'] = os.path.join(manifest_dir, info['pn'] + ".spdx" )
- info['spdx_temp_dir'] = d.getVar('SPDX_TEMP_DIR', True)
+ info['spdx_temp_dir'] = d.getVar('SPDX_TEMP_DIR')
info['tar_file'] = os.path.join(info['workdir'], info['pn'] + ".tar.gz" )
# Make sure important dirs exist
@@ -74,9 +74,9 @@ python do_spdx () {
foss_license_info = cached_spdx['Licenses']
else:
## setup fossology command
- foss_server = d.getVar('FOSS_SERVER', True)
- foss_flags = d.getVar('FOSS_WGET_FLAGS', True)
- foss_full_spdx = d.getVar('FOSS_FULL_SPDX', True) == "true" or False
+ foss_server = d.getVar('FOSS_SERVER')
+ foss_flags = d.getVar('FOSS_WGET_FLAGS')
+ foss_full_spdx = d.getVar('FOSS_FULL_SPDX') == "true" or False
foss_command = "wget %s --post-file=%s %s"\
% (foss_flags, info['tar_file'], foss_server)
diff --git a/import-layers/yocto-poky/meta/classes/sstate.bbclass b/import-layers/yocto-poky/meta/classes/sstate.bbclass
index 5b92c5485..0a12935be 100644
--- a/import-layers/yocto-poky/meta/classes/sstate.bbclass
+++ b/import-layers/yocto-poky/meta/classes/sstate.bbclass
@@ -11,7 +11,7 @@ def generate_sstatefn(spec, hash, d):
SSTATE_PKGARCH = "${PACKAGE_ARCH}"
SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
-SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC', True), d.getVar('BB_TASKHASH', True), d)}"
+SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_TASKHASH'), d)}"
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
@@ -25,14 +25,15 @@ PV[vardepvalue] = "${PV}"
SSTATE_EXTRAPATH[vardepvalue] = ""
# For multilib rpm the allarch packagegroup files can overwrite (in theory they're identical)
-SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/ ${DEPLOY_DIR_RPM}/all/"
+SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/ ${DEPLOY_DIR_RPM}/noarch/"
# Avoid docbook/sgml catalog warnings for now
SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
# Archive the sources for many architectures in one deploy folder
SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"
-SSTATE_SCAN_FILES ?= "*.la *-config *_config"
-SSTATE_SCAN_CMD ?= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES", True).split())}" \) -type f'
+SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
+SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
+SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
@@ -53,7 +54,7 @@ SSTATEPOSTCREATEFUNCS = ""
SSTATEPREINSTFUNCS = ""
SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
SSTATEPOSTINSTFUNCS = ""
-EXTRA_STAGING_FIXMES ?= ""
+EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"
SSTATECLEANFUNCS = ""
# Check whether sstate exists for tasks that support sstate and are in the
@@ -82,7 +83,7 @@ python () {
d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
elif bb.data.inherits_class('cross-canadian', d):
d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
- elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH", True) == "all":
+ elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
d.setVar('SSTATE_PKGARCH', "allarch")
else:
d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))
@@ -92,13 +93,7 @@ python () {
d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
d.setVar('SSTATE_EXTRAPATHWILDCARD', "*/")
- # These classes encode staging paths into their scripts data so can only be
- # reused if we manipulate the paths
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d):
- scan_cmd = "grep -Irl ${STAGING_DIR} ${SSTATE_BUILDDIR}"
- d.setVar('SSTATE_SCAN_CMD', scan_cmd)
-
- unique_tasks = sorted(set((d.getVar('SSTATETASKS', True) or "").split()))
+ unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
d.setVar('SSTATETASKS', " ".join(unique_tasks))
for task in unique_tasks:
d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
@@ -116,19 +111,20 @@ def sstate_init(task, d):
def sstate_state_fromvars(d, task = None):
if task is None:
- task = d.getVar('BB_CURRENTTASK', True)
+ task = d.getVar('BB_CURRENTTASK')
if not task:
bb.fatal("sstate code running without task context?!")
task = task.replace("_setscene", "")
if task.startswith("do_"):
task = task[3:]
- inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs', True) or "").split()
- outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs', True) or "").split()
- plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs', True) or "").split()
- lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile', True) or "").split()
- lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared', True) or "").split()
- interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs', True) or "").split()
+ inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
+ outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
+ plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
+ lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
+ lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
+ interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs') or "").split()
+ fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
if not task or len(inputs) != len(outputs):
bb.fatal("sstate variables not setup correctly?!")
@@ -144,6 +140,7 @@ def sstate_state_fromvars(d, task = None):
ss['lockfiles-shared'] = lockfilesshared
ss['plaindirs'] = plaindirs
ss['interceptfuncs'] = interceptfuncs
+ ss['fixmedir'] = fixmedir
return ss
def sstate_add(ss, source, dest, d):
@@ -193,15 +190,18 @@ def sstate_install(ss, d):
srcdir = os.path.join(walkroot, dir)
dstdir = srcdir.replace(state[1], state[2])
#bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
+ if os.path.islink(srcdir):
+ sharedfiles.append(dstdir)
+ continue
if not dstdir.endswith("/"):
dstdir = dstdir + "/"
shareddirs.append(dstdir)
# Check the file list for conflicts against files which already exist
- whitelist = (d.getVar("SSTATE_DUPWHITELIST", True) or "").split()
+ whitelist = (d.getVar("SSTATE_DUPWHITELIST") or "").split()
match = []
for f in sharedfiles:
- if os.path.exists(f):
+ if os.path.exists(f) and not os.path.islink(f):
f = os.path.normpath(f)
realmatch = True
for w in whitelist:
@@ -211,25 +211,27 @@ def sstate_install(ss, d):
break
if realmatch:
match.append(f)
- sstate_search_cmd = "grep -rl '%s' %s --exclude=master.list | sed -e 's:^.*/::' -e 's:\.populate-sysroot::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
+ sstate_search_cmd = "grep -rlF '%s' %s --exclude=master.list | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
- if search_output != "":
- match.append("Matched in %s" % search_output.rstrip())
+ if search_output:
+ match.append(" (matched in %s)" % search_output.decode('utf-8').rstrip())
+ else:
+ match.append(" (not matched to any task)")
if match:
bb.error("The recipe %s is trying to install files into a shared " \
"area when those files already exist. Those files and their manifest " \
- "location are:\n %s\nPlease verify which recipe should provide the " \
- "above files.\nThe build has stopped as continuing in this scenario WILL " \
- "break things, if not now, possibly in the future (we've seen builds fail " \
+ "location are:\n %s\nPlease verify which recipe should provide the " \
+ "above files.\n\nThe build has stopped, as continuing in this scenario WILL " \
+ "break things - if not now, possibly in the future (we've seen builds fail " \
"several months later). If the system knew how to recover from this " \
- "automatically it would however there are several different scenarios " \
+ "automatically it would, however there are several different scenarios " \
"which can result in this and we don't know which one this is. It may be " \
"you have switched providers of something like virtual/kernel (e.g. from " \
"linux-yocto to linux-yocto-dev), in that case you need to execute the " \
"clean task for both recipes and it will resolve this error. It may be " \
"you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
- "those recipes should again resolve this error however switching " \
- "DISTRO_FEATURES on an existing build directory is not supported, you " \
+ "those recipes should again resolve this error, however switching " \
+ "DISTRO_FEATURES on an existing build directory is not supported - you " \
"should really clean out tmp and rebuild (reusing sstate should be safe). " \
"It could be the overlapping files detected are harmless in which case " \
"adding them to SSTATE_DUPWHITELIST may be the correct solution. It could " \
@@ -237,9 +239,13 @@ def sstate_install(ss, d):
"things (e.g. bluez 4 and bluez 5 and the correct solution for that would " \
"be to resolve the conflict. If in doubt, please ask on the mailing list, " \
"sharing the error and filelist above." % \
- (d.getVar('PN', True), "\n ".join(match)))
+ (d.getVar('PN'), "\n ".join(match)))
bb.fatal("If the above message is too much, the simpler version is you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")
+ if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
+ sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
+ sharedfiles.append(ss['fixmedir'] + "/fixmepath")
+
# Write out the manifest
f = open(manifest, "w")
for file in sharedfiles:
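The conflict check above boils down to a prefix match of each pre-existing file against SSTATE_DUPWHITELIST: a hit means the overlap is tolerated, a miss becomes part of the fatal report. In isolation (paths illustrative):

    import os

    def is_whitelisted(path, whitelist):
        # Mirrors the realmatch loop: any whitelist entry that prefixes the
        # normalised path marks the overlap as acceptable.
        path = os.path.normpath(path)
        return any(path.startswith(w) for w in whitelist)

    whitelist = ["/deploy/licenses/", "/deploy/images/"]
    print(is_whitelisted("/deploy/licenses/zlib/COPYING", whitelist))     # True
    print(is_whitelisted("/sysroots/x86_64/usr/lib/libz.so", whitelist))  # False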
@@ -258,7 +264,7 @@ def sstate_install(ss, d):
i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
l = bb.utils.lockfile(i + ".lock")
- filedata = d.getVar("STAMP", True) + " " + d2.getVar("SSTATE_MANFILEPREFIX", True) + " " + d.getVar("WORKDIR", True) + "\n"
+ filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
manifests = []
if os.path.exists(i):
with open(i, "r") as f:
@@ -273,7 +279,7 @@ def sstate_install(ss, d):
if os.path.exists(state[1]):
oe.path.copyhardlinktree(state[1], state[2])
- for postinst in (d.getVar('SSTATEPOSTINSTFUNCS', True) or '').split():
+ for postinst in (d.getVar('SSTATEPOSTINSTFUNCS') or '').split():
# All hooks should run in the SSTATE_INSTDIR
bb.build.exec_func(postinst, d, (sstateinst,))
@@ -284,20 +290,11 @@ sstate_install[vardepsexclude] += "SSTATE_DUPWHITELIST STATE_MANMACH SSTATE_MANF
sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
def sstate_installpkg(ss, d):
- import oe.path
- import subprocess
from oe.gpg_sign import get_signer
- def prepdir(dir):
- # remove dir if it exists, ensure any parent directories do exist
- if os.path.exists(dir):
- oe.path.remove(dir)
- bb.utils.mkdirhier(dir)
- oe.path.remove(dir)
-
sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
- sstatefetch = d.getVar('SSTATE_PKGNAME', True) + '_' + ss['task'] + ".tgz"
- sstatepkg = d.getVar('SSTATE_PKG', True) + '_' + ss['task'] + ".tgz"
+ sstatefetch = d.getVar('SSTATE_PKGNAME') + '_' + ss['task'] + ".tgz"
+ sstatepkg = d.getVar('SSTATE_PKG') + '_' + ss['task'] + ".tgz"
if not os.path.exists(sstatepkg):
pstaging_fetch(sstatefetch, sstatepkg, d)
@@ -311,22 +308,52 @@ def sstate_installpkg(ss, d):
d.setVar('SSTATE_INSTDIR', sstateinst)
d.setVar('SSTATE_PKG', sstatepkg)
- if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG", True), False):
+ if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
signer = get_signer(d, 'local')
if not signer.verify(sstatepkg + '.sig'):
bb.warn("Cannot verify signature on sstate package %s" % sstatepkg)
- for f in (d.getVar('SSTATEPREINSTFUNCS', True) or '').split() + ['sstate_unpack_package'] + (d.getVar('SSTATEPOSTUNPACKFUNCS', True) or '').split():
+ # Empty sstateinst directory, ensure it's clean
+ if os.path.exists(sstateinst):
+ oe.path.remove(sstateinst)
+ bb.utils.mkdirhier(sstateinst)
+
+ sstateinst = d.getVar("SSTATE_INSTDIR")
+ d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
+
+ for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
+ # All hooks should run in the SSTATE_INSTDIR
+ bb.build.exec_func(f, d, (sstateinst,))
+
+ return sstate_installpkgdir(ss, d)
+
+def sstate_installpkgdir(ss, d):
+ import oe.path
+ import subprocess
+
+ sstateinst = d.getVar("SSTATE_INSTDIR")
+ d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
+
+ for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
# All hooks should run in the SSTATE_INSTDIR
bb.build.exec_func(f, d, (sstateinst,))
+ def prepdir(dir):
+ # remove dir if it exists, ensure any parent directories do exist
+ if os.path.exists(dir):
+ oe.path.remove(dir)
+ bb.utils.mkdirhier(dir)
+ oe.path.remove(dir)
+
for state in ss['dirs']:
+ if d.getVar('SSTATE_SKIP_CREATION') == '1':
+ continue
prepdir(state[1])
os.rename(sstateinst + state[0], state[1])
sstate_install(ss, d)
for plain in ss['plaindirs']:
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
src = sstateinst + "/" + plain.replace(workdir, '')
dest = plain
bb.utils.mkdirhier(src)
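prepdir looks odd at first glance - it creates the directory only to delete it again - but the effect is to guarantee the parent hierarchy exists while leaving the final path itself absent, so the following os.rename lands cleanly. The same trick with stdlib calls standing in for bb.utils/oe.path:

    import os, shutil

    def prepdir(path):
        # Ensure parents of 'path' exist but 'path' itself does not, so a
        # subsequent os.rename(src, path) cannot collide with leftovers.
        if os.path.isdir(path):
            shutil.rmtree(path)
        elif os.path.lexists(path):
            os.remove(path)
        os.makedirs(path)   # creates any missing parents too
        os.rmdir(path)      # drop only the leaf, keeping the parents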
@@ -342,28 +369,40 @@ python sstate_hardcode_path_unpack () {
# sstate_hardcode_path(d)
import subprocess
- sstateinst = d.getVar('SSTATE_INSTDIR', True)
- fixmefn = sstateinst + "fixmepath"
+ sstateinst = d.getVar('SSTATE_INSTDIR')
+ sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
+ fixmefn = sstateinst + "fixmepath"
if os.path.isfile(fixmefn):
- staging = d.getVar('STAGING_DIR', True)
- staging_target = d.getVar('STAGING_DIR_TARGET', True)
- staging_host = d.getVar('STAGING_DIR_HOST', True)
-
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
- sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIR:%s:g'" % (staging)
- elif bb.data.inherits_class('cross', d):
- sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIR:%s:g'" % (staging_target, staging)
- else:
+ staging_target = d.getVar('RECIPE_SYSROOT')
+ staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
+
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
+ elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
+ sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
+ else:
+ sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)
- extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or ''
+ extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
for fixmevar in extra_staging_fixmes.split():
- fixme_path = d.getVar(fixmevar, True)
+ fixme_path = d.getVar(fixmevar)
sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
# Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)
+ # Defer do_populate_sysroot relocation command
+ if sstatefixmedir:
+ bb.utils.mkdirhier(sstatefixmedir)
+ with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
+ sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
+ sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
+ sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
+ sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
+ f.write(sstate_hardcode_cmd)
+ bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
+ return
+
bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
subprocess.call(sstate_hardcode_cmd, shell=True)
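The deferral branch above underpins relocatable recipe-specific sysroots: rather than running sed immediately, the fully assembled command is re-templated with FIXMEFINALSSTATEINST/HOST/TARGET tokens and stored beside the file list, so the eventual consumer can substitute its own paths and replay it at install time. The re-templating step on its own (paths illustrative):

    def defer_cmd(cmd, inst, host, target):
        # Swap the concrete paths back out for placeholder tokens; the consumer
        # substitutes its own locations before executing the stored command.
        for real, token in ((inst, "FIXMEFINALSSTATEINST"),
                            (host, "FIXMEFINALSSTATEHOST"),
                            (target, "FIXMEFINALSSTATETARGET")):
            cmd = cmd.replace(real, token)
        return cmd

    cmd = "sed -e 's:^:/work/inst/:g' files | xargs sed -i -e 's:FIXMESTAGINGDIRTARGET:/rss/target:g'"
    print(defer_cmd(cmd, "/work/inst/", "/rss/native", "/rss/target"))
    # sed -e 's:^:FIXMEFINALSSTATEINST:g' files | xargs sed -i -e 's:FIXMESTAGINGDIRTARGET:FIXMEFINALSSTATETARGET:g'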
@@ -375,17 +414,17 @@ python sstate_hardcode_path_unpack () {
def sstate_clean_cachefile(ss, d):
import oe.path
- sstatepkgfile = d.getVar('SSTATE_PATHSPEC', True) + "*_" + ss['task'] + ".tgz*"
+ sstatepkgfile = d.getVar('SSTATE_PATHSPEC') + "*_" + ss['task'] + ".tgz*"
bb.note("Removing %s" % sstatepkgfile)
oe.path.remove(sstatepkgfile)
def sstate_clean_cachefiles(d):
- for task in (d.getVar('SSTATETASKS', True) or "").split():
+ for task in (d.getVar('SSTATETASKS') or "").split():
ld = d.createCopy()
ss = sstate_state_fromvars(ld, task)
sstate_clean_cachefile(ss, ld)
-def sstate_clean_manifest(manifest, d):
+def sstate_clean_manifest(manifest, d, prefix=None):
import oe.path
mfile = open(manifest)
@@ -394,6 +433,8 @@ def sstate_clean_manifest(manifest, d):
for entry in entries:
entry = entry.strip()
+ if prefix and not entry.startswith("/"):
+ entry = prefix + "/" + entry
bb.debug(2, "Removing manifest: %s" % entry)
# We can race against another package populating directories as we're removing them
# so we ignore errors here.
@@ -404,7 +445,7 @@ def sstate_clean_manifest(manifest, d):
elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
os.rmdir(entry[:-1])
else:
- oe.path.remove(entry)
+ os.remove(entry)
except OSError:
pass
@@ -422,8 +463,8 @@ def sstate_clean(ss, d):
import glob
d2 = d.createCopy()
- stamp_clean = d.getVar("STAMPCLEAN", True)
- extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info', True)
+ stamp_clean = d.getVar("STAMPCLEAN")
+ extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
if extrainf:
d2.setVar("SSTATE_MANMACH", extrainf)
wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
@@ -465,7 +506,7 @@ def sstate_clean(ss, d):
oe.path.remove(stfile)
# Removes the users/groups created by the package
- for cleanfunc in (d.getVar('SSTATECLEANFUNCS', True) or '').split():
+ for cleanfunc in (d.getVar('SSTATECLEANFUNCS') or '').split():
bb.build.exec_func(cleanfunc, d)
sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
@@ -473,13 +514,13 @@ sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
CLEANFUNCS += "sstate_cleanall"
python sstate_cleanall() {
- bb.note("Removing shared state for package %s" % d.getVar('PN', True))
+ bb.note("Removing shared state for package %s" % d.getVar('PN'))
- manifest_dir = d.getVar('SSTATE_MANIFESTS', True)
+ manifest_dir = d.getVar('SSTATE_MANIFESTS')
if not os.path.exists(manifest_dir):
return
- tasks = d.getVar('SSTATETASKS', True).split()
+ tasks = d.getVar('SSTATETASKS').split()
for name in tasks:
ld = d.createCopy()
shared_state = sstate_state_fromvars(ld, name)
@@ -495,29 +536,29 @@ python sstate_hardcode_path () {
# Note: the logic in this function needs to match the reverse logic
# in sstate_installpkg(ss, d)
- staging = d.getVar('STAGING_DIR', True)
- staging_target = d.getVar('STAGING_DIR_TARGET', True)
- staging_host = d.getVar('STAGING_DIR_HOST', True)
- sstate_builddir = d.getVar('SSTATE_BUILDDIR', True)
+ staging_target = d.getVar('RECIPE_SYSROOT')
+ staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
+ sstate_builddir = d.getVar('SSTATE_BUILDDIR')
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
- sstate_grep_cmd = "grep -l -e '%s'" % (staging)
- sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIR:g'" % (staging)
- elif bb.data.inherits_class('cross', d):
- sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging)
- sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRTARGET:g; s:%s:FIXMESTAGINGDIR:g'" % (staging_target, staging)
- else:
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % (staging_host)
+ elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
+ sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
+ sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRTARGET:g; s:%s:FIXMESTAGINGDIRHOST:g'" % (staging_target, staging_host)
+ else:
+ sstate_grep_cmd = "grep -l -e '%s'" % (staging_target)
+ sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % (staging_target)
- extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or ''
+ extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
for fixmevar in extra_staging_fixmes.split():
- fixme_path = d.getVar(fixmevar, True)
+ fixme_path = d.getVar(fixmevar)
sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
+ sstate_grep_cmd += " -e '%s'" % (fixme_path)
fixmefn = sstate_builddir + "fixmepath"
- sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD', True)
+ sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
sstate_filelist_cmd = "tee %s" % (fixmefn)
# fixmepath file needs relative paths, drop sstate_builddir prefix
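Taken together, sstate_hardcode_path assembles one shell pipeline: scan for candidate files, keep only those that mention a staging path, record the survivors in fixmepath, then rewrite the paths to FIXME tokens in place. Schematically (simplified: the real class also picks a platform-appropriate xargs and adds -e expressions from EXTRA_STAGING_FIXMES):

    scan = "find ${SSTATE_BUILDDIR} -type f"                # SSTATE_SCAN_CMD
    grep = "grep -l -e '/recipe-sysroot'"                   # files holding staging paths
    tee  = "tee fixmepath"                                  # remember which files matched
    sed  = "sed -i -e 's:/recipe-sysroot:FIXMESTAGINGDIRTARGET:g'"
    pipeline = "%s | xargs %s | %s | xargs --no-run-if-empty %s" % (scan, grep, tee, sed)
    print(pipeline)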
@@ -532,96 +573,81 @@ python sstate_hardcode_path () {
sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)
bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
- subprocess.call(sstate_hardcode_cmd, shell=True)
+ subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)
# If the fixmefn is empty, remove it..
if os.stat(fixmefn).st_size == 0:
os.remove(fixmefn)
else:
bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
- subprocess.call(sstate_filelist_relative_cmd, shell=True)
+ subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
}
def sstate_package(ss, d):
import oe.path
- def make_relative_symlink(path, outputpath, d):
- # Replace out absolute TMPDIR paths in symlinks with relative ones
- if not os.path.islink(path):
- return
- link = os.readlink(path)
- if not os.path.isabs(link):
- return
- if not link.startswith(tmpdir):
- return
-
- depth = outputpath.rpartition(tmpdir)[2].count('/')
- base = link.partition(tmpdir)[2].strip()
- while depth > 1:
- base = "/.." + base
- depth -= 1
- base = "." + base
-
- bb.debug(2, "Replacing absolute path %s with relative path %s for %s" % (link, base, outputpath))
- os.remove(path)
- os.symlink(base, path)
-
- tmpdir = d.getVar('TMPDIR', True)
+ tmpdir = d.getVar('TMPDIR')
sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
- sstatepkg = d.getVar('SSTATE_PKG', True) + '_'+ ss['task'] + ".tgz"
+ sstatepkg = d.getVar('SSTATE_PKG') + '_'+ ss['task'] + ".tgz"
bb.utils.remove(sstatebuild, recurse=True)
bb.utils.mkdirhier(sstatebuild)
bb.utils.mkdirhier(os.path.dirname(sstatepkg))
for state in ss['dirs']:
if not os.path.exists(state[1]):
continue
- if d.getVar('SSTATE_SKIP_CREATION', True) == '1':
+ if d.getVar('SSTATE_SKIP_CREATION') == '1':
continue
srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
+ # Find absolute symlinks and error out. We could attempt to relocate them, but it's
+ # not clear what the symlink should be relative to in this context. We could add that
+ # markup to sstate tasks, but there aren't many of these, so it's better just to avoid
+ # them entirely.
for walkroot, dirs, files in os.walk(state[1]):
- for file in files:
+ for file in files + dirs:
srcpath = os.path.join(walkroot, file)
- dstpath = srcpath.replace(state[1], state[2])
- make_relative_symlink(srcpath, dstpath, d)
- for dir in dirs:
- srcpath = os.path.join(walkroot, dir)
- dstpath = srcpath.replace(state[1], state[2])
- make_relative_symlink(srcpath, dstpath, d)
+ if not os.path.islink(srcpath):
+ continue
+ link = os.readlink(srcpath)
+ if not os.path.isabs(link):
+ continue
+ if not link.startswith(tmpdir):
+ continue
+ bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
- oe.path.copyhardlinktree(state[1], sstatebuild + state[0])
+ os.rename(state[1], sstatebuild + state[0])
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
for plain in ss['plaindirs']:
pdir = plain.replace(workdir, sstatebuild)
bb.utils.mkdirhier(plain)
bb.utils.mkdirhier(pdir)
- oe.path.copyhardlinktree(plain, pdir)
+ os.rename(plain, pdir)
d.setVar('SSTATE_BUILDDIR', sstatebuild)
d.setVar('SSTATE_PKG', sstatepkg)
- for f in (d.getVar('SSTATECREATEFUNCS', True) or '').split() + \
+ for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
['sstate_create_package', 'sstate_sign_package'] + \
- (d.getVar('SSTATEPOSTCREATEFUNCS', True) or '').split():
+ (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
# All hooks should run in SSTATE_BUILDDIR.
bb.build.exec_func(f, d, (sstatebuild,))
bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)
+ d.setVar('SSTATE_INSTDIR', sstatebuild)
+
return
def pstaging_fetch(sstatefetch, sstatepkg, d):
import bb.fetch2
# Only try and fetch if the user has configured a mirror
- mirrors = d.getVar('SSTATE_MIRRORS', True)
+ mirrors = d.getVar('SSTATE_MIRRORS')
if not mirrors:
return
# Copy the data object and override DL_DIR and SRC_URI
localdata = bb.data.createCopy(d)
- bb.data.update_data(localdata)
dldir = localdata.expand("${SSTATE_DIR}")
bb.utils.mkdirhier(dldir)
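Because sstate trees are now moved with os.rename instead of being hardlink-copied, an absolute symlink into TMPDIR would silently go stale on any machine that unpacks the object elsewhere; the walk above therefore flags such links as errors at packaging time. The detection on its own:

    import os

    def find_bad_symlinks(root, tmpdir):
        # Yield (link, destination) for every symlink under root that points
        # at an absolute path inside tmpdir - these cannot survive relocation.
        for walkroot, dirs, files in os.walk(root):
            for name in files + dirs:
                path = os.path.join(walkroot, name)
                if not os.path.islink(path):
                    continue
                link = os.readlink(path)
                if os.path.isabs(link) and link.startswith(tmpdir):
                    yield path, link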
@@ -633,14 +659,14 @@ def pstaging_fetch(sstatefetch, sstatepkg, d):
# if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
# we'll want to allow network access for the current set of fetches.
- if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
+ if localdata.getVar('BB_NO_NETWORK') == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK') == "1":
localdata.delVar('BB_NO_NETWORK')
# Try a fetch from the sstate mirror, if it fails just return and
# we will build the package
uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
- if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG", True), False):
+ if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]
for srcuri in uris:
@@ -667,14 +693,21 @@ sstate_task_prefunc[dirs] = "${WORKDIR}"
python sstate_task_postfunc () {
shared_state = sstate_state_fromvars(d)
- sstate_install(shared_state, d)
for intercept in shared_state['interceptfuncs']:
- bb.build.exec_func(intercept, d, (d.getVar("WORKDIR", True),))
+ bb.build.exec_func(intercept, d, (d.getVar("WORKDIR"),))
+
omask = os.umask(0o002)
if omask != 0o002:
bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
sstate_package(shared_state, d)
os.umask(omask)
+
+ sstateinst = d.getVar("SSTATE_INSTDIR")
+ d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])
+
+ sstate_installpkgdir(shared_state, d)
+
+ bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
}
sstate_task_postfunc[dirs] = "${WORKDIR}"
@@ -699,21 +732,18 @@ sstate_create_package () {
fi
chmod 0664 $TFILE
mv -f $TFILE ${SSTATE_PKG}
-
- cd ${WORKDIR}
- rm -rf ${SSTATE_BUILDDIR}
}
python sstate_sign_package () {
from oe.gpg_sign import get_signer
- if d.getVar('SSTATE_SIG_KEY', True):
+ if d.getVar('SSTATE_SIG_KEY'):
signer = get_signer(d, 'local')
- sstate_pkg = d.getVar('SSTATE_PKG', True)
+ sstate_pkg = d.getVar('SSTATE_PKG')
if os.path.exists(sstate_pkg + '.sig'):
os.unlink(sstate_pkg + '.sig')
signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
- d.getVar('SSTATE_SIG_PASSPHRASE', True), armor=False)
+ d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
}
#
@@ -736,7 +766,6 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
ret = []
missed = []
- missing = []
extension = ".tgz"
if siginfo:
extension = extension + ".siginfo"
@@ -746,7 +775,7 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
splithashfn = sq_hashfn[task].split(" ")
spec = splithashfn[1]
if splithashfn[0] == "True":
- extrapath = d.getVar("NATIVELSBSTRING", True) + "/"
+ extrapath = d.getVar("NATIVELSBSTRING") + "/"
else:
extrapath = ""
@@ -758,18 +787,6 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
return spec, extrapath, tname
- def sstate_pkg_to_pn(pkg, d):
- """
- Translate an sstate filename to a PN value by way of SSTATE_PKGSPEC. This is slightly hacky but
- we don't have access to everything in this context.
- """
- pkgspec = d.getVar('SSTATE_PKGSPEC', False)
- try:
- idx = pkgspec.split(':').index('${PN}')
- except ValueError:
- bb.fatal('Unable to find ${PN} in SSTATE_PKGSPEC')
- return pkg.split(':')[idx]
-
for task in range(len(sq_fn)):
@@ -785,11 +802,10 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
missed.append(task)
bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
- mirrors = d.getVar("SSTATE_MIRRORS", True)
+ mirrors = d.getVar("SSTATE_MIRRORS")
if mirrors:
# Copy the data object and override DL_DIR and SRC_URI
localdata = bb.data.createCopy(d)
- bb.data.update_data(localdata)
dldir = localdata.expand("${SSTATE_DIR}")
localdata.delVar('MIRRORS')
@@ -801,11 +817,9 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
# if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
# we'll want to allow network access for the current set of fetches.
- if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
+ if localdata.getVar('BB_NO_NETWORK') == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK') == "1":
localdata.delVar('BB_NO_NETWORK')
- whitelist = bb.runqueue.get_setscene_enforce_whitelist(d)
-
from bb.fetch2 import FetchConnectionCache
def checkstatus_init(thread_worker):
thread_worker.connection_cache = FetchConnectionCache()
@@ -832,12 +846,6 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
except:
missed.append(task)
bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
- if whitelist:
- pn = sstate_pkg_to_pn(sstatefile, d)
- taskname = sq_task[task]
- if not bb.runqueue.check_setscene_enforce_whitelist(pn, taskname, whitelist):
- missing.append(task)
- bb.error('Sstate artifact unavailable for %s.%s' % (pn, taskname))
pass
bb.event.fire(bb.event.ProcessProgress("Checking sstate mirror object availability", len(tasklist) - thread_worker.tasks.qsize()), d)
@@ -865,10 +873,8 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
bb.event.disable_threadlock()
bb.event.fire(bb.event.ProcessFinished("Checking sstate mirror object availability"), d)
- if whitelist and missing:
- bb.fatal('Required artifacts were unavailable - exiting')
- inheritlist = d.getVar("INHERIT", True)
+ inheritlist = d.getVar("INHERIT")
if "toaster" in inheritlist:
evdata = {'missed': [], 'found': []};
for task in missed:
@@ -888,24 +894,31 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
BB_SETSCENE_DEPVALID = "setscene_depvalid"
-def setscene_depvalid(task, taskdependees, notneeded, d):
+def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
# taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
# task is included in taskdependees too
+ # Return - False - We need this dependency
+ # - True - We can skip this dependency
- bb.debug(2, "Considering setscene task: %s" % (str(taskdependees[task])))
+ def logit(msg, log):
+ if log is not None:
+ log.append(msg)
+ else:
+ bb.debug(2, msg)
- def isNativeCross(x):
- return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x
+ logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
- def isPostInstDep(x):
- if x in ["qemu-native", "gdk-pixbuf-native", "qemuwrapper-cross", "depmodwrapper-cross", "systemd-systemctl-native", "gtk-icon-utils-native", "ca-certificates-native"]:
- return True
- return False
+ def isNativeCross(x):
+ return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
# We only need to trigger populate_lic through direct dependencies
if taskdependees[task][1] == "do_populate_lic":
return True
+ # stash_locale and gcc_stash_builddir are never needed as dependencies of built objects
+ if taskdependees[task][1] == "do_stash_locale" or taskdependees[task][1] == "do_gcc_stash_builddir":
+ return True
+
# We only need to trigger packagedata through direct dependencies
# but need to preserve packagedata on packagedata links
if taskdependees[task][1] == "do_packagedata":
@@ -915,7 +928,7 @@ def setscene_depvalid(task, taskdependees, notneeded, d):
return True
for dep in taskdependees:
- bb.debug(2, " considering dependency: %s" % (str(taskdependees[dep])))
+ logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
if task == dep:
continue
if dep in notneeded:
@@ -923,10 +936,11 @@ def setscene_depvalid(task, taskdependees, notneeded, d):
 # do_package_write_* and do_package don't need do_package
if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
continue
- # do_package_write_* and do_package doesn't need do_populate_sysroot, unless is a postinstall dependency
- if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
- if isPostInstDep(taskdependees[task][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
- return False
+ # do_package_write_* tasks need do_populate_sysroot, as they mainly cover postinstall dependencies
+ if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
+ return False
+ # do_package/packagedata/package_qa don't need do_populate_sysroot
+ if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa']:
continue
# Native/Cross packages don't exist and are noexec anyway
if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
@@ -968,7 +982,7 @@ def setscene_depvalid(task, taskdependees, notneeded, d):
# Safe fallthrough default
- bb.debug(2, " Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])))
+ logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
return False
return True
@@ -977,15 +991,15 @@ sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
python sstate_eventhandler() {
d = e.data
# When we write an sstate package we rewrite the SSTATE_PKG
- spkg = d.getVar('SSTATE_PKG', True)
+ spkg = d.getVar('SSTATE_PKG')
if not spkg.endswith(".tgz"):
- taskname = d.getVar("BB_RUNTASK", True)[3:]
- spec = d.getVar('SSTATE_PKGSPEC', True)
- swspec = d.getVar('SSTATE_SWSPEC', True)
+ taskname = d.getVar("BB_RUNTASK")[3:]
+ spec = d.getVar('SSTATE_PKGSPEC')
+ swspec = d.getVar('SSTATE_SWSPEC')
if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
d.setVar("SSTATE_EXTRAPATH", "")
- sstatepkg = d.getVar('SSTATE_PKG', True)
+ sstatepkg = d.getVar('SSTATE_PKG')
 bb.siggen.dump_this_task(sstatepkg + '_' + taskname + ".tgz.siginfo", d)
}
@@ -1004,7 +1018,7 @@ python sstate_eventhandler2() {
stamps = e.stamps.values()
removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
seen = []
- for a in d.getVar("SSTATE_ARCHS", True).split():
+ for a in d.getVar("SSTATE_ARCHS").split():
toremove = []
i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
if not os.path.exists(i):
diff --git a/import-layers/yocto-poky/meta/classes/staging.bbclass b/import-layers/yocto-poky/meta/classes/staging.bbclass
index bfabd06f3..984051d6a 100644
--- a/import-layers/yocto-poky/meta/classes/staging.bbclass
+++ b/import-layers/yocto-poky/meta/classes/staging.bbclass
@@ -31,6 +31,7 @@ SYSROOT_DIRS_BLACKLIST = " \
${datadir}/applications \
${datadir}/fonts \
${datadir}/pixmaps \
+ ${libdir}/${PN}/ptest \
"
sysroot_stage_dir() {
@@ -69,8 +70,8 @@ sysroot_stage_all() {
python sysroot_strip () {
import stat, errno
- dvar = d.getVar('SYSROOT_DESTDIR', True)
- pn = d.getVar('PN', True)
+ dvar = d.getVar('SYSROOT_DESTDIR')
+ pn = d.getVar('PN')
os.chdir(dvar)
@@ -103,9 +104,9 @@ python sysroot_strip () {
elffiles = {}
inodes = {}
- libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir", True))
- baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir", True))
- if (d.getVar('INHIBIT_SYSROOT_STRIP', True) != '1'):
+ libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir"))
+ baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir"))
+ if (d.getVar('INHIBIT_SYSROOT_STRIP') != '1'):
#
# First lets figure out all of the files we may have to process
#
@@ -136,7 +137,7 @@ python sysroot_strip () {
elf_file = isELF(file)
if elf_file & 1:
if elf_file & 2:
- if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn, True) or "").split():
+ if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split():
bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
else:
bb.warn("File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn))
@@ -154,7 +155,7 @@ python sysroot_strip () {
#
# Now strip them (in parallel)
#
- strip = d.getVar("STRIP", True)
+ strip = d.getVar("STRIP")
sfiles = []
for file in elffiles:
elf_file = int(elffiles[file])
@@ -172,52 +173,16 @@ addtask populate_sysroot after do_install
SYSROOT_PREPROCESS_FUNCS ?= ""
SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir"
-# We clean out any existing sstate from the sysroot if we rerun configure
-python sysroot_cleansstate () {
- ss = sstate_state_fromvars(d, "populate_sysroot")
- sstate_clean(ss, d)
-}
-do_configure[prefuncs] += "sysroot_cleansstate"
-
-
-BB_SETSCENE_VERIFY_FUNCTION2 = "sysroot_checkhashes2"
-
-def sysroot_checkhashes2(covered, tasknames, fns, d, invalidtasks):
- problems = set()
- configurefns = set()
- for tid in invalidtasks:
- if tasknames[tid] == "do_configure" and tid not in covered:
- configurefns.add(fns[tid])
- for tid in covered:
- if tasknames[tid] == "do_populate_sysroot" and fns[tid] in configurefns:
- problems.add(tid)
- return problems
-
-BB_SETSCENE_VERIFY_FUNCTION = "sysroot_checkhashes"
-
-def sysroot_checkhashes(covered, tasknames, fnids, fns, d, invalidtasks = None):
- problems = set()
- configurefnids = set()
- if not invalidtasks:
- invalidtasks = range(len(tasknames))
- for task in invalidtasks:
- if tasknames[task] == "do_configure" and task not in covered:
- configurefnids.add(fnids[task])
- for task in covered:
- if tasknames[task] == "do_populate_sysroot" and fnids[task] in configurefnids:
- problems.add(task)
- return problems
-
python do_populate_sysroot () {
bb.build.exec_func("sysroot_stage_all", d)
bb.build.exec_func("sysroot_strip", d)
- for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS', True) or '').split():
+ for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS') or '').split():
bb.build.exec_func(f, d)
- pn = d.getVar("PN", True)
- multiprov = d.getVar("MULTI_PROVIDER_WHITELIST", True).split()
+ pn = d.getVar("PN")
+ multiprov = d.getVar("MULTI_PROVIDER_WHITELIST").split()
provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/")
bb.utils.mkdirhier(provdir)
- for p in d.getVar("PROVIDES", True).split():
+ for p in d.getVar("PROVIDES").split():
if p in multiprov:
continue
p = p.replace("/", "_")
@@ -228,15 +193,483 @@ python do_populate_sysroot () {
do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
do_populate_sysroot[vardepsexclude] += "MULTI_PROVIDER_WHITELIST"
+POPULATESYSROOTDEPS = ""
+POPULATESYSROOTDEPS_class-target = "virtual/${MLPREFIX}${TARGET_PREFIX}binutils:do_populate_sysroot"
+POPULATESYSROOTDEPS_class-nativesdk = "virtual/${TARGET_PREFIX}binutils-crosssdk:do_populate_sysroot"
+do_populate_sysroot[depends] += "${POPULATESYSROOTDEPS}"
+
SSTATETASKS += "do_populate_sysroot"
do_populate_sysroot[cleandirs] = "${SYSROOT_DESTDIR}"
do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}"
-do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_HOST}/"
-do_populate_sysroot[stamp-extra-info] = "${MACHINE}"
+do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
+do_populate_sysroot[sstate-fixmedir] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
python do_populate_sysroot_setscene () {
sstate_setscene(d)
}
addtask do_populate_sysroot_setscene
+def staging_copyfile(c, target, dest, postinsts, seendirs):
+ import errno
+
+ destdir = os.path.dirname(dest)
+ if destdir not in seendirs:
+ bb.utils.mkdirhier(destdir)
+ seendirs.add(destdir)
+ if "/usr/bin/postinst-" in c:
+ postinsts.append(dest)
+ if os.path.islink(c):
+ linkto = os.readlink(c)
+ if os.path.lexists(dest):
+ if not os.path.islink(dest):
+ raise OSError(errno.EEXIST, "Link %s already exists as a file" % dest, dest)
+ if os.readlink(dest) == linkto:
+ return dest
+ raise OSError(errno.EEXIST, "Link %s already exists to a different location? (%s vs %s)" % (dest, os.readlink(dest), linkto), dest)
+ os.symlink(linkto, dest)
+ #bb.warn(c)
+ else:
+ try:
+ os.link(c, dest)
+ except OSError as err:
+ if err.errno == errno.EXDEV:
+ bb.utils.copyfile(c, dest)
+ else:
+ raise
+ return dest
+
+def staging_copydir(c, target, dest, seendirs):
+ if dest not in seendirs:
+ bb.utils.mkdirhier(dest)
+ seendirs.add(dest)
+
+def staging_processfixme(fixme, target, recipesysroot, recipesysrootnative, d):
+ import subprocess
+
+ if not fixme:
+ return
+ cmd = "sed -e 's:^[^/]*/:%s/:g' %s | xargs sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (target, " ".join(fixme), recipesysroot, recipesysrootnative)
+ for fixmevar in ['COMPONENTS_DIR', 'HOSTTOOLS_DIR', 'PKGDATA_DIR', 'PSEUDO_LOCALSTATEDIR', 'LOGFIFO']:
+ fixme_path = d.getVar(fixmevar)
+ cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
+ bb.debug(2, cmd)
+ subprocess.check_output(cmd, shell=True)
+
+
+def staging_populate_sysroot_dir(targetsysroot, nativesysroot, native, d):
+ import glob
+    import subprocess
+    import errno
+
+ fixme = []
+ postinsts = []
+ seendirs = set()
+ stagingdir = d.getVar("STAGING_DIR")
+ if native:
+ pkgarchs = ['${BUILD_ARCH}', '${BUILD_ARCH}_*']
+ targetdir = nativesysroot
+ else:
+ pkgarchs = ['${MACHINE_ARCH}']
+ pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
+ pkgarchs.append('allarch')
+ targetdir = targetsysroot
+
+ bb.utils.mkdirhier(targetdir)
+ for pkgarch in pkgarchs:
+ for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.populate_sysroot" % pkgarch)):
+ if manifest.endswith("-initial.populate_sysroot"):
+ # skip glibc-initial and libgcc-initial due to file overlap
+ continue
+ tmanifest = targetdir + "/" + os.path.basename(manifest)
+ if os.path.exists(tmanifest):
+ continue
+ try:
+ os.link(manifest, tmanifest)
+ except OSError as err:
+ if err.errno == errno.EXDEV:
+ bb.utils.copyfile(manifest, tmanifest)
+ else:
+ raise
+ with open(manifest, "r") as f:
+ for l in f:
+ l = l.strip()
+ if l.endswith("/fixmepath"):
+ fixme.append(l)
+ continue
+ if l.endswith("/fixmepath.cmd"):
+ continue
+ dest = l.replace(stagingdir, "")
+ dest = targetdir + "/" + "/".join(dest.split("/")[3:])
+ if l.endswith("/"):
+ staging_copydir(l, targetdir, dest, seendirs)
+ continue
+ try:
+ staging_copyfile(l, targetdir, dest, postinsts, seendirs)
+ except FileExistsError:
+ continue
+
+ staging_processfixme(fixme, targetdir, targetsysroot, nativesysroot, d)
+ for p in postinsts:
+ subprocess.check_output(p, shell=True)
+
+#
+# Manifests here are complicated. The main sysroot area has the unpacked sstate
+# which is unrelocated and tracked by the main sstate manifests. Each recipe
+# specific sysroot has manifests for each dependency that is installed there.
+# The task hash is used to tell whether the data needs to be reinstalled. We
+# use a symlink to point to the currently installed hash. There is also a
+# "complete" stamp file which is used to mark if installation completed. If
+# something fails (e.g. a postinst), this won't get written and we would
+# remove and reinstall the dependency. This also means partially installed
+# dependencies should get cleaned up correctly.
+#
+
+python extend_recipe_sysroot() {
+ import copy
+ import subprocess
+ import errno
+ import collections
+ import glob
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ mytaskname = d.getVar("BB_RUNTASK")
+ if mytaskname.endswith("_setscene"):
+ mytaskname = mytaskname.replace("_setscene", "")
+ workdir = d.getVar("WORKDIR")
+ #bb.warn(str(taskdepdata))
+ pn = d.getVar("PN")
+
+ stagingdir = d.getVar("STAGING_DIR")
+ sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
+ recipesysroot = d.getVar("RECIPE_SYSROOT")
+ recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")
+ current_variant = d.getVar("BBEXTENDVARIANT")
+
+ # Detect bitbake -b usage
+ nodeps = d.getVar("BB_LIMITEDDEPS") or False
+ if nodeps:
+ lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
+ staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, True, d)
+ staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, False, d)
+ bb.utils.unlockfile(lock)
+ return
+
+ start = None
+ configuredeps = []
+ for dep in taskdepdata:
+ data = taskdepdata[dep]
+ if data[1] == mytaskname and data[0] == pn:
+ start = dep
+ break
+ if start is None:
+ bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
+
+ # We need to figure out which sysroot files we need to expose to this task.
+ # This needs to match what would get restored from sstate, which is controlled
+ # ultimately by calls from bitbake to setscene_depvalid().
+ # That function expects a setscene dependency tree. We build a dependency tree
+ # condensed to inter-sstate task dependencies, similar to that used by setscene
+ # tasks. We can then call into setscene_depvalid() and decide
+ # which dependencies we can "see" and should expose in the recipe specific sysroot.
+ setscenedeps = copy.deepcopy(taskdepdata)
+
+ start = set([start])
+
+ sstatetasks = d.getVar("SSTATETASKS").split()
+
+ def print_dep_tree(deptree):
+ data = ""
+ for dep in deptree:
+ deps = " " + "\n ".join(deptree[dep][3]) + "\n"
+            data += "%s:\n  %s\n  %s\n%s  %s\n  %s\n" % (deptree[dep][0], deptree[dep][1], deptree[dep][2], deps, deptree[dep][4], deptree[dep][5])
+ return data
+
+ #bb.note("Full dep tree is:\n%s" % print_dep_tree(taskdepdata))
+
+ #bb.note(" start2 is %s" % str(start))
+
+ # If start is an sstate task (like do_package) we need to add in its direct dependencies
+ # else the code below won't recurse into them.
+ for dep in set(start):
+ for dep2 in setscenedeps[dep][3]:
+ start.add(dep2)
+ start.remove(dep)
+
+ #bb.note(" start3 is %s" % str(start))
+
+ # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
+ for dep in taskdepdata:
+ data = setscenedeps[dep]
+ if data[1] not in sstatetasks:
+ for dep2 in setscenedeps:
+ data2 = setscenedeps[dep2]
+ if dep in data2[3]:
+ data2[3].update(setscenedeps[dep][3])
+ data2[3].remove(dep)
+ if dep in start:
+ start.update(setscenedeps[dep][3])
+ start.remove(dep)
+ del setscenedeps[dep]
+
+ # Remove circular references
+ for dep in setscenedeps:
+ if dep in setscenedeps[dep][3]:
+ setscenedeps[dep][3].remove(dep)
+
+ #bb.note("Computed dep tree is:\n%s" % print_dep_tree(setscenedeps))
+ #bb.note(" start is %s" % str(start))
+
+ # Direct dependencies should be present and can be depended upon
+ for dep in set(start):
+ if setscenedeps[dep][1] == "do_populate_sysroot":
+ if dep not in configuredeps:
+ configuredeps.append(dep)
+ bb.note("Direct dependencies are %s" % str(configuredeps))
+ #bb.note(" or %s" % str(start))
+
+ msgbuf = []
+ # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
+ # for ones that would be restored from sstate.
+ done = list(start)
+ next = list(start)
+ while next:
+ new = []
+ for dep in next:
+ data = setscenedeps[dep]
+ for datadep in data[3]:
+ if datadep in done:
+ continue
+ taskdeps = {}
+ taskdeps[dep] = setscenedeps[dep][:2]
+ taskdeps[datadep] = setscenedeps[datadep][:2]
+ retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
+ if retval:
+ msgbuf.append("Skipping setscene dependency %s for installation into the sysroot" % datadep)
+ continue
+ done.append(datadep)
+ new.append(datadep)
+ if datadep not in configuredeps and setscenedeps[datadep][1] == "do_populate_sysroot":
+ configuredeps.append(datadep)
+ msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
+ else:
+ msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
+ next = new
+
+ bb.note("\n".join(msgbuf))
+
+ depdir = recipesysrootnative + "/installeddeps"
+ bb.utils.mkdirhier(depdir)
+ bb.utils.mkdirhier(sharedmanifests)
+
+ lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
+
+ fixme = {}
+ fixme[''] = []
+ fixme['native'] = []
+ seendirs = set()
+ postinsts = []
+ multilibs = {}
+ manifests = {}
+
+ for f in os.listdir(depdir):
+ if not f.endswith(".complete"):
+ continue
+ f = depdir + "/" + f
+ if os.path.islink(f) and not os.path.exists(f):
+ bb.note("%s no longer exists, removing from sysroot" % f)
+ lnk = os.readlink(f.replace(".complete", ""))
+ sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ os.unlink(f)
+ os.unlink(f.replace(".complete", ""))
+
+ installed = []
+ for dep in configuredeps:
+ c = setscenedeps[dep][0]
+ if mytaskname in ["do_sdk_depends", "do_populate_sdk_ext"] and c.endswith("-initial"):
+ bb.note("Skipping initial setscene dependency %s for installation into the sysroot" % c)
+ continue
+ installed.append(c)
+
+ # We want to remove anything which this task previously installed but is no longer a dependency
+ taskindex = depdir + "/" + "index." + mytaskname
+ if os.path.exists(taskindex):
+ potential = []
+ with open(taskindex, "r") as f:
+ for l in f:
+ l = l.strip()
+ if l not in installed:
+ fl = depdir + "/" + l
+ if not os.path.exists(fl):
+ # Was likely already uninstalled
+ continue
+ potential.append(l)
+    # We need to ensure no other task needs this dependency. We hold the
+    # sysroot lock so we can search the indexes to check.
+ if potential:
+ for i in glob.glob(depdir + "/index.*"):
+ if i.endswith("." + mytaskname):
+ continue
+ with open(i, "r") as f:
+ for l in f:
+ l = l.strip()
+ if l in potential:
+ potential.remove(l)
+ for l in potential:
+ fl = depdir + "/" + l
+ bb.note("Task %s no longer depends on %s, removing from sysroot" % (mytaskname, l))
+ lnk = os.readlink(fl)
+ sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ os.unlink(fl)
+ os.unlink(fl + ".complete")
+
+ for dep in configuredeps:
+ c = setscenedeps[dep][0]
+ if c not in installed:
+ continue
+ taskhash = setscenedeps[dep][5]
+ taskmanifest = depdir + "/" + c + "." + taskhash
+
+ if os.path.exists(depdir + "/" + c):
+ lnk = os.readlink(depdir + "/" + c)
+ if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
+ bb.note("%s exists in sysroot, skipping" % c)
+ continue
+ else:
+ bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
+ sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ os.unlink(depdir + "/" + c)
+ if os.path.lexists(depdir + "/" + c + ".complete"):
+ os.unlink(depdir + "/" + c + ".complete")
+ elif os.path.lexists(depdir + "/" + c):
+ os.unlink(depdir + "/" + c)
+
+ os.symlink(c + "." + taskhash, depdir + "/" + c)
+
+ d2 = d
+ destsysroot = recipesysroot
+ variant = ''
+ if setscenedeps[dep][2].startswith("virtual:multilib"):
+ variant = setscenedeps[dep][2].split(":")[2]
+ if variant != current_variant:
+ if variant not in multilibs:
+ multilibs[variant] = get_multilib_datastore(variant, d)
+ d2 = multilibs[variant]
+ destsysroot = d2.getVar("RECIPE_SYSROOT")
+
+ native = False
+ if c.endswith("-native"):
+ manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-${BUILD_ARCH}-%s.populate_sysroot" % c)
+ native = True
+ elif c.startswith("nativesdk-"):
+ manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-${SDK_ARCH}_${SDK_OS}-%s.populate_sysroot" % c)
+ elif "-cross-" in c:
+ manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-${BUILD_ARCH}_${TARGET_ARCH}-%s.populate_sysroot" % c)
+ native = True
+ elif "-crosssdk" in c:
+ manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}-%s.populate_sysroot" % c)
+ native = True
+ else:
+ pkgarchs = ['${MACHINE_ARCH}']
+ pkgarchs = pkgarchs + list(reversed(d2.getVar("PACKAGE_EXTRA_ARCHS").split()))
+ pkgarchs.append('allarch')
+ for pkgarch in pkgarchs:
+ manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.populate_sysroot" % (pkgarch, c))
+ if os.path.exists(manifest):
+ break
+ if not os.path.exists(manifest):
+ bb.warn("Manifest %s not found?" % manifest)
+ else:
+ newmanifest = collections.OrderedDict()
+ if native:
+ fm = fixme['native']
+ targetdir = recipesysrootnative
+ else:
+ fm = fixme['']
+ targetdir = destsysroot
+ with open(manifest, "r") as f:
+ manifests[dep] = manifest
+ for l in f:
+ l = l.strip()
+ if l.endswith("/fixmepath"):
+ fm.append(l)
+ continue
+ if l.endswith("/fixmepath.cmd"):
+ continue
+ dest = l.replace(stagingdir, "")
+ dest = targetdir + "/" + "/".join(dest.split("/")[3:])
+ newmanifest[l] = dest
+                # Having multiple identical manifests in each sysroot eats disk space so
+                # create a shared pool of them and hardlink if we can.
+                # We create the manifest in advance so that if something fails during
+                # installation, or the build is interrupted, subsequent execution can clean up.
+ sharedm = sharedmanifests + "/" + os.path.basename(taskmanifest)
+ if not os.path.exists(sharedm):
+ smlock = bb.utils.lockfile(sharedm + ".lock")
+                    # We can race here. You'd think that would just mean we may not end up
+                    # with all copies hardlinked to each other, but Python can lose file
+                    # handles, so we need to do this under a lock.
+ if not os.path.exists(sharedm):
+ with open(sharedm, 'w') as m:
+ for l in newmanifest:
+ dest = newmanifest[l]
+ m.write(dest.replace(workdir + "/", "") + "\n")
+ bb.utils.unlockfile(smlock)
+ try:
+ os.link(sharedm, taskmanifest)
+ except OSError as err:
+ if err.errno == errno.EXDEV:
+ bb.utils.copyfile(sharedm, taskmanifest)
+ else:
+ raise
+ # Finally actually install the files
+ for l in newmanifest:
+ dest = newmanifest[l]
+ if l.endswith("/"):
+ staging_copydir(l, targetdir, dest, seendirs)
+ continue
+ staging_copyfile(l, targetdir, dest, postinsts, seendirs)
+
+ for f in fixme:
+ if f == '':
+ staging_processfixme(fixme[f], recipesysroot, recipesysroot, recipesysrootnative, d)
+ elif f == 'native':
+ staging_processfixme(fixme[f], recipesysrootnative, recipesysroot, recipesysrootnative, d)
+ else:
+ staging_processfixme(fixme[f], multilibs[f].getVar("RECIPE_SYSROOT"), recipesysroot, recipesysrootnative, d)
+
+ for p in postinsts:
+ subprocess.check_output(p, shell=True)
+
+ for dep in manifests:
+ c = setscenedeps[dep][0]
+ os.symlink(manifests[dep], depdir + "/" + c + ".complete")
+
+ with open(taskindex, "w") as f:
+ for l in sorted(installed):
+ f.write(l + "\n")
+
+ bb.utils.unlockfile(lock)
+}
+extend_recipe_sysroot[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
+
+python do_prepare_recipe_sysroot () {
+ bb.build.exec_func("extend_recipe_sysroot", d)
+}
+addtask do_prepare_recipe_sysroot before do_configure after do_fetch
+
+# Clean out the recipe specific sysroots before do_fetch
+# (use a prefunc so we can order before extend_recipe_sysroot if it gets added)
+python clean_recipe_sysroot() {
+ return
+}
+clean_recipe_sysroot[cleandirs] += "${RECIPE_SYSROOT} ${RECIPE_SYSROOT_NATIVE}"
+do_fetch[prefuncs] += "clean_recipe_sysroot"
+
+python staging_taskhandler() {
+ bbtasks = e.tasklist
+ for task in bbtasks:
+ deps = d.getVarFlag(task, "depends")
+ if deps and "populate_sysroot" in deps:
+ d.appendVarFlag(task, "prefuncs", " extend_recipe_sysroot")
+}
+staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
+addhandler staging_taskhandler
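The long "Manifests here are complicated" comment above describes the recipe-specific sysroot bookkeeping: <dep> is a symlink to <dep>.<taskhash>, and <dep>.complete is only created once installation finished, so stale or partial installs are detected and redone. An illustrative sketch of that scheme (not the class code itself):

    import os, tempfile

    depdir = tempfile.mkdtemp()
    c, taskhash = "zlib", "abc123"
    manifest = os.path.join(depdir, c + "." + taskhash)
    open(manifest, "w").close()          # stand-in for the task manifest

    def ensure_installed(depdir, c, taskhash):
        link = os.path.join(depdir, c)
        if os.path.exists(link):
            if os.readlink(link) == c + "." + taskhash and \
               os.path.exists(link + ".complete"):
                return "already installed"
            # Stale hash or incomplete install: clean up and redo.
            os.unlink(link)
            if os.path.lexists(link + ".complete"):
                os.unlink(link + ".complete")
        os.symlink(c + "." + taskhash, link)
        # ... file copying would happen here; the stamp is written only
        # after everything (including postinsts) succeeded.
        os.symlink(c + "." + taskhash, link + ".complete")
        return "installed"

    assert ensure_installed(depdir, c, taskhash) == "installed"
    assert ensure_installed(depdir, c, taskhash) == "already installed"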
diff --git a/import-layers/yocto-poky/meta/classes/syslinux.bbclass b/import-layers/yocto-poky/meta/classes/syslinux.bbclass
index 7778fd708..d6f882420 100644
--- a/import-layers/yocto-poky/meta/classes/syslinux.bbclass
+++ b/import-layers/yocto-poky/meta/classes/syslinux.bbclass
@@ -84,12 +84,12 @@ python build_syslinux_cfg () {
import copy
import sys
- workdir = d.getVar('WORKDIR', True)
+ workdir = d.getVar('WORKDIR')
if not workdir:
bb.error("WORKDIR not defined, unable to package")
return
- labels = d.getVar('LABELS', True)
+ labels = d.getVar('LABELS')
if not labels:
bb.debug(1, "LABELS not defined, nothing to do")
return
@@ -98,7 +98,7 @@ python build_syslinux_cfg () {
bb.debug(1, "No labels, nothing to do")
return
- cfile = d.getVar('SYSLINUX_CFG', True)
+ cfile = d.getVar('SYSLINUX_CFG')
if not cfile:
bb.fatal('Unable to read SYSLINUX_CFG')
@@ -109,39 +109,39 @@ python build_syslinux_cfg () {
cfgfile.write('# Automatically created by OE\n')
- opts = d.getVar('SYSLINUX_OPTS', True)
+ opts = d.getVar('SYSLINUX_OPTS')
if opts:
for opt in opts.split(';'):
cfgfile.write('%s\n' % opt)
- allowoptions = d.getVar('SYSLINUX_ALLOWOPTIONS', True)
+ allowoptions = d.getVar('SYSLINUX_ALLOWOPTIONS')
if allowoptions:
cfgfile.write('ALLOWOPTIONS %s\n' % allowoptions)
else:
cfgfile.write('ALLOWOPTIONS 1\n')
- syslinux_default_console = d.getVar('SYSLINUX_DEFAULT_CONSOLE', True)
- syslinux_serial_tty = d.getVar('SYSLINUX_SERIAL_TTY', True)
- syslinux_serial = d.getVar('SYSLINUX_SERIAL', True)
+ syslinux_default_console = d.getVar('SYSLINUX_DEFAULT_CONSOLE')
+ syslinux_serial_tty = d.getVar('SYSLINUX_SERIAL_TTY')
+ syslinux_serial = d.getVar('SYSLINUX_SERIAL')
if syslinux_serial:
cfgfile.write('SERIAL %s\n' % syslinux_serial)
- menu = (d.getVar('AUTO_SYSLINUXMENU', True) == "1")
+ menu = (d.getVar('AUTO_SYSLINUXMENU') == "1")
if menu and syslinux_serial:
cfgfile.write('DEFAULT Graphics console %s\n' % (labels.split()[0]))
else:
cfgfile.write('DEFAULT %s\n' % (labels.split()[0]))
- timeout = d.getVar('SYSLINUX_TIMEOUT', True)
+ timeout = d.getVar('SYSLINUX_TIMEOUT')
if timeout:
cfgfile.write('TIMEOUT %s\n' % timeout)
else:
cfgfile.write('TIMEOUT 50\n')
- prompt = d.getVar('SYSLINUX_PROMPT', True)
+ prompt = d.getVar('SYSLINUX_PROMPT')
if prompt:
cfgfile.write('PROMPT %s\n' % prompt)
else:
@@ -151,38 +151,37 @@ python build_syslinux_cfg () {
cfgfile.write('ui vesamenu.c32\n')
cfgfile.write('menu title Select kernel options and boot kernel\n')
cfgfile.write('menu tabmsg Press [Tab] to edit, [Return] to select\n')
- splash = d.getVar('SYSLINUX_SPLASH', True)
+ splash = d.getVar('SYSLINUX_SPLASH')
if splash:
cfgfile.write('menu background splash.lss\n')
for label in labels.split():
localdata = bb.data.createCopy(d)
- overrides = localdata.getVar('OVERRIDES', True)
+ overrides = localdata.getVar('OVERRIDES')
if not overrides:
bb.fatal('OVERRIDES not defined')
localdata.setVar('OVERRIDES', label + ':' + overrides)
- bb.data.update_data(localdata)
btypes = [ [ "", syslinux_default_console ] ]
if menu and syslinux_serial:
btypes = [ [ "Graphics console ", syslinux_default_console ],
[ "Serial console ", syslinux_serial_tty ] ]
- root= d.getVar('SYSLINUX_ROOT', True)
+ root= d.getVar('SYSLINUX_ROOT')
if not root:
bb.fatal('SYSLINUX_ROOT not defined')
for btype in btypes:
cfgfile.write('LABEL %s%s\nKERNEL /vmlinuz\n' % (btype[0], label))
- exargs = d.getVar('SYSLINUX_KERNEL_ARGS', True)
+ exargs = d.getVar('SYSLINUX_KERNEL_ARGS')
if exargs:
btype[1] += " " + exargs
- append = localdata.getVar('APPEND', True)
- initrd = localdata.getVar('INITRD', True)
+ append = localdata.getVar('APPEND')
+ initrd = localdata.getVar('INITRD')
append = root + " " + append
cfgfile.write('APPEND ')
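This hunk (and the systemd-boot and systemd hunks below) also drops bb.data.update_data(localdata) after modifying OVERRIDES, since the datastore now resolves overrides at lookup time and no explicit re-evaluation step is needed. A hypothetical mini-datastore (not BitBake's real one) sketching that behaviour:

    class OverrideData:
        """Tiny override-aware store; resolution happens on every get()."""
        def __init__(self, base=None):
            self._vars = dict(base._vars) if base else {}
            self.overrides = list(base.overrides) if base else []

        def copy(self):
            return OverrideData(self)

        def set(self, name, value):
            self._vars[name] = value

        def get(self, name):
            # The most recently added override wins, checked per lookup.
            for o in reversed(self.overrides):
                if name + "_" + o in self._vars:
                    return self._vars[name + "_" + o]
            return self._vars.get(name)

    d = OverrideData()
    d.set("APPEND", "console=ttyS0")
    d.set("APPEND_install", "console=ttyS0 install")
    for label in ["boot", "install"]:
        localdata = d.copy()
        localdata.overrides.append(label)   # no update_data() required
        print(label, "->", localdata.get("APPEND"))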
diff --git a/import-layers/yocto-poky/meta/classes/systemd-boot.bbclass b/import-layers/yocto-poky/meta/classes/systemd-boot.bbclass
index 05244c7e5..959775992 100644
--- a/import-layers/yocto-poky/meta/classes/systemd-boot.bbclass
+++ b/import-layers/yocto-poky/meta/classes/systemd-boot.bbclass
@@ -4,9 +4,7 @@
# systemd-boot.bbclass - The "systemd-boot" is essentially the gummiboot merged into systemd.
# The original standalone gummiboot project is dead without any more
-# maintenance. As a start point, we replace all gummitboot occurrences
-# with systemd-boot in gummiboot.bbclass to have a base version of this
-# systemd-boot.bbclass.
+# maintenance.
#
# Set EFI_PROVIDER = "systemd-boot" to use systemd-boot on your live images instead of grub-efi
# (images built by image-live.bbclass or image-vm.bbclass)
@@ -39,6 +37,8 @@ efi_populate() {
install -d ${DEST}/loader
install -d ${DEST}/loader/entries
install -m 0644 ${DEPLOY_DIR_IMAGE}/${EFI_IMAGE} ${DEST}${EFIDIR}/${DEST_EFI_IMAGE}
+ EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
+ printf 'fs0:%s\%s\n' "$EFIPATH" "$DEST_EFI_IMAGE" >${DEST}/startup.nsh
install -m 0644 ${SYSTEMD_BOOT_CFG} ${DEST}/loader/loader.conf
for i in ${SYSTEMD_BOOT_ENTRIES}; do
install -m 0644 ${i} ${DEST}/loader/entries
@@ -50,6 +50,7 @@ efi_iso_populate() {
efi_populate $iso_dir
mkdir -p ${EFIIMGDIR}/${EFIDIR}
cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
+ cp -r $iso_dir/loader ${EFIIMGDIR}
cp $iso_dir/vmlinuz ${EFIIMGDIR}
EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
echo "fs0:${EFIPATH}\\${DEST_EFI_IMAGE}" > ${EFIIMGDIR}/startup.nsh
@@ -63,8 +64,8 @@ efi_hddimg_populate() {
}
python build_efi_cfg() {
- s = d.getVar("S", True)
- labels = d.getVar('LABELS', True)
+ s = d.getVar("S")
+ labels = d.getVar('LABELS')
if not labels:
bb.debug(1, "LABELS not defined, nothing to do")
return
@@ -73,7 +74,10 @@ python build_efi_cfg() {
bb.debug(1, "No labels, nothing to do")
return
- cfile = d.getVar('SYSTEMD_BOOT_CFG', True)
+ cfile = d.getVar('SYSTEMD_BOOT_CFG')
+ cdir = os.path.dirname(cfile)
+ if not os.path.exists(cdir):
+ os.makedirs(cdir)
try:
cfgfile = open(cfile, 'w')
except OSError:
@@ -81,7 +85,7 @@ python build_efi_cfg() {
cfgfile.write('# Automatically created by OE\n')
cfgfile.write('default %s\n' % (labels.split()[0]))
- timeout = d.getVar('SYSTEMD_BOOT_TIMEOUT', True)
+ timeout = d.getVar('SYSTEMD_BOOT_TIMEOUT')
if timeout:
cfgfile.write('timeout %s\n' % timeout)
else:
@@ -91,7 +95,7 @@ python build_efi_cfg() {
for label in labels.split():
localdata = d.createCopy()
- overrides = localdata.getVar('OVERRIDES', True)
+ overrides = localdata.getVar('OVERRIDES')
if not overrides:
bb.fatal('OVERRIDES not defined')
@@ -102,13 +106,12 @@ python build_efi_cfg() {
except OSError:
bb.fatal('Unable to open %s' % entryfile)
localdata.setVar('OVERRIDES', label + ':' + overrides)
- bb.data.update_data(localdata)
entrycfg.write('title %s\n' % label)
entrycfg.write('linux /vmlinuz\n')
- append = localdata.getVar('APPEND', True)
- initrd = localdata.getVar('INITRD', True)
+ append = localdata.getVar('APPEND')
+ initrd = localdata.getVar('INITRD')
if initrd:
entrycfg.write('initrd /initrd\n')
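The startup.nsh lines added above convert the EFI directory to backslash form for the EFI shell. A sketch of the resulting file content, assuming EFIDIR="/EFI/BOOT" and DEST_EFI_IMAGE="bootx64.efi" (example values, set elsewhere by the recipes):

    efidir = "/EFI/BOOT"
    dest_efi_image = "bootx64.efi"

    efipath = efidir.replace("/", "\\")        # sed 's/\//\\/g' equivalent
    startup_nsh = "fs0:%s\\%s\n" % (efipath, dest_efi_image)
    print(startup_nsh, end="")                 # fs0:\EFI\BOOT\bootx64.efi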
diff --git a/import-layers/yocto-poky/meta/classes/systemd.bbclass b/import-layers/yocto-poky/meta/classes/systemd.bbclass
index 4ea1f45e9..c4b4bb9b7 100644
--- a/import-layers/yocto-poky/meta/classes/systemd.bbclass
+++ b/import-layers/yocto-poky/meta/classes/systemd.bbclass
@@ -17,6 +17,7 @@ python __anonymous() {
# files.
if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
d.appendVar("DEPENDS", " systemd-systemctl-native")
+ d.appendVar("PACKAGE_WRITE_DEPS", " systemd-systemctl-native")
if not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d):
d.setVar("INHIBIT_UPDATERCD_BBCLASS", "1")
}
@@ -29,6 +30,10 @@ if [ -n "$D" ]; then
fi
if type systemctl >/dev/null 2>/dev/null; then
+ if [ -z "$D" ]; then
+ systemctl daemon-reload
+ fi
+
systemctl $OPTS ${SYSTEMD_AUTO_ENABLE} ${SYSTEMD_SERVICE}
if [ -z "$D" -a "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
@@ -65,14 +70,14 @@ python systemd_populate_packages() {
return
def get_package_var(d, var, pkg):
- val = (d.getVar('%s_%s' % (var, pkg), True) or "").strip()
+ val = (d.getVar('%s_%s' % (var, pkg)) or "").strip()
if val == "":
- val = (d.getVar(var, True) or "").strip()
+ val = (d.getVar(var) or "").strip()
return val
# Check if systemd-packages already included in PACKAGES
def systemd_check_package(pkg_systemd):
- packages = d.getVar('PACKAGES', True)
+ packages = d.getVar('PACKAGES')
if not pkg_systemd in packages.split():
bb.error('%s does not appear in package list, please add it' % pkg_systemd)
@@ -84,25 +89,24 @@ python systemd_populate_packages() {
# variable.
localdata = d.createCopy()
localdata.prependVar("OVERRIDES", pkg + ":")
- bb.data.update_data(localdata)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += localdata.getVar('systemd_postinst', True)
+ postinst += localdata.getVar('systemd_postinst')
d.setVar('pkg_postinst_%s' % pkg, postinst)
- prerm = d.getVar('pkg_prerm_%s' % pkg, True)
+ prerm = d.getVar('pkg_prerm_%s' % pkg)
if not prerm:
prerm = '#!/bin/sh\n'
- prerm += localdata.getVar('systemd_prerm', True)
+ prerm += localdata.getVar('systemd_prerm')
d.setVar('pkg_prerm_%s' % pkg, prerm)
# Add files to FILES_*-systemd if existent and not already done
def systemd_append_file(pkg_systemd, file_append):
appended = False
- if os.path.exists(oe.path.join(d.getVar("D", True), file_append)):
+ if os.path.exists(oe.path.join(d.getVar("D"), file_append)):
var_name = "FILES_" + pkg_systemd
files = d.getVar(var_name, False) or ""
if file_append not in files.split():
@@ -114,7 +118,7 @@ python systemd_populate_packages() {
def systemd_add_files_and_parse(pkg_systemd, path, service, keys):
# avoid infinite recursion
if systemd_append_file(pkg_systemd, oe.path.join(path, service)):
- fullpath = oe.path.join(d.getVar("D", True), path, service)
+ fullpath = oe.path.join(d.getVar("D"), path, service)
if service.find('.service') != -1:
# for *.service add *@.service
service_base = service.replace('.service', '')
@@ -137,9 +141,9 @@ python systemd_populate_packages() {
# Check service-files and call systemd_add_files_and_parse for each entry
def systemd_check_services():
- searchpaths = [oe.path.join(d.getVar("sysconfdir", True), "systemd", "system"),]
- searchpaths.append(d.getVar("systemd_system_unitdir", True))
- systemd_packages = d.getVar('SYSTEMD_PACKAGES', True)
+ searchpaths = [oe.path.join(d.getVar("sysconfdir"), "systemd", "system"),]
+ searchpaths.append(d.getVar("systemd_system_unitdir"))
+ systemd_packages = d.getVar('SYSTEMD_PACKAGES')
keys = 'Also'
# scan for all in SYSTEMD_SERVICE[]
@@ -154,11 +158,11 @@ python systemd_populate_packages() {
base = re.sub('@[^.]+.', '@.', service)
for path in searchpaths:
- if os.path.exists(oe.path.join(d.getVar("D", True), path, service)):
+ if os.path.exists(oe.path.join(d.getVar("D"), path, service)):
path_found = path
break
elif base is not None:
- if os.path.exists(oe.path.join(d.getVar("D", True), path, base)):
+ if os.path.exists(oe.path.join(d.getVar("D"), path, base)):
path_found = path
break
@@ -168,10 +172,10 @@ python systemd_populate_packages() {
bb.fatal("SYSTEMD_SERVICE_%s value %s does not exist" % (pkg_systemd, service))
# Run all modifications once when creating package
- if os.path.exists(d.getVar("D", True)):
- for pkg in d.getVar('SYSTEMD_PACKAGES', True).split():
+ if os.path.exists(d.getVar("D")):
+ for pkg in d.getVar('SYSTEMD_PACKAGES').split():
systemd_check_package(pkg)
- if d.getVar('SYSTEMD_SERVICE_' + pkg, True):
+ if d.getVar('SYSTEMD_SERVICE_' + pkg):
systemd_generate_package_scripts(pkg)
systemd_check_services()
}
@@ -181,7 +185,7 @@ PACKAGESPLITFUNCS_prepend = "systemd_populate_packages "
python rm_systemd_unitdir (){
import shutil
if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
- systemd_unitdir = oe.path.join(d.getVar("D", True), d.getVar('systemd_unitdir', True))
+ systemd_unitdir = oe.path.join(d.getVar("D"), d.getVar('systemd_unitdir'))
if os.path.exists(systemd_unitdir):
shutil.rmtree(systemd_unitdir)
systemd_libdir = os.path.dirname(systemd_unitdir)
@@ -192,12 +196,12 @@ do_install[postfuncs] += "rm_systemd_unitdir "
python rm_sysvinit_initddir (){
import shutil
- sysv_initddir = oe.path.join(d.getVar("D", True), (d.getVar('INIT_D_DIR', True) or "/etc/init.d"))
+ sysv_initddir = oe.path.join(d.getVar("D"), (d.getVar('INIT_D_DIR') or "/etc/init.d"))
if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and \
not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) and \
os.path.exists(sysv_initddir):
- systemd_system_unitdir = oe.path.join(d.getVar("D", True), d.getVar('systemd_system_unitdir', True))
+ systemd_system_unitdir = oe.path.join(d.getVar("D"), d.getVar('systemd_system_unitdir'))
# If systemd_system_unitdir contains anything, delete sysv_initddir
if (os.path.exists(systemd_system_unitdir) and os.listdir(systemd_system_unitdir)):
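The get_package_var() helper touched above implements a simple fallback: the per-package variable (e.g. SYSTEMD_SERVICE_<pkg>) wins, otherwise the bare variable is used. Restated standalone, with a minimal fake datastore for the usage example:

    def get_package_var(d, var, pkg):
        val = (d.getVar('%s_%s' % (var, pkg)) or "").strip()
        if val == "":
            val = (d.getVar(var) or "").strip()
        return val

    class FakeData(dict):
        def getVar(self, name):
            return self.get(name)

    d = FakeData(SYSTEMD_SERVICE="default.service",
                 SYSTEMD_SERVICE_foo="foo.service")
    assert get_package_var(d, "SYSTEMD_SERVICE", "foo") == "foo.service"
    assert get_package_var(d, "SYSTEMD_SERVICE", "bar") == "default.service"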
diff --git a/import-layers/yocto-poky/meta/classes/terminal.bbclass b/import-layers/yocto-poky/meta/classes/terminal.bbclass
index a94f755a4..a27e10c6e 100644
--- a/import-layers/yocto-poky/meta/classes/terminal.bbclass
+++ b/import-layers/yocto-poky/meta/classes/terminal.bbclass
@@ -3,7 +3,7 @@ OE_TERMINAL[type] = 'choice'
OE_TERMINAL[choices] = 'auto none \
${@oe_terminal_prioritized()}'
-OE_TERMINAL_EXPORTS += 'EXTRA_OEMAKE'
+OE_TERMINAL_EXPORTS += 'EXTRA_OEMAKE CACHED_CONFIGUREVARS CONFIGUREOPTS EXTRA_OECONF'
OE_TERMINAL_EXPORTS[type] = 'list'
XAUTHORITY ?= "${HOME}/.Xauthority"
@@ -19,9 +19,9 @@ def emit_terminal_func(command, envdata, d):
envdata.setVar(cmd_func, 'exec ' + command)
envdata.setVarFlag(cmd_func, 'func', '1')
- runfmt = d.getVar('BB_RUNFMT', True) or "run.{func}.{pid}"
+ runfmt = d.getVar('BB_RUNFMT') or "run.{func}.{pid}"
runfile = runfmt.format(func=cmd_func, task=cmd_func, taskfunc=cmd_func, pid=os.getpid())
- runfile = os.path.join(d.getVar('T', True), runfile)
+ runfile = os.path.join(d.getVar('T'), runfile)
bb.utils.mkdirhier(os.path.dirname(runfile))
with open(runfile, 'w') as script:
@@ -44,7 +44,7 @@ def oe_terminal(command, title, d):
envdata.setVarFlag(v, 'export', '1')
for export in oe.data.typed_value('OE_TERMINAL_EXPORTS', d):
- value = d.getVar(export, True)
+ value = d.getVar(export)
if value is not None:
os.environ[export] = str(value)
envdata.setVar(export, str(value))
@@ -60,12 +60,17 @@ def oe_terminal(command, title, d):
for key in origbbenv:
if key in envdata:
continue
- value = origbbenv.getVar(key, True)
+ value = origbbenv.getVar(key)
if value is not None:
os.environ[key] = str(value)
envdata.setVar(key, str(value))
envdata.setVarFlag(key, 'export', '1')
+ # Use original PATH as a fallback
+ path = d.getVar('PATH') + ":" + origbbenv.getVar('PATH')
+ os.environ['PATH'] = path
+ envdata.setVar('PATH', path)
+
# A complex PS1 might need more escaping of chars.
# Lets not export PS1 instead.
envdata.delVar("PS1")
@@ -88,8 +93,12 @@ def oe_terminal(command, title, d):
try:
oe.terminal.spawn_preferred(command, title, None, d)
- except oe.terminal.NoSupportedTerminals:
- bb.fatal('No valid terminal found, unable to open devshell')
+ except oe.terminal.NoSupportedTerminals as nosup:
+ nosup.terms.remove("false")
+ cmds = '\n\t'.join(nosup.terms).replace("{command}",
+ "do_terminal").replace("{title}", title)
+ bb.fatal('No valid terminal found, unable to open devshell.\n' +
+ 'Tried the following commands:\n\t%s' % cmds)
except oe.terminal.ExecutionError as exc:
bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc))
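The reworked NoSupportedTerminals handler above lists every terminal command that was tried. A standalone sketch of the message it builds (the command strings here are examples, not the actual contents of nosup.terms):

    terms = ["false",
             "gnome-terminal -t {title} -x {command}",
             "xterm -T {title} -e {command}"]
    terms.remove("false")      # drop the always-failing placeholder entry
    cmds = '\n\t'.join(terms).replace("{command}", "do_terminal") \
                             .replace("{title}", "devshell")
    print('No valid terminal found, unable to open devshell.\n'
          'Tried the following commands:\n\t%s' % cmds)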
diff --git a/import-layers/yocto-poky/meta/classes/testexport.bbclass b/import-layers/yocto-poky/meta/classes/testexport.bbclass
index 514702082..56edda994 100644
--- a/import-layers/yocto-poky/meta/classes/testexport.bbclass
+++ b/import-layers/yocto-poky/meta/classes/testexport.bbclass
@@ -33,162 +33,136 @@ TEST_EXPORT_DEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-nativ
TEST_EXPORT_DEPENDS += "${@bb.utils.contains('TEST_EXPORT_SDK_ENABLED', '1', 'testexport-tarball:do_populate_sdk', '', d)}"
TEST_EXPORT_LOCK = "${TMPDIR}/testimage.lock"
-python do_testexport() {
- testexport_main(d)
-}
-
addtask testexport
do_testexport[nostamp] = "1"
do_testexport[depends] += "${TEST_EXPORT_DEPENDS} ${TESTIMAGEDEPENDS}"
do_testexport[lockfiles] += "${TEST_EXPORT_LOCK}"
-def exportTests(d,tc):
+python do_testexport() {
+ testexport_main(d)
+}
+
+def testexport_main(d):
import json
+ import logging
+
+ from oeqa.runtime.context import OERuntimeTestContext
+ from oeqa.runtime.context import OERuntimeTestContextExecutor
+
+ image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
+ d.getVar('IMAGE_LINK_NAME')))
+
+ tdname = "%s.testdata.json" % image_name
+ td = json.load(open(tdname, "r"))
+
+ logger = logging.getLogger("BitBake")
+
+ target = OERuntimeTestContextExecutor.getTarget(
+ d.getVar("TEST_TARGET"), None, d.getVar("TEST_TARGET_IP"),
+ d.getVar("TEST_SERVER_IP"))
+
+ host_dumper = OERuntimeTestContextExecutor.getHostDumper(
+ d.getVar("testimage_dump_host"), d.getVar("TESTIMAGE_DUMP_DIR"))
+
+ image_manifest = "%s.manifest" % image_name
+ image_packages = OERuntimeTestContextExecutor.readPackagesManifest(image_manifest)
+
+ extract_dir = d.getVar("TEST_EXTRACTED_DIR")
+
+ tc = OERuntimeTestContext(td, logger, target, host_dumper,
+ image_packages, extract_dir)
+
+ copy_needed_files(d, tc)
+
+def copy_needed_files(d, tc):
import shutil
- import pkgutil
- import re
import oe.path
- exportpath = d.getVar("TEST_EXPORT_DIR", True)
-
- savedata = {}
- savedata["d"] = {}
- savedata["target"] = {}
- savedata["target"]["ip"] = tc.target.ip or d.getVar("TEST_TARGET_IP", True)
- savedata["target"]["server_ip"] = tc.target.server_ip or d.getVar("TEST_SERVER_IP", True)
-
- keys = [ key for key in d.keys() if not key.startswith("_") and not key.startswith("BB") \
- and not key.startswith("B_pn") and not key.startswith("do_") and not d.getVarFlag(key, "func", True)]
- for key in keys:
- try:
- savedata["d"][key] = d.getVar(key, True)
- except bb.data_smart.ExpansionError:
- # we don't care about those anyway
- pass
-
- json_file = os.path.join(exportpath, "testdata.json")
- with open(json_file, "w") as f:
- json.dump(savedata, f, skipkeys=True, indent=4, sort_keys=True)
-
- # Replace absolute path with relative in the file
- exclude_path = os.path.join(d.getVar("COREBASE", True),'meta','lib','oeqa')
- f1 = open(json_file,'r').read()
- f2 = open(json_file,'w')
- m = f1.replace(exclude_path,'oeqa')
- f2.write(m)
- f2.close()
-
- # now start copying files
- # we'll basically copy everything under meta/lib/oeqa, with these exceptions
- # - oeqa/targetcontrol.py - not needed
- # - oeqa/selftest - something else
- # That means:
- # - all tests from oeqa/runtime defined in TEST_SUITES (including from other layers)
- # - the contents of oeqa/utils and oeqa/runtime/files
- # - oeqa/oetest.py and oeqa/runexport.py (this will get copied to exportpath not exportpath/oeqa)
- # - __init__.py files
- bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/runtime/files"))
- bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/utils"))
- # copy test modules, this should cover tests in other layers too
- bbpath = d.getVar("BBPATH", True).split(':')
- for t in tc.testslist:
- isfolder = False
- if re.search("\w+\.\w+\.test_\S+", t):
- t = '.'.join(t.split('.')[:3])
- mod = pkgutil.get_loader(t)
- # More depth than usual?
- if (t.count('.') > 2):
- for p in bbpath:
- foldername = os.path.join(p, 'lib', os.sep.join(t.split('.')).rsplit(os.sep, 1)[0])
- if os.path.isdir(foldername):
- isfolder = True
- target_folder = os.path.join(exportpath, "oeqa", "runtime", os.path.basename(foldername))
- if not os.path.exists(target_folder):
- oe.path.copytree(foldername, target_folder)
- if not isfolder:
- shutil.copy2(mod.path, os.path.join(exportpath, "oeqa/runtime"))
- json_file = "%s.json" % mod.path.rsplit(".", 1)[0]
- if os.path.isfile(json_file):
- shutil.copy2(json_file, os.path.join(exportpath, "oeqa/runtime"))
- # Get meta layer
- for layer in d.getVar("BBLAYERS", True).split():
- if os.path.basename(layer) == "meta":
- meta_layer = layer
- break
- # copy oeqa/oetest.py and oeqa/runexported.py
- oeqadir = os.path.join(meta_layer, "lib/oeqa")
- shutil.copy2(os.path.join(oeqadir, "oetest.py"), os.path.join(exportpath, "oeqa"))
- shutil.copy2(os.path.join(oeqadir, "runexported.py"), exportpath)
- # copy oeqa/utils/*.py
- for root, dirs, files in os.walk(os.path.join(oeqadir, "utils")):
- for f in files:
- if f.endswith(".py"):
- shutil.copy2(os.path.join(root, f), os.path.join(exportpath, "oeqa/utils"))
- # copy oeqa/runtime/files/*
- for root, dirs, files in os.walk(os.path.join(oeqadir, "runtime/files")):
- for f in files:
- shutil.copy2(os.path.join(root, f), os.path.join(exportpath, "oeqa/runtime/files"))
+ from oeqa.utils.package_manager import _get_json_file
+ from oeqa.core.utils.test import getSuiteCasesFiles
+
+ export_path = d.getVar('TEST_EXPORT_DIR')
+ corebase_path = d.getVar('COREBASE')
+
+ # Clean everything before starting
+ oe.path.remove(export_path)
+ bb.utils.mkdirhier(os.path.join(export_path, 'lib', 'oeqa'))
+
+ # The source of files to copy are relative to 'COREBASE' directory
+ # The destination is relative to 'TEST_EXPORT_DIR'
+ # Because we are squashing the libraries, we need to remove
+ # the layer/script directory
+ files_to_copy = [ os.path.join('meta', 'lib', 'oeqa', 'core'),
+ os.path.join('meta', 'lib', 'oeqa', 'runtime'),
+ os.path.join('meta', 'lib', 'oeqa', 'files'),
+ os.path.join('meta', 'lib', 'oeqa', 'utils'),
+ os.path.join('scripts', 'oe-test'),
+ os.path.join('scripts', 'lib', 'argparse_oe.py'),
+ os.path.join('scripts', 'lib', 'scriptutils.py'), ]
+
+ for f in files_to_copy:
+ src = os.path.join(corebase_path, f)
+ dst = os.path.join(export_path, f.split('/', 1)[-1])
+ if os.path.isdir(src):
+ oe.path.copytree(src, dst)
+ else:
+ shutil.copy2(src, dst)
+
+ # Remove cases and just copy the ones specified
+ cases_path = os.path.join(export_path, 'lib', 'oeqa', 'runtime', 'cases')
+ oe.path.remove(cases_path)
+ bb.utils.mkdirhier(cases_path)
+ test_paths = get_runtime_paths(d)
+ test_modules = d.getVar('TEST_SUITES')
+ tc.loadTests(test_paths, modules=test_modules)
+ for f in getSuiteCasesFiles(tc.suites):
+ shutil.copy2(f, cases_path)
+ json_file = _get_json_file(f)
+ if json_file:
+ shutil.copy2(json_file, cases_path)
+
+ # Copy test data
+ image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
+ d.getVar('IMAGE_LINK_NAME')))
+ image_manifest = "%s.manifest" % image_name
+ tdname = "%s.testdata.json" % image_name
+ test_data_path = os.path.join(export_path, 'data')
+ bb.utils.mkdirhier(test_data_path)
+ shutil.copy2(image_manifest, os.path.join(test_data_path, 'manifest'))
+ shutil.copy2(tdname, os.path.join(test_data_path, 'testdata.json'))
# Create tar file for common parts of testexport
- create_tarball(d, "testexport.tar.gz", d.getVar("TEST_EXPORT_DIR", True))
+ create_tarball(d, "testexport.tar.gz", d.getVar("TEST_EXPORT_DIR"))
# Copy packages needed for runtime testing
- test_pkg_dir = d.getVar("TEST_NEEDED_PACKAGES_DIR", True)
- if os.listdir(test_pkg_dir):
- export_pkg_dir = os.path.join(d.getVar("TEST_EXPORT_DIR", True), "packages")
+ package_extraction(d, tc.suites)
+ test_pkg_dir = d.getVar("TEST_NEEDED_PACKAGES_DIR")
+ if os.path.isdir(test_pkg_dir) and os.listdir(test_pkg_dir):
+ export_pkg_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"), "packages")
oe.path.copytree(test_pkg_dir, export_pkg_dir)
# Create tar file for packages needed by the DUT
- create_tarball(d, "testexport_packages_%s.tar.gz" % d.getVar("MACHINE", True), export_pkg_dir)
+ create_tarball(d, "testexport_packages_%s.tar.gz" % d.getVar("MACHINE"), export_pkg_dir)
# Copy SDK
- if d.getVar("TEST_EXPORT_SDK_ENABLED", True) == "1":
- sdk_deploy = d.getVar("SDK_DEPLOY", True)
- tarball_name = "%s.sh" % d.getVar("TEST_EXPORT_SDK_NAME", True)
+ if d.getVar("TEST_EXPORT_SDK_ENABLED") == "1":
+ sdk_deploy = d.getVar("SDK_DEPLOY")
+ tarball_name = "%s.sh" % d.getVar("TEST_EXPORT_SDK_NAME")
tarball_path = os.path.join(sdk_deploy, tarball_name)
- export_sdk_dir = os.path.join(d.getVar("TEST_EXPORT_DIR", True),
- d.getVar("TEST_EXPORT_SDK_DIR", True))
+ export_sdk_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"),
+ d.getVar("TEST_EXPORT_SDK_DIR"))
bb.utils.mkdirhier(export_sdk_dir)
shutil.copy2(tarball_path, export_sdk_dir)
# Create tar file for the sdk
- create_tarball(d, "testexport_sdk_%s.tar.gz" % d.getVar("SDK_ARCH", True), export_sdk_dir)
-
- bb.plain("Exported tests to: %s" % exportpath)
+ create_tarball(d, "testexport_sdk_%s.tar.gz" % d.getVar("SDK_ARCH"), export_sdk_dir)
-def testexport_main(d):
- from oeqa.oetest import ExportTestContext
- from oeqa.targetcontrol import get_target_controller
- from oeqa.utils.dump import get_host_dumper
-
- test_create_extract_dirs(d)
- export_dir = d.getVar("TEST_EXPORT_DIR", True)
- bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
- bb.utils.remove(export_dir, recurse=True)
- bb.utils.mkdirhier(export_dir)
-
- # the robot dance
- target = get_target_controller(d)
-
- # test context
- tc = ExportTestContext(d, target)
-
- # this is a dummy load of tests
- # we are doing that to find compile errors in the tests themselves
- # before booting the image
- try:
- tc.loadTests()
- except Exception as e:
- import traceback
- bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
-
- tc.extract_packages()
- exportTests(d,tc)
+ bb.plain("Exported tests to: %s" % export_path)
def create_tarball(d, tar_name, src_dir):
import tarfile
- tar_path = os.path.join(d.getVar("TEST_EXPORT_DIR", True), tar_name)
+ tar_path = os.path.join(d.getVar("TEST_EXPORT_DIR"), tar_name)
current_dir = os.getcwd()
src_dir = src_dir.rstrip('/')
dir_name = os.path.dirname(src_dir)
@@ -200,7 +174,4 @@ def create_tarball(d, tar_name, src_dir):
tar.close()
os.chdir(current_dir)
-
-testexport_main[vardepsexclude] =+ "BB_ORIGENV"
-
inherit testimage
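The rewritten export above copies the oeqa libraries and helper scripts relative to COREBASE into TEST_EXPORT_DIR, squashing the leading layer/script directory with split('/', 1)[-1]. A sketch of the resulting layout (the destination directory is an example):

    import os

    files_to_copy = [os.path.join('meta', 'lib', 'oeqa', 'utils'),
                     os.path.join('scripts', 'oe-test')]
    for f in files_to_copy:
        dst = os.path.join('/tmp/testexport', f.split('/', 1)[-1])
        print(f, '->', dst)
    # meta/lib/oeqa/utils -> /tmp/testexport/lib/oeqa/utils
    # scripts/oe-test -> /tmp/testexport/oe-test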
diff --git a/import-layers/yocto-poky/meta/classes/testimage.bbclass b/import-layers/yocto-poky/meta/classes/testimage.bbclass
index 6b6781d86..fb214604a 100644
--- a/import-layers/yocto-poky/meta/classes/testimage.bbclass
+++ b/import-layers/yocto-poky/meta/classes/testimage.bbclass
@@ -35,9 +35,10 @@ TEST_NEEDED_PACKAGES_DIR ?= "${WORKDIR}/testimage/packages"
TEST_EXTRACTED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/extracted"
TEST_PACKAGED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/packaged"
-RPMTESTSUITE = "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'smart rpm', '', d)}"
+RPMTESTSUITE = "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'dnf rpm', '', d)}"
+SYSTEMDSUITE = "${@bb.utils.filter('DISTRO_FEATURES', 'systemd', d)}"
MINTESTSUITE = "ping"
-NETTESTSUITE = "${MINTESTSUITE} ssh df date scp syslog"
+NETTESTSUITE = "${MINTESTSUITE} ssh df date scp oe_syslog ${SYSTEMDSUITE}"
DEVTESTSUITE = "gcc kernelmodule ldd"
DEFAULT_TEST_SUITES = "${MINTESTSUITE} auto"
@@ -48,11 +49,11 @@ DEFAULT_TEST_SUITES_pn-core-image-x11 = "${MINTESTSUITE}"
DEFAULT_TEST_SUITES_pn-core-image-lsb = "${NETTESTSUITE} pam parselogs ${RPMTESTSUITE}"
DEFAULT_TEST_SUITES_pn-core-image-sato = "${NETTESTSUITE} connman xorg parselogs ${RPMTESTSUITE} \
${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'python', '', d)}"
-DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "${NETTESTSUITE} connman xorg perl python \
- ${DEVTESTSUITE} parselogs ${RPMTESTSUITE}"
+DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "${NETTESTSUITE} buildcpio buildiptables buildgalculator \
+ connman ${DEVTESTSUITE} logrotate perl parselogs python ${RPMTESTSUITE} xorg"
DEFAULT_TEST_SUITES_pn-core-image-lsb-dev = "${NETTESTSUITE} pam perl python parselogs ${RPMTESTSUITE}"
-DEFAULT_TEST_SUITES_pn-core-image-lsb-sdk = "${NETTESTSUITE} buildcvs buildiptables buildgalculator \
- connman ${DEVTESTSUITE} pam perl python parselogs ${RPMTESTSUITE}"
+DEFAULT_TEST_SUITES_pn-core-image-lsb-sdk = "${NETTESTSUITE} buildcpio buildiptables buildgalculator \
+ connman ${DEVTESTSUITE} logrotate pam parselogs perl python ${RPMTESTSUITE}"
DEFAULT_TEST_SUITES_pn-meta-toolchain = "auto"
# aarch64 has no graphics
@@ -60,7 +61,7 @@ DEFAULT_TEST_SUITES_remove_aarch64 = "xorg"
# qemumips is quite slow and has reached the timeout limit several times on the YP build cluster,
# mitigate this by removing build tests for qemumips machines.
-MIPSREMOVE ??= "buildcvs buildiptables buildgalculator"
+MIPSREMOVE ??= "buildcpio buildiptables buildgalculator"
DEFAULT_TEST_SUITES_remove_qemumips = "${MIPSREMOVE}"
DEFAULT_TEST_SUITES_remove_qemumips64 = "${MIPSREMOVE}"
@@ -70,20 +71,22 @@ TEST_QEMUBOOT_TIMEOUT ?= "1000"
TEST_TARGET ?= "qemu"
TESTIMAGEDEPENDS = ""
-TESTIMAGEDEPENDS_qemuall = "qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot"
+TESTIMAGEDEPENDS_qemuall = "qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot"
TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
TESTIMAGEDEPENDS_qemuall += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
-TESTIMAGEDEPENDS_qemuall += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-native:do_populate_sysroot', '', d)}"
-TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'python-smartpm-native:do_populate_sysroot', '', d)}"
+TESTIMAGEDEPENDS_qemuall += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-c-native:do_populate_sysroot', '', d)}"
+TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'dnf-native:do_populate_sysroot', '', d)}"
TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg-utils-native:do_populate_sysroot', '', d)}"
TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'deb', 'apt-native:do_populate_sysroot', '', d)}"
-
+TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-c-native:do_populate_sysroot', '', d)}"
TESTIMAGELOCK = "${TMPDIR}/testimage.lock"
TESTIMAGELOCK_qemuall = ""
TESTIMAGE_DUMP_DIR ?= "/tmp/oe-saved-tests/"
+TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR"
+
testimage_dump_target () {
top -bn1
ps
@@ -112,6 +115,13 @@ testimage_dump_host () {
}
python do_testimage() {
+
+ testimage_sanity(d)
+
+ if (d.getVar('IMAGE_PKGTYPE') == 'rpm'
+ and 'dnf' in d.getVar('TEST_SUITES')):
+ create_rpm_index(d)
+
testimage_main(d)
}
@@ -120,72 +130,244 @@ do_testimage[nostamp] = "1"
do_testimage[depends] += "${TESTIMAGEDEPENDS}"
do_testimage[lockfiles] += "${TESTIMAGELOCK}"
+def testimage_sanity(d):
+ if (d.getVar('TEST_TARGET') == 'simpleremote'
+ and (not d.getVar('TEST_TARGET_IP')
+ or not d.getVar('TEST_SERVER_IP'))):
+ bb.fatal('When TEST_TARGET is set to "simpleremote" '
+ 'TEST_TARGET_IP and TEST_SERVER_IP are needed too.')
+
def testimage_main(d):
- import unittest
import os
- import oeqa.runtime
- import time
+ import json
import signal
- from oeqa.oetest import ImageTestContext
- from oeqa.targetcontrol import get_target_controller
- from oeqa.utils.dump import get_host_dumper
+ import logging
+
+ from bb.utils import export_proxies
+ from oeqa.core.utils.misc import updateTestData
+ from oeqa.runtime.context import OERuntimeTestContext
+ from oeqa.runtime.context import OERuntimeTestContextExecutor
+ from oeqa.core.target.qemu import supported_fstypes
+ from oeqa.core.utils.test import getSuiteCases
+ from oeqa.utils import make_logger_bitbake_compatible
+
+ def sigterm_exception(signum, stackframe):
+ """
+ Catch SIGTERM from worker in order to stop qemu.
+ """
+ raise RuntimeError
+
+ logger = make_logger_bitbake_compatible(logging.getLogger("BitBake"))
+ pn = d.getVar("PN")
+
+ bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR"))
+
+ image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
+ d.getVar('IMAGE_LINK_NAME')))
+
+ tdname = "%s.testdata.json" % image_name
+ try:
+ td = json.load(open(tdname, "r"))
+ except (FileNotFoundError) as err:
+ bb.fatal('File %s Not Found. Have you built the image with INHERIT+="testimage" in the conf/local.conf?' % tdname)
+
+    # Some variables need to be updated (mostly paths) with the values
+    # from the current environment because some tests require them.
+ updateTestData(d, td, d.getVar('TESTIMAGE_UPDATE_VARS').split())
+
+ image_manifest = "%s.manifest" % image_name
+ image_packages = OERuntimeTestContextExecutor.readPackagesManifest(image_manifest)
+
+ extract_dir = d.getVar("TEST_EXTRACTED_DIR")
+
+ # Get machine
+ machine = d.getVar("MACHINE")
+
+ # Get rootfs
+ fstypes = [fs for fs in d.getVar('IMAGE_FSTYPES').split(' ')
+ if fs in supported_fstypes]
+ if not fstypes:
+        bb.fatal('Unsupported image type built. Add a compatible image to '
+ 'IMAGE_FSTYPES. Supported types: %s' %
+ ', '.join(supported_fstypes))
+ rootfs = '%s.%s' % (image_name, fstypes[0])
+
+ # Get tmpdir (not really used, just for compatibility)
+ tmpdir = d.getVar("TMPDIR")
+
+ # Get deploy_dir_image (not really used, just for compatibility)
+ dir_image = d.getVar("DEPLOY_DIR_IMAGE")
+
+ # Get bootlog
+ bootlog = os.path.join(d.getVar("TEST_LOG_DIR"),
+ 'qemu_boot_log.%s' % d.getVar('DATETIME'))
+
+ # Get display
+ display = d.getVar("BB_ORIGENV").getVar("DISPLAY")
- pn = d.getVar("PN", True)
- bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
- test_create_extract_dirs(d)
+ # Get kernel
+ kernel_name = ('%s-%s.bin' % (d.getVar("KERNEL_IMAGETYPE"), machine))
+ kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), kernel_name)
+
+ # Get boottime
+ boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT"))
+
+ # Get use_kvm
+ qemu_use_kvm = d.getVar("QEMU_USE_KVM")
+ if qemu_use_kvm and \
+ (qemu_use_kvm == 'True' and 'x86' in machine or \
+ d.getVar('MACHINE') in qemu_use_kvm.split()):
+ kvm = True
+ else:
+ kvm = False
+
+    # TODO: We use the current implementation of the qemu runner because of
+    # time constraints; the qemu runner really needs a refactor too.
+ target_kwargs = { 'machine' : machine,
+ 'rootfs' : rootfs,
+ 'tmpdir' : tmpdir,
+ 'dir_image' : dir_image,
+ 'display' : display,
+ 'kernel' : kernel,
+ 'boottime' : boottime,
+ 'bootlog' : bootlog,
+ 'kvm' : kvm,
+ }
+
+ # TODO: Currently BBPATH is needed for custom loading of targets.
+    # It would be better to find these modules using introspection.
+ target_kwargs['target_modules_path'] = d.getVar('BBPATH')
+
+    # runtime tests use the network to download projects for building
+ export_proxies(d)
# we need the host dumper in test context
- host_dumper = get_host_dumper(d)
+ host_dumper = OERuntimeTestContextExecutor.getHostDumper(
+ d.getVar("testimage_dump_host"),
+ d.getVar("TESTIMAGE_DUMP_DIR"))
# the robot dance
- target = get_target_controller(d)
+ target = OERuntimeTestContextExecutor.getTarget(
+ d.getVar("TEST_TARGET"), None, d.getVar("TEST_TARGET_IP"),
+ d.getVar("TEST_SERVER_IP"), **target_kwargs)
# test context
- tc = ImageTestContext(d, target, host_dumper)
+ tc = OERuntimeTestContext(td, logger, target, host_dumper,
+ image_packages, extract_dir)
- # this is a dummy load of tests
- # we are doing that to find compile errors in the tests themselves
- # before booting the image
- try:
- tc.loadTests()
- except Exception as e:
- import traceback
- bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
+ # Load tests before starting the target
+ test_paths = get_runtime_paths(d)
+ test_modules = d.getVar('TEST_SUITES')
+ tc.loadTests(test_paths, modules=test_modules)
- tc.extract_packages()
- target.deploy()
+ if not getSuiteCases(tc.suites):
+ bb.fatal('Empty test suite, please verify TEST_SUITES variable')
+
+ package_extraction(d, tc.suites)
+
+ bootparams = None
+ if d.getVar('VIRTUAL-RUNTIME_init_manager', '') == 'systemd':
+ # Add systemd.log_level=debug to enable systemd debug logging
+ bootparams = 'systemd.log_target=console'
+
+ results = None
+ orig_sigterm_handler = signal.signal(signal.SIGTERM, sigterm_exception)
try:
- bootparams = None
- if d.getVar('VIRTUAL-RUNTIME_init_manager', '') == 'systemd':
- bootparams = 'systemd.log_level=debug systemd.log_target=console'
- target.start(extra_bootparams=bootparams)
- starttime = time.time()
- result = tc.runTests()
- stoptime = time.time()
- if result.wasSuccessful():
- bb.plain("%s - Ran %d test%s in %.3fs" % (pn, result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime))
- msg = "%s - OK - All required tests passed" % pn
- skipped = len(result.skipped)
- if skipped:
- msg += " (skipped=%d)" % skipped
- bb.plain(msg)
+ # We need to check if runqemu ends unexpectedly
+ # or if the worker send us a SIGTERM
+ tc.target.start(extra_bootparams=bootparams)
+ results = tc.runTests()
+ except (RuntimeError, BlockingIOError) as err:
+ if isinstance(err, RuntimeError):
+ bb.error('testimage received SIGTERM, shutting down...')
else:
- bb.fatal("%s - FAILED - check the task log and the ssh log" % pn)
+ bb.error('runqemu failed, shutting down...')
+ if results:
+ results.stop()
+ results = None
finally:
- signal.signal(signal.SIGTERM, tc.origsigtermhandler)
- target.stop()
+ signal.signal(signal.SIGTERM, orig_sigterm_handler)
+ tc.target.stop()
+
+ # Show results (if we have them)
+ if not results:
+ bb.fatal('%s - FAILED - tests were interrupted during execution' % pn)
+ tc.logSummary(results, pn)
+ tc.logDetails()
+ if not results.wasSuccessful():
+ bb.fatal('%s - FAILED - check the task log and the ssh log' % pn)
+
+def get_runtime_paths(d):
+ """
+    Returns a list of paths where runtime tests must reside.
+
+ Runtime tests are expected in <LAYER_DIR>/lib/oeqa/runtime/cases/
+ """
+ paths = []
+
+ for layer in d.getVar('BBLAYERS').split():
+ path = os.path.join(layer, 'lib/oeqa/runtime/cases')
+ if os.path.isdir(path):
+ paths.append(path)
+ return paths
+
+def create_index(arg):
+ import subprocess
+
+ index_cmd = arg
+ try:
+ bb.note("Executing '%s' ..." % index_cmd)
+ result = subprocess.check_output(index_cmd,
+ stderr=subprocess.STDOUT,
+ shell=True)
+ result = result.decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ return("Index creation command '%s' failed with return code "
+ '%d:\n%s' % (e.cmd, e.returncode, e.output.decode("utf-8")))
+ if result:
+ bb.note(result)
+ return None
+
+def create_rpm_index(d):
+ # Index RPMs
+ rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo_c")
+ index_cmds = []
+ archs = (d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or '').replace('-', '_')
+
+ for arch in archs.split():
+ rpm_dir = os.path.join(d.getVar('DEPLOY_DIR_RPM'), arch)
+ idx_path = os.path.join(d.getVar('WORKDIR'), 'oe-testimage-repo', arch)
+
+ if not os.path.isdir(rpm_dir):
+ continue
+
+ lockfilename = os.path.join(d.getVar('DEPLOY_DIR_RPM'), 'rpm.lock')
+ lf = bb.utils.lockfile(lockfilename, False)
+ oe.path.copyhardlinktree(rpm_dir, idx_path)
+ # Full indexes overload a 256MB image so reduce the number of rpms
+ # in the feed. Filter to r* since we use the run-postinst packages and
+ # this leaves some allarch and machine arch packages too.
+ bb.utils.remove(idx_path + "*/[a-qs-z]*.rpm")
+ bb.utils.unlockfile(lf)
+ cmd = '%s --update -q %s' % (rpm_createrepo, idx_path)
+
+ # Create repodata
+ result = create_index(cmd)
+ if result:
+            bb.fatal(result)
-def test_create_extract_dirs(d):
- install_path = d.getVar("TEST_INSTALL_TMP_DIR", True)
- package_path = d.getVar("TEST_PACKAGED_DIR", True)
- extracted_path = d.getVar("TEST_EXTRACTED_DIR", True)
- bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
- bb.utils.remove(package_path, recurse=True)
- bb.utils.mkdirhier(install_path)
- bb.utils.mkdirhier(package_path)
- bb.utils.mkdirhier(extracted_path)
+def package_extraction(d, test_suites):
+ from oeqa.utils.package_manager import find_packages_to_extract
+ from oeqa.utils.package_manager import extract_packages
+ bb.utils.remove(d.getVar("TEST_NEEDED_PACKAGES_DIR"), recurse=True)
+ packages = find_packages_to_extract(test_suites)
+ if packages:
+ bb.utils.mkdirhier(d.getVar("TEST_INSTALL_TMP_DIR"))
+ bb.utils.mkdirhier(d.getVar("TEST_PACKAGED_DIR"))
+ bb.utils.mkdirhier(d.getVar("TEST_EXTRACTED_DIR"))
+ extract_packages(d, packages)
-testimage_main[vardepsexclude] =+ "BB_ORIGENV"
+testimage_main[vardepsexclude] += "BB_ORIGENV DATETIME"
inherit testsdk
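The testimage_main code above saves the original SIGTERM handler, installs one that raises RuntimeError so a worker-initiated shutdown unwinds through the except and finally blocks, and restores the old handler no matter how the tests end. A minimal sketch of that save/raise/restore pattern outside BitBake (run_tests is a hypothetical stand-in for tc.runTests()):

    import signal
    import time

    def sigterm_exception(signum, stackframe):
        raise RuntimeError("received SIGTERM")

    def run_tests():
        time.sleep(2)      # stand-in for the real test run

    orig_sigterm_handler = signal.getsignal(signal.SIGTERM)
    signal.signal(signal.SIGTERM, sigterm_exception)
    try:
        run_tests()
    except RuntimeError:
        print("testimage received SIGTERM, shutting down...")
    finally:
        # restore the previous handler even if the tests blew up
        signal.signal(signal.SIGTERM, orig_sigterm_handler)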
diff --git a/import-layers/yocto-poky/meta/classes/testsdk.bbclass b/import-layers/yocto-poky/meta/classes/testsdk.bbclass
index 43342b1f2..6a201aa41 100644
--- a/import-layers/yocto-poky/meta/classes/testsdk.bbclass
+++ b/import-layers/yocto-poky/meta/classes/testsdk.bbclass
@@ -14,66 +14,72 @@
#
# where "<image-name>" is an image like core-image-sato.
-TEST_LOG_DIR ?= "${WORKDIR}/testimage"
-TESTSDKLOCK = "${TMPDIR}/testsdk.lock"
-
-def run_test_context(CTestContext, d, testdir, tcname, pn, *args):
- import glob
- import time
-
- targets = glob.glob(d.expand(testdir + "/tc/environment-setup-*"))
- for sdkenv in targets:
- bb.plain("Testing %s" % sdkenv)
- tc = CTestContext(d, testdir, sdkenv, tcname, args)
-
- # this is a dummy load of tests
- # we are doing that to find compile errors in the tests themselves
- # before booting the image
- try:
- tc.loadTests()
- except Exception as e:
- import traceback
- bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
-
- starttime = time.time()
- result = tc.runTests()
- stoptime = time.time()
- if result.wasSuccessful():
- bb.plain("%s SDK(%s):%s - Ran %d test%s in %.3fs" % (pn, os.path.basename(tcname), os.path.basename(sdkenv),result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime))
- msg = "%s - OK - All required tests passed" % pn
- skipped = len(result.skipped)
- if skipped:
- msg += " (skipped=%d)" % skipped
- bb.plain(msg)
- else:
- bb.fatal("%s - FAILED - check the task log and the commands log" % pn)
-
def testsdk_main(d):
import os
- import oeqa.sdk
import subprocess
- from oeqa.oetest import SDKTestContext
+ import json
+ import logging
- pn = d.getVar("PN", True)
- bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
+ from bb.utils import export_proxies
+ from oeqa.core.runner import OEStreamLogger
+ from oeqa.sdk.context import OESDKTestContext, OESDKTestContextExecutor
+ from oeqa.utils import make_logger_bitbake_compatible
+
+ pn = d.getVar("PN")
+ logger = make_logger_bitbake_compatible(logging.getLogger("BitBake"))
+
+ # SDK tests use the network to download projects to build
+ export_proxies(d)
tcname = d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh")
if not os.path.exists(tcname):
- bb.fatal("The toolchain is not built. Build it before running the tests: 'bitbake <image> -c populate_sdk' .")
+ bb.fatal("The toolchain %s is not built. Build it before running the tests: 'bitbake <image> -c populate_sdk' ." % tcname)
+
+ tdname = d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.testdata.json")
+ test_data = json.load(open(tdname, "r"))
+
+ target_pkg_manifest = OESDKTestContextExecutor._load_manifest(
+ d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.target.manifest"))
+ host_pkg_manifest = OESDKTestContextExecutor._load_manifest(
+ d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"))
- sdktestdir = d.expand("${WORKDIR}/testimage-sdk/")
- bb.utils.remove(sdktestdir, True)
- bb.utils.mkdirhier(sdktestdir)
+ sdk_dir = d.expand("${WORKDIR}/testimage-sdk/")
+ bb.utils.remove(sdk_dir, True)
+ bb.utils.mkdirhier(sdk_dir)
try:
- subprocess.check_output("cd %s; %s <<EOF\n./tc\nY\nEOF" % (sdktestdir, tcname), shell=True)
+ subprocess.check_output("cd %s; %s <<EOF\n./\nY\nEOF" % (sdk_dir, tcname), shell=True)
except subprocess.CalledProcessError as e:
bb.fatal("Couldn't install the SDK:\n%s" % e.output.decode("utf-8"))
- try:
- run_test_context(SDKTestContext, d, sdktestdir, tcname, pn)
- finally:
- bb.utils.remove(sdktestdir, True)
+ fail = False
+ sdk_envs = OESDKTestContextExecutor._get_sdk_environs(sdk_dir)
+ for s in sdk_envs:
+ sdk_env = sdk_envs[s]
+ bb.plain("SDK testing environment: %s" % s)
+ tc = OESDKTestContext(td=test_data, logger=logger, sdk_dir=sdk_dir,
+ sdk_env=sdk_env, target_pkg_manifest=target_pkg_manifest,
+ host_pkg_manifest=host_pkg_manifest)
+
+ try:
+ tc.loadTests(OESDKTestContextExecutor.default_cases)
+ except Exception as e:
+ import traceback
+ bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
+
+ result = tc.runTests()
+
+ component = "%s %s" % (pn, OESDKTestContextExecutor.name)
+ context_msg = "%s:%s" % (os.path.basename(tcname), os.path.basename(sdk_env))
+ tc.logSummary(result, component, context_msg)
+ tc.logDetails()
+
+ if not result.wasSuccessful():
+ fail = True
+
+ if fail:
+ bb.fatal("%s - FAILED - check the task log and the commands log" % pn)
+
testsdk_main[vardepsexclude] =+ "BB_ORIGENV"
python do_testsdk() {
@@ -81,46 +87,52 @@ python do_testsdk() {
}
addtask testsdk
do_testsdk[nostamp] = "1"
-do_testsdk[lockfiles] += "${TESTSDKLOCK}"
-
-TEST_LOG_SDKEXT_DIR ?= "${WORKDIR}/testsdkext"
-TESTSDKEXTLOCK = "${TMPDIR}/testsdkext.lock"
def testsdkext_main(d):
import os
- import oeqa.sdkext
+ import json
import subprocess
+ import logging
+
from bb.utils import export_proxies
- from oeqa.oetest import SDKTestContext, SDKExtTestContext
- from oeqa.utils import avoid_paths_in_environ
+ from oeqa.utils import avoid_paths_in_environ, make_logger_bitbake_compatible, subprocesstweak
+ from oeqa.sdkext.context import OESDKExtTestContext, OESDKExtTestContextExecutor
+ pn = d.getVar("PN")
+ logger = make_logger_bitbake_compatible(logging.getLogger("BitBake"))
# the extensible SDK uses the network
export_proxies(d)
+ subprocesstweak.errors_have_output()
+
# extensible sdk can be contaminated if native programs are
# in PATH, i.e. it would use perl-native instead of the eSDK one.
- paths_to_avoid = [d.getVar('STAGING_DIR', True),
- d.getVar('BASE_WORKDIR', True)]
+ paths_to_avoid = [d.getVar('STAGING_DIR'),
+ d.getVar('BASE_WORKDIR')]
os.environ['PATH'] = avoid_paths_in_environ(paths_to_avoid)
- pn = d.getVar("PN", True)
- bb.utils.mkdirhier(d.getVar("TEST_LOG_SDKEXT_DIR", True))
-
tcname = d.expand("${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.sh")
if not os.path.exists(tcname):
- bb.fatal("The toolchain ext is not built. Build it before running the" \
- " tests: 'bitbake <image> -c populate_sdk_ext' .")
+ bb.fatal("The toolchain ext %s is not built. Build it before running the" \
+ " tests: 'bitbake <image> -c populate_sdk_ext' ." % tcname)
- testdir = d.expand("${WORKDIR}/testsdkext/")
- bb.utils.remove(testdir, True)
- bb.utils.mkdirhier(testdir)
- sdkdir = os.path.join(testdir, 'tc')
+ tdname = d.expand("${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.testdata.json")
+ test_data = json.load(open(tdname, "r"))
+
+ target_pkg_manifest = OESDKExtTestContextExecutor._load_manifest(
+ d.expand("${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"))
+ host_pkg_manifest = OESDKExtTestContextExecutor._load_manifest(
+ d.expand("${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"))
+
+ sdk_dir = d.expand("${WORKDIR}/testsdkext/")
+ bb.utils.remove(sdk_dir, True)
+ bb.utils.mkdirhier(sdk_dir)
try:
- subprocess.check_output("%s -y -d %s" % (tcname, sdkdir), shell=True)
+ subprocess.check_output("%s -y -d %s" % (tcname, sdk_dir), shell=True)
except subprocess.CalledProcessError as e:
msg = "Couldn't install the extensible SDK:\n%s" % e.output.decode("utf-8")
- logfn = os.path.join(sdkdir, 'preparing_build_system.log')
+ logfn = os.path.join(sdk_dir, 'preparing_build_system.log')
if os.path.exists(logfn):
msg += '\n\nContents of preparing_build_system.log:\n'
with open(logfn, 'r') as f:
@@ -128,19 +140,47 @@ def testsdkext_main(d):
msg += line
bb.fatal(msg)
- try:
- bb.plain("Running SDK Compatibility tests ...")
- run_test_context(SDKExtTestContext, d, testdir, tcname, pn, True)
- finally:
- pass
+ fail = False
+ sdk_envs = OESDKExtTestContextExecutor._get_sdk_environs(sdk_dir)
+ for s in sdk_envs:
+ bb.plain("Extensible SDK testing environment: %s" % s)
- try:
- bb.plain("Running Extensible SDK tests ...")
- run_test_context(SDKExtTestContext, d, testdir, tcname, pn)
- finally:
- pass
+ sdk_env = sdk_envs[s]
+
+ # Use our own SSTATE_DIR and DL_DIR so that updates to the eSDK come from our sstate cache
+ # and we don't spend hours downloading kernels for the kernel module test
+ # Abuse auto.conf since local.conf would be overwritten by the SDK
+ with open(os.path.join(sdk_dir, 'conf', 'auto.conf'), 'a+') as f:
+ f.write('SSTATE_MIRRORS += " \\n file://.* file://%s/PATH"\n' % test_data.get('SSTATE_DIR'))
+ f.write('SOURCE_MIRROR_URL = "file://%s"\n' % test_data.get('DL_DIR'))
+ f.write('INHERIT += "own-mirrors"')
+
+ # We need to do this in case we have a minimal SDK
+ subprocess.check_output(". %s > /dev/null; devtool sdk-install meta-extsdk-toolchain" % sdk_env, cwd=sdk_dir, shell=True)
- bb.utils.remove(testdir, True)
+ tc = OESDKExtTestContext(td=test_data, logger=logger, sdk_dir=sdk_dir,
+ sdk_env=sdk_env, target_pkg_manifest=target_pkg_manifest,
+ host_pkg_manifest=host_pkg_manifest)
+
+ try:
+ tc.loadTests(OESDKExtTestContextExecutor.default_cases)
+ except Exception as e:
+ import traceback
+ bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
+
+ result = tc.runTests()
+
+ component = "%s %s" % (pn, OESDKExtTestContextExecutor.name)
+ context_msg = "%s:%s" % (os.path.basename(tcname), os.path.basename(sdk_env))
+
+ tc.logSummary(result, component, context_msg)
+ tc.logDetails()
+
+ if not result.wasSuccessful():
+ fail = True
+
+ if fail:
+ bb.fatal("%s - FAILED - check the task log and the commands log" % pn)
testsdkext_main[vardepsexclude] =+ "BB_ORIGENV"
@@ -149,4 +189,4 @@ python do_testsdkext() {
}
addtask testsdkext
do_testsdkext[nostamp] = "1"
-do_testsdkext[lockfiles] += "${TESTSDKEXTLOCK}"
+
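Both testsdk_main and testsdkext_main loop over the environment-setup-* scripts that the SDK installer unpacks, one per configured target. _get_sdk_environs is internal to oeqa and not shown here, but the discovery step amounts to a glob over the install directory; a sketch under that assumption (the /opt/poky path is illustrative):

    import glob
    import os

    def get_sdk_environs(sdk_dir):
        """Map a short target name to each environment-setup-* script."""
        sdk_envs = {}
        for path in glob.glob(os.path.join(sdk_dir, 'environment-setup-*')):
            name = os.path.basename(path).replace('environment-setup-', '')
            sdk_envs[name] = path
        return sdk_envs

    for name, env_script in get_sdk_environs('/opt/poky/2.4').items():
        print("SDK testing environment: %s (%s)" % (name, env_script))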
diff --git a/import-layers/yocto-poky/meta/classes/texinfo.bbclass b/import-layers/yocto-poky/meta/classes/texinfo.bbclass
index 92efbccdd..6b0def0ea 100644
--- a/import-layers/yocto-poky/meta/classes/texinfo.bbclass
+++ b/import-layers/yocto-poky/meta/classes/texinfo.bbclass
@@ -1,10 +1,10 @@
# This class is inherited by recipes whose upstream packages invoke the
# texinfo utilities at build-time. Native and cross recipes are made to use the
-# dummy scripts provided by texinfo-dummy-native, for improved performance.
-# Target architecture recipes use the genuine Texinfo utilities. By default,
+# dummy scripts provided by texinfo-dummy-native, for improved performance.
+# Target architecture recipes use the genuine Texinfo utilities. By default,
# they use the Texinfo utilities on the host system. If you want to use the
-# Texinfo recipe shipped with yoco, you can remove texinfo-native from
-# ASSUME_PROVIDED and makeinfo from SANITY_REQUIRED_UTILITIES.
+# Texinfo recipe, you can remove texinfo-native from ASSUME_PROVIDED and
+# makeinfo from SANITY_REQUIRED_UTILITIES.
TEXDEP = "texinfo-native"
TEXDEP_class-native = "texinfo-dummy-native"
@@ -13,3 +13,6 @@ DEPENDS_append = " ${TEXDEP}"
PATH_prepend_class-native = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:"
PATH_prepend_class-cross = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:"
+# libtool-cross doesn't inherit cross
+TEXDEP_pn-libtool-cross = "texinfo-dummy-native"
+PATH_prepend_pn-libtool-cross = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:"
diff --git a/import-layers/yocto-poky/meta/classes/tinderclient.bbclass b/import-layers/yocto-poky/meta/classes/tinderclient.bbclass
index 917b74d88..00f453cec 100644
--- a/import-layers/yocto-poky/meta/classes/tinderclient.bbclass
+++ b/import-layers/yocto-poky/meta/classes/tinderclient.bbclass
@@ -55,22 +55,22 @@ def tinder_format_http_post(d,status,log):
# the variables we will need to send on this form post
variables = {
- "tree" : d.getVar('TINDER_TREE', True),
- "machine_name" : d.getVar('TINDER_MACHINE', True),
+ "tree" : d.getVar('TINDER_TREE'),
+ "machine_name" : d.getVar('TINDER_MACHINE'),
"os" : os.uname()[0],
"os_version" : os.uname()[2],
"compiler" : "gcc",
- "clobber" : d.getVar('TINDER_CLOBBER', True) or "0",
- "srcdate" : d.getVar('SRCDATE', True),
- "PN" : d.getVar('PN', True),
- "PV" : d.getVar('PV', True),
- "PR" : d.getVar('PR', True),
- "FILE" : d.getVar('FILE', True) or "N/A",
- "TARGETARCH" : d.getVar('TARGET_ARCH', True),
- "TARGETFPU" : d.getVar('TARGET_FPU', True) or "Unknown",
- "TARGETOS" : d.getVar('TARGET_OS', True) or "Unknown",
- "MACHINE" : d.getVar('MACHINE', True) or "Unknown",
- "DISTRO" : d.getVar('DISTRO', True) or "Unknown",
+ "clobber" : d.getVar('TINDER_CLOBBER') or "0",
+ "srcdate" : d.getVar('SRCDATE'),
+ "PN" : d.getVar('PN'),
+ "PV" : d.getVar('PV'),
+ "PR" : d.getVar('PR'),
+ "FILE" : d.getVar('FILE') or "N/A",
+ "TARGETARCH" : d.getVar('TARGET_ARCH'),
+ "TARGETFPU" : d.getVar('TARGET_FPU') or "Unknown",
+ "TARGETOS" : d.getVar('TARGET_OS') or "Unknown",
+ "MACHINE" : d.getVar('MACHINE') or "Unknown",
+ "DISTRO" : d.getVar('DISTRO') or "Unknown",
"zecke-rocks" : "sure",
}
@@ -82,7 +82,7 @@ def tinder_format_http_post(d,status,log):
# we only need on build_status.pl but sending it
# always does not hurt
try:
- f = open(d.getVar('TMPDIR',True)+'/tinder-machine.id', 'r')
+ f = open(d.getVar('TMPDIR')+'/tinder-machine.id', 'r')
id = f.read()
variables['machine_id'] = id
except:
@@ -106,8 +106,8 @@ def tinder_build_start(d):
# get the body and type
content_type, body = tinder_format_http_post(d,None,None)
- server = d.getVar('TINDER_HOST', True )
- url = d.getVar('TINDER_URL', True )
+ server = d.getVar('TINDER_HOST')
+ url = d.getVar('TINDER_URL')
selector = url + "/xml/build_start.pl"
@@ -127,7 +127,7 @@ def tinder_build_start(d):
# now we will need to save the machine number
# we will override any previous numbers
- f = open(d.getVar('TMPDIR', True)+"/tinder-machine.id", 'w')
+ f = open(d.getVar('TMPDIR')+"/tinder-machine.id", 'w')
f.write(report)
@@ -137,8 +137,8 @@ def tinder_send_http(d, status, _log):
"""
# get the body and type
- server = d.getVar('TINDER_HOST', True)
- url = d.getVar('TINDER_URL', True)
+ server = d.getVar('TINDER_HOST')
+ url = d.getVar('TINDER_URL')
selector = url + "/xml/build_status.pl"
@@ -163,16 +163,16 @@ def tinder_print_info(d):
time = tinder_time_string()
ops = os.uname()[0]
version = os.uname()[2]
- url = d.getVar( 'TINDER_URL' , True )
- tree = d.getVar( 'TINDER_TREE', True )
- branch = d.getVar( 'TINDER_BRANCH', True )
- srcdate = d.getVar( 'SRCDATE', True )
- machine = d.getVar( 'MACHINE', True )
- distro = d.getVar( 'DISTRO', True )
- bbfiles = d.getVar( 'BBFILES', True )
- tarch = d.getVar( 'TARGET_ARCH', True )
- fpu = d.getVar( 'TARGET_FPU', True )
- oerev = d.getVar( 'OE_REVISION', True ) or "unknown"
+ url = d.getVar('TINDER_URL')
+ tree = d.getVar('TINDER_TREE')
+ branch = d.getVar('TINDER_BRANCH')
+ srcdate = d.getVar('SRCDATE')
+ machine = d.getVar('MACHINE')
+ distro = d.getVar('DISTRO')
+ bbfiles = d.getVar('BBFILES')
+ tarch = d.getVar('TARGET_ARCH')
+ fpu = d.getVar('TARGET_FPU')
+ oerev = d.getVar('OE_REVISION') or "unknown"
# there is a bug with triple quoted strings
# I will work around it but will fix the original
@@ -278,7 +278,7 @@ def tinder_do_tinder_report(event):
try:
# truncate the tinder log file
- f = open(event.data.getVar('TINDER_LOG', True), 'w')
+ f = open(event.data.getVar('TINDER_LOG'), 'w')
f.write("")
f.close()
except:
@@ -287,7 +287,7 @@ def tinder_do_tinder_report(event):
try:
# write a status to the file. This is needed for the -k option
# of BitBake
- g = open(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
+ g = open(event.data.getVar('TMPDIR')+"/tinder-status", 'w')
g.write("")
g.close()
except IOError:
@@ -296,10 +296,10 @@ def tinder_do_tinder_report(event):
# Append the Task-Log (compile,configure...) to the log file
# we will send to the server
if name == "TaskSucceeded" or name == "TaskFailed":
- log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task))
+ log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T'), event.task))
if len(log_file) != 0:
- to_file = event.data.getVar('TINDER_LOG', True)
+ to_file = event.data.getVar('TINDER_LOG')
log += "".join(open(log_file[0], 'r').readlines())
# set the right 'HEADER'/Summary for the TinderBox
@@ -310,23 +310,23 @@ def tinder_do_tinder_report(event):
elif name == "TaskFailed":
log += "<--- TINDERBOX Task %s failed (FAILURE)\n" % event.task
elif name == "PkgStarted":
- log += "---> TINDERBOX Package %s started\n" % event.data.getVar('PF', True)
+ log += "---> TINDERBOX Package %s started\n" % event.data.getVar('PF')
elif name == "PkgSucceeded":
- log += "<--- TINDERBOX Package %s done (SUCCESS)\n" % event.data.getVar('PF', True)
+ log += "<--- TINDERBOX Package %s done (SUCCESS)\n" % event.data.getVar('PF')
elif name == "PkgFailed":
- if not event.data.getVar('TINDER_AUTOBUILD', True) == "0":
+ if not event.data.getVar('TINDER_AUTOBUILD') == "0":
build.exec_task('do_clean', event.data)
- log += "<--- TINDERBOX Package %s failed (FAILURE)\n" % event.data.getVar('PF', True)
+ log += "<--- TINDERBOX Package %s failed (FAILURE)\n" % event.data.getVar('PF')
status = 200
# remember the failure for the -k case
- h = open(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
+ h = open(event.data.getVar('TMPDIR')+"/tinder-status", 'w')
h.write("200")
elif name == "BuildCompleted":
log += "Build Completed\n"
status = 100
# Check if we have an old status...
try:
- h = open(event.data.getVar('TMPDIR',True)+'/tinder-status', 'r')
+ h = open(event.data.getVar('TMPDIR')+'/tinder-status', 'r')
status = int(h.read())
except:
pass
@@ -342,7 +342,7 @@ def tinder_do_tinder_report(event):
log += "Error:Was Runtime: %d\n" % event.isRuntime()
status = 200
# remember the failure for the -k case
- h = open(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
+ h = open(event.data.getVar('TMPDIR')+"/tinder-status", 'w')
h.write("200")
# now post the log
@@ -360,7 +360,7 @@ python tinderclient_eventhandler() {
if e.data is None or bb.event.getName(e) == "MsgNote":
return
- do_tinder_report = e.data.getVar('TINDER_REPORT', True)
+ do_tinder_report = e.data.getVar('TINDER_REPORT')
if do_tinder_report and do_tinder_report == "1":
tinder_do_tinder_report(e)
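Most of the churn in tinderclient.bbclass (and in the classes below) is mechanical: BitBake's datastore now expands variables by default, so d.getVar('X', True) becomes d.getVar('X'), and likewise for getVarFlag. A toy datastore illustrating the new calling convention (not BitBake's real implementation, just the shape of the API change):

    class DataStore:
        """Toy stand-in for BitBake's data store."""
        def __init__(self):
            self._vars = {}

        def setVar(self, name, value):
            self._vars[name] = value

        def getVar(self, name, expand=True):
            # expand defaults to True now, so the second argument can be dropped
            value = self._vars.get(name)
            if expand and value is not None:
                for k, v in self._vars.items():
                    value = value.replace('${%s}' % k, v)
            return value

    d = DataStore()
    d.setVar('MACHINE', 'qemux86')
    d.setVar('TINDER_LOG', '/tmp/${MACHINE}/tinder.log')
    print(d.getVar('TINDER_LOG'))   # /tmp/qemux86/tinder.log, expanded by default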
diff --git a/import-layers/yocto-poky/meta/classes/toaster.bbclass b/import-layers/yocto-poky/meta/classes/toaster.bbclass
index 4bddf34e9..6cef0b8f6 100644
--- a/import-layers/yocto-poky/meta/classes/toaster.bbclass
+++ b/import-layers/yocto-poky/meta/classes/toaster.bbclass
@@ -80,7 +80,7 @@ python toaster_layerinfo_dumpdata() {
return layer_info
- bblayers = e.data.getVar("BBLAYERS", True)
+ bblayers = e.data.getVar("BBLAYERS")
llayerinfo = {}
@@ -119,10 +119,10 @@ python toaster_package_dumpdata() {
"""
# No need to try and dumpdata if the recipe isn't generating packages
- if not d.getVar('PACKAGES', True):
+ if not d.getVar('PACKAGES'):
return
- pkgdatadir = d.getVar('PKGDESTWORK', True)
+ pkgdatadir = d.getVar('PKGDESTWORK')
lpkgdata = {}
datadir = os.path.join(pkgdatadir, 'runtime')
@@ -142,7 +142,7 @@ python toaster_artifact_dumpdata() {
"""
event_data = {
- "TOOLCHAIN_OUTPUTNAME": d.getVar("TOOLCHAIN_OUTPUTNAME", True)
+ "TOOLCHAIN_OUTPUTNAME": d.getVar("TOOLCHAIN_OUTPUTNAME")
}
bb.event.fire(bb.event.MetadataEvent("SDKArtifactInfo", event_data), d)
@@ -157,11 +157,11 @@ python toaster_collect_task_stats() {
import bb.utils
import os
- toaster_statlist_file = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist")
-
- if not e.data.getVar('BUILDSTATS_BASE', True):
+ if not e.data.getVar('BUILDSTATS_BASE'):
return # if we don't have buildstats, we cannot collect stats
+ toaster_statlist_file = os.path.join(e.data.getVar('BUILDSTATS_BASE'), "toasterstatlist")
+
def stat_to_float(value):
return float(value.strip('% \n\r'))
@@ -246,7 +246,7 @@ python toaster_buildhistory_dump() {
import re
BUILDHISTORY_DIR = e.data.expand("${TOPDIR}/buildhistory")
BUILDHISTORY_DIR_IMAGE_BASE = e.data.expand("%s/images/${MACHINE_ARCH}/${TCLIBC}/"% BUILDHISTORY_DIR)
- pkgdata_dir = e.data.getVar("PKGDATA_DIR", True)
+ pkgdata_dir = e.data.getVar("PKGDATA_DIR")
# scan the build targets for this build
@@ -265,28 +265,33 @@ python toaster_buildhistory_dump() {
with open("%s/installed-package-sizes.txt" % installed_img_path, "r") as fin:
for line in fin:
line = line.rstrip(";")
- psize, px = line.split("\t")
- punit, pname = px.split(" ")
+ psize, punit, pname = line.split()
# this size is "installed-size" as it measures how much space it takes on disk
images[target][pname.strip()] = {'size':int(psize)*1024, 'depends' : []}
with open("%s/depends.dot" % installed_img_path, "r") as fin:
- p = re.compile(r' -> ')
- dot = re.compile(r'.*style=dotted')
+ p = re.compile(r'\s*"(?P<name>[^"]+)"\s*->\s*"(?P<dep>[^"]+)"(?P<rec>.*?\[style=dotted\])?')
for line in fin:
- line = line.rstrip(';')
- linesplit = p.split(line)
- if len(linesplit) == 2:
- pname = linesplit[0].rstrip('"').strip('"')
- dependsname = linesplit[1].split(" ")[0].strip().strip(";").strip('"').rstrip('"')
- deptype = "depends"
- if dot.match(line):
- deptype = "recommends"
- if not pname in images[target]:
- images[target][pname] = {'size': 0, 'depends' : []}
- if not dependsname in images[target]:
- images[target][dependsname] = {'size': 0, 'depends' : []}
- images[target][pname]['depends'].append((dependsname, deptype))
+ m = p.match(line)
+ if not m:
+ continue
+ pname = m.group('name')
+ dependsname = m.group('dep')
+ deptype = 'recommends' if m.group('rec') else 'depends'
+
+ # If RPM is used for packaging, then there may be
+ # dependencies such as "/bin/sh", which will confuse
+ # _toaster_load_pkgdatafile() later on. While at it, ignore
+ # any dependencies that contain parentheses, e.g.,
+ # "libc.so.6(GLIBC_2.7)".
+ if dependsname.startswith('/') or '(' in dependsname:
+ continue
+
+ if not pname in images[target]:
+ images[target][pname] = {'size': 0, 'depends' : []}
+ if not dependsname in images[target]:
+ images[target][dependsname] = {'size': 0, 'depends' : []}
+ images[target][pname]['depends'].append((dependsname, deptype))
# files-in-image.txt is only generated if an image file is created,
# so the file entries ('syms', 'dirs', 'files') for a target will be
@@ -329,8 +334,18 @@ python toaster_artifacts() {
if e.taskname in ["do_deploy", "do_image_complete", "do_populate_sdk", "do_populate_sdk_ext"]:
d2 = d.createCopy()
d2.setVar('FILE', e.taskfile)
- d2.setVar('SSTATE_MANMACH', d2.expand("${MACHINE}"))
+ # Use 'stamp-extra-info' if present, else use workaround
+ # to determine 'SSTATE_MANMACH'
+ extrainf = d2.getVarFlag(e.taskname, 'stamp-extra-info')
+ if extrainf:
+ d2.setVar('SSTATE_MANMACH', extrainf)
+ else:
+ if "do_populate_sdk" == e.taskname:
+ d2.setVar('SSTATE_MANMACH', d2.expand("${MACHINE}${SDKMACHINE}"))
+ else:
+ d2.setVar('SSTATE_MANMACH', d2.expand("${MACHINE}"))
manifest = oe.sstatesig.sstate_get_manifest_filename(e.taskname[3:], d2)[0]
+
if os.access(manifest, os.R_OK):
with open(manifest) as fmanifest:
artifacts = [fname.strip() for fname in fmanifest]
@@ -357,8 +372,9 @@ do_packagedata_setscene[vardepsexclude] += "toaster_package_dumpdata "
do_package[postfuncs] += "toaster_package_dumpdata "
do_package[vardepsexclude] += "toaster_package_dumpdata "
-do_populate_sdk[postfuncs] += "toaster_artifact_dumpdata "
-do_populate_sdk[vardepsexclude] += "toaster_artifact_dumpdata "
+#do_populate_sdk[postfuncs] += "toaster_artifact_dumpdata "
+#do_populate_sdk[vardepsexclude] += "toaster_artifact_dumpdata "
+
+#do_populate_sdk_ext[postfuncs] += "toaster_artifact_dumpdata "
+#do_populate_sdk_ext[vardepsexclude] += "toaster_artifact_dumpdata "
-do_populate_sdk_ext[postfuncs] += "toaster_artifact_dumpdata "
-do_populate_sdk_ext[vardepsexclude] += "toaster_artifact_dumpdata " \ No newline at end of file
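The rewritten depends.dot parser above replaces two loose regexes with one anchored pattern that captures the package, its dependency, and whether the edge is dotted (Graphviz's marker for a recommends relationship), then filters out RPM file-level dependencies such as /bin/sh or libc.so.6(GLIBC_2.7). The pattern can be exercised directly against sample dot lines:

    import re

    p = re.compile(r'\s*"(?P<name>[^"]+)"\s*->\s*"(?P<dep>[^"]+)"'
                   r'(?P<rec>.*?\[style=dotted\])?')

    sample = [
        '"busybox" -> "busybox-udhcpc" [style=dotted];',   # recommends edge
        '"busybox" -> "libc6";',                           # depends edge
        '"bash" -> "/bin/sh";',                            # RPM file dep, filtered out
        '"bash" -> "libc.so.6(GLIBC_2.7)";',               # parenthesised dep, filtered out
    ]
    for line in sample:
        m = p.match(line)
        if not m:
            continue
        dependsname = m.group('dep')
        if dependsname.startswith('/') or '(' in dependsname:
            continue
        deptype = 'recommends' if m.group('rec') else 'depends'
        print('%s %s %s' % (m.group('name'), deptype, dependsname))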
diff --git a/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass b/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass
index 0e11f2d7a..260ece967 100644
--- a/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass
+++ b/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass
@@ -31,7 +31,6 @@ toolchain_create_sdk_env_script () {
EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i"
done
echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
- echo "export CCACHE_PATH=$sdkpathnative$bindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$CCACHE_PATH' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig:$SDKTARGETSYSROOT'"$prefix"'/share/pkgconfig' >> $script
echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
@@ -50,7 +49,6 @@ toolchain_create_tree_env_script () {
rm -f $script
touch $script
echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${PATH}' >> $script
- echo 'export CCACHE_PATH=${STAGING_DIR_NATIVE}/usr/bin:${CCACHE_PATH}' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=${PKG_CONFIG_SYSROOT_DIR}' >> $script
echo 'export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}' >> $script
echo 'export CONFIG_SITE="${@siteinfo_get_files(d)}"' >> $script
@@ -108,6 +106,7 @@ EOF
TOOLCHAIN_CONFIGSITE_NOCACHE = "${@siteinfo_get_files(d)}"
TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "virtual/${MLPREFIX}libc ncurses"
+DEPENDS += "${TOOLCHAIN_NEED_CONFIGSITE_CACHE}"
#This function create a site config file
toolchain_create_sdk_siteconfig () {
@@ -139,9 +138,9 @@ toolchain_create_sdk_siteconfig[vardepsexclude] = "TOOLCHAIN_CONFIGSITE_SYSROOTC
python __anonymous () {
import oe.classextend
deps = ""
- for dep in (d.getVar('TOOLCHAIN_NEED_CONFIGSITE_CACHE', True) or "").split():
+ for dep in (d.getVar('TOOLCHAIN_NEED_CONFIGSITE_CACHE') or "").split():
deps += " %s:do_populate_sysroot" % dep
- for variant in (d.getVar('MULTILIB_VARIANTS', True) or "").split():
+ for variant in (d.getVar('MULTILIB_VARIANTS') or "").split():
clsextend = oe.classextend.ClassExtender(variant, d)
newdep = clsextend.extend_name(dep)
deps += " %s:do_populate_sysroot" % newdep
diff --git a/import-layers/yocto-poky/meta/classes/typecheck.bbclass b/import-layers/yocto-poky/meta/classes/typecheck.bbclass
index 6bff7c713..72da93223 100644
--- a/import-layers/yocto-poky/meta/classes/typecheck.bbclass
+++ b/import-layers/yocto-poky/meta/classes/typecheck.bbclass
@@ -5,7 +5,7 @@
python check_types() {
import oe.types
for key in e.data.keys():
- if e.data.getVarFlag(key, "type", True):
+ if e.data.getVarFlag(key, "type"):
oe.data.typed_value(key, e.data)
}
addhandler check_types
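check_types walks every key in the datastore and validates those that declare a "type" varflag through oe.data.typed_value. The same declare-then-validate idea in plain Python, with a small converter registry standing in for oe.types (names and supported types here are illustrative):

    CONVERTERS = {
        'integer': int,
        'boolean': lambda v: {'0': False, '1': True,
                              'false': False, 'true': True}[v.lower()],
    }

    def typed_value(key, variables, flags):
        value = variables[key]
        vtype = flags.get(key)
        if vtype is None:
            return value                    # untyped variables pass through
        try:
            return CONVERTERS[vtype](value)
        except (KeyError, ValueError):
            raise ValueError("%s: %r is not a valid %s" % (key, value, vtype))

    variables = {'BB_NUMBER_THREADS': '8', 'FOO': 'yes'}
    flags = {'BB_NUMBER_THREADS': 'integer'}
    for key in variables:
        if flags.get(key):                  # only keys with a "type" flag are checked
            print(key, '=', typed_value(key, variables, flags))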
diff --git a/import-layers/yocto-poky/meta/classes/uboot-config.bbclass b/import-layers/yocto-poky/meta/classes/uboot-config.bbclass
index 3f760f2fb..10013b7d4 100644
--- a/import-layers/yocto-poky/meta/classes/uboot-config.bbclass
+++ b/import-layers/yocto-poky/meta/classes/uboot-config.bbclass
@@ -14,19 +14,19 @@
UBOOT_BINARY ?= "u-boot.${UBOOT_SUFFIX}"
python () {
- ubootmachine = d.getVar("UBOOT_MACHINE", True)
+ ubootmachine = d.getVar("UBOOT_MACHINE")
ubootconfigflags = d.getVarFlags('UBOOT_CONFIG')
- ubootbinary = d.getVar('UBOOT_BINARY', True)
- ubootbinaries = d.getVar('UBOOT_BINARIES', True)
+ ubootbinary = d.getVar('UBOOT_BINARY')
+ ubootbinaries = d.getVar('UBOOT_BINARIES')
# The "doc" varflag is special, we don't want to see it here
ubootconfigflags.pop('doc', None)
if not ubootmachine and not ubootconfigflags:
- PN = d.getVar("PN", True)
- FILE = os.path.basename(d.getVar("FILE", True))
+ PN = d.getVar("PN")
+ FILE = os.path.basename(d.getVar("FILE"))
bb.debug(1, "To build %s, see %s for instructions on \
setting up your machine config" % (PN, FILE))
- raise bb.parse.SkipPackage("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE", True))
+ raise bb.parse.SkipPackage("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE"))
if ubootmachine and ubootconfigflags:
raise bb.parse.SkipPackage("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.")
@@ -37,7 +37,7 @@ python () {
if not ubootconfigflags:
return
- ubootconfig = (d.getVar('UBOOT_CONFIG', True) or "").split()
+ ubootconfig = (d.getVar('UBOOT_CONFIG') or "").split()
if len(ubootconfig) > 0:
for config in ubootconfig:
for f, v in ubootconfigflags.items():
diff --git a/import-layers/yocto-poky/meta/classes/uboot-extlinux-config.bbclass b/import-layers/yocto-poky/meta/classes/uboot-extlinux-config.bbclass
index df91386c0..8447a047e 100644
--- a/import-layers/yocto-poky/meta/classes/uboot-extlinux-config.bbclass
+++ b/import-layers/yocto-poky/meta/classes/uboot-extlinux-config.bbclass
@@ -12,10 +12,15 @@
# UBOOT_EXTLINUX_KERNEL_ARGS - Add additional kernel arguments.
# UBOOT_EXTLINUX_KERNEL_IMAGE - Kernel image name.
# UBOOT_EXTLINUX_FDTDIR - Device tree directory.
+# UBOOT_EXTLINUX_FDT - Device tree file.
# UBOOT_EXTLINUX_INITRD - Indicates a list of filesystem images to
# concatenate and use as an initrd (optional).
# UBOOT_EXTLINUX_MENU_DESCRIPTION - Name to use as description.
# UBOOT_EXTLINUX_ROOT - Root kernel cmdline.
+# UBOOT_EXTLINUX_TIMEOUT - Timeout before DEFAULT selection is made.
+# Measured in 1/10 of a second.
+# UBOOT_EXTLINUX_DEFAULT_LABEL - Target to be selected by default after
+# the timeout period
#
# If there's only one label, the system will boot automatically and the menu won't be
# created. If you want to use more than one label, e.g. linux and alternate,
@@ -25,6 +30,9 @@
#
# UBOOT_EXTLINUX_LABELS ??= "default fallback"
#
+# UBOOT_EXTLINUX_DEFAULT_LABEL ??= "Linux Default"
+# UBOOT_EXTLINUX_TIMEOUT ??= "30"
+#
# UBOOT_EXTLINUX_KERNEL_IMAGE_default ??= "../zImage"
# UBOOT_EXTLINUX_MENU_DESCRIPTION_default ??= "Linux Default"
#
@@ -34,6 +42,8 @@
# Results:
#
# menu title Select the boot mode
+# TIMEOUT 30
+# DEFAULT Linux Default
# LABEL Linux Default
# KERNEL ../zImage
# FDTDIR ../
@@ -50,6 +60,7 @@
# a console=...some_tty...
UBOOT_EXTLINUX_CONSOLE ??= "console=${console}"
UBOOT_EXTLINUX_LABELS ??= "linux"
+UBOOT_EXTLINUX_FDT ??= ""
UBOOT_EXTLINUX_FDTDIR ??= "../"
UBOOT_EXTLINUX_KERNEL_IMAGE ??= "../${KERNEL_IMAGETYPE}"
UBOOT_EXTLINUX_KERNEL_ARGS ??= "rootwait rw"
@@ -58,23 +69,25 @@ UBOOT_EXTLINUX_MENU_DESCRIPTION_linux ??= "${DISTRO_NAME}"
UBOOT_EXTLINUX_CONFIG = "${B}/extlinux.conf"
python create_extlinux_config() {
- if d.getVar("UBOOT_EXTLINUX", True) != "1":
+ if d.getVar("UBOOT_EXTLINUX") != "1":
return
- if not d.getVar('WORKDIR', True):
+ if not d.getVar('WORKDIR'):
bb.error("WORKDIR not defined, unable to package")
- labels = d.getVar('UBOOT_EXTLINUX_LABELS', True)
+ labels = d.getVar('UBOOT_EXTLINUX_LABELS')
if not labels:
bb.fatal("UBOOT_EXTLINUX_LABELS not defined, nothing to do")
if not labels.strip():
bb.fatal("No labels, nothing to do")
- cfile = d.getVar('UBOOT_EXTLINUX_CONFIG', True)
+ cfile = d.getVar('UBOOT_EXTLINUX_CONFIG')
if not cfile:
bb.fatal('Unable to read UBOOT_EXTLINUX_CONFIG')
+ localdata = bb.data.createCopy(d)
+
try:
with open(cfile, 'w') as cfgfile:
cfgfile.write('# Generic Distro Configuration file generated by OpenEmbedded\n')
@@ -82,37 +95,50 @@ python create_extlinux_config() {
if len(labels.split()) > 1:
cfgfile.write('menu title Select the boot mode\n')
+ timeout = localdata.getVar('UBOOT_EXTLINUX_TIMEOUT')
+ if timeout:
+ cfgfile.write('TIMEOUT %s\n' % (timeout))
+
+ if len(labels.split()) > 1:
+ default = localdata.getVar('UBOOT_EXTLINUX_DEFAULT_LABEL')
+ if default:
+ cfgfile.write('DEFAULT %s\n' % (default))
+
for label in labels.split():
- localdata = bb.data.createCopy(d)
- overrides = localdata.getVar('OVERRIDES', True)
+ overrides = localdata.getVar('OVERRIDES')
if not overrides:
bb.fatal('OVERRIDES not defined')
localdata.setVar('OVERRIDES', label + ':' + overrides)
- bb.data.update_data(localdata)
- extlinux_console = localdata.getVar('UBOOT_EXTLINUX_CONSOLE', True)
+ extlinux_console = localdata.getVar('UBOOT_EXTLINUX_CONSOLE')
- menu_description = localdata.getVar('UBOOT_EXTLINUX_MENU_DESCRIPTION', True)
+ menu_description = localdata.getVar('UBOOT_EXTLINUX_MENU_DESCRIPTION')
if not menu_description:
menu_description = label
- root = localdata.getVar('UBOOT_EXTLINUX_ROOT', True)
+ root = localdata.getVar('UBOOT_EXTLINUX_ROOT')
if not root:
bb.fatal('UBOOT_EXTLINUX_ROOT not defined')
- kernel_image = localdata.getVar('UBOOT_EXTLINUX_KERNEL_IMAGE', True)
- fdtdir = localdata.getVar('UBOOT_EXTLINUX_FDTDIR', True)
- if fdtdir:
+ kernel_image = localdata.getVar('UBOOT_EXTLINUX_KERNEL_IMAGE')
+ fdtdir = localdata.getVar('UBOOT_EXTLINUX_FDTDIR')
+
+ fdt = localdata.getVar('UBOOT_EXTLINUX_FDT')
+
+ if fdt:
+ cfgfile.write('LABEL %s\n\tKERNEL %s\n\tFDT %s\n' %
+ (menu_description, kernel_image, fdt))
+ elif fdtdir:
cfgfile.write('LABEL %s\n\tKERNEL %s\n\tFDTDIR %s\n' %
(menu_description, kernel_image, fdtdir))
else:
cfgfile.write('LABEL %s\n\tKERNEL %s\n' % (menu_description, kernel_image))
- kernel_args = localdata.getVar('UBOOT_EXTLINUX_KERNEL_ARGS', True)
+ kernel_args = localdata.getVar('UBOOT_EXTLINUX_KERNEL_ARGS')
- initrd = localdata.getVar('UBOOT_EXTLINUX_INITRD', True)
+ initrd = localdata.getVar('UBOOT_EXTLINUX_INITRD')
if initrd:
cfgfile.write('\tINITRD %s\n'% initrd)
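With the additions above, the generated extlinux.conf gains an optional TIMEOUT line and, when several labels exist, a DEFAULT line, and each label may name a single FDT file instead of an FDTDIR. A standalone sketch of the emitter (datastore lookups replaced with a plain dict, labels used as their own menu descriptions; the write order mirrors the class):

    def write_extlinux(cfg, path):
        labels = cfg['LABELS'].split()
        with open(path, 'w') as f:
            f.write('# Generic Distro Configuration file generated by OpenEmbedded\n')
            if len(labels) > 1:
                f.write('menu title Select the boot mode\n')
            if cfg.get('TIMEOUT'):
                f.write('TIMEOUT %s\n' % cfg['TIMEOUT'])
            if len(labels) > 1 and cfg.get('DEFAULT_LABEL'):
                f.write('DEFAULT %s\n' % cfg['DEFAULT_LABEL'])
            for label in labels:
                if cfg.get('FDT'):        # a single device tree file takes precedence
                    f.write('LABEL %s\n\tKERNEL %s\n\tFDT %s\n'
                            % (label, cfg['KERNEL_IMAGE'], cfg['FDT']))
                elif cfg.get('FDTDIR'):   # otherwise fall back to a directory
                    f.write('LABEL %s\n\tKERNEL %s\n\tFDTDIR %s\n'
                            % (label, cfg['KERNEL_IMAGE'], cfg['FDTDIR']))
                else:
                    f.write('LABEL %s\n\tKERNEL %s\n' % (label, cfg['KERNEL_IMAGE']))

    write_extlinux({'LABELS': 'linux', 'TIMEOUT': '30',
                    'KERNEL_IMAGE': '../zImage', 'FDTDIR': '../'},
                   'extlinux.conf')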
diff --git a/import-layers/yocto-poky/meta/classes/uboot-sign.bbclass b/import-layers/yocto-poky/meta/classes/uboot-sign.bbclass
index cef26b19b..8ee904e7d 100644
--- a/import-layers/yocto-poky/meta/classes/uboot-sign.bbclass
+++ b/import-layers/yocto-poky/meta/classes/uboot-sign.bbclass
@@ -25,7 +25,7 @@
# u-boot:do_concat_dtb
# u-boot:do_install
#
-# For more details on signature process, please refer to U-boot documentation.
+# For more details on signature process, please refer to U-Boot documentation.
# Signature activation.
UBOOT_SIGN_ENABLE ?= "0"
@@ -80,9 +80,9 @@ do_concat_dtb () {
}
python () {
- uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot', True) or 'u-boot'
- if d.getVar('UBOOT_SIGN_ENABLE', True) == '1' and d.getVar('PN', True) == uboot_pn:
- kernel_pn = d.getVar('PREFERRED_PROVIDER_virtual/kernel', True)
+ uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'
+ if d.getVar('UBOOT_SIGN_ENABLE') == '1' and d.getVar('PN') == uboot_pn:
+ kernel_pn = d.getVar('PREFERRED_PROVIDER_virtual/kernel')
# u-boot.dtb and u-boot-nodtb.bin are deployed _before_ do_deploy
# Thus, do_deploy_setscene will also populate them in DEPLOY_IMAGE_DIR
diff --git a/import-layers/yocto-poky/meta/classes/uninative.bbclass b/import-layers/yocto-poky/meta/classes/uninative.bbclass
index 975466929..8f3448336 100644
--- a/import-layers/yocto-poky/meta/classes/uninative.bbclass
+++ b/import-layers/yocto-poky/meta/classes/uninative.bbclass
@@ -20,11 +20,11 @@ python uninative_event_fetchloader() {
loader isn't already present.
"""
- chksum = d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH", True), True)
+ chksum = d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH"))
if not chksum:
- bb.fatal("Uninative selected but not configured correctly, please set UNINATIVE_CHECKSUM[%s]" % d.getVar("BUILD_ARCH", True))
+ bb.fatal("Uninative selected but not configured correctly, please set UNINATIVE_CHECKSUM[%s]" % d.getVar("BUILD_ARCH"))
- loader = d.getVar("UNINATIVE_LOADER", True)
+ loader = d.getVar("UNINATIVE_LOADER")
loaderchksum = loader + ".chksum"
if os.path.exists(loader) and os.path.exists(loaderchksum):
with open(loaderchksum, "r") as f:
@@ -37,13 +37,13 @@ python uninative_event_fetchloader() {
# Save and restore cwd as Fetch.download() does a chdir()
olddir = os.getcwd()
- tarball = d.getVar("UNINATIVE_TARBALL", True)
- tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR", True), chksum)
+ tarball = d.getVar("UNINATIVE_TARBALL")
+ tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR"), chksum)
tarballpath = os.path.join(tarballdir, tarball)
if not os.path.exists(tarballpath):
bb.utils.mkdirhier(tarballdir)
- if d.getVar("UNINATIVE_URL", True) == "unset":
+ if d.getVar("UNINATIVE_URL") == "unset":
bb.fatal("Uninative selected but not configured, please set UNINATIVE_URL")
localdata = bb.data.createCopy(d)
@@ -59,8 +59,17 @@ python uninative_event_fetchloader() {
if localpath != tarballpath and os.path.exists(localpath) and not os.path.exists(tarballpath):
os.symlink(localpath, tarballpath)
- cmd = d.expand("mkdir -p ${UNINATIVE_STAGING_DIR}-uninative; cd ${UNINATIVE_STAGING_DIR}-uninative; tar -xjf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; ${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux ${UNINATIVE_LOADER} ${UNINATIVE_LOADER} ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so" % chksum)
- subprocess.check_call(cmd, shell=True)
+ cmd = d.expand("\
+mkdir -p ${UNINATIVE_STAGING_DIR}-uninative; \
+cd ${UNINATIVE_STAGING_DIR}-uninative; \
+tar -xjf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; \
+${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py \
+ ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux \
+ ${UNINATIVE_LOADER} \
+ ${UNINATIVE_LOADER} \
+ ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative \
+ ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so" % chksum)
+ subprocess.check_output(cmd, shell=True)
with open(loaderchksum, "w") as f:
f.write(chksum)
@@ -86,12 +95,13 @@ python uninative_event_enable() {
}
def enable_uninative(d):
- loader = d.getVar("UNINATIVE_LOADER", True)
+ loader = d.getVar("UNINATIVE_LOADER")
if os.path.exists(loader):
bb.debug(2, "Enabling uninative")
d.setVar("NATIVELSBSTRING", "universal%s" % oe.utils.host_gcc_version(d))
d.appendVar("SSTATEPOSTUNPACKFUNCS", " uninative_changeinterp")
- d.prependVar("PATH", "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${bindir_native}:")
+ d.appendVarFlag("SSTATEPOSTUNPACKFUNCS", "vardepvalueexclude", "| uninative_changeinterp")
+ d.prependVar("PATH", "${STAGING_DIR}-uninative/${BUILD_ARCH}-linux${bindir_native}:")
python uninative_changeinterp () {
import subprocess
@@ -101,7 +111,7 @@ python uninative_changeinterp () {
if not (bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d)):
return
- sstateinst = d.getVar('SSTATE_INSTDIR', True)
+ sstateinst = d.getVar('SSTATE_INSTDIR')
for walkroot, dirs, files in os.walk(sstateinst):
for file in files:
if file.endswith(".so") or ".so." in file:
@@ -120,11 +130,5 @@ python uninative_changeinterp () {
if not elf.isDynamic():
continue
- try:
- subprocess.check_output(("patchelf-uninative", "--set-interpreter",
- d.getVar("UNINATIVE_LOADER", True), f),
- stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("'%s' failed with exit code %d and the following output:\n%s" %
- (e.cmd, e.returncode, e.output))
+ subprocess.check_output(("patchelf-uninative", "--set-interpreter", d.getVar("UNINATIVE_LOADER"), f), stderr=subprocess.STDOUT)
}
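uninative_changeinterp now calls patchelf-uninative through subprocess.check_output with stderr folded into stdout, so when it fails the CalledProcessError already carries the tool's output (the subprocesstweak-style errors_have_output() behaviour makes that visible in the traceback). The same pattern with stock patchelf, since patchelf-uninative only exists inside a Yocto build; the binary and loader paths are illustrative:

    import subprocess

    def set_interpreter(binary, loader):
        # stderr=STDOUT means a failure's diagnostics travel with the exception
        subprocess.check_output(("patchelf", "--set-interpreter", loader, binary),
                                stderr=subprocess.STDOUT)

    try:
        set_interpreter("./a.out", "/opt/uninative/lib/ld-linux-x86-64.so.2")
    except FileNotFoundError:
        print("patchelf is not installed")
    except subprocess.CalledProcessError as e:
        print("'%s' failed with exit code %d:\n%s"
              % (e.cmd, e.returncode, e.output.decode("utf-8")))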
diff --git a/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass b/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass
index 65929e555..4bba76c3b 100644
--- a/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass
+++ b/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass
@@ -65,9 +65,11 @@ ALTERNATIVE_PRIORITY = "10"
# and include that variable in the set.
UPDALTVARS = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_PRIORITY"
+PACKAGE_WRITE_DEPS += "virtual/update-alternatives-native"
+
def gen_updatealternativesvardeps(d):
- pkgs = (d.getVar("PACKAGES", True) or "").split()
- vars = (d.getVar("UPDALTVARS", True) or "").split()
+ pkgs = (d.getVar("PACKAGES") or "").split()
+ vars = (d.getVar("UPDALTVARS") or "").split()
# First compute them for non_pkg versions
for v in vars:
@@ -84,7 +86,7 @@ def gen_updatealternativesvardeps(d):
d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s_%s' % (v,p), flag, False)))
def ua_extend_depends(d):
- if not 'virtual/update-alternatives' in d.getVar('PROVIDES', True):
+ if not 'virtual/update-alternatives' in d.getVar('PROVIDES'):
d.appendVar('DEPENDS', ' virtual/${MLPREFIX}update-alternatives')
python __anonymous() {
@@ -94,6 +96,10 @@ python __anonymous() {
bb.data.inherits_class('cross-canadian', d):
return
+ # Disable when targeting mingw32 (no target support)
+ if d.getVar("TARGET_OS") == "mingw32":
+ return
+
# compute special vardeps
gen_updatealternativesvardeps(d)
@@ -103,8 +109,8 @@ python __anonymous() {
def gen_updatealternativesvars(d):
ret = []
- pkgs = (d.getVar("PACKAGES", True) or "").split()
- vars = (d.getVar("UPDALTVARS", True) or "").split()
+ pkgs = (d.getVar("PACKAGES") or "").split()
+ vars = (d.getVar("UPDALTVARS") or "").split()
for v in vars:
ret.append(v + "_VARDEPS")
@@ -123,23 +129,23 @@ populate_packages[vardeps] += "${UPDALTVARS} ${@gen_updatealternativesvars(d)}"
# place.
python perform_packagecopy_append () {
# Check for deprecated usage...
- pn = d.getVar('BPN', True)
- if d.getVar('ALTERNATIVE_LINKS', True) != None:
+ pn = d.getVar('BPN')
+ if d.getVar('ALTERNATIVE_LINKS') != None:
bb.fatal('%s: Use of ALTERNATIVE_LINKS/ALTERNATIVE_PATH/ALTERNATIVE_NAME is no longer supported, please convert to the updated syntax, see update-alternatives.bbclass for more info.' % pn)
# Do actual update alternatives processing
- pkgdest = d.getVar('PKGD', True)
- for pkg in (d.getVar('PACKAGES', True) or "").split():
+ pkgdest = d.getVar('PKGD')
+ for pkg in (d.getVar('PACKAGES') or "").split():
# If the src == dest, we know we need to rename the dest by appending ${BPN}
link_rename = {}
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
- alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
+ for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
if not alt_link:
- alt_link = "%s/%s" % (d.getVar('bindir', True), alt_name)
+ alt_link = "%s/%s" % (d.getVar('bindir'), alt_name)
d.setVarFlag('ALTERNATIVE_LINK_NAME', alt_name, alt_link)
- alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
- alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
+ alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
+ alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
# Sometimes alt_target is specified as relative to the link name.
alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
@@ -189,23 +195,23 @@ python perform_packagecopy_append () {
PACKAGESPLITFUNCS_prepend = "populate_packages_updatealternatives "
python populate_packages_updatealternatives () {
- pn = d.getVar('BPN', True)
+ pn = d.getVar('BPN')
# Do actual update alternatives processing
- pkgdest = d.getVar('PKGD', True)
- for pkg in (d.getVar('PACKAGES', True) or "").split():
+ pkgdest = d.getVar('PKGD')
+ for pkg in (d.getVar('PACKAGES') or "").split():
# Create post install/removal scripts
alt_setup_links = "# Begin section update-alternatives\n"
alt_remove_links = "# Begin section update-alternatives\n"
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
- alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
- alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
- alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
+ for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
+ alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
+ alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
# Sometimes alt_target is specified as relative to the link name.
alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
- alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name, True)
- alt_priority = alt_priority or d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg, True) or d.getVar('ALTERNATIVE_PRIORITY', True)
+ alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name)
+ alt_priority = alt_priority or d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg) or d.getVar('ALTERNATIVE_PRIORITY')
# This shouldn't trigger, as it should have been resolved earlier!
if alt_link == alt_target:
@@ -227,14 +233,14 @@ python populate_packages_updatealternatives () {
if len(alt_setup_links.splitlines()) > 2:
# RDEPENDS setup
- provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives', True)
+ provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives')
if provider:
#bb.note('adding runtime requirement for update-alternatives for %s' % pkg)
d.appendVar('RDEPENDS_%s' % pkg, ' ' + d.getVar('MLPREFIX', False) + provider)
bb.note('adding update-alternatives calls to postinst/prerm for %s' % pkg)
bb.note('%s' % alt_setup_links)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True) or '#!/bin/sh\n'
+ postinst = d.getVar('pkg_postinst_%s' % pkg) or '#!/bin/sh\n'
postinst = postinst.splitlines(True)
try:
index = postinst.index('# Begin section update-rc.d\n')
@@ -245,7 +251,7 @@ python populate_packages_updatealternatives () {
d.setVar('pkg_postinst_%s' % pkg, postinst)
bb.note('%s' % alt_remove_links)
- prerm = d.getVar('pkg_prerm_%s' % pkg, True) or '#!/bin/sh\n'
+ prerm = d.getVar('pkg_prerm_%s' % pkg) or '#!/bin/sh\n'
prerm = prerm.splitlines(True)
try:
index = prerm.index('# End section update-rc.d\n')
@@ -257,14 +263,14 @@ python populate_packages_updatealternatives () {
}
python package_do_filedeps_append () {
- pn = d.getVar('BPN', True)
- pkgdest = d.getVar('PKGDEST', True)
+ pn = d.getVar('BPN')
+ pkgdest = d.getVar('PKGDEST')
for pkg in packages.split():
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
- alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
- alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
- alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
+ for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
+ alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
+ alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
if alt_link == alt_target:
bb.warn('%s: alt_link == alt_target: %s == %s' % (pn, alt_link, alt_target))
@@ -276,7 +282,7 @@ python package_do_filedeps_append () {
# Add file provide
trans_target = oe.package.file_translate(alt_target)
d.appendVar('FILERPROVIDES_%s_%s' % (trans_target, pkg), " " + alt_link)
- if not trans_target in (d.getVar('FILERPROVIDESFLIST_%s' % pkg, True) or ""):
+ if not trans_target in (d.getVar('FILERPROVIDESFLIST_%s' % pkg) or ""):
d.appendVar('FILERPROVIDESFLIST_%s' % pkg, " " + trans_target)
}
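populate_packages_updatealternatives brackets its generated shell in '# Begin/End section update-alternatives' markers and splices it into pkg_postinst ahead of any update-rc.d section (and into pkg_prerm around it), so the two classes compose deterministically regardless of inherit order. The splice itself is plain list surgery on the script lines; a generic sketch:

    def splice_before(script, marker, new_section):
        """Insert new_section before marker, or append if the marker is absent."""
        lines = script.splitlines(True)
        try:
            index = lines.index(marker)
            lines.insert(index, new_section)
        except ValueError:
            lines.append(new_section)
        return ''.join(lines)

    postinst = '#!/bin/sh\n# Begin section update-rc.d\nupdate-rc.d foo defaults\n'
    alt = ('# Begin section update-alternatives\n'
           'update-alternatives --install ...\n'
           '# End section update-alternatives\n')
    print(splice_before(postinst, '# Begin section update-rc.d\n', alt))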
diff --git a/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass b/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass
index 18df2dc3f..9ba3dacca 100644
--- a/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass
+++ b/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass
@@ -1,6 +1,6 @@
UPDATERCPN ?= "${PN}"
-DEPENDS_append_class-target = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', ' update-rc.d-native update-rc.d initscripts', '', d)}"
+DEPENDS_append_class-target = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', ' update-rc.d initscripts', '', d)}"
UPDATERCD = "update-rc.d"
UPDATERCD_class-cross = ""
@@ -11,11 +11,20 @@ INITSCRIPT_PARAMS ?= "defaults"
INIT_D_DIR = "${sysconfdir}/init.d"
+def use_updatercd(d):
+ # If the distro supports both sysvinit and systemd, and the current recipe
+ # supports systemd, only call update-rc.d on rootfs creation or if systemd
+ # is not running. That's because systemctl enable/disable will already call
+ # update-rc.d if it detects initscripts.
+ if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and bb.data.inherits_class('systemd', d):
+ return '[ -n "$D" -o ! -d /run/systemd/system ]'
+ return 'true'
+
updatercd_preinst() {
-if [ -z "$D" -a -f "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
+if ${@use_updatercd(d)} && [ -z "$D" -a -f "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
${INIT_D_DIR}/${INITSCRIPT_NAME} stop || :
fi
-if type update-rc.d >/dev/null 2>/dev/null; then
+if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
if [ -n "$D" ]; then
OPT="-f -r $D"
else
@@ -25,9 +34,11 @@ if type update-rc.d >/dev/null 2>/dev/null; then
fi
}
+PACKAGE_WRITE_DEPS += "update-rc.d-native"
+
updatercd_postinst() {
# Begin section update-rc.d
-if type update-rc.d >/dev/null 2>/dev/null; then
+if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
if [ -n "$D" ]; then
OPT="-r $D"
else
@@ -40,14 +51,14 @@ fi
updatercd_prerm() {
# Begin section update-rc.d
-if [ -z "$D" -a -x "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
+if ${@use_updatercd(d)} && [ -z "$D" -a -x "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
${INIT_D_DIR}/${INITSCRIPT_NAME} stop || :
fi
# End section update-rc.d
}
updatercd_postrm() {
-if type update-rc.d >/dev/null 2>/dev/null; then
+if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
if [ -n "$D" ]; then
OPT="-f -r $D"
else
@@ -84,64 +95,63 @@ python populate_packages_updatercd () {
return
statement = "grep -q -w '/etc/init.d/functions' %s" % path
if subprocess.call(statement, shell=True) == 0:
- mlprefix = d.getVar('MLPREFIX', True) or ""
+ mlprefix = d.getVar('MLPREFIX') or ""
d.appendVar('RDEPENDS_' + pkg, ' %sinitscripts-functions' % (mlprefix))
def update_rcd_package(pkg):
bb.debug(1, 'adding update-rc.d calls to preinst/postinst/prerm/postrm for %s' % pkg)
localdata = bb.data.createCopy(d)
- overrides = localdata.getVar("OVERRIDES", True)
+ overrides = localdata.getVar("OVERRIDES")
localdata.setVar("OVERRIDES", "%s:%s" % (pkg, overrides))
- bb.data.update_data(localdata)
update_rcd_auto_depend(pkg)
- preinst = d.getVar('pkg_preinst_%s' % pkg, True)
+ preinst = d.getVar('pkg_preinst_%s' % pkg)
if not preinst:
preinst = '#!/bin/sh\n'
- preinst += localdata.getVar('updatercd_preinst', True)
+ preinst += localdata.getVar('updatercd_preinst')
d.setVar('pkg_preinst_%s' % pkg, preinst)
- postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst = postinst.splitlines(True)
try:
index = postinst.index('# End section update-alternatives\n')
- postinst.insert(index + 1, localdata.getVar('updatercd_postinst', True))
+ postinst.insert(index + 1, localdata.getVar('updatercd_postinst'))
except ValueError:
- postinst.append(localdata.getVar('updatercd_postinst', True))
+ postinst.append(localdata.getVar('updatercd_postinst'))
postinst = ''.join(postinst)
d.setVar('pkg_postinst_%s' % pkg, postinst)
- prerm = d.getVar('pkg_prerm_%s' % pkg, True)
+ prerm = d.getVar('pkg_prerm_%s' % pkg)
if not prerm:
prerm = '#!/bin/sh\n'
prerm = prerm.splitlines(True)
try:
index = prerm.index('# Begin section update-alternatives\n')
- prerm.insert(index, localdata.getVar('updatercd_prerm', True))
+ prerm.insert(index, localdata.getVar('updatercd_prerm'))
except ValueError:
- prerm.append(localdata.getVar('updatercd_prerm', True))
+ prerm.append(localdata.getVar('updatercd_prerm'))
prerm = ''.join(prerm)
d.setVar('pkg_prerm_%s' % pkg, prerm)
- postrm = d.getVar('pkg_postrm_%s' % pkg, True)
+ postrm = d.getVar('pkg_postrm_%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += localdata.getVar('updatercd_postrm', True)
+ postrm += localdata.getVar('updatercd_postrm')
d.setVar('pkg_postrm_%s' % pkg, postrm)
d.appendVar('RRECOMMENDS_' + pkg, " ${MLPREFIX}${UPDATERCD}")
# Check that this class isn't being inhibited (generally, by
# systemd.bbclass) before doing any work.
- if not d.getVar("INHIBIT_UPDATERCD_BBCLASS", True):
- pkgs = d.getVar('INITSCRIPT_PACKAGES', True)
+ if not d.getVar("INHIBIT_UPDATERCD_BBCLASS"):
+ pkgs = d.getVar('INITSCRIPT_PACKAGES')
if pkgs == None:
- pkgs = d.getVar('UPDATERCPN', True)
- packages = (d.getVar('PACKAGES', True) or "").split()
+ pkgs = d.getVar('UPDATERCPN')
+ packages = (d.getVar('PACKAGES') or "").split()
if not pkgs in packages and packages != []:
pkgs = packages[0]
for pkg in pkgs.split():
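use_updatercd() is evaluated while the maintainer scripts are generated and injects either 'true' or a runtime guard: on a systemd distro whose recipe also inherits systemd.bbclass, update-rc.d only runs during rootfs construction ($D set) or when systemd is not live, since systemctl enable/disable already handles initscripts. The decision can be mirrored in plain Python to see the emitted guard:

    def use_updatercd(distro_features, inherits_systemd):
        # defer to systemctl when systemd may own the running system
        if 'systemd' in distro_features and inherits_systemd:
            return '[ -n "$D" -o ! -d /run/systemd/system ]'
        return 'true'

    guard = use_updatercd(['sysvinit', 'systemd'], True)
    print('if %s && type update-rc.d >/dev/null 2>/dev/null; then ...' % guard)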
diff --git a/import-layers/yocto-poky/meta/classes/upstream-version-is-even.bbclass b/import-layers/yocto-poky/meta/classes/upstream-version-is-even.bbclass
index 89556ed7d..256c75242 100644
--- a/import-layers/yocto-poky/meta/classes/upstream-version-is-even.bbclass
+++ b/import-layers/yocto-poky/meta/classes/upstream-version-is-even.bbclass
@@ -2,4 +2,4 @@
# accepts even minor versions (i.e. 3.0.x, 3.2.x, 3.4.x, etc.)
# This scheme is used by Gnome and a number of other projects
# to signify stable releases vs development releases.
-UPSTREAM_CHECK_REGEX = "(?P<pver>\d+\.(\d*[02468])+(\.\d+)+)"
+UPSTREAM_CHECK_REGEX = "[^\d\.](?P<pver>\d+\.(\d*[02468])+(\.\d+)+)\.tar"
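The tightened regex requires a non-digit, non-dot character before the version and a literal .tar after it, so it matches actual release tarball names rather than any even-looking version string elsewhere on the upstream page. A quick comparison of the old and new patterns:

    import re

    old = re.compile(r"(?P<pver>\d+\.(\d*[02468])+(\.\d+)+)")
    new = re.compile(r"[^\d\.](?P<pver>\d+\.(\d*[02468])+(\.\d+)+)\.tar")

    candidates = [
        "glib-2.50.2.tar.xz",     # even minor release: both match
        "glib-2.51.1.tar.xz",     # odd (development) minor: neither matches
        "page counter 3.4.1",     # stray page text: only the old pattern matched
    ]
    for name in candidates:
        print("%-22s old=%-5s new=%s"
              % (name, bool(old.search(name)), bool(new.search(name))))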
diff --git a/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass b/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass
index afb580aed..6ebf7600f 100644
--- a/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass
+++ b/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass
@@ -8,11 +8,11 @@ def update_useradd_static_config(d):
class myArgumentParser( argparse.ArgumentParser ):
def _print_message(self, message, file=None):
- bb.warn("%s - %s: %s" % (d.getVar('PN', True), pkg, message))
+ bb.warn("%s - %s: %s" % (d.getVar('PN'), pkg, message))
# This should never be called...
def exit(self, status=0, message=None):
- message = message or ("%s - %s: useradd.bbclass: Argument parsing exited" % (d.getVar('PN', True), pkg))
+ message = message or ("%s - %s: useradd.bbclass: Argument parsing exited" % (d.getVar('PN'), pkg))
error(message)
def error(self, message):
@@ -52,14 +52,13 @@ def update_useradd_static_config(d):
def handle_missing_id(id, type, pkg):
# For backwards compatibility we accept "1" in addition to "error"
- if d.getVar('USERADD_ERROR_DYNAMIC', True) == 'error' or d.getVar('USERADD_ERROR_DYNAMIC', True) == '1':
- #bb.error("Skipping recipe %s, package %s which adds %sname %s does not have a static ID defined." % (d.getVar('PN', True), pkg, type, id))
- bb.fatal("%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN', True), pkg, type, id))
- elif d.getVar('USERADD_ERROR_DYNAMIC', True) == 'warn':
- bb.warn("%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN', True), pkg, type, id))
+ if d.getVar('USERADD_ERROR_DYNAMIC') == 'error' or d.getVar('USERADD_ERROR_DYNAMIC') == '1':
+ raise NotImplementedError("%s - %s: %sname %s does not have a static ID defined. Skipping it." % (d.getVar('PN'), pkg, type, id))
+ elif d.getVar('USERADD_ERROR_DYNAMIC') == 'warn':
+ bb.warn("%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN'), pkg, type, id))
# We parse and rewrite the useradd components
- def rewrite_useradd(params):
+ def rewrite_useradd(params, is_pkg):
# The following comes from --help on useradd from shadow
parser = myArgumentParser(prog='useradd')
parser.add_argument("-b", "--base-dir", metavar="BASE_DIR", help="base directory for the home directory of the new account")
@@ -78,6 +77,7 @@ def update_useradd_static_config(d):
parser.add_argument("-N", "--no-user-group", dest="user_group", help="do not create a group with the same name as the user", action="store_const", const=False)
parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true")
parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
+ parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new account")
parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
parser.add_argument("-r", "--system", help="create a system account", action="store_true")
parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
@@ -90,8 +90,8 @@ def update_useradd_static_config(d):
# paths are resolved via BBPATH
def get_passwd_list(d):
str = ""
- bbpath = d.getVar('BBPATH', True)
- passwd_tables = d.getVar('USERADD_UID_TABLES', True)
+ bbpath = d.getVar('BBPATH')
+ passwd_tables = d.getVar('USERADD_UID_TABLES')
if not passwd_tables:
passwd_tables = 'files/passwd'
for conf_file in passwd_tables.split():
@@ -107,7 +107,7 @@ def update_useradd_static_config(d):
try:
uaargs = parser.parse_args(re.split('''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
except:
- bb.fatal("%s: Unable to parse arguments for USERADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param))
+ bb.fatal("%s: Unable to parse arguments for USERADD_PARAM_%s: '%s'" % (d.getVar('PN'), pkg, param))
# Read all passwd files specified in USERADD_UID_TABLES or files/passwd
# Use the standard passwd layout:
@@ -124,14 +124,14 @@ def update_useradd_static_config(d):
users = merge_files(get_passwd_list(d), 7)
if uaargs.LOGIN not in users:
- if not uaargs.uid or not uaargs.uid.isdigit() or not uaargs.gid:
- handle_missing_id(uaargs.LOGIN, 'user', pkg)
+ handle_missing_id(uaargs.LOGIN, 'user', pkg)
+ newparams.append(param)
continue
field = users[uaargs.LOGIN]
if uaargs.uid and field[2] and (uaargs.uid != field[2]):
- bb.warn("%s: Changing username %s's uid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.uid, field[2]))
+ bb.warn("%s: Changing username %s's uid from (%s) to (%s), verify configuration files!" % (d.getVar('PN'), uaargs.LOGIN, uaargs.uid, field[2]))
uaargs.uid = field[2] or uaargs.uid
# Determine the possible groupname
@@ -141,9 +141,13 @@ def update_useradd_static_config(d):
# So if the implicit username-group creation is on, then the implicit groupname (LOGIN)
# is used, and we disable the user_group option.
#
- user_group = uaargs.user_group is None or uaargs.user_group is True
- uaargs.groupname = uaargs.LOGIN if user_group else uaargs.gid
- uaargs.groupid = field[3] or uaargs.gid or uaargs.groupname
+ if uaargs.gid:
+ uaargs.groupname = uaargs.gid
+ elif uaargs.user_group is not False:
+ uaargs.groupname = uaargs.LOGIN
+ else:
+ uaargs.groupname = 'users'
+ uaargs.groupid = field[3] or uaargs.groupname
if uaargs.groupid and uaargs.gid != uaargs.groupid:
newgroup = None
@@ -159,14 +163,16 @@ def update_useradd_static_config(d):
# We want to add a group, but we don't know its name... so we can't add the group...
# We have to assume the group has previously been added or we'll fail on the adduser...
# Note: specifying the actual gid is very rare in OE, usually the group name is specified.
- bb.warn("%s: Changing gid for login %s to %s, verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.groupid))
+ bb.warn("%s: Changing gid for login %s to %s, verify configuration files!" % (d.getVar('PN'), uaargs.LOGIN, uaargs.groupid))
uaargs.gid = uaargs.groupid
uaargs.user_group = None
- if newgroup:
- groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg, True)
+ if newgroup and is_pkg:
+ groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg)
if groupadd:
- d.setVar("GROUPADD_PARAM_%s" % pkg, "%s; %s" % (groupadd, newgroup))
+ # Only add the group if not already specified
+ if not uaargs.groupname in groupadd:
+ d.setVar("GROUPADD_PARAM_%s" % pkg, "%s; %s" % (groupadd, newgroup))
else:
d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup)
@@ -183,7 +189,7 @@ def update_useradd_static_config(d):
newparam += ['', ' --base-dir %s' % uaargs.base_dir][uaargs.base_dir != None]
newparam += ['', ' --comment %s' % uaargs.comment][uaargs.comment != None]
newparam += ['', ' --home-dir %s' % uaargs.home_dir][uaargs.home_dir != None]
- newparam += ['', ' --expiredata %s' % uaargs.expiredate][uaargs.expiredate != None]
+ newparam += ['', ' --expiredate %s' % uaargs.expiredate][uaargs.expiredate != None]
newparam += ['', ' --inactive %s' % uaargs.inactive][uaargs.inactive != None]
newparam += ['', ' --gid %s' % uaargs.gid][uaargs.gid != None]
newparam += ['', ' --groups %s' % uaargs.groups][uaargs.groups != None]
@@ -194,7 +200,10 @@ def update_useradd_static_config(d):
newparam += ['', ' --no-create-home'][uaargs.create_home is False]
newparam += ['', ' --no-user-group'][uaargs.user_group is False]
newparam += ['', ' --non-unique'][uaargs.non_unique]
- newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None]
+ if uaargs.password != None:
+ newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None]
+ elif uaargs.clear_password:
+ newparam += ['', ' --clear-password %s' % uaargs.clear_password][uaargs.clear_password != None]
newparam += ['', ' --root %s' % uaargs.root][uaargs.root != None]
newparam += ['', ' --system'][uaargs.system]
newparam += ['', ' --shell %s' % uaargs.shell][uaargs.shell != None]
@@ -207,7 +216,7 @@ def update_useradd_static_config(d):
return ";".join(newparams).strip()
# We parse and rewrite the groupadd components
- def rewrite_groupadd(params):
+ def rewrite_groupadd(params, is_pkg):
# The following comes from --help on groupadd from shadow
parser = myArgumentParser(prog='groupadd')
parser.add_argument("-f", "--force", help="exit successfully if the group already exists, and cancel -g if the GID is already used", action="store_true")
@@ -215,6 +224,7 @@ def update_useradd_static_config(d):
parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true")
parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group")
+ parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new group")
parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
parser.add_argument("-r", "--system", help="create a system account", action="store_true")
parser.add_argument("GROUP", help="Group name of the new group")
@@ -224,8 +234,8 @@ def update_useradd_static_config(d):
# paths are resolved via BBPATH
def get_group_list(d):
str = ""
- bbpath = d.getVar('BBPATH', True)
- group_tables = d.getVar('USERADD_GID_TABLES', True)
+ bbpath = d.getVar('BBPATH')
+ group_tables = d.getVar('USERADD_GID_TABLES')
if not group_tables:
group_tables = 'files/group'
for conf_file in group_tables.split():
@@ -242,7 +252,7 @@ def update_useradd_static_config(d):
# If we're processing multiple lines, we could have left over values here...
gaargs = parser.parse_args(re.split('''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
except:
- bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param))
+ bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM_%s: '%s'" % (d.getVar('PN'), pkg, param))
# Read all group files specified in USERADD_GID_TABLES or files/group
# Use the standard group layout:
@@ -257,15 +267,15 @@ def update_useradd_static_config(d):
groups = merge_files(get_group_list(d), 4)
if gaargs.GROUP not in groups:
- if not gaargs.gid or not gaargs.gid.isdigit():
- handle_missing_id(gaargs.GROUP, 'group', pkg)
+ handle_missing_id(gaargs.GROUP, 'group', pkg)
+ newparams.append(param)
continue
field = groups[gaargs.GROUP]
if field[2]:
if gaargs.gid and (gaargs.gid != field[2]):
- bb.warn("%s: Changing groupname %s's gid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), gaargs.GROUP, gaargs.gid, field[2]))
+ bb.warn("%s: Changing groupname %s's gid from (%s) to (%s), verify configuration files!" % (d.getVar('PN'), gaargs.GROUP, gaargs.gid, field[2]))
gaargs.gid = field[2]
if not gaargs.gid or not gaargs.gid.isdigit():
@@ -276,7 +286,10 @@ def update_useradd_static_config(d):
newparam += ['', ' --gid %s' % gaargs.gid][gaargs.gid != None]
newparam += ['', ' --key %s' % gaargs.key][gaargs.key != None]
newparam += ['', ' --non-unique'][gaargs.non_unique]
- newparam += ['', ' --password %s' % gaargs.password][gaargs.password != None]
+ if gaargs.password != None:
+ newparam += ['', ' --password %s' % gaargs.password][gaargs.password != None]
+ elif gaargs.clear_password:
+ newparam += ['', ' --clear-password %s' % gaargs.clear_password][gaargs.clear_password != None]
newparam += ['', ' --root %s' % gaargs.root][gaargs.root != None]
newparam += ['', ' --system'][gaargs.system]
newparam += ' %s' % gaargs.GROUP
@@ -289,33 +302,58 @@ def update_useradd_static_config(d):
# the files listed in USERADD_UID/GID_TABLES. We need to tell bitbake
# about that explicitly to trigger re-parsing and thus re-execution of
# this code when the files change.
- bbpath = d.getVar('BBPATH', True)
+ bbpath = d.getVar('BBPATH')
for varname, default in (('USERADD_UID_TABLES', 'files/passwd'),
('USERADD_GID_TABLES', 'files/group')):
- tables = d.getVar(varname, True)
+ tables = d.getVar(varname)
if not tables:
tables = default
for conf_file in tables.split():
bb.parse.mark_dependency(d, bb.utils.which(bbpath, conf_file))
# Load and process the users and groups, rewriting the useradd/groupadd params
- useradd_packages = d.getVar('USERADD_PACKAGES', True)
+ useradd_packages = d.getVar('USERADD_PACKAGES') or ""
for pkg in useradd_packages.split():
# Groupmems doesn't have anything we might want to change, so simply validating
# is a bit of a waste -- only process useradd/groupadd
- useradd_param = d.getVar('USERADD_PARAM_%s' % pkg, True)
+ useradd_param = d.getVar('USERADD_PARAM_%s' % pkg)
if useradd_param:
#bb.warn("Before: 'USERADD_PARAM_%s' - '%s'" % (pkg, useradd_param))
- d.setVar('USERADD_PARAM_%s' % pkg, rewrite_useradd(useradd_param))
- #bb.warn("After: 'USERADD_PARAM_%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM_%s' % pkg, True)))
+ d.setVar('USERADD_PARAM_%s' % pkg, rewrite_useradd(useradd_param, True))
+ #bb.warn("After: 'USERADD_PARAM_%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM_%s' % pkg)))
- groupadd_param = d.getVar('GROUPADD_PARAM_%s' % pkg, True)
+ groupadd_param = d.getVar('GROUPADD_PARAM_%s' % pkg)
if groupadd_param:
#bb.warn("Before: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, groupadd_param))
- d.setVar('GROUPADD_PARAM_%s' % pkg, rewrite_groupadd(groupadd_param))
- #bb.warn("After: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM_%s' % pkg, True)))
+ d.setVar('GROUPADD_PARAM_%s' % pkg, rewrite_groupadd(groupadd_param, True))
+ #bb.warn("After: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM_%s' % pkg)))
+
+    # Load and process extra users and groups, rewriting only the useradd/groupadd params
+ pkg = d.getVar('PN')
+ extrausers = d.getVar('EXTRA_USERS_PARAMS') or ""
+
+ #bb.warn("Before: 'EXTRA_USERS_PARAMS' - '%s'" % (d.getVar('EXTRA_USERS_PARAMS')))
+ new_extrausers = []
+ for cmd in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', extrausers):
+ cmd = cmd.strip()
+ if not cmd:
+ continue
+
+ if re.match('''useradd (.*)''', cmd):
+ useradd_param = re.match('''useradd (.*)''', cmd).group(1)
+ useradd_param = rewrite_useradd(useradd_param, False)
+ cmd = 'useradd %s' % useradd_param
+ elif re.match('''groupadd (.*)''', cmd):
+ groupadd_param = re.match('''groupadd (.*)''', cmd).group(1)
+ groupadd_param = rewrite_groupadd(groupadd_param, False)
+ cmd = 'groupadd %s' % groupadd_param
+
+ new_extrausers.append(cmd)
+ new_extrausers.append('')
+ d.setVar('EXTRA_USERS_PARAMS', ';'.join(new_extrausers))
+ #bb.warn("After: 'EXTRA_USERS_PARAMS' - '%s'" % (d.getVar('EXTRA_USERS_PARAMS')))
python __anonymous() {
@@ -323,7 +361,7 @@ python __anonymous() {
and not bb.data.inherits_class('native', d):
try:
update_useradd_static_config(d)
- except bb.build.FuncFailed as f:
- bb.debug(1, "Skipping recipe %s: %s" % (d.getVar('PN', True), f))
+ except NotImplementedError as f:
+ bb.debug(1, "Skipping recipe %s: %s" % (d.getVar('PN'), f))
raise bb.parse.SkipPackage(f)
}
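
The parsing above repeatedly uses a quote-aware regex split: useradd/groupadd argument strings are split on whitespace, and EXTRA_USERS_PARAMS on semicolons, but only where the delimiter sits outside single or double quotes. A minimal sketch of the semicolon variant, with a hypothetical parameter string:

    import re

    extrausers = "useradd -u 1200 -c 'test; user' tester; groupadd -g 1200 testgroup"
    cmds = [c.strip() for c in re.split(
        '''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', extrausers) if c.strip()]
    print(cmds)
    # ["useradd -u 1200 -c 'test; user' tester", 'groupadd -g 1200 testgroup']

The ';' inside the quoted comment survives because the lookahead only permits a split where the remainder of the string contains balanced quotes.
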
diff --git a/import-layers/yocto-poky/meta/classes/useradd.bbclass b/import-layers/yocto-poky/meta/classes/useradd.bbclass
index 3cff08e00..0f551b50f 100644
--- a/import-layers/yocto-poky/meta/classes/useradd.bbclass
+++ b/import-layers/yocto-poky/meta/classes/useradd.bbclass
@@ -3,7 +3,8 @@ inherit useradd_base
# base-passwd-cross provides the default passwd and group files in the
# target sysroot, and shadow -native and -sysroot provide the utilities
# and support files needed to add and modify user and group accounts
-DEPENDS_append_class-target = " base-files shadow-native shadow-sysroot shadow"
+DEPENDS_append_class-target = " base-files shadow-native shadow-sysroot shadow base-passwd"
+PACKAGE_WRITE_DEPS += "shadow-native"
# This preinstall function can be run in four different contexts:
#
@@ -31,7 +32,7 @@ if test "x$D" != "x"; then
fi
# user/group lookups should match useradd/groupadd --root
- export PSEUDO_PASSWD="$SYSROOT:${STAGING_DIR_NATIVE}"
+ export PSEUDO_PASSWD="$SYSROOT"
fi
# If we're not doing a special SSTATE/SYSROOT install
@@ -96,15 +97,30 @@ fi
}
useradd_sysroot () {
- # Pseudo may (do_install) or may not (do_populate_sysroot_setscene) be running
+ # Pseudo may (do_prepare_recipe_sysroot) or may not (do_populate_sysroot_setscene) be running
# at this point so we're explicit about the environment so pseudo can load if
# not already present.
- export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir_native}/pseudo"
+ export PSEUDO="${FAKEROOTENV} ${PSEUDO_SYSROOT}${bindir_native}/pseudo"
# Explicitly set $D since it isn't set to anything
- # before do_install
+ # before do_prepare_recipe_sysroot
D=${STAGING_DIR_TARGET}
+	# base-passwd's postinst may not have run yet, in which case we'll get called again later; just exit.
+ # Beware that in some cases we might see the fake pseudo passwd here, in which case we also must
+ # exit.
+ if [ ! -f $D${sysconfdir}/passwd ] ||
+ grep -q this-is-the-pseudo-passwd $D${sysconfdir}/passwd; then
+ exit 0
+ fi
+
+	# It is also possible that we are in a recipe which doesn't have useradd dependencies and hence the
+	# useradd/groupadd tools are unavailable. If there is no dependency, we assume we don't want to
+	# create users in the sysroot.
+ if ! command -v useradd; then
+ exit 0
+ fi
+
# Add groups and users defined for all recipe packages
GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}"
USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}"
@@ -116,49 +132,43 @@ useradd_sysroot () {
useradd_preinst
}
-useradd_sysroot_sstate () {
- if [ "${BB_CURRENTTASK}" = "package_setscene" -o "${BB_CURRENTTASK}" = "populate_sysroot_setscene" ]
- then
- useradd_sysroot
- fi
-}
-
-userdel_sysroot_sstate () {
-if test "x${STAGING_DIR_TARGET}" != "x"; then
- if [ "${BB_CURRENTTASK}" = "clean" ]; then
- export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir_native}/pseudo"
- OPT="--root ${STAGING_DIR_TARGET}"
-
- # Remove groups and users defined for package
- GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}"
- USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}"
-
- user=`echo "$USERADD_PARAM" | cut -d ';' -f 1 | awk '{ print $NF }'`
- remaining=`echo "$USERADD_PARAM" | cut -d ';' -f 2- -s | sed -e 's#[ \t]*$##'`
- while test "x$user" != "x"; do
- perform_userdel "${STAGING_DIR_TARGET}" "$OPT $user"
- user=`echo "$remaining" | cut -d ';' -f 1 | awk '{ print $NF }'`
- remaining=`echo "$remaining" | cut -d ';' -f 2- -s | sed -e 's#[ \t]*$##'`
- done
-
- user=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1 | awk '{ print $NF }'`
- remaining=`echo "$GROUPADD_PARAM" | cut -d ';' -f 2- -s | sed -e 's#[ \t]*$##'`
- while test "x$user" != "x"; do
- perform_groupdel "${STAGING_DIR_TARGET}" "$OPT $user"
- user=`echo "$remaining" | cut -d ';' -f 1 | awk '{ print $NF }'`
- remaining=`echo "$remaining" | cut -d ';' -f 2- -s | sed -e 's#[ \t]*$##'`
- done
-
- fi
-fi
+# The export of PSEUDO in useradd_sysroot() above contains references to
+# ${COMPONENTS_DIR} and ${PSEUDO_LOCALSTATEDIR}. Additionally, the logging
+# shell functions use ${LOGFIFO}. These need to be handled when restoring
+# postinst-useradd-${PN} from the sstate cache.
+EXTRA_STAGING_FIXMES += "COMPONENTS_DIR PSEUDO_LOCALSTATEDIR LOGFIFO"
+
+python useradd_sysroot_sstate () {
+ task = d.getVar("BB_CURRENTTASK")
+ if task == "package_setscene":
+ bb.build.exec_func("useradd_sysroot", d)
+ elif task == "prepare_recipe_sysroot":
+ # Used to update this recipe's own sysroot so the user/groups are available to do_install
+ scriptfile = d.expand("${RECIPE_SYSROOT}${bindir}/postinst-useradd-${PN}")
+ bb.utils.mkdirhier(os.path.dirname(scriptfile))
+ with open(scriptfile, 'w') as script:
+ script.write("#!/bin/sh\n")
+ bb.data.emit_func("useradd_sysroot", script, d)
+ script.write("useradd_sysroot\n")
+ os.chmod(scriptfile, 0o755)
+ bb.build.exec_func("useradd_sysroot", d)
+ elif task == "populate_sysroot":
+ # Used when installed in dependent task sysroots
+ scriptfile = d.expand("${SYSROOT_DESTDIR}${bindir}/postinst-useradd-${PN}")
+ bb.utils.mkdirhier(os.path.dirname(scriptfile))
+ with open(scriptfile, 'w') as script:
+ script.write("#!/bin/sh\n")
+ bb.data.emit_func("useradd_sysroot", script, d)
+ script.write("useradd_sysroot\n")
+ os.chmod(scriptfile, 0o755)
}
-SSTATECLEANFUNCS_append_class-target = " userdel_sysroot_sstate"
-
-do_install[prefuncs] += "${SYSROOTFUNC}"
-SYSROOTFUNC_class-target = "useradd_sysroot"
+do_prepare_recipe_sysroot[postfuncs] += "${SYSROOTFUNC}"
+SYSROOTFUNC_class-target = "useradd_sysroot_sstate"
SYSROOTFUNC = ""
+SYSROOT_PREPROCESS_FUNCS += "${SYSROOTFUNC}"
+
SSTATEPREINSTFUNCS_append_class-target = " useradd_sysroot_sstate"
do_package_setscene[depends] += "${USERADDSETSCENEDEPS}"
@@ -168,13 +178,13 @@ USERADDSETSCENEDEPS = ""
# Recipe parse-time sanity checks
def update_useradd_after_parse(d):
- useradd_packages = d.getVar('USERADD_PACKAGES', True)
+ useradd_packages = d.getVar('USERADD_PACKAGES')
if not useradd_packages:
bb.fatal("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE', False))
for pkg in useradd_packages.split():
- if not d.getVar('USERADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPMEMS_PARAM_%s' % pkg, True):
+ if not d.getVar('USERADD_PARAM_%s' % pkg) and not d.getVar('GROUPADD_PARAM_%s' % pkg) and not d.getVar('GROUPMEMS_PARAM_%s' % pkg):
bb.fatal("%s inherits useradd but doesn't set USERADD_PARAM, GROUPADD_PARAM or GROUPMEMS_PARAM for package %s" % (d.getVar('FILE', False), pkg))
python __anonymous() {
@@ -191,9 +201,9 @@ def get_all_cmd_params(d, cmd_type):
param_type = cmd_type.upper() + "_PARAM_%s"
params = []
- useradd_packages = d.getVar('USERADD_PACKAGES', True) or ""
+ useradd_packages = d.getVar('USERADD_PACKAGES') or ""
for pkg in useradd_packages.split():
- param = d.getVar(param_type % pkg, True)
+ param = d.getVar(param_type % pkg)
if param:
params.append(param.rstrip(" ;"))
@@ -209,20 +219,20 @@ fakeroot python populate_packages_prepend () {
required to execute on the target. Not doing so may cause
useradd preinst to be invoked twice, causing unwanted warnings.
"""
- preinst = d.getVar('pkg_preinst_%s' % pkg, True) or d.getVar('pkg_preinst', True)
+ preinst = d.getVar('pkg_preinst_%s' % pkg) or d.getVar('pkg_preinst')
if not preinst:
preinst = '#!/bin/sh\n'
preinst += 'bbnote () {\n\techo "NOTE: $*"\n}\n'
preinst += 'bbwarn () {\n\techo "WARNING: $*"\n}\n'
preinst += 'bbfatal () {\n\techo "ERROR: $*"\n\texit 1\n}\n'
- preinst += 'perform_groupadd () {\n%s}\n' % d.getVar('perform_groupadd', True)
- preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd', True)
- preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems', True)
- preinst += d.getVar('useradd_preinst', True)
+ preinst += 'perform_groupadd () {\n%s}\n' % d.getVar('perform_groupadd')
+ preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd')
+ preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems')
+ preinst += d.getVar('useradd_preinst')
d.setVar('pkg_preinst_%s' % pkg, preinst)
# RDEPENDS setup
- rdepends = d.getVar("RDEPENDS_%s" % pkg, True) or ""
+ rdepends = d.getVar("RDEPENDS_%s" % pkg) or ""
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-passwd'
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'shadow'
# base-files is where the default /etc/skel is packaged
@@ -233,7 +243,7 @@ fakeroot python populate_packages_prepend () {
# to packages specified by USERADD_PACKAGES
if not bb.data.inherits_class('nativesdk', d) \
and not bb.data.inherits_class('native', d):
- useradd_packages = d.getVar('USERADD_PACKAGES', True) or ""
+ useradd_packages = d.getVar('USERADD_PACKAGES') or ""
for pkg in useradd_packages.split():
update_useradd_package(pkg)
}
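
The replacement useradd_sysroot_sstate above is the core of this rework: instead of deleting users from a shared sysroot on clean, user/group creation is now replayed into each recipe's own sysroot, and a postinst-useradd-${PN} script is staged so dependent recipes can replay it as well. The script is produced with bb.data.emit_func, which serializes a shell function (and the functions it calls) out of the datastore. A condensed sketch of that pattern (the helper name is hypothetical; assumes a BitBake datastore d):

    import os
    import bb.data
    import bb.utils

    def write_useradd_script(d, scriptfile):
        # Serialize the 'useradd_sysroot' shell function, plus its callees,
        # into a standalone executable script and append a call to it.
        bb.utils.mkdirhier(os.path.dirname(scriptfile))
        with open(scriptfile, 'w') as script:
            script.write("#!/bin/sh\n")
            bb.data.emit_func("useradd_sysroot", script, d)
            script.write("useradd_sysroot\n")
        os.chmod(scriptfile, 0o755)
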
diff --git a/import-layers/yocto-poky/meta/classes/useradd_base.bbclass b/import-layers/yocto-poky/meta/classes/useradd_base.bbclass
index ba87edc57..551c82c32 100644
--- a/import-layers/yocto-poky/meta/classes/useradd_base.bbclass
+++ b/import-layers/yocto-poky/meta/classes/useradd_base.bbclass
@@ -69,11 +69,21 @@ perform_groupdel () {
bbnote "${PN}: Performing groupdel with [$opts]"
local groupname=`echo "$opts" | awk '{ print $NF }'`
local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
+
if test "x$group_exists" != "x"; then
- eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupdel \$opts\" || true
- group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
- if test "x$group_exists" != "x"; then
- bbfatal "${PN}: groupdel command did not succeed."
+ local awk_input='BEGIN {FS=":"}; $1=="'$groupname'" { print $3 }'
+ local groupid=`echo "$awk_input" | awk -f- $rootdir/etc/group`
+ local awk_check_users='BEGIN {FS=":"}; $4=="'$groupid'" {print $1}'
+ local other_users=`echo "$awk_check_users" | awk -f- $rootdir/etc/passwd`
+
+ if test "x$other_users" = "x"; then
+ eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupdel \$opts\" || true
+ group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
+ if test "x$group_exists" != "x"; then
+ bbfatal "${PN}: groupdel command did not succeed."
+ fi
+ else
+		bbnote "${PN}: '$groupname' is the primary group for users '$other_users', not removing it"
fi
else
bbnote "${PN}: group $groupname doesn't exist, not removing it"
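
The guard added to perform_groupdel above keeps a group alive while any user still lists its GID as a primary group: the group's GID is the third colon-separated field of the group file, and the primary GID is the fourth field of passwd. The same check expressed in Python (function name and paths are hypothetical, for illustration only):

    def users_with_primary_group(groupname, passwd_file, group_file):
        # GID of the group: third colon-separated field of the group entry
        with open(group_file) as f:
            gids = [line.split(":")[2] for line in f
                    if line.split(":")[0] == groupname]
        if not gids:
            return []
        # Users whose fourth passwd field (primary GID) matches that GID
        with open(passwd_file) as f:
            return [line.split(":")[0] for line in f
                    if line.strip() and line.split(":")[3] == gids[0]]

    # users_with_primary_group("render", "/etc/passwd", "/etc/group")
    # -> e.g. ['weston'], in which case the group is not deleted
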
diff --git a/import-layers/yocto-poky/meta/classes/utility-tasks.bbclass b/import-layers/yocto-poky/meta/classes/utility-tasks.bbclass
index 7ba56e28a..587bfd4ab 100644
--- a/import-layers/yocto-poky/meta/classes/utility-tasks.bbclass
+++ b/import-layers/yocto-poky/meta/classes/utility-tasks.bbclass
@@ -4,12 +4,12 @@ python do_listtasks() {
taskdescs = {}
maxlen = 0
for e in d.keys():
- if d.getVarFlag(e, 'task', True):
+ if d.getVarFlag(e, 'task'):
maxlen = max(maxlen, len(e))
if e.endswith('_setscene'):
- desc = "%s (setscene version)" % (d.getVarFlag(e[:-9], 'doc', True) or '')
+ desc = "%s (setscene version)" % (d.getVarFlag(e[:-9], 'doc') or '')
else:
- desc = d.getVarFlag(e, 'doc', True) or ''
+ desc = d.getVarFlag(e, 'doc') or ''
taskdescs[e] = desc
tasks = sorted(taskdescs.keys())
@@ -28,18 +28,18 @@ python do_clean() {
bb.note("Removing " + dir)
oe.path.remove(dir)
- dir = "%s.*" % bb.data.expand(d.getVar('STAMP', False), d)
+ dir = "%s.*" % d.getVar('STAMP')
bb.note("Removing " + dir)
oe.path.remove(dir)
- for f in (d.getVar('CLEANFUNCS', True) or '').split():
+ for f in (d.getVar('CLEANFUNCS') or '').split():
bb.build.exec_func(f, d)
}
addtask checkuri
do_checkuri[nostamp] = "1"
python do_checkuri() {
- src_uri = (d.getVar('SRC_URI', True) or "").split()
+ src_uri = (d.getVar('SRC_URI') or "").split()
if len(src_uri) == 0:
return
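
Nearly every hunk in this file (and across the series) drops the explicit True argument to getVar/getVarFlag: in the newer BitBake these changes target, expansion is the default, and expand=False retrieves the raw, unexpanded value. A minimal sketch, assuming BitBake's bb module is importable:

    import bb.data

    d = bb.data.init()
    d.setVar("TMPDIR", "/tmp/build")
    d.setVar("PN", "example")
    d.setVar("STAMP", "${TMPDIR}/stamps/${PN}")
    print(d.getVar("STAMP", False))  # ${TMPDIR}/stamps/${PN}   (raw)
    print(d.getVar("STAMP"))         # /tmp/build/stamps/example (expanded)
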
diff --git a/import-layers/yocto-poky/meta/classes/utils.bbclass b/import-layers/yocto-poky/meta/classes/utils.bbclass
index dbb5e4cbb..96463ab32 100644
--- a/import-layers/yocto-poky/meta/classes/utils.bbclass
+++ b/import-layers/yocto-poky/meta/classes/utils.bbclass
@@ -41,9 +41,9 @@ def oe_filter_out(f, str, d):
def machine_paths(d):
"""List any existing machine specific filespath directories"""
- machine = d.getVar("MACHINE", True)
- filespathpkg = d.getVar("FILESPATHPKG", True).split(":")
- for basepath in d.getVar("FILESPATHBASE", True).split(":"):
+ machine = d.getVar("MACHINE")
+ filespathpkg = d.getVar("FILESPATHPKG").split(":")
+ for basepath in d.getVar("FILESPATHBASE").split(":"):
for pkgpath in filespathpkg:
machinepath = os.path.join(basepath, pkgpath, machine)
if os.path.isdir(machinepath):
@@ -52,7 +52,7 @@ def machine_paths(d):
def is_machine_specific(d):
"""Determine whether the current recipe is machine specific"""
machinepaths = set(machine_paths(d))
- srcuri = d.getVar("SRC_URI", True).split()
+ srcuri = d.getVar("SRC_URI").split()
for url in srcuri:
fetcher = bb.fetch2.Fetch([srcuri], d)
if url.startswith("file://"):
@@ -264,10 +264,17 @@ create_cmdline_wrapper () {
mv $cmd $cmd.real
cmdname=`basename $cmd`
+ dirname=`dirname $cmd`
+ cmdoptions=$@
+ if [ "${base_prefix}" != "" ]; then
+ relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
+ cmdoptions=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
+ fi
cat <<END >$cmd
#!/bin/bash
realpath=\`readlink -fn \$0\`
-exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real $@ "\$@"
+realdir=\`dirname \$realpath\`
+exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real $cmdoptions "\$@"
END
chmod +x $cmd
}
@@ -287,10 +294,17 @@ create_wrapper () {
mv $cmd $cmd.real
cmdname=`basename $cmd`
+ dirname=`dirname $cmd`
+ exportstring=$@
+ if [ "${base_prefix}" != "" ]; then
+ relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
+ exportstring=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
+ fi
cat <<END >$cmd
#!/bin/bash
realpath=\`readlink -fn \$0\`
-export $@
+realdir=\`dirname \$realpath\`
+export $exportstring
exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real "\$@"
END
chmod +x $cmd
@@ -307,7 +321,7 @@ hardlinkdir () {
def check_app_exists(app, d):
app = d.expand(app).strip()
- path = d.getVar('PATH', d, True)
+ path = d.getVar('PATH')
return bool(bb.utils.which(path, app))
def explode_deps(s):
@@ -315,14 +329,14 @@ def explode_deps(s):
def base_set_filespath(path, d):
filespath = []
- extrapaths = (d.getVar("FILESEXTRAPATHS", True) or "")
+ extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
# Remove default flag which was used for checking
extrapaths = extrapaths.replace("__default:", "")
# Don't prepend empty strings to the path list
if extrapaths != "":
path = extrapaths.split(":") + path
# The ":" ensures we have an 'empty' override
- overrides = (":" + (d.getVar("FILESOVERRIDES", True) or "")).split(":")
+ overrides = (":" + (d.getVar("FILESOVERRIDES") or "")).split(":")
overrides.reverse()
for o in overrides:
for p in path:
@@ -333,7 +347,7 @@ def base_set_filespath(path, d):
def extend_variants(d, var, extend, delim=':'):
"""Return a string of all bb class extend variants for the given extend"""
variants = []
- whole = d.getVar(var, True) or ""
+ whole = d.getVar(var) or ""
for ext in whole.split():
eext = ext.split(delim)
if len(eext) > 1 and eext[0] == extend:
@@ -341,7 +355,7 @@ def extend_variants(d, var, extend, delim=':'):
return " ".join(variants)
def multilib_pkg_extend(d, pkg):
- variants = (d.getVar("MULTILIB_VARIANTS", True) or "").split()
+ variants = (d.getVar("MULTILIB_VARIANTS") or "").split()
if not variants:
return pkg
pkgs = pkg
@@ -349,24 +363,27 @@ def multilib_pkg_extend(d, pkg):
pkgs = pkgs + " " + v + "-" + pkg
return pkgs
+def get_multilib_datastore(variant, d):
+ localdata = bb.data.createCopy(d)
+ overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
+ localdata.setVar("OVERRIDES", overrides)
+ localdata.setVar("MLPREFIX", variant + "-")
+ return localdata
+
def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
"""Return a string of all ${var} in all multilib tune configuration"""
values = []
- value = d.getVar(var, True) or ""
+ value = d.getVar(var) or ""
if value != "":
if need_split:
for item in value.split(delim):
values.append(item)
else:
values.append(value)
- variants = d.getVar("MULTILIB_VARIANTS", True) or ""
+ variants = d.getVar("MULTILIB_VARIANTS") or ""
for item in variants.split():
- localdata = bb.data.createCopy(d)
- overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
- localdata.setVar("OVERRIDES", overrides)
- localdata.setVar("MLPREFIX", item + "-")
- bb.data.update_data(localdata)
- value = localdata.getVar(var, True) or ""
+ localdata = get_multilib_datastore(item, d)
+ value = localdata.getVar(var) or ""
if value != "":
if need_split:
for item in value.split(delim):
@@ -402,21 +419,16 @@ def all_multilib_tune_list(vars, d):
newoverrides.append(o)
localdata.setVar("OVERRIDES", ":".join(newoverrides))
localdata.setVar("MLPREFIX", "")
- origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL", True)
+ origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL")
if origdefault:
localdata.setVar("DEFAULTTUNE", origdefault)
- bb.data.update_data(localdata)
values['ml'] = ['']
for v in vars:
- values[v].append(localdata.getVar(v, True))
- variants = d.getVar("MULTILIB_VARIANTS", True) or ""
+ values[v].append(localdata.getVar(v))
+ variants = d.getVar("MULTILIB_VARIANTS") or ""
for item in variants.split():
- localdata = bb.data.createCopy(d)
- overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
- localdata.setVar("OVERRIDES", overrides)
- localdata.setVar("MLPREFIX", item + "-")
- bb.data.update_data(localdata)
- values[v].append(localdata.getVar(v, True))
+ localdata = get_multilib_datastore(item, d)
+ values[v].append(localdata.getVar(v))
values['ml'].append(item)
return values
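
The create_cmdline_wrapper/create_wrapper changes above make wrappers relocatable: any occurrence of base_prefix in the wrapper's arguments is rewritten as a path relative to the wrapper's own directory, which the generated script recomputes at run time as $realdir. The path arithmetic behind the sed substitution, with hypothetical paths:

    import os

    d_base_prefix = "/work/image/usr"    # ${D}${base_prefix} at packaging time
    wrapper_dir = "/work/image/usr/bin"  # directory containing the wrapper
    print(os.path.relpath(d_base_prefix, wrapper_dir))
    # '..'  -> the wrapper script substitutes "$realdir/.." for the prefix
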
diff --git a/import-layers/yocto-poky/meta/classes/waf.bbclass b/import-layers/yocto-poky/meta/classes/waf.bbclass
index 5e55833ca..c4698e910 100644
--- a/import-layers/yocto-poky/meta/classes/waf.bbclass
+++ b/import-layers/yocto-poky/meta/classes/waf.bbclass
@@ -1,8 +1,10 @@
# avoids build breaks when using no-static-libs.inc
DISABLE_STATIC = ""
+EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
+
def get_waf_parallel_make(d):
- pm = d.getVar('PARALLEL_MAKE', True)
+ pm = d.getVar('PARALLEL_MAKE')
if pm:
# look for '-j' and throw other options (e.g. '-l') away
        # because they might have a different meaning in waf
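
get_waf_parallel_make, truncated here, reduces PARALLEL_MAKE to just the job count, since waf only understands -j. A minimal sketch of that idea (not the class's exact implementation):

    import re

    def waf_jobs(parallel_make):
        # Keep only '-j N'; other make flags such as '-l' are discarded
        m = re.search(r"-j\s*(\d+)", parallel_make or "")
        return int(m.group(1)) if m else None

    print(waf_jobs("-j 8 -l 10"))  # 8
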