Diffstat (limited to 'import-layers/yocto-poky/meta/classes')
-rw-r--r--  import-layers/yocto-poky/meta/classes/allarch.bbclass | 5
-rw-r--r--  import-layers/yocto-poky/meta/classes/archiver.bbclass | 42
-rw-r--r--  import-layers/yocto-poky/meta/classes/autotools.bbclass | 112
-rw-r--r--  import-layers/yocto-poky/meta/classes/base.bbclass | 62
-rw-r--r--  import-layers/yocto-poky/meta/classes/bash-completion.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/buildhistory.bbclass | 57
-rw-r--r--  import-layers/yocto-poky/meta/classes/buildstats-summary.bbclass | 6
-rw-r--r--  import-layers/yocto-poky/meta/classes/buildstats.bbclass | 9
-rw-r--r--  import-layers/yocto-poky/meta/classes/ccache.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/chrpath.bbclass | 21
-rw-r--r--  import-layers/yocto-poky/meta/classes/cmake.bbclass | 20
-rw-r--r--  import-layers/yocto-poky/meta/classes/cml1.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/core-image.bbclass | 3
-rw-r--r--  import-layers/yocto-poky/meta/classes/cross-canadian.bbclass | 23
-rw-r--r--  import-layers/yocto-poky/meta/classes/cross.bbclass | 14
-rw-r--r--  import-layers/yocto-poky/meta/classes/cve-check.bbclass | 265
-rw-r--r--  import-layers/yocto-poky/meta/classes/devshell.bbclass | 5
-rw-r--r--  import-layers/yocto-poky/meta/classes/distrodata.bbclass | 1
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils-common-base.bbclass | 4
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils-native-base.bbclass | 3
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils-tools.bbclass | 4
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils.bbclass | 4
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils3-base.bbclass | 3
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils3-native-base.bbclass | 4
-rw-r--r--  import-layers/yocto-poky/meta/classes/distutils3.bbclass | 24
-rw-r--r--  import-layers/yocto-poky/meta/classes/externalsrc.bbclass | 44
-rw-r--r--  import-layers/yocto-poky/meta/classes/gobject-introspection-data.bbclass | 4
-rw-r--r--  import-layers/yocto-poky/meta/classes/gobject-introspection.bbclass | 28
-rw-r--r--  import-layers/yocto-poky/meta/classes/grub-efi.bbclass | 17
-rw-r--r--  import-layers/yocto-poky/meta/classes/gtk-doc.bbclass | 72
-rw-r--r--  import-layers/yocto-poky/meta/classes/gtk-immodules-cache.bbclass | 8
-rw-r--r--  import-layers/yocto-poky/meta/classes/gummiboot.bbclass | 8
-rw-r--r--  import-layers/yocto-poky/meta/classes/icecc.bbclass | 14
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-buildinfo.bbclass | 6
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-live.bbclass | 20
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-swab.bbclass | 94
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-vm.bbclass | 34
-rw-r--r--  import-layers/yocto-poky/meta/classes/image.bbclass | 117
-rw-r--r--  import-layers/yocto-poky/meta/classes/image_types.bbclass | 212
-rw-r--r--  import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass | 20
-rw-r--r--  import-layers/yocto-poky/meta/classes/insane.bbclass | 98
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-arch.bbclass | 4
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass | 140
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass | 83
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-uimage.bbclass | 3
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass | 152
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel.bbclass | 79
-rw-r--r--  import-layers/yocto-poky/meta/classes/libc-common.bbclass | 6
-rw-r--r--  import-layers/yocto-poky/meta/classes/libc-package.bbclass | 48
-rw-r--r--  import-layers/yocto-poky/meta/classes/license.bbclass | 84
-rw-r--r--  import-layers/yocto-poky/meta/classes/linuxloader.bbclass | 8
-rw-r--r--  import-layers/yocto-poky/meta/classes/live-vm-common.bbclass | 8
-rw-r--r--  import-layers/yocto-poky/meta/classes/metadata_scm.bbclass | 5
-rw-r--r--  import-layers/yocto-poky/meta/classes/mirrors.bbclass | 8
-rw-r--r--  import-layers/yocto-poky/meta/classes/module.bbclass | 15
-rw-r--r--  import-layers/yocto-poky/meta/classes/multilib_global.bbclass | 3
-rw-r--r--  import-layers/yocto-poky/meta/classes/multilib_header.bbclass | 5
-rw-r--r--  import-layers/yocto-poky/meta/classes/native.bbclass | 3
-rw-r--r--  import-layers/yocto-poky/meta/classes/nativesdk.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/nopackages.bbclass | 6
-rw-r--r--  import-layers/yocto-poky/meta/classes/npm.bbclass | 22
-rw-r--r--  import-layers/yocto-poky/meta/classes/oelint.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/package.bbclass | 78
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_deb.bbclass | 49
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_ipk.bbclass | 19
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_rpm.bbclass | 12
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_tar.bbclass | 5
-rw-r--r--  import-layers/yocto-poky/meta/classes/packagefeed-stability.bbclass | 252
-rw-r--r--  import-layers/yocto-poky/meta/classes/packagegroup.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/patch.bbclass | 3
-rw-r--r--  import-layers/yocto-poky/meta/classes/pixbufcache.bbclass | 6
-rw-r--r--  import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass | 40
-rw-r--r--  import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass | 267
-rw-r--r--  import-layers/yocto-poky/meta/classes/python-dir.bbclass | 6
-rw-r--r--  import-layers/yocto-poky/meta/classes/python3-dir.bbclass | 5
-rw-r--r--  import-layers/yocto-poky/meta/classes/python3native.bbclass | 14
-rw-r--r--  import-layers/yocto-poky/meta/classes/pythonnative.bbclass | 10
-rw-r--r--  import-layers/yocto-poky/meta/classes/qemu.bbclass | 5
-rw-r--r--  import-layers/yocto-poky/meta/classes/qemuboot.bbclass | 82
-rw-r--r--  import-layers/yocto-poky/meta/classes/recipe_sanity.bbclass | 22
-rw-r--r--  import-layers/yocto-poky/meta/classes/report-error.bbclass | 11
-rw-r--r--  import-layers/yocto-poky/meta/classes/rm_work.bbclass | 17
-rw-r--r--  import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass | 59
-rw-r--r--  import-layers/yocto-poky/meta/classes/rootfs_rpm.bbclass | 9
-rw-r--r--  import-layers/yocto-poky/meta/classes/sanity.bbclass | 70
-rw-r--r--  import-layers/yocto-poky/meta/classes/scons.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/sdl.bbclass | 6
-rw-r--r--  import-layers/yocto-poky/meta/classes/sip.bbclass | 61
-rw-r--r--  import-layers/yocto-poky/meta/classes/siteinfo.bbclass | 29
-rw-r--r--  import-layers/yocto-poky/meta/classes/spdx.bbclass | 11
-rw-r--r--  import-layers/yocto-poky/meta/classes/sstate.bbclass | 77
-rw-r--r--  import-layers/yocto-poky/meta/classes/staging.bbclass | 90
-rw-r--r--  import-layers/yocto-poky/meta/classes/syslinux.bbclass | 21
-rw-r--r--  import-layers/yocto-poky/meta/classes/systemd-boot.bbclass | 124
-rw-r--r--  import-layers/yocto-poky/meta/classes/systemd.bbclass | 3
-rw-r--r--  import-layers/yocto-poky/meta/classes/terminal.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/testexport.bbclass | 206
-rw-r--r--  import-layers/yocto-poky/meta/classes/testimage.bbclass | 176
-rw-r--r--  import-layers/yocto-poky/meta/classes/testsdk.bbclass | 16
-rw-r--r--  import-layers/yocto-poky/meta/classes/tinderclient.bbclass | 26
-rw-r--r--  import-layers/yocto-poky/meta/classes/toaster.bbclass | 113
-rw-r--r--  import-layers/yocto-poky/meta/classes/toolchain-scripts-base.bbclass | 11
-rw-r--r--  import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass | 28
-rw-r--r--  import-layers/yocto-poky/meta/classes/uboot-config.bbclass | 19
-rw-r--r--  import-layers/yocto-poky/meta/classes/uboot-extlinux-config.bbclass | 126
-rw-r--r--  import-layers/yocto-poky/meta/classes/uboot-sign.bbclass | 95
-rw-r--r--  import-layers/yocto-poky/meta/classes/update-alternatives.bbclass | 4
-rw-r--r--  import-layers/yocto-poky/meta/classes/update-rc.d.bbclass | 18
-rw-r--r--  import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass | 75
-rw-r--r--  import-layers/yocto-poky/meta/classes/useradd.bbclass | 88
-rw-r--r--  import-layers/yocto-poky/meta/classes/useradd_base.bbclass | 14
-rw-r--r--  import-layers/yocto-poky/meta/classes/utility-tasks.bbclass | 4
-rw-r--r--  import-layers/yocto-poky/meta/classes/utils.bbclass | 69
113 files changed, 3251 insertions, 1485 deletions
diff --git a/import-layers/yocto-poky/meta/classes/allarch.bbclass b/import-layers/yocto-poky/meta/classes/allarch.bbclass
index 208cde6e5..ddc2a8505 100644
--- a/import-layers/yocto-poky/meta/classes/allarch.bbclass
+++ b/import-layers/yocto-poky/meta/classes/allarch.bbclass
@@ -17,6 +17,7 @@ python () {
# Set these to a common set of values, we shouldn't be using them other than for WORKDIR directory
# naming anyway
+ d.setVar("baselib", "lib")
d.setVar("TARGET_ARCH", "allarch")
d.setVar("TARGET_OS", "linux")
d.setVar("TARGET_CC_ARCH", "none")
@@ -41,6 +42,10 @@ python () {
d.setVar("EXCLUDE_FROM_SHLIBS", "1")
d.setVar("INHIBIT_PACKAGE_DEBUG_SPLIT", "1")
d.setVar("INHIBIT_PACKAGE_STRIP", "1")
+
+ # These multilib values shouldn't change allarch packages so exclude them
+ d.setVarFlag("emit_pkgdata", "vardepsexclude", "MULTILIB_VARIANTS")
+ d.setVarFlag("write_specfile", "vardepsexclude", "MULTILIBS")
elif bb.data.inherits_class('packagegroup', d) and not bb.data.inherits_class('nativesdk', d):
bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE", True))
}
diff --git a/import-layers/yocto-poky/meta/classes/archiver.bbclass b/import-layers/yocto-poky/meta/classes/archiver.bbclass
index 2f3b278fb..9239983e8 100644
--- a/import-layers/yocto-poky/meta/classes/archiver.bbclass
+++ b/import-layers/yocto-poky/meta/classes/archiver.bbclass
@@ -132,7 +132,26 @@ python do_ar_original() {
ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
bb.note('Archiving the original source...')
- fetch = bb.fetch2.Fetch([], d)
+ urls = d.getVar("SRC_URI", True).split()
+ # destsuffix (git fetcher) and subdir (everything else) are allowed to be
+ # absolute paths (for example, destsuffix=${S}/foobar).
+ # That messes with unpacking inside our tmpdir below, because the fetchers
+ # will then unpack in that directory and completely ignore the tmpdir.
+ # That breaks parallel tasks relying on ${S}, like do_compile.
+ #
+ # To solve this, we remove these parameters from all URLs.
+ # We do this even for relative paths because it makes the content of the
+ # archives more useful (no extra paths that are only used during
+ # compilation).
+ for i, url in enumerate(urls):
+ decoded = bb.fetch2.decodeurl(url)
+ for param in ('destsuffix', 'subdir'):
+ if param in decoded[5]:
+ del decoded[5][param]
+ encoded = bb.fetch2.encodeurl(decoded)
+ urls[i] = encoded
+ fetch = bb.fetch2.Fetch(urls, d)
+ tarball_suffix = {}
for url in fetch.urls:
local = fetch.localpath(url).rstrip("/");
if os.path.isfile(local):
@@ -140,7 +159,21 @@ python do_ar_original() {
elif os.path.isdir(local):
tmpdir = tempfile.mkdtemp(dir=d.getVar('ARCHIVER_WORKDIR', True))
fetch.unpack(tmpdir, (url,))
- create_tarball(d, tmpdir + '/.', '', ar_outdir)
+ # To handle recipes with more than one source, we add the "name"
+ # URL parameter as suffix. We treat it as an error when
+ # there's more than one URL without a name, or a name gets reused.
+ # This is an additional safety net, in practice the name has
+ # to be set when using the git fetcher, otherwise SRCREV cannot
+ # be set separately for each URL.
+ params = bb.fetch2.decodeurl(url)[5]
+ name = params.get('name', '')
+ if name in tarball_suffix:
+ if not name:
+ bb.fatal("Cannot determine archive names for original source because 'name' URL parameter is unset in more than one URL. Add it to at least one of these: %s %s" % (tarball_suffix[name], url))
+ else:
+ bb.fatal("Cannot determine archive names for original source because 'name=' URL parameter '%s' is used twice. Make it unique in: %s %s" % (tarball_suffix[name], url))
+ tarball_suffix[name] = url
+ create_tarball(d, tmpdir + '/.', name, ar_outdir)
# Emit patch series files for 'original'
bb.note('Writing patch series files...')
@@ -270,9 +303,10 @@ python do_unpack_and_patch() {
return
ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
ar_workdir = d.getVar('ARCHIVER_WORKDIR', True)
+ pn = d.getVar('PN', True)
# The kernel class functions require it to be on work-shared, so we don't change WORKDIR
- if not bb.data.inherits_class('kernel-yocto', d):
+ if not (bb.data.inherits_class('kernel-yocto', d) or pn.startswith('gcc-source')):
# Change the WORKDIR to make do_unpack do_patch run in another dir.
d.setVar('WORKDIR', ar_workdir)
@@ -290,7 +324,7 @@ python do_unpack_and_patch() {
oe.path.copytree(src, src_orig)
# Make sure gcc and kernel sources are patched only once
- if not ((d.getVar('SRC_URI', True) == "" or bb.data.inherits_class('kernel-yocto', d))):
+ if not (d.getVar('SRC_URI', True) == "" or (bb.data.inherits_class('kernel-yocto', d) or pn.startswith('gcc-source'))):
bb.build.exec_func('do_patch', d)
# Create the patches
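
The do_ar_original change above round-trips every SRC_URI entry through bb.fetch2.decodeurl()/encodeurl() so that destsuffix/subdir never leak into the archives. A minimal standalone sketch of that rewrite, with simplified stand-ins for the bb.fetch2 helpers (the real ones work on a richer (type, host, path, user, password, params) tuple):

    # Simplified stand-ins for bb.fetch2.decodeurl()/encodeurl():
    # decode a URL into (base, params-dict), re-encode after editing.
    def decodeurl(url):
        base, _, paramstr = url.partition(';')
        params = dict(p.split('=', 1) for p in paramstr.split(';') if p)
        return base, params

    def encodeurl(base, params):
        return ';'.join([base] + ['%s=%s' % kv for kv in sorted(params.items())])

    urls = ["git://example.com/foo.git;destsuffix=src/foo;name=foo"]
    for i, url in enumerate(urls):
        base, params = decodeurl(url)
        for param in ('destsuffix', 'subdir'):
            params.pop(param, None)  # drop unpack-location hints
        urls[i] = encodeurl(base, params)
    print(urls)  # ['git://example.com/foo.git;name=foo']
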
diff --git a/import-layers/yocto-poky/meta/classes/autotools.bbclass b/import-layers/yocto-poky/meta/classes/autotools.bbclass
index 6649f5df7..c43ea9a7e 100644
--- a/import-layers/yocto-poky/meta/classes/autotools.bbclass
+++ b/import-layers/yocto-poky/meta/classes/autotools.bbclass
@@ -19,15 +19,13 @@ def autotools_dep_prepend(d):
return deps + 'gnu-config-native '
-EXTRA_OEMAKE = ""
-
DEPENDS_prepend = "${@autotools_dep_prepend(d)} "
inherit siteinfo
# Space separated list of shell scripts with variables defined to supply test
# results for autoconf tests we cannot run at build time.
-export CONFIG_SITE = "${@siteinfo_get_files(d, False)}"
+export CONFIG_SITE = "${@siteinfo_get_files(d)}"
acpaths = "default"
EXTRA_AUTORECONF = "--exclude=autopoint"
@@ -77,6 +75,8 @@ CONFIGUREOPTS = " --build=${BUILD_SYS} \
${@append_libtool_sysroot(d)}"
CONFIGUREOPT_DEPTRACK ?= "--disable-dependency-tracking"
+CACHED_CONFIGUREVARS ?= ""
+
AUTOTOOLS_SCRIPT_PATH ?= "${S}"
CONFIGURE_SCRIPT ?= "${AUTOTOOLS_SCRIPT_PATH}/configure"
@@ -85,7 +85,7 @@ AUTOTOOLS_AUXDIR ?= "${AUTOTOOLS_SCRIPT_PATH}"
oe_runconf () {
# Use relative path to avoid buildpaths in files
cfgscript_name="`basename ${CONFIGURE_SCRIPT}`"
- cfgscript=`python -c "import os; print os.path.relpath(os.path.dirname('${CONFIGURE_SCRIPT}'), '.')"`/$cfgscript_name
+ cfgscript=`python3 -c "import os; print(os.path.relpath(os.path.dirname('${CONFIGURE_SCRIPT}'), '.'))"`/$cfgscript_name
if [ -x "$cfgscript" ] ; then
bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
if ! ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"; then
@@ -129,12 +129,16 @@ autotools_postconfigure(){
EXTRACONFFUNCS ??= ""
+EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
+
do_configure[prefuncs] += "autotools_preconfigure autotools_copy_aclocals ${EXTRACONFFUNCS}"
do_configure[postfuncs] += "autotools_postconfigure"
-ACLOCALDIR = "${B}/aclocal-copy"
+ACLOCALDIR = "${WORKDIR}/aclocal-copy"
python autotools_copy_aclocals () {
+ import copy
+
s = d.getVar("AUTOTOOLS_SCRIPT_PATH", True)
if not os.path.exists(s + "/configure.in") and not os.path.exists(s + "/configure.ac"):
if not d.getVar("AUTOTOOLS_COPYACLOCAL", False):
@@ -163,36 +167,63 @@ python autotools_copy_aclocals () {
if start is None:
bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
- # We need to find configure tasks which are either from <target> -> <target>
- # or <native> -> <native> but not <target> -> <native> unless they're direct
- # dependencies. This mirrors what would get restored from sstate.
- done = [start]
- next = [start]
+ # We need to figure out which m4 files we need to expose to this do_configure task.
+ # This needs to match what would get restored from sstate, which is controlled
+ # ultimately by calls from bitbake to setscene_depvalid().
+ # That function expects a setscene dependency tree. We build a dependency tree
+ # condensed to do_populate_sysroot -> do_populate_sysroot dependencies, similar to
+ # that used by setscene tasks. We can then call into setscene_depvalid() and decide
+ # which dependencies we can "see" and should expose the m4 files for.
+ setscenedeps = copy.deepcopy(taskdepdata)
+
+ start = set([start])
+
+ # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
+ for dep in taskdepdata:
+ data = setscenedeps[dep]
+ if data[1] != "do_populate_sysroot":
+ for dep2 in setscenedeps:
+ data2 = setscenedeps[dep2]
+ if dep in data2[3]:
+ data2[3].update(setscenedeps[dep][3])
+ data2[3].remove(dep)
+ if dep in start:
+ start.update(setscenedeps[dep][3])
+ start.remove(dep)
+ del setscenedeps[dep]
+
+ # Remove circular references
+ for dep in setscenedeps:
+ if dep in setscenedeps[dep][3]:
+ setscenedeps[dep][3].remove(dep)
+
+ # Direct dependencies should be present and can be depended upon
+ for dep in start:
+ configuredeps.append(setscenedeps[dep][0])
+
+ # Call into setscene_depvalid for each sub-dependency and only copy m4 files
+ # for ones that would be restored from sstate.
+ done = list(start)
+ next = list(start)
while next:
new = []
for dep in next:
- data = taskdepdata[dep]
+ data = setscenedeps[dep]
for datadep in data[3]:
if datadep in done:
continue
- if (not data[0].endswith("-native")) and taskdepdata[datadep][0].endswith("-native") and dep != start:
+ taskdeps = {}
+ taskdeps[dep] = setscenedeps[dep][:2]
+ taskdeps[datadep] = setscenedeps[datadep][:2]
+ retval = setscene_depvalid(datadep, taskdeps, [], d)
+ if retval:
+ bb.note("Skipping setscene dependency %s for m4 macro copying" % datadep)
continue
done.append(datadep)
new.append(datadep)
- if taskdepdata[datadep][1] == "do_configure":
- configuredeps.append(taskdepdata[datadep][0])
+ configuredeps.append(setscenedeps[datadep][0])
next = new
- #configuredeps2 = []
- #for dep in taskdepdata:
- # data = taskdepdata[dep]
- # if data[1] == "do_configure" and data[0] != pn:
- # configuredeps2.append(data[0])
- #configuredeps.sort()
- #configuredeps2.sort()
- #bb.warn(str(configuredeps))
- #bb.warn(str(configuredeps2))
-
cp = []
if nodeps:
bb.warn("autotools: Unable to find task dependencies, -b being used? Pulling in all m4 files")
@@ -222,11 +253,14 @@ python autotools_copy_aclocals () {
t = os.path.join(aclocaldir, os.path.basename(c))
if not os.path.exists(t):
os.symlink(c, t)
-
- d.setVar("CONFIG_SITE", siteinfo_get_files(d, False))
+
+ # Refresh variable with cache files
+ d.setVar("CONFIG_SITE", siteinfo_get_files(d, aclocalcache=True))
}
autotools_copy_aclocals[vardepsexclude] += "MACHINE SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
+CONFIGURE_FILES = "${S}/configure.in ${S}/configure.ac ${S}/config.h.in ${S}/acinclude.m4 Makefile.am"
+
autotools_do_configure() {
# WARNING: gross hack follows:
# An autotools built package generally needs these scripts, however only
@@ -236,6 +270,9 @@ autotools_do_configure() {
# for a package whose autotools are old, on an x86_64 machine, which the old
# config.sub does not support. Work around this by installing them manually
# regardless.
+
+ PRUNE_M4=""
+
for ac in `find ${S} -ignore_readdir_race -name configure.in -o -name configure.ac`; do
rm -f `dirname $ac`/configure
done
@@ -246,7 +283,7 @@ autotools_do_configure() {
if [ x"${acpaths}" = xdefault ]; then
acpaths=
for i in `find ${AUTOTOOLS_SCRIPT_PATH} -ignore_readdir_race -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
- grep -v 'acinclude.m4' | grep -v 'aclocal-copy' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
+ grep -v 'acinclude.m4' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
acpaths="$acpaths -I $i"
done
else
@@ -285,17 +322,22 @@ autotools_do_configure() {
cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sin ${S}/po/
fi
fi
- for i in gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4; do
- for j in `find ${S} -ignore_readdir_race -name $i | grep -v aclocal-copy`; do
- rm $j
- done
- done
+ PRUNE_M4="$PRUNE_M4 gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4"
fi
mkdir -p m4
if grep "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC >/dev/null; then
+ if ! echo "${DEPENDS}" | grep -q intltool-native; then
+ bbwarn "Missing DEPENDS on intltool-native"
+ fi
+ PRUNE_M4="$PRUNE_M4 intltool.m4"
bbnote Executing intltoolize --copy --force --automake
intltoolize --copy --force --automake
fi
+
+ for i in $PRUNE_M4; do
+ find ${S} -ignore_readdir_race -name $i -delete
+ done
+
bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
ACLOCAL="$ACLOCAL" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed."
cd $olddir
@@ -307,6 +349,10 @@ autotools_do_configure() {
fi
}
+autotools_do_compile() {
+ oe_runmake
+}
+
autotools_do_install() {
oe_runmake 'DESTDIR=${D}' install
# Info dir listing isn't interesting at this point so remove it if it exists.
@@ -317,6 +363,6 @@ autotools_do_install() {
inherit siteconfig
-EXPORT_FUNCTIONS do_configure do_install
+EXPORT_FUNCTIONS do_configure do_compile do_install
B = "${WORKDIR}/build"
diff --git a/import-layers/yocto-poky/meta/classes/base.bbclass b/import-layers/yocto-poky/meta/classes/base.bbclass
index a7ca3a667..024fe4331 100644
--- a/import-layers/yocto-poky/meta/classes/base.bbclass
+++ b/import-layers/yocto-poky/meta/classes/base.bbclass
@@ -10,7 +10,7 @@ inherit utility-tasks
inherit metadata_scm
inherit logging
-OE_IMPORTS += "os sys time oe.path oe.utils oe.data oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath"
+OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath"
OE_IMPORTS[type] = "list"
def oe_import(d):
@@ -105,12 +105,15 @@ def get_lic_checksum_file_list(d):
# any others should be covered by SRC_URI.
try:
path = bb.fetch.decodeurl(url)[2]
+ if not path:
+ raise bb.fetch.MalformedUrl(url)
+
if path[0] == '/':
if path.startswith(tmpdir):
continue
filelist.append(path + ":" + str(os.path.exists(path)))
except bb.fetch.MalformedUrl:
- raise bb.build.FuncFailed(d.getVar('PN', True) + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
+ bb.fatal(d.getVar('PN', True) + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
return " ".join(filelist)
addtask fetch
@@ -128,30 +131,28 @@ python base_do_fetch() {
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.download()
except bb.fetch2.BBFetchException as e:
- raise bb.build.FuncFailed(e)
+ bb.fatal(str(e))
}
addtask unpack after do_fetch
do_unpack[dirs] = "${WORKDIR}"
+
+python () {
+ if d.getVar('S', True) != d.getVar('WORKDIR', True):
+ d.setVarFlag('do_unpack', 'cleandirs', '${S}')
+ else:
+ d.setVarFlag('do_unpack', 'cleandirs', os.path.join('${S}', 'patches'))
+}
python base_do_unpack() {
src_uri = (d.getVar('SRC_URI', True) or "").split()
if len(src_uri) == 0:
return
- rootdir = d.getVar('WORKDIR', True)
-
- # Ensure that we cleanup ${S}/patches
- # TODO: Investigate if we can remove
- # the entire ${S} in this case.
- s_dir = d.getVar('S', True)
- p_dir = os.path.join(s_dir, 'patches')
- bb.utils.remove(p_dir, True)
-
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
- fetcher.unpack(rootdir)
+ fetcher.unpack(d.getVar('WORKDIR', True))
except bb.fetch2.BBFetchException as e:
- raise bb.build.FuncFailed(e)
+ bb.fatal(str(e))
}
def pkgarch_mapping(d):
@@ -308,7 +309,7 @@ base_do_compile() {
}
addtask install after do_compile
-do_install[dirs] = "${D} ${B}"
+do_install[dirs] = "${B}"
# Remove and re-create ${D} so that it is guaranteed to be empty
do_install[cleandirs] = "${D}"
@@ -430,12 +431,6 @@ python () {
appendVar('RDEPENDS_${PN}', extrardeps)
appendVar('PACKAGECONFIG_CONFARGS', extraconf)
- # TODO: once all recipes/classes abusing EXTRA_OECONF
- # to get PACKAGECONFIG options are fixed to use PACKAGECONFIG_CONFARGS
- # move this appendVar to autotools.bbclass.
- if not bb.data.inherits_class('cmake', d):
- appendVar('EXTRA_OECONF', extraconf)
-
pn = d.getVar('PN', True)
license = d.getVar('LICENSE', True)
if license == "INVALID":
@@ -477,7 +472,7 @@ python () {
else:
raise bb.parse.SkipPackage("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE', True))
- source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', 0)
+ source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
if not source_mirror_fetch:
need_host = d.getVar('COMPATIBLE_HOST', True)
if need_host:
@@ -490,7 +485,7 @@ python () {
check_license = False if pn.startswith("nativesdk-") else True
for t in ["-native", "-cross-${TARGET_ARCH}", "-cross-initial-${TARGET_ARCH}",
- "-crosssdk-${SDK_ARCH}", "-crosssdk-initial-${SDK_ARCH}",
+ "-crosssdk-${SDK_SYS}", "-crosssdk-initial-${SDK_SYS}",
"-cross-canadian-${TRANSLATED_TARGET_ARCH}"]:
if pn.endswith(d.expand(t)):
check_license = False
@@ -542,6 +537,19 @@ python () {
if pn in incompatwl:
bb.note("INCLUDING " + pn + " as buildable despite INCOMPATIBLE_LICENSE because it has been whitelisted")
+ # Try to verify per-package (LICENSE_<pkg>) values. LICENSE should be a
+ # superset of all per-package licenses. We do not do advanced (pattern)
+ # matching of license expressions - just check that all license strings
+ # in LICENSE_<pkg> are found in LICENSE.
+ license_set = oe.license.list_licenses(license)
+ for pkg in d.getVar('PACKAGES', True).split():
+ pkg_license = d.getVar('LICENSE_' + pkg, True)
+ if pkg_license:
+ unlisted = oe.license.list_licenses(pkg_license) - license_set
+ if unlisted:
+ bb.warn("LICENSE_%s includes licenses (%s) that are not "
+ "listed in LICENSE" % (pkg, ' '.join(unlisted)))
+
needsrcrev = False
srcuri = d.getVar('SRC_URI', True)
for uri in srcuri.split():
@@ -566,6 +574,10 @@ python () {
needsrcrev = True
d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')
+ # Perforce packages support SRCREV = "${AUTOREV}"
+ elif scheme == "p4":
+ needsrcrev = True
+
# OSC packages should DEPEND on osc-native
elif scheme == "osc":
d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')
@@ -658,8 +670,8 @@ python do_cleanall() {
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.clean()
- except bb.fetch2.BBFetchException, e:
- raise bb.build.FuncFailed(e)
+ except bb.fetch2.BBFetchException as e:
+ bb.fatal(str(e))
}
do_cleanall[nostamp] = "1"
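
The new per-package license check added to base.bbclass is a flat subset test: every license named in LICENSE_<pkg> must also appear in LICENSE. A self-contained approximation, with a naive tokenizer standing in for oe.license.list_licenses() (which parses full license expressions), and hypothetical package names:

    # split-based stand-in for oe.license.list_licenses()
    def list_licenses(expr):
        return {tok for tok in expr.replace("&", " ").replace("|", " ").split()
                if tok not in ("(", ")")}

    LICENSE = "GPLv2 & MIT"
    pkg_licenses = {"foo": "MIT", "foo-extras": "BSD-3-Clause"}  # hypothetical

    license_set = list_licenses(LICENSE)
    for pkg, pkg_license in pkg_licenses.items():
        unlisted = list_licenses(pkg_license) - license_set
        if unlisted:
            print("WARNING: LICENSE_%s includes licenses (%s) that are not "
                  "listed in LICENSE" % (pkg, ' '.join(unlisted)))
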
diff --git a/import-layers/yocto-poky/meta/classes/bash-completion.bbclass b/import-layers/yocto-poky/meta/classes/bash-completion.bbclass
index 74a878edf..80ee9b487 100644
--- a/import-layers/yocto-poky/meta/classes/bash-completion.bbclass
+++ b/import-layers/yocto-poky/meta/classes/bash-completion.bbclass
@@ -1,3 +1,5 @@
+DEPENDS_append_class-target = " bash-completion"
+
PACKAGES += "${PN}-bash-completion"
FILES_${PN}-bash-completion = "${datadir}/bash-completion ${sysconfdir}/bash_completion.d"
diff --git a/import-layers/yocto-poky/meta/classes/buildhistory.bbclass b/import-layers/yocto-poky/meta/classes/buildhistory.bbclass
index 581d53269..3a5bc2c3e 100644
--- a/import-layers/yocto-poky/meta/classes/buildhistory.bbclass
+++ b/import-layers/yocto-poky/meta/classes/buildhistory.bbclass
@@ -57,6 +57,9 @@ SSTATEPOSTINSTFUNCS[vardepvalueexclude] .= "| buildhistory_emit_pkghistory"
# class.
BUILDHISTORY_PRESERVE = "latest latest_srcrev"
+PATCH_GIT_USER_EMAIL ?= "buildhistory@oe"
+PATCH_GIT_USER_NAME ?= "OpenEmbedded"
+
#
# Write out metadata about this package for comparison when writing future packages
#
@@ -145,7 +148,7 @@ python buildhistory_emit_pkghistory() {
elif name == "RCONFLICTS":
pkginfo.rconflicts = value
elif name == "PKGSIZE":
- pkginfo.size = long(value)
+ pkginfo.size = int(value)
elif name == "FILES":
pkginfo.files = value
elif name == "FILELIST":
@@ -233,7 +236,7 @@ python buildhistory_emit_pkghistory() {
key = item[0]
if key.endswith('_' + pkg):
key = key[:-len(pkg)-1]
- pkgdata[key] = item[1].decode('utf-8').decode('string_escape')
+ pkgdata[key] = item[1]
pkge = pkgdata.get('PKGE', '0')
pkgv = pkgdata['PKGV']
@@ -274,7 +277,7 @@ python buildhistory_emit_pkghistory() {
# Gather information about packaged files
val = pkgdata.get('FILES_INFO', '')
dictval = json.loads(val)
- filelist = dictval.keys()
+ filelist = list(dictval.keys())
filelist.sort()
pkginfo.filelist = " ".join(filelist)
@@ -288,14 +291,12 @@ python buildhistory_emit_pkghistory() {
def write_recipehistory(rcpinfo, d):
- import codecs
-
bb.debug(2, "Writing recipe history")
pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
infofile = os.path.join(pkghistdir, "latest")
- with codecs.open(infofile, "w", encoding='utf8') as f:
+ with open(infofile, "w") as f:
if rcpinfo.pe != "0":
f.write(u"PE = %s\n" % rcpinfo.pe)
f.write(u"PV = %s\n" % rcpinfo.pv)
@@ -305,8 +306,6 @@ def write_recipehistory(rcpinfo, d):
def write_pkghistory(pkginfo, d):
- import codecs
-
bb.debug(2, "Writing package history for package %s" % pkginfo.name)
pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
@@ -316,22 +315,20 @@ def write_pkghistory(pkginfo, d):
bb.utils.mkdirhier(pkgpath)
infofile = os.path.join(pkgpath, "latest")
- with codecs.open(infofile, "w", encoding='utf8') as f:
+ with open(infofile, "w") as f:
if pkginfo.pe != "0":
f.write(u"PE = %s\n" % pkginfo.pe)
f.write(u"PV = %s\n" % pkginfo.pv)
f.write(u"PR = %s\n" % pkginfo.pr)
- pkgvars = {}
- pkgvars['PKG'] = pkginfo.pkg if pkginfo.pkg != pkginfo.name else ''
- pkgvars['PKGE'] = pkginfo.pkge if pkginfo.pkge != pkginfo.pe else ''
- pkgvars['PKGV'] = pkginfo.pkgv if pkginfo.pkgv != pkginfo.pv else ''
- pkgvars['PKGR'] = pkginfo.pkgr if pkginfo.pkgr != pkginfo.pr else ''
- for pkgvar in pkgvars:
- val = pkgvars[pkgvar]
- if val:
- f.write(u"%s = %s\n" % (pkgvar, val))
-
+ if pkginfo.pkg != pkginfo.name:
+ f.write(u"PKG = %s\n" % pkginfo.pkg)
+ if pkginfo.pkge != pkginfo.pe:
+ f.write(u"PKGE = %s\n" % pkginfo.pkge)
+ if pkginfo.pkgv != pkginfo.pv:
+ f.write(u"PKGV = %s\n" % pkginfo.pkgv)
+ if pkginfo.pkgr != pkginfo.pr:
+ f.write(u"PKGR = %s\n" % pkginfo.pkgr)
f.write(u"RPROVIDES = %s\n" % pkginfo.rprovides)
f.write(u"RDEPENDS = %s\n" % pkginfo.rdepends)
f.write(u"RRECOMMENDS = %s\n" % pkginfo.rrecommends)
@@ -349,7 +346,7 @@ def write_pkghistory(pkginfo, d):
filevarpath = os.path.join(pkgpath, "latest.%s" % filevar)
val = pkginfo.filevars[filevar]
if val:
- with codecs.open(filevarpath, "w", encoding='utf8') as f:
+ with open(filevarpath, "w") as f:
f.write(val)
else:
if os.path.exists(filevarpath):
@@ -565,11 +562,11 @@ python buildhistory_get_extra_sdkinfo() {
tasksizes[task] = origtotal + fsize
filesizes[fn] = fsize
with open(d.expand('${BUILDHISTORY_DIR_SDK}/sstate-package-sizes.txt'), 'w') as f:
- filesizes_sorted = sorted(filesizes.items(), key=operator.itemgetter(1), reverse=True)
+ filesizes_sorted = sorted(filesizes.items(), key=operator.itemgetter(1, 0), reverse=True)
for fn, size in filesizes_sorted:
f.write('%10d KiB %s\n' % (size, fn))
with open(d.expand('${BUILDHISTORY_DIR_SDK}/sstate-task-sizes.txt'), 'w') as f:
- tasksizes_sorted = sorted(tasksizes.items(), key=operator.itemgetter(1), reverse=True)
+ tasksizes_sorted = sorted(tasksizes.items(), key=operator.itemgetter(1, 0), reverse=True)
for task, size in tasksizes_sorted:
f.write('%10d KiB %s\n' % (size, task))
}
@@ -645,7 +642,7 @@ def buildhistory_get_sdkvars(d):
sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
if d.getVar('BB_CURRENTTASK', True) == 'populate_sdk_ext':
# Extensible SDK uses some additional variables
- sdkvars += " SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS"
+ sdkvars += " SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS SDK_INCLUDE_PKGDATA SDK_INCLUDE_TOOLCHAIN"
listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST"
return outputvars(sdkvars, listvars, d)
@@ -714,15 +711,9 @@ END
git tag -f build-minus-2 build-minus-1 > /dev/null 2>&1 || true
git tag -f build-minus-1 > /dev/null 2>&1 || true
fi
- # If the user hasn't set up their name/email, set some defaults
- # just for this repo (otherwise the commit will fail with older
- # versions of git)
- if ! git config user.email > /dev/null ; then
- git config --local user.email "buildhistory@${DISTRO}"
- fi
- if ! git config user.name > /dev/null ; then
- git config --local user.name "buildhistory"
- fi
+
+ check_git_config
+
# Check if there are new/changed files to commit (other than metadata-revs)
repostatus=`git status --porcelain | grep -v " metadata-revs$"`
HOSTNAME=`hostname 2>/dev/null || echo unknown`
@@ -842,7 +833,7 @@ python write_srcrev() {
f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev))
f.write('SRCREV_%s = "%s"\n' % (name, srcrev))
else:
- f.write('SRCREV = "%s"\n' % srcrevs.itervalues().next())
+ f.write('SRCREV = "%s"\n' % srcrevs.values())
if len(tag_srcrevs) > 0:
for name, srcrev in tag_srcrevs.items():
f.write('# tag_%s = "%s"\n' % (name, srcrev))
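
The sort-key change in buildhistory_get_extra_sdkinfo is about determinism: operator.itemgetter(1, 0) sorts on (size, name) rather than size alone, so entries with equal sizes land in the same order on every build. For example:

    import operator

    filesizes = {"b.tgz": 10, "a.tgz": 10, "c.tgz": 4}  # name -> KiB, hypothetical
    # (size, name) as the key keeps the report stable when sizes tie
    for fn, size in sorted(filesizes.items(),
                           key=operator.itemgetter(1, 0), reverse=True):
        print('%10d KiB %s' % (size, fn))
    # -> b.tgz, a.tgz, c.tgz in that order on every run
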
diff --git a/import-layers/yocto-poky/meta/classes/buildstats-summary.bbclass b/import-layers/yocto-poky/meta/classes/buildstats-summary.bbclass
index d73350b94..b86abcc3f 100644
--- a/import-layers/yocto-poky/meta/classes/buildstats-summary.bbclass
+++ b/import-layers/yocto-poky/meta/classes/buildstats-summary.bbclass
@@ -30,7 +30,11 @@ python buildstats_summary () {
header_printed = True
bb.note("Build completion summary:")
- bb.note(" {0}: {1}% sstate reuse ({2} setscene, {3} scratch)".format(t, 100*len(sstate)/(len(sstate)+len(no_sstate)), len(sstate), len(no_sstate)))
+ sstate_count = len(sstate)
+ no_sstate_count = len(no_sstate)
+ total_count = sstate_count + no_sstate_count
+ bb.note(" {0}: {1:.1f}% sstate reuse({2} setscene, {3} scratch)".format(
+ t, round(100 * sstate_count / total_count, 1), sstate_count, no_sstate_count))
}
addhandler buildstats_summary
buildstats_summary[eventmask] = "bb.event.BuildCompleted"
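
The summary line now relies on Python 3's true division (the old 100*len(sstate)/(len(sstate)+len(no_sstate)) truncated under Python 2) and prints the reuse ratio to one decimal place. A worked example of the same format string with hypothetical counts; the round() in the class is redundant given the {1:.1f} format but harmless:

    sstate_count, no_sstate_count = 187, 13  # hypothetical task counts
    total_count = sstate_count + no_sstate_count
    print("  {0}: {1:.1f}% sstate reuse({2} setscene, {3} scratch)".format(
        "do_package", 100 * sstate_count / total_count, sstate_count, no_sstate_count))
    # ->   do_package: 93.5% sstate reuse(187 setscene, 13 scratch)
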
diff --git a/import-layers/yocto-poky/meta/classes/buildstats.bbclass b/import-layers/yocto-poky/meta/classes/buildstats.bbclass
index 34ecb0386..599a21998 100644
--- a/import-layers/yocto-poky/meta/classes/buildstats.bbclass
+++ b/import-layers/yocto-poky/meta/classes/buildstats.bbclass
@@ -163,8 +163,13 @@ python run_buildstats () {
bs = os.path.join(bsdir, "build_stats")
with open(bs, "a") as f:
rootfs = d.getVar('IMAGE_ROOTFS', True)
- rootfs_size = subprocess.Popen(["du", "-sh", rootfs], stdout=subprocess.PIPE).stdout.read()
- f.write("Uncompressed Rootfs size: %s" % rootfs_size)
+ if os.path.isdir(rootfs):
+ try:
+ rootfs_size = subprocess.check_output(["du", "-sh", rootfs],
+ stderr=subprocess.STDOUT).decode('utf-8')
+ f.write("Uncompressed Rootfs size: %s" % rootfs_size)
+ except subprocess.CalledProcessError as err:
+ bb.warn("Failed to get rootfs size: %s" % err.output.decode('utf-8'))
elif isinstance(e, bb.build.TaskFailed):
# Can have a failure before TaskStarted so need to mkdir here too
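
The rootfs-size measurement is now guarded twice: the directory must exist, and subprocess.check_output() raises CalledProcessError on a non-zero exit, so du's own output can be logged instead of being silently dropped. Outside BitBake, the same pattern looks like:

    import os
    import subprocess

    rootfs = "/tmp/rootfs"  # hypothetical IMAGE_ROOTFS
    if os.path.isdir(rootfs):
        try:
            rootfs_size = subprocess.check_output(["du", "-sh", rootfs],
                                                  stderr=subprocess.STDOUT).decode('utf-8')
            print("Uncompressed Rootfs size: %s" % rootfs_size)
        except subprocess.CalledProcessError as err:
            print("Failed to get rootfs size: %s" % err.output.decode('utf-8'))
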
diff --git a/import-layers/yocto-poky/meta/classes/ccache.bbclass b/import-layers/yocto-poky/meta/classes/ccache.bbclass
index 2cdce4693..2e9837cf0 100644
--- a/import-layers/yocto-poky/meta/classes/ccache.bbclass
+++ b/import-layers/yocto-poky/meta/classes/ccache.bbclass
@@ -4,5 +4,3 @@ CCACHE_DISABLE[unexport] = "1"
do_configure[dirs] =+ "${CCACHE_DIR}"
do_kernel_configme[dirs] =+ "${CCACHE_DIR}"
-
-do_clean[cleandirs] += "${CCACHE_DIR}"
diff --git a/import-layers/yocto-poky/meta/classes/chrpath.bbclass b/import-layers/yocto-poky/meta/classes/chrpath.bbclass
index 9c68855ab..3b5cd37f7 100644
--- a/import-layers/yocto-poky/meta/classes/chrpath.bbclass
+++ b/import-layers/yocto-poky/meta/classes/chrpath.bbclass
@@ -5,15 +5,17 @@ def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d):
import subprocess as sub
p = sub.Popen([cmd, '-l', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
- err, out = p.communicate()
- # If returned successfully, process stderr for results
+ out, err = p.communicate()
+ # If returned successfully, process stdout for results
if p.returncode != 0:
return
+ out = out.decode('utf-8')
+
# Handle RUNPATH as well as RPATH
- err = err.replace("RUNPATH=","RPATH=")
+ out = out.replace("RUNPATH=","RPATH=")
# Throw away everything other than the rpath list
- curr_rpath = err.partition("RPATH=")[2]
+ curr_rpath = out.partition("RPATH=")[2]
#bb.note("Current rpath for %s is %s" % (fpath, curr_rpath.strip()))
rpaths = curr_rpath.split(":")
new_rpaths = []
@@ -37,18 +39,17 @@ def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d):
p = sub.Popen([cmd, '-r', args, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
out, err = p.communicate()
if p.returncode != 0:
- bb.error("%s: chrpath command failed with exit code %d:\n%s%s" % (d.getVar('PN', True), p.returncode, out, err))
- raise bb.build.FuncFailed
+ bb.fatal("%s: chrpath command failed with exit code %d:\n%s%s" % (d.getVar('PN', True), p.returncode, out, err))
def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d):
import subprocess as sub
p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
- err, out = p.communicate()
- # If returned successfully, process stderr for results
+ out, err = p.communicate()
+ # If returned successfully, process stdout for results
if p.returncode != 0:
return
- for l in err.split("\n"):
+ for l in out.split("\n"):
if "(compatibility" not in l:
continue
rpath = l.partition("(compatibility")[0].strip()
@@ -57,7 +58,7 @@ def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d):
newpath = "@loader_path/" + os.path.relpath(rpath, os.path.dirname(fpath.replace(rootdir, "/")))
p = sub.Popen([d.expand("${HOST_PREFIX}install_name_tool"), '-change', rpath, newpath, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
- err, out = p.communicate()
+ out, err = p.communicate()
def process_dir (rootdir, directory, d):
import stat
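
The core chrpath fix is a swapped tuple: Popen.communicate() returns (stdout, stderr) in that order, and chrpath -l reports RPATH/RUNPATH on stdout, so the old code was parsing the wrong stream. In miniature (echo stands in for chrpath -l):

    import subprocess as sub

    # communicate() returns (stdout, stderr); the old code unpacked (err, out)
    p = sub.Popen(["echo", "RUNPATH=/usr/lib:/opt/lib"],
                  stdout=sub.PIPE, stderr=sub.PIPE)
    out, err = p.communicate()
    out = out.decode('utf-8').replace("RUNPATH=", "RPATH=")  # treat RUNPATH like RPATH
    curr_rpath = out.partition("RPATH=")[2]
    print(curr_rpath.strip().split(":"))  # ['/usr/lib', '/opt/lib']
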
diff --git a/import-layers/yocto-poky/meta/classes/cmake.bbclass b/import-layers/yocto-poky/meta/classes/cmake.bbclass
index 02f313a86..3e762de6a 100644
--- a/import-layers/yocto-poky/meta/classes/cmake.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cmake.bbclass
@@ -84,6 +84,8 @@ EOF
addtask generate_toolchain_file after do_patch before do_configure
+CONFIGURE_FILES = "CMakeLists.txt"
+
cmake_do_configure() {
if [ "${OECMAKE_BUILDPATH}" ]; then
bbnote "cmake.bbclass no longer uses OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
@@ -108,25 +110,27 @@ cmake_do_configure() {
${OECMAKE_SITEFILE} \
${OECMAKE_SOURCEPATH} \
-DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
- -DCMAKE_INSTALL_BINDIR:PATH=${bindir} \
- -DCMAKE_INSTALL_SBINDIR:PATH=${sbindir} \
- -DCMAKE_INSTALL_LIBEXECDIR:PATH=${libexecdir} \
+ -DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir', True), d.getVar('prefix', True))} \
+ -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir', True), d.getVar('prefix', True))} \
+ -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir', True), d.getVar('prefix', True))} \
-DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
- -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${sharedstatedir} \
+ -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir', True), d.getVar('prefix', True))} \
-DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
- -DCMAKE_INSTALL_LIBDIR:PATH=${libdir} \
- -DCMAKE_INSTALL_INCLUDEDIR:PATH=${includedir} \
- -DCMAKE_INSTALL_DATAROOTDIR:PATH=${datadir} \
+ -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir', True), d.getVar('prefix', True))} \
+ -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir', True), d.getVar('prefix', True))} \
+ -DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir', True), d.getVar('prefix', True))} \
-DCMAKE_INSTALL_SO_NO_EXE=0 \
-DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
-DCMAKE_VERBOSE_MAKEFILE=1 \
+ -DCMAKE_NO_SYSTEM_FROM_IMPORTED=1 \
${EXTRA_OECMAKE} \
-Wno-dev
}
+do_compile[progress] = "percent"
cmake_do_compile() {
cd ${B}
- base_do_compile
+ base_do_compile VERBOSE=1
}
cmake_do_install() {
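
The CMAKE_INSTALL_* switch above passes prefix-relative paths, which CMake's GNUInstallDirs module interprets relative to CMAKE_INSTALL_PREFIX; os.path.relpath derives them from the usual absolute OE variables. For instance:

    import os.path

    prefix = "/usr"
    libdir = "/usr/lib64"  # hypothetical multilib value
    print(os.path.relpath(libdir, prefix))           # -> lib64
    print(os.path.relpath("/usr/libexec", prefix))   # -> libexec
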
diff --git a/import-layers/yocto-poky/meta/classes/cml1.bbclass b/import-layers/yocto-poky/meta/classes/cml1.bbclass
index b5dc028a2..583480626 100644
--- a/import-layers/yocto-poky/meta/classes/cml1.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cml1.bbclass
@@ -42,6 +42,7 @@ python do_menuconfig() {
}
do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
do_menuconfig[nostamp] = "1"
+do_menuconfig[dirs] = "${B}"
addtask menuconfig after do_configure
python do_diffconfig() {
@@ -73,4 +74,5 @@ python do_diffconfig() {
}
do_diffconfig[nostamp] = "1"
+do_diffconfig[dirs] = "${B}"
addtask diffconfig
diff --git a/import-layers/yocto-poky/meta/classes/core-image.bbclass b/import-layers/yocto-poky/meta/classes/core-image.bbclass
index 705cad8d9..8431440db 100644
--- a/import-layers/yocto-poky/meta/classes/core-image.bbclass
+++ b/import-layers/yocto-poky/meta/classes/core-image.bbclass
@@ -2,9 +2,6 @@
#
# Copyright (C) 2007-2011 Linux Foundation
-LIC_FILES_CHKSUM = "file://${COREBASE}/LICENSE;md5=4d92cd373abda3937c2bc47fbc49d690 \
- file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
-
# IMAGE_FEATURES control content of the core reference images
#
# By default we install packagegroup-core-boot and packagegroup-base-extended packages;
diff --git a/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass b/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass
index e07b1bdb6..21921b3dd 100644
--- a/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass
@@ -15,7 +15,8 @@ STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${S
# Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS
#
PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
-CANADIANEXTRAOS = "linux-uclibc linux-musl"
+BASECANADIANEXTRAOS ?= "linux-uclibc linux-musl"
+CANADIANEXTRAOS = "${BASECANADIANEXTRAOS}"
CANADIANEXTRAVENDOR = ""
MODIFYTOS ??= "1"
python () {
@@ -34,8 +35,13 @@ python () {
tos = d.getVar("TARGET_OS", True)
whitelist = []
+ extralibcs = [""]
+ if "uclibc" in d.getVar("BASECANADIANEXTRAOS", True):
+ extralibcs.append("uclibc")
+ if "musl" in d.getVar("BASECANADIANEXTRAOS", True):
+ extralibcs.append("musl")
for variant in ["", "spe", "x32", "eabi", "n32"]:
- for libc in ["", "uclibc", "musl"]:
+ for libc in extralibcs:
entry = "linux"
if variant and libc:
entry = entry + "-" + libc + variant
@@ -59,15 +65,22 @@ python () {
if tarch == "x86_64":
d.setVar("LIBCEXTENSION", "")
d.setVar("ABIEXTENSION", "")
- d.appendVar("CANADIANEXTRAOS", " linux-gnux32 linux-uclibcx32 linux-muslx32")
+ d.appendVar("CANADIANEXTRAOS", " linux-gnux32")
+ for extraos in d.getVar("BASECANADIANEXTRAOS", True).split():
+ d.appendVar("CANADIANEXTRAOS", " " + extraos + "x32")
elif tarch == "powerpc":
# PowerPC can build "linux" and "linux-gnuspe"
d.setVar("LIBCEXTENSION", "")
d.setVar("ABIEXTENSION", "")
- d.appendVar("CANADIANEXTRAOS", " linux-gnuspe linux-uclibcspe linux-muslspe")
+ d.appendVar("CANADIANEXTRAOS", " linux-gnuspe")
+ for extraos in d.getVar("BASECANADIANEXTRAOS", True).split():
+ d.appendVar("CANADIANEXTRAOS", " " + extraos + "spe")
elif tarch == "mips64":
- d.appendVar("CANADIANEXTRAOS", " linux-gnun32 linux-uclibcn32 linux-musln32")
+ d.appendVar("CANADIANEXTRAOS", " linux-gnun32")
+ for extraos in d.getVar("BASECANADIANEXTRAOS", True).split():
+ d.appendVar("CANADIANEXTRAOS", " " + extraos + "n32")
if tarch == "arm" or tarch == "armeb":
+ d.appendVar("CANADIANEXTRAOS", " linux-gnueabi linux-musleabi linux-uclibceabi")
d.setVar("TARGET_OS", "linux-gnueabi")
else:
d.setVar("TARGET_OS", "linux")
diff --git a/import-layers/yocto-poky/meta/classes/cross.bbclass b/import-layers/yocto-poky/meta/classes/cross.bbclass
index 81d1c9d85..01b09337a 100644
--- a/import-layers/yocto-poky/meta/classes/cross.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cross.bbclass
@@ -17,6 +17,8 @@ HOST_CC_ARCH = "${BUILD_CC_ARCH}"
HOST_LD_ARCH = "${BUILD_LD_ARCH}"
HOST_AS_ARCH = "${BUILD_AS_ARCH}"
+export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
+
STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}${HOST_VENDOR}-${HOST_OS}"
PACKAGE_ARCH = "${BUILD_ARCH}"
@@ -68,4 +70,16 @@ do_install () {
USE_NLS = "no"
+export CC = "${BUILD_CC}"
+export CXX = "${BUILD_CXX}"
+export FC = "${BUILD_FC}"
+export CPP = "${BUILD_CPP}"
+export LD = "${BUILD_LD}"
+export CCLD = "${BUILD_CCLD}"
+export AR = "${BUILD_AR}"
+export AS = "${BUILD_AS}"
+export RANLIB = "${BUILD_RANLIB}"
+export STRIP = "${BUILD_STRIP}"
+export NM = "${BUILD_NM}"
+
inherit nopackages
diff --git a/import-layers/yocto-poky/meta/classes/cve-check.bbclass b/import-layers/yocto-poky/meta/classes/cve-check.bbclass
new file mode 100644
index 000000000..1425a4055
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/cve-check.bbclass
@@ -0,0 +1,265 @@
+# This class is used to check recipes against public CVEs.
+#
+# In order to use this class just inherit the class in the
+# local.conf file and it will add the cve_check task for
+# every recipe. The task can be used per recipe, per image,
+# or using the special cases "world" and "universe". The
+# cve_check task will print a warning for every unpatched
+# CVE found and generate a file in the recipe WORKDIR/cve
+# directory. If an image is build it will generate a report
+# in DEPLOY_DIR_IMAGE for all the packages used.
+#
+# Example:
+# bitbake -c cve_check openssl
+# bitbake core-image-sato
+# bitbake -k -c cve_check universe
+#
+# DISCLAIMER
+#
+# This class/tool is meant to be used as support and not
+# the only method to check against CVEs. Running this tool
+# doesn't guarantee your packages are free of CVEs.
+
+CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK"
+CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvd.db"
+
+CVE_CHECK_LOCAL_DIR ?= "${WORKDIR}/cve"
+CVE_CHECK_LOCAL_FILE ?= "${CVE_CHECK_LOCAL_DIR}/cve.log"
+CVE_CHECK_TMP_FILE ?= "${TMPDIR}/cve_check"
+
+CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve"
+CVE_CHECK_MANIFEST ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve"
+CVE_CHECK_COPY_FILES ??= "1"
+CVE_CHECK_CREATE_MANIFEST ??= "1"
+
+# Whitelist for packages (PN)
+CVE_CHECK_PN_WHITELIST = "\
+ glibc-locale \
+"
+
+# Whitelist for CVE and version of package
+CVE_CHECK_CVE_WHITELIST = "{\
+ 'CVE-2014-2524': ('6.3',), \
+}"
+
+python do_cve_check () {
+ """
+ Check recipe for patched and unpatched CVEs
+ """
+
+ if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE", True)):
+ patched_cves = get_patches_cves(d)
+ patched, unpatched = check_cves(d, patched_cves)
+ if patched or unpatched:
+ cve_data = get_cve_info(d, patched + unpatched)
+ cve_write_data(d, patched, unpatched, cve_data)
+ else:
+ bb.note("Failed to update CVE database, skipping CVE check")
+}
+
+addtask cve_check after do_unpack before do_build
+do_cve_check[depends] = "cve-check-tool-native:do_populate_cve_db"
+do_cve_check[nostamp] = "1"
+
+python cve_check_cleanup () {
+ """
+ Delete the file used to gather all the CVE information.
+ """
+
+ bb.utils.remove(e.data.getVar("CVE_CHECK_TMP_FILE", True))
+}
+
+addhandler cve_check_cleanup
+cve_check_cleanup[eventmask] = "bb.cooker.CookerExit"
+
+python cve_check_write_rootfs_manifest () {
+ """
+ Create CVE manifest when building an image
+ """
+
+ import shutil
+
+ if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE", True)):
+ bb.note("Writing rootfs CVE manifest")
+ deploy_dir = d.getVar("DEPLOY_DIR_IMAGE", True)
+ link_name = d.getVar("IMAGE_LINK_NAME", True)
+ manifest_name = d.getVar("CVE_CHECK_MANIFEST", True)
+ cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE", True)
+
+ shutil.copyfile(cve_tmp_file, manifest_name)
+
+ if manifest_name and os.path.exists(manifest_name):
+ manifest_link = os.path.join(deploy_dir, "%s.cve" % link_name)
+ # If we already have another manifest, update symlinks
+ if os.path.exists(os.path.realpath(manifest_link)):
+ os.remove(manifest_link)
+ os.symlink(os.path.basename(manifest_name), manifest_link)
+ bb.plain("Image CVE report stored in: %s" % manifest_name)
+}
+
+ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST', True) == '1' else ''}"
+
+def get_patches_cves(d):
+ """
+ Get patches that solve CVEs using the "CVE: " tag.
+ """
+
+ import re
+
+ pn = d.getVar("PN", True)
+ cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
+ patched_cves = set()
+ bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
+ for url in src_patches(d):
+ patch_file = bb.fetch.decodeurl(url)[2]
+ with open(patch_file, "r", encoding="utf-8") as f:
+ try:
+ patch_text = f.read()
+ except UnicodeDecodeError:
+ bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
+ " trying with iso8859-1" % patch_file)
+ f.close()
+ with open(patch_file, "r", encoding="iso8859-1") as f:
+ patch_text = f.read()
+
+ # Search for the "CVE: " line
+ match = cve_match.search(patch_text)
+ if match:
+ # Get only the CVEs without the "CVE: " tag
+ cves = patch_text[match.start()+5:match.end()]
+ for cve in cves.split():
+ bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
+ patched_cves.add(cve)
+ else:
+ bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
+
+ return patched_cves
+
+def check_cves(d, patched_cves):
+ """
+ Run cve-check-tool looking for patched and unpatched CVEs.
+ """
+
+ import ast, csv, tempfile, subprocess, io
+
+ cves_patched = []
+ cves_unpatched = []
+ bpn = d.getVar("BPN", True)
+ pv = d.getVar("PV", True).split("git+")[0]
+ cves = " ".join(patched_cves)
+ cve_db_dir = d.getVar("CVE_CHECK_DB_DIR", True)
+ cve_whitelist = ast.literal_eval(d.getVar("CVE_CHECK_CVE_WHITELIST", True))
+ cve_cmd = "cve-check-tool"
+ cmd = [cve_cmd, "--no-html", "--csv", "--not-affected", "-t", "faux", "-d", cve_db_dir]
+
+ # If the recipe has been whitelisted we return empty lists
+ if d.getVar("PN", True) in d.getVar("CVE_CHECK_PN_WHITELIST", True).split():
+ bb.note("Recipe has been whitelisted, skipping check")
+ return ([], [])
+
+ # The proxies need to be exported so the database can be downloaded over HTTP
+ bb.utils.export_proxies(d)
+
+ try:
+ # Write the faux CSV file to be used with cve-check-tool
+ fd, faux = tempfile.mkstemp(prefix="cve-faux-")
+ with os.fdopen(fd, "w") as f:
+ f.write("%s,%s,%s," % (bpn, pv, cves))
+ cmd.append(faux)
+
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
+ bb.debug(2, "Output of command %s:\n%s" % ("\n".join(cmd), output))
+ except subprocess.CalledProcessError as e:
+ bb.warn("Couldn't check for CVEs: %s (output %s)" % (e, e.output))
+ finally:
+ os.remove(faux)
+
+ for row in csv.reader(io.StringIO(output)):
+ # Third row has the unpatched CVEs
+ if row[2]:
+ for cve in row[2].split():
+ # Skip if the CVE has been whitelisted for the current version
+ if pv in cve_whitelist.get(cve,[]):
+ bb.note("%s-%s has been whitelisted for %s" % (bpn, pv, cve))
+ else:
+ cves_unpatched.append(cve)
+ bb.debug(2, "%s-%s is not patched for %s" % (bpn, pv, cve))
+ # Fourth row has patched CVEs
+ if row[3]:
+ for cve in row[3].split():
+ cves_patched.append(cve)
+ bb.debug(2, "%s-%s is patched for %s" % (bpn, pv, cve))
+
+ return (cves_patched, cves_unpatched)
+
+def get_cve_info(d, cves):
+ """
+ Get CVE information from the database used by cve-check-tool.
+
+ Unfortunately the only way to get CVE info is to set the output to
+ html (hard to parse) or to query the database directly.
+ """
+
+ try:
+ import sqlite3
+ except ImportError:
+ from pysqlite2 import dbapi2 as sqlite3
+
+ cve_data = {}
+ db_file = d.getVar("CVE_CHECK_DB_FILE", True)
+ placeholder = ",".join("?" * len(cves))
+ query = "SELECT * FROM NVD WHERE id IN (%s)" % placeholder
+ conn = sqlite3.connect(db_file)
+ cur = conn.cursor()
+ for row in cur.execute(query, tuple(cves)):
+ cve_data[row[0]] = {}
+ cve_data[row[0]]["summary"] = row[1]
+ cve_data[row[0]]["score"] = row[2]
+ cve_data[row[0]]["modified"] = row[3]
+ cve_data[row[0]]["vector"] = row[4]
+ conn.close()
+
+ return cve_data
+
+def cve_write_data(d, patched, unpatched, cve_data):
+ """
+ Write CVE information to WORKDIR, to CVE_CHECK_DIR, and to the
+ CVE manifest if enabled.
+ """
+
+ cve_file = d.getVar("CVE_CHECK_LOCAL_FILE", True)
+ nvd_link = "https://web.nvd.nist.gov/view/vuln/detail?vulnId="
+ write_string = ""
+ first_alert = True
+ bb.utils.mkdirhier(d.getVar("CVE_CHECK_LOCAL_DIR", True))
+
+ for cve in sorted(cve_data):
+ write_string += "PACKAGE NAME: %s\n" % d.getVar("PN", True)
+ write_string += "PACKAGE VERSION: %s\n" % d.getVar("PV", True)
+ write_string += "CVE: %s\n" % cve
+ if cve in patched:
+ write_string += "CVE STATUS: Patched\n"
+ else:
+ write_string += "CVE STATUS: Unpatched\n"
+ if first_alert:
+ bb.warn("Found unpatched CVE, for more information check %s" % cve_file)
+ first_alert = False
+ write_string += "CVE SUMMARY: %s\n" % cve_data[cve]["summary"]
+ write_string += "CVSS v2 BASE SCORE: %s\n" % cve_data[cve]["score"]
+ write_string += "VECTOR: %s\n" % cve_data[cve]["vector"]
+ write_string += "MORE INFORMATION: %s%s\n\n" % (nvd_link, cve)
+
+ with open(cve_file, "w") as f:
+ bb.note("Writing file %s with CVE information" % cve_file)
+ f.write(write_string)
+
+ if d.getVar("CVE_CHECK_COPY_FILES", True) == "1":
+ cve_dir = d.getVar("CVE_CHECK_DIR", True)
+ bb.utils.mkdirhier(cve_dir)
+ deploy_file = os.path.join(cve_dir, d.getVar("PN", True))
+ with open(deploy_file, "w") as f:
+ f.write(write_string)
+
+ if d.getVar("CVE_CHECK_CREATE_MANIFEST", True) == "1":
+ with open(d.getVar("CVE_CHECK_TMP_FILE", True), "a") as f:
+ f.write("%s" % write_string)
diff --git a/import-layers/yocto-poky/meta/classes/devshell.bbclass b/import-layers/yocto-poky/meta/classes/devshell.bbclass
index 341d9c000..be71aff35 100644
--- a/import-layers/yocto-poky/meta/classes/devshell.bbclass
+++ b/import-layers/yocto-poky/meta/classes/devshell.bbclass
@@ -65,9 +65,6 @@ def devpyshell(d):
os.dup2(m, sys.stdout.fileno())
os.dup2(m, sys.stderr.fileno())
- sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
- sys.stdin = os.fdopen(sys.stdin.fileno(), 'r', 0)
-
bb.utils.nonblockingfd(sys.stdout)
bb.utils.nonblockingfd(sys.stderr)
bb.utils.nonblockingfd(sys.stdin)
@@ -93,6 +90,7 @@ def devpyshell(d):
else:
prompt = ps1
sys.stdout.write(prompt)
+ sys.stdout.flush()
# Restore Ctrl+C since bitbake masks this
def signal_handler(signal, frame):
@@ -114,6 +112,7 @@ def devpyshell(d):
continue
except EOFError as e:
sys.stdout.write("\n")
+ sys.stdout.flush()
except (OSError, IOError) as e:
if e.errno == 11:
continue
diff --git a/import-layers/yocto-poky/meta/classes/distrodata.bbclass b/import-layers/yocto-poky/meta/classes/distrodata.bbclass
index 51bfc1e54..fbb7402e0 100644
--- a/import-layers/yocto-poky/meta/classes/distrodata.bbclass
+++ b/import-layers/yocto-poky/meta/classes/distrodata.bbclass
@@ -1,4 +1,3 @@
-include conf/distro/include/package_regex.inc
include conf/distro/include/upstream_tracking.inc
include conf/distro/include/distro_alias.inc
include conf/distro/include/maintainers.inc
diff --git a/import-layers/yocto-poky/meta/classes/distutils-common-base.bbclass b/import-layers/yocto-poky/meta/classes/distutils-common-base.bbclass
index 08511f59c..824a1b68b 100644
--- a/import-layers/yocto-poky/meta/classes/distutils-common-base.bbclass
+++ b/import-layers/yocto-poky/meta/classes/distutils-common-base.bbclass
@@ -1,7 +1,3 @@
-inherit python-dir
-
-EXTRA_OEMAKE = ""
-
export STAGING_INCDIR
export STAGING_LIBDIR
diff --git a/import-layers/yocto-poky/meta/classes/distutils-native-base.bbclass b/import-layers/yocto-poky/meta/classes/distutils-native-base.bbclass
deleted file mode 100644
index 509cb9551..000000000
--- a/import-layers/yocto-poky/meta/classes/distutils-native-base.bbclass
+++ /dev/null
@@ -1,3 +0,0 @@
-inherit distutils-common-base
-
-DEPENDS += "${@["${PYTHON_PN}-native", ""][(d.getVar('PACKAGES', True) == '')]}"
diff --git a/import-layers/yocto-poky/meta/classes/distutils-tools.bbclass b/import-layers/yocto-poky/meta/classes/distutils-tools.bbclass
index 8d9b3f78f..3ef9cc5a7 100644
--- a/import-layers/yocto-poky/meta/classes/distutils-tools.bbclass
+++ b/import-layers/yocto-poky/meta/classes/distutils-tools.bbclass
@@ -8,14 +8,12 @@ DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
distutils_do_compile() {
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
- BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
bbfatal_log "${PYTHON_PN} setup.py build_ext execution failed."
}
distutils_stage_headers() {
install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
- BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
bbfatal_log "${PYTHON_PN} setup.py install_headers execution failed."
}
@@ -25,7 +23,6 @@ distutils_stage_all() {
STAGING_LIBDIR=${STAGING_LIBDIR} \
install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
- BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
bbfatal_log "${PYTHON_PN} setup.py install (stage) execution failed."
}
@@ -37,7 +34,6 @@ distutils_do_install() {
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
PYTHONPATH=${D}/${PYTHON_SITEPACKAGES_DIR} \
- BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
bbfatal_log "${PYTHON_PN} setup.py install execution failed."
diff --git a/import-layers/yocto-poky/meta/classes/distutils.bbclass b/import-layers/yocto-poky/meta/classes/distutils.bbclass
index da48a2ed5..857572d75 100644
--- a/import-layers/yocto-poky/meta/classes/distutils.bbclass
+++ b/import-layers/yocto-poky/meta/classes/distutils.bbclass
@@ -10,14 +10,12 @@ DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
distutils_do_compile() {
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
- BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
bbfatal_log "${PYTHON_PN} setup.py build execution failed."
}
distutils_stage_headers() {
install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
- BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
bbfatal_log "${PYTHON_PN} setup.py install_headers execution failed."
}
@@ -27,7 +25,6 @@ distutils_stage_all() {
STAGING_LIBDIR=${STAGING_LIBDIR} \
install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
- BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
bbfatal_log "${PYTHON_PN} setup.py install (stage) execution failed."
}
@@ -37,7 +34,6 @@ distutils_do_install() {
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
- BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
bbfatal_log "${PYTHON_PN} setup.py install execution failed."
diff --git a/import-layers/yocto-poky/meta/classes/distutils3-base.bbclass b/import-layers/yocto-poky/meta/classes/distutils3-base.bbclass
index 2a093d3a8..82ab6a3d1 100644
--- a/import-layers/yocto-poky/meta/classes/distutils3-base.bbclass
+++ b/import-layers/yocto-poky/meta/classes/distutils3-base.bbclass
@@ -1,8 +1,5 @@
DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES', True) == '')]}"
RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
-PYTHON_BASEVERSION = "3.5"
-PYTHON_ABI = "m"
-
inherit distutils-common-base python3native
diff --git a/import-layers/yocto-poky/meta/classes/distutils3-native-base.bbclass b/import-layers/yocto-poky/meta/classes/distutils3-native-base.bbclass
deleted file mode 100644
index db9a1a73c..000000000
--- a/import-layers/yocto-poky/meta/classes/distutils3-native-base.bbclass
+++ /dev/null
@@ -1,4 +0,0 @@
-PYTHON_BASEVERSION = "3.5"
-PYTHON_ABI = "m"
-
-inherit distutils-native-base
diff --git a/import-layers/yocto-poky/meta/classes/distutils3.bbclass b/import-layers/yocto-poky/meta/classes/distutils3.bbclass
index 4f6ca4482..a6720c5b6 100644
--- a/import-layers/yocto-poky/meta/classes/distutils3.bbclass
+++ b/import-layers/yocto-poky/meta/classes/distutils3.bbclass
@@ -9,14 +9,8 @@ DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
--install-data=${D}/${datadir}"
distutils3_do_compile() {
- if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
- SYS=${MACHINE}
- else
- SYS=${HOST_SYS}
- fi
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
- BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
build ${DISTUTILS_BUILD_ARGS} || \
bbfatal_log "${PYTHON_PN} setup.py build_ext execution failed."
@@ -25,28 +19,16 @@ distutils3_do_compile[vardepsexclude] = "MACHINE"
distutils3_stage_headers() {
install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
- if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
- SYS=${MACHINE}
- else
- SYS=${HOST_SYS}
- fi
- BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
bbfatal_log "${PYTHON_PN} setup.py install_headers execution failed."
}
distutils3_stage_headers[vardepsexclude] = "MACHINE"
distutils3_stage_all() {
- if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
- SYS=${MACHINE}
- else
- SYS=${HOST_SYS}
- fi
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
- BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
bbfatal_log "${PYTHON_PN} setup.py install (stage) execution failed."
}
@@ -54,15 +36,9 @@ distutils3_stage_all[vardepsexclude] = "MACHINE"
distutils3_do_install() {
install -d ${D}${PYTHON_SITEPACKAGES_DIR}
- if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
- SYS=${MACHINE}
- else
- SYS=${HOST_SYS}
- fi
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
- BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
bbfatal_log "${PYTHON_PN} setup.py install execution failed."
diff --git a/import-layers/yocto-poky/meta/classes/externalsrc.bbclass b/import-layers/yocto-poky/meta/classes/externalsrc.bbclass
index da7eb4781..31908c3ca 100644
--- a/import-layers/yocto-poky/meta/classes/externalsrc.bbclass
+++ b/import-layers/yocto-poky/meta/classes/externalsrc.bbclass
@@ -29,6 +29,23 @@ EXTERNALSRC_SYMLINKS ?= "oe-workdir:${WORKDIR} oe-logs:${T}"
python () {
externalsrc = d.getVar('EXTERNALSRC', True)
+
+ # If this is the base recipe and EXTERNALSRC is set for it or any of its
+ # derivatives, then enable BB_DONT_CACHE to force the recipe to always be
+ # re-parsed so that the file-checksums function for do_compile is run every
+ # time.
+ bpn = d.getVar('BPN', True)
+ if bpn == d.getVar('PN', True):
+ classextend = (d.getVar('BBCLASSEXTEND', True) or '').split()
+ if (externalsrc or
+ ('native' in classextend and
+ d.getVar('EXTERNALSRC_pn-%s-native' % bpn, True)) or
+ ('nativesdk' in classextend and
+ d.getVar('EXTERNALSRC_pn-nativesdk-%s' % bpn, True)) or
+ ('cross' in classextend and
+ d.getVar('EXTERNALSRC_pn-%s-cross' % bpn, True))):
+ d.setVar('BB_DONT_CACHE', '1')
+
if externalsrc:
d.setVar('S', externalsrc)
externalsrcbuild = d.getVar('EXTERNALSRC_BUILD', True)
@@ -85,10 +102,8 @@ python () {
d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ")
- # Force the recipe to be always re-parsed so that the file_checksums
- # function is run every time
- d.setVar('BB_DONT_CACHE', '1')
d.setVarFlag('do_compile', 'file-checksums', '${@srctree_hash_files(d)}')
+ d.setVarFlag('do_configure', 'file-checksums', '${@srctree_configure_hash_files(d)}')
# We don't want the workdir to go away
d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN', True))
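The new anonymous-python block above decides BB_DONT_CACHE once, in the base recipe, so that EXTERNALSRC set on a BBCLASSEXTEND variant (native, nativesdk, cross) also forces re-parsing. A condensed sketch of that decision, with the datastore mocked as a dict (values hypothetical):

    # Recipe "foo" with a native variant whose EXTERNALSRC is set.
    d = {"BPN": "foo", "PN": "foo", "BBCLASSEXTEND": "native",
         "EXTERNALSRC": "", "EXTERNALSRC_pn-foo-native": "/src/foo"}

    def getVar(name):
        return d.get(name, "")

    bpn = getVar("BPN")
    if bpn == getVar("PN"):  # only the base recipe makes the decision
        classextend = getVar("BBCLASSEXTEND").split()
        if (getVar("EXTERNALSRC")
                or ("native" in classextend and getVar("EXTERNALSRC_pn-%s-native" % bpn))
                or ("nativesdk" in classextend and getVar("EXTERNALSRC_pn-nativesdk-%s" % bpn))
                or ("cross" in classextend and getVar("EXTERNALSRC_pn-%s-cross" % bpn))):
            d["BB_DONT_CACHE"] = "1"  # re-parse every time so file-checksums re-run

    assert d["BB_DONT_CACHE"] == "1"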
@@ -145,10 +160,31 @@ def srctree_hash_files(d):
env = os.environ.copy()
env['GIT_INDEX_FILE'] = tmp_index.name
subprocess.check_output(['git', 'add', '.'], cwd=s_dir, env=env)
- sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env)
+ sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
with open(oe_hash_file, 'w') as fobj:
fobj.write(sha1)
ret = oe_hash_file + ':True'
else:
ret = d.getVar('EXTERNALSRC', True) + '/*:True'
return ret
+
+def srctree_configure_hash_files(d):
+ """
+ Get the list of files that should trigger do_configure to re-execute,
+ based on the value of CONFIGURE_FILES
+ """
+ in_files = (d.getVar('CONFIGURE_FILES', True) or '').split()
+ out_items = []
+ search_files = []
+ for entry in in_files:
+ if entry.startswith('/'):
+ out_items.append('%s:%s' % (entry, os.path.exists(entry)))
+ else:
+ search_files.append(entry)
+ if search_files:
+ s_dir = d.getVar('EXTERNALSRC', True)
+ for root, _, files in os.walk(s_dir):
+ for f in files:
+ if f in search_files:
+ out_items.append('%s:True' % os.path.join(root, f))
+ return ' '.join(out_items)
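srctree_configure_hash_files() above produces the "path:exists" pairs that BitBake expects in a file-checksums varflag: absolute CONFIGURE_FILES entries are tested directly, while bare file names are searched for under the external source tree. The same logic as a standalone function (arguments hypothetical):

    import os

    def configure_hash_files(configure_files, src_dir):
        """Return a BitBake-style file-checksums string: 'path:True path2:False ...'."""
        out_items = []
        search_files = []
        for entry in configure_files.split():
            if entry.startswith('/'):
                out_items.append('%s:%s' % (entry, os.path.exists(entry)))
            else:
                search_files.append(entry)
        if search_files:
            for root, _, files in os.walk(src_dir):
                for f in files:
                    if f in search_files:
                        out_items.append('%s:True' % os.path.join(root, f))
        return ' '.join(out_items)

    # e.g. for an autotools recipe built from an external tree:
    print(configure_hash_files('configure.ac configure.in', '/path/to/srctree'))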
diff --git a/import-layers/yocto-poky/meta/classes/gobject-introspection-data.bbclass b/import-layers/yocto-poky/meta/classes/gobject-introspection-data.bbclass
index b1bdd268e..2ef684626 100644
--- a/import-layers/yocto-poky/meta/classes/gobject-introspection-data.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gobject-introspection-data.bbclass
@@ -3,7 +3,5 @@
#
# It should be used in recipes to determine whether introspection data should be built,
# so that qemu use can be avoided when necessary.
-GI_DATA_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'gobject-introspection-data', \
+GI_DATA_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'gobject-introspection-data', \
bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
-
-
diff --git a/import-layers/yocto-poky/meta/classes/gobject-introspection.bbclass b/import-layers/yocto-poky/meta/classes/gobject-introspection.bbclass
index 2d73e402c..37389cbc8 100644
--- a/import-layers/yocto-poky/meta/classes/gobject-introspection.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gobject-introspection.bbclass
@@ -1,22 +1,28 @@
# Inherit this class in recipes to enable building their introspection files
-# This sets up autoconf-based recipes to build introspection data (or not),
+# python3native is inherited to prevent the introspection tools from being run with
+# the host's python 3 (they need to be run with the native python 3)
+#
+# This also sets up autoconf-based recipes to build introspection data (or not),
# depending on distro and machine features (see gobject-introspection-data class).
-inherit gobject-introspection-data
-EXTRA_OECONF_prepend = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
+inherit python3native gobject-introspection-data
+EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
+
+# When building native recipes, disable introspection, as it is not necessary,
+# pulls in additional dependencies, and makes build times longer
+EXTRA_OECONF_prepend_class-native = "--disable-introspection "
+EXTRA_OECONF_prepend_class-nativesdk = "--disable-introspection "
UNKNOWN_CONFIGURE_WHITELIST_append = " --enable-introspection --disable-introspection"
# Generating introspection data depends on a combination of native and target
# introspection tools, and qemu to run the target tools.
-DEPENDS_append = " gobject-introspection gobject-introspection-native qemu-native"
-
-# This is necessary for python scripts to succeed - distutils fails if these
-# are not set
-export BUILD_SYS
-export HOST_SYS
-export STAGING_INCDIR
-export STAGING_LIBDIR
+DEPENDS_append_class-target = " gobject-introspection gobject-introspection-native qemu-native"
+
+# Even though introspection is disabled on -native, the gobject-introspection package is still
+# needed for m4 macros.
+DEPENDS_append_class-native = " gobject-introspection-native"
+DEPENDS_append_class-nativesdk = " gobject-introspection-native"
# This is used by introspection tools to find .gir includes
export XDG_DATA_DIRS = "${STAGING_DATADIR}"
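GI_DATA_ENABLED (from gobject-introspection-data.bbclass) is a nested bb.utils.contains(): introspection data is built only when DISTRO_FEATURES contains gobject-introspection-data and MACHINE_FEATURES contains qemu-usermode. A plain-Python rendering of that gate and the configure flag derived from it:

    def contains(features, value, truevalue, falsevalue):
        # simplified stand-in for bb.utils.contains(var, value, true, false, d)
        return truevalue if value in features.split() else falsevalue

    def gi_data_enabled(distro_features, machine_features):
        return contains(distro_features, 'gobject-introspection-data',
                        contains(machine_features, 'qemu-usermode', 'True', 'False'),
                        'False')

    assert gi_data_enabled('x11 gobject-introspection-data', 'qemu-usermode') == 'True'
    assert gi_data_enabled('x11 gobject-introspection-data', '') == 'False'
    assert gi_data_enabled('x11', 'qemu-usermode') == 'False'

    flag = ('--enable-introspection'
            if gi_data_enabled('gobject-introspection-data', 'qemu-usermode') == 'True'
            else '--disable-introspection')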
diff --git a/import-layers/yocto-poky/meta/classes/grub-efi.bbclass b/import-layers/yocto-poky/meta/classes/grub-efi.bbclass
index 4ce3d2844..17417ba5d 100644
--- a/import-layers/yocto-poky/meta/classes/grub-efi.bbclass
+++ b/import-layers/yocto-poky/meta/classes/grub-efi.bbclass
@@ -45,6 +45,8 @@ efi_populate() {
GRUB_IMAGE="bootx64.efi"
fi
install -m 0644 ${DEPLOY_DIR_IMAGE}/${GRUB_IMAGE} ${DEST}${EFIDIR}
+ EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
+ printf 'fs0:%s\%s\n' "$EFIPATH" "$GRUB_IMAGE" >${DEST}/startup.nsh
install -m 0644 ${GRUB_CFG} ${DEST}${EFIDIR}/grub.cfg
}
@@ -57,7 +59,7 @@ efi_iso_populate() {
cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
cp $iso_dir/vmlinuz ${EFIIMGDIR}
EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- echo "fs0:${EFIPATH}\\${GRUB_IMAGE}" > ${EFIIMGDIR}/startup.nsh
+ printf 'fs0:%s\%s\n' "$EFIPATH" "$GRUB_IMAGE" > ${EFIIMGDIR}/startup.nsh
if [ -f "$iso_dir/initrd" ] ; then
cp $iso_dir/initrd ${EFIIMGDIR}
fi
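The startup.nsh lines written above convert the slash-separated ${EFIDIR} (typically /EFI/BOOT) into the backslash form the EFI shell expects. The transformation, sketched in Python:

    efidir = '/EFI/BOOT'                   # typical ${EFIDIR} value
    efipath = efidir.replace('/', '\\')    # what the sed 's/\//\\/g' does
    print('fs0:%s\\%s' % (efipath, 'bootx64.efi'))
    # -> fs0:\EFI\BOOT\bootx64.efi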
@@ -88,12 +90,12 @@ python build_efi_cfg() {
cfile = d.getVar('GRUB_CFG', True)
if not cfile:
- raise bb.build.FuncFailed('Unable to read GRUB_CFG')
+ bb.fatal('Unable to read GRUB_CFG')
try:
- cfgfile = file(cfile, 'w')
+ cfgfile = open(cfile, 'w')
except OSError:
- raise bb.build.funcFailed('Unable to open %s' % (cfile))
+ bb.fatal('Unable to open %s' % cfile)
cfgfile.write('# Automatically created by OE\n')
@@ -112,7 +114,7 @@ python build_efi_cfg() {
root = d.getVar('GRUB_ROOT', True)
if not root:
- raise bb.build.FuncFailed('GRUB_ROOT not defined')
+ bb.fatal('GRUB_ROOT not defined')
if gfxserial == "1":
btypes = [ [ " graphics console", "" ],
@@ -125,7 +127,7 @@ python build_efi_cfg() {
overrides = localdata.getVar('OVERRIDES', True)
if not overrides:
- raise bb.build.FuncFailed('OVERRIDES not defined')
+ bb.fatal('OVERRIDES not defined')
for btype in btypes:
localdata.setVar('OVERRIDES', label + ':' + overrides)
@@ -144,7 +146,8 @@ python build_efi_cfg() {
if append:
append = replace_rootfs_uuid(d, append)
- cfgfile.write('%s' % (append))
+ cfgfile.write(' %s' % (append))
+
cfgfile.write(' %s' % btype[1])
cfgfile.write('\n')
diff --git a/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass b/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass
index e32f98dcf..297eac63b 100644
--- a/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass
@@ -1,25 +1,69 @@
-# Helper class to pull in the right gtk-doc dependencies and disable
-# gtk-doc.
+# Helper class to pull in the right gtk-doc dependencies and configure
+# gtk-doc to enable or disable documentation building (which requires the
+# use of usermode qemu).
+
+# This variable is set to True if api-documentation is in
+# DISTRO_FEATURES and qemu-usermode is in MACHINE_FEATURES, and False otherwise.
#
-# Long-term it would be great if this class could be toggled between
-# gtk-doc-stub-native and the real gtk-doc-native, which would enable
-# re-generation of documentation. For now, we'll make do with this which
-# packages up any existing documentation (so from tarball builds).
+# It should be used in recipes to determine whether gtk-doc based documentation should be built,
+# so that qemu use can be avoided when necessary.
+GTKDOC_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', \
+ bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
+
+EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \
+ '--disable-gtk-doc', d)} "
+
+# When building native recipes, disable gtkdoc, as it is not necessary,
+# pulls in additional dependencies, and makes build times longer
+EXTRA_OECONF_prepend_class-native = "--disable-gtk-doc "
+EXTRA_OECONF_prepend_class-nativesdk = "--disable-gtk-doc "
+
+DEPENDS_append_class-target = " gtk-doc-native qemu-native"
+
+# Even though gtkdoc is disabled on -native, the gtk-doc package is still
+# needed for m4 macros.
+DEPENDS_append_class-native = " gtk-doc-native"
+DEPENDS_append_class-nativesdk = " gtk-doc-native"
# The documentation directory, where the infrastructure will be copied.
# gtkdocize has a default of "." so to handle out-of-tree builds set this to $S.
GTKDOC_DOCDIR ?= "${S}"
-DEPENDS_append = " gtk-doc-stub-native"
+do_configure_prepend () {
+ ( cd ${S}; gtkdocize --docdir ${GTKDOC_DOCDIR} || true )
+}
+
+inherit qemu
-EXTRA_OECONF_append = "\
- --disable-gtk-doc \
- --disable-gtk-doc-html \
- --disable-gtk-doc-pdf \
-"
+export STAGING_DIR_HOST
-do_configure_prepend () {
- ( cd ${S}; gtkdocize --docdir ${GTKDOC_DOCDIR} )
+do_compile_prepend_class-target () {
+
+ # Write out a qemu wrapper that will be given to gtkdoc-scangobj so that it
+ # can run target helper binaries through that.
+ qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['\$GIR_EXTRA_LIBS_PATH','$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
+ cat > ${B}/gtkdoc-qemuwrapper << EOF
+#!/bin/sh
+# Use a modules directory which doesn't exist so we don't load random things
+# which may then get deleted (or their dependencies) and potentially segfault
+export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
+
+GIR_EXTRA_LIBS_PATH=\`find ${B} -name .libs| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
+
+if test -d ".libs"; then
+ $qemu_binary ".libs/\$@"
+else
+ $qemu_binary "\$@"
+fi
+
+if [ \$? -ne 0 ]; then
+ echo "If the above error message is about missing .so libraries, then setting up GIR_EXTRA_LIBS_PATH in the recipe should help."
+ echo "(typically like this: GIR_EXTRA_LIBS_PATH=\"$""{B}/something/.libs\" )"
+ exit 1
+fi
+EOF
+ chmod +x ${B}/gtkdoc-qemuwrapper
}
+
inherit pkgconfig
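The generated gtkdoc-qemuwrapper prepends every .libs directory under ${B} to GIR_EXTRA_LIBS_PATH so that freshly built, not-yet-installed libraries are found when gtkdoc-scangobj runs target helpers under qemu. The path-collection step, approximated in Python (build path hypothetical):

    import os

    def collect_libs_dirs(build_dir):
        # roughly the wrapper's: find ${B} -name .libs | tr '\n' ':'
        dirs = []
        for root, subdirs, _ in os.walk(build_dir):
            if '.libs' in subdirs:
                dirs.append(os.path.join(root, '.libs'))
        return ':'.join(dirs)

    extra = collect_libs_dirs('/path/to/build')   # hypothetical ${B}
    gir_path = extra + ':' + os.environ.get('GIR_EXTRA_LIBS_PATH', '')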
diff --git a/import-layers/yocto-poky/meta/classes/gtk-immodules-cache.bbclass b/import-layers/yocto-poky/meta/classes/gtk-immodules-cache.bbclass
index c099cd38e..ebbc9dea8 100644
--- a/import-layers/yocto-poky/meta/classes/gtk-immodules-cache.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gtk-immodules-cache.bbclass
@@ -15,7 +15,8 @@ if [ "x$D" != "x" ]; then
${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-2.0')} \
$IMFILES > $D${libdir}/gtk-2.0/2.10.0/immodules.cache 2>/dev/null &&
sed -i -e "s:$D::" $D${libdir}/gtk-2.0/2.10.0/immodules.cache
- elif [ -x $D${bindir}/gtk-query-immodules-3.0 ]; then
+ fi
+ if [ -x $D${bindir}/gtk-query-immodules-3.0 ]; then
IMFILES=$(ls $D${libdir}/gtk-3.0/*/immodules/*.so)
${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-3.0')} \
$IMFILES > $D${libdir}/gtk-3.0/3.0.0/immodules.cache 2>/dev/null &&
@@ -40,7 +41,8 @@ if [ "x$D" != "x" ]; then
${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-2.0')} \
$IMFILES > $D${libdir}/gtk-2.0/2.10.0/immodules.cache 2>/dev/null &&
sed -i -e "s:$D::" $D${libdir}/gtk-2.0/2.10.0/immodules.cache
- elif [ -x $D${bindir}/gtk-query-immodules-3.0 ]; then
+ fi
+ if [ -x $D${bindir}/gtk-query-immodules-3.0 ]; then
IMFILES=$(ls $D${libdir}/gtk-3.0/*/immodules/*.so)
${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-3.0')} \
$IMFILES > $D${libdir}/gtk-3.0/3.0.0/immodules.cache 2>/dev/null &&
@@ -82,6 +84,6 @@ python __anonymous() {
gtkimmodules_check = d.getVar('GTKIMMODULES_PACKAGES', False)
if not gtkimmodules_check:
bb_filename = d.getVar('FILE', False)
- raise bb.build.FuncFailed("ERROR: %s inherits gtk-immodules-cache but doesn't set GTKIMMODULES_PACKAGES" % bb_filename)
+ bb.fatal("ERROR: %s inherits gtk-immodules-cache but doesn't set GTKIMMODULES_PACKAGES" % bb_filename)
}
diff --git a/import-layers/yocto-poky/meta/classes/gummiboot.bbclass b/import-layers/yocto-poky/meta/classes/gummiboot.bbclass
index 1ebb9462d..4f2dea6c3 100644
--- a/import-layers/yocto-poky/meta/classes/gummiboot.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gummiboot.bbclass
@@ -34,6 +34,8 @@ efi_populate() {
install -d ${DEST}/loader
install -d ${DEST}/loader/entries
install -m 0644 ${DEPLOY_DIR_IMAGE}/${EFI_IMAGE} ${DEST}${EFIDIR}/${DEST_EFI_IMAGE}
+ EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
+ printf 'fs0:%s\%s\n' "$EFIPATH" "$DEST_EFI_IMAGE" >${DEST}/startup.nsh
install -m 0644 ${GUMMIBOOT_CFG} ${DEST}/loader/loader.conf
for i in ${GUMMIBOOT_ENTRIES}; do
install -m 0644 ${i} ${DEST}/loader/entries
@@ -72,7 +74,7 @@ python build_efi_cfg() {
try:
cfgfile = open(cfile, 'w')
except OSError:
- raise bb.build.funcFailed('Unable to open %s' % (cfile))
+ bb.fatal('Unable to open %s' % cfile)
cfgfile.write('# Automatically created by OE\n')
cfgfile.write('default %s\n' % (labels.split()[0]))
@@ -88,14 +90,14 @@ python build_efi_cfg() {
overrides = localdata.getVar('OVERRIDES', True)
if not overrides:
- raise bb.build.FuncFailed('OVERRIDES not defined')
+ bb.fatal('OVERRIDES not defined')
entryfile = "%s/%s.conf" % (s, label)
d.appendVar("GUMMIBOOT_ENTRIES", " " + entryfile)
try:
entrycfg = open(entryfile, "w")
except OSError:
- raise bb.build.funcFailed('Unable to open %s' % (entryfile))
+ bb.fatal('Unable to open %s' % entryfile)
localdata.setVar('OVERRIDES', label + ':' + overrides)
bb.data.update_data(localdata)
diff --git a/import-layers/yocto-poky/meta/classes/icecc.bbclass b/import-layers/yocto-poky/meta/classes/icecc.bbclass
index e1c06c49c..a83789415 100644
--- a/import-layers/yocto-poky/meta/classes/icecc.bbclass
+++ b/import-layers/yocto-poky/meta/classes/icecc.bbclass
@@ -47,7 +47,8 @@ def get_cross_kernel_cc(bb,d):
# evaluate the expression by the shell if necessary
if '`' in kernel_cc or '$(' in kernel_cc:
- kernel_cc = os.popen("echo %s" % kernel_cc).read()[:-1]
+ import subprocess
+ kernel_cc = subprocess.check_output("echo %s" % kernel_cc, shell=True).decode("utf-8")[:-1]
kernel_cc = d.expand(kernel_cc)
kernel_cc = kernel_cc.replace('ccache', '').strip()
@@ -220,9 +221,14 @@ def icecc_get_and_check_tool(bb, d, tool):
# PATH or icecc-create-env script will silently create an invalid
# compiler environment package.
t = icecc_get_tool(bb, d, tool)
- if t and os.popen("readlink -f %s" % t).read()[:-1] == get_icecc(d):
- bb.error("%s is a symlink to %s in PATH and this prevents icecc from working" % (t, get_icecc(d)))
- return ""
+ if t:
+ import subprocess
+ link_path = subprocess.check_output("readlink -f %s" % t, shell=True).decode("utf-8")[:-1]
+ if link_path == get_icecc(d):
+ bb.error("%s is a symlink to %s in PATH and this prevents icecc from working" % (t, get_icecc(d)))
+ return ""
+ else:
+ return t
else:
return t
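Both icecc hunks swap os.popen() for subprocess.check_output(); on Python 3 check_output() returns bytes, hence the explicit decode, and the [:-1] slice drops the trailing newline the command prints. The pattern, isolated:

    import subprocess

    def shell_output(cmd):
        # check_output(...).decode("utf-8")[:-1], as used in the hunks above
        return subprocess.check_output(cmd, shell=True).decode("utf-8")[:-1]

    # e.g. resolving a compiler symlink the way icecc_get_and_check_tool does:
    print(shell_output("readlink -f /bin/sh"))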
diff --git a/import-layers/yocto-poky/meta/classes/image-buildinfo.bbclass b/import-layers/yocto-poky/meta/classes/image-buildinfo.bbclass
index 197b24235..3003f5d25 100644
--- a/import-layers/yocto-poky/meta/classes/image-buildinfo.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image-buildinfo.bbclass
@@ -32,7 +32,7 @@ def get_layer_git_status(path):
shell=True,
stderr=subprocess.STDOUT)
return ""
- except subprocess.CalledProcessError, ex:
+ except subprocess.CalledProcessError as ex:
# Silently treat errors as "modified", without checking for the
# (expected) return code 1 in a modified git repo. For example, we get
# output and a 129 return code when a layer isn't a git repo at all.
@@ -71,7 +71,9 @@ Build Configuration: |
Layer Revisions: |
-----------------------
''',
- get_layer_revs(d)
+ get_layer_revs(d),
+ '''
+'''
))
}
diff --git a/import-layers/yocto-poky/meta/classes/image-live.bbclass b/import-layers/yocto-poky/meta/classes/image-live.bbclass
index c8a861060..4a634dca9 100644
--- a/import-layers/yocto-poky/meta/classes/image-live.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image-live.bbclass
@@ -43,7 +43,7 @@ ROOT_LIVE ?= "root=/dev/ram0"
INITRD_IMAGE_LIVE ?= "core-image-minimal-initramfs"
INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.cpio.gz"
-ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.ext4"
+ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.ext4"
IMAGE_TYPEDEP_live = "ext4"
IMAGE_TYPEDEP_iso = "ext4"
@@ -56,7 +56,7 @@ python() {
if image_b == initrd_i:
bb.error('INITRD_IMAGE_LIVE %s cannot use image live, hddimg or iso.' % initrd_i)
bb.fatal('Check IMAGE_FSTYPES and INITRAMFS_FSTYPES settings.')
- else:
+ elif initrd_i:
d.appendVarFlag('do_bootimg', 'depends', ' %s:do_image_complete' % initrd_i)
}
@@ -144,14 +144,14 @@ build_iso() {
if [ "${PCBIOS}" = "1" ] && [ "${EFI}" != "1" ] ; then
# PCBIOS only media
mkisofs -V ${BOOTIMG_VOLUME_ID} \
- -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
+ -o ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso \
-b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
$mkisofs_compress_opts \
${MKISOFS_OPTIONS} $mkisofs_iso_level ${ISODIR}
else
# EFI only OR EFI+PCBIOS
mkisofs -A ${BOOTIMG_VOLUME_ID} -V ${BOOTIMG_VOLUME_ID} \
- -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
+ -o ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso \
-b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
$mkisofs_compress_opts ${MKISOFS_OPTIONS} $mkisofs_iso_level \
-eltorito-alt-boot -eltorito-platform efi \
@@ -160,7 +160,7 @@ build_iso() {
isohybrid_args="-u"
fi
- isohybrid $isohybrid_args ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso
+ isohybrid $isohybrid_args ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso
}
build_fat_img() {
@@ -202,12 +202,6 @@ build_fat_img() {
# Determine the final size in blocks accounting for some padding
BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})
- # Ensure total sectors is an integral number of sectors per
- # track or mcopy will complain. Sectors are 512 bytes, and we
- # generate images with 32 sectors per track. This calculation is
- # done in blocks, thus the mod by 16 instead of 32.
- BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
-
# mkdosfs will sometimes use FAT16 when it is not appropriate,
# resulting in a boot failure from SYSLINUX. Use FAT32 for
# images larger than 512MB, otherwise let mkdosfs decide.
@@ -258,13 +252,13 @@ build_hddimg() {
fi
fi
- build_fat_img ${HDDDIR} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
+ build_fat_img ${HDDDIR} ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
if [ "${PCBIOS}" = "1" ]; then
syslinux_hddimg_install
fi
- chmod 644 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
+ chmod 644 ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
fi
}
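For reference, the BLOCKS padding deleted above rounded the FAT image up to a whole number of tracks: 32 sectors of 512 bytes per track is 16 KiB, i.e. 16 of the 1 KiB blocks the calculation works in. The removed expr chain, as a worked example:

    blocks = 1000                         # image size in 1 KiB blocks
    padded = blocks + (16 - blocks % 16)  # the removed expr arithmetic
    assert padded == 1008                 # next multiple of 16 above 1000
    # Note it always pads: an already-aligned 1024 becomes 1040 (a full extra track).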
diff --git a/import-layers/yocto-poky/meta/classes/image-swab.bbclass b/import-layers/yocto-poky/meta/classes/image-swab.bbclass
deleted file mode 100644
index 6b02cadaf..000000000
--- a/import-layers/yocto-poky/meta/classes/image-swab.bbclass
+++ /dev/null
@@ -1,94 +0,0 @@
-HOST_DATA ?= "${TMPDIR}/host-contamination-data/"
-SWABBER_REPORT ?= "${LOG_DIR}/swabber/"
-SWABBER_LOGS ?= "${LOG_DIR}/contamination-logs"
-TRACE_LOGDIR ?= "${SWABBER_LOGS}/${PACKAGE_ARCH}"
-TRACE_LOGFILE = "${TRACE_LOGDIR}/${PN}-${PV}"
-
-SWAB_ORIG_TASK := "${BB_DEFAULT_TASK}"
-BB_DEFAULT_TASK = "generate_swabber_report"
-
-# Several recipes don't build with parallel make when run under strace
-# Ideally these should be fixed but as a temporary measure disable parallel
-# builds for troublesome recipes
-PARALLEL_MAKE_pn-openssl = ""
-PARALLEL_MAKE_pn-glibc = ""
-PARALLEL_MAKE_pn-glib-2.0 = ""
-PARALLEL_MAKE_pn-libxml2 = ""
-PARALLEL_MAKE_pn-readline = ""
-PARALLEL_MAKE_pn-util-linux = ""
-PARALLEL_MAKE_pn-binutils = ""
-PARALLEL_MAKE_pn-bison = ""
-PARALLEL_MAKE_pn-cmake = ""
-PARALLEL_MAKE_pn-elfutils = ""
-PARALLEL_MAKE_pn-gcc = ""
-PARALLEL_MAKE_pn-gcc-runtime = ""
-PARALLEL_MAKE_pn-m4 = ""
-PARALLEL_MAKE_pn-opkg = ""
-PARALLEL_MAKE_pn-pkgconfig = ""
-PARALLEL_MAKE_pn-prelink = ""
-PARALLEL_MAKE_pn-rpm = ""
-PARALLEL_MAKE_pn-tcl = ""
-PARALLEL_MAKE_pn-beecrypt = ""
-PARALLEL_MAKE_pn-curl = ""
-PARALLEL_MAKE_pn-gmp = ""
-PARALLEL_MAKE_pn-libmpc = ""
-PARALLEL_MAKE_pn-libxslt = ""
-PARALLEL_MAKE_pn-lzo = ""
-PARALLEL_MAKE_pn-popt = ""
-PARALLEL_MAKE_pn-linux-wrs = ""
-PARALLEL_MAKE_pn-libgcrypt = ""
-PARALLEL_MAKE_pn-gpgme = ""
-PARALLEL_MAKE_pn-udev = ""
-PARALLEL_MAKE_pn-gnutls = ""
-
-python() {
- # NOTE: It might be useful to detect host infection on native and cross
- # packages but as it turns out to be pretty hard to do this for all native
- # and cross packages which aren't swabber-native or one of its dependencies
- # I have ignored them for now...
- if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('nativesdk', d) and not bb.data.inherits_class('cross', d):
- deps = (d.getVarFlag('do_setscene', 'depends', True) or "").split()
- deps.append('strace-native:do_populate_sysroot')
- d.setVarFlag('do_setscene', 'depends', " ".join(deps))
- logdir = d.expand("${TRACE_LOGDIR}")
- bb.utils.mkdirhier(logdir)
- else:
- d.setVar('STRACEFUNC', '')
-}
-
-STRACEPID = "${@os.getpid()}"
-STRACEFUNC = "imageswab_attachstrace"
-
-do_configure[prefuncs] += "${STRACEFUNC}"
-do_compile[prefuncs] += "${STRACEFUNC}"
-
-imageswab_attachstrace () {
- STRACE=`which strace`
-
- if [ -x "$STRACE" ]; then
- swabber-strace-attach "$STRACE -f -o ${TRACE_LOGFILE}-${BB_CURRENTTASK}.log -e trace=open,execve -p ${STRACEPID}" "${TRACE_LOGFILE}-traceattach-${BB_CURRENTTASK}.log"
- fi
-}
-
-do_generate_swabber_report () {
-
- update_distro ${HOST_DATA}
-
- # Swabber can't create the directory for us
- mkdir -p ${SWABBER_REPORT}
-
- REPORTSTAMP=${SWAB_ORIG_TASK}-`date +%2m%2d%2H%2M%Y`
-
- if [ `which ccache` ] ; then
- CCACHE_DIR=`( ccache -s | grep "cache directory" | grep -o '[^ ]*$' 2> /dev/null )`
- fi
-
- if [ "$(ls -A ${HOST_DATA})" ]; then
- echo "Generating swabber report"
- swabber -d ${HOST_DATA} -l ${SWABBER_LOGS} -o ${SWABBER_REPORT}/report-${REPORTSTAMP}.txt -r ${SWABBER_REPORT}/extra_report-${REPORTSTAMP}.txt -c all -p ${TOPDIR} -f ${OEROOT}/meta/conf/swabber ${TOPDIR} ${OEROOT} ${CCACHE_DIR}
- else
- echo "No host data, cannot generate swabber report."
- fi
-}
-addtask generate_swabber_report after do_${SWAB_ORIG_TASK}
-do_generate_swabber_report[depends] = "swabber-native:do_populate_sysroot"
diff --git a/import-layers/yocto-poky/meta/classes/image-vm.bbclass b/import-layers/yocto-poky/meta/classes/image-vm.bbclass
index 47f73261f..2f35d6b4d 100644
--- a/import-layers/yocto-poky/meta/classes/image-vm.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image-vm.bbclass
@@ -23,23 +23,24 @@ do_bootdirectdisk[depends] += "dosfstools-native:do_populate_sysroot \
syslinux-native:do_populate_sysroot \
parted-native:do_populate_sysroot \
mtools-native:do_populate_sysroot \
- ${PN}:do_image_ext4 \
+ ${PN}:do_image_${VM_ROOTFS_TYPE} \
"
-IMAGE_TYPEDEP_vmdk = "ext4"
-IMAGE_TYPEDEP_vdi = "ext4"
-IMAGE_TYPEDEP_qcow2 = "ext4"
-IMAGE_TYPEDEP_hdddirect = "ext4"
+IMAGE_TYPEDEP_vmdk = "${VM_ROOTFS_TYPE}"
+IMAGE_TYPEDEP_vdi = "${VM_ROOTFS_TYPE}"
+IMAGE_TYPEDEP_qcow2 = "${VM_ROOTFS_TYPE}"
+IMAGE_TYPEDEP_hdddirect = "${VM_ROOTFS_TYPE}"
IMAGE_TYPES_MASKED += "vmdk vdi qcow2 hdddirect"
-ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.ext4"
+VM_ROOTFS_TYPE ?= "ext4"
+ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${VM_ROOTFS_TYPE}"
# Used by bootloader
LABELS_VM ?= "boot"
ROOT_VM ?= "root=/dev/sda2"
# Using an initramfs is optional. Enable it by setting INITRD_IMAGE_VM.
INITRD_IMAGE_VM ?= ""
-INITRD_VM ?= "${@'${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_VM}-${MACHINE}.cpio.gz' if '${INITRD_IMAGE_VM}' else ''}"
+INITRD_VM ?= "${@'${IMGDEPLOYDIR}/${INITRD_IMAGE_VM}-${MACHINE}.cpio.gz' if '${INITRD_IMAGE_VM}' else ''}"
do_bootdirectdisk[depends] += "${@'${INITRD_IMAGE_VM}:do_image_complete' if '${INITRD_IMAGE_VM}' else ''}"
BOOTDD_VOLUME_ID ?= "boot"
@@ -51,7 +52,7 @@ DISK_SIGNATURE[vardepsexclude] = "DISK_SIGNATURE_GENERATED"
build_boot_dd() {
HDDDIR="${S}/hdd/boot"
HDDIMG="${S}/hdd.image"
- IMAGE=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect
+ IMAGE=${IMGDEPLOYDIR}/${IMAGE_NAME}.hdddirect
populate_kernel $HDDDIR
@@ -65,12 +66,6 @@ build_boot_dd() {
BLOCKS=`du -bks $HDDDIR | cut -f 1`
BLOCKS=`expr $BLOCKS + ${BOOTDD_EXTRA_SPACE}`
- # Ensure total sectors is an integral number of sectors per
- # track or mcopy will complain. Sectors are 512 bytes, and we
- # generate images with 32 sectors per track. This calculation is
- # done in blocks, thus the mod by 16 instead of 32.
- BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
-
# Remove it since mkdosfs would fail when it exists
rm -f $HDDIMG
mkdosfs -n ${BOOTDD_VOLUME_ID} -S 512 -C $HDDIMG $BLOCKS
@@ -109,9 +104,9 @@ build_boot_dd() {
dd if=$HDDIMG of=$IMAGE conv=notrunc seek=1 bs=512
dd if=${ROOTFS} of=$IMAGE conv=notrunc seek=$OFFSET bs=512
- cd ${DEPLOY_DIR_IMAGE}
- rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
- ln -s ${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
+ cd ${IMGDEPLOYDIR}
+
+ ln -sf ${IMAGE_NAME}.hdddirect ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.hdddirect
}
python do_bootdirectdisk() {
@@ -146,8 +141,9 @@ DISK_SIGNATURE_GENERATED := "${@generate_disk_signature()}"
run_qemu_img (){
type="$1"
- qemu-img convert -O $type ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.$type
- ln -sf ${IMAGE_NAME}.$type ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.$type
+ qemu-img convert -O $type ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.hdddirect ${IMGDEPLOYDIR}/${IMAGE_NAME}.$type
+
+ ln -sf ${IMAGE_NAME}.$type ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.$type
}
create_vmdk_image () {
run_qemu_img vmdk
diff --git a/import-layers/yocto-poky/meta/classes/image.bbclass b/import-layers/yocto-poky/meta/classes/image.bbclass
index 8bfd24193..6111f6d26 100644
--- a/import-layers/yocto-poky/meta/classes/image.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image.bbclass
@@ -74,6 +74,8 @@ IMAGE_INSTALL[type] = "list"
export PACKAGE_INSTALL ?= "${IMAGE_INSTALL} ${ROOTFS_BOOTSTRAP_INSTALL} ${FEATURE_INSTALL}"
PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}"
+IMGDEPLOYDIR = "${WORKDIR}/deploy-${PN}-image-complete"
+
# Images are generally built explicitly, do not need to be part of world.
EXCLUDE_FROM_WORLD = "1"
@@ -114,11 +116,11 @@ python () {
def rootfs_variables(d):
from oe.rootfs import variable_depends
- variables = ['IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
- 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','RM_OLD_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS',
+ variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
+ 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS',
'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
- 'COMPRESSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED']
+ 'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY']
variables.extend(rootfs_command_variables(d))
variables.extend(variable_depends(d))
return " ".join(variables)
@@ -166,7 +168,7 @@ python () {
if temp:
bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN', True), feature, ' '.join(list(temp))))
- d.setVar('IMAGE_FEATURES', ' '.join(list(remain_features)))
+ d.setVar('IMAGE_FEATURES', ' '.join(sorted(list(remain_features))))
check_image_features(d)
initramfs_image = d.getVar('INITRAMFS_IMAGE', True) or ""
@@ -198,6 +200,17 @@ fakeroot python do_rootfs () {
from oe.rootfs import create_rootfs
from oe.manifest import create_manifest
+ # NOTE: if you add, remove or significantly refactor the stages of this
+ # process then you should recalculate the weightings here. This is quite
+ # easy to do - just change the MultiStageProgressReporter line temporarily
+ # to pass debug=True as the last parameter and you'll get a printout of
+ # the weightings as well as a map to the lines where next_stage() was
+ # called. Of course this isn't critical, but it helps to keep the progress
+ # reporting accurate.
+ stage_weights = [1, 203, 354, 186, 65, 4228, 1, 353, 49, 330, 382, 23, 1]
+ progress_reporter = bb.progress.MultiStageProgressReporter(d, stage_weights)
+ progress_reporter.next_stage()
+
# Handle package exclusions
excl_pkgs = d.getVar("PACKAGE_EXCLUDE", True).split()
inst_pkgs = d.getVar("PACKAGE_INSTALL", True).split()
@@ -230,11 +243,15 @@ fakeroot python do_rootfs () {
# Generate the initial manifest
create_manifest(d)
- # Generate rootfs
- create_rootfs(d)
+ progress_reporter.next_stage()
+
+ # generate rootfs
+ create_rootfs(d, progress_reporter=progress_reporter)
+
+ progress_reporter.finish()
}
do_rootfs[dirs] = "${TOPDIR}"
-do_rootfs[cleandirs] += "${S}"
+do_rootfs[cleandirs] += "${S} ${IMGDEPLOYDIR}"
do_rootfs[umask] = "022"
addtask rootfs before do_build
@@ -258,8 +275,43 @@ fakeroot python do_image_complete () {
}
do_image_complete[dirs] = "${TOPDIR}"
do_image_complete[umask] = "022"
+SSTATETASKS += "do_image_complete"
+SSTATE_SKIP_CREATION_task-image-complete = '1'
+do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
+do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
+do_image_complete[stamp-extra-info] = "${MACHINE}"
addtask do_image_complete after do_image before do_build
+# Add image-level QA/sanity checks to IMAGE_QA_COMMANDS
+#
+# IMAGE_QA_COMMANDS += " \
+# image_check_everything_ok \
+# "
+# This task runs all functions in IMAGE_QA_COMMANDS after the image
+# construction has completed in order to validate the resulting image.
+fakeroot python do_image_qa () {
+ from oe.utils import ImageQAFailed
+
+ qa_cmds = (d.getVar('IMAGE_QA_COMMANDS', True) or '').split()
+ qamsg = ""
+
+ for cmd in qa_cmds:
+ try:
+ bb.build.exec_func(cmd, d)
+ except oe.utils.ImageQAFailed as e:
+ qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description)
+ except bb.build.FuncFailed as e:
+ qamsg = qamsg + '\tImage QA function %s failed' % e.name
+ if e.logfile:
+ qamsg = qamsg + ' (log file is located at %s)' % e.logfile
+ qamsg = qamsg + '\n'
+
+ if qamsg:
+ imgname = d.getVar('IMAGE_NAME', True)
+ bb.fatal("QA errors found whilst validating image: %s\n%s" % (imgname, qamsg))
+}
+addtask do_image_qa after do_image_complete before do_build
+
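do_image_qa above runs every function listed in IMAGE_QA_COMMANDS and aggregates the failures; a check signals a problem by raising oe.utils.ImageQAFailed, whose name/description attributes feed the summary. A self-contained sketch with a local stand-in class and a hypothetical check (the constructor is assumed to take (description, name), matching the e.description/e.name use above):

    class ImageQAFailed(Exception):            # local stand-in for oe.utils.ImageQAFailed
        def __init__(self, description, name=None):
            self.description = description
            self.name = name

    def image_check_nonempty(rootfs_entries):  # hypothetical QA function
        if not rootfs_entries:
            raise ImageQAFailed('rootfs contains no files', 'image_check_nonempty')

    qamsg = ""
    try:
        image_check_nonempty([])               # simulate an empty rootfs
    except ImageQAFailed as e:
        qamsg += '\tImage QA function %s failed: %s\n' % (e.name, e.description)
    assert 'image_check_nonempty' in qamsg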
#
# Write environment variables used by wic
# to tmp/sysroots/<machine>/imgdata/<image>.env
@@ -287,6 +339,7 @@ def setup_debugfs_variables(d):
d.appendVar('IMAGE_ROOTFS', '-dbg')
d.appendVar('IMAGE_LINK_NAME', '-dbg')
d.appendVar('IMAGE_NAME','-dbg')
+ d.setVar('IMAGE_BUILDING_DEBUGFS', 'true')
debugfs_image_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS', True)
if debugfs_image_fstypes:
d.setVar('IMAGE_FSTYPES', debugfs_image_fstypes)
@@ -297,8 +350,16 @@ python setup_debugfs () {
python () {
vardeps = set()
- ctypes = d.getVar('COMPRESSIONTYPES', True).split()
- old_overrides = d.getVar('OVERRIDES', 0)
+ # We allow CONVERSIONTYPES to have duplicates. That avoids breaking
+ # derived distros when OE-core or some other layer independently adds
+ # the same type. There is still only one command for each type, but
+ # presumably the commands will do the same when the type is the same,
+ # even when added in different places.
+ #
+ # Without de-duplication, gen_conversion_cmds() below
+ # would create the same compression command multiple times.
+ ctypes = set(d.getVar('CONVERSIONTYPES', True).split())
+ old_overrides = d.getVar('OVERRIDES', False)
def _image_base_type(type):
basetype = type
@@ -354,6 +415,7 @@ python () {
d.appendVarFlag('do_image', 'vardeps', ' '.join(vardeps))
maskedtypes = (d.getVar('IMAGE_TYPES_MASKED', True) or "").split()
+ maskedtypes = [dbg + t for t in maskedtypes for dbg in ("", "debugfs_")]
for t in basetypes:
vardeps = set()
@@ -386,21 +448,30 @@ python () {
cmds.append("\t" + image_cmd)
else:
bb.fatal("No IMAGE_CMD defined for IMAGE_FSTYPES entry '%s' - possibly invalid type name or missing support class" % t)
- cmds.append(localdata.expand("\tcd ${DEPLOY_DIR_IMAGE}"))
+ cmds.append(localdata.expand("\tcd ${IMGDEPLOYDIR}"))
+
+ # Since a copy of IMAGE_CMD_xxx will be inlined within do_image_xxx,
+ # prevent a redundant copy of IMAGE_CMD_xxx being emitted as a function.
+ d.delVarFlag('IMAGE_CMD_' + realt, 'func')
rm_tmp_images = set()
def gen_conversion_cmds(bt):
for ctype in ctypes:
- if bt.endswith("." + ctype):
+ if bt[bt.find('.') + 1:] == ctype:
type = bt[0:-len(ctype) - 1]
if type.startswith("debugfs_"):
type = type[8:]
# Create input image first.
gen_conversion_cmds(type)
localdata.setVar('type', type)
- cmds.append("\t" + localdata.getVar("COMPRESS_CMD_" + ctype, True))
+ cmd = "\t" + (localdata.getVar("CONVERSION_CMD_" + ctype, True) or localdata.getVar("COMPRESS_CMD_" + ctype, True))
+ if cmd not in cmds:
+ cmds.append(cmd)
+ vardeps.add('CONVERSION_CMD_' + ctype)
vardeps.add('COMPRESS_CMD_' + ctype)
- subimages.append(type + "." + ctype)
+ subimage = type + "." + ctype
+ if subimage not in subimages:
+ subimages.append(subimage)
if type not in alltypes:
rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))
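gen_conversion_cmds() above recurses so that a chained type such as ext4.gz first emits the command for its base image and then the conversion, with both the command list and the subimage list kept duplicate-free. A standalone sketch (command table hypothetical):

    conversion_cmds = {'gz': 'gzip ...', 'xz': 'xz ...'}  # stand-in CONVERSION_CMD_* table
    ctypes = set(conversion_cmds)
    cmds, subimages = [], []

    def gen_conversion_cmds(bt):
        for ctype in ctypes:
            if bt[bt.find('.') + 1:] == ctype:    # compare everything after the first dot
                base = bt[0:-len(ctype) - 1]      # 'ext4.gz' -> 'ext4'
                if base.startswith('debugfs_'):
                    base = base[8:]
                gen_conversion_cmds(base)         # create the input image first
                if conversion_cmds[ctype] not in cmds:
                    cmds.append(conversion_cmds[ctype])
                if base + '.' + ctype not in subimages:
                    subimages.append(base + '.' + ctype)

    gen_conversion_cmds('ext4.gz')
    assert cmds == ['gzip ...'] and subimages == ['ext4.gz']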
@@ -467,6 +538,12 @@ def get_rootfs_size(d):
base_size += rootfs_alignment - 1
base_size -= base_size % rootfs_alignment
+ # Do not check image size of the debugfs image. This is not supposed
+ # to be deployed, etc. so it doesn't make sense to limit the size
+    # of the debugfs image.
+ if (d.getVar('IMAGE_BUILDING_DEBUGFS', True) or "") == "true":
+ return base_size
+
# Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set)
if rootfs_maxsize:
rootfs_maxsize_int = int(rootfs_maxsize)
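The base_size checked above has already been rounded up to IMAGE_ROOTFS_ALIGNMENT by the add-then-truncate idiom at the top of this hunk; a worked example with a hypothetical 1024 KiB alignment:

    base_size = 4097
    rootfs_alignment = 1024                   # hypothetical IMAGE_ROOTFS_ALIGNMENT
    base_size += rootfs_alignment - 1         # add-then-truncate rounds up
    base_size -= base_size % rootfs_alignment
    assert base_size == 5120                  # 4097 -> next 1024 boundary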
@@ -495,28 +572,26 @@ python set_image_size () {
#
python create_symlinks() {
- deploy_dir = d.getVar('DEPLOY_DIR_IMAGE', True)
+ deploy_dir = d.getVar('IMGDEPLOYDIR', True)
img_name = d.getVar('IMAGE_NAME', True)
link_name = d.getVar('IMAGE_LINK_NAME', True)
manifest_name = d.getVar('IMAGE_MANIFEST', True)
taskname = d.getVar("BB_CURRENTTASK", True)
subimages = (d.getVarFlag("do_" + taskname, 'subimages', False) or "").split()
imgsuffix = d.getVarFlag("do_" + taskname, 'imgsuffix', True) or d.expand("${IMAGE_NAME_SUFFIX}.")
- os.chdir(deploy_dir)
if not link_name:
return
for type in subimages:
- if os.path.exists(img_name + imgsuffix + type):
- dst = deploy_dir + "/" + link_name + "." + type
- src = img_name + imgsuffix + type
+ dst = os.path.join(deploy_dir, link_name + "." + type)
+ src = img_name + imgsuffix + type
+ if os.path.exists(os.path.join(deploy_dir, src)):
bb.note("Creating symlink: %s -> %s" % (dst, src))
if os.path.islink(dst):
- if d.getVar('RM_OLD_IMAGE', True) == "1" and \
- os.path.exists(os.path.realpath(dst)):
- os.remove(os.path.realpath(dst))
os.remove(dst)
os.symlink(src, dst)
+ else:
+ bb.note("Skipping symlink, source does not exist: %s -> %s" % (dst, src))
}
MULTILIBRE_ALLOW_REP =. "${base_bindir}|${base_sbindir}|${bindir}|${sbindir}|${libexecdir}|${sysconfdir}|${nonarch_base_libdir}/udev|/lib/modules/[^/]*/modules.*|"
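The reworked create_symlinks() builds absolute destinations under IMGDEPLOYDIR instead of chdir'ing into the deploy directory, keeps the link target relative so the link survives relocation, and now logs a note instead of silently skipping when a subimage is absent. The per-type loop, trimmed to a standalone function (example call hypothetical):

    import os

    def create_symlinks(deploy_dir, img_name, link_name, imgsuffix, subimages):
        for t in subimages:
            dst = os.path.join(deploy_dir, link_name + "." + t)
            src = img_name + imgsuffix + t   # relative target, as in the hunk above
            if os.path.exists(os.path.join(deploy_dir, src)):
                if os.path.islink(dst):
                    os.remove(dst)
                os.symlink(src, dst)

    # create_symlinks('/deploy', 'img-20160101', 'img', '.rootfs.', ['ext4', 'ext4.gz'])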
diff --git a/import-layers/yocto-poky/meta/classes/image_types.bbclass b/import-layers/yocto-poky/meta/classes/image_types.bbclass
index 53af7ca8d..1ce8334e3 100644
--- a/import-layers/yocto-poky/meta/classes/image_types.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image_types.bbclass
@@ -11,41 +11,40 @@ IMAGE_ROOTFS_ALIGNMENT ?= "1"
def imagetypes_getdepends(d):
def adddep(depstr, deps):
- for i in (depstr or "").split():
- if i not in deps:
- deps.append(i)
+ for d in (depstr or "").split():
+ # Add task dependency if not already present
+ if ":" not in d:
+ d += ":do_populate_sysroot"
+ deps.add(d)
- deps = []
- ctypes = d.getVar('COMPRESSIONTYPES', True).split()
fstypes = set((d.getVar('IMAGE_FSTYPES', True) or "").split())
fstypes |= set((d.getVar('IMAGE_FSTYPES_DEBUGFS', True) or "").split())
- for type in fstypes:
- if type in ["vmdk", "vdi", "qcow2", "hdddirect", "live", "iso", "hddimg"]:
- type = "ext4"
- basetype = type
- for ctype in ctypes:
- if type.endswith("." + ctype):
- basetype = type[:-len("." + ctype)]
- adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype, True), deps)
- break
+
+ deps = set()
+ for typestring in fstypes:
+ types = typestring.split(".")
+ basetype, resttypes = types[0], types[1:]
+
+ adddep(d.getVar('IMAGE_DEPENDS_%s' % basetype, True) , deps)
for typedepends in (d.getVar("IMAGE_TYPEDEP_%s" % basetype, True) or "").split():
adddep(d.getVar('IMAGE_DEPENDS_%s' % typedepends, True) , deps)
- adddep(d.getVar('IMAGE_DEPENDS_%s' % basetype, True) , deps)
-
- depstr = ""
- for dep in deps:
- depstr += " " + dep + ":do_populate_sysroot"
- return depstr
+ for ctype in resttypes:
+ adddep(d.getVar("CONVERSION_DEPENDS_%s" % ctype, True), deps)
+ adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype, True), deps)
+    # Sort the set so that ordering is consistent
+ return " ".join(sorted(deps))
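The rewritten imagetypes_getdepends() splits every IMAGE_FSTYPES entry on '.' into a base type plus trailing conversion types, defaults bare dependencies to :do_populate_sysroot, and returns a sorted, de-duplicated string so the ordering (and hence task hashes) stays stable. As a standalone sketch (dependency table hypothetical):

    depends = {'IMAGE_DEPENDS_ext4': 'e2fsprogs-native',
               'CONVERSION_DEPENDS_xz': 'xz-native'}     # stand-in variable lookups

    def getdepends(fstypes):
        deps = set()

        def adddep(depstr):
            for dep in (depstr or "").split():
                if ":" not in dep:
                    dep += ":do_populate_sysroot"        # default task suffix
                deps.add(dep)

        for typestring in fstypes:
            types = typestring.split(".")
            basetype, resttypes = types[0], types[1:]
            adddep(depends.get('IMAGE_DEPENDS_%s' % basetype))
            for ctype in resttypes:
                adddep(depends.get('CONVERSION_DEPENDS_%s' % ctype))
        return " ".join(sorted(deps))

    assert getdepends(['ext4.xz']) == \
        'e2fsprogs-native:do_populate_sysroot xz-native:do_populate_sysroot'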
-XZ_COMPRESSION_LEVEL ?= "-e -6"
+XZ_COMPRESSION_LEVEL ?= "-3"
XZ_INTEGRITY_CHECK ?= "crc32"
XZ_THREADS ?= "-T 0"
+ZIP_COMPRESSION_LEVEL ?= "-9"
+
JFFS2_SUM_EXTRA_ARGS ?= ""
-IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
+IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
-IMAGE_CMD_cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cramfs ${EXTRA_IMAGECMD}"
+IMAGE_CMD_cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cramfs ${EXTRA_IMAGECMD}"
oe_mkext234fs () {
fstype=$1
@@ -65,8 +64,8 @@ oe_mkext234fs () {
eval COUNT=\"$MIN_COUNT\"
fi
# Create a sparse image block
- dd if=/dev/zero of=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
- mkfs.$fstype -F $extra_imagecmd ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}
+ dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
+ mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}
}
IMAGE_CMD_ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
@@ -76,16 +75,16 @@ IMAGE_CMD_ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
MIN_BTRFS_SIZE ?= "16384"
IMAGE_CMD_btrfs () {
if [ ${ROOTFS_SIZE} -gt ${MIN_BTRFS_SIZE} ]; then
- dd if=/dev/zero of=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs count=${ROOTFS_SIZE} bs=1024
- mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
+ dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs count=${ROOTFS_SIZE} bs=1024
+ mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
else
bbfatal "Rootfs is too small for BTRFS (Rootfs Actual Size: ${ROOTFS_SIZE}, BTRFS Minimum Size: ${MIN_BTRFS_SIZE})"
fi
}
-IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
-IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
-IMAGE_CMD_squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
+IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
+IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
+IMAGE_CMD_squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
# By default, tar from the host is used, which can be quite old. If
# you need special parameters (like --xattrs) which are only supported
@@ -98,18 +97,25 @@ IMAGE_CMD_squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE
# In practice, it turned out to be not needed when creating archives and
# required when extracting, but it seems prudent to use it in both cases.
IMAGE_CMD_TAR ?= "tar"
-IMAGE_CMD_tar = "${IMAGE_CMD_TAR} -cvf ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} ."
+IMAGE_CMD_tar = "${IMAGE_CMD_TAR} -cvf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} ."
do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
IMAGE_CMD_cpio () {
- (cd ${IMAGE_ROOTFS} && find . | cpio -o -H newc >${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
- if [ ! -L ${IMAGE_ROOTFS}/init -a ! -e ${IMAGE_ROOTFS}/init ]; then
- if [ -L ${IMAGE_ROOTFS}/sbin/init -o -e ${IMAGE_ROOTFS}/sbin/init ]; then
- ln -sf /sbin/init ${WORKDIR}/cpio_append/init
- else
- touch ${WORKDIR}/cpio_append/init
+ (cd ${IMAGE_ROOTFS} && find . | cpio -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
+ # We only need the /init symlink if we're building the real
+ # image. The -dbg image doesn't need it! By being clever
+ # about this we also avoid 'touch' below failing, as it
+ # might be trying to touch /sbin/init on the host since both
+ # the normal and the -dbg image share the same WORKDIR
+ if [ "${IMAGE_BUILDING_DEBUGFS}" != "true" ]; then
+ if [ ! -L ${IMAGE_ROOTFS}/init ] && [ ! -e ${IMAGE_ROOTFS}/init ]; then
+ if [ -L ${IMAGE_ROOTFS}/sbin/init ] || [ -e ${IMAGE_ROOTFS}/sbin/init ]; then
+ ln -sf /sbin/init ${WORKDIR}/cpio_append/init
+ else
+ touch ${WORKDIR}/cpio_append/init
+ fi
+ (cd ${WORKDIR}/cpio_append && echo ./init | cpio -oA -H newc -F ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
fi
- (cd ${WORKDIR}/cpio_append && echo ./init | cpio -oA -H newc -F ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
fi
}
@@ -117,8 +123,8 @@ ELF_KERNEL ?= "${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE}"
ELF_APPEND ?= "ramdisk_size=32768 root=/dev/ram0 rw console="
IMAGE_CMD_elf () {
- test -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf && rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf
- mkelfImage --kernel=${ELF_KERNEL} --initrd=${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.cpio.gz --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf --append='${ELF_APPEND}' ${EXTRA_IMAGECMD}
+ test -f ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf && rm -f ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf
+ mkelfImage --kernel=${ELF_KERNEL} --initrd=${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.cpio.gz --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf --append='${ELF_APPEND}' ${EXTRA_IMAGECMD}
}
IMAGE_TYPEDEP_elf = "cpio.gz"
@@ -136,20 +142,20 @@ multiubi_mkfs() {
echo \[ubifs\] > ubinize${vname}-${IMAGE_NAME}.cfg
echo mode=ubi >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo image=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs >> ubinize${vname}-${IMAGE_NAME}.cfg
+ echo image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs >> ubinize${vname}-${IMAGE_NAME}.cfg
echo vol_id=0 >> ubinize${vname}-${IMAGE_NAME}.cfg
echo vol_type=dynamic >> ubinize${vname}-${IMAGE_NAME}.cfg
echo vol_name=${UBI_VOLNAME} >> ubinize${vname}-${IMAGE_NAME}.cfg
echo vol_flags=autoresize >> ubinize${vname}-${IMAGE_NAME}.cfg
- mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ${mkubifs_args}
- ubinize -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ${ubinize_args} ubinize${vname}-${IMAGE_NAME}.cfg
+ mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ${mkubifs_args}
+ ubinize -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ${ubinize_args} ubinize${vname}-${IMAGE_NAME}.cfg
# Cleanup cfg file
- mv ubinize${vname}-${IMAGE_NAME}.cfg ${DEPLOY_DIR_IMAGE}/
+ mv ubinize${vname}-${IMAGE_NAME}.cfg ${IMGDEPLOYDIR}/
# Create own symlinks for 'named' volumes
if [ -n "$vname" ]; then
- cd ${DEPLOY_DIR_IMAGE}
+ cd ${IMGDEPLOYDIR}
if [ -e ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ]; then
ln -sf ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs \
${IMAGE_LINK_NAME}${vname}.ubifs
@@ -176,9 +182,9 @@ IMAGE_CMD_ubi () {
multiubi_mkfs "${MKUBIFS_ARGS}" "${UBINIZE_ARGS}"
}
-IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
+IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
-WKS_FILE ?= "${IMAGE_BASENAME}.${MACHINE}.wks"
+WKS_FILE ??= "${IMAGE_BASENAME}.${MACHINE}.wks"
WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks"
WKS_SEARCH_PATH ?= "${THISDIR}:${@':'.join('%s/scripts/lib/wic/canned-wks' % l for l in '${BBPATH}:${COREBASE}'.split(':'))}"
WKS_FULL_PATH = "${@wks_search('${WKS_FILES}'.split(), '${WKS_SEARCH_PATH}') or ''}"
@@ -193,22 +199,76 @@ def wks_search(files, search_path):
if searched:
return searched
+WIC_CREATE_EXTRA_ARGS ?= ""
+
IMAGE_CMD_wic () {
- out="${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}"
+ out="${IMGDEPLOYDIR}/${IMAGE_NAME}"
wks="${WKS_FULL_PATH}"
if [ -z "$wks" ]; then
bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
fi
- BUILDDIR="${TOPDIR}" wic create "$wks" --vars "${STAGING_DIR_TARGET}/imgdata/" -e "${IMAGE_BASENAME}" -o "$out/"
+ BUILDDIR="${TOPDIR}" wic create "$wks" --vars "${STAGING_DIR_TARGET}/imgdata/" -e "${IMAGE_BASENAME}" -o "$out/" ${WIC_CREATE_EXTRA_ARGS}
mv "$out/build/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
rm -rf "$out/"
}
IMAGE_CMD_wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES"
# Rebuild when the wks file or vars in WICVARS change
-USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${COMPRESSIONTYPES}'.split()), '1', '', d)}"
-do_image_wic[file-checksums] += "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}"
+USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}"
+WKS_FILE_CHECKSUM = "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}"
+do_image_wic[file-checksums] += "${WKS_FILE_CHECKSUM}"
+
+python () {
+ if d.getVar('USING_WIC', True) and 'do_bootimg' in d:
+ bb.build.addtask('do_image_wic', '', 'do_bootimg', d)
+}
+
+python do_write_wks_template () {
+ """Write out expanded template contents to WKS_FULL_PATH."""
+ import re
+
+ template_body = d.getVar('_WKS_TEMPLATE', True)
+
+ # Remove any remnant variable references left behind by the expansion
+ # due to undefined variables
+ expand_var_regexp = re.compile(r"\${[^{}@\n\t :]+}")
+ while True:
+ new_body = re.sub(expand_var_regexp, '', template_body)
+ if new_body == template_body:
+ break
+ else:
+ template_body = new_body
+
+ wks_file = d.getVar('WKS_FULL_PATH', True)
+ with open(wks_file, 'w') as f:
+ f.write(template_body)
+}
+
+python () {
+ if d.getVar('USING_WIC', True):
+ wks_file_u = d.getVar('WKS_FULL_PATH', False)
+ wks_file = d.expand(wks_file_u)
+ base, ext = os.path.splitext(wks_file)
+ if ext == '.in' and os.path.exists(wks_file):
+ wks_out_file = os.path.join(d.getVar('WORKDIR', True), os.path.basename(base))
+ d.setVar('WKS_FULL_PATH', wks_out_file)
+ d.setVar('WKS_TEMPLATE_PATH', wks_file_u)
+ d.setVar('WKS_FILE_CHECKSUM', '${WKS_TEMPLATE_PATH}:True')
+
+ try:
+ with open(wks_file, 'r') as f:
+ body = f.read()
+ except (IOError, OSError) as exc:
+ pass
+ else:
+ # Previously, I used expandWithRefs to get the dependency list
+ # and add it to WICVARS, but there's no point re-parsing the
+ # file in process_wks_template as well, so just put it in
+ # a variable and let the metadata deal with the deps.
+ d.setVar('_WKS_TEMPLATE', body)
+ bb.build.addtask('do_write_wks_template', 'do_image_wic', None, d)
+}
EXTRA_IMAGECMD = ""
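do_write_wks_template() above strips any ${VAR} references that survive datastore expansion of a .wks.in template, looping until the text stops changing so nested references are also removed. The loop, runnable in isolation:

    import re

    expand_var_regexp = re.compile(r"\${[^{}@\n\t :]+}")   # same pattern as above

    def strip_unexpanded(template_body):
        while True:
            new_body = expand_var_regexp.sub('', template_body)
            if new_body == template_body:
                return new_body
            template_body = new_body

    assert strip_unexpanded('bootloader --append="${APPEND}"') == 'bootloader --append=""'
    assert strip_unexpanded('${OUTER${INNER}}') == ''       # nested refs take two passes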
@@ -262,25 +322,35 @@ IMAGE_TYPES = " \
wic wic.gz wic.bz2 wic.lzma \
"
-COMPRESSIONTYPES = "gz bz2 lzma xz lz4 sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum"
-COMPRESS_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-COMPRESS_CMD_gz = "gzip -f -9 -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
-COMPRESS_CMD_bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-COMPRESS_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_THREADS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
-COMPRESS_CMD_lz4 = "lz4c -9 -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
-COMPRESS_CMD_sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
-COMPRESS_CMD_md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
-COMPRESS_CMD_sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
-COMPRESS_CMD_sha224sum = "sha224sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha224sum"
-COMPRESS_CMD_sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha256sum"
-COMPRESS_CMD_sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
-COMPRESS_CMD_sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
-COMPRESS_DEPENDS_lzma = "xz-native"
-COMPRESS_DEPENDS_gz = ""
-COMPRESS_DEPENDS_bz2 = "pbzip2-native"
-COMPRESS_DEPENDS_xz = "xz-native"
-COMPRESS_DEPENDS_lz4 = "lz4-native"
-COMPRESS_DEPENDS_sum = "mtd-utils-native"
+# Compression is a special case of conversion. The old variable
+# names are still supported for backward compatibility. When defining
+# new compression or conversion commands, use CONVERSIONTYPES and
+# CONVERSION_CMD/DEPENDS, as in the sketch after this block.
+COMPRESSIONTYPES ?= ""
+
+CONVERSIONTYPES = "gz bz2 lzma xz lz4 zip sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap ${COMPRESSIONTYPES}"
+CONVERSION_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD_gz = "gzip -f -9 -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
+CONVERSION_CMD_bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_THREADS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
+CONVERSION_CMD_lz4 = "lz4c -9 -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
+CONVERSION_CMD_zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD_sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
+CONVERSION_CMD_md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
+CONVERSION_CMD_sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
+CONVERSION_CMD_sha224sum = "sha224sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha224sum"
+CONVERSION_CMD_sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha256sum"
+CONVERSION_CMD_sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
+CONVERSION_CMD_sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
+CONVERSION_CMD_bmap = "bmaptool create ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.bmap"
+CONVERSION_DEPENDS_lzma = "xz-native"
+CONVERSION_DEPENDS_gz = ""
+CONVERSION_DEPENDS_bz2 = "pbzip2-native"
+CONVERSION_DEPENDS_xz = "xz-native"
+CONVERSION_DEPENDS_lz4 = "lz4-native"
+CONVERSION_DEPENDS_zip = "zip-native"
+CONVERSION_DEPENDS_sum = "mtd-utils-native"
+CONVERSION_DEPENDS_bmap = "bmap-tools-native"
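
As the comment above notes, a layer can now add its own conversion without touching this class. A hedged sketch; the zst type, its flags, and the zstd-native dependency are illustrative and not part of this patch:

    CONVERSIONTYPES += "zst"
    CONVERSION_CMD_zst = "zstd -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
    CONVERSION_DEPENDS_zst = "zstd-native"

    # then request the chained type from an image recipe:
    IMAGE_FSTYPES += "ext4.zst"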
RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4"
RUNNABLE_MACHINE_PATTERNS ?= "qemu"
@@ -296,4 +366,4 @@ IMAGE_TYPES_MASKED ?= ""
# The WICVARS variable defines the list of BitBake variables used in the wic code;
# variables from this list are written to the <image>.env file
-WICVARS ?= "BBLAYERS DEPLOY_DIR_IMAGE HDDDIR IMAGE_BASENAME IMAGE_BOOT_FILES IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD ISODIR MACHINE_ARCH ROOTFS_SIZE STAGING_DATADIR STAGING_DIR_NATIVE STAGING_LIBDIR TARGET_SYS"
+WICVARS ?= "BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE HDDDIR IMAGE_BASENAME IMAGE_BOOT_FILES IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD ISODIR MACHINE_ARCH ROOTFS_SIZE STAGING_DATADIR STAGING_DIR_NATIVE STAGING_LIBDIR TARGET_SYS"
diff --git a/import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass b/import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass
index 19e4aa2e4..6c8c1ff60 100644
--- a/import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass
@@ -2,25 +2,25 @@ inherit image_types kernel-arch
oe_mkimage () {
mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C $2 -n ${IMAGE_NAME} \
- -d ${DEPLOY_DIR_IMAGE}/$1 ${DEPLOY_DIR_IMAGE}/$1.u-boot
+ -d ${IMGDEPLOYDIR}/$1 ${IMGDEPLOYDIR}/$1.u-boot
if [ x$3 = x"clean" ]; then
rm $1
fi
}
-COMPRESSIONTYPES += "gz.u-boot bz2.u-boot lzma.u-boot u-boot"
+CONVERSIONTYPES += "gz.u-boot bz2.u-boot lzma.u-boot u-boot"
-COMPRESS_DEPENDS_u-boot = "u-boot-mkimage-native"
-COMPRESS_CMD_u-boot = "oe_mkimage ${IMAGE_NAME}.rootfs.${type} none"
+CONVERSION_DEPENDS_u-boot = "u-boot-mkimage-native"
+CONVERSION_CMD_u-boot = "oe_mkimage ${IMAGE_NAME}.rootfs.${type} none"
-COMPRESS_DEPENDS_gz.u-boot = "u-boot-mkimage-native"
-COMPRESS_CMD_gz.u-boot = "${COMPRESS_CMD_gz}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.gz gzip clean"
+CONVERSION_DEPENDS_gz.u-boot = "u-boot-mkimage-native"
+CONVERSION_CMD_gz.u-boot = "${CONVERSION_CMD_gz}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.gz gzip clean"
-COMPRESS_DEPENDS_bz2.u-boot = "u-boot-mkimage-native"
-COMPRESS_CMD_bz2.u-boot = "${COMPRESS_CMD_bz2}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.bz2 bzip2 clean"
+CONVERSION_DEPENDS_bz2.u-boot = "u-boot-mkimage-native"
+CONVERSION_CMD_bz2.u-boot = "${CONVERSION_CMD_bz2}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.bz2 bzip2 clean"
-COMPRESS_DEPENDS_lzma.u-boot = "u-boot-mkimage-native"
-COMPRESS_CMD_lzma.u-boot = "${COMPRESS_CMD_lzma}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.lzma lzma clean"
+CONVERSION_DEPENDS_lzma.u-boot = "u-boot-mkimage-native"
+CONVERSION_CMD_lzma.u-boot = "${CONVERSION_CMD_lzma}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.lzma lzma clean"
IMAGE_TYPES += "ext2.u-boot ext2.gz.u-boot ext2.bz2.u-boot ext2.lzma.u-boot ext3.gz.u-boot ext4.gz.u-boot cpio.gz.u-boot"
diff --git a/import-layers/yocto-poky/meta/classes/insane.bbclass b/import-layers/yocto-poky/meta/classes/insane.bbclass
index c57b21735..1d7377825 100644
--- a/import-layers/yocto-poky/meta/classes/insane.bbclass
+++ b/import-layers/yocto-poky/meta/classes/insane.bbclass
@@ -54,8 +54,8 @@ UNKNOWN_CONFIGURE_WHITELIST ?= "--enable-nls --disable-nls --disable-silent-rule
# feel free to add and correct.
#
# TARGET_OS TARGET_ARCH MACHINE, OSABI, ABIVERSION, Little Endian, 32bit?
-def package_qa_get_machine_dict():
- return {
+def package_qa_get_machine_dict(d):
+ machdata = {
"darwin9" : {
"arm" : (40, 0, 0, True, 32),
},
@@ -66,6 +66,8 @@ def package_qa_get_machine_dict():
"i586" : (3, 0, 0, True, 32),
"x86_64": (62, 0, 0, True, 64),
"epiphany": (4643, 0, 0, True, 32),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
},
"linux" : {
"aarch64" : (183, 0, 0, True, 64),
@@ -87,6 +89,10 @@ def package_qa_get_machine_dict():
"mipsel": ( 8, 0, 0, True, 32),
"mips64": ( 8, 0, 0, False, 64),
"mips64el": ( 8, 0, 0, True, 64),
+ "mipsisa32r6": ( 8, 0, 0, False, 32),
+ "mipsisa32r6el": ( 8, 0, 0, True, 32),
+ "mipsisa64r6": ( 8, 0, 0, False, 64),
+ "mipsisa64r6el": ( 8, 0, 0, True, 64),
"nios2": (113, 0, 0, True, 32),
"s390": (22, 0, 0, False, 32),
"sh4": (42, 0, 0, True, 32),
@@ -168,18 +174,32 @@ def package_qa_get_machine_dict():
},
}
+    # Add in any extra user-supplied data, which may come from a BSP layer, removing
+    # the need to always change this class directly
+ extra_machdata = (d.getVar("PACKAGEQA_EXTRA_MACHDEFFUNCS", True) or "").split()
+ for m in extra_machdata:
+ call = m + "(machdata, d)"
+ locs = { "machdata" : machdata, "d" : d}
+ machdata = bb.utils.better_eval(call, locs)
+
+ return machdata
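
The PACKAGEQA_EXTRA_MACHDEFFUNCS hook above lets a BSP layer extend the machine table without patching insane.bbclass. A sketch of what such a layer might provide, assuming the function is defined somewhere visible to the metadata (e.g. a class the recipes inherit); the function name and the ELF tuple are illustrative:

    PACKAGEQA_EXTRA_MACHDEFFUNCS += "mybsp_update_machdata"

    def mybsp_update_machdata(machdata, d):
        # (e_machine, osabi, abiversion, littleendian, bits) for a custom arch
        machdata.setdefault("linux", {})["myarch"] = (12345, 0, 0, True, 32)
        return machdata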
-def package_qa_clean_path(path,d):
- """ Remove the common prefix from the path. In this case it is the TMPDIR"""
- return path.replace(d.getVar("TMPDIR", True) + "/", "")
+
+def package_qa_clean_path(path, d, pkg=None):
+ """
+ Remove redundant paths from the path for display. If pkg isn't set then
+ TMPDIR is stripped, otherwise PKGDEST/pkg is stripped.
+ """
+ if pkg:
+ path = path.replace(os.path.join(d.getVar("PKGDEST", True), pkg), "/")
+ return path.replace(d.getVar("TMPDIR", True), "/").replace("//", "/")
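
For illustration, the new pkg argument cleans the same path two different ways (the path is hypothetical):

    # path = PKGDEST/foo/usr/lib/libfoo.so.1
    package_qa_clean_path(path, d, "foo")   # -> "/usr/lib/libfoo.so.1"
    package_qa_clean_path(path, d)          # -> path with TMPDIR replaced by "/"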
def package_qa_write_error(type, error, d):
logfile = d.getVar('QA_LOGFILE', True)
if logfile:
p = d.getVar('P', True)
- f = file( logfile, "a+")
- print >> f, "%s: %s [%s]" % (p, error, type)
- f.close()
+ with open(logfile, "a+") as f:
+ f.write("%s: %s [%s]\n" % (p, error, type))
def package_qa_handle_error(error_class, error_msg, d):
package_qa_write_error(error_class, error_msg, d)
@@ -400,7 +420,7 @@ def package_qa_check_unsafe_references_in_binaries(path, name, d, elf, messages)
sysroot_path_usr = sysroot_path + exec_prefix
try:
- ldd_output = bb.process.Popen(["prelink-rtld", "--root", sysroot_path, path], stdout=sub.PIPE).stdout.read()
+ ldd_output = bb.process.Popen(["prelink-rtld", "--root", sysroot_path, path], stdout=sub.PIPE).stdout.read().decode("utf-8")
except bb.process.CmdError:
error_msg = pn + ": prelink-rtld aborted when processing %s" % path
package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
@@ -495,6 +515,8 @@ def package_qa_check_arch(path,name,d, elf, messages):
"""
Check if archs are compatible
"""
+ import re
+
if not elf:
return
@@ -520,15 +542,15 @@ def package_qa_check_arch(path,name,d, elf, messages):
#if this will throw an exception, then fix the dict above
(machine, osabi, abiversion, littleendian, bits) \
- = package_qa_get_machine_dict()[target_os][target_arch]
+ = package_qa_get_machine_dict(d)[target_os][target_arch]
# Check the architecture and endiannes of the binary
- if not ((machine == elf.machine()) or \
- ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32" or target_os == "linux-gnun32"))):
- package_qa_add_message(messages, "arch", "Architecture did not match (%d to %d) on %s" % \
- (machine, elf.machine(), package_qa_clean_path(path,d)))
- elif not ((bits == elf.abiSize()) or \
- ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32" or target_os == "linux-gnun32"))):
+ is_32 = (("virtual/kernel" in provides) or bb.data.inherits_class("module", d)) and \
+ (target_os == "linux-gnux32" or re.match('mips64.*32', d.getVar('DEFAULTTUNE', True)))
+ if not ((machine == elf.machine()) or is_32):
+ package_qa_add_message(messages, "arch", "Architecture did not match (%s, expected %s) on %s" % \
+ (oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path,d)))
+ elif not ((bits == elf.abiSize()) or is_32):
package_qa_add_message(messages, "arch", "Bit size did not match (%d to %d) %s on %s" % \
(bits, elf.abiSize(), bpn, package_qa_clean_path(path,d)))
elif not littleendian == elf.isLittleEndian():
@@ -672,23 +694,24 @@ def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
trimmed = path.replace(os.path.join (d.getVar("PKGDEST", True), name), "")
package_qa_add_message(messages, "symlink-to-sysroot", "Symlink %s in %s points to TMPDIR" % (trimmed, name))
-def package_qa_check_license(workdir, d):
+# Check license variables
+do_populate_lic[postfuncs] += "populate_lic_qa_checksum"
+python populate_lic_qa_checksum() {
"""
- Check for changes in the license files
+ Check for changes in the license files.
"""
import tempfile
sane = True
- lic_files = d.getVar('LIC_FILES_CHKSUM', True)
+ lic_files = d.getVar('LIC_FILES_CHKSUM', True) or ''
lic = d.getVar('LICENSE', True)
pn = d.getVar('PN', True)
if lic == "CLOSED":
return
- if not lic_files:
- package_qa_handle_error("license-checksum", pn + ": Recipe file does not have license file information (LIC_FILES_CHKSUM)", d)
- return
+ if not lic_files and d.getVar('SRC_URI', True):
+ sane = package_qa_handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
srcdir = d.getVar('S', True)
@@ -696,7 +719,7 @@ def package_qa_check_license(workdir, d):
try:
(type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
except bb.fetch.MalformedUrl:
- package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
+ sane = package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
continue
srclicfile = os.path.join(srcdir, path)
if not os.path.isfile(srclicfile):
@@ -720,7 +743,7 @@ def package_qa_check_license(workdir, d):
linesout = 0
for line in fi:
lineno += 1
- if (lineno >= beginline):
+ if (lineno >= beginline):
if ((lineno <= endline) or not endline):
fo.write(line)
linesout += 1
@@ -752,7 +775,11 @@ def package_qa_check_license(workdir, d):
else:
msg = pn + ": LIC_FILES_CHKSUM is not specified for " + url
msg = msg + "\n" + pn + ": The md5 checksum is " + md5chksum
- package_qa_handle_error("license-checksum", msg, d)
+ sane = package_qa_handle_error("license-checksum", msg, d)
+
+ if not sane:
+ bb.fatal("Fatal QA errors found, failing task.")
+}
def package_qa_check_staged(path,d):
"""
@@ -984,12 +1011,12 @@ def package_qa_check_expanded_d(path,name,d,elf,messages):
return sane
def package_qa_check_encoding(keys, encode, d):
- def check_encoding(key,enc):
+ def check_encoding(key, enc):
sane = True
value = d.getVar(key, True)
if value:
try:
- s = unicode(value, enc)
+ s = value.encode(enc)
except UnicodeDecodeError as e:
error_msg = "%s has non %s characters" % (key,enc)
sane = False
@@ -1208,12 +1235,6 @@ Rerun configure task after fixing this.""")
Missing inherit gettext?""" % (gt, config))
###########################################################################
- # Check license variables
- ###########################################################################
-
- package_qa_check_license(workdir, d)
-
- ###########################################################################
# Check unrecognised configure options (with a white list)
###########################################################################
if bb.data.inherits_class("autotools", d):
@@ -1221,7 +1242,7 @@ Missing inherit gettext?""" % (gt, config))
try:
flag = "WARNING: unrecognized options:"
log = os.path.join(d.getVar('B', True), 'config.log')
- output = subprocess.check_output(['grep', '-F', flag, log]).replace(', ', ' ')
+ output = subprocess.check_output(['grep', '-F', flag, log]).decode("utf-8").replace(', ', ' ')
options = set()
for line in output.splitlines():
options |= set(line.partition(flag)[2].split())
@@ -1250,10 +1271,9 @@ Missing inherit gettext?""" % (gt, config))
}
python do_qa_unpack() {
- bb.note("Checking has ${S} been created")
-
+ src_uri = d.getVar('SRC_URI', True)
s_dir = d.getVar('S', True)
- if not os.path.exists(s_dir):
+ if src_uri and not os.path.exists(s_dir):
bb.warn('%s: the directory %s (%s) pointed to by the S variable doesn\'t exist - please set S within the recipe to point to where the source has been unpacked to' % (d.getVar('PN', True), d.getVar('S', False), s_dir))
}
@@ -1261,8 +1281,8 @@ python do_qa_unpack() {
#addtask qa_staging after do_populate_sysroot before do_build
do_populate_sysroot[postfuncs] += "do_qa_staging "
-# Check broken config.log files, for packages requiring Gettext which don't
-# have it in DEPENDS and for correct LIC_FILES_CHKSUM
+# Check for broken config.log files and for packages requiring Gettext
+# that don't have it in DEPENDS.
#addtask qa_configure after do_configure before do_compile
do_configure[postfuncs] += "do_qa_configure "
diff --git a/import-layers/yocto-poky/meta/classes/kernel-arch.bbclass b/import-layers/yocto-poky/meta/classes/kernel-arch.bbclass
index 3ed5986a5..ea976c66b 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-arch.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-arch.bbclass
@@ -14,7 +14,7 @@ valid_archs = "alpha cris ia64 \
parisc s390 v850 \
avr32 blackfin \
microblaze \
- nios2"
+ nios2 arc xtensa"
def map_kernel_arch(a, d):
import re
@@ -25,7 +25,7 @@ def map_kernel_arch(a, d):
elif re.match('armeb$', a): return 'arm'
elif re.match('aarch64$', a): return 'arm64'
elif re.match('aarch64_be$', a): return 'arm64'
- elif re.match('mips(el|64|64el)$', a): return 'mips'
+ elif re.match('mips(isa|)(32|64|)(r6|)(el|)$', a): return 'mips'
elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
elif re.match('sh(3|4)$', a): return 'sh'
elif re.match('bfin', a): return 'blackfin'
diff --git a/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass b/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass
index 332384de3..05be1f070 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass
@@ -1,4 +1,4 @@
-inherit kernel-uboot
+inherit kernel-uboot uboot-sign
python __anonymous () {
kerneltypes = d.getVar('KERNEL_IMAGETYPES', True) or ""
@@ -7,17 +7,28 @@ python __anonymous () {
depends = "%s u-boot-mkimage-native dtc-native" % depends
d.setVar("DEPENDS", depends)
+ if d.getVar("UBOOT_ARCH", True) == "x86":
+ replacementtype = "bzImage"
+ else:
+ replacementtype = "zImage"
+
# Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
    # to kernel.bbclass. We have to override it, since we pack zImage
    # (at least for now) into the fitImage.
typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE", True) or ""
if 'fitImage' in typeformake.split():
- d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('fitImage', 'zImage'))
+ d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('fitImage', replacementtype))
image = d.getVar('INITRAMFS_IMAGE', True)
if image:
d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+ # Verified boot will sign the fitImage and append the public key to
+    # the U-Boot dtb. We ensure the U-Boot dtb is deployed before assembling
+ # the fitImage:
+ if d.getVar('UBOOT_SIGN_ENABLE', True):
+ uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot', True) or 'u-boot'
+ d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_deploy' % uboot_pn)
}
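
Enabling the signing path above only requires the uboot-sign variables referenced in this hunk to be set; a sketch with illustrative values:

    UBOOT_SIGN_ENABLE = "1"
    UBOOT_SIGN_KEYDIR = "${TOPDIR}/fit-keys"
    UBOOT_SIGN_KEYNAME = "dev"   # expects dev.key/dev.crt in UBOOT_SIGN_KEYDIR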
# Options for the device tree compiler passed to mkimage '-D' feature:
@@ -132,6 +143,33 @@ EOF
}
#
+# Emit the fitImage ITS setup section
+#
+# $1 ... .its filename
+# $2 ... Image counter
+# $3 ... Path to setup image
+fitimage_emit_section_setup() {
+
+ setup_csum="sha1"
+
+ cat << EOF >> ${1}
+ setup@${2} {
+ description = "Linux setup.bin";
+ data = /incbin/("${3}");
+ type = "x86_setup";
+ arch = "${UBOOT_ARCH}";
+ os = "linux";
+ compression = "none";
+ load = <0x00090000>;
+ entry = <0x00090000>;
+ hash@1 {
+ algo = "${setup_csum}";
+ };
+ };
+EOF
+}
+
+#
# Emit the fitImage ITS ramdisk section
#
# $1 ... .its filename
@@ -193,29 +231,34 @@ EOF
# $2 ... Linux kernel ID
# $3 ... DTB image ID
# $4 ... ramdisk ID
+# $5 ... config ID
fitimage_emit_section_config() {
conf_csum="sha1"
+ if [ -n "${UBOOT_SIGN_ENABLE}" ] ; then
+ conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
+ fi
# Test if we have any DTBs at all
- if [ -z "${3}" -a -z "${4}" ] ; then
- conf_desc="Boot Linux kernel"
- fdt_line=""
- ramdisk_line=""
- elif [ -z "${4}" ]; then
- conf_desc="Boot Linux kernel with FDT blob"
- fdt_line="fdt = \"fdt@${3}\";"
- ramdisk_line=""
- elif [ -z "${3}" ]; then
- conf_desc="Boot Linux kernel with ramdisk"
- fdt_line=""
- ramdisk_line="ramdisk = \"ramdisk@${4}\";"
- else
- conf_desc="Boot Linux kernel with FDT blob, ramdisk"
+ conf_desc="Linux kernel"
+ kernel_line="kernel = \"kernel@${2}\";"
+ fdt_line=""
+ ramdisk_line=""
+
+ if [ -n "${3}" ]; then
+ conf_desc="${conf_desc}, FDT blob"
fdt_line="fdt = \"fdt@${3}\";"
+ fi
+
+ if [ -n "${4}" ]; then
+ conf_desc="${conf_desc}, ramdisk"
ramdisk_line="ramdisk = \"ramdisk@${4}\";"
fi
- kernel_line="kernel = \"kernel@${2}\";"
+
+ if [ -n "${5}" ]; then
+ conf_desc="${conf_desc}, setup"
+ setup_line="setup = \"setup@${5}\";"
+ fi
cat << EOF >> ${1}
default = "conf@1";
@@ -224,9 +267,40 @@ fitimage_emit_section_config() {
${kernel_line}
${fdt_line}
${ramdisk_line}
+ ${setup_line}
hash@1 {
algo = "${conf_csum}";
};
+EOF
+
+ if [ ! -z "${conf_sign_keyname}" ] ; then
+
+ sign_line="sign-images = \"kernel\""
+
+ if [ -n "${3}" ]; then
+ sign_line="${sign_line}, \"fdt\""
+ fi
+
+ if [ -n "${4}" ]; then
+ sign_line="${sign_line}, \"ramdisk\""
+ fi
+
+ if [ -n "${5}" ]; then
+ sign_line="${sign_line}, \"setup\""
+ fi
+
+ sign_line="${sign_line};"
+
+ cat << EOF >> ${1}
+ signature@1 {
+ algo = "${conf_csum},rsa2048";
+ key-name-hint = "${conf_sign_keyname}";
+ ${sign_line}
+ };
+EOF
+ fi
+
+ cat << EOF >> ${1}
};
EOF
}
@@ -241,6 +315,7 @@ fitimage_assemble() {
kernelcount=1
dtbcount=""
ramdiskcount=${3}
+ setupcount=""
rm -f ${1} arch/${ARCH}/boot/${2}
fitimage_emit_fit_header ${1}
@@ -274,7 +349,15 @@ fitimage_assemble() {
fi
#
- # Step 3: Prepare a ramdisk section.
+ # Step 3: Prepare a setup section. (For x86)
+ #
+ if test -e arch/${ARCH}/boot/setup.bin ; then
+ setupcount=1
+ fitimage_emit_section_setup ${1} "${setupcount}" arch/${ARCH}/boot/setup.bin
+ fi
+
+ #
+ # Step 4: Prepare a ramdisk section.
#
if [ "x${ramdiskcount}" = "x1" ] ; then
# Find and use the first initramfs image archive type we find
@@ -292,26 +375,39 @@ fitimage_assemble() {
# Force the first Kernel and DTB in the default config
kernelcount=1
- dtbcount=1
+ if test -n "${dtbcount}"; then
+ dtbcount=1
+ fi
#
- # Step 4: Prepare a configurations section
+ # Step 5: Prepare a configurations section
#
fitimage_emit_section_maint ${1} confstart
- fitimage_emit_section_config ${1} ${kernelcount} ${dtbcount} ${ramdiskcount}
+ fitimage_emit_section_config ${1} "${kernelcount}" "${dtbcount}" "${ramdiskcount}" "${setupcount}"
fitimage_emit_section_maint ${1} sectend
fitimage_emit_section_maint ${1} fitend
#
- # Step 5: Assemble the image
+ # Step 6: Assemble the image
#
uboot-mkimage \
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
-f ${1} \
arch/${ARCH}/boot/${2}
+
+ #
+ # Step 7: Sign the image and add public key to U-Boot dtb
+ #
+ if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then
+ uboot-mkimage \
+ ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
+ -F -k "${UBOOT_SIGN_KEYDIR}" \
+ -K "${DEPLOY_DIR_IMAGE}/${UBOOT_DTB_BINARY}" \
+ -r arch/${ARCH}/boot/${2}
+ fi
}
do_assemble_fitimage() {
diff --git a/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass b/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass
index e1a70e621..08d226276 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass
@@ -30,16 +30,12 @@ PACKAGESPLITFUNCS_prepend = "split_kernel_module_packages "
KERNEL_MODULES_META_PACKAGE ?= "kernel-modules"
+KERNEL_MODULE_PACKAGE_PREFIX ?= ""
+
python split_kernel_module_packages () {
import re
modinfoexp = re.compile("([^=]+)=(.*)")
- kerverrexp = re.compile('^(.*-hh.*)[\.\+].*$')
- depmodpat0 = re.compile("^(.*\.k?o):..*$")
- depmodpat1 = re.compile("^(.*\.k?o):\s*(.*\.k?o)\s*$")
- depmodpat2 = re.compile("^(.*\.k?o):\s*(.*\.k?o)\s*\\\$")
- depmodpat3 = re.compile("^\t(.*\.k?o)\s*\\\$")
- depmodpat4 = re.compile("^\t(.*\.k?o)\s*$")
def extract_modinfo(file):
import tempfile, subprocess
@@ -61,68 +57,6 @@ python split_kernel_module_packages () {
vals[m.group(1)] = m.group(2)
return vals
- def parse_depmod():
-
- dvar = d.getVar('PKGD', True)
-
- kernelver = d.getVar('KERNEL_VERSION', True)
- kernelver_stripped = kernelver
- m = kerverrexp.match(kernelver)
- if m:
- kernelver_stripped = m.group(1)
- staging_kernel_dir = d.getVar("STAGING_KERNEL_BUILDDIR", True)
- system_map_file = "%s/boot/System.map-%s" % (dvar, kernelver)
- if not os.path.exists(system_map_file):
- system_map_file = "%s/System.map-%s" % (staging_kernel_dir, kernelver)
- if not os.path.exists(system_map_file):
- bb.fatal("System.map-%s does not exist in '%s/boot' nor STAGING_KERNEL_BUILDDIR '%s'" % (kernelver, dvar, staging_kernel_dir))
-
- cmd = "depmod -n -a -b %s -F %s %s" % (dvar, system_map_file, kernelver_stripped)
- f = os.popen(cmd, 'r')
-
- deps = {}
- line = f.readline()
- while line:
- if not depmodpat0.match(line):
- line = f.readline()
- continue
- m1 = depmodpat1.match(line)
- if m1:
- deps[m1.group(1)] = m1.group(2).split()
- else:
- m2 = depmodpat2.match(line)
- if m2:
- deps[m2.group(1)] = m2.group(2).split()
- line = f.readline()
- m3 = depmodpat3.match(line)
- while m3:
- deps[m2.group(1)].extend(m3.group(1).split())
- line = f.readline()
- m3 = depmodpat3.match(line)
- m4 = depmodpat4.match(line)
- deps[m2.group(1)].extend(m4.group(1).split())
- line = f.readline()
- f.close()
- return deps
-
- def get_dependencies(file, pattern, format):
- # file no longer includes PKGD
- file = file.replace(d.getVar('PKGD', True) or '', '', 1)
- # instead is prefixed with /lib/modules/${KERNEL_VERSION}
- file = file.replace("/lib/modules/%s/" % d.getVar('KERNEL_VERSION', True) or '', '', 1)
-
- if file in module_deps:
- dependencies = []
- for i in module_deps[file]:
- m = re.match(pattern, os.path.basename(i))
- if not m:
- continue
- on = legitimize_package_name(m.group(1))
- dependency_pkg = format % on
- dependencies.append(dependency_pkg)
- return dependencies
- return []
-
def frob_metadata(file, pkg, pattern, format, basename):
vals = extract_modinfo(file)
@@ -171,7 +105,13 @@ python split_kernel_module_packages () {
d.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"])
rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
- for dep in get_dependencies(file, pattern, format):
+ modinfo_deps = []
+ if "depends" in vals and vals["depends"] != "":
+ for dep in vals["depends"].split(","):
+ on = legitimize_package_name(dep)
+ dependency_pkg = format % on
+ modinfo_deps.append(dependency_pkg)
+ for dep in modinfo_deps:
if not dep in rdepends:
rdepends[dep] = []
d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
@@ -179,9 +119,10 @@ python split_kernel_module_packages () {
# Avoid automatic -dev recommendations for modules ending with -dev.
d.setVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs', 1)
- module_deps = parse_depmod()
module_regex = '^(.*)\.k?o$'
- module_pattern = 'kernel-module-%s'
+
+ module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX', True)
+ module_pattern = module_pattern_prefix + 'kernel-module-%s'
postinst = d.getVar('pkg_postinst_modules', True)
postrm = d.getVar('pkg_postrm_modules', True)
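
KERNEL_MODULE_PACKAGE_PREFIX makes module packages from different kernel recipes co-installable. A sketch for a hypothetical second kernel recipe; the value is illustrative, and note the trailing dash, since the prefix is concatenated directly with "kernel-module-":

    KERNEL_MODULE_PACKAGE_PREFIX = "rt-"
    # modules are then packaged as rt-kernel-module-<name>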
diff --git a/import-layers/yocto-poky/meta/classes/kernel-uimage.bbclass b/import-layers/yocto-poky/meta/classes/kernel-uimage.bbclass
index 2a187f549..340503a2d 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-uimage.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-uimage.bbclass
@@ -14,8 +14,7 @@ python __anonymous () {
if d.getVar("KEEPUIMAGE", True) != 'yes':
typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE", True) or ""
if "uImage" in typeformake.split():
- typeformake.replace('uImage', 'vmlinux')
- d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake)
+ d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('uImage', 'vmlinux'))
}
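
The fix works because Python strings are immutable: str.replace() returns a new string and leaves the original untouched, so the old code threw the result away. A quick illustration:

    >>> s = "uImage"
    >>> s.replace("uImage", "vmlinux")
    'vmlinux'
    >>> s    # unchanged; the replacement above was discarded
    'uImage'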
do_uboot_mkimage() {
diff --git a/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass b/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass
index f86b3ef01..6160a29ec 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass
@@ -1,5 +1,7 @@
# remove tasks that modify the source tree in case externalsrc is inherited
-SRCTREECOVEREDTASKS += "do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_shared_workdir do_fetch do_unpack do_patch"
+SRCTREECOVEREDTASKS += "do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_fetch do_unpack do_patch"
+PATCH_GIT_USER_EMAIL ?= "kernel-yocto@oe"
+PATCH_GIT_USER_NAME ?= "OpenEmbedded"
# returns local (absolute) path names for all valid patches in the
# src_uri
@@ -119,72 +121,54 @@ do_kernel_metadata() {
patches="${@" ".join(find_patches(d))}"
feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}"
- # add any explicitly referenced features onto the end of the feature
- # list that is passed to the kernel build scripts.
- if [ -n "${KERNEL_FEATURES}" ]; then
- for feat in ${KERNEL_FEATURES}; do
- addon_features="$addon_features --feature $feat"
- done
- fi
-
# check for feature directories/repos/branches that were part of the
# SRC_URI. If they were supplied, we convert them into include directives
# for the update part of the process
- if [ -n "${feat_dirs}" ]; then
- for f in ${feat_dirs}; do
+ for f in ${feat_dirs}; do
if [ -d "${WORKDIR}/$f/meta" ]; then
- includes="$includes -I${WORKDIR}/$f/meta"
- elif [ -d "${WORKDIR}/$f" ]; then
- includes="$includes -I${WORKDIR}/$f"
+ includes="$includes -I${WORKDIR}/$f/kernel-meta"
+ elif [ -d "${WORKDIR}/$f" ]; then
+ includes="$includes -I${WORKDIR}/$f"
fi
- done
+ done
+ for s in ${sccs} ${patches}; do
+ sdir=$(dirname $s)
+ includes="$includes -I${sdir}"
+ # if a SRC_URI passed patch or .scc has a subdir of "kernel-meta",
+ # then we add it to the search path
+ if [ -d "${sdir}/kernel-meta" ]; then
+ includes="$includes -I${sdir}/kernel-meta"
+ fi
+ done
+
+ # expand kernel features into their full path equivalents
+ bsp_definition=$(spp ${includes} --find -DKMACHINE=${KMACHINE} -DKTYPE=${LINUX_KERNEL_TYPE})
+ meta_dir=$(kgit --meta)
+
+ # run1: pull all the configuration fragments, no matter where they come from
+ elements="`echo -n ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}`"
+ if [ -n "${elements}" ]; then
+ scc --force -o ${S}/${meta_dir}:cfg,meta ${includes} ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}
fi
- # updates or generates the target description
- updateme ${updateme_flags} -DKDESC=${KMACHINE}:${LINUX_KERNEL_TYPE} \
- ${includes} ${addon_features} ${ARCH} ${KMACHINE} ${sccs} ${patches}
- if [ $? -ne 0 ]; then
- bbfatal_log "Could not update ${machine_branch}"
+ # run2: only generate patches for elements that have been passed on the SRC_URI
+ elements="`echo -n ${sccs} ${patches} ${KERNEL_FEATURES}`"
+ if [ -n "${elements}" ]; then
+ scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} ${KERNEL_FEATURES}
fi
}
do_patch() {
cd ${S}
- # executes and modifies the source tree as required
- patchme ${KMACHINE}
- if [ $? -ne 0 ]; then
- bberror "Could not apply patches for ${KMACHINE}."
- bbfatal_log "Patch failures can be resolved in the linux source directory ${S})"
- fi
-
- # check to see if the specified SRCREV is reachable from the final branch.
- # if it wasn't something wrong has happened, and we should error.
- machine_srcrev="${SRCREV_machine}"
- if [ -z "${machine_srcrev}" ]; then
- # fallback to SRCREV if a non machine_meta tree is being built
- machine_srcrev="${SRCREV}"
- # if SRCREV cannot be reached something is wrong.
- if [ -z "${machine_srcrev}" ]; then
- bbfatal "Neither SRCREV_machine or SRCREV was specified!"
- fi
- fi
-
- current_branch=`git rev-parse --abbrev-ref HEAD`
- machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
- if [ "${current_branch}" != "${machine_branch}" ]; then
- bbwarn "After meta data application, the kernel tree branch is ${current_branch}. The"
- bbwarn "SRC_URI specified branch ${machine_branch}. The branch will be forced to ${machine_branch},"
- bbwarn "but this means the board meta data (.scc files) do not match the SRC_URI specification."
- bbwarn "The meta data and branch ${machine_branch} should be inspected to ensure the proper"
- bbwarn "kernel is being built."
- git checkout -f ${machine_branch}
- fi
-
- if [ "${machine_srcrev}" != "AUTOINC" ]; then
- if ! [ "$(git rev-parse --verify ${machine_srcrev}~0)" = "$(git merge-base ${machine_srcrev} HEAD)" ]; then
- bberror "SRCREV ${machine_srcrev} was specified, but is not reachable"
- bbfatal "Check the BSP description for incorrect branch selection, or other errors."
+ check_git_config
+ meta_dir=$(kgit --meta)
+ (cd ${meta_dir}; ln -sf patch.queue series)
+ if [ -f "${meta_dir}/series" ]; then
+ kgit-s2q --gen -v --patches .kernel-meta/
+ if [ $? -ne 0 ]; then
+ bberror "Could not apply patches for ${KMACHINE}."
+			bbfatal_log "Patch failures can be resolved in the linux source directory ${S}"
fi
fi
}
@@ -253,26 +237,37 @@ do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot"
do_kernel_configme[dirs] += "${S} ${B}"
do_kernel_configme() {
- bbnote "kernel configme"
- export KMETA=${KMETA}
+ set +e
- if [ -n "${KCONFIG_MODE}" ]; then
- configmeflags=${KCONFIG_MODE}
- else
- # If a defconfig was passed, use =n as the baseline, which is achieved
- # via --allnoconfig
+ # translate the kconfig_mode into something that merge_config.sh
+ # understands
+ case ${KCONFIG_MODE} in
+ *allnoconfig)
+ config_flags="-n"
+ ;;
+ *alldefconfig)
+ config_flags=""
+ ;;
+ *)
if [ -f ${WORKDIR}/defconfig ]; then
- configmeflags="--allnoconfig"
+ config_flags="-n"
fi
- fi
+ ;;
+ esac
cd ${S}
- PATH=${PATH}:${S}/scripts/util
- configme ${configmeflags} --reconfig --output ${B} ${LINUX_KERNEL_TYPE} ${KMACHINE}
+
+ meta_dir=$(kgit --meta)
+ configs="$(scc --configs -o ${meta_dir})"
+ if [ -z "${configs}" ]; then
+ bbfatal_log "Could not find configuration queue (${meta_dir}/config.queue)"
+ fi
+
+ CFLAGS="${CFLAGS} ${TOOLCHAIN_OPTIONS}" ARCH=${ARCH} merge_config.sh -O ${B} ${config_flags} ${configs} > ${meta_dir}/cfg/merge_config_build.log 2>&1
if [ $? -ne 0 ]; then
bbfatal_log "Could not configure ${KMACHINE}-${LINUX_KERNEL_TYPE}"
fi
-
+
echo "# Global settings from linux recipe" >> ${B}/.config
echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
}
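
The case statement keys off the tail of KCONFIG_MODE, so either of the old flag spellings still selects the matching merge_config.sh behaviour; for example (illustrative):

    KCONFIG_MODE = "--allnoconfig"    # -> merge_config.sh -n (baseline of =n)
    KCONFIG_MODE = "--alldefconfig"   # -> merge_config.sh with Kconfig defaults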
@@ -290,36 +285,23 @@ python do_kernel_configcheck() {
kmeta = "." + kmeta
pathprefix = "export PATH=%s:%s; " % (d.getVar('PATH', True), "${S}/scripts/util/")
- cmd = d.expand("cd ${S}; kconf_check -config %s/meta-series ${S} ${B}" % kmeta)
+
+ cmd = d.expand("scc --configs -o ${S}/.kernel-meta")
+ ret, configs = oe.utils.getstatusoutput("%s%s" % (pathprefix, cmd))
+
+ cmd = d.expand("cd ${S}; kconf_check --report -o ${S}/%s/cfg/ ${B}/.config ${S} %s" % (kmeta,configs))
ret, result = oe.utils.getstatusoutput("%s%s" % (pathprefix, cmd))
config_check_visibility = int(d.getVar( "KCONF_AUDIT_LEVEL", True ) or 0)
bsp_check_visibility = int(d.getVar( "KCONF_BSP_AUDIT_LEVEL", True ) or 0)
# if config check visibility is non-zero, report dropped configuration values
- mismatch_file = "${S}/" + kmeta + "/" + "mismatch.cfg"
+ mismatch_file = d.expand("${S}/%s/cfg/mismatch.txt" % kmeta)
if os.path.exists(mismatch_file):
if config_check_visibility:
with open (mismatch_file, "r") as myfile:
results = myfile.read()
bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results)
-
- # if config check visibility is level 2 or higher, report non-hardware options
- nonhw_file = "${S}/" + kmeta + "/" + "nonhw_report.cfg"
- if os.path.exists(nonhw_file):
- if config_check_visibility > 1:
- with open (nonhw_file, "r") as myfile:
- results = myfile.read()
- bb.warn( "[kernel config]: BSP specified non-hw configuration:\n\n%s" % results)
-
- bsp_desc = "${S}/" + kmeta + "/" + "top_tgt"
- if os.path.exists(bsp_desc) and bsp_check_visibility > 1:
- with open (bsp_desc, "r") as myfile:
- bsp_tgt = myfile.read()
- m = re.match("^(.*)scratch.obj(.*)$", bsp_tgt)
- if not m is None:
- bb.warn( "[kernel]: An auto generated BSP description was used, this normally indicates a misconfiguration.\n" +
- "Check that your machine (%s) has an associated kernel description." % "${MACHINE}" )
}
# Ensure that the branches (BSP and meta) are on the locations specified by
diff --git a/import-layers/yocto-poky/meta/classes/kernel.bbclass b/import-layers/yocto-poky/meta/classes/kernel.bbclass
index e7e2cb396..25a153cd2 100644
--- a/import-layers/yocto-poky/meta/classes/kernel.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel.bbclass
@@ -1,7 +1,7 @@
inherit linux-kernel-base kernel-module-split
PROVIDES += "virtual/kernel"
-DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native depmodwrapper-cross bc-native"
+DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native depmodwrapper-cross bc-native lzop-native"
S = "${STAGING_KERNEL_DIR}"
B = "${WORKDIR}/build"
@@ -127,9 +127,9 @@ PACKAGES_DYNAMIC += "^kernel-firmware-.*"
export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"
-KERNEL_PRIORITY ?= "${@int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[0]) * 10000 + \
- int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[1]) * 100 + \
- int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[-1])}"
+KERNEL_PRIORITY ?= "${@int(d.getVar('PV', True).split('-')[0].split('+')[0].split('.')[0]) * 10000 + \
+ int(d.getVar('PV', True).split('-')[0].split('+')[0].split('.')[1]) * 100 + \
+ int(d.getVar('PV', True).split('-')[0].split('+')[0].split('.')[-1])}"
KERNEL_RELEASE ?= "${KERNEL_VERSION}"
@@ -140,7 +140,7 @@ KERNEL_IMAGEDEST = "boot"
#
# configuration
#
-export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE",1) or "ttyS0"}"
+export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE", True) or "ttyS0"}"
KERNEL_VERSION = "${@get_kernelversion_headers('${B}')}"
@@ -156,10 +156,7 @@ UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
# Some Linux kernel configurations need additional parameters on the command line
KERNEL_EXTRA_ARGS ?= ""
-# For the kernel, we don't want the '-e MAKEFLAGS=' in EXTRA_OEMAKE.
-# We don't want to override kernel Makefile variables from the environment
-EXTRA_OEMAKE = ""
-
+EXTRA_OEMAKE = " HOSTCC="${BUILD_CC}" HOSTCPP="${BUILD_CPP}""
KERNEL_ALT_IMAGETYPE ??= ""
copy_initramfs() {
@@ -203,7 +200,7 @@ copy_initramfs() {
echo "Finished copy of initramfs into ./usr"
}
-INITRAMFS_BASE_NAME = "initramfs-${PV}-${PR}-${MACHINE}-${DATETIME}"
+INITRAMFS_BASE_NAME ?= "initramfs-${PV}-${PR}-${MACHINE}-${DATETIME}"
INITRAMFS_BASE_NAME[vardepsexclude] = "DATETIME"
do_bundle_initramfs () {
if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
@@ -232,8 +229,7 @@ do_bundle_initramfs () {
if [ -n "$realpath" ]; then
mv -f $realpath $realpath.initramfs
mv -f $realpath.bak $realpath
- cd ${B}/${KERNEL_OUTPUT_DIR}
- ln -sf $linkpath.initramfs
+ ln -sf $linkpath.initramfs ${B}/${KERNEL_OUTPUT_DIR}/$type.initramfs
else
mv -f ${KERNEL_OUTPUT_DIR}/$type ${KERNEL_OUTPUT_DIR}/$type.initramfs
mv -f ${KERNEL_OUTPUT_DIR}/$type.bak ${KERNEL_OUTPUT_DIR}/$type
@@ -247,6 +243,7 @@ do_bundle_initramfs () {
done
fi
}
+do_bundle_initramfs[dirs] = "${B}"
python do_devshell_prepend () {
os.environ["LDFLAGS"] = ''
@@ -291,7 +288,7 @@ do_compile_kernelmodules() {
# external kernel modules has a dependency on
# other kernel modules and will look at this
# file to do symbol lookups
- cp Module.symvers ${STAGING_KERNEL_BUILDDIR}/
+ cp ${B}/Module.symvers ${STAGING_KERNEL_BUILDDIR}/
else
bbnote "no modules to compile"
fi
@@ -330,6 +327,36 @@ kernel_do_install() {
}
do_install[prefuncs] += "package_get_auto_pr"
+# Must be run no earlier than after do_kernel_checkout, or else the Makefile won't yet be at ${S}/Makefile
+do_kernel_version_sanity_check() {
+ # The Makefile determines the kernel version shown at runtime
+ # Don't use KERNEL_VERSION because the headers it grabs the version from aren't generated until do_compile
+ VERSION=$(grep "^VERSION =" ${S}/Makefile | sed s/.*=\ *//)
+ PATCHLEVEL=$(grep "^PATCHLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
+ SUBLEVEL=$(grep "^SUBLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
+ EXTRAVERSION=$(grep "^EXTRAVERSION =" ${S}/Makefile | sed s/.*=\ *//)
+
+ # Build a string for regex and a plain version string
+ reg="^${VERSION}\.${PATCHLEVEL}"
+ vers="${VERSION}.${PATCHLEVEL}"
+ if [ -n "${SUBLEVEL}" ]; then
+ # Ignoring a SUBLEVEL of zero is fine
+ if [ "${SUBLEVEL}" = "0" ]; then
+ reg="${reg}(\.${SUBLEVEL})?"
+ else
+ reg="${reg}\.${SUBLEVEL}"
+ vers="${vers}.${SUBLEVEL}"
+ fi
+ fi
+ vers="${vers}${EXTRAVERSION}"
+ reg="${reg}${EXTRAVERSION}"
+
+ if [ -z `echo ${PV} | grep -E "${reg}"` ]; then
+		bbfatal "Package Version (${PV}) does not match the version of the kernel being built (${vers}). Please update the PV variable to match the kernel source."
+ fi
+ exit 0
+}
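
A worked example of the regex being built (values illustrative): a Makefile carrying VERSION=4, PATCHLEVEL=8, SUBLEVEL=0 and an empty EXTRAVERSION yields

    reg="^4\.8(\.0)?"   vers="4.8"
    PV = "4.8" or "4.8.0"  -> passes;  PV = "4.9" -> fails

so a SUBLEVEL of zero may be omitted from PV, as the comment in the function promises.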
+
addtask shared_workdir after do_compile before do_compile_kernelmodules
addtask shared_workdir_setscene
@@ -364,6 +391,14 @@ do_shared_workdir () {
cp .config $kerneldir/
mkdir -p $kerneldir/include/config
cp include/config/kernel.release $kerneldir/include/config/kernel.release
+ if [ -e certs/signing_key.pem ]; then
+ # The signing_key.* files are stored in the certs/ dir in
+ # newer Linux kernels
+ mkdir -p $kerneldir/certs
+ cp certs/signing_key.* $kerneldir/certs/
+ elif [ -e signing_key.priv ]; then
+ cp signing_key.* $kerneldir/
+ fi
# We can also copy over all the generated files and avoid special cases
# like version.h, but we've opted to keep this small until file creep starts
@@ -434,6 +469,7 @@ kernel_do_configure() {
}
do_savedefconfig() {
+ bbplain "Saving defconfig to:\n${B}/defconfig"
oe_runmake -C ${B} savedefconfig
}
do_savedefconfig[nostamp] = "1"
@@ -484,15 +520,18 @@ python split_kernel_packages () {
}
# Many scripts want to look in arch/$arch/boot for the bootable
-# image. This poses a problem for vmlinux based booting. This
-# task arranges to have vmlinux appear in the normalized directory
-# location.
-do_kernel_link_vmlinux() {
+# image. This poses a problem for vmlinux and vmlinuz based
+# booting. This task arranges to have vmlinux and vmlinuz appear
+# in the normalized directory location.
+do_kernel_link_images() {
if [ ! -d "${B}/arch/${ARCH}/boot" ]; then
mkdir ${B}/arch/${ARCH}/boot
fi
cd ${B}/arch/${ARCH}/boot
ln -sf ../../../vmlinux
+ if [ -f ../../../vmlinuz ]; then
+ ln -sf ../../../vmlinuz
+ fi
}
do_strip() {
@@ -522,7 +561,7 @@ do_strip() {
}
do_strip[dirs] = "${B}"
-addtask do_strip before do_sizecheck after do_kernel_link_vmlinux
+addtask do_strip before do_sizecheck after do_kernel_link_images
# Support checking the kernel size since some kernels need to reside in partitions
# with a fixed length or there is a limit in transferring the kernel to memory
@@ -583,8 +622,7 @@ kernel_do_deploy() {
initramfs_base_name=${type}-${INITRAMFS_BASE_NAME}
initramfs_symlink_name=${type}-initramfs-${MACHINE}
install -m 0644 ${KERNEL_OUTPUT_DIR}/${type}.initramfs ${DEPLOYDIR}/${initramfs_base_name}.bin
- cd ${DEPLOYDIR}
- ln -sf ${initramfs_base_name}.bin ${initramfs_symlink_name}.bin
+ ln -sf ${initramfs_base_name}.bin ${DEPLOYDIR}/${initramfs_symlink_name}.bin
fi
done
}
@@ -595,4 +633,3 @@ do_deploy[prefuncs] += "package_get_auto_pr"
addtask deploy after do_populate_sysroot
EXPORT_FUNCTIONS do_deploy
-
diff --git a/import-layers/yocto-poky/meta/classes/libc-common.bbclass b/import-layers/yocto-poky/meta/classes/libc-common.bbclass
index bbc80167d..11b0065a6 100644
--- a/import-layers/yocto-poky/meta/classes/libc-common.bbclass
+++ b/import-layers/yocto-poky/meta/classes/libc-common.bbclass
@@ -4,14 +4,12 @@ do_install() {
h=`echo $r|sed -e's,\.x$,.h,'`
install -m 0644 ${S}/sunrpc/rpcsvc/$h ${D}/${includedir}/rpcsvc/
done
- install -d ${D}/${sysconfdir}/
- install -m 0644 ${WORKDIR}/etc/ld.so.conf ${D}/${sysconfdir}/
+ install -Dm 0644 ${WORKDIR}/etc/ld.so.conf ${D}/${sysconfdir}/ld.so.conf
install -d ${D}${localedir}
make -f ${WORKDIR}/generate-supported.mk IN="${S}/localedata/SUPPORTED" OUT="${WORKDIR}/SUPPORTED"
# get rid of some broken files...
for i in ${GLIBC_BROKEN_LOCALES}; do
- grep -v $i ${WORKDIR}/SUPPORTED > ${WORKDIR}/SUPPORTED.tmp
- mv ${WORKDIR}/SUPPORTED.tmp ${WORKDIR}/SUPPORTED
+ sed -i "/$i/d" ${WORKDIR}/SUPPORTED
done
rm -f ${D}${sysconfdir}/rpc
rm -rf ${D}${datadir}/zoneinfo
diff --git a/import-layers/yocto-poky/meta/classes/libc-package.bbclass b/import-layers/yocto-poky/meta/classes/libc-package.bbclass
index 467d56792..2dc90c44d 100644
--- a/import-layers/yocto-poky/meta/classes/libc-package.bbclass
+++ b/import-layers/yocto-poky/meta/classes/libc-package.bbclass
@@ -47,15 +47,6 @@ python __anonymous () {
OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
-do_configure_prepend() {
- if [ -e ${S}/elf/ldd.bash.in ]; then
- sed -e "s#@BASH@#/bin/sh#" -i ${S}/elf/ldd.bash.in
- fi
-}
-
-
-
-# indentation removed on purpose
locale_base_postinst() {
#!/bin/sh
@@ -63,33 +54,14 @@ if [ "x$D" != "x" ]; then
exit 1
fi
-rm -rf ${TMP_LOCALE}
-mkdir -p ${TMP_LOCALE}
-if [ -f ${localedir}/locale-archive ]; then
- cp ${localedir}/locale-archive ${TMP_LOCALE}/
-fi
-localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s --prefix=/tmp/locale %s
-mkdir -p ${localedir}/
-mv ${TMP_LOCALE}/locale-archive ${localedir}/
-rm -rf ${TMP_LOCALE}
+localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s
}
-# indentation removed on purpose
locale_base_postrm() {
#!/bin/sh
-
-rm -rf ${TMP_LOCALE}
-mkdir -p ${TMP_LOCALE}
-if [ -f ${localedir}/locale-archive ]; then
- cp ${localedir}/locale-archive ${TMP_LOCALE}/
-fi
-localedef --delete-from-archive --inputfile=${datadir}/locales/%s --charmap=%s --prefix=/tmp/locale %s
-mv ${TMP_LOCALE}/locale-archive ${localedir}/
-rm -rf ${TMP_LOCALE}
+localedef --delete-from-archive --inputfile=${datadir}/locales/%s --charmap=%s %s
}
-
-TMP_LOCALE="/tmp/locale${localedir}"
LOCALETREESRC ?= "${PKGD}"
do_prep_locale_tree() {
@@ -150,6 +122,7 @@ python package_do_split_gconvs () {
c_re = re.compile('^copy "(.*)"')
i_re = re.compile('^include "(\w+)".*')
for l in f.readlines():
+ l = l.decode("latin-1")
m = c_re.match(l) or i_re.match(l)
if m:
dp = legitimize_package_name('%s%s-gconv-%s' % (mlprefix, bpn, m.group(1)))
@@ -171,6 +144,7 @@ python package_do_split_gconvs () {
c_re = re.compile('^copy "(.*)"')
i_re = re.compile('^include "(\w+)".*')
for l in f.readlines():
+ l = l.decode("latin-1")
m = c_re.match(l) or i_re.match(l)
if m:
dp = legitimize_package_name('%s%s-charmap-%s' % (mlprefix, bpn, m.group(1)))
@@ -191,6 +165,7 @@ python package_do_split_gconvs () {
c_re = re.compile('^copy "(.*)"')
i_re = re.compile('^include "(\w+)".*')
for l in f.readlines():
+ l = l.decode("latin-1")
m = c_re.match(l) or i_re.match(l)
if m:
dp = legitimize_package_name(mlprefix+bpn+'-localedata-%s' % m.group(1))
@@ -223,7 +198,7 @@ python package_do_split_gconvs () {
    # The GLIBC_GENERATE_LOCALES var specifies which locales are to be generated; empty or "all" means all locales
to_generate = d.getVar('GLIBC_GENERATE_LOCALES', True)
if not to_generate or to_generate == 'all':
- to_generate = supported.keys()
+ to_generate = sorted(supported.keys())
else:
to_generate = to_generate.split()
for locale in to_generate:
@@ -274,9 +249,13 @@ python package_do_split_gconvs () {
"powerpc": " --uint32-align=4 --big-endian ", \
"powerpc64": " --uint32-align=4 --big-endian ", \
"mips": " --uint32-align=4 --big-endian ", \
+ "mipsisa32r6": " --uint32-align=4 --big-endian ", \
"mips64": " --uint32-align=4 --big-endian ", \
+ "mipsisa64r6": " --uint32-align=4 --big-endian ", \
"mipsel": " --uint32-align=4 --little-endian ", \
+ "mipsisa32r6el": " --uint32-align=4 --little-endian ", \
"mips64el":" --uint32-align=4 --little-endian ", \
+ "mipsisa64r6el":" --uint32-align=4 --little-endian ", \
"i586": " --uint32-align=4 --little-endian ", \
"i686": " --uint32-align=4 --little-endian ", \
"x86_64": " --uint32-align=4 --little-endian " }
@@ -285,9 +264,9 @@ python package_do_split_gconvs () {
localedef_opts = locale_arch_options[target_arch]
else:
bb.error("locale_arch_options not found for target_arch=" + target_arch)
- raise bb.build.FuncFailed("unknown arch:" + target_arch + " for locale_arch_options")
+ bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
- localedef_opts += " --force --old-style --no-archive --prefix=%s \
+ localedef_opts += " --force --no-archive --prefix=%s \
--inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s" \
% (treedir, treedir, datadir, locale, encoding, outputpath, name)
@@ -295,7 +274,7 @@ python package_do_split_gconvs () {
(path, i18npath, gconvpath, localedef_opts)
else: # earlier slower qemu way
qemu = qemu_target_binary(d)
- localedef_opts = "--force --old-style --no-archive --prefix=%s \
+ localedef_opts = "--force --no-archive --prefix=%s \
--inputfile=%s/i18n/locales/%s --charmap=%s %s" \
% (treedir, datadir, locale, encoding, name)
@@ -388,4 +367,3 @@ python package_do_split_gconvs () {
python populate_packages_prepend () {
bb.build.exec_func('package_do_split_gconvs', d)
}
-
diff --git a/import-layers/yocto-poky/meta/classes/license.bbclass b/import-layers/yocto-poky/meta/classes/license.bbclass
index 43944e6ee..da4fc3e1d 100644
--- a/import-layers/yocto-poky/meta/classes/license.bbclass
+++ b/import-layers/yocto-poky/meta/classes/license.bbclass
@@ -181,8 +181,10 @@ def license_deployed_manifest(d):
key,val = line.split(": ", 1)
man_dic[dep][key] = val[:-1]
- image_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
- d.getVar('IMAGE_NAME', True), 'image_license.manifest')
+ lic_manifest_dir = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
+ d.getVar('IMAGE_NAME', True))
+ bb.utils.mkdirhier(lic_manifest_dir)
+ image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest')
write_license_files(d, image_license_manifest, man_dic)
def get_deployed_dependencies(d):
@@ -198,7 +200,7 @@ def get_deployed_dependencies(d):
# it might contain the bootloader.
taskdata = d.getVar("BB_TASKDEPDATA", False)
depends = list(set([dep[0] for dep
- in taskdata.itervalues()
+ in list(taskdata.values())
if not dep[0].endswith("-native")]))
extra_depends = d.getVar("EXTRA_IMAGEDEPENDS", True)
boot_depends = get_boot_dependencies(d)
@@ -259,7 +261,7 @@ def get_boot_dependencies(d):
depends.append(dep)
# We need to search for the provider of the dependency
else:
- for taskdep in taskdepdata.itervalues():
+ for taskdep in taskdepdata.values():
# The fifth field contains what the task provides
if dep in taskdep[4]:
info_file = os.path.join(
@@ -340,6 +342,7 @@ def add_package_and_files(d):
def copy_license_files(lic_files_paths, destdir):
import shutil
+ import errno
bb.utils.mkdirhier(destdir)
for (basename, path) in lic_files_paths:
@@ -348,12 +351,21 @@ def copy_license_files(lic_files_paths, destdir):
dst = os.path.join(destdir, basename)
if os.path.exists(dst):
os.remove(dst)
- if os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev):
- os.link(src, dst)
+ canlink = os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev)
+ if canlink:
try:
- os.chown(dst,0,0)
+ os.link(src, dst)
+ except OSError as err:
+ if err.errno == errno.EXDEV:
+ # Copy license files if hard-link is not possible even if st_dev is the
+ # same on source and destination (docker container with device-mapper?)
+ canlink = False
+ else:
+ raise
+ try:
+ if canlink:
+ os.chown(dst,0,0)
except OSError as err:
- import errno
if err.errno in (errno.EPERM, errno.EINVAL):
# Suppress "Operation not permitted" error, as
# sometimes this function is not executed under pseudo.
@@ -362,7 +374,7 @@ def copy_license_files(lic_files_paths, destdir):
pass
else:
raise
- else:
+ if not canlink:
shutil.copyfile(src, dst)
except Exception as e:
bb.warn("Could not copy license file %s to %s: %s" % (src, dst, e))
@@ -373,21 +385,8 @@ def find_license_files(d):
"""
import shutil
import oe.license
+ from collections import defaultdict, OrderedDict
- pn = d.getVar('PN', True)
- for package in d.getVar('PACKAGES', True):
- if d.getVar('LICENSE_' + package, True):
- license_types = license_types + ' & ' + \
- d.getVar('LICENSE_' + package, True)
-
- #If we get here with no license types, then that means we have a recipe
- #level license. If so, we grab only those.
- try:
- license_types
- except NameError:
- # All the license types at the recipe level
- license_types = d.getVar('LICENSE', True)
-
# All the license files for the package
lic_files = d.getVar('LIC_FILES_CHKSUM', True)
pn = d.getVar('PN', True)
@@ -397,6 +396,8 @@ def find_license_files(d):
generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
# List of basename, path tuples
lic_files_paths = []
+ # Entries from LIC_FILES_CHKSUM
+ lic_chksums = {}
license_source_dirs = []
license_source_dirs.append(generic_directory)
try:
@@ -426,7 +427,6 @@ def find_license_files(d):
license_source = None
# If the generic does not exist we need to check to see if there is an SPDX mapping to it,
# unless NO_GENERIC_LICENSE is set.
-
for lic_dir in license_source_dirs:
if not os.path.isfile(os.path.join(lic_dir, license_type)):
if d.getVarFlag('SPDXLICENSEMAP', license_type, True) != None:
@@ -440,6 +440,7 @@ def find_license_files(d):
license_source = lic_dir
break
+ non_generic_lic = d.getVarFlag('NO_GENERIC_LICENSE', license_type, True)
if spdx_generic and license_source:
# we really should copy to generic_ + spdx_generic, however, that ends up messing the manifest
        # audit up. This should be fixed in emit_pkgdata (or we should actually go and fix all the recipes)
@@ -451,13 +452,11 @@ def find_license_files(d):
if d.getVarFlag('NO_GENERIC_LICENSE', license_type, True):
bb.warn("%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type))
- elif d.getVarFlag('NO_GENERIC_LICENSE', license_type, True):
+ elif non_generic_lic and non_generic_lic in lic_chksums:
# if NO_GENERIC_LICENSE is set, we copy the license files from the fetched source
# of the package rather than the license_source_dirs.
- for (basename, path) in lic_files_paths:
- if d.getVarFlag('NO_GENERIC_LICENSE', license_type, True) == basename:
- lic_files_paths.append(("generic_" + license_type, path))
- break
+ lic_files_paths.append(("generic_" + license_type,
+ os.path.join(srcdir, non_generic_lic)))
else:
            # Explicitly avoid the CLOSED license, because it isn't generic
if license_type != 'CLOSED':
@@ -466,7 +465,7 @@ def find_license_files(d):
pass
if not generic_directory:
- raise bb.build.FuncFailed("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
+ bb.fatal("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
if not lic_files:
# No recipe should have an invalid license file. This is checked else
@@ -478,19 +477,32 @@ def find_license_files(d):
try:
(type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
except bb.fetch.MalformedUrl:
- raise bb.build.FuncFailed("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF', True), url))
+ bb.fatal("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF', True), url))
# We want the license filename and path
- srclicfile = os.path.join(srcdir, path)
- lic_files_paths.append((os.path.basename(path), srclicfile))
+ chksum = parm['md5'] if 'md5' in parm else parm['sha256']
+ lic_chksums[path] = chksum
v = FindVisitor()
try:
- v.visit_string(license_types)
+ v.visit_string(d.getVar('LICENSE', True))
except oe.license.InvalidLicense as exc:
bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
except SyntaxError:
        bb.warn("%s: Failed to parse its LICENSE field." % (d.getVar('PF', True)))
+ # Add files from LIC_FILES_CHKSUM to list of license files
+ lic_chksum_paths = defaultdict(OrderedDict)
+ for path, chksum in lic_chksums.items():
+ lic_chksum_paths[os.path.basename(path)][chksum] = os.path.join(srcdir, path)
+ for basename, files in lic_chksum_paths.items():
+ if len(files) == 1:
+ lic_files_paths.append((basename, list(files.values())[0]))
+ else:
+ # If there are multiple different license files with identical
+ # basenames we rename them to <file>.0, <file>.1, ...
+ for i, path in enumerate(files.values()):
+ lic_files_paths.append(("%s.%d" % (basename, i), path))
+
return lic_files_paths
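
The renaming branch above handles recipes whose LIC_FILES_CHKSUM points at several distinct files sharing a basename; a sketch with the checksums elided as placeholders:

    LIC_FILES_CHKSUM = "file://COPYING;md5=<md5-of-top-COPYING> \
                        file://lib/COPYING;md5=<md5-of-lib-COPYING>"
    # the two files are deployed as COPYING.0 and COPYING.1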
def return_spdx(d, license):
@@ -633,7 +645,7 @@ def check_license_format(d):
licenses = d.getVar('LICENSE', True)
from oe.license import license_operator, license_operator_chars, license_pattern
- elements = filter(lambda x: x.strip(), license_operator.split(licenses))
+ elements = list(filter(lambda x: x.strip(), license_operator.split(licenses)))
for pos, element in enumerate(elements):
if license_pattern.match(element):
if pos > 0 and license_pattern.match(elements[pos - 1]):
@@ -656,8 +668,6 @@ do_rootfs[recrdeptask] += "do_populate_lic"
IMAGE_POSTPROCESS_COMMAND_prepend = "write_deploy_manifest; "
do_image[recrdeptask] += "do_populate_lic"
-do_populate_lic_setscene[dirs] = "${LICSSTATEDIR}/${PN}"
-do_populate_lic_setscene[cleandirs] = "${LICSSTATEDIR}"
python do_populate_lic_setscene () {
sstate_setscene(d)
}
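
The duplicate-basename handling added above is easy to misread; as a minimal standalone sketch (hypothetical checksums and paths, not from any real recipe), the grouping behaves like this:

    import os
    from collections import defaultdict, OrderedDict

    srcdir = "/tmp/src"
    lic_chksums = {
        "COPYING": "aaa111",
        "docs/COPYING": "bbb222",   # same basename, different checksum
        "LICENSE": "ccc333",
    }

    lic_chksum_paths = defaultdict(OrderedDict)
    for path, chksum in lic_chksums.items():
        lic_chksum_paths[os.path.basename(path)][chksum] = os.path.join(srcdir, path)

    lic_files_paths = []
    for basename, files in lic_chksum_paths.items():
        if len(files) == 1:
            lic_files_paths.append((basename, list(files.values())[0]))
        else:
            # identical basenames become <file>.0, <file>.1, ...
            for i, path in enumerate(files.values()):
                lic_files_paths.append(("%s.%d" % (basename, i), path))

    print(lic_files_paths)
    # e.g. [('COPYING.0', '/tmp/src/COPYING'), ('COPYING.1', '/tmp/src/docs/COPYING'),
    #       ('LICENSE', '/tmp/src/LICENSE')]
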
diff --git a/import-layers/yocto-poky/meta/classes/linuxloader.bbclass b/import-layers/yocto-poky/meta/classes/linuxloader.bbclass
index 5c4dc5c51..117b03074 100644
--- a/import-layers/yocto-poky/meta/classes/linuxloader.bbclass
+++ b/import-layers/yocto-poky/meta/classes/linuxloader.bbclass
@@ -1,7 +1,13 @@
linuxloader () {
case ${TARGET_ARCH} in
- powerpc | mips | mipsel | microblaze )
+ powerpc | microblaze )
+ dynamic_loader="${base_libdir}/ld.so.1"
+ ;;
+ mipsisa32r6el | mipsisa32r6 | mipsisa64r6el | mipsisa64r6)
+ dynamic_loader="${base_libdir}/ld-linux-mipsn8.so.1"
+ ;;
+ mips* )
dynamic_loader="${base_libdir}/ld.so.1"
;;
powerpc64)
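
In Python terms, the loader selection above reduces to an ordered pattern match. A sketch covering only the cases visible in this hunk (powerpc64 and the remaining arches omitted; the pattern strings here are an approximation of the shell globs):

    import fnmatch

    def dynamic_loader(target_arch, base_libdir="/lib"):
        if target_arch in ("powerpc", "microblaze"):
            return base_libdir + "/ld.so.1"
        # MIPS release 6 is checked before the generic mips* catch-all
        if fnmatch.fnmatch(target_arch, "mipsisa*r6*"):
            return base_libdir + "/ld-linux-mipsn8.so.1"
        if fnmatch.fnmatch(target_arch, "mips*"):
            return base_libdir + "/ld.so.1"
        raise ValueError("arch not covered in this sketch: %s" % target_arch)

    assert dynamic_loader("mipsisa32r6el") == "/lib/ld-linux-mipsn8.so.1"
    assert dynamic_loader("mipsel") == "/lib/ld.so.1"
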
diff --git a/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass b/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass
index c751385e7..734697f9e 100644
--- a/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass
+++ b/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass
@@ -31,14 +31,18 @@ inherit ${EFI_CLASS}
inherit ${PCBIOS_CLASS}
KERNEL_IMAGETYPE ??= "bzImage"
+VM_DEFAULT_KERNEL ??= "${KERNEL_IMAGETYPE}"
populate_kernel() {
dest=$1
install -d $dest
# Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
- if [ -e ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} ]; then
- install -m 0644 ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} $dest/vmlinuz
+ bbnote "Trying to install ${DEPLOY_DIR_IMAGE}/${VM_DEFAULT_KERNEL} as $dest/vmlinuz"
+ if [ -e ${DEPLOY_DIR_IMAGE}/${VM_DEFAULT_KERNEL} ]; then
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/${VM_DEFAULT_KERNEL} $dest/vmlinuz
+ else
+ bbwarn "${DEPLOY_DIR_IMAGE}/${VM_DEFAULT_KERNEL} doesn't exist"
fi
# initrd is made of concatenation of multiple filesystem images
diff --git a/import-layers/yocto-poky/meta/classes/metadata_scm.bbclass b/import-layers/yocto-poky/meta/classes/metadata_scm.bbclass
index 0f7f4235a..2e6fac209 100644
--- a/import-layers/yocto-poky/meta/classes/metadata_scm.bbclass
+++ b/import-layers/yocto-poky/meta/classes/metadata_scm.bbclass
@@ -4,8 +4,7 @@ METADATA_REVISION ?= "${@base_detect_revision(d)}"
def base_detect_revision(d):
path = base_get_scmbasepath(d)
- scms = [base_get_metadata_git_revision, \
- base_get_metadata_svn_revision]
+ scms = [base_get_metadata_git_revision]
for scm in scms:
rev = scm(path, d)
@@ -27,7 +26,7 @@ def base_detect_branch(d):
return "<unknown>"
def base_get_scmbasepath(d):
- return d.getVar( 'COREBASE', True)
+ return os.path.join(d.getVar('COREBASE', True), 'meta')
def base_get_metadata_monotone_branch(path, d):
monotone_branch = "<unknown>"
diff --git a/import-layers/yocto-poky/meta/classes/mirrors.bbclass b/import-layers/yocto-poky/meta/classes/mirrors.bbclass
index 9e6d4836d..11847085b 100644
--- a/import-layers/yocto-poky/meta/classes/mirrors.bbclass
+++ b/import-layers/yocto-poky/meta/classes/mirrors.bbclass
@@ -21,13 +21,13 @@ ${DEBIAN_MIRROR} ftp://ftp.se.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.tr.debian.org/debian/pool \n \
${GNU_MIRROR} ftp://mirrors.kernel.org/gnu \n \
${KERNELORG_MIRROR} http://www.kernel.org/pub \n \
-ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt/ \n \
-ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.surfnet.nl/pub/security/gnupg/ \n \
-ftp://ftp.gnupg.org/gcrypt/ http://gulus.USherbrooke.ca/pub/appl/GnuPG/ \n \
+${GNUPG_MIRROR} ftp://ftp.gnupg.org/gcrypt \n \
+${GNUPG_MIRROR} ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt \n \
+${GNUPG_MIRROR} ftp://mirrors.dotsrc.org/gcrypt \n \
ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \
ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \
ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
-ftp://ftp.gnutls.org/gcrypt/gnutls ftp://ftp.gnupg.org/gcrypt/gnutls/ \n \
+ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR} \n \
http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cerias.purdue.edu/pub/tools/unix/sysutils/lsof/ \n \
diff --git a/import-layers/yocto-poky/meta/classes/module.bbclass b/import-layers/yocto-poky/meta/classes/module.bbclass
index 01c9309eb..68e3d341a 100644
--- a/import-layers/yocto-poky/meta/classes/module.bbclass
+++ b/import-layers/yocto-poky/meta/classes/module.bbclass
@@ -8,6 +8,15 @@ EXTRA_OEMAKE += "KERNEL_SRC=${STAGING_KERNEL_DIR}"
MODULES_INSTALL_TARGET ?= "modules_install"
+python __anonymous () {
+ depends = d.getVar('DEPENDS', True)
+ extra_symbols = []
+ for dep in depends.split():
+ if dep.startswith("kernel-module-"):
+ extra_symbols.append("${STAGING_INCDIR}/" + dep + "/Module.symvers")
+ d.setVar('KBUILD_EXTRA_SYMBOLS', " ".join(extra_symbols))
+}
+
module_do_compile() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
@@ -15,6 +24,7 @@ module_do_compile() {
CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
AR="${KERNEL_AR}" \
O=${STAGING_KERNEL_BUILDDIR} \
+ KBUILD_EXTRA_SYMBOLS="${KBUILD_EXTRA_SYMBOLS}" \
${MAKE_TARGETS}
}
@@ -24,6 +34,11 @@ module_do_install() {
CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
O=${STAGING_KERNEL_BUILDDIR} \
${MODULES_INSTALL_TARGET}
+
+ install -d -m0755 ${D}${includedir}/${BPN}
+ cp -a --no-preserve=ownership ${B}/Module.symvers ${D}${includedir}/${BPN}
+ # it doesn't actually seem to matter which path is specified here
+ sed -e 's:${B}/::g' -i ${D}${includedir}/${BPN}/Module.symvers
}
EXPORT_FUNCTIONS do_compile do_install
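
The anonymous Python above wires up inter-module symbol resolution: every DEPENDS entry named kernel-module-* contributes its staged Module.symvers to KBUILD_EXTRA_SYMBOLS. A standalone sketch of that derivation (hypothetical DEPENDS value):

    def kbuild_extra_symbols(depends, staging_incdir="${STAGING_INCDIR}"):
        symbols = []
        for dep in depends.split():
            if dep.startswith("kernel-module-"):
                # each prerequisite module stages its Module.symvers under includedir
                symbols.append("%s/%s/Module.symvers" % (staging_incdir, dep))
        return " ".join(symbols)

    print(kbuild_extra_symbols("virtual/kernel kernel-module-foo"))
    # ${STAGING_INCDIR}/kernel-module-foo/Module.symvers
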
diff --git a/import-layers/yocto-poky/meta/classes/multilib_global.bbclass b/import-layers/yocto-poky/meta/classes/multilib_global.bbclass
index 67dc72b76..11ae2681f 100644
--- a/import-layers/yocto-poky/meta/classes/multilib_global.bbclass
+++ b/import-layers/yocto-poky/meta/classes/multilib_global.bbclass
@@ -72,6 +72,7 @@ def preferred_ml_updates(d):
pkg = pkg.replace("virtual/", "")
virt = "virtual/"
for p in prefixes:
+ newval = None
if pkg != "kernel":
newval = p + "-" + val
@@ -86,7 +87,7 @@ def preferred_ml_updates(d):
# implement alternative multilib name
newname = localdata.expand("PREFERRED_PROVIDER_" + virt + p + "-" + pkg)
- if not d.getVar(newname, False):
+ if not d.getVar(newname, False) and newval != None:
d.setVar(newname, localdata.expand(newval))
# Avoid future variable key expansion
provexp = d.expand(prov)
diff --git a/import-layers/yocto-poky/meta/classes/multilib_header.bbclass b/import-layers/yocto-poky/meta/classes/multilib_header.bbclass
index 5ee0a2d56..304c28e77 100644
--- a/import-layers/yocto-poky/meta/classes/multilib_header.bbclass
+++ b/import-layers/yocto-poky/meta/classes/multilib_header.bbclass
@@ -52,3 +52,8 @@ oe_multilib_header() {
oe_multilib_header_class-native () {
return
}
+
+# Nor do we need multilib headers for nativesdk builds.
+oe_multilib_header_class-nativesdk () {
+ return
+}
diff --git a/import-layers/yocto-poky/meta/classes/native.bbclass b/import-layers/yocto-poky/meta/classes/native.bbclass
index f67ef0014..143f8a914 100644
--- a/import-layers/yocto-poky/meta/classes/native.bbclass
+++ b/import-layers/yocto-poky/meta/classes/native.bbclass
@@ -87,8 +87,7 @@ datadir = "${STAGING_DATADIR_NATIVE}"
baselib = "lib"
-# Libtool's default paths are correct for the native machine
-lt_cv_sys_lib_dlsearch_path_spec[unexport] = "1"
+export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
NATIVE_PACKAGE_PATH_SUFFIX ?= ""
bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
diff --git a/import-layers/yocto-poky/meta/classes/nativesdk.bbclass b/import-layers/yocto-poky/meta/classes/nativesdk.bbclass
index f74da6267..a78257c19 100644
--- a/import-layers/yocto-poky/meta/classes/nativesdk.bbclass
+++ b/import-layers/yocto-poky/meta/classes/nativesdk.bbclass
@@ -10,6 +10,8 @@ LIBCOVERRIDE = ":${NATIVESDKLIBC}"
CLASSOVERRIDE = "class-nativesdk"
MACHINEOVERRIDES = ""
+MULTILIBS = ""
+
#
# Update PACKAGE_ARCH and PACKAGE_ARCHS
#
diff --git a/import-layers/yocto-poky/meta/classes/nopackages.bbclass b/import-layers/yocto-poky/meta/classes/nopackages.bbclass
index 0c2761bef..559f5078b 100644
--- a/import-layers/yocto-poky/meta/classes/nopackages.bbclass
+++ b/import-layers/yocto-poky/meta/classes/nopackages.bbclass
@@ -4,3 +4,9 @@ deltask do_package_write_ipk
deltask do_package_write_deb
deltask do_package_qa
deltask do_packagedata
+deltask do_package_setscene
+deltask do_package_write_rpm_setscene
+deltask do_package_write_ipk_setscene
+deltask do_package_write_deb_setscene
+deltask do_package_qa_setscene
+deltask do_packagedata_setscene
diff --git a/import-layers/yocto-poky/meta/classes/npm.bbclass b/import-layers/yocto-poky/meta/classes/npm.bbclass
index 9843e8735..fce4c1146 100644
--- a/import-layers/yocto-poky/meta/classes/npm.bbclass
+++ b/import-layers/yocto-poky/meta/classes/npm.bbclass
@@ -1,9 +1,25 @@
DEPENDS_prepend = "nodejs-native "
+RDEPENDS_${PN}_prepend = "nodejs "
S = "${WORKDIR}/npmpkg"
NPM_INSTALLDIR = "${D}${libdir}/node_modules/${PN}"
+# Map OE architecture names to npm architecture names
+def npm_oe_arch_map(target_arch, d):
+ import re
+ if re.match('p(pc|owerpc)(|64)', target_arch): return 'ppc'
+ elif re.match('i.86$', target_arch): return 'ia32'
+ elif re.match('x86_64$', target_arch): return 'x64'
+ elif re.match('arm64$', target_arch): return 'arm'
+ return target_arch
+
+NPM_ARCH ?= "${@npm_oe_arch_map(d.getVar('TARGET_ARCH', True), d)}"
+
npm_do_compile() {
+ # Copy in any additionally fetched modules
+ if [ -d ${WORKDIR}/node_modules ] ; then
+ cp -a ${WORKDIR}/node_modules ${S}/
+ fi
# change the home directory to the working directory; the .npmrc will
# be created in this directory
export HOME=${WORKDIR}
@@ -12,7 +28,7 @@ npm_do_compile() {
# clear cache before every build
npm cache clear
# Install pkg into ${S} without going to the registry
- npm --arch=${TARGET_ARCH} --production --no-registry install
+ npm --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --production --no-registry install
}
npm_do_install() {
@@ -35,10 +51,10 @@ python populate_packages_prepend () {
if pdata:
version = pdata.get('version', None)
if version:
- d.setVar('PKGV_%s' % expanded_pkgname, version.encode("utf8"))
+ d.setVar('PKGV_%s' % expanded_pkgname, version)
description = pdata.get('description', None)
if description:
- d.setVar('SUMMARY_%s' % expanded_pkgname, description.replace(u"\u2018", "'").replace(u"\u2019", "'").encode("utf8"))
+ d.setVar('SUMMARY_%s' % expanded_pkgname, description.replace(u"\u2018", "'").replace(u"\u2019", "'"))
d.appendVar('RDEPENDS_%s' % d.getVar('PN', True), ' %s' % ' '.join(pkgnames).replace('_', '-'))
}
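
The mapping function is self-contained and can be exercised directly (d is accepted only to match the bbclass signature and is unused):

    import re

    def npm_oe_arch_map(target_arch, d=None):
        if re.match('p(pc|owerpc)(|64)', target_arch): return 'ppc'
        elif re.match('i.86$', target_arch): return 'ia32'
        elif re.match('x86_64$', target_arch): return 'x64'
        elif re.match('arm64$', target_arch): return 'arm'
        return target_arch

    for arch in ('i586', 'x86_64', 'powerpc64', 'arm64', 'mips'):
        print(arch, '->', npm_oe_arch_map(arch))
    # i586 -> ia32, x86_64 -> x64, powerpc64 -> ppc, arm64 -> arm, mips -> mips
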
diff --git a/import-layers/yocto-poky/meta/classes/oelint.bbclass b/import-layers/yocto-poky/meta/classes/oelint.bbclass
index 1b051ca22..c4febc2cf 100644
--- a/import-layers/yocto-poky/meta/classes/oelint.bbclass
+++ b/import-layers/yocto-poky/meta/classes/oelint.bbclass
@@ -45,7 +45,7 @@ python do_lint() {
def findKey(path, key):
ret = True
- f = file('%s' % path, mode = 'r')
+ f = open('%s' % path, mode = 'r')
line = f.readline()
while line:
if line.find(key) != -1:
diff --git a/import-layers/yocto-poky/meta/classes/package.bbclass b/import-layers/yocto-poky/meta/classes/package.bbclass
index 76b9f8649..a6f0a7a63 100644
--- a/import-layers/yocto-poky/meta/classes/package.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package.bbclass
@@ -63,7 +63,7 @@ def legitimize_package_name(s):
def fixutf(m):
cp = m.group(1)
if cp:
- return ('\u%s' % cp).decode('unicode_escape').encode('utf-8')
+ return ('\\u%s' % cp).encode('latin-1').decode('unicode_escape')
# Handle unicode codepoints encoded as <U0123>, as in glibc locale files.
s = re.sub('<U([0-9A-Fa-f]{1,4})>', fixutf, s)
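
The rewritten fixutf() is the Python 3 idiom for turning a hex codepoint into a character: escape it, round-trip through latin-1 bytes, then decode as unicode_escape. A minimal demonstration with a hypothetical input string:

    import re

    def fixutf(m):
        cp = m.group(1)
        if cp:
            return ('\\u%s' % cp).encode('latin-1').decode('unicode_escape')

    # glibc locale files encode codepoints as <Uxxxx>
    print(re.sub('<U([0-9A-Fa-f]{1,4})>', fixutf, 'caf<U00E9>'))  # -> café
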
@@ -146,7 +146,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
packages = d.getVar('PACKAGES', True).split()
- split_packages = []
+ split_packages = set()
if postinst:
postinst = '#!/bin/sh\n' + postinst + '\n'
@@ -183,7 +183,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
continue
on = legitimize_package_name(m.group(1))
pkg = output_pattern % on
- split_packages.append(pkg)
+ split_packages.add(pkg)
if not pkg in packages:
if prepend:
packages = [pkg] + packages
@@ -226,7 +226,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
hook(f, pkg, file_regex, output_pattern, m.group(1))
d.setVar('PACKAGES', ' '.join(packages))
- return split_packages
+ return list(split_packages)
PACKAGE_DEPENDS += "file-native"
@@ -259,14 +259,30 @@ def files_from_filevars(filevars):
continue
files.append(f)
- for f in files:
+ symlink_paths = []
+ for ind, f in enumerate(files):
+ # Handle directory symlinks. Truncate path to the lowest level symlink
+ parent = ''
+ for dirname in f.split('/')[:-1]:
+ parent = os.path.join(parent, dirname)
+ if dirname == '.':
+ continue
+ if cpath.islink(parent):
+ bb.warn("FILES contains file '%s' which resides under a "
+ "directory symlink. Please fix the recipe and use the "
+ "real path for the file." % f[1:])
+ symlink_paths.append(f)
+ files[ind] = parent
+ f = parent
+ break
+
if not cpath.islink(f):
if cpath.isdir(f):
newfiles = [ os.path.join(f,x) for x in os.listdir(f) ]
if newfiles:
files += newfiles
- return files
+ return files, symlink_paths
# Called in package_<rpm,ipk,deb>.bbclass to get the correct list of configuration files
def get_conffiles(pkg, d):
@@ -281,7 +297,7 @@ def get_conffiles(pkg, d):
if conffiles == None:
conffiles = ""
conffiles = conffiles.split()
- conf_orig_list = files_from_filevars(conffiles)
+ conf_orig_list = files_from_filevars(conffiles)[0]
# Remove links and directories from conf_orig_list to get conf_list which only contains normal files
conf_list = []
@@ -841,6 +857,9 @@ python split_and_strip_files () {
dvar = d.getVar('PKGD', True)
pn = d.getVar('PN', True)
+ oldcwd = os.getcwd()
+ os.chdir(dvar)
+
# We default to '.debug' style
if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-file-directory':
# Single debug-file-directory style debug info
@@ -864,8 +883,6 @@ python split_and_strip_files () {
sourcefile = d.expand("${WORKDIR}/debugsources.list")
bb.utils.remove(sourcefile)
- os.chdir(dvar)
-
# Return type (bits):
# 0 - not elf
# 1 - ELF
@@ -903,7 +920,8 @@ python split_and_strip_files () {
inodes = {}
libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir", True))
baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir", True))
- if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1'):
+ if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1' or \
+ d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT', True) != '1'):
for root, dirs, files in cpath.walk(dvar):
for f in files:
file = os.path.join(root, f)
@@ -1051,6 +1069,7 @@ python split_and_strip_files () {
#
# End of strip
#
+ os.chdir(oldcwd)
}
python populate_packages () {
@@ -1108,7 +1127,7 @@ python populate_packages () {
filesvar.replace("//", "/")
origfiles = filesvar.split()
- files = files_from_filevars(origfiles)
+ files, symlink_paths = files_from_filevars(origfiles)
if autodebug and pkg.endswith("-dbg"):
files.extend(debug)
@@ -1149,13 +1168,19 @@ python populate_packages () {
fpath = os.path.join(root,file)
if not cpath.islink(file):
os.link(file, fpath)
- fstat = cpath.stat(file)
- os.chmod(fpath, fstat.st_mode)
- os.chown(fpath, fstat.st_uid, fstat.st_gid)
continue
ret = bb.utils.copyfile(file, fpath)
if ret is False or ret == 0:
- raise bb.build.FuncFailed("File population failed")
+ bb.fatal("File population failed")
+
+ # Check if symlink paths exist
+ for file in symlink_paths:
+ if not os.path.exists(os.path.join(root,file)):
+ bb.fatal("File '%s' cannot be packaged into '%s' because its "
+ "parent directory structure does not exist. One of "
+ "its parent directories is a symlink whose target "
+ "directory is not included in the package." %
+ (file, pkg))
os.umask(oldumask)
os.chdir(workdir)
@@ -1258,8 +1283,8 @@ python emit_pkgdata() {
def write_if_exists(f, pkg, var):
def encode(str):
import codecs
- c = codecs.getencoder("string_escape")
- return c(str)[0]
+ c = codecs.getencoder("unicode_escape")
+ return c(str)[0].decode("latin1")
val = d.getVar('%s_%s' % (var, pkg), True)
if val:
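
The new encode() helper replaces the Python 2-only "string_escape" codec; standalone it behaves like this:

    import codecs

    def encode(s):
        c = codecs.getencoder("unicode_escape")
        return c(s)[0].decode("latin1")

    print(encode('multi\nline\tvalue'))  # multi\nline\tvalue (escapes literalised)
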
@@ -1465,7 +1490,7 @@ python package_do_shlibs() {
import re, pipes
import subprocess as sub
- exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', 0)
+ exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', False)
if exclude_shlibs:
bb.note("not generating shlibs")
return
@@ -1503,7 +1528,7 @@ python package_do_shlibs() {
m = re.match("\s+RPATH\s+([^\s]*)", l)
if m:
rpaths = m.group(1).replace("$ORIGIN", ldir).split(":")
- rpath = map(os.path.normpath, rpaths)
+ rpath = list(map(os.path.normpath, rpaths))
for l in lines:
m = re.match("\s+NEEDED\s+([^\s]*)", l)
if m:
@@ -1554,19 +1579,19 @@ python package_do_shlibs() {
if file.endswith('.dylib') or file.endswith('.so'):
rpath = []
p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file],stdout=sub.PIPE,stderr=sub.PIPE)
- err, out = p.communicate()
- # If returned successfully, process stderr for results
+ out, err = p.communicate()
+ # If returned successfully, process stdout for results
if p.returncode == 0:
- for l in err.split("\n"):
+ for l in out.split("\n"):
l = l.strip()
if l.startswith('path '):
rpath.append(l.split()[1])
p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file],stdout=sub.PIPE,stderr=sub.PIPE)
- err, out = p.communicate()
- # If returned successfully, process stderr for results
+ out, err = p.communicate()
+ # If returned successfully, process stdout for results
if p.returncode == 0:
- for l in err.split("\n"):
+ for l in out.split("\n"):
l = l.strip()
if not l or l.endswith(":"):
continue
@@ -1673,7 +1698,7 @@ python package_do_shlibs() {
bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0]))
continue
if n[0] in shlib_provider.keys():
- shlib_provider_path = list()
+ shlib_provider_path = []
for k in shlib_provider[n[0]].keys():
shlib_provider_path.append(k)
match = None
@@ -2128,4 +2153,3 @@ def mapping_rename_hook(d):
runtime_mapping_rename("RDEPENDS", pkg, d)
runtime_mapping_rename("RRECOMMENDS", pkg, d)
runtime_mapping_rename("RSUGGESTS", pkg, d)
-
diff --git a/import-layers/yocto-poky/meta/classes/package_deb.bbclass b/import-layers/yocto-poky/meta/classes/package_deb.bbclass
index e1d05a74c..fb6034cab 100644
--- a/import-layers/yocto-poky/meta/classes/package_deb.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package_deb.bbclass
@@ -52,6 +52,9 @@ python do_package_deb () {
import re, copy
import textwrap
import subprocess
+ import collections
+
+ oldcwd = os.getcwd()
workdir = d.getVar('WORKDIR', True)
if not workdir:
@@ -117,13 +120,13 @@ python do_package_deb () {
controldir = os.path.join(root, 'DEBIAN')
bb.utils.mkdirhier(controldir)
- os.chmod(controldir, 0755)
+ os.chmod(controldir, 0o755)
try:
import codecs
ctrlfile = codecs.open(os.path.join(controldir, 'control'), 'w', 'utf-8')
except OSError:
bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("unable to open control file for writing.")
+ bb.fatal("unable to open control file for writing")
fields = []
pe = d.getVar('PKGE', True)
@@ -173,7 +176,7 @@ python do_package_deb () {
# Special behavior for description...
if 'DESCRIPTION' in fs:
summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
- ctrlfile.write('Description: %s\n' % unicode(summary,'utf-8'))
+ ctrlfile.write('Description: %s\n' % summary)
description = localdata.getVar('DESCRIPTION', True) or "."
description = textwrap.dedent(description).strip()
if '\\n' in description:
@@ -182,29 +185,25 @@ python do_package_deb () {
# We don't limit the width when manually indent, but we do
# need the textwrap.fill() to set the initial_indent and
# subsequent_indent, so set a large width
- ctrlfile.write('%s\n' % unicode(textwrap.fill(t, width=100000, initial_indent=' ', subsequent_indent=' '),'utf-8'))
+ ctrlfile.write('%s\n' % textwrap.fill(t, width=100000, initial_indent=' ', subsequent_indent=' '))
else:
# Auto indent
- ctrlfile.write('%s\n' % unicode(textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' '),'utf-8'))
+ ctrlfile.write('%s\n' % textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' '))
else:
- ctrlfile.write(unicode(c % tuple(pullData(fs, localdata)),'utf-8'))
+ ctrlfile.write(c % tuple(pullData(fs, localdata)))
except KeyError:
import sys
(type, value, traceback) = sys.exc_info()
bb.utils.unlockfile(lf)
ctrlfile.close()
- raise bb.build.FuncFailed("Missing field for deb generation: %s" % value)
- except UnicodeDecodeError:
- bb.utils.unlockfile(lf)
- ctrlfile.close()
- raise bb.build.FuncFailed("Non UTF-8 characters found in one of the fields")
+ bb.fatal("Missing field for deb generation: %s" % value)
# more fields
custom_fields_chunk = get_package_additional_metadata("deb", localdata)
if custom_fields_chunk is not None:
- ctrlfile.write(unicode(custom_fields_chunk))
+ ctrlfile.write(custom_fields_chunk)
ctrlfile.write("\n")
mapping_rename_hook(localdata)
@@ -234,7 +233,7 @@ python do_package_deb () {
rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS", True) or "")
debian_cmp_remap(rdepends)
- for dep in rdepends.keys():
+ for dep in list(rdepends.keys()):
if dep == pkg:
del rdepends[dep]
continue
@@ -242,30 +241,31 @@ python do_package_deb () {
del rdepends[dep]
rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS", True) or "")
debian_cmp_remap(rrecommends)
- for dep in rrecommends.keys():
+ for dep in list(rrecommends.keys()):
if '*' in dep:
del rrecommends[dep]
rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS", True) or "")
debian_cmp_remap(rsuggests)
# Deliberately drop version information here, not wanted/supported by deb
rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES", True) or ""), [])
+ rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
debian_cmp_remap(rprovides)
rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES", True) or "")
debian_cmp_remap(rreplaces)
rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS", True) or "")
debian_cmp_remap(rconflicts)
if rdepends:
- ctrlfile.write("Depends: %s\n" % unicode(bb.utils.join_deps(rdepends)))
+ ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
if rsuggests:
- ctrlfile.write("Suggests: %s\n" % unicode(bb.utils.join_deps(rsuggests)))
+ ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
if rrecommends:
- ctrlfile.write("Recommends: %s\n" % unicode(bb.utils.join_deps(rrecommends)))
+ ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
if rprovides:
- ctrlfile.write("Provides: %s\n" % unicode(bb.utils.join_deps(rprovides)))
+ ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
if rreplaces:
- ctrlfile.write("Replaces: %s\n" % unicode(bb.utils.join_deps(rreplaces)))
+ ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
if rconflicts:
- ctrlfile.write("Conflicts: %s\n" % unicode(bb.utils.join_deps(rconflicts)))
+ ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
ctrlfile.close()
for script in ["preinst", "postinst", "prerm", "postrm"]:
@@ -277,7 +277,7 @@ python do_package_deb () {
scriptfile = open(os.path.join(controldir, script), 'w')
except OSError:
bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
+ bb.fatal("unable to open %s script file for writing" % script)
if scriptvar.startswith("#!"):
pos = scriptvar.find("\n") + 1
@@ -293,7 +293,7 @@ python do_package_deb () {
scriptfile.write(scriptvar[pos:])
scriptfile.write('\n')
scriptfile.close()
- os.chmod(os.path.join(controldir, script), 0755)
+ os.chmod(os.path.join(controldir, script), 0o755)
conffiles_str = ' '.join(get_conffiles(pkg, d))
if conffiles_str:
@@ -301,7 +301,7 @@ python do_package_deb () {
conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
except OSError:
bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("unable to open conffiles for writing.")
+ bb.fatal("unable to open conffiles for writing")
for f in conffiles_str.split():
if os.path.exists(oe.path.join(root, f)):
conffiles.write('%s\n' % f)
@@ -311,10 +311,11 @@ python do_package_deb () {
ret = subprocess.call("PATH=\"%s\" dpkg-deb -b %s %s" % (localdata.getVar("PATH", True), root, pkgoutdir), shell=True)
if ret != 0:
bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("dpkg-deb execution failed")
+ bb.fatal("dpkg-deb execution failed")
cleanupcontrol(root)
bb.utils.unlockfile(lf)
+ os.chdir(oldcwd)
}
# Indirect references to these vars
do_package_write_deb[vardeps] += "PKGV PKGR PKGV DESCRIPTION SECTION PRIORITY MAINTAINER DPKG_ARCH PN HOMEPAGE"
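
The added OrderedDict sort matters because plain dict iteration order is not stable across builds, so Provides lines could churn for no material reason. A sketch with hypothetical RPROVIDES data (the real code formats with bb.utils.join_deps; a plain join stands in here):

    import collections

    rprovides = dict.fromkeys(['libfoo', 'foo-compat', 'foo'], [])
    rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
    print('Provides: %s' % ' '.join(rprovides.keys()))
    # Provides: foo foo-compat libfoo   (stable regardless of input order)
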
diff --git a/import-layers/yocto-poky/meta/classes/package_ipk.bbclass b/import-layers/yocto-poky/meta/classes/package_ipk.bbclass
index f1ad1d5c1..eb0093233 100644
--- a/import-layers/yocto-poky/meta/classes/package_ipk.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package_ipk.bbclass
@@ -20,6 +20,9 @@ python do_package_ipk () {
import re, copy
import textwrap
import subprocess
+ import collections
+
+ oldcwd = os.getcwd()
workdir = d.getVar('WORKDIR', True)
outdir = d.getVar('PKGWRITEDIRIPK', True)
@@ -105,7 +108,7 @@ python do_package_ipk () {
ctrlfile = open(os.path.join(controldir, 'control'), 'w')
except OSError:
bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("unable to open control file for writing.")
+ bb.fatal("unable to open control file for writing")
fields = []
pe = d.getVar('PKGE', True)
@@ -159,7 +162,7 @@ python do_package_ipk () {
(type, value, traceback) = sys.exc_info()
ctrlfile.close()
bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("Missing field for ipk generation: %s" % value)
+ bb.fatal("Missing field for ipk generation: %s" % value)
# more fields
custom_fields_chunk = get_package_additional_metadata("ipk", localdata)
@@ -190,6 +193,7 @@ python do_package_ipk () {
debian_cmp_remap(rsuggests)
# Deliberately drop version information here, not wanted/supported by ipk
rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES", True) or ""), [])
+ rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
debian_cmp_remap(rprovides)
rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES", True) or "")
debian_cmp_remap(rreplaces)
@@ -222,10 +226,10 @@ python do_package_ipk () {
scriptfile = open(os.path.join(controldir, script), 'w')
except OSError:
bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
+ bb.fatal("unable to open %s script file for writing" % script)
scriptfile.write(scriptvar)
scriptfile.close()
- os.chmod(os.path.join(controldir, script), 0755)
+ os.chmod(os.path.join(controldir, script), 0o755)
conffiles_str = ' '.join(get_conffiles(pkg, d))
if conffiles_str:
@@ -233,7 +237,7 @@ python do_package_ipk () {
conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
except OSError:
bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("unable to open conffiles for writing.")
+ bb.fatal("unable to open conffiles for writing")
for f in conffiles_str.split():
if os.path.exists(oe.path.join(root, f)):
conffiles.write('%s\n' % f)
@@ -241,10 +245,10 @@ python do_package_ipk () {
os.chdir(basedir)
ret = subprocess.call("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH", True),
- d.getVar("OPKGBUILDCMD",1), pkg, pkgoutdir), shell=True)
+ d.getVar("OPKGBUILDCMD", True), pkg, pkgoutdir), shell=True)
if ret != 0:
bb.utils.unlockfile(lf)
- raise bb.build.FuncFailed("opkg-build execution failed")
+ bb.fatal("opkg-build execution failed")
if d.getVar('IPK_SIGN_PACKAGES', True) == '1':
ipkver = "%s-%s" % (d.getVar('PKGV', True), d.getVar('PKGR', True))
@@ -254,6 +258,7 @@ python do_package_ipk () {
cleanupcontrol(root)
bb.utils.unlockfile(lf)
+ os.chdir(oldcwd)
}
# Otherwise allarch packages may change depending on override configuration
do_package_ipk[vardepsexclude] = "OVERRIDES"
diff --git a/import-layers/yocto-poky/meta/classes/package_rpm.bbclass b/import-layers/yocto-poky/meta/classes/package_rpm.bbclass
index 7d523a16f..c431545f7 100644
--- a/import-layers/yocto-poky/meta/classes/package_rpm.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package_rpm.bbclass
@@ -58,12 +58,12 @@ def write_rpm_perfiledata(srcname, d):
try:
dependsfile = open(outdepends, 'w')
except OSError:
- raise bb.build.FuncFailed("unable to open spec file for writing.")
+ bb.fatal("unable to open spec file for writing")
dump_filerdeps('RDEPENDS', dependsfile, d)
dependsfile.close()
- os.chmod(outdepends, 0755)
+ os.chmod(outdepends, 0o755)
# OE-core / RPM Provides
outprovides = workdir + "/" + srcname + ".provides"
@@ -71,12 +71,12 @@ def write_rpm_perfiledata(srcname, d):
try:
providesfile = open(outprovides, 'w')
except OSError:
- raise bb.build.FuncFailed("unable to open spec file for writing.")
+ bb.fatal("unable to open spec file for writing")
dump_filerdeps('RPROVIDES', providesfile, d)
providesfile.close()
- os.chmod(outprovides, 0755)
+ os.chmod(outprovides, 0o755)
return (outdepends, outprovides)
@@ -617,7 +617,7 @@ python write_specfile () {
try:
specfile = open(outspecfile, 'w')
except OSError:
- raise bb.build.FuncFailed("unable to open spec file for writing.")
+ bb.fatal("unable to open spec file for writing")
# RPMSPEC_PREAMBLE is a way to add arbitrary text to the top
# of the generated spec file
@@ -702,7 +702,7 @@ python do_package_rpm () {
pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${HOST_VENDOR}-${HOST_OS}')
magicfile = d.expand('${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc')
bb.utils.mkdirhier(pkgwritedir)
- os.chmod(pkgwritedir, 0755)
+ os.chmod(pkgwritedir, 0o755)
cmd = rpmbuild
cmd = cmd + " --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
diff --git a/import-layers/yocto-poky/meta/classes/package_tar.bbclass b/import-layers/yocto-poky/meta/classes/package_tar.bbclass
index 854e64528..e217814af 100644
--- a/import-layers/yocto-poky/meta/classes/package_tar.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package_tar.bbclass
@@ -4,6 +4,9 @@ IMAGE_PKGTYPE ?= "tar"
python do_package_tar () {
import subprocess
+
+ oldcwd = os.getcwd()
+
workdir = d.getVar('WORKDIR', True)
if not workdir:
bb.error("WORKDIR not defined, unable to package")
@@ -49,6 +52,8 @@ python do_package_tar () {
ret = subprocess.call(args + [tarfn] + dlist)
if ret != 0:
bb.error("Creation of tar %s failed." % tarfn)
+
+ os.chdir(oldcwd)
}
python () {
diff --git a/import-layers/yocto-poky/meta/classes/packagefeed-stability.bbclass b/import-layers/yocto-poky/meta/classes/packagefeed-stability.bbclass
new file mode 100644
index 000000000..aa01def74
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/packagefeed-stability.bbclass
@@ -0,0 +1,252 @@
+# Class to avoid copying packages into the feed if they haven't materially changed
+#
+# Copyright (C) 2015 Intel Corporation
+# Released under the MIT license (see COPYING.MIT for details)
+#
+# This class effectively intercepts packages as they are written out by
+# do_package_write_*, causing them to be written into a different
+# directory where we can compare them to whatever older packages might
+# be in the "real" package feed directory, and avoid copying the new
+# package to the feed if it has not materially changed. The idea is to
+# avoid unnecessary churn in the packages when dependencies trigger task
+# reexecution (and thus repackaging). Enabling the class is simple:
+#
+# INHERIT += "packagefeed-stability"
+#
+# Caveats:
+# 1) Latest PR values in the build system may not match those in packages
+# seen on the target (naturally)
+# 2) If you rebuild from sstate without the existing package feed present,
+# you will lose the "state" of the package feed i.e. the preserved old
+# package versions. Not the end of the world, but would negate the
+# entire purpose of this class.
+#
+# Note that running -c cleanall on a recipe will purposely delete the old
+# package files so they will definitely be copied the next time.
+
+python() {
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d):
+ return
+ # Package backend agnostic intercept
+ # This assumes that the package_write task is called package_write_<pkgtype>
+ # and that the directory in which packages should be written is
+ # pointed to by the variable DEPLOY_DIR_<PKGTYPE>
+ for pkgclass in (d.getVar('PACKAGE_CLASSES', True) or '').split():
+ if pkgclass.startswith('package_'):
+ pkgtype = pkgclass.split('_', 1)[1]
+ pkgwritefunc = 'do_package_write_%s' % pkgtype
+ sstate_outputdirs = d.getVarFlag(pkgwritefunc, 'sstate-outputdirs', False)
+ deploydirvar = 'DEPLOY_DIR_%s' % pkgtype.upper()
+ deploydirvarref = '${' + deploydirvar + '}'
+ pkgcomparefunc = 'do_package_compare_%s' % pkgtype
+
+ if bb.data.inherits_class('image', d):
+ d.appendVarFlag('do_rootfs', 'recrdeptask', ' ' + pkgcomparefunc)
+
+ if bb.data.inherits_class('populate_sdk_base', d):
+ d.appendVarFlag('do_populate_sdk', 'recrdeptask', ' ' + pkgcomparefunc)
+
+ if bb.data.inherits_class('populate_sdk_ext', d):
+ d.appendVarFlag('do_populate_sdk_ext', 'recrdeptask', ' ' + pkgcomparefunc)
+
+ d.appendVarFlag('do_build', 'recrdeptask', ' ' + pkgcomparefunc)
+
+ if d.getVarFlag(pkgwritefunc, 'noexec', True) or not d.getVarFlag(pkgwritefunc, 'task', True):
+ # Packaging is disabled for this recipe, we shouldn't do anything
+ continue
+
+ if deploydirvarref in sstate_outputdirs:
+                deploy_dir_pkgtype = d.expand(deploydirvarref + '-prediff')
+                # Set intermediate output directory
+                d.setVarFlag(pkgwritefunc, 'sstate-outputdirs', sstate_outputdirs.replace(deploydirvarref, deploy_dir_pkgtype))
+                # Update SSTATE_DUPWHITELIST to avoid "shared location conflicted" errors
+                d.appendVar('SSTATE_DUPWHITELIST', ' %s' % deploy_dir_pkgtype)
+
+ d.setVar(pkgcomparefunc, d.getVar('do_package_compare', False))
+ d.setVarFlags(pkgcomparefunc, d.getVarFlags('do_package_compare', False))
+ d.appendVarFlag(pkgcomparefunc, 'depends', ' build-compare-native:do_populate_sysroot')
+ bb.build.addtask(pkgcomparefunc, 'do_build', 'do_packagedata ' + pkgwritefunc, d)
+}
+
+# This isn't the real task function - it's a template that we use in the
+# anonymous python code above
+fakeroot python do_package_compare () {
+ currenttask = d.getVar('BB_CURRENTTASK', True)
+ pkgtype = currenttask.rsplit('_', 1)[1]
+ package_compare_impl(pkgtype, d)
+}
+
+def package_compare_impl(pkgtype, d):
+ import errno
+ import fnmatch
+ import glob
+    import shutil
+    import subprocess
+ import oe.sstatesig
+
+ pn = d.getVar('PN', True)
+ deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper(), True)
+ prepath = deploydir + '-prediff/'
+
+    # Find out what the PKGR values are
+ pkgdatadir = d.getVar('PKGDATA_DIR', True)
+ packages = []
+ try:
+ with open(os.path.join(pkgdatadir, pn), 'r') as f:
+ for line in f:
+ if line.startswith('PACKAGES:'):
+ packages = line.split(':', 1)[1].split()
+ break
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ pass
+
+ if not packages:
+ bb.debug(2, '%s: no packages, nothing to do' % pn)
+ return
+
+ pkgrvalues = {}
+ rpkgnames = {}
+ rdepends = {}
+ pkgvvalues = {}
+ for pkg in packages:
+ with open(os.path.join(pkgdatadir, 'runtime', pkg), 'r') as f:
+ for line in f:
+ if line.startswith('PKGR:'):
+ pkgrvalues[pkg] = line.split(':', 1)[1].strip()
+ if line.startswith('PKGV:'):
+ pkgvvalues[pkg] = line.split(':', 1)[1].strip()
+ elif line.startswith('PKG_%s:' % pkg):
+ rpkgnames[pkg] = line.split(':', 1)[1].strip()
+ elif line.startswith('RDEPENDS_%s:' % pkg):
+ rdepends[pkg] = line.split(':', 1)[1].strip()
+
+ # Prepare a list of the runtime package names for packages that were
+ # actually produced
+ rpkglist = []
+ for pkg, rpkg in rpkgnames.items():
+ if os.path.exists(os.path.join(pkgdatadir, 'runtime', pkg + '.packaged')):
+ rpkglist.append((rpkg, pkg))
+ rpkglist.sort(key=lambda x: len(x[0]), reverse=True)
+
+ pvu = d.getVar('PV', False)
+ if '$' + '{SRCPV}' in pvu:
+ pvprefix = pvu.split('$' + '{SRCPV}', 1)[0]
+ else:
+ pvprefix = None
+
+ pkgwritetask = 'package_write_%s' % pkgtype
+ files = []
+ docopy = False
+ manifest, _ = oe.sstatesig.sstate_get_manifest_filename(pkgwritetask, d)
+ mlprefix = d.getVar('MLPREFIX', True)
+    # Copy all of the recipe's packages if any one of them differs, so that
+    # they all keep the same PR.
+ with open(manifest, 'r') as f:
+ for line in f:
+ if line.startswith(prepath):
+ srcpath = line.rstrip()
+ if os.path.isfile(srcpath):
+ destpath = os.path.join(deploydir, os.path.relpath(srcpath, prepath))
+
+ # This is crude but should work assuming the output
+ # package file name starts with the package name
+ # and rpkglist is sorted by length (descending)
+ pkgbasename = os.path.basename(destpath)
+ pkgname = None
+ for rpkg, pkg in rpkglist:
+ if mlprefix and pkgtype == 'rpm' and rpkg.startswith(mlprefix):
+ rpkg = rpkg[len(mlprefix):]
+ if pkgbasename.startswith(rpkg):
+ pkgr = pkgrvalues[pkg]
+ destpathspec = destpath.replace(pkgr, '*')
+ if pvprefix:
+ pkgv = pkgvvalues[pkg]
+ if pkgv.startswith(pvprefix):
+ pkgvsuffix = pkgv[len(pvprefix):]
+ if '+' in pkgvsuffix:
+ newpkgv = pvprefix + '*+' + pkgvsuffix.split('+', 1)[1]
+ destpathspec = destpathspec.replace(pkgv, newpkgv)
+ pkgname = pkg
+ break
+ else:
+ bb.warn('Unable to map %s back to package' % pkgbasename)
+ destpathspec = destpath
+
+ oldfile = None
+ if not docopy:
+ oldfiles = glob.glob(destpathspec)
+ if oldfiles:
+ oldfile = oldfiles[-1]
+ result = subprocess.call(['pkg-diff.sh', oldfile, srcpath])
+ if result != 0:
+ docopy = True
+ bb.note("%s and %s are different, will copy packages" % (oldfile, srcpath))
+ else:
+ docopy = True
+ bb.note("No old packages found for %s, will copy packages" % pkgname)
+
+ files.append((pkgname, pkgbasename, srcpath, destpath))
+
+ # Remove all the old files and copy again if docopy
+ if docopy:
+ bb.plain('Copying packages for recipe %s' % pn)
+ pcmanifest = os.path.join(prepath, d.expand('pkg-compare-manifest-${MULTIMACH_TARGET_SYS}-${PN}'))
+ try:
+ with open(pcmanifest, 'r') as f:
+ for line in f:
+ fn = line.rstrip()
+ if fn:
+ try:
+ os.remove(fn)
+ bb.note('Removed old package %s' % fn)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ pass
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ pass
+
+ # Create new manifest
+ with open(pcmanifest, 'w') as f:
+ for pkgname, pkgbasename, srcpath, destpath in files:
+ destdir = os.path.dirname(destpath)
+ bb.utils.mkdirhier(destdir)
+                # Remove the allarch rpm package if it already exists (for
+                # multilib); the packages are identical in theory, but sstate.bbclass
+                # copies it again, so keep aligned with that.
+ if os.path.exists(destpath) and pkgtype == 'rpm' \
+ and d.getVar('PACKAGE_ARCH', True) == 'all':
+ os.unlink(destpath)
+ if (os.stat(srcpath).st_dev == os.stat(destdir).st_dev):
+ # Use a hard link to save space
+ os.link(srcpath, destpath)
+ else:
+ shutil.copyfile(srcpath, destpath)
+ f.write('%s\n' % destpath)
+ else:
+ bb.plain('Not copying packages for recipe %s' % pn)
+
+do_cleansstate[postfuncs] += "pfs_cleanpkgs"
+python pfs_cleanpkgs () {
+ import errno
+ for pkgclass in (d.getVar('PACKAGE_CLASSES', True) or '').split():
+ if pkgclass.startswith('package_'):
+ pkgtype = pkgclass.split('_', 1)[1]
+ deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper(), True)
+ prepath = deploydir + '-prediff'
+ pcmanifest = os.path.join(prepath, d.expand('pkg-compare-manifest-${MULTIMACH_TARGET_SYS}-${PN}'))
+ try:
+ with open(pcmanifest, 'r') as f:
+ for line in f:
+ fn = line.rstrip()
+ if fn:
+ try:
+ os.remove(fn)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ pass
+ os.remove(pcmanifest)
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ pass
+}
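
Reduced to its essentials, the copy decision in package_compare_impl() above is: copy when no old package matches the version-wildcarded spec, or when pkg-diff.sh (from build-compare, added to the task's depends) reports a difference. A sketch under those assumptions, with hypothetical paths:

    import glob
    import subprocess

    def needs_copy(destpathspec, srcpath):
        oldfiles = glob.glob(destpathspec)
        if not oldfiles:
            return True  # no previous package to compare against
        # pkg-diff.sh exits non-zero when the packages differ materially
        return subprocess.call(['pkg-diff.sh', oldfiles[-1], srcpath]) != 0
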
diff --git a/import-layers/yocto-poky/meta/classes/packagegroup.bbclass b/import-layers/yocto-poky/meta/classes/packagegroup.bbclass
index 38bdbd382..3928c8a4a 100644
--- a/import-layers/yocto-poky/meta/classes/packagegroup.bbclass
+++ b/import-layers/yocto-poky/meta/classes/packagegroup.bbclass
@@ -14,6 +14,8 @@ PACKAGE_ARCH ?= "all"
# Fully expanded - so it applies the overrides as well
PACKAGE_ARCH_EXPANDED := "${PACKAGE_ARCH}"
+LICENSE ?= "MIT"
+
inherit ${@oe.utils.ifelse(d.getVar('PACKAGE_ARCH_EXPANDED', True) == 'all', 'allarch', '')}
# This automatically adds -dbg and -dev flavours of all PACKAGES
diff --git a/import-layers/yocto-poky/meta/classes/patch.bbclass b/import-layers/yocto-poky/meta/classes/patch.bbclass
index 3d22ad838..1f6927be0 100644
--- a/import-layers/yocto-poky/meta/classes/patch.bbclass
+++ b/import-layers/yocto-poky/meta/classes/patch.bbclass
@@ -5,6 +5,9 @@ QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc"
PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
+PATCH_GIT_USER_NAME ?= "OpenEmbedded"
+PATCH_GIT_USER_EMAIL ?= "oe.patch@oe"
+
inherit terminal
def src_patches(d, all = False ):
diff --git a/import-layers/yocto-poky/meta/classes/pixbufcache.bbclass b/import-layers/yocto-poky/meta/classes/pixbufcache.bbclass
index dbe11e12d..3f48a0f34 100644
--- a/import-layers/yocto-poky/meta/classes/pixbufcache.bbclass
+++ b/import-layers/yocto-poky/meta/classes/pixbufcache.bbclass
@@ -54,6 +54,7 @@ gdkpixbuf_complete() {
# An error exit during populate_sysroot_setscene allows bitbake to
# try to recover by re-building the package.
#
+DEPENDS_append_class-native = " gdk-pixbuf-native"
SSTATEPOSTINSTFUNCS_append_class-native = " pixbufcache_sstate_postinst"
# See base.bbclass for the other half of this
@@ -61,7 +62,8 @@ pixbufcache_sstate_postinst() {
if [ "${BB_CURRENTTASK}" = "populate_sysroot" ]; then
${gdkpixbuf_complete}
elif [ "${BB_CURRENTTASK}" = "populate_sysroot_setscene" ]; then
- echo "${gdkpixbuf_complete}" >> ${STAGING_DIR}/sstatecompletions
+ if [ -x ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders ]; then
+ echo "${gdkpixbuf_complete}" >> ${STAGING_DIR}/sstatecompletions
+ fi
fi
}
-
diff --git a/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass b/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass
index 008bb577c..4462b52cb 100644
--- a/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass
+++ b/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass
@@ -26,6 +26,8 @@ SDK_DIR = "${WORKDIR}/sdk"
SDK_OUTPUT = "${SDK_DIR}/image"
SDK_DEPLOY = "${DEPLOY_DIR}/sdk"
+SDKDEPLOYDIR = "${WORKDIR}/${SDKMACHINE}-deploy-${PN}-populate-sdk"
+
B_task-populate-sdk = "${SDK_DIR}"
SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
@@ -58,8 +60,8 @@ SDK_RELOCATE_AFTER_INSTALL ?= "1"
SDKEXTPATH ?= "~/${@d.getVar('DISTRO', True)}_sdk"
SDK_TITLE ?= "${@d.getVar('DISTRO_NAME', True) or d.getVar('DISTRO', True)} SDK"
-SDK_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.target.manifest"
-SDK_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"
+SDK_TARGET_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.target.manifest"
+SDK_HOST_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"
python write_target_sdk_manifest () {
from oe.sdk import sdk_list_installed_packages
from oe.utils import format_pkg_list
@@ -90,9 +92,9 @@ SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; tar_sdk; ${SDK
# Some archs override this, we need the nativesdk version
# turns out this is hard to get from the datastore due to TRANSLATED_TARGET_ARCH
# manipulation.
-SDK_OLDEST_KERNEL = "2.6.32"
+SDK_OLDEST_KERNEL = "3.2.0"
-fakeroot python do_populate_sdk() {
+def populate_sdk_common(d):
from oe.sdk import populate_sdk
from oe.manifest import create_manifest, Manifest
@@ -114,7 +116,16 @@ fakeroot python do_populate_sdk() {
manifest_type=Manifest.MANIFEST_TYPE_SDK_TARGET)
populate_sdk(d)
+
+fakeroot python do_populate_sdk() {
+ populate_sdk_common(d)
}
+SSTATETASKS += "do_populate_sdk"
+SSTATE_SKIP_CREATION_task-populate-sdk = '1'
+do_populate_sdk[cleandirs] = "${SDKDEPLOYDIR}"
+do_populate_sdk[sstate-inputdirs] = "${SDKDEPLOYDIR}"
+do_populate_sdk[sstate-outputdirs] = "${SDK_DEPLOY}"
+do_populate_sdk[stamp-extra-info] = "${MACHINE}${SDKMACHINE}"
fakeroot create_sdk_files() {
cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
@@ -136,7 +147,8 @@ python check_sdk_sysroots() {
return os.path.abspath(path)
# Get scan root
- SCAN_ROOT = norm_path("${SDK_OUTPUT}/${SDKPATH}/sysroots/")
+ SCAN_ROOT = norm_path("%s/%s/sysroots/" % (d.getVar('SDK_OUTPUT', True),
+ d.getVar('SDKPATH', True)))
bb.note('Checking SDK sysroots at ' + SCAN_ROOT)
@@ -180,14 +192,14 @@ SDKTAROPTS = "--owner=root --group=root"
fakeroot tar_sdk() {
# Package it up
- mkdir -p ${SDK_DEPLOY}
+ mkdir -p ${SDKDEPLOYDIR}
cd ${SDK_OUTPUT}/${SDKPATH}
- tar ${SDKTAROPTS} -cf - . | pixz > ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
+ tar ${SDKTAROPTS} -cf - . | pixz > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
}
fakeroot create_shar() {
# copy in the template shar extractor script
- cp ${COREBASE}/meta/files/toolchain-shar-extract.sh ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
+ cp ${COREBASE}/meta/files/toolchain-shar-extract.sh ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
rm -f ${T}/pre_install_command ${T}/post_install_command
@@ -203,7 +215,7 @@ ${SDK_POST_INSTALL_COMMAND}
EOF
sed -i -e '/@SDK_PRE_INSTALL_COMMAND@/r ${T}/pre_install_command' \
-e '/@SDK_POST_INSTALL_COMMAND@/r ${T}/post_install_command' \
- ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
+ ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
# substitute variables
sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \
@@ -215,16 +227,16 @@ EOF
-e 's#@SDK_VERSION@#${SDK_VERSION}#g' \
-e '/@SDK_PRE_INSTALL_COMMAND@/d' \
-e '/@SDK_POST_INSTALL_COMMAND@/d' \
- ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
+ ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
# add execution permission
- chmod +x ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
+ chmod +x ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
# append the SDK tarball
- cat ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.xz >> ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
+ cat ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.tar.xz >> ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
# delete the old tarball, we don't need it anymore
- rm ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
+ rm ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
}
populate_sdk_log_check() {
@@ -251,7 +263,7 @@ def sdk_command_variables(d):
def sdk_variables(d):
variables = ['BUILD_IMAGES_FROM_FEEDS','SDK_OS','SDK_OUTPUT','SDKPATHNATIVE','SDKTARGETSYSROOT','SDK_DIR','SDK_VENDOR','SDKIMAGE_INSTALL_COMPLEMENTARY','SDK_PACKAGE_ARCHS','SDK_OUTPUT',
'SDKTARGETSYSROOT','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS','PACKAGE_ARCHS',
- 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI']
+ 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI', 'PACKAGE_EXCLUDE_COMPLEMENTARY']
variables.extend(sdk_command_variables(d))
return " ".join(variables)
diff --git a/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass b/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass
index 87518d176..0f0525d76 100644
--- a/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass
+++ b/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass
@@ -20,34 +20,43 @@ SDK_EXT_task-populate-sdk-ext = "-ext"
# Options are full or minimal
SDK_EXT_TYPE ?= "full"
+SDK_INCLUDE_PKGDATA ?= "0"
+SDK_INCLUDE_TOOLCHAIN ?= "${@'1' if d.getVar('SDK_EXT_TYPE', True) == 'full' else '0'}"
SDK_RECRDEP_TASKS ?= ""
SDK_LOCAL_CONF_WHITELIST ?= ""
SDK_LOCAL_CONF_BLACKLIST ?= "CONF_VERSION \
BB_NUMBER_THREADS \
+ BB_NUMBER_PARSE_THREADS \
PARALLEL_MAKE \
PRSERV_HOST \
SSTATE_MIRRORS \
+ DL_DIR \
+ SSTATE_DIR \
+ TMPDIR \
"
SDK_INHERIT_BLACKLIST ?= "buildhistory icecc"
SDK_UPDATE_URL ?= ""
SDK_TARGETS ?= "${PN}"
-def get_sdk_install_targets(d):
+def get_sdk_install_targets(d, images_only=False):
sdk_install_targets = ''
- if d.getVar('SDK_EXT_TYPE', True) != 'minimal':
+ if images_only or d.getVar('SDK_EXT_TYPE', True) != 'minimal':
sdk_install_targets = d.getVar('SDK_TARGETS', True)
depd = d.getVar('BB_TASKDEPDATA', False)
- for v in depd.itervalues():
+ for v in depd.values():
if v[1] == 'do_image_complete':
if v[0] not in sdk_install_targets:
sdk_install_targets += ' {}'.format(v[0])
- if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1':
- sdk_install_targets += ' meta-world-pkgdata:do_allpackagedata'
+ if not images_only:
+ if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1':
+ sdk_install_targets += ' meta-world-pkgdata:do_allpackagedata'
+ if d.getVar('SDK_INCLUDE_TOOLCHAIN', True) == '1':
+ sdk_install_targets += ' meta-extsdk-toolchain:do_populate_sysroot'
return sdk_install_targets
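
The BB_TASKDEPDATA scan above relies only on fields [0] (target name) and [1] (task name) of each dependency record; a sketch with hypothetical records:

    def image_targets(depd, sdk_targets):
        targets = sdk_targets
        for v in depd.values():
            if v[1] == 'do_image_complete' and v[0] not in targets:
                targets += ' {}'.format(v[0])
        return targets

    depd = {1: ('core-image-minimal', 'do_image_complete'),
            2: ('quilt-native', 'do_populate_sysroot')}
    print(image_targets(depd, 'my-image'))  # my-image core-image-minimal
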
@@ -76,6 +85,67 @@ SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME', True) or d.getVar('DISTRO', True)} Extensible SDK"
+def clean_esdk_builddir(d, sdkbasepath):
+ """Clean up traces of the fake build for create_filtered_tasklist()"""
+ import shutil
+ cleanpaths = 'cache conf/sanity_info conf/templateconf.cfg tmp'.split()
+ for pth in cleanpaths:
+ fullpth = os.path.join(sdkbasepath, pth)
+ if os.path.isdir(fullpth):
+ shutil.rmtree(fullpth)
+ elif os.path.isfile(fullpth):
+ os.remove(fullpth)
+
+def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
+ """
+ Create a filtered list of tasks. Also double-checks that the build system
+ within the SDK basically works and required sstate artifacts are available.
+ """
+ import tempfile
+ import shutil
+ import oe.copy_buildsystem
+
+ # Create a temporary build directory that we can pass to the env setup script
+ shutil.copyfile(sdkbasepath + '/conf/local.conf', sdkbasepath + '/conf/local.conf.bak')
+ try:
+ with open(sdkbasepath + '/conf/local.conf', 'a') as f:
+ # Force the use of sstate from the build system
+ f.write('\nSSTATE_DIR_forcevariable = "%s"\n' % d.getVar('SSTATE_DIR', True))
+ f.write('SSTATE_MIRRORS_forcevariable = ""\n')
+ # Ensure TMPDIR is the default so that clean_esdk_builddir() can delete it
+ f.write('TMPDIR_forcevariable = "${TOPDIR}/tmp"\n')
+ # Drop uninative if the build isn't using it (or else NATIVELSBSTRING will
+ # be different and we won't be able to find our native sstate)
+ if not bb.data.inherits_class('uninative', d):
+ f.write('INHERIT_remove = "uninative"\n')
+
+ # Unfortunately the default SDKPATH (or even a custom value) may contain characters that bitbake
+ # will not allow in its COREBASE path, so we need to rename the directory temporarily
+ temp_sdkbasepath = d.getVar('SDK_OUTPUT', True) + '/tmp-renamed-sdk'
+ # Delete any existing temp dir
+ try:
+ shutil.rmtree(temp_sdkbasepath)
+ except FileNotFoundError:
+ pass
+ os.rename(sdkbasepath, temp_sdkbasepath)
+ try:
+ cmdprefix = '. %s .; ' % conf_initpath
+ logfile = d.getVar('WORKDIR', True) + '/tasklist_bb_log.txt'
+ try:
+ oe.copy_buildsystem.check_sstate_task_list(d, get_sdk_install_targets(d), tasklistfile, cmdprefix=cmdprefix, cwd=temp_sdkbasepath, logfile=logfile)
+ except bb.process.ExecutionError as e:
+ msg = 'Failed to generate filtered task list for extensible SDK:\n%s' % e.stdout.rstrip()
+ if 'attempted to execute unexpectedly and should have been setscened' in e.stdout:
+ msg += '\n----------\n\nNOTE: "attempted to execute unexpectedly and should have been setscened" errors indicate this may be caused by missing sstate artifacts that were likely produced in earlier builds, but have been subsequently deleted for some reason.\n'
+ bb.fatal(msg)
+ finally:
+ os.rename(temp_sdkbasepath, sdkbasepath)
+        # Clean out the residue of running bitbake (which is effectively
+        # what check_sstate_task_list() does)
+ clean_esdk_builddir(d, sdkbasepath)
+ finally:
+ os.replace(sdkbasepath + '/conf/local.conf.bak', sdkbasepath + '/conf/local.conf')
+
python copy_buildsystem () {
import re
import shutil
@@ -125,8 +195,8 @@ python copy_buildsystem () {
d.setVar('scriptrelpath', scriptrelpath)
# Write out config file for devtool
- import ConfigParser
- config = ConfigParser.SafeConfigParser()
+ import configparser
+ config = configparser.SafeConfigParser()
config.add_section('General')
config.set('General', 'bitbake_subdir', conf_bbpath)
config.set('General', 'init_path', conf_initpath)
@@ -170,6 +240,14 @@ python copy_buildsystem () {
f.write(' $' + '{SDKBASEMETAPATH}/workspace \\\n')
f.write(' "\n')
+ # Copy uninative tarball
+ # For now this is where uninative.bbclass expects the tarball
+ uninative_file = d.expand('${SDK_DEPLOY}/${BUILD_ARCH}-nativesdk-libc.tar.bz2')
+ uninative_checksum = bb.utils.sha256_file(uninative_file)
+ uninative_outdir = '%s/downloads/uninative/%s' % (baseoutpath, uninative_checksum)
+ bb.utils.mkdirhier(uninative_outdir)
+ shutil.copy(uninative_file, uninative_outdir)
+
env_whitelist = (d.getVar('BB_ENV_EXTRAWHITE', True) or '').split()
env_whitelist_values = {}
@@ -204,7 +282,10 @@ python copy_buildsystem () {
# Write a newline just in case there's none at the end of the original
f.write('\n')
- f.write('INHERIT += "%s"\n\n' % 'uninative')
+ f.write('DL_DIR = "${TOPDIR}/downloads"\n')
+
+ f.write('INHERIT += "%s"\n' % 'uninative')
+ f.write('UNINATIVE_CHECKSUM[%s] = "%s"\n\n' % (d.getVar('BUILD_ARCH', True), uninative_checksum))
f.write('CONF_VERSION = "%s"\n\n' % d.getVar('CONF_VERSION', False))
# Some classes are not suitable for SDK, remove them from INHERIT
@@ -219,9 +300,12 @@ python copy_buildsystem () {
# warning.
f.write('SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK = "none"\n\n')
- # Error if the sigs in the locked-signature file don't match
+ # Warn if the sigs in the locked-signature file don't match
# the sig computed from the metadata.
- f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "error"\n\n')
+ f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n\n')
+
+ # Set up whitelist for run on install
+ f.write('BB_SETSCENE_ENFORCE_WHITELIST = "%:* *:do_shared_workdir *:do_rm_work"\n\n')
# Hide the config information from bitbake output (since it's fixed within the SDK)
f.write('BUILDCFG_HEADER = ""\n')
@@ -263,7 +347,7 @@ python copy_buildsystem () {
# Ensure any variables set from the external environment (by way of
# BB_ENV_EXTRAWHITE) are set in the SDK's configuration
extralines = []
- for name, value in env_whitelist_values.iteritems():
+ for name, value in env_whitelist_values.items():
actualvalue = d.getVar(name, True) or ''
if value != actualvalue:
extralines.append('%s = "%s"\n' % (name, actualvalue))
@@ -276,7 +360,7 @@ python copy_buildsystem () {
f.write('\n')
# Filter the locked signatures file to just the sstate tasks we are interested in
- excluded_targets = d.getVar('SDK_TARGETS', True)
+ excluded_targets = get_sdk_install_targets(d, images_only=True)
sigfile = d.getVar('WORKDIR', True) + '/locked-sigs.inc'
lockedsigs_pruned = baseoutpath + '/conf/locked-sigs.inc'
oe.copy_buildsystem.prune_lockedsigs([],
@@ -289,6 +373,15 @@ python copy_buildsystem () {
# uninative.bbclass sets NATIVELSBSTRING to 'universal'
fixedlsbstring = 'universal'
+ sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN', True) == '1')
+ sdk_ext_type = d.getVar('SDK_EXT_TYPE', True)
+ if sdk_ext_type != 'minimal' or sdk_include_toolchain or derivative:
+ # Create the filtered task list used to generate the sstate cache shipped with the SDK
+ tasklistfn = d.getVar('WORKDIR', True) + '/tasklist.txt'
+ create_filtered_tasklist(d, baseoutpath, tasklistfn, conf_initpath)
+ else:
+ tasklistfn = None
+
# Add packagedata if enabled
if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1':
lockedsigs_base = d.getVar('WORKDIR', True) + '/locked-sigs-base.inc'
@@ -300,7 +393,21 @@ python copy_buildsystem () {
lockedsigs_pruned,
lockedsigs_copy)
- if d.getVar('SDK_EXT_TYPE', True) == 'minimal':
+ if sdk_include_toolchain:
+ lockedsigs_base = d.getVar('WORKDIR', True) + '/locked-sigs-base2.inc'
+ lockedsigs_toolchain = d.getVar('STAGING_DIR_HOST', True) + '/locked-sigs/locked-sigs-extsdk-toolchain.inc'
+ shutil.move(lockedsigs_pruned, lockedsigs_base)
+ oe.copy_buildsystem.merge_lockedsigs([],
+ lockedsigs_base,
+ lockedsigs_toolchain,
+ lockedsigs_pruned)
+ oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_toolchain,
+ d.getVar('SSTATE_DIR', True),
+ sstate_out, d,
+ fixedlsbstring,
+ filterfile=tasklistfn)
+
+ if sdk_ext_type == 'minimal':
if derivative:
# Assume the user is not going to set up an additional sstate
# mirror, thus we need to copy the additional artifacts (from
@@ -316,12 +423,14 @@ python copy_buildsystem () {
oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_extra,
d.getVar('SSTATE_DIR', True),
sstate_out, d,
- fixedlsbstring)
+ fixedlsbstring,
+ filterfile=tasklistfn)
else:
oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_pruned,
d.getVar('SSTATE_DIR', True),
sstate_out, d,
- fixedlsbstring)
+ fixedlsbstring,
+ filterfile=tasklistfn)
# We don't need sstate do_package files
for root, dirs, files in os.walk(sstate_out):
@@ -346,38 +455,98 @@ python copy_buildsystem () {
f.write('%s\t%s\n' % (chksum, os.path.relpath(fn, baseoutpath)))
}
-def extsdk_get_buildtools_filename(d):
- return '*-buildtools-nativesdk-standalone-*.sh'
+def get_current_buildtools(d):
+ """Get the file name of the current buildtools installer"""
+ import glob
+ btfiles = glob.glob(os.path.join(d.getVar('SDK_DEPLOY', True), '*-buildtools-nativesdk-standalone-*.sh'))
+ btfiles.sort(key=os.path.getctime)
+ return os.path.basename(btfiles[-1])
+
+def get_sdk_required_utilities(buildtools_fn, d):
+ """Find required utilities that aren't provided by the buildtools"""
+ sanity_required_utilities = (d.getVar('SANITY_REQUIRED_UTILITIES', True) or '').split()
+ sanity_required_utilities.append(d.expand('${BUILD_PREFIX}gcc'))
+ sanity_required_utilities.append(d.expand('${BUILD_PREFIX}g++'))
+ buildtools_installer = os.path.join(d.getVar('SDK_DEPLOY', True), buildtools_fn)
+ filelist, _ = bb.process.run('%s -l' % buildtools_installer)
+ localdata = bb.data.createCopy(d)
+ localdata.setVar('SDKPATH', '.')
+ sdkpathnative = localdata.getVar('SDKPATHNATIVE', True)
+ sdkbindirs = [localdata.getVar('bindir_nativesdk', True),
+ localdata.getVar('sbindir_nativesdk', True),
+ localdata.getVar('base_bindir_nativesdk', True),
+ localdata.getVar('base_sbindir_nativesdk', True)]
+ for line in filelist.splitlines():
+ splitline = line.split()
+ if len(splitline) > 5:
+ fn = splitline[5]
+ if not fn.startswith('./'):
+ fn = './%s' % fn
+ if fn.startswith(sdkpathnative):
+ relpth = '/' + os.path.relpath(fn, sdkpathnative)
+ for bindir in sdkbindirs:
+ if relpth.startswith(bindir):
+ relpth = os.path.relpath(relpth, bindir)
+ if relpth in sanity_required_utilities:
+ sanity_required_utilities.remove(relpth)
+ break
+ return ' '.join(sanity_required_utilities)
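
The field parsing above assumes 'tar tv'-style listing lines from the installer's -l output, where field 5 is the member name. A minimal standalone sketch of the same bindir-stripping logic, with purely illustrative paths and utility names:

    import os

    sdkpathnative = './tmp/sysroots/x86_64'      # stands in for SDKPATHNATIVE
    sdkbindirs = ['/usr/bin', '/usr/sbin', '/bin', '/sbin']
    required = ['python', 'gcc', 'tar']          # stands in for SANITY_REQUIRED_UTILITIES

    # one 'tar tv'-style line; field 5 is the member name
    line = '-rwxr-xr-x root/root 12345 2016-01-01 12:00 tmp/sysroots/x86_64/usr/bin/python'
    fn = './' + line.split()[5]
    if fn.startswith(sdkpathnative):
        relpth = '/' + os.path.relpath(fn, sdkpathnative)
        for bindir in sdkbindirs:
            if relpth.startswith(bindir):
                name = os.path.relpath(relpth, bindir)
                if name in required:
                    required.remove(name)        # provided by the buildtools
                break
    print(required)                              # -> ['gcc', 'tar']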
install_tools() {
install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}
- lnr ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath}/devtool ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/devtool
- lnr ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath}/recipetool ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/recipetool
+ scripts="devtool recipetool oe-find-native-sysroot runqemu*"
+ for script in $scripts; do
+ for scriptfn in `find ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath} -maxdepth 1 -executable -name "$script"`; do
+ lnr ${scriptfn} ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/`basename $scriptfn`
+ done
+ done
+ # We can't use the same method as above because files in the sysroot won't exist at this point
+ # (they get populated from sstate on installation)
+	if [ "${SDK_INCLUDE_TOOLCHAIN}" = "1" ] ; then
+ binrelpath=${@os.path.relpath(d.getVar('STAGING_BINDIR_NATIVE',True), d.getVar('TOPDIR', True))}
+ lnr ${SDK_OUTPUT}/${SDKPATH}/$binrelpath/unfsd ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/unfsd
+ fi
touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase
- localconf=${SDK_OUTPUT}/${SDKPATH}/conf/local.conf
-
# find latest buildtools-tarball and install it
- buildtools_path=`ls -t1 ${SDK_DEPLOY}/${@extsdk_get_buildtools_filename(d)} | head -n1`
- install $buildtools_path ${SDK_OUTPUT}/${SDKPATH}
-
- # For now this is where uninative.bbclass expects the tarball
- chksum=`sha256sum ${SDK_DEPLOY}/${BUILD_ARCH}-nativesdk-libc.tar.bz2 | cut -f 1 -d ' '`
- install -d ${SDK_OUTPUT}/${SDKPATH}/downloads/uninative/$chksum/
- install ${SDK_DEPLOY}/${BUILD_ARCH}-nativesdk-libc.tar.bz2 ${SDK_OUTPUT}/${SDKPATH}/downloads/uninative/$chksum/
- echo "UNINATIVE_CHECKSUM[${BUILD_ARCH}] = '$chksum'" >> ${SDK_OUTPUT}/${SDKPATH}/conf/local.conf
+ install ${SDK_DEPLOY}/${SDK_BUILDTOOLS_INSTALLER} ${SDK_OUTPUT}/${SDKPATH}
install -m 0644 ${COREBASE}/meta/files/ext-sdk-prepare.py ${SDK_OUTPUT}/${SDKPATH}
}
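
For reference, lnr creates relative rather than absolute symlinks so the installed SDK stays relocatable; a rough Python equivalent of what it does (not the actual implementation):

    import os

    def lnr(src, dst):
        # link dst -> src via a relative path, keeping the tree relocatable
        rel = os.path.relpath(src, os.path.dirname(dst))
        os.symlink(rel, dst)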
do_populate_sdk_ext[file-checksums] += "${COREBASE}/meta/files/ext-sdk-prepare.py:True"
-# Since bitbake won't run as root it doesn't make sense to try and install
-# the extensible sdk as root.
sdk_ext_preinst() {
+ # Since bitbake won't run as root it doesn't make sense to try and install
+ # the extensible sdk as root.
if [ "`id -u`" = "0" ]; then
echo "ERROR: The extensible sdk cannot be installed as root."
exit 1
fi
+ if ! command -v locale > /dev/null; then
+ echo "ERROR: The installer requires the locale command, please install it first"
+ exit 1
+ fi
+	# Check that the locale selected via LC_ALL above is actually available
+	canonicalised_locale=`echo $LC_ALL | sed 's/UTF-8/utf8/'`
+	if ! locale -a | grep -q $canonicalised_locale ; then
+		echo "ERROR: the installer requires the $LC_ALL locale to be installed (it need not be the default), please install it first"
+		exit 1
+	fi
+	# The relocation script used by the buildtools installer requires python
+	if ! command -v python > /dev/null; then
+		echo "ERROR: The installer requires python, please install it first"
+		exit 1
+	fi
+ missing_utils=""
+ for util in ${SDK_REQUIRED_UTILITIES}; do
+ if ! command -v $util > /dev/null; then
+ missing_utils="$missing_utils $util"
+ fi
+ done
+ if [ -n "$missing_utils" ] ; then
+ echo "ERROR: the SDK requires the following missing utilities, please install them: $missing_utils"
+ exit 1
+ fi
SDK_EXTENSIBLE="1"
if [ "$publish" = "1" ] ; then
EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=ext-sdk-prepare.py"
@@ -392,14 +561,16 @@ SDK_PRE_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_preinst}"
sdk_ext_postinst() {
printf "\nExtracting buildtools...\n"
cd $target_sdk_dir
- printf "buildtools\ny" | ./*buildtools-nativesdk-standalone* > /dev/null || ( printf 'ERROR: buildtools installation failed\n' ; exit 1 )
+ env_setup_script="$target_sdk_dir/environment-setup-${REAL_MULTIMACH_TARGET_SYS}"
+ printf "buildtools\ny" | ./${SDK_BUILDTOOLS_INSTALLER} > buildtools.log || { printf 'ERROR: buildtools installation failed:\n' ; cat buildtools.log ; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
# Delete the buildtools tar file since it won't be used again
- rm ./*buildtools-nativesdk-standalone*.sh -f
+ rm -f ./${SDK_BUILDTOOLS_INSTALLER}
+ # We don't need the log either since it succeeded
+ rm -f buildtools.log
# Make sure when the user sets up the environment, they also get
# the buildtools-tarball tools in their path.
- env_setup_script="$target_sdk_dir/environment-setup-${REAL_MULTIMACH_TARGET_SYS}"
echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script
	# Allow bitbake environment setup to be run as part of this sdk.
@@ -420,7 +591,7 @@ sdk_ext_postinst() {
# current working directory when first ran, nor will it set $1 when
# sourcing a script. That is why this has to look so ugly.
LOGFILE="$target_sdk_dir/preparing_build_system.log"
- sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python $target_sdk_dir/ext-sdk-prepare.py '${SDK_INSTALL_TARGETS}' >> $LOGFILE 2>&1" || { echo "ERROR: SDK preparation failed: see $LOGFILE"; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
+ sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
rm $target_sdk_dir/ext-sdk-prepare.py
fi
echo done
@@ -438,12 +609,22 @@ fakeroot python do_populate_sdk_ext() {
bb.fatal('The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is set to %s (likely via setting SDKMACHINE) which is different from the architecture of the build machine (%s). Unable to continue.' % (d.getVar('SDK_ARCH', True), d.getVar('BUILD_ARCH', True)))
d.setVar('SDK_INSTALL_TARGETS', get_sdk_install_targets(d))
+ buildtools_fn = get_current_buildtools(d)
+ d.setVar('SDK_REQUIRED_UTILITIES', get_sdk_required_utilities(buildtools_fn, d))
+ d.setVar('SDK_BUILDTOOLS_INSTALLER', buildtools_fn)
+ d.setVar('SDKDEPLOYDIR', '${SDKEXTDEPLOYDIR}')
- bb.build.exec_func("do_populate_sdk", d)
+ populate_sdk_common(d)
}
def get_ext_sdk_depends(d):
- return d.getVarFlag('do_rootfs', 'depends', True) + ' ' + d.getVarFlag('do_build', 'depends', True)
+ # Note: the deps varflag is a list not a string, so we need to specify expand=False
+ deps = d.getVarFlag('do_image_complete', 'deps', False)
+ pn = d.getVar('PN', True)
+ deplist = ['%s:%s' % (pn, dep) for dep in deps]
+ for task in ['do_image_complete', 'do_rootfs', 'do_build']:
+ deplist.extend((d.getVarFlag(task, 'depends', True) or '').split())
+ return ' '.join(deplist)
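
The result is a space-separated list of pn:taskname entries; a toy rendering of the list-comprehension step (names invented):

    deps = ['do_write_qemuboot_conf']      # e.g. contents of the deps varflag
    pn = 'core-image-minimal'
    print(' '.join('%s:%s' % (pn, dep) for dep in deps))
    # -> core-image-minimal:do_write_qemuboot_conf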
python do_sdk_depends() {
# We have to do this separately in its own task so we avoid recursing into
@@ -471,7 +652,8 @@ do_populate_sdk_ext[dirs] = "${@d.getVarFlag('do_populate_sdk', 'dirs', False)}"
do_populate_sdk_ext[depends] = "${@d.getVarFlag('do_populate_sdk', 'depends', False)} \
buildtools-tarball:do_populate_sdk uninative-tarball:do_populate_sdk \
- ${@'meta-world-pkgdata:do_collect_packagedata' if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1' else ''}"
+ ${@'meta-world-pkgdata:do_collect_packagedata' if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1' else ''} \
+ ${@'meta-extsdk-toolchain:do_locked_sigs' if d.getVar('SDK_INCLUDE_TOOLCHAIN', True) == '1' else ''}"
do_populate_sdk_ext[rdepends] += "${@' '.join([x + ':do_build' for x in d.getVar('SDK_TARGETS', True).split()])}"
@@ -484,4 +666,13 @@ do_populate_sdk_ext[vardeps] += "copy_buildsystem \
# always runs.
do_populate_sdk_ext[nostamp] = "1"
+SDKEXTDEPLOYDIR = "${WORKDIR}/deploy-${PN}-populate-sdk-ext"
+
+SSTATETASKS += "do_populate_sdk_ext"
+SSTATE_SKIP_CREATION_task-populate-sdk-ext = '1'
+do_populate_sdk_ext[cleandirs] = "${SDKDEPLOYDIR}"
+do_populate_sdk_ext[sstate-inputdirs] = "${SDKEXTDEPLOYDIR}"
+do_populate_sdk_ext[sstate-outputdirs] = "${SDK_DEPLOY}"
+do_populate_sdk_ext[stamp-extra-info] = "${MACHINE}"
+
addtask populate_sdk_ext after do_sdk_depends
diff --git a/import-layers/yocto-poky/meta/classes/python-dir.bbclass b/import-layers/yocto-poky/meta/classes/python-dir.bbclass
index ebfa4b30f..a11dc350b 100644
--- a/import-layers/yocto-poky/meta/classes/python-dir.bbclass
+++ b/import-layers/yocto-poky/meta/classes/python-dir.bbclass
@@ -1,5 +1,5 @@
-PYTHON_BASEVERSION ?= "2.7"
-PYTHON_ABI ?= ""
+PYTHON_BASEVERSION = "2.7"
+PYTHON_ABI = ""
PYTHON_DIR = "python${PYTHON_BASEVERSION}"
-PYTHON_PN = "python${@'' if '${PYTHON_BASEVERSION}'.startswith('2') else '3'}"
+PYTHON_PN = "python"
PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"
diff --git a/import-layers/yocto-poky/meta/classes/python3-dir.bbclass b/import-layers/yocto-poky/meta/classes/python3-dir.bbclass
new file mode 100644
index 000000000..06bb046d9
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/python3-dir.bbclass
@@ -0,0 +1,5 @@
+PYTHON_BASEVERSION = "3.5"
+PYTHON_ABI = "m"
+PYTHON_DIR = "python${PYTHON_BASEVERSION}"
+PYTHON_PN = "python3"
+PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"
diff --git a/import-layers/yocto-poky/meta/classes/python3native.bbclass b/import-layers/yocto-poky/meta/classes/python3native.bbclass
index 8ec6b769d..ef468b3fd 100644
--- a/import-layers/yocto-poky/meta/classes/python3native.bbclass
+++ b/import-layers/yocto-poky/meta/classes/python3native.bbclass
@@ -1,7 +1,13 @@
-PYTHON_BASEVERSION = "3.5"
-
-inherit python-dir
+inherit python3-dir
PYTHON="${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}"
EXTRANATIVEPATH += "${PYTHON_PN}-native"
-DEPENDS += " ${PYTHON_PN}-native "
+DEPENDS_append = " ${PYTHON_PN}-native "
+
+# python-config and other scripts are using distutils modules
+# which we patch to access these variables
+export STAGING_INCDIR
+export STAGING_LIBDIR
+
+# autoconf macros will use their internal default preference otherwise
+export PYTHON
diff --git a/import-layers/yocto-poky/meta/classes/pythonnative.bbclass b/import-layers/yocto-poky/meta/classes/pythonnative.bbclass
index 97029dc52..4e0381b56 100644
--- a/import-layers/yocto-poky/meta/classes/pythonnative.bbclass
+++ b/import-layers/yocto-poky/meta/classes/pythonnative.bbclass
@@ -5,4 +5,12 @@ PYTHON="${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}"
# PYTHON_EXECUTABLE is used by cmake
PYTHON_EXECUTABLE="${PYTHON}"
EXTRANATIVEPATH += "${PYTHON_PN}-native"
-DEPENDS += " ${PYTHON_PN}-native "
+DEPENDS_append = " ${PYTHON_PN}-native "
+
+# python-config and other scripts are using distutils modules
+# which we patch to access these variables
+export STAGING_INCDIR
+export STAGING_LIBDIR
+
+# autoconf macros will use their internal default preference otherwise
+export PYTHON
diff --git a/import-layers/yocto-poky/meta/classes/qemu.bbclass b/import-layers/yocto-poky/meta/classes/qemu.bbclass
index 75739dbbf..f2d4d1c9e 100644
--- a/import-layers/yocto-poky/meta/classes/qemu.bbclass
+++ b/import-layers/yocto-poky/meta/classes/qemu.bbclass
@@ -4,6 +4,11 @@
#
def qemu_target_binary(data):
+ package_arch = data.getVar("PACKAGE_ARCH", True)
+ qemu_target_binary = (data.getVar("QEMU_TARGET_BINARY_%s" % package_arch, True) or "")
+ if qemu_target_binary:
+ return qemu_target_binary
+
target_arch = data.getVar("TARGET_ARCH", True)
if target_arch in ("i486", "i586", "i686"):
target_arch = "i386"
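
This makes the emulator binary overridable per package architecture from the metadata; a purely illustrative setting would be:

    QEMU_TARGET_BINARY_myarch = "qemu-system-arm"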
diff --git a/import-layers/yocto-poky/meta/classes/qemuboot.bbclass b/import-layers/yocto-poky/meta/classes/qemuboot.bbclass
new file mode 100644
index 000000000..b5cc93dc9
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/qemuboot.bbclass
@@ -0,0 +1,82 @@
+# Help runqemu boot the target board. "QB" means Qemu Boot. The following
+# vars can be set in conf files, such as <bsp.conf>, so that the image can
+# be booted by runqemu:
+#
+# QB_SYSTEM_NAME: qemu name, e.g., "qemu-system-i386"
+# QB_OPT_APPEND: options to append to qemu, e.g., "-show-cursor"
+# QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage"
+# QB_DEFAULT_FSTYPE: default FSTYPE to boot, e.g., "ext4"
+# QB_MEM: memory, e.g., "-m 512"
+# QB_MACHINE: qemu machine, e.g., "-machine virt"
+# QB_CPU: qemu cpu, e.g., "-cpu qemu32"
+# QB_CPU_KVM: similar to QB_CPU, but used when KVM is enabled, e.g., '-cpu kvm64';
+#             set it when KVM is supported.
+# QB_KERNEL_CMDLINE_APPEND: options to append to the kernel's -append
+#                           option, e.g., "console=ttyS0 console=tty"
+# QB_DTB: qemu dtb name
+# QB_AUDIO_DRV: qemu audio driver, e.g., "alsa"; set it when audio is supported
+# QB_AUDIO_OPT: qemu audio option, e.g., "-soundhw ac97,es1370", used
+#               when QB_AUDIO_DRV is set.
+# QB_KERNEL_ROOT: kernel's root device, e.g., /dev/vda
+# QB_TAP_OPT: network option for 'tap' mode, e.g.,
+#             "-netdev tap,id=net0,ifname=@TAP@,script=no,downscript=no -device virtio-net-device,netdev=net0"
+#             Note: runqemu will replace "@TAP@" with the tap device actually used, such as tap0, tap1 ...
+# QB_SLIRP_OPT: network option for SLIRP mode, e.g.,
+#               "-netdev user,id=net0 -device virtio-net-device,netdev=net0"
+# QB_ROOTFS_OPT: used as the rootfs, e.g.,
+#                "-drive id=disk0,file=@ROOTFS@,if=none,format=raw -device virtio-blk-device,drive=disk0"
+#                Note: runqemu will replace "@ROOTFS@" with the rootfs image actually used, such as core-image-minimal-qemuarm64.ext4.
+# QB_SERIAL_OPT: serial port, e.g., "-serial mon:stdio"
+# QB_TCPSERIAL_OPT: tcp serial port option, e.g.,
+#                   " -device virtio-serial-device -chardev socket,id=virtcon,port=@PORT@,host=127.0.0.1 -device virtconsole,chardev=virtcon"
+#                   Note: runqemu will replace "@PORT@" with the port number actually used.
+#
+# Usage:
+# IMAGE_CLASSES += "qemuboot"
+# See "runqemu help" for more info
+
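
As a purely illustrative sketch (machine values invented, not from any real BSP), a board conf using these hooks might contain:

    QB_SYSTEM_NAME = "qemu-system-aarch64"
    QB_MACHINE = "-machine virt"
    QB_CPU = "-cpu cortex-a57"
    QB_KERNEL_CMDLINE_APPEND = "console=ttyAMA0"
    QB_ROOTFS_OPT = "-drive id=disk0,file=@ROOTFS@,if=none,format=raw -device virtio-blk-device,drive=disk0"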
+QB_MEM ?= "-m 256"
+QB_SERIAL_OPT ?= "-serial mon:stdio -serial null"
+QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
+QB_DEFAULT_FSTYPE ?= "ext4"
+QB_OPT_APPEND ?= "-show-cursor"
+
+# Create qemuboot.conf
+ROOTFS_POSTPROCESS_COMMAND += "write_qemuboot_conf; "
+
+python write_qemuboot_conf() {
+ import configparser
+
+ build_vars = ['MACHINE', 'TUNE_ARCH', 'DEPLOY_DIR_IMAGE', \
+ 'KERNEL_IMAGETYPE', 'IMAGE_NAME', 'IMAGE_LINK_NAME', \
+ 'STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE', \
+ 'STAGING_DIR_HOST']
+
+ # Vars from bsp
+ qb_vars = []
+ for k in d.keys():
+ if k.startswith('QB_'):
+ qb_vars.append(k)
+
+ qemuboot = "%s/%s.qemuboot.conf" % (d.getVar('DEPLOY_DIR_IMAGE', True), d.getVar('IMAGE_NAME', True))
+ qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('DEPLOY_DIR_IMAGE', True), d.getVar('IMAGE_LINK_NAME', True))
+ cf = configparser.ConfigParser()
+ cf.add_section('config_bsp')
+ for k in build_vars + qb_vars:
+ cf.set('config_bsp', k, '%s' % d.getVar(k, True))
+
+    # QB_DEFAULT_KERNEL's default value, KERNEL_IMAGETYPE, is the name of a symlink
+    # to the kernel file, which hinders relocatability of the qb conf.
+    # Read the link and replace it with the full filename of the target.
+ kernel_link = os.path.join(d.getVar('DEPLOY_DIR_IMAGE', True), d.getVar('QB_DEFAULT_KERNEL', True))
+ kernel = os.path.realpath(kernel_link)
+ cf.set('config_bsp', 'QB_DEFAULT_KERNEL', kernel)
+
+ bb.utils.mkdirhier(os.path.dirname(qemuboot))
+ with open(qemuboot, 'w') as f:
+ cf.write(f)
+
+ if os.path.lexists(qemuboot_link):
+ os.remove(qemuboot_link)
+ os.symlink(os.path.basename(qemuboot), qemuboot_link)
+}
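
On the consuming side the generated file can be read back with the same module; runqemu presumably does something along these lines (file name illustrative):

    import configparser

    cf = configparser.ConfigParser()
    cf.read('core-image-minimal-qemux86.qemuboot.conf')
    # configparser lowercases option names on both set and get, so the
    # upper-case QB_* keys written above are still found here
    mem = cf.get('config_bsp', 'QB_MEM', fallback='-m 256')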
diff --git a/import-layers/yocto-poky/meta/classes/recipe_sanity.bbclass b/import-layers/yocto-poky/meta/classes/recipe_sanity.bbclass
index 295611f0f..add34df9d 100644
--- a/import-layers/yocto-poky/meta/classes/recipe_sanity.bbclass
+++ b/import-layers/yocto-poky/meta/classes/recipe_sanity.bbclass
@@ -8,7 +8,7 @@ def bad_runtime_vars(cfgdata, d):
return
for var in d.getVar("__recipe_sanity_badruntimevars", True).split():
- val = d.getVar(var, 0)
+ val = d.getVar(var, False)
if val and val != cfgdata.get(var):
__note("%s should be %s_${PN}" % (var, var), d)
@@ -16,11 +16,11 @@ __recipe_sanity_reqvars = "DESCRIPTION"
__recipe_sanity_reqdiffvars = ""
def req_vars(cfgdata, d):
for var in d.getVar("__recipe_sanity_reqvars", True).split():
- if not d.getVar(var, 0):
+ if not d.getVar(var, False):
__note("%s should be set" % var, d)
for var in d.getVar("__recipe_sanity_reqdiffvars", True).split():
- val = d.getVar(var, 0)
+ val = d.getVar(var, False)
cfgval = cfgdata.get(var)
if not val:
@@ -29,7 +29,7 @@ def req_vars(cfgdata, d):
__note("%s should be defined to something other than default (%s)" % (var, cfgval), d)
def var_renames_overwrite(cfgdata, d):
- renames = d.getVar("__recipe_sanity_renames", 0)
+ renames = d.getVar("__recipe_sanity_renames", False)
if renames:
for (key, newkey, oldvalue, newvalue) in renames:
if oldvalue != newvalue and oldvalue != cfgdata.get(newkey):
@@ -50,7 +50,7 @@ def can_use_autotools_base(cfgdata, d):
if cfg.find(i) != -1:
return False
- for clsfile in d.getVar("__inherit_cache", 0):
+ for clsfile in d.getVar("__inherit_cache", False):
(base, _) = os.path.splitext(os.path.basename(clsfile))
if cfg.find("%s_do_configure" % base) != -1:
__note("autotools_base usage needs verification, spotted %s_do_configure" % base, d)
@@ -60,7 +60,7 @@ def can_use_autotools_base(cfgdata, d):
def can_delete_FILESPATH(cfgdata, d):
expected = cfgdata.get("FILESPATH")
expectedpaths = d.expand(expected)
- unexpanded = d.getVar("FILESPATH", 0)
+ unexpanded = d.getVar("FILESPATH", False)
filespath = d.getVar("FILESPATH", True).split(":")
filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
for fp in filespath:
@@ -73,7 +73,7 @@ def can_delete_FILESPATH(cfgdata, d):
def can_delete_FILESDIR(cfgdata, d):
expected = cfgdata.get("FILESDIR")
#expected = "${@bb.utils.which(d.getVar('FILESPATH', True), '.')}"
- unexpanded = d.getVar("FILESDIR", 0)
+ unexpanded = d.getVar("FILESDIR", False)
if unexpanded is None:
return False
@@ -90,7 +90,7 @@ def can_delete_others(p, cfgdata, d):
for k in ["S", "PV", "PN", "DESCRIPTION", "DEPENDS",
"SECTION", "PACKAGES", "EXTRA_OECONF", "EXTRA_OEMAKE"]:
#for k in cfgdata:
- unexpanded = d.getVar(k, 0)
+ unexpanded = d.getVar(k, False)
cfgunexpanded = cfgdata.get(k)
if not cfgunexpanded:
continue
@@ -117,7 +117,7 @@ python do_recipe_sanity () {
#(can_use_autotools_base, "candidate for use of autotools_base"),
(incorrect_nonempty_PACKAGES, "native or cross recipe with non-empty PACKAGES"),
]
- cfgdata = d.getVar("__recipe_sanity_cfgdata", 0)
+ cfgdata = d.getVar("__recipe_sanity_cfgdata", False)
for (func, msg) in sanitychecks:
if func(cfgdata, d):
@@ -143,8 +143,8 @@ python recipe_sanity_eh () {
cfgdata = {}
for k in d.keys():
- if not isinstance(d.getVar(k, 0), bb.data_smart.DataSmart):
- cfgdata[k] = d.getVar(k, 0)
+ if not isinstance(d.getVar(k, False), bb.data_smart.DataSmart):
+ cfgdata[k] = d.getVar(k, False)
d.setVar("__recipe_sanity_cfgdata", cfgdata)
#d.setVar("__recipe_sanity_cfgdata", d)
diff --git a/import-layers/yocto-poky/meta/classes/report-error.bbclass b/import-layers/yocto-poky/meta/classes/report-error.bbclass
index 82b5bcd69..5bb231efc 100644
--- a/import-layers/yocto-poky/meta/classes/report-error.bbclass
+++ b/import-layers/yocto-poky/meta/classes/report-error.bbclass
@@ -42,8 +42,8 @@ python errorreport_handler () {
data['distro'] = e.data.getVar("DISTRO", True)
data['target_sys'] = e.data.getVar("TARGET_SYS", True)
data['failures'] = []
- data['component'] = e.getPkgs()[0]
- data['branch_commit'] = base_detect_branch(e.data) + ": " + base_detect_revision(e.data)
+ data['component'] = " ".join(e.getPkgs())
+ data['branch_commit'] = str(base_detect_branch(e.data)) + ": " + str(base_detect_revision(e.data))
lock = bb.utils.lockfile(datafile + '.lock')
errorreport_savedata(e, data, "error-report.txt")
bb.utils.unlockfile(lock)
@@ -58,6 +58,13 @@ python errorreport_handler () {
try:
logFile = codecs.open(log, 'r', 'utf-8')
logdata = logFile.read()
+
+                # Replace host-specific paths so the logs are cleaner
+                for var in ("TOPDIR", "TMPDIR"):
+                    s = e.data.getVar(var, True)
+                    if s:
+                        logdata = logdata.replace(s, var)
+
logFile.close()
except:
logdata = "Unable to read log file"
diff --git a/import-layers/yocto-poky/meta/classes/rm_work.bbclass b/import-layers/yocto-poky/meta/classes/rm_work.bbclass
index c647d88d2..b71a9d1cf 100644
--- a/import-layers/yocto-poky/meta/classes/rm_work.bbclass
+++ b/import-layers/yocto-poky/meta/classes/rm_work.bbclass
@@ -15,6 +15,9 @@
# to try and reduce disk usage
BB_SCHEDULER ?= "completion"
+# Run the rm_work task in the idle scheduling class
+BB_TASK_IONICE_LEVEL_task-rm_work = "3.0"
+
RMWORK_ORIG_TASK := "${BB_DEFAULT_TASK}"
BB_DEFAULT_TASK = "rm_work_all"
@@ -63,14 +66,10 @@ do_rm_work () {
i=dummy
break
;;
- *do_rootfs*)
- i=dummy
- break
- ;;
- *do_image*)
- i=dummy
- break
- ;;
+ *do_rootfs*|*do_image*|*do_bootimg*|*do_bootdirectdisk*|*do_vmimg*)
+ i=dummy
+ break
+ ;;
*do_build*)
i=dummy
break
@@ -119,6 +118,8 @@ rm_work_rootfs () {
rm_work_rootfs[cleandirs] = "${WORKDIR}/rootfs"
python () {
+ if bb.data.inherits_class('kernel', d):
+ d.appendVar("RM_WORK_EXCLUDE", ' ' + d.getVar("PN", True))
# If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
excludes = (d.getVar("RM_WORK_EXCLUDE", True) or "").split()
pn = d.getVar("PN", True)
diff --git a/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass b/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass
index 95d28afa3..0c7ceea54 100644
--- a/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass
+++ b/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass
@@ -15,13 +15,13 @@ ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
# Write manifest
-IMAGE_MANIFEST = "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.manifest"
+IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}.rootfs.manifest"
ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
# Set default postinst log file
POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
# Set default target for systemd images
SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains("IMAGE_FEATURES", "x11-base", "graphical.target", "multi-user.target", d)}'
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; ", "", d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; systemd_create_users;", "", d)}'
ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
@@ -30,7 +30,34 @@ ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
SSH_DISABLE_DNS_LOOKUP ?= " ssh_disable_dns_lookup ; "
ROOTFS_POSTPROCESS_COMMAND_append_qemuall = "${SSH_DISABLE_DNS_LOOKUP}"
-
+systemd_create_users () {
+ for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do
+ [ -e $conffile ] || continue
+ grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
+ if [ "$type" = "u" ]; then
+ useradd_params="--shell /sbin/nologin"
+ [ "$id" != "-" ] && useradd_params="$useradd_params --uid $id"
+ [ "$comment" != "-" ] && useradd_params="$useradd_params --comment $comment"
+ useradd_params="$useradd_params --system $name"
+ eval useradd --root ${IMAGE_ROOTFS} $useradd_params || true
+ elif [ "$type" = "g" ]; then
+ groupadd_params=""
+ [ "$id" != "-" ] && groupadd_params="$groupadd_params --gid $id"
+ groupadd_params="$groupadd_params --system $name"
+ eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
+ elif [ "$type" = "m" ]; then
+ group=$id
+			if ! grep -q "^${group}:" ${IMAGE_ROOTFS}${sysconfdir}/group; then
+				eval groupadd --root ${IMAGE_ROOTFS} --system $group
+			fi
+			if ! grep -q "^${name}:" ${IMAGE_ROOTFS}${sysconfdir}/passwd; then
+				eval useradd --root ${IMAGE_ROOTFS} --shell /sbin/nologin --system $name
+			fi
+ eval usermod --root ${IMAGE_ROOTFS} -a -G $group $name
+ fi
+ done
+ done
+}
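
The sysusers.d entries consumed above have the form "type name id comment", with '-' standing for an unset field; a small parsing illustration (the entry itself is made up):

    # 'u' creates a system user; '-' means no fixed uid was requested
    line = 'u systemd-network - "systemd Network Management"'
    type_, name, uid, comment = line.split(None, 3)
    # type_ == 'u', uid == '-', comment keeps its embedded spaces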
#
# A hook function to support read-only-rootfs IMAGE_FEATURES
@@ -73,27 +100,6 @@ read_only_rootfs_hook () {
${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
fi
fi
-
- if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then
- # Update user database files so that services don't fail for a read-only systemd system
- for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do
- [ -e $conffile ] || continue
- grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
- if [ "$type" = "u" ]; then
- useradd_params=""
- [ "$id" != "-" ] && useradd_params="$useradd_params --uid $id"
- [ "$comment" != "-" ] && useradd_params="$useradd_params --comment $comment"
- useradd_params="$useradd_params --system $name"
- eval useradd --root ${IMAGE_ROOTFS} $useradd_params || true
- elif [ "$type" = "g" ]; then
- groupadd_params=""
- [ "$id" != "-" ] && groupadd_params="$groupadd_params --gid $id"
- groupadd_params="$groupadd_params --system $name"
- eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
- fi
- done
- done
- fi
}
#
@@ -211,7 +217,7 @@ python write_image_manifest () {
from oe.rootfs import image_list_installed_packages
from oe.utils import format_pkg_list
- deploy_dir = d.getVar('DEPLOY_DIR_IMAGE', True)
+ deploy_dir = d.getVar('IMGDEPLOYDIR', True)
link_name = d.getVar('IMAGE_LINK_NAME', True)
manifest_name = d.getVar('IMAGE_MANIFEST', True)
@@ -226,9 +232,6 @@ python write_image_manifest () {
if os.path.exists(manifest_name):
manifest_link = deploy_dir + "/" + link_name + ".manifest"
if os.path.lexists(manifest_link):
- if d.getVar('RM_OLD_IMAGE', True) == "1" and \
- os.path.exists(os.path.realpath(manifest_link)):
- os.remove(os.path.realpath(manifest_link))
os.remove(manifest_link)
os.symlink(os.path.basename(manifest_name), manifest_link)
}
diff --git a/import-layers/yocto-poky/meta/classes/rootfs_rpm.bbclass b/import-layers/yocto-poky/meta/classes/rootfs_rpm.bbclass
index 0d2e897c2..37730a710 100644
--- a/import-layers/yocto-poky/meta/classes/rootfs_rpm.bbclass
+++ b/import-layers/yocto-poky/meta/classes/rootfs_rpm.bbclass
@@ -5,8 +5,8 @@
ROOTFS_PKGMANAGE = "rpm smartpm"
ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
-# Add 50Meg of extra space for Smart
-IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "smartpm", " + 51200", "" ,d)}"
+# Add 100Meg of extra space for Smart
+IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "smartpm", " + 102400", "" ,d)}"
# Smart is python based, so be sure python-native is available to us.
EXTRANATIVEPATH += "python-native"
@@ -24,11 +24,6 @@ do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}"
do_rootfs[recrdeptask] += "do_package_write_rpm"
do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
-# RPM doesn't work with multiple rootfs generation at once due to collisions in the use of files
-# in ${DEPLOY_DIR_RPM}. This can be removed if package_update_index_rpm can be called concurrently
-do_rootfs[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
-do_populate_sdk[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
-
python () {
if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
flags = d.getVarFlag('do_rootfs', 'recrdeptask', True)
diff --git a/import-layers/yocto-poky/meta/classes/sanity.bbclass b/import-layers/yocto-poky/meta/classes/sanity.bbclass
index 77813e41b..7682ffbb8 100644
--- a/import-layers/yocto-poky/meta/classes/sanity.bbclass
+++ b/import-layers/yocto-poky/meta/classes/sanity.bbclass
@@ -135,8 +135,7 @@ is a good way to visualise the changes."""
bb.note("Your conf/bblayers.conf has been automatically updated.")
return
- if not status.reparse:
- status.addresult()
+ status.addresult()
elif current_lconf == 6 and lconf_version > 6:
# Handle rename of meta-yocto -> meta-poky
@@ -459,19 +458,19 @@ def check_gcc_march(sanity_data):
# Check if GCC could work without march
if not result:
- status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc gcc_test.c -o gcc_test")
+ status,res = oe.utils.getstatusoutput(sanity_data.expand("${BUILD_CC} gcc_test.c -o gcc_test"))
if status == 0:
result = True;
if not result:
- status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc -march=native gcc_test.c -o gcc_test")
+ status,res = oe.utils.getstatusoutput(sanity_data.expand("${BUILD_CC} -march=native gcc_test.c -o gcc_test"))
if status == 0:
message = "BUILD_CFLAGS_append = \" -march=native\""
result = True;
if not result:
build_arch = sanity_data.getVar('BUILD_ARCH', True)
- status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc -march=%s gcc_test.c -o gcc_test" % build_arch)
+ status,res = oe.utils.getstatusoutput(sanity_data.expand("${BUILD_CC} -march=%s gcc_test.c -o gcc_test" % build_arch))
if status == 0:
message = "BUILD_CFLAGS_append = \" -march=%s\"" % build_arch
result = True;
@@ -557,20 +556,17 @@ def check_perl_modules(sanity_data):
return "Required perl module(s) not found: %s\n\n%s\n" % (ret, errresult)
return None
-def sanity_check_conffiles(status, d):
+def sanity_check_conffiles(d):
funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS', True).split()
for func in funcs:
conffile, current_version, required_version, func = func.split(":")
if check_conf_exists(conffile, d) and d.getVar(current_version, True) is not None and \
d.getVar(current_version, True) != d.getVar(required_version, True):
- success = True
try:
bb.build.exec_func(func, d, pythonexception=True)
except NotImplementedError as e:
- success = False
- status.addresult(str(e))
- if success:
- status.reparse = True
+ bb.fatal(e)
+ d.setVar("BB_INVALIDCONF", True)
def sanity_handle_abichanges(status, d):
#
@@ -673,11 +669,11 @@ def check_sanity_version_change(status, d):
if not check_app_exists("${MAKE}", d):
missing = missing + "GNU make,"
- if not check_app_exists('${BUILD_PREFIX}gcc', d):
- missing = missing + "C Compiler (%sgcc)," % d.getVar("BUILD_PREFIX", True)
+ if not check_app_exists('${BUILD_CC}', d):
+ missing = missing + "C Compiler (%s)," % d.getVar("BUILD_CC", True)
- if not check_app_exists('${BUILD_PREFIX}g++', d):
- missing = missing + "C++ Compiler (%sg++)," % d.getVar("BUILD_PREFIX", True)
+ if not check_app_exists('${BUILD_CXX}', d):
+ missing = missing + "C++ Compiler (%s)," % d.getVar("BUILD_CXX", True)
required_utilities = d.getVar('SANITY_REQUIRED_UTILITIES', True)
@@ -746,7 +742,7 @@ def check_sanity_version_change(status, d):
status.addresult("You have a 32-bit libc, but no 32-bit headers. You must install the 32-bit libc headers.\n")
bbpaths = d.getVar('BBPATH', True).split(":")
- if ("." in bbpaths or "./" in bbpaths or "" in bbpaths) and not status.reparse:
+ if ("." in bbpaths or "./" in bbpaths or "" in bbpaths):
status.addresult("BBPATH references the current directory, either through " \
"an empty entry, a './' or a '.'.\n\t This is unsafe and means your "\
"layer configuration is adding empty elements to BBPATH.\n\t "\
@@ -765,6 +761,16 @@ def check_sanity_version_change(status, d):
# Check that TMPDIR isn't located on nfs
status.addresult(check_not_nfs(tmpdir, "TMPDIR"))
+def sanity_check_locale(d):
+ """
+ Currently bitbake switches locale to en_US.UTF-8 so check that this locale actually exists.
+ """
+ import locale
+ try:
+ locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
+ except locale.Error:
+        raise_sanity_error("Your system needs to support the en_US.UTF-8 locale.", d)
+
def check_sanity_everybuild(status, d):
import os, stat
# Sanity tests which test the users environment so need to run at each build (or are so cheap
@@ -784,7 +790,7 @@ def check_sanity_everybuild(status, d):
if (LooseVersion(bb.__version__) < LooseVersion(minversion)):
status.addresult('Bitbake version %s is required and version %s was found\n' % (minversion, bb.__version__))
- sanity_check_conffiles(status, d)
+ sanity_check_locale(d)
paths = d.getVar('PATH', True).split(":")
if "." in paths or "./" in paths or "" in paths:
@@ -829,8 +835,8 @@ def check_sanity_everybuild(status, d):
check_supported_distro(d)
- omask = os.umask(022)
- if omask & 0755:
+ omask = os.umask(0o022)
+ if omask & 0o755:
status.addresult("Please use a umask which allows a+rx and u+rwx\n")
os.umask(omask)
@@ -891,13 +897,13 @@ def check_sanity_everybuild(status, d):
continue
if mirror.startswith('file://'):
- import urlparse
- check_symlink(urlparse.urlparse(mirror).path, d)
+ import urllib
+ check_symlink(urllib.parse.urlparse(mirror).path, d)
# SSTATE_MIRROR ends with a /PATH string
if mirror.endswith('/PATH'):
# remove /PATH$ from SSTATE_MIRROR to get a working
# base directory path
- mirror_base = urlparse.urlparse(mirror[:-1*len('/PATH')]).path
+ mirror_base = urllib.parse.urlparse(mirror[:-1*len('/PATH')]).path
check_symlink(mirror_base, d)
# Check that TMPDIR hasn't changed location since the last time we were run
@@ -920,17 +926,17 @@ def check_sanity_everybuild(status, d):
with open(checkfile, "w") as f:
f.write(tmpdir)
- # Check /bin/sh links to dash or bash
- real_sh = os.path.realpath('/bin/sh')
- if not real_sh.endswith('/dash') and not real_sh.endswith('/bash'):
- status.addresult("Error, /bin/sh links to %s, must be dash or bash\n" % real_sh)
+ # If /bin/sh is a symlink, check that it points to dash or bash
+ if os.path.islink('/bin/sh'):
+ real_sh = os.path.realpath('/bin/sh')
+ if not real_sh.endswith('/dash') and not real_sh.endswith('/bash'):
+ status.addresult("Error, /bin/sh links to %s, must be dash or bash\n" % real_sh)
def check_sanity(sanity_data):
class SanityStatus(object):
def __init__(self):
self.messages = ""
self.network_error = False
- self.reparse = False
def addresult(self, message):
if message:
@@ -986,7 +992,6 @@ def check_sanity(sanity_data):
if status.messages != "":
raise_sanity_error(sanity_data.expand(status.messages), sanity_data, status.network_error)
- return status.reparse
# Create a copy of the datastore and finalise it to ensure appends and
# overrides are set - the datastore has yet to be finalised at ConfigParsed
@@ -995,15 +1000,20 @@ def copy_data(e):
sanity_data.finalize()
return sanity_data
+addhandler config_reparse_eventhandler
+config_reparse_eventhandler[eventmask] = "bb.event.ConfigParsed"
+python config_reparse_eventhandler() {
+ sanity_check_conffiles(e.data)
+}
+
addhandler check_sanity_eventhandler
check_sanity_eventhandler[eventmask] = "bb.event.SanityCheck bb.event.NetworkTest"
python check_sanity_eventhandler() {
if bb.event.getName(e) == "SanityCheck":
sanity_data = copy_data(e)
+ check_sanity(sanity_data)
if e.generateevents:
sanity_data.setVar("SANITY_USE_EVENTS", "1")
- reparse = check_sanity(sanity_data)
- e.data.setVar("BB_INVALIDCONF", reparse)
bb.event.fire(bb.event.SanityCheckPassed(), e.data)
elif bb.event.getName(e) == "NetworkTest":
sanity_data = copy_data(e)
diff --git a/import-layers/yocto-poky/meta/classes/scons.bbclass b/import-layers/yocto-poky/meta/classes/scons.bbclass
index 1579b05c6..b9ae19d58 100644
--- a/import-layers/yocto-poky/meta/classes/scons.bbclass
+++ b/import-layers/yocto-poky/meta/classes/scons.bbclass
@@ -10,7 +10,7 @@ scons_do_compile() {
}
scons_do_install() {
- ${STAGING_BINDIR_NATIVE}/scons PREFIX=${D}${prefix} prefix=${D}${prefix} install ${EXTRA_OESCONS}|| \
+ ${STAGING_BINDIR_NATIVE}/scons install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \
die "scons install execution failed."
}
diff --git a/import-layers/yocto-poky/meta/classes/sdl.bbclass b/import-layers/yocto-poky/meta/classes/sdl.bbclass
deleted file mode 100644
index cc31288f6..000000000
--- a/import-layers/yocto-poky/meta/classes/sdl.bbclass
+++ /dev/null
@@ -1,6 +0,0 @@
-#
-# (C) Michael 'Mickey' Lauer <mickey@Vanille.de>
-#
-
-DEPENDS += "virtual/libsdl libsdl-mixer libsdl-image"
-SECTION = "x11/games"
diff --git a/import-layers/yocto-poky/meta/classes/sip.bbclass b/import-layers/yocto-poky/meta/classes/sip.bbclass
deleted file mode 100644
index 6ed2a13bd..000000000
--- a/import-layers/yocto-poky/meta/classes/sip.bbclass
+++ /dev/null
@@ -1,61 +0,0 @@
-# Build Class for Sip based Python Bindings
-# (C) Michael 'Mickey' Lauer <mickey@Vanille.de>
-#
-STAGING_SIPDIR ?= "${STAGING_DATADIR_NATIVE}/sip"
-
-DEPENDS =+ "sip-native"
-RDEPENDS_${PN} += "python-sip"
-
-# default stuff, do not uncomment
-# EXTRA_SIPTAGS = "-tWS_X11 -tQt_4_3_0"
-
-# do_generate is before do_configure so ensure that sip_native is populated in sysroot before executing it
-do_generate[depends] += "sip-native:do_populate_sysroot"
-
-sip_do_generate() {
- if [ -z "${SIP_MODULES}" ]; then
- MODULES="`ls sip/*mod.sip`"
- else
- MODULES="${SIP_MODULES}"
- fi
-
- if [ -z "$MODULES" ]; then
- die "SIP_MODULES not set and no modules found in $PWD"
- else
- bbnote "using modules '${SIP_MODULES}' and tags '${EXTRA_SIPTAGS}'"
- fi
-
- if [ -z "${EXTRA_SIPTAGS}" ]; then
- die "EXTRA_SIPTAGS needs to be set!"
- else
- SIPTAGS="${EXTRA_SIPTAGS}"
- fi
-
- if [ ! -z "${SIP_FEATURES}" ]; then
- FEATURES="-z ${SIP_FEATURES}"
- bbnote "sip feature file: ${SIP_FEATURES}"
- fi
-
- for module in $MODULES
- do
- install -d ${module}/
- echo "calling 'sip4 -I sip -I ${STAGING_SIPDIR} ${SIPTAGS} ${FEATURES} -c ${module} -b ${module}/${module}.pro.in sip/${module}/${module}mod.sip'"
- sip4 -I ${STAGING_SIPDIR} -I sip ${SIPTAGS} ${FEATURES} -c ${module} -b ${module}/${module}.sbf \
- sip/${module}/${module}mod.sip || die "Error calling sip on ${module}"
- sed -e 's,target,TARGET,' -e 's,sources,SOURCES,' -e 's,headers,HEADERS,' \
- ${module}/${module}.sbf | sed s,"moc_HEADERS =","HEADERS +=", \
- >${module}/${module}.pro
- echo "TEMPLATE=lib" >>${module}/${module}.pro
- [ "${module}" = "qt" ] && echo "" >>${module}/${module}.pro
- [ "${module}" = "qtcanvas" ] && echo "" >>${module}/${module}.pro
- [ "${module}" = "qttable" ] && echo "" >>${module}/${module}.pro
- [ "${module}" = "qwt" ] && echo "" >>${module}/${module}.pro
- [ "${module}" = "qtpe" ] && echo "" >>${module}/${module}.pro
- [ "${module}" = "qtpe" ] && echo "LIBS+=-lqpe" >>${module}/${module}.pro
- true
- done
-}
-
-EXPORT_FUNCTIONS do_generate
-
-addtask generate after do_unpack do_patch before do_configure
diff --git a/import-layers/yocto-poky/meta/classes/siteinfo.bbclass b/import-layers/yocto-poky/meta/classes/siteinfo.bbclass
index 50141a353..6eca004c5 100644
--- a/import-layers/yocto-poky/meta/classes/siteinfo.bbclass
+++ b/import-layers/yocto-poky/meta/classes/siteinfo.bbclass
@@ -36,7 +36,11 @@ def siteinfo_data(d):
"mips": "endian-big bit-32 mips-common",
"mips64": "endian-big bit-64 mips-common",
"mips64el": "endian-little bit-64 mips-common",
+ "mipsisa64r6": "endian-big bit-64 mips-common",
+ "mipsisa64r6el": "endian-little bit-64 mips-common",
"mipsel": "endian-little bit-32 mips-common",
+ "mipsisa32r6": "endian-big bit-32 mips-common",
+ "mipsisa32r6el": "endian-little bit-32 mips-common",
"powerpc": "endian-big bit-32 powerpc-common",
"nios2": "endian-little bit-32 nios2-common",
"powerpc64": "endian-big bit-64 powerpc-common",
@@ -107,6 +111,14 @@ def siteinfo_data(d):
"x86_64-mingw32": "bit-64",
}
+ # Add in any extra user supplied data which may come from a BSP layer, removing the
+ # need to always change this class directly
+ extra_siteinfo = (d.getVar("SITEINFO_EXTRA_DATAFUNCS", True) or "").split()
+ for m in extra_siteinfo:
+ call = m + "(archinfo, osinfo, targetinfo, d)"
+ locs = { "archinfo" : archinfo, "osinfo" : osinfo, "targetinfo" : targetinfo, "d" : d}
+ archinfo, osinfo, targetinfo = bb.utils.better_eval(call, locs)
+
hostarch = d.getVar("HOST_ARCH", True)
hostos = d.getVar("HOST_OS", True)
target = "%s-%s" % (hostarch, hostos)
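
A BSP layer could plug into this hook as follows; the function and architecture names are invented for illustration:

    # in the BSP layer's configuration:
    # SITEINFO_EXTRA_DATAFUNCS += "my_bsp_siteinfo_datafunc"

    def my_bsp_siteinfo_datafunc(archinfo, osinfo, targetinfo, d):
        # teach siteinfo about a hypothetical architecture
        archinfo['myarch'] = "endian-little bit-32"
        return archinfo, osinfo, targetinfo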
@@ -145,7 +157,7 @@ python () {
bb.fatal("Please add your architecture to siteinfo.bbclass")
}
-def siteinfo_get_files(d, no_cache = False):
+def siteinfo_get_files(d, aclocalcache = False):
sitedata = siteinfo_data(d)
sitefiles = ""
for path in d.getVar("BBPATH", True).split(":"):
@@ -154,11 +166,17 @@ def siteinfo_get_files(d, no_cache = False):
if os.path.exists(filename):
sitefiles += filename + " "
- if no_cache: return sitefiles
+ if not aclocalcache:
+ return sitefiles
- # Now check for siteconfig cache files
- # Use the files copied to the aclocal cache generated by autotools.bbclass
- # to avoid races
+ # Now check for siteconfig cache files in the directory setup by autotools.bbclass to
+ # avoid races.
+ #
+    # ACLOCALDIR may or may not exist, so aclocalcache should only be set to True from
+    # autotools.bbclass after files have been copied into this location. To do otherwise
+    # risks parsing/signature issues and the directory being created/removed whilst this
+    # code executes. This can happen when a multilib recipe is parsed along with its base
+    # variant, which may be running at the time, causing rare but nasty failures.
path_siteconfig = d.getVar('ACLOCALDIR', True)
if path_siteconfig and os.path.isdir(path_siteconfig):
for i in os.listdir(path_siteconfig):
@@ -166,7 +184,6 @@ def siteinfo_get_files(d, no_cache = False):
continue
filename = os.path.join(path_siteconfig, i)
sitefiles += filename + " "
-
return sitefiles
#
diff --git a/import-layers/yocto-poky/meta/classes/spdx.bbclass b/import-layers/yocto-poky/meta/classes/spdx.bbclass
index 0c9276584..89394d3a9 100644
--- a/import-layers/yocto-poky/meta/classes/spdx.bbclass
+++ b/import-layers/yocto-poky/meta/classes/spdx.bbclass
@@ -219,14 +219,13 @@ def hash_string(data):
def run_fossology(foss_command, full_spdx):
import string, re
import subprocess
-
- p = subprocess.Popen(foss_command.split(),
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- foss_output, foss_error = p.communicate()
- if p.returncode != 0:
+
+ try:
+ foss_output = subprocess.check_output(foss_command.split(),
+ stderr=subprocess.STDOUT).decode('utf-8')
+ except subprocess.CalledProcessError as e:
return None
- foss_output = unicode(foss_output, "utf-8")
    foss_output = foss_output.replace('\r', '')
# Package info
diff --git a/import-layers/yocto-poky/meta/classes/sstate.bbclass b/import-layers/yocto-poky/meta/classes/sstate.bbclass
index 8c623271a..172384b37 100644
--- a/import-layers/yocto-poky/meta/classes/sstate.bbclass
+++ b/import-layers/yocto-poky/meta/classes/sstate.bbclass
@@ -17,6 +17,9 @@ SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/${SSTATE_PKGSPEC}"
+# explicitly make PV depend on the evaluated value of the PV variable
+PV[vardepvalue] = "${PV}"
+
# We don't want the sstate to depend on things like the distro string
# of the system, we let the sstate paths take care of this.
SSTATE_EXTRAPATH[vardepvalue] = ""
@@ -27,6 +30,8 @@ SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/ ${DEPLOY_DIR
SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
# Archive the sources for many architectures in one deploy folder
SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"
+# Ignore overlapping README
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR}/sdk/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt"
SSTATE_SCAN_FILES ?= "*.la *-config *_config"
SSTATE_SCAN_CMD ?= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES", True).split())}" \) -type f'
@@ -95,7 +100,7 @@ python () {
scan_cmd = "grep -Irl ${STAGING_DIR} ${SSTATE_BUILDDIR}"
d.setVar('SSTATE_SCAN_CMD', scan_cmd)
- unique_tasks = set((d.getVar('SSTATETASKS', True) or "").split())
+ unique_tasks = sorted(set((d.getVar('SSTATETASKS', True) or "").split()))
d.setVar('SSTATETASKS', " ".join(unique_tasks))
for task in unique_tasks:
d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
@@ -170,6 +175,8 @@ def sstate_install(ss, d):
if os.access(manifest, os.R_OK):
bb.fatal("Package already staged (%s)?!" % manifest)
+ d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")
+
locks = []
for lock in ss['lockfiles-shared']:
locks.append(bb.utils.lockfile(lock, True))
@@ -200,6 +207,7 @@ def sstate_install(ss, d):
f = os.path.normpath(f)
realmatch = True
for w in whitelist:
+ w = os.path.normpath(w)
if f.startswith(w):
realmatch = False
break
@@ -402,6 +410,13 @@ def sstate_clean_manifest(manifest, d):
except OSError:
pass
+ postrm = manifest + ".postrm"
+ if os.path.exists(manifest + ".postrm"):
+ import subprocess
+ os.chmod(postrm, 0o755)
+ subprocess.call(postrm, shell=True)
+ oe.path.remove(postrm)
+
oe.path.remove(manifest)
def sstate_clean(ss, d):
@@ -563,6 +578,8 @@ def sstate_package(ss, d):
for state in ss['dirs']:
if not os.path.exists(state[1]):
continue
+ if d.getVar('SSTATE_SKIP_CREATION', True) == '1':
+ continue
srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
for walkroot, dirs, files in os.walk(state[1]):
for file in files:
@@ -623,10 +640,10 @@ def pstaging_fetch(sstatefetch, sstatepkg, d):
# Try a fetch from the sstate mirror, if it fails just return and
# we will build the package
- uris = ['file://{0}'.format(sstatefetch),
- 'file://{0}.siginfo'.format(sstatefetch)]
+ uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
+ 'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG", True), False):
- uris += ['file://{0}.sig'.format(sstatefetch)]
+ uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]
for srcuri in uris:
localdata.setVar('SRC_URI', srcuri)
@@ -634,12 +651,6 @@ def pstaging_fetch(sstatefetch, sstatepkg, d):
fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
fetcher.download()
- # Need to optimise this, if using file:// urls, the fetcher just changes the local path
- # For now work around by symlinking
- localpath = bb.data.expand(fetcher.localpath(srcuri), localdata)
- if localpath != sstatepkg and os.path.exists(localpath) and not os.path.exists(sstatepkg):
- os.symlink(localpath, sstatepkg)
-
except bb.fetch2.BBFetchException:
break
@@ -647,7 +658,7 @@ def sstate_setscene(d):
shared_state = sstate_state_fromvars(d)
accelerate = sstate_installpkg(shared_state, d)
if not accelerate:
- raise bb.build.FuncFailed("No suitable staging package found")
+ bb.fatal("No suitable staging package found")
python sstate_task_prefunc () {
shared_state = sstate_state_fromvars(d)
@@ -661,9 +672,9 @@ python sstate_task_postfunc () {
sstate_install(shared_state, d)
for intercept in shared_state['interceptfuncs']:
bb.build.exec_func(intercept, d, (d.getVar("WORKDIR", True),))
- omask = os.umask(002)
- if omask != 002:
- bb.note("Using umask 002 (not %0o) for sstate packaging" % omask)
+ omask = os.umask(0o002)
+ if omask != 0o002:
+ bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
sstate_package(shared_state, d)
os.umask(omask)
}
@@ -725,6 +736,7 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
ret = []
missed = []
+ missing = []
extension = ".tgz"
if siginfo:
extension = extension + ".siginfo"
@@ -746,6 +758,18 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
return spec, extrapath, tname
+ def sstate_pkg_to_pn(pkg, d):
+ """
+ Translate an sstate filename to a PN value by way of SSTATE_PKGSPEC. This is slightly hacky but
+ we don't have access to everything in this context.
+ """
+ pkgspec = d.getVar('SSTATE_PKGSPEC', False)
+ try:
+ idx = pkgspec.split(':').index('${PN}')
+ except ValueError:
+ bb.fatal('Unable to find ${PN} in SSTATE_PKGSPEC')
+ return pkg.split(':')[idx]
+
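
For illustration, with a simplified spec the index trick resolves like this (the spec and file name below are not real artifact names):

    pkgspec = 'sstate:${PN}:${PACKAGE_ARCH}:${PV}:${PR}'   # simplified
    pkg = 'sstate:zlib:i586-poky-linux:1.2.8:r0'
    idx = pkgspec.split(':').index('${PN}')                # -> 1
    print(pkg.split(':')[idx])                             # -> zlib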
for task in range(len(sq_fn)):
@@ -780,6 +804,8 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
localdata.delVar('BB_NO_NETWORK')
+ whitelist = bb.runqueue.get_setscene_enforce_whitelist(d)
+
from bb.fetch2 import FetchConnectionCache
def checkstatus_init(thread_worker):
thread_worker.connection_cache = FetchConnectionCache()
@@ -806,7 +832,14 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
except:
missed.append(task)
bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
- pass
+ if whitelist:
+ pn = sstate_pkg_to_pn(sstatefile, d)
+ taskname = sq_task[task]
+ if not bb.runqueue.check_setscene_enforce_whitelist(pn, taskname, whitelist):
+ missing.append(task)
+ bb.error('Sstate artifact unavailable for %s.%s' % (pn, taskname))
+ pass
+ bb.event.fire(bb.event.ProcessProgress("Checking sstate mirror object availability", len(tasklist) - thread_worker.tasks.qsize()), d)
tasklist = []
for task in range(len(sq_fn)):
@@ -817,16 +850,23 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
tasklist.append((task, sstatefile))
if tasklist:
- bb.note("Checking sstate mirror object availability (for %s objects)" % len(tasklist))
+ bb.event.fire(bb.event.ProcessStarted("Checking sstate mirror object availability", len(tasklist)), d)
+
import multiprocessing
nproc = min(multiprocessing.cpu_count(), len(tasklist))
+ bb.event.enable_threadlock()
pool = oe.utils.ThreadedPool(nproc, len(tasklist),
worker_init=checkstatus_init, worker_end=checkstatus_end)
for t in tasklist:
pool.add_task(checkstatus, t)
pool.start()
pool.wait_completion()
+ bb.event.disable_threadlock()
+
+ bb.event.fire(bb.event.ProcessFinished("Checking sstate mirror object availability"), d)
+ if whitelist and missing:
+ bb.fatal('Required artifacts were unavailable - exiting')
inheritlist = d.getVar("INHERIT", True)
if "toaster" in inheritlist:
@@ -905,6 +945,9 @@ def setscene_depvalid(task, taskdependees, notneeded, d):
# Nothing need depend on libc-initial/gcc-cross-initial
if "-initial" in taskdependees[task][0]:
continue
+ # For meta-extsdk-toolchain we want all sysroot dependencies
+ if taskdependees[dep][0] == 'meta-extsdk-toolchain':
+ return False
# Native/Cross populate_sysroot need their dependencies
if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
return False
@@ -982,6 +1025,8 @@ python sstate_eventhandler2() {
for r in toremove:
(stamp, manifest, workdir) = r.split()
for m in glob.glob(manifest + ".*"):
+ if m.endswith(".postrm"):
+ continue
sstate_clean_manifest(m, d)
bb.utils.remove(stamp + "*")
if removeworkdir:
diff --git a/import-layers/yocto-poky/meta/classes/staging.bbclass b/import-layers/yocto-poky/meta/classes/staging.bbclass
index bc5dfa81a..a0b09a00b 100644
--- a/import-layers/yocto-poky/meta/classes/staging.bbclass
+++ b/import-layers/yocto-poky/meta/classes/staging.bbclass
@@ -1,3 +1,37 @@
+# These directories will be staged in the sysroot
+SYSROOT_DIRS = " \
+ ${includedir} \
+ ${libdir} \
+ ${base_libdir} \
+ ${nonarch_base_libdir} \
+ ${datadir} \
+"
+
+# These directories are also staged in the sysroot when they contain files that
+# are usable on the build system
+SYSROOT_DIRS_NATIVE = " \
+ ${bindir} \
+ ${sbindir} \
+ ${base_bindir} \
+ ${base_sbindir} \
+ ${libexecdir} \
+ ${sysconfdir} \
+ ${localstatedir} \
+"
+SYSROOT_DIRS_append_class-native = " ${SYSROOT_DIRS_NATIVE}"
+SYSROOT_DIRS_append_class-cross = " ${SYSROOT_DIRS_NATIVE}"
+SYSROOT_DIRS_append_class-crosssdk = " ${SYSROOT_DIRS_NATIVE}"
+
+# These directories will not be staged in the sysroot
+SYSROOT_DIRS_BLACKLIST = " \
+ ${mandir} \
+ ${docdir} \
+ ${infodir} \
+ ${datadir}/locale \
+ ${datadir}/applications \
+ ${datadir}/fonts \
+ ${datadir}/pixmaps \
+"
sysroot_stage_dir() {
src="$1"
@@ -14,43 +48,18 @@ sysroot_stage_dir() {
)
}
-sysroot_stage_libdir() {
- src="$1"
- dest="$2"
-
- sysroot_stage_dir $src $dest
-}
-
sysroot_stage_dirs() {
from="$1"
to="$2"
- sysroot_stage_dir $from${includedir} $to${includedir}
- if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
- sysroot_stage_dir $from${bindir} $to${bindir}
- sysroot_stage_dir $from${sbindir} $to${sbindir}
- sysroot_stage_dir $from${base_bindir} $to${base_bindir}
- sysroot_stage_dir $from${base_sbindir} $to${base_sbindir}
- sysroot_stage_dir $from${libexecdir} $to${libexecdir}
- sysroot_stage_dir $from${sysconfdir} $to${sysconfdir}
- sysroot_stage_dir $from${localstatedir} $to${localstatedir}
- fi
- if [ -d $from${libdir} ]
- then
- sysroot_stage_libdir $from${libdir} $to${libdir}
- fi
- if [ -d $from${base_libdir} ]
- then
- sysroot_stage_libdir $from${base_libdir} $to${base_libdir}
- fi
- if [ -d $from${nonarch_base_libdir} ]
- then
- sysroot_stage_libdir $from${nonarch_base_libdir} $to${nonarch_base_libdir}
- fi
- sysroot_stage_dir $from${datadir} $to${datadir}
- # We don't care about docs/info/manpages/locales
- rm -rf $to${mandir}/ $to${docdir}/ $to${infodir}/ ${to}${datadir}/locale/
- rm -rf $to${datadir}/applications/ $to${datadir}/fonts/ $to${datadir}/pixmaps/
+ for dir in ${SYSROOT_DIRS}; do
+ sysroot_stage_dir "$from$dir" "$to$dir"
+ done
+
+ # Remove directories we do not care about
+ for dir in ${SYSROOT_DIRS_BLACKLIST}; do
+ rm -rf "$to$dir"
+ done
}
sysroot_stage_all() {
@@ -172,13 +181,26 @@ python sysroot_cleansstate () {
do_configure[prefuncs] += "sysroot_cleansstate"
+BB_SETSCENE_VERIFY_FUNCTION2 = "sysroot_checkhashes2"
+
+def sysroot_checkhashes2(covered, tasknames, fns, d, invalidtasks):
+ problems = set()
+ configurefns = set()
+ for tid in invalidtasks:
+ if tasknames[tid] == "do_configure" and tid not in covered:
+ configurefns.add(fns[tid])
+ for tid in covered:
+ if tasknames[tid] == "do_populate_sysroot" and fns[tid] in configurefns:
+ problems.add(tid)
+ return problems
+
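
In effect this flags recipes whose do_configure would re-run against a do_populate_sysroot restored from sstate; a toy invocation (task ids invented):

    tasknames = {'t1': 'do_configure', 't2': 'do_populate_sysroot'}
    fns = {'t1': 'zlib.bb', 't2': 'zlib.bb'}
    covered = {'t2'}        # the sysroot would come from sstate
    invalidtasks = {'t1'}   # but configure would re-run
    print(sysroot_checkhashes2(covered, tasknames, fns, None, invalidtasks))
    # -> {'t2'}: that sstate object must not be used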
BB_SETSCENE_VERIFY_FUNCTION = "sysroot_checkhashes"
def sysroot_checkhashes(covered, tasknames, fnids, fns, d, invalidtasks = None):
problems = set()
configurefnids = set()
if not invalidtasks:
- invalidtasks = xrange(len(tasknames))
+ invalidtasks = range(len(tasknames))
for task in invalidtasks:
if tasknames[task] == "do_configure" and task not in covered:
configurefnids.add(fnids[task])
diff --git a/import-layers/yocto-poky/meta/classes/syslinux.bbclass b/import-layers/yocto-poky/meta/classes/syslinux.bbclass
index 4fcb0c5e7..7778fd708 100644
--- a/import-layers/yocto-poky/meta/classes/syslinux.bbclass
+++ b/import-layers/yocto-poky/meta/classes/syslinux.bbclass
@@ -30,6 +30,7 @@ SYSLINUX_SERIAL_TTY ?= "console=ttyS0,115200"
SYSLINUX_PROMPT ?= "0"
SYSLINUX_TIMEOUT ?= "50"
AUTO_SYSLINUXMENU ?= "1"
+SYSLINUX_ALLOWOPTIONS ?= "1"
SYSLINUX_ROOT ?= "${ROOT}"
SYSLINUX_CFG_VM ?= "${S}/syslinux_vm.cfg"
SYSLINUX_CFG_LIVE ?= "${S}/syslinux_live.cfg"
@@ -71,7 +72,7 @@ syslinux_hddimg_populate() {
}
syslinux_hddimg_install() {
- syslinux ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
+ syslinux ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
}
syslinux_hdddirect_install() {
@@ -99,12 +100,12 @@ python build_syslinux_cfg () {
cfile = d.getVar('SYSLINUX_CFG', True)
if not cfile:
- raise bb.build.FuncFailed('Unable to read SYSLINUX_CFG')
+ bb.fatal('Unable to read SYSLINUX_CFG')
try:
- cfgfile = file(cfile, 'w')
+ cfgfile = open(cfile, 'w')
except OSError:
- raise bb.build.funcFailed('Unable to open %s' % (cfile))
+ bb.fatal('Unable to open %s' % cfile)
cfgfile.write('# Automatically created by OE\n')
@@ -114,7 +115,12 @@ python build_syslinux_cfg () {
for opt in opts.split(';'):
cfgfile.write('%s\n' % opt)
- cfgfile.write('ALLOWOPTIONS 1\n');
+ allowoptions = d.getVar('SYSLINUX_ALLOWOPTIONS', True)
+ if allowoptions:
+ cfgfile.write('ALLOWOPTIONS %s\n' % allowoptions)
+ else:
+ cfgfile.write('ALLOWOPTIONS 1\n')
+
syslinux_default_console = d.getVar('SYSLINUX_DEFAULT_CONSOLE', True)
syslinux_serial_tty = d.getVar('SYSLINUX_SERIAL_TTY', True)
syslinux_serial = d.getVar('SYSLINUX_SERIAL', True)
@@ -154,7 +160,7 @@ python build_syslinux_cfg () {
overrides = localdata.getVar('OVERRIDES', True)
if not overrides:
- raise bb.build.FuncFailed('OVERRIDES not defined')
+ bb.fatal('OVERRIDES not defined')
localdata.setVar('OVERRIDES', label + ':' + overrides)
bb.data.update_data(localdata)
@@ -166,7 +172,7 @@ python build_syslinux_cfg () {
root= d.getVar('SYSLINUX_ROOT', True)
if not root:
- raise bb.build.FuncFailed('SYSLINUX_ROOT not defined')
+ bb.fatal('SYSLINUX_ROOT not defined')
for btype in btypes:
cfgfile.write('LABEL %s%s\nKERNEL /vmlinuz\n' % (btype[0], label))
@@ -190,3 +196,4 @@ python build_syslinux_cfg () {
cfgfile.close()
}
+build_syslinux_cfg[dirs] = "${S}"
diff --git a/import-layers/yocto-poky/meta/classes/systemd-boot.bbclass b/import-layers/yocto-poky/meta/classes/systemd-boot.bbclass
new file mode 100644
index 000000000..05244c7e5
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/systemd-boot.bbclass
@@ -0,0 +1,124 @@
+# Copyright (C) 2016 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# systemd-boot.bbclass - "systemd-boot" is essentially gummiboot merged into systemd.
+# The original standalone gummiboot project is dead and no longer
+# maintained. As a starting point, we replaced all gummiboot occurrences
+# in gummiboot.bbclass with systemd-boot to get a base version of this
+# systemd-boot.bbclass.
+#
+# Set EFI_PROVIDER = "systemd-boot" to use systemd-boot on your live images instead of grub-efi
+# (images built by image-live.bbclass or image-vm.bbclass)
+
+do_bootimg[depends] += "${MLPREFIX}systemd-boot:do_deploy"
+do_bootdirectdisk[depends] += "${MLPREFIX}systemd-boot:do_deploy"
+
+EFIDIR = "/EFI/BOOT"
+
+SYSTEMD_BOOT_CFG ?= "${S}/loader.conf"
+SYSTEMD_BOOT_ENTRIES ?= ""
+SYSTEMD_BOOT_TIMEOUT ?= "10"
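+# The defaults above are weak assignments and can be overridden, e.g. in a
+# hypothetical local.conf snippet:
+#   SYSTEMD_BOOT_TIMEOUT = "5"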
+
+# Need UUID utility code.
+inherit fs-uuid
+
+efi_populate() {
+ DEST=$1
+
+ EFI_IMAGE="systemd-bootia32.efi"
+ DEST_EFI_IMAGE="bootia32.efi"
+ if [ "${TARGET_ARCH}" = "x86_64" ]; then
+ EFI_IMAGE="systemd-bootx64.efi"
+ DEST_EFI_IMAGE="bootx64.efi"
+ fi
+
+ install -d ${DEST}${EFIDIR}
+ # systemd-boot requires these paths for configuration files;
+ # they are not customizable, so there is no point in new variables
+ install -d ${DEST}/loader
+ install -d ${DEST}/loader/entries
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/${EFI_IMAGE} ${DEST}${EFIDIR}/${DEST_EFI_IMAGE}
+ install -m 0644 ${SYSTEMD_BOOT_CFG} ${DEST}/loader/loader.conf
+ for i in ${SYSTEMD_BOOT_ENTRIES}; do
+ install -m 0644 ${i} ${DEST}/loader/entries
+ done
+}
+
+efi_iso_populate() {
+ iso_dir=$1
+ efi_populate $iso_dir
+ mkdir -p ${EFIIMGDIR}/${EFIDIR}
+ cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
+ cp $iso_dir/vmlinuz ${EFIIMGDIR}
+ EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
+ echo "fs0:${EFIPATH}\\${DEST_EFI_IMAGE}" > ${EFIIMGDIR}/startup.nsh
+ if [ -f "$iso_dir/initrd" ] ; then
+ cp $iso_dir/initrd ${EFIIMGDIR}
+ fi
+}
+
+efi_hddimg_populate() {
+ efi_populate $1
+}
+
+python build_efi_cfg() {
+ s = d.getVar("S", True)
+ labels = d.getVar('LABELS', True)
+ if not labels:
+ bb.debug(1, "LABELS not defined, nothing to do")
+ return
+
+ if not labels.strip():
+ bb.debug(1, "No labels, nothing to do")
+ return
+
+ cfile = d.getVar('SYSTEMD_BOOT_CFG', True)
+ try:
+ cfgfile = open(cfile, 'w')
+ except OSError:
+ bb.fatal('Unable to open %s' % cfile)
+
+ cfgfile.write('# Automatically created by OE\n')
+ cfgfile.write('default %s\n' % (labels.split()[0]))
+ timeout = d.getVar('SYSTEMD_BOOT_TIMEOUT', True)
+ if timeout:
+ cfgfile.write('timeout %s\n' % timeout)
+ else:
+ cfgfile.write('timeout 10\n')
+ cfgfile.close()
+
+ for label in labels.split():
+ localdata = d.createCopy()
+
+ overrides = localdata.getVar('OVERRIDES', True)
+ if not overrides:
+ bb.fatal('OVERRIDES not defined')
+
+ entryfile = "%s/%s.conf" % (s, label)
+ d.appendVar("SYSTEMD_BOOT_ENTRIES", " " + entryfile)
+ try:
+ entrycfg = open(entryfile, "w")
+ except OSError:
+ bb.fatal('Unable to open %s' % entryfile)
+ localdata.setVar('OVERRIDES', label + ':' + overrides)
+ bb.data.update_data(localdata)
+
+ entrycfg.write('title %s\n' % label)
+ entrycfg.write('linux /vmlinuz\n')
+
+ append = localdata.getVar('APPEND', True)
+ initrd = localdata.getVar('INITRD', True)
+
+ if initrd:
+ entrycfg.write('initrd /initrd\n')
+ lb = label
+ if label == "install":
+ lb = "install-efi"
+ entrycfg.write('options LABEL=%s ' % lb)
+ if append:
+ append = replace_rootfs_uuid(d, append)
+ entrycfg.write('%s' % append)
+ entrycfg.write('\n')
+ entrycfg.close()
+}
diff --git a/import-layers/yocto-poky/meta/classes/systemd.bbclass b/import-layers/yocto-poky/meta/classes/systemd.bbclass
index db7873fbe..d56c760a1 100644
--- a/import-layers/yocto-poky/meta/classes/systemd.bbclass
+++ b/import-layers/yocto-poky/meta/classes/systemd.bbclass
@@ -165,8 +165,7 @@ python systemd_populate_packages() {
if path_found != '':
systemd_add_files_and_parse(pkg_systemd, path_found, service, keys)
else:
- raise bb.build.FuncFailed("SYSTEMD_SERVICE_%s value %s does not exist" % \
- (pkg_systemd, service))
+ bb.fatal("SYSTEMD_SERVICE_%s value %s does not exist" % (pkg_systemd, service))
# Run all modifications once when creating package
if os.path.exists(d.getVar("D", True)):
diff --git a/import-layers/yocto-poky/meta/classes/terminal.bbclass b/import-layers/yocto-poky/meta/classes/terminal.bbclass
index 9f4c24e90..a94f755a4 100644
--- a/import-layers/yocto-poky/meta/classes/terminal.bbclass
+++ b/import-layers/yocto-poky/meta/classes/terminal.bbclass
@@ -29,7 +29,7 @@ def emit_terminal_func(command, envdata, d):
bb.data.emit_func(cmd_func, script, envdata)
script.write(cmd_func)
script.write("\n")
- os.chmod(runfile, 0755)
+ os.chmod(runfile, 0o755)
return runfile
diff --git a/import-layers/yocto-poky/meta/classes/testexport.bbclass b/import-layers/yocto-poky/meta/classes/testexport.bbclass
new file mode 100644
index 000000000..514702082
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/testexport.bbclass
@@ -0,0 +1,206 @@
+# Copyright (C) 2016 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+#
+#
+# testexport.bbclass allows runtime tests to be executed outside the OE environment.
+# Most of the tests are commands run on the target image over ssh.
+# To use it, add testexport to the global inherit and call your target image with -c testexport.
+# You can try it out like this:
+# - First build an image, e.g. core-image-sato
+# - Add INHERIT += "testexport" in local.conf
+# - Then run bitbake core-image-sato -c testexport. That will generate the
+#   directory structure to execute the runtime tests using runexported.py.
+#
+# For more information on TEST_SUITES check the testimage class.
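+#
+# Illustrative flow once exported (exact paths depend on your build):
+#   bitbake core-image-sato -c testexport
+#   cd tmp/testexport/core-image-sato      # TEST_EXPORT_DIR
+#   ./runexported.py testdata.json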
+
+TEST_LOG_DIR ?= "${WORKDIR}/testexport"
+TEST_EXPORT_DIR ?= "${TMPDIR}/testexport/${PN}"
+TEST_EXPORT_PACKAGED_DIR ?= "packages/packaged"
+TEST_EXPORT_EXTRACTED_DIR ?= "packages/extracted"
+
+TEST_TARGET ?= "simpleremote"
+TEST_TARGET_IP ?= ""
+TEST_SERVER_IP ?= ""
+
+TEST_EXPORT_SDK_PACKAGES ?= ""
+TEST_EXPORT_SDK_ENABLED ?= "0"
+TEST_EXPORT_SDK_NAME ?= "testexport-tools-nativesdk"
+TEST_EXPORT_SDK_DIR ?= "sdk"
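+# With TEST_EXPORT_SDK_ENABLED = "1", do_testexport also copies the SDK
+# installer named by TEST_EXPORT_SDK_NAME into TEST_EXPORT_SDK_DIR and
+# creates a tarball of it (see exportTests below).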
+
+TEST_EXPORT_DEPENDS = ""
+TEST_EXPORT_DEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
+TEST_EXPORT_DEPENDS += "${@bb.utils.contains('TEST_EXPORT_SDK_ENABLED', '1', 'testexport-tarball:do_populate_sdk', '', d)}"
+TEST_EXPORT_LOCK = "${TMPDIR}/testimage.lock"
+
+python do_testexport() {
+ testexport_main(d)
+}
+
+addtask testexport
+do_testexport[nostamp] = "1"
+do_testexport[depends] += "${TEST_EXPORT_DEPENDS} ${TESTIMAGEDEPENDS}"
+do_testexport[lockfiles] += "${TEST_EXPORT_LOCK}"
+
+def exportTests(d,tc):
+ import json
+ import shutil
+ import pkgutil
+ import re
+ import oe.path
+
+ exportpath = d.getVar("TEST_EXPORT_DIR", True)
+
+ savedata = {}
+ savedata["d"] = {}
+ savedata["target"] = {}
+ savedata["target"]["ip"] = tc.target.ip or d.getVar("TEST_TARGET_IP", True)
+ savedata["target"]["server_ip"] = tc.target.server_ip or d.getVar("TEST_SERVER_IP", True)
+
+ keys = [ key for key in d.keys() if not key.startswith("_") and not key.startswith("BB") \
+ and not key.startswith("B_pn") and not key.startswith("do_") and not d.getVarFlag(key, "func", True)]
+ for key in keys:
+ try:
+ savedata["d"][key] = d.getVar(key, True)
+ except bb.data_smart.ExpansionError:
+ # we don't care about those anyway
+ pass
+
+ json_file = os.path.join(exportpath, "testdata.json")
+ with open(json_file, "w") as f:
+ json.dump(savedata, f, skipkeys=True, indent=4, sort_keys=True)
+
+ # Replace absolute path with relative in the file
+ exclude_path = os.path.join(d.getVar("COREBASE", True),'meta','lib','oeqa')
+ f1 = open(json_file,'r').read()
+ f2 = open(json_file,'w')
+ m = f1.replace(exclude_path,'oeqa')
+ f2.write(m)
+ f2.close()
+
+ # now start copying files
+ # we'll basically copy everything under meta/lib/oeqa, with these exceptions
+ # - oeqa/targetcontrol.py - not needed
+ # - oeqa/selftest - something else
+ # That means:
+ # - all tests from oeqa/runtime defined in TEST_SUITES (including from other layers)
+ # - the contents of oeqa/utils and oeqa/runtime/files
+# - oeqa/oetest.py and oeqa/runexported.py (the latter gets copied to exportpath, not exportpath/oeqa)
+ # - __init__.py files
+ bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/runtime/files"))
+ bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/utils"))
+ # copy test modules, this should cover tests in other layers too
+ bbpath = d.getVar("BBPATH", True).split(':')
+ for t in tc.testslist:
+ isfolder = False
+ if re.search("\w+\.\w+\.test_\S+", t):
+ t = '.'.join(t.split('.')[:3])
+ mod = pkgutil.get_loader(t)
+ # More depth than usual?
+ if (t.count('.') > 2):
+ for p in bbpath:
+ foldername = os.path.join(p, 'lib', os.sep.join(t.split('.')).rsplit(os.sep, 1)[0])
+ if os.path.isdir(foldername):
+ isfolder = True
+ target_folder = os.path.join(exportpath, "oeqa", "runtime", os.path.basename(foldername))
+ if not os.path.exists(target_folder):
+ oe.path.copytree(foldername, target_folder)
+ if not isfolder:
+ shutil.copy2(mod.path, os.path.join(exportpath, "oeqa/runtime"))
+ json_file = "%s.json" % mod.path.rsplit(".", 1)[0]
+ if os.path.isfile(json_file):
+ shutil.copy2(json_file, os.path.join(exportpath, "oeqa/runtime"))
+ # Get meta layer
+ for layer in d.getVar("BBLAYERS", True).split():
+ if os.path.basename(layer) == "meta":
+ meta_layer = layer
+ break
+ # copy oeqa/oetest.py and oeqa/runexported.py
+ oeqadir = os.path.join(meta_layer, "lib/oeqa")
+ shutil.copy2(os.path.join(oeqadir, "oetest.py"), os.path.join(exportpath, "oeqa"))
+ shutil.copy2(os.path.join(oeqadir, "runexported.py"), exportpath)
+ # copy oeqa/utils/*.py
+ for root, dirs, files in os.walk(os.path.join(oeqadir, "utils")):
+ for f in files:
+ if f.endswith(".py"):
+ shutil.copy2(os.path.join(root, f), os.path.join(exportpath, "oeqa/utils"))
+ # copy oeqa/runtime/files/*
+ for root, dirs, files in os.walk(os.path.join(oeqadir, "runtime/files")):
+ for f in files:
+ shutil.copy2(os.path.join(root, f), os.path.join(exportpath, "oeqa/runtime/files"))
+
+ # Create tar file for common parts of testexport
+ create_tarball(d, "testexport.tar.gz", d.getVar("TEST_EXPORT_DIR", True))
+
+ # Copy packages needed for runtime testing
+ test_pkg_dir = d.getVar("TEST_NEEDED_PACKAGES_DIR", True)
+ if os.listdir(test_pkg_dir):
+ export_pkg_dir = os.path.join(d.getVar("TEST_EXPORT_DIR", True), "packages")
+ oe.path.copytree(test_pkg_dir, export_pkg_dir)
+ # Create tar file for packages needed by the DUT
+ create_tarball(d, "testexport_packages_%s.tar.gz" % d.getVar("MACHINE", True), export_pkg_dir)
+
+ # Copy SDK
+ if d.getVar("TEST_EXPORT_SDK_ENABLED", True) == "1":
+ sdk_deploy = d.getVar("SDK_DEPLOY", True)
+ tarball_name = "%s.sh" % d.getVar("TEST_EXPORT_SDK_NAME", True)
+ tarball_path = os.path.join(sdk_deploy, tarball_name)
+ export_sdk_dir = os.path.join(d.getVar("TEST_EXPORT_DIR", True),
+ d.getVar("TEST_EXPORT_SDK_DIR", True))
+ bb.utils.mkdirhier(export_sdk_dir)
+ shutil.copy2(tarball_path, export_sdk_dir)
+
+ # Create tar file for the sdk
+ create_tarball(d, "testexport_sdk_%s.tar.gz" % d.getVar("SDK_ARCH", True), export_sdk_dir)
+
+ bb.plain("Exported tests to: %s" % exportpath)
+
+def testexport_main(d):
+ from oeqa.oetest import ExportTestContext
+ from oeqa.targetcontrol import get_target_controller
+ from oeqa.utils.dump import get_host_dumper
+
+ test_create_extract_dirs(d)
+ export_dir = d.getVar("TEST_EXPORT_DIR", True)
+ bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
+ bb.utils.remove(export_dir, recurse=True)
+ bb.utils.mkdirhier(export_dir)
+
+ # the robot dance
+ target = get_target_controller(d)
+
+ # test context
+ tc = ExportTestContext(d, target)
+
+ # this is a dummy load of tests
+ # we are doing that to find compile errors in the tests themselves
+ # before booting the image
+ try:
+ tc.loadTests()
+ except Exception as e:
+ import traceback
+ bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
+
+ tc.extract_packages()
+ exportTests(d,tc)
+
+def create_tarball(d, tar_name, src_dir):
+
+ import tarfile
+
+ tar_path = os.path.join(d.getVar("TEST_EXPORT_DIR", True), tar_name)
+ current_dir = os.getcwd()
+ src_dir = src_dir.rstrip('/')
+ dir_name = os.path.dirname(src_dir)
+ base_name = os.path.basename(src_dir)
+
+ os.chdir(dir_name)
+ tar = tarfile.open(tar_path, "w:gz")
+ tar.add(base_name)
+ tar.close()
+ os.chdir(current_dir)
+
+
+testexport_main[vardepsexclude] =+ "BB_ORIGENV"
+
+inherit testimage
diff --git a/import-layers/yocto-poky/meta/classes/testimage.bbclass b/import-layers/yocto-poky/meta/classes/testimage.bbclass
index e77bb1192..6b6781d86 100644
--- a/import-layers/yocto-poky/meta/classes/testimage.bbclass
+++ b/import-layers/yocto-poky/meta/classes/testimage.bbclass
@@ -8,7 +8,7 @@
# To use it add testimage to global inherit and call your target image with -c testimage
# You can try it out like this:
# - first build a qemu core-image-sato
-# - add INHERIT += "testimage" in local.conf
+# - add IMAGE_CLASSES += "testimage" in local.conf
# - then bitbake core-image-sato -c testimage. That will run a standard suite of tests.
# You can set (or append to) TEST_SUITES in local.conf to select the tests
@@ -30,7 +30,10 @@
TEST_LOG_DIR ?= "${WORKDIR}/testimage"
TEST_EXPORT_DIR ?= "${TMPDIR}/testimage/${PN}"
-TEST_EXPORT_ONLY ?= "0"
+TEST_INSTALL_TMP_DIR ?= "${WORKDIR}/testimage/install_tmp"
+TEST_NEEDED_PACKAGES_DIR ?= "${WORKDIR}/testimage/packages"
+TEST_EXTRACTED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/extracted"
+TEST_PACKAGED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/packaged"
RPMTESTSUITE = "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'smart rpm', '', d)}"
MINTESTSUITE = "ping"
@@ -48,25 +51,33 @@ DEFAULT_TEST_SUITES_pn-core-image-sato = "${NETTESTSUITE} connman xorg parselogs
DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "${NETTESTSUITE} connman xorg perl python \
${DEVTESTSUITE} parselogs ${RPMTESTSUITE}"
DEFAULT_TEST_SUITES_pn-core-image-lsb-dev = "${NETTESTSUITE} pam perl python parselogs ${RPMTESTSUITE}"
-DEFAULT_TEST_SUITES_pn-core-image-lsb-sdk = "${NETTESTSUITE} buildcvs buildiptables buildsudoku \
+DEFAULT_TEST_SUITES_pn-core-image-lsb-sdk = "${NETTESTSUITE} buildcvs buildiptables buildgalculator \
connman ${DEVTESTSUITE} pam perl python parselogs ${RPMTESTSUITE}"
DEFAULT_TEST_SUITES_pn-meta-toolchain = "auto"
# aarch64 has no graphics
DEFAULT_TEST_SUITES_remove_aarch64 = "xorg"
-#qemumips is too slow for buildsudoku
-DEFAULT_TEST_SUITES_remove_qemumips = "buildsudoku"
+# qemumips is quite slow and has reached the timeout limit several times on the
+# YP build cluster; mitigate this by removing the build tests for qemumips machines.
+MIPSREMOVE ??= "buildcvs buildiptables buildgalculator"
+DEFAULT_TEST_SUITES_remove_qemumips = "${MIPSREMOVE}"
+DEFAULT_TEST_SUITES_remove_qemumips64 = "${MIPSREMOVE}"
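+# MIPSREMOVE is weakly assigned (??=), so it can be overridden, e.g.
+# MIPSREMOVE = "" in local.conf restores the build tests on qemumips
+# if the longer runtimes are acceptable.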
TEST_SUITES ?= "${DEFAULT_TEST_SUITES}"
TEST_QEMUBOOT_TIMEOUT ?= "1000"
TEST_TARGET ?= "qemu"
-TEST_TARGET_IP ?= ""
-TEST_SERVER_IP ?= ""
TESTIMAGEDEPENDS = ""
TESTIMAGEDEPENDS_qemuall = "qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot"
+TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
+TESTIMAGEDEPENDS_qemuall += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
+TESTIMAGEDEPENDS_qemuall += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-native:do_populate_sysroot', '', d)}"
+TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'python-smartpm-native:do_populate_sysroot', '', d)}"
+TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg-utils-native:do_populate_sysroot', '', d)}"
+TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'deb', 'apt-native:do_populate_sysroot', '', d)}"
+
TESTIMAGELOCK = "${TMPDIR}/testimage.lock"
TESTIMAGELOCK_qemuall = ""
@@ -103,102 +114,12 @@ testimage_dump_host () {
python do_testimage() {
testimage_main(d)
}
+
addtask testimage
do_testimage[nostamp] = "1"
do_testimage[depends] += "${TESTIMAGEDEPENDS}"
do_testimage[lockfiles] += "${TESTIMAGELOCK}"
-def exportTests(d,tc):
- import json
- import shutil
- import pkgutil
- import re
-
- exportpath = d.getVar("TEST_EXPORT_DIR", True)
-
- savedata = {}
- savedata["d"] = {}
- savedata["target"] = {}
- savedata["host_dumper"] = {}
- for key in tc.__dict__:
- # special cases
- if key not in ['d', 'target', 'host_dumper', 'suite']:
- savedata[key] = getattr(tc, key)
- savedata["target"]["ip"] = tc.target.ip or d.getVar("TEST_TARGET_IP", True)
- savedata["target"]["server_ip"] = tc.target.server_ip or d.getVar("TEST_SERVER_IP", True)
-
- keys = [ key for key in d.keys() if not key.startswith("_") and not key.startswith("BB") \
- and not key.startswith("B_pn") and not key.startswith("do_") and not d.getVarFlag(key, "func", True)]
- for key in keys:
- try:
- savedata["d"][key] = d.getVar(key, True)
- except bb.data_smart.ExpansionError:
- # we don't care about those anyway
- pass
-
- savedata["host_dumper"]["parent_dir"] = tc.host_dumper.parent_dir
- savedata["host_dumper"]["cmds"] = tc.host_dumper.cmds
-
- json_file = os.path.join(exportpath, "testdata.json")
- with open(json_file, "w") as f:
- json.dump(savedata, f, skipkeys=True, indent=4, sort_keys=True)
-
- # Replace absolute path with relative in the file
- exclude_path = os.path.join(d.getVar("COREBASE", True),'meta','lib','oeqa')
- f1 = open(json_file,'r').read()
- f2 = open(json_file,'w')
- m = f1.replace(exclude_path,'oeqa')
- f2.write(m)
- f2.close()
-
- # now start copying files
- # we'll basically copy everything under meta/lib/oeqa, with these exceptions
- # - oeqa/targetcontrol.py - not needed
- # - oeqa/selftest - something else
- # That means:
- # - all tests from oeqa/runtime defined in TEST_SUITES (including from other layers)
- # - the contents of oeqa/utils and oeqa/runtime/files
- # - oeqa/oetest.py and oeqa/runexport.py (this will get copied to exportpath not exportpath/oeqa)
- # - __init__.py files
- bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/runtime/files"))
- bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/utils"))
- # copy test modules, this should cover tests in other layers too
- bbpath = d.getVar("BBPATH", True).split(':')
- for t in tc.testslist:
- isfolder = False
- if re.search("\w+\.\w+\.test_\S+", t):
- t = '.'.join(t.split('.')[:3])
- mod = pkgutil.get_loader(t)
- # More depth than usual?
- if (t.count('.') > 2):
- for p in bbpath:
- foldername = os.path.join(p, 'lib', os.sep.join(t.split('.')).rsplit(os.sep, 1)[0])
- if os.path.isdir(foldername):
- isfolder = True
- target_folder = os.path.join(exportpath, "oeqa", "runtime", os.path.basename(foldername))
- if not os.path.exists(target_folder):
- shutil.copytree(foldername, target_folder)
- if not isfolder:
- shutil.copy2(mod.filename, os.path.join(exportpath, "oeqa/runtime"))
- # copy __init__.py files
- oeqadir = pkgutil.get_loader("oeqa").filename
- shutil.copy2(os.path.join(oeqadir, "__init__.py"), os.path.join(exportpath, "oeqa"))
- shutil.copy2(os.path.join(oeqadir, "runtime/__init__.py"), os.path.join(exportpath, "oeqa/runtime"))
- # copy oeqa/oetest.py and oeqa/runexported.py
- shutil.copy2(os.path.join(oeqadir, "oetest.py"), os.path.join(exportpath, "oeqa"))
- shutil.copy2(os.path.join(oeqadir, "runexported.py"), exportpath)
- # copy oeqa/utils/*.py
- for root, dirs, files in os.walk(os.path.join(oeqadir, "utils")):
- for f in files:
- if f.endswith(".py"):
- shutil.copy2(os.path.join(root, f), os.path.join(exportpath, "oeqa/utils"))
- # copy oeqa/runtime/files/*
- for root, dirs, files in os.walk(os.path.join(oeqadir, "runtime/files")):
- for f in files:
- shutil.copy2(os.path.join(root, f), os.path.join(exportpath, "oeqa/runtime/files"))
-
- bb.plain("Exported tests to: %s" % exportpath)
-
def testimage_main(d):
import unittest
import os
@@ -210,11 +131,8 @@ def testimage_main(d):
from oeqa.utils.dump import get_host_dumper
pn = d.getVar("PN", True)
- export = oe.utils.conditional("TEST_EXPORT_ONLY", "1", True, False, d)
bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
- if export:
- bb.utils.remove(d.getVar("TEST_EXPORT_DIR", True), recurse=True)
- bb.utils.mkdirhier(d.getVar("TEST_EXPORT_DIR", True))
+ test_create_extract_dirs(d)
# we need the host dumper in test context
host_dumper = get_host_dumper(d)
@@ -234,29 +152,39 @@ def testimage_main(d):
import traceback
bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
- if export:
+ tc.extract_packages()
+ target.deploy()
+ try:
+ bootparams = None
+ if d.getVar('VIRTUAL-RUNTIME_init_manager', '') == 'systemd':
+ bootparams = 'systemd.log_level=debug systemd.log_target=console'
+ target.start(extra_bootparams=bootparams)
+ starttime = time.time()
+ result = tc.runTests()
+ stoptime = time.time()
+ if result.wasSuccessful():
+ bb.plain("%s - Ran %d test%s in %.3fs" % (pn, result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime))
+ msg = "%s - OK - All required tests passed" % pn
+ skipped = len(result.skipped)
+ if skipped:
+ msg += " (skipped=%d)" % skipped
+ bb.plain(msg)
+ else:
+ bb.fatal("%s - FAILED - check the task log and the ssh log" % pn)
+ finally:
signal.signal(signal.SIGTERM, tc.origsigtermhandler)
- tc.origsigtermhandler = None
- exportTests(d,tc)
- else:
- target.deploy()
- try:
- target.start()
- starttime = time.time()
- result = tc.runTests()
- stoptime = time.time()
- if result.wasSuccessful():
- bb.plain("%s - Ran %d test%s in %.3fs" % (pn, result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime))
- msg = "%s - OK - All required tests passed" % pn
- skipped = len(result.skipped)
- if skipped:
- msg += " (skipped=%d)" % skipped
- bb.plain(msg)
- else:
- raise bb.build.FuncFailed("%s - FAILED - check the task log and the ssh log" % pn )
- finally:
- signal.signal(signal.SIGTERM, tc.origsigtermhandler)
- target.stop()
+ target.stop()
+
+def test_create_extract_dirs(d):
+ install_path = d.getVar("TEST_INSTALL_TMP_DIR", True)
+ package_path = d.getVar("TEST_PACKAGED_DIR", True)
+ extracted_path = d.getVar("TEST_EXTRACTED_DIR", True)
+ bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
+ bb.utils.remove(package_path, recurse=True)
+ bb.utils.mkdirhier(install_path)
+ bb.utils.mkdirhier(package_path)
+ bb.utils.mkdirhier(extracted_path)
+
testimage_main[vardepsexclude] =+ "BB_ORIGENV"
diff --git a/import-layers/yocto-poky/meta/classes/testsdk.bbclass b/import-layers/yocto-poky/meta/classes/testsdk.bbclass
index f4dc2c36d..77c9203cf 100644
--- a/import-layers/yocto-poky/meta/classes/testsdk.bbclass
+++ b/import-layers/yocto-poky/meta/classes/testsdk.bbclass
@@ -44,7 +44,7 @@ def run_test_context(CTestContext, d, testdir, tcname, pn, *args):
msg += " (skipped=%d)" % skipped
bb.plain(msg)
else:
- raise bb.build.FuncFailed("%s - FAILED - check the task log and the commands log" % pn )
+ bb.fatal("%s - FAILED - check the task log and the commands log" % pn)
def testsdk_main(d):
import os
@@ -65,7 +65,7 @@ def testsdk_main(d):
try:
subprocess.check_output("cd %s; %s <<EOF\n./tc\nY\nEOF" % (sdktestdir, tcname), shell=True)
except subprocess.CalledProcessError as e:
- bb.fatal("Couldn't install the SDK:\n%s" % e.output)
+ bb.fatal("Couldn't install the SDK:\n%s" % e.output.decode("utf-8"))
try:
run_test_context(SDKTestContext, d, sdktestdir, tcname, pn)
@@ -113,10 +113,18 @@ def testsdkext_main(d):
testdir = d.expand("${WORKDIR}/testsdkext/")
bb.utils.remove(testdir, True)
bb.utils.mkdirhier(testdir)
+ sdkdir = os.path.join(testdir, 'tc')
try:
- subprocess.check_output("%s -y -d %s/tc" % (tcname, testdir), shell=True)
+ subprocess.check_output("%s -y -d %s" % (tcname, sdkdir), shell=True)
except subprocess.CalledProcessError as e:
- bb.fatal("Couldn't install the SDK EXT:\n%s" % e.output)
+ msg = "Couldn't install the extensible SDK:\n%s" % e.output.decode("utf-8")
+ logfn = os.path.join(sdkdir, 'preparing_build_system.log')
+ if os.path.exists(logfn):
+ msg += '\n\nContents of preparing_build_system.log:\n'
+ with open(logfn, 'r') as f:
+ for line in f:
+ msg += line
+ bb.fatal(msg)
try:
bb.plain("Running SDK Compatibility tests ...")
diff --git a/import-layers/yocto-poky/meta/classes/tinderclient.bbclass b/import-layers/yocto-poky/meta/classes/tinderclient.bbclass
index 2bc75fc65..917b74d88 100644
--- a/import-layers/yocto-poky/meta/classes/tinderclient.bbclass
+++ b/import-layers/yocto-poky/meta/classes/tinderclient.bbclass
@@ -10,10 +10,10 @@ def tinder_http_post(server, selector, content_type, body):
h.endheaders()
h.send(body)
errcode, errmsg, headers = h.getreply()
- #print errcode, errmsg, headers
+ #print(errcode, errmsg, headers)
return (errcode,errmsg, headers, h.file)
except:
- print "Error sending the report!"
+ print("Error sending the report!")
# try again
pass
@@ -82,7 +82,7 @@ def tinder_format_http_post(d,status,log):
# we only need on build_status.pl but sending it
# always does not hurt
try:
- f = file(d.getVar('TMPDIR',True)+'/tinder-machine.id', 'r')
+ f = open(d.getVar('TMPDIR',True)+'/tinder-machine.id', 'r')
id = f.read()
variables['machine_id'] = id
except:
@@ -111,11 +111,11 @@ def tinder_build_start(d):
selector = url + "/xml/build_start.pl"
- #print "selector %s and url %s" % (selector, url)
+ #print("selector %s and url %s" % (selector, url))
# now post it
errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
- #print errcode, errmsg, headers
+ #print(errcode, errmsg, headers)
report = h_file.read()
# now let us find the machine id that was assigned to us
@@ -127,7 +127,7 @@ def tinder_build_start(d):
# now we will need to save the machine number
# we will override any previous numbers
- f = file(d.getVar('TMPDIR', True)+"/tinder-machine.id", 'w')
+ f = open(d.getVar('TMPDIR', True)+"/tinder-machine.id", 'w')
f.write(report)
@@ -147,8 +147,8 @@ def tinder_send_http(d, status, _log):
while len(new_log) > 0:
content_type, body = tinder_format_http_post(d,status,new_log[0:18000])
errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
- #print errcode, errmsg, headers
- #print h.file.read()
+ #print(errcode, errmsg, headers)
+ #print(h.file.read())
new_log = new_log[18000:]
@@ -278,7 +278,7 @@ def tinder_do_tinder_report(event):
try:
# truncate the tinder log file
- f = file(event.data.getVar('TINDER_LOG', True), 'w')
+ f = open(event.data.getVar('TINDER_LOG', True), 'w')
f.write("")
f.close()
except:
@@ -287,7 +287,7 @@ def tinder_do_tinder_report(event):
try:
# write a status to the file. This is needed for the -k option
# of BitBake
- g = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
+ g = open(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
g.write("")
g.close()
except IOError:
@@ -319,14 +319,14 @@ def tinder_do_tinder_report(event):
log += "<--- TINDERBOX Package %s failed (FAILURE)\n" % event.data.getVar('PF', True)
status = 200
# remember the failure for the -k case
- h = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
+ h = open(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
h.write("200")
elif name == "BuildCompleted":
log += "Build Completed\n"
status = 100
# Check if we have a old status...
try:
- h = file(event.data.getVar('TMPDIR',True)+'/tinder-status', 'r')
+ h = open(event.data.getVar('TMPDIR',True)+'/tinder-status', 'r')
status = int(h.read())
except:
pass
@@ -342,7 +342,7 @@ def tinder_do_tinder_report(event):
log += "Error:Was Runtime: %d\n" % event.isRuntime()
status = 200
# remember the failure for the -k case
- h = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
+ h = open(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
h.write("200")
# now post the log
diff --git a/import-layers/yocto-poky/meta/classes/toaster.bbclass b/import-layers/yocto-poky/meta/classes/toaster.bbclass
index 1a70f14a9..4bddf34e9 100644
--- a/import-layers/yocto-poky/meta/classes/toaster.bbclass
+++ b/import-layers/yocto-poky/meta/classes/toaster.bbclass
@@ -33,6 +33,7 @@ python toaster_layerinfo_dumpdata() {
def _get_git_branch(layer_path):
branch = subprocess.Popen("git symbolic-ref HEAD 2>/dev/null ", cwd=layer_path, shell=True, stdout=subprocess.PIPE).communicate()[0]
+ branch = branch.decode('utf-8')
branch = branch.replace('refs/heads/', '').rstrip()
return branch
@@ -135,60 +136,16 @@ python toaster_package_dumpdata() {
# 2. Dump output image files information
-python toaster_image_dumpdata() {
- """
- Image filename for output images is not standardized.
- image_types.bbclass will spell out IMAGE_CMD_xxx variables that actually
- have hardcoded ways to create image file names in them.
- So we look for files starting with the set name.
-
- We also look for other files in the images/ directory which don't
- match IMAGE_NAME, such as the kernel bzImage, modules tarball etc.
- """
-
- dir_to_walk = d.getVar('DEPLOY_DIR_IMAGE', True);
- image_name = d.getVar('IMAGE_NAME', True);
- image_info_data = {}
- artifact_info_data = {}
-
- # collect all images and artifacts in the images directory
- for dirpath, dirnames, filenames in os.walk(dir_to_walk):
- for filename in filenames:
- full_path = os.path.join(dirpath, filename)
- try:
- if filename.startswith(image_name):
- # image
- image_info_data[full_path] = os.stat(full_path).st_size
- else:
- # other non-image artifact
- if not os.path.islink(full_path):
- artifact_info_data[full_path] = os.stat(full_path).st_size
- except OSError as e:
- bb.event.fire(bb.event.MetadataEvent("OSErrorException", e), d)
-
- bb.event.fire(bb.event.MetadataEvent("ImageFileSize", image_info_data), d)
- bb.event.fire(bb.event.MetadataEvent("ArtifactFileSize", artifact_info_data), d)
-}
-
python toaster_artifact_dumpdata() {
"""
- Dump data about artifacts in the SDK_DEPLOY directory
+ Dump data about SDK variables
"""
- dir_to_walk = d.getVar("SDK_DEPLOY", True)
- artifact_info_data = {}
-
- # collect all artifacts in the sdk directory
- for dirpath, dirnames, filenames in os.walk(dir_to_walk):
- for filename in filenames:
- full_path = os.path.join(dirpath, filename)
- try:
- if not os.path.islink(full_path):
- artifact_info_data[full_path] = os.stat(full_path).st_size
- except OSError as e:
- bb.event.fire(bb.event.MetadataEvent("OSErrorException", e), d)
+ event_data = {
+ "TOOLCHAIN_OUTPUTNAME": d.getVar("TOOLCHAIN_OUTPUTNAME", True)
+ }
- bb.event.fire(bb.event.MetadataEvent("ArtifactFileSize", artifact_info_data), d)
+ bb.event.fire(bb.event.MetadataEvent("SDKArtifactInfo", event_data), d)
}
# collect list of buildstats files based on fired events; when the build completes, collect all stats and fire an event with collected data
@@ -331,15 +288,22 @@ python toaster_buildhistory_dump() {
images[target][dependsname] = {'size': 0, 'depends' : []}
images[target][pname]['depends'].append((dependsname, deptype))
- with open("%s/files-in-image.txt" % installed_img_path, "r") as fin:
- for line in fin:
- lc = [ x for x in line.strip().split(" ") if len(x) > 0 ]
- if lc[0].startswith("l"):
- files[target]['syms'].append(lc)
- elif lc[0].startswith("d"):
- files[target]['dirs'].append(lc)
- else:
- files[target]['files'].append(lc)
+ # files-in-image.txt is only generated if an image file is created,
+ # so the file entries ('syms', 'dirs', 'files') for a target will be
+ # empty for rootfs builds and other "image" tasks which don't
+ # produce image files
+ # (e.g. "bitbake core-image-minimal -c populate_sdk")
+ files_in_image_path = "%s/files-in-image.txt" % installed_img_path
+ if os.path.exists(files_in_image_path):
+ with open(files_in_image_path, "r") as fin:
+ for line in fin:
+ lc = [ x for x in line.strip().split(" ") if len(x) > 0 ]
+ if lc[0].startswith("l"):
+ files[target]['syms'].append(lc)
+ elif lc[0].startswith("d"):
+ files[target]['dirs'].append(lc)
+ else:
+ files[target]['files'].append(lc)
for pname in images[target]:
if not pname in allpkgs:
@@ -360,15 +324,18 @@ python toaster_buildhistory_dump() {
}
-# dump information related to license manifest path
-
-python toaster_licensemanifest_dump() {
- deploy_dir = d.getVar('DEPLOY_DIR', True);
- image_name = d.getVar('IMAGE_NAME', True);
-
- data = { 'deploy_dir' : deploy_dir, 'image_name' : image_name }
-
- bb.event.fire(bb.event.MetadataEvent("LicenseManifestPath", data), d)
+# get list of artifacts from sstate manifest
+python toaster_artifacts() {
+ if e.taskname in ["do_deploy", "do_image_complete", "do_populate_sdk", "do_populate_sdk_ext"]:
+ d2 = d.createCopy()
+ d2.setVar('FILE', e.taskfile)
+ d2.setVar('SSTATE_MANMACH', d2.expand("${MACHINE}"))
+ manifest = oe.sstatesig.sstate_get_manifest_filename(e.taskname[3:], d2)[0]
+ if os.access(manifest, os.R_OK):
+ with open(manifest) as fmanifest:
+ artifacts = [fname.strip() for fname in fmanifest]
+ data = {"task": e.taskid, "artifacts": artifacts}
+ bb.event.fire(bb.event.MetadataEvent("TaskArtifacts", data), d2)
}
# set event handlers
@@ -381,17 +348,17 @@ toaster_collect_task_stats[eventmask] = "bb.event.BuildCompleted bb.build.TaskSu
addhandler toaster_buildhistory_dump
toaster_buildhistory_dump[eventmask] = "bb.event.BuildCompleted"
+addhandler toaster_artifacts
+toaster_artifacts[eventmask] = "bb.runqueue.runQueueTaskSkipped bb.runqueue.runQueueTaskCompleted"
+
do_packagedata_setscene[postfuncs] += "toaster_package_dumpdata "
do_packagedata_setscene[vardepsexclude] += "toaster_package_dumpdata "
do_package[postfuncs] += "toaster_package_dumpdata "
do_package[vardepsexclude] += "toaster_package_dumpdata "
-do_image_complete[postfuncs] += "toaster_image_dumpdata "
-do_image_complete[vardepsexclude] += "toaster_image_dumpdata "
-
-do_rootfs[postfuncs] += "toaster_licensemanifest_dump "
-do_rootfs[vardepsexclude] += "toaster_licensemanifest_dump "
-
do_populate_sdk[postfuncs] += "toaster_artifact_dumpdata "
do_populate_sdk[vardepsexclude] += "toaster_artifact_dumpdata "
+
+do_populate_sdk_ext[postfuncs] += "toaster_artifact_dumpdata "
+do_populate_sdk_ext[vardepsexclude] += "toaster_artifact_dumpdata "
\ No newline at end of file
diff --git a/import-layers/yocto-poky/meta/classes/toolchain-scripts-base.bbclass b/import-layers/yocto-poky/meta/classes/toolchain-scripts-base.bbclass
new file mode 100644
index 000000000..2489b9dbe
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/toolchain-scripts-base.bbclass
@@ -0,0 +1,11 @@
+# This function creates a version information file
+toolchain_create_sdk_version () {
+ local versionfile=$1
+ rm -f $versionfile
+ touch $versionfile
+ echo 'Distro: ${DISTRO}' >> $versionfile
+ echo 'Distro Version: ${DISTRO_VERSION}' >> $versionfile
+ echo 'Metadata Revision: ${METADATA_REVISION}' >> $versionfile
+ echo 'Timestamp: ${DATETIME}' >> $versionfile
+}
+toolchain_create_sdk_version[vardepsexclude] = "DATETIME"
diff --git a/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass b/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass
index 2e2c93af4..0e11f2d7a 100644
--- a/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass
+++ b/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass
@@ -1,4 +1,4 @@
-inherit siteinfo kernel-arch
+inherit toolchain-scripts-base siteinfo kernel-arch
# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it
# doesn't always match our expectations... but we default to the stock value
@@ -6,9 +6,13 @@ REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
TARGET_CC_ARCH_append_libc-uclibc = " -muclibc"
TARGET_CC_ARCH_append_libc-musl = " -mmusl"
+# default debug prefix map isn't valid in the SDK
+DEBUG_PREFIX_MAP = ""
+
# This function creates an environment-setup-script for use in a deployable SDK
toolchain_create_sdk_env_script () {
- # Create environment setup script
+ # Create environment setup script. Remember that $SDKTARGETSYSROOT should
+ # only be expanded on the target at runtime.
base_sbindir=${10:-${base_sbindir_nativesdk}}
base_bindir=${9:-${base_bindir_nativesdk}}
sbindir=${8:-${sbindir_nativesdk}}
@@ -29,7 +33,7 @@ toolchain_create_sdk_env_script () {
echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
echo "export CCACHE_PATH=$sdkpathnative$bindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$CCACHE_PATH' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
- echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig' >> $script
+ echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig:$SDKTARGETSYSROOT'"$prefix"'/share/pkgconfig' >> $script
echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
echo "export OECORE_NATIVE_SYSROOT=\"$sdkpathnative\"" >> $script
echo 'export OECORE_TARGET_SYSROOT="$SDKTARGETSYSROOT"' >> $script
@@ -89,19 +93,19 @@ toolchain_shared_env_script () {
# Append environment subscripts
if [ -d "\$OECORE_TARGET_SYSROOT/environment-setup.d" ]; then
for envfile in \$OECORE_TARGET_SYSROOT/environment-setup.d/*.sh; do
- source \$envfile
+ . \$envfile
done
fi
if [ -d "\$OECORE_NATIVE_SYSROOT/environment-setup.d" ]; then
for envfile in \$OECORE_NATIVE_SYSROOT/environment-setup.d/*.sh; do
- source \$envfile
+ . \$envfile
done
fi
EOF
}
#we get the cached site config in the runtime
-TOOLCHAIN_CONFIGSITE_NOCACHE = "${@siteinfo_get_files(d, True)}"
+TOOLCHAIN_CONFIGSITE_NOCACHE = "${@siteinfo_get_files(d)}"
TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "virtual/${MLPREFIX}libc ncurses"
@@ -132,18 +136,6 @@ toolchain_create_sdk_siteconfig () {
# The immediate expansion above can result in unwanted path dependencies here
toolchain_create_sdk_siteconfig[vardepsexclude] = "TOOLCHAIN_CONFIGSITE_SYSROOTCACHE"
-#This function create a version information file
-toolchain_create_sdk_version () {
- local versionfile=$1
- rm -f $versionfile
- touch $versionfile
- echo 'Distro: ${DISTRO}' >> $versionfile
- echo 'Distro Version: ${DISTRO_VERSION}' >> $versionfile
- echo 'Metadata Revision: ${METADATA_REVISION}' >> $versionfile
- echo 'Timestamp: ${DATETIME}' >> $versionfile
-}
-toolchain_create_sdk_version[vardepsexclude] = "DATETIME"
-
python __anonymous () {
import oe.classextend
deps = ""
diff --git a/import-layers/yocto-poky/meta/classes/uboot-config.bbclass b/import-layers/yocto-poky/meta/classes/uboot-config.bbclass
index cb061af34..3f760f2fb 100644
--- a/import-layers/yocto-poky/meta/classes/uboot-config.bbclass
+++ b/import-layers/yocto-poky/meta/classes/uboot-config.bbclass
@@ -3,7 +3,7 @@
# The format to specify it, in the machine, is:
#
# UBOOT_CONFIG ??= <default>
-# UBOOT_CONFIG[foo] = "config,images"
+# UBOOT_CONFIG[foo] = "config,images,binary"
#
# or
#
@@ -11,9 +11,13 @@
#
# Copyright 2013, 2014 (C) O.S. Systems Software LTDA.
+UBOOT_BINARY ?= "u-boot.${UBOOT_SUFFIX}"
+
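+# A hypothetical machine configuration using all three fields, selecting the
+# U-Boot config, an image type to append to IMAGE_FSTYPES, and the binary:
+#   UBOOT_CONFIG ??= "sd"
+#   UBOOT_CONFIG[sd] = "mx6qsabresd_config,sdcard,u-boot.imx"
+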
python () {
ubootmachine = d.getVar("UBOOT_MACHINE", True)
ubootconfigflags = d.getVarFlags('UBOOT_CONFIG')
+ ubootbinary = d.getVar('UBOOT_BINARY', True)
+ ubootbinaries = d.getVar('UBOOT_BINARIES', True)
# The "doc" varflag is special, we don't want to see it here
ubootconfigflags.pop('doc', None)
@@ -27,6 +31,9 @@ python () {
if ubootmachine and ubootconfigflags:
raise bb.parse.SkipPackage("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.")
+ if ubootconfigflags and ubootbinaries:
+ raise bb.parse.SkipPackage("You cannot use UBOOT_BINARIES as it is internal to uboot_config.bbclass.")
+
if not ubootconfigflags:
return
@@ -36,13 +43,19 @@ python () {
for f, v in ubootconfigflags.items():
if config == f:
items = v.split(',')
- if items[0] and len(items) > 2:
- raise bb.parse.SkipPackage('Only config,images can be specified!')
+ if items[0] and len(items) > 3:
+ raise bb.parse.SkipPackage('Only config,images,binary can be specified!')
d.appendVar('UBOOT_MACHINE', ' ' + items[0])
# IMAGE_FSTYPES appending
if len(items) > 1 and items[1]:
bb.debug(1, "Appending '%s' to IMAGE_FSTYPES." % items[1])
d.appendVar('IMAGE_FSTYPES', ' ' + items[1])
+ if len(items) > 2 and items[2]:
+ bb.debug(1, "Appending '%s' to UBOOT_BINARIES." % items[2])
+ d.appendVar('UBOOT_BINARIES', ' ' + items[2])
+ else:
+ bb.debug(1, "Appending '%s' to UBOOT_BINARIES." % ubootbinary)
+ d.appendVar('UBOOT_BINARIES', ' ' + ubootbinary)
break
elif len(ubootconfig) == 0:
raise bb.parse.SkipPackage('You must set a default in UBOOT_CONFIG.')
diff --git a/import-layers/yocto-poky/meta/classes/uboot-extlinux-config.bbclass b/import-layers/yocto-poky/meta/classes/uboot-extlinux-config.bbclass
new file mode 100644
index 000000000..df91386c0
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/uboot-extlinux-config.bbclass
@@ -0,0 +1,126 @@
+# uboot-extlinux-config.bbclass
+#
+# This class allows extlinux.conf generation for U-Boot use. The
+# U-Boot support for it exists to allow use of the Generic Distro
+# Configuration specification by OpenEmbedded-based products.
+#
+# External variables:
+#
+# UBOOT_EXTLINUX_CONSOLE - Set to "console=ttyX" to change kernel boot
+# default console.
+# UBOOT_EXTLINUX_LABELS - A list of targets for the automatic config.
+# UBOOT_EXTLINUX_KERNEL_ARGS - Add additional kernel arguments.
+# UBOOT_EXTLINUX_KERNEL_IMAGE - Kernel image name.
+# UBOOT_EXTLINUX_FDTDIR - Device tree directory.
+# UBOOT_EXTLINUX_INITRD - Indicates a list of filesystem images to
+# concatenate and use as an initrd (optional).
+# UBOOT_EXTLINUX_MENU_DESCRIPTION - Name to use as description.
+# UBOOT_EXTLINUX_ROOT - Root kernel cmdline.
+#
+# If there is only one label, the system will boot automatically and no menu
+# will be created. If you want to use more than one label, e.g. linux and
+# alternate, use overrides to set the menu description, console and other
+# variables.
+#
+# Ex:
+#
+# UBOOT_EXTLINUX_LABELS ??= "default fallback"
+#
+# UBOOT_EXTLINUX_KERNEL_IMAGE_default ??= "../zImage"
+# UBOOT_EXTLINUX_MENU_DESCRIPTION_default ??= "Linux Default"
+#
+# UBOOT_EXTLINUX_KERNEL_IMAGE_fallback ??= "../zImage-fallback"
+# UBOOT_EXTLINUX_MENU_DESCRIPTION_fallback ??= "Linux Fallback"
+#
+# Results:
+#
+# menu title Select the boot mode
+# LABEL Linux Default
+# KERNEL ../zImage
+# FDTDIR ../
+# APPEND root=/dev/mmcblk2p2 rootwait rw console=${console}
+# LABEL Linux Fallback
+# KERNEL ../zImage-fallback
+# FDTDIR ../
+# APPEND root=/dev/mmcblk2p2 rootwait rw console=${console}
+#
+# Copyright (C) 2016, O.S. Systems Software LTDA. All Rights Reserved
+# Released under the MIT license (see packages/COPYING)
+#
+# The kernel has an internal default console, which you can override with
+# a console=...some_tty...
+UBOOT_EXTLINUX_CONSOLE ??= "console=${console}"
+UBOOT_EXTLINUX_LABELS ??= "linux"
+UBOOT_EXTLINUX_FDTDIR ??= "../"
+UBOOT_EXTLINUX_KERNEL_IMAGE ??= "../${KERNEL_IMAGETYPE}"
+UBOOT_EXTLINUX_KERNEL_ARGS ??= "rootwait rw"
+UBOOT_EXTLINUX_MENU_DESCRIPTION_linux ??= "${DISTRO_NAME}"
+
+UBOOT_EXTLINUX_CONFIG = "${B}/extlinux.conf"
+
+python create_extlinux_config() {
+ if d.getVar("UBOOT_EXTLINUX", True) != "1":
+ return
+
+ if not d.getVar('WORKDIR', True):
+ bb.error("WORKDIR not defined, unable to package")
+
+ labels = d.getVar('UBOOT_EXTLINUX_LABELS', True)
+ if not labels:
+ bb.fatal("UBOOT_EXTLINUX_LABELS not defined, nothing to do")
+
+ if not labels.strip():
+ bb.fatal("No labels, nothing to do")
+
+ cfile = d.getVar('UBOOT_EXTLINUX_CONFIG', True)
+ if not cfile:
+ bb.fatal('Unable to read UBOOT_EXTLINUX_CONFIG')
+
+ try:
+ with open(cfile, 'w') as cfgfile:
+ cfgfile.write('# Generic Distro Configuration file generated by OpenEmbedded\n')
+
+ if len(labels.split()) > 1:
+ cfgfile.write('menu title Select the boot mode\n')
+
+ for label in labels.split():
+ localdata = bb.data.createCopy(d)
+
+ overrides = localdata.getVar('OVERRIDES', True)
+ if not overrides:
+ bb.fatal('OVERRIDES not defined')
+
+ localdata.setVar('OVERRIDES', label + ':' + overrides)
+ bb.data.update_data(localdata)
+
+ extlinux_console = localdata.getVar('UBOOT_EXTLINUX_CONSOLE', True)
+
+ menu_description = localdata.getVar('UBOOT_EXTLINUX_MENU_DESCRIPTION', True)
+ if not menu_description:
+ menu_description = label
+
+ root = localdata.getVar('UBOOT_EXTLINUX_ROOT', True)
+ if not root:
+ bb.fatal('UBOOT_EXTLINUX_ROOT not defined')
+
+ kernel_image = localdata.getVar('UBOOT_EXTLINUX_KERNEL_IMAGE', True)
+ fdtdir = localdata.getVar('UBOOT_EXTLINUX_FDTDIR', True)
+ if fdtdir:
+ cfgfile.write('LABEL %s\n\tKERNEL %s\n\tFDTDIR %s\n' %
+ (menu_description, kernel_image, fdtdir))
+ else:
+ cfgfile.write('LABEL %s\n\tKERNEL %s\n' % (menu_description, kernel_image))
+
+ kernel_args = localdata.getVar('UBOOT_EXTLINUX_KERNEL_ARGS', True)
+
+ initrd = localdata.getVar('UBOOT_EXTLINUX_INITRD', True)
+ if initrd:
+ cfgfile.write('\tINITRD %s\n'% initrd)
+
+ kernel_args = root + " " + kernel_args
+ cfgfile.write('\tAPPEND %s %s\n' % (kernel_args, extlinux_console))
+
+ except OSError:
+ bb.fatal('Unable to open %s' % (cfile))
+}
+
+do_install[prefuncs] += "create_extlinux_config"
diff --git a/import-layers/yocto-poky/meta/classes/uboot-sign.bbclass b/import-layers/yocto-poky/meta/classes/uboot-sign.bbclass
new file mode 100644
index 000000000..3c56db887
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/uboot-sign.bbclass
@@ -0,0 +1,95 @@
+# This file is part of U-Boot verified boot support and is intended to be
+# inherited from u-boot recipe and from kernel-fitimage.bbclass.
+#
+# The signature procedure requires the user to generate an RSA key and
+# certificate in a directory and to define the following variable:
+#
+# UBOOT_SIGN_KEYDIR = "/keys/directory"
+# UBOOT_SIGN_KEYNAME = "dev" # key name in keydir (e.g. "dev.crt", "dev.key")
+# UBOOT_MKIMAGE_DTCOPTS = "-I dts -O dtb -p 2000"
+# UBOOT_SIGN_ENABLE = "1"
+#
+# As verified boot depends on fitImage generation, the following is also required:
+#
+# KERNEL_CLASSES ?= " kernel-fitimage "
+# KERNEL_IMAGETYPE ?= "fitImage"
+#
+# The signature support is limited to the use of CONFIG_OF_SEPARATE in U-Boot.
+#
+# The task sequence is set as below, using DEPLOY_IMAGE_DIR as a common place
+# to handle the device tree blob:
+#
+# u-boot:do_deploy_dtb
+# u-boot:do_deploy
+# virtual/kernel:do_assemble_fitimage
+# u-boot:do_concat_dtb
+# u-boot:do_install
+#
+# For more details on the signature process, please refer to the U-Boot documentation.
+
+# Signature activation.
+UBOOT_SIGN_ENABLE ?= "0"
+
+# Default value for deployment filenames.
+UBOOT_DTB_IMAGE ?= "u-boot-${MACHINE}-${PV}-${PR}.dtb"
+UBOOT_DTB_BINARY ?= "u-boot.dtb"
+UBOOT_DTB_SYMLINK ?= "u-boot-${MACHINE}.dtb"
+UBOOT_NODTB_IMAGE ?= "u-boot-nodtb-${MACHINE}-${PV}-${PR}.${UBOOT_SUFFIX}"
+UBOOT_NODTB_BINARY ?= "u-boot-nodtb.${UBOOT_SUFFIX}"
+UBOOT_NODTB_SYMLINK ?= "u-boot-nodtb-${MACHINE}.${UBOOT_SUFFIX}"
+
+#
+# Following is relevant only for u-boot recipes:
+#
+
+do_deploy_dtb () {
+ mkdir -p ${DEPLOYDIR}
+ cd ${DEPLOYDIR}
+
+ if [ -f ${B}/${UBOOT_DTB_BINARY} ]; then
+ install ${B}/${UBOOT_DTB_BINARY} ${DEPLOYDIR}/${UBOOT_DTB_IMAGE}
+ rm -f ${UBOOT_DTB_BINARY} ${UBOOT_DTB_SYMLINK}
+ ln -sf ${UBOOT_DTB_IMAGE} ${UBOOT_DTB_SYMLINK}
+ ln -sf ${UBOOT_DTB_IMAGE} ${UBOOT_DTB_BINARY}
+ fi
+ if [ -f ${B}/${UBOOT_NODTB_BINARY} ]; then
+ install ${B}/${UBOOT_NODTB_BINARY} ${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}
+ rm -f ${UBOOT_NODTB_BINARY} ${UBOOT_NODTB_SYMLINK}
+ ln -sf ${UBOOT_NODTB_IMAGE} ${UBOOT_NODTB_SYMLINK}
+ ln -sf ${UBOOT_NODTB_IMAGE} ${UBOOT_NODTB_BINARY}
+ fi
+}
+
+do_concat_dtb () {
+ # Concatenate U-Boot w/o DTB & DTB with public key
+ # (cf. kernel-fitimage.bbclass for more details)
+ if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ]; then
+ if [ "x${UBOOT_SUFFIX}" = "ximg" -o "x${UBOOT_SUFFIX}" = "xrom" ] && \
+ [ -e "${DEPLOYDIR}/${UBOOT_DTB_IMAGE}" ]; then
+ cd ${B}
+ oe_runmake EXT_DTB=${DEPLOYDIR}/${UBOOT_DTB_IMAGE}
+ install ${S}/${UBOOT_BINARY} ${DEPLOYDIR}/${UBOOT_IMAGE}
+ install ${S}/${UBOOT_BINARY} ${DEPLOY_DIR_IMAGE}/${UBOOT_IMAGE}
+ elif [ -e "${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}" -a -e "${DEPLOYDIR}/${UBOOT_DTB_IMAGE}" ]; then
+ cd ${DEPLOYDIR}
+ cat ${UBOOT_NODTB_IMAGE} ${UBOOT_DTB_IMAGE} | tee ${B}/${UBOOT_BINARY} > ${UBOOT_IMAGE}
+ else
+ bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available."
+ fi
+ fi
+}
+
+python () {
+ uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot', True) or 'u-boot'
+ if d.getVar('UBOOT_SIGN_ENABLE', True) == '1' and d.getVar('PN', True) == uboot_pn:
+ kernel_pn = d.getVar('PREFERRED_PROVIDER_virtual/kernel', True)
+
+ # u-boot.dtb and u-boot-nodtb.bin are deployed _before_ do_deploy
+ # Thus, do_deploy_setscene will also populate them in DEPLOY_IMAGE_DIR
+ bb.build.addtask('do_deploy_dtb', 'do_deploy', 'do_compile', d)
+
+ # do_concat_dtb is scheduled _before_ do_install as it overwrites the
+ # u-boot.bin in both DEPLOYDIR and DEPLOY_IMAGE_DIR.
+ bb.build.addtask('do_concat_dtb', 'do_install', None, d)
+ d.appendVarFlag('do_concat_dtb', 'depends', ' %s:do_assemble_fitimage' % kernel_pn)
+}
diff --git a/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass b/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass
index 70a818572..1fdd68131 100644
--- a/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass
+++ b/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass
@@ -71,14 +71,14 @@ def gen_updatealternativesvardeps(d):
# First compute them for non_pkg versions
for v in vars:
- for flag in (d.getVarFlags(v) or {}):
+ for flag in sorted((d.getVarFlags(v) or {}).keys()):
if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
continue
d.appendVar('%s_VARDEPS' % (v), ' %s:%s' % (flag, d.getVarFlag(v, flag, False)))
for p in pkgs:
for v in vars:
- for flag in (d.getVarFlags("%s_%s" % (v,p)) or {}):
+ for flag in sorted((d.getVarFlags("%s_%s" % (v,p)) or {}).keys()):
if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
continue
d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s_%s' % (v,p), flag, False)))
diff --git a/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass b/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass
index 2a0a74a5f..321924bb3 100644
--- a/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass
+++ b/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass
@@ -1,6 +1,7 @@
UPDATERCPN ?= "${PN}"
-DEPENDS_append_class-target = " update-rc.d-native update-rc.d initscripts"
+DEPENDS_append_class-target = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', ' update-rc.d-native update-rc.d initscripts', '', d)}"
+
UPDATERCD = "update-rc.d"
UPDATERCD_class-cross = ""
UPDATERCD_class-native = ""
@@ -12,7 +13,7 @@ INIT_D_DIR = "${sysconfdir}/init.d"
updatercd_preinst() {
if [ -z "$D" -a -f "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
- ${INIT_D_DIR}/${INITSCRIPT_NAME} stop
+ ${INIT_D_DIR}/${INITSCRIPT_NAME} stop || :
fi
if type update-rc.d >/dev/null 2>/dev/null; then
if [ -n "$D" ]; then
@@ -36,8 +37,8 @@ fi
}
updatercd_prerm() {
-if [ -z "$D" ]; then
- ${INIT_D_DIR}/${INITSCRIPT_NAME} stop
+if [ -z "$D" -a -x "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
+ ${INIT_D_DIR}/${INITSCRIPT_NAME} stop || :
fi
}
@@ -56,15 +57,15 @@ fi
def update_rc_after_parse(d):
if d.getVar('INITSCRIPT_PACKAGES', False) == None:
if d.getVar('INITSCRIPT_NAME', False) == None:
- raise bb.build.FuncFailed("%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % d.getVar('FILE', False))
+ bb.fatal("%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % d.getVar('FILE', False))
if d.getVar('INITSCRIPT_PARAMS', False) == None:
- raise bb.build.FuncFailed("%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % d.getVar('FILE', False))
+ bb.fatal("%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % d.getVar('FILE', False))
python __anonymous() {
update_rc_after_parse(d)
}
-PACKAGESPLITFUNCS_prepend = "populate_packages_updatercd "
+PACKAGESPLITFUNCS_prepend = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'populate_packages_updatercd ', '', d)}"
PACKAGESPLITFUNCS_remove_class-nativesdk = "populate_packages_updatercd "
populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_preinst updatercd_postinst"
@@ -120,8 +121,7 @@ python populate_packages_updatercd () {
# Check that this class isn't being inhibited (generally, by
# systemd.bbclass) before doing any work.
- if bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) or \
- not d.getVar("INHIBIT_UPDATERCD_BBCLASS", True):
+ if not d.getVar("INHIBIT_UPDATERCD_BBCLASS", True):
pkgs = d.getVar('INITSCRIPT_PACKAGES', True)
if pkgs == None:
pkgs = d.getVar('UPDATERCPN', True)
diff --git a/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass b/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass
index a9b506d05..afb580aed 100644
--- a/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass
+++ b/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass
@@ -4,6 +4,7 @@ def update_useradd_static_config(d):
import argparse
import itertools
import re
+ import errno
class myArgumentParser( argparse.ArgumentParser ):
def _print_message(self, message, file=None):
@@ -15,7 +16,7 @@ def update_useradd_static_config(d):
error(message)
def error(self, message):
- raise bb.build.FuncFailed(message)
+ bb.fatal(message)
def list_extend(iterable, length, obj = None):
"""Ensure that iterable is the specified length by extending with obj
@@ -30,22 +31,33 @@ def update_useradd_static_config(d):
are set)."""
id_table = dict()
for conf in file_list.split():
- if os.path.exists(conf):
- f = open(conf, "r")
- for line in f:
- if line.startswith('#'):
- continue
- # Make sure there always are at least exp_fields elements in
- # the field list. This allows for leaving out trailing
- # colons in the files.
- fields = list_extend(line.rstrip().split(":"), exp_fields)
- if fields[0] not in id_table:
- id_table[fields[0]] = fields
- else:
- id_table[fields[0]] = list(itertools.imap(lambda x, y: x or y, fields, id_table[fields[0]]))
+ try:
+ with open(conf, "r") as f:
+ for line in f:
+ if line.startswith('#'):
+ continue
+ # Make sure there always are at least exp_fields
+ # elements in the field list. This allows for leaving
+ # out trailing colons in the files.
+ fields = list_extend(line.rstrip().split(":"), exp_fields)
+ if fields[0] not in id_table:
+ id_table[fields[0]] = fields
+ else:
+ id_table[fields[0]] = list(map(lambda x, y: x or y, fields, id_table[fields[0]]))
+ except IOError as e:
+ # Tolerate only missing files; re-raise any other I/O error.
+ if e.errno != errno.ENOENT:
+ raise
return id_table
+ def handle_missing_id(id, type, pkg):
+ # For backwards compatibility we accept "1" in addition to "error"
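+ # For example, USERADD_ERROR_DYNAMIC = "warn" downgrades a missing static
+ # ID to a warning, while "error" (or the legacy "1") makes it fatal.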
+ if d.getVar('USERADD_ERROR_DYNAMIC', True) == 'error' or d.getVar('USERADD_ERROR_DYNAMIC', True) == '1':
+ #bb.error("Skipping recipe %s, package %s which adds %sname %s does not have a static ID defined." % (d.getVar('PN', True), pkg, type, id))
+ bb.fatal("%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN', True), pkg, type, id))
+ elif d.getVar('USERADD_ERROR_DYNAMIC', True) == 'warn':
+ bb.warn("%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN', True), pkg, type, id))
+
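In practice the policy comes from the distro or local configuration; per the parsing above, "error" (or the legacy "1") fails the build, "warn" only reports, and leaving the variable unset lets dynamically assigned IDs through silently. For example, in local.conf:

    USERADD_ERROR_DYNAMIC = "error"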
# We parse and rewrite the useradd components
def rewrite_useradd(params):
# The following comes from --help on useradd from shadow
@@ -93,9 +105,9 @@ def update_useradd_static_config(d):
if not param:
continue
try:
- uaargs = parser.parse_args(re.split('''[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
+ uaargs = parser.parse_args(re.split('''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
except:
- raise bb.build.FuncFailed("%s: Unable to parse arguments for USERADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param))
+ bb.fatal("%s: Unable to parse arguments for USERADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param))
# Read all passwd files specified in USERADD_UID_TABLES or files/passwd
# Use the standard passwd layout:
@@ -112,6 +124,8 @@ def update_useradd_static_config(d):
users = merge_files(get_passwd_list(d), 7)
if uaargs.LOGIN not in users:
+ if not uaargs.uid or not uaargs.uid.isdigit() or not uaargs.gid:
+ handle_missing_id(uaargs.LOGIN, 'user', pkg)
continue
field = users[uaargs.LOGIN]
@@ -161,9 +175,8 @@ def update_useradd_static_config(d):
uaargs.shell = field[6] or uaargs.shell
# Should be an error if a specific option is set...
- if d.getVar('USERADD_ERROR_DYNAMIC', True) == '1' and not ((uaargs.uid and uaargs.uid.isdigit()) and uaargs.gid):
- #bb.error("Skipping recipe %s, package %s which adds username %s does not have a static uid defined." % (d.getVar('PN', True), pkg, uaargs.LOGIN))
- raise bb.build.FuncFailed("%s - %s: Username %s does not have a static uid defined." % (d.getVar('PN', True), pkg, uaargs.LOGIN))
+ if not uaargs.uid or not uaargs.uid.isdigit() or not uaargs.gid:
+ handle_missing_id(uaargs.LOGIN, 'user', pkg)
# Reconstruct the args...
newparam = ['', ' --defaults'][uaargs.defaults]
@@ -227,9 +240,9 @@ def update_useradd_static_config(d):
continue
try:
# If we're processing multiple lines, we could have left over values here...
- gaargs = parser.parse_args(re.split('''[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
+ gaargs = parser.parse_args(re.split('''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
except:
- raise bb.build.FuncFailed("%s: Unable to parse arguments for GROUPADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param))
+ bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param))
# Read all group files specified in USERADD_GID_TABLES or files/group
# Use the standard group layout:
@@ -244,6 +257,8 @@ def update_useradd_static_config(d):
groups = merge_files(get_group_list(d), 4)
if gaargs.GROUP not in groups:
+ if not gaargs.gid or not gaargs.gid.isdigit():
+ handle_missing_id(gaargs.GROUP, 'group', pkg)
continue
field = groups[gaargs.GROUP]
@@ -253,9 +268,8 @@ def update_useradd_static_config(d):
bb.warn("%s: Changing groupname %s's gid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), gaargs.GROUP, gaargs.gid, field[2]))
gaargs.gid = field[2]
- if d.getVar('USERADD_ERROR_DYNAMIC', True) == '1' and not (gaargs.gid and gaargs.gid.isdigit()):
- #bb.error("Skipping recipe %s, package %s which adds groupname %s does not have a static gid defined." % (d.getVar('PN', True), pkg, gaargs.GROUP))
- raise bb.build.FuncFailed("%s - %s: Groupname %s does not have a static gid defined." % (d.getVar('PN', True), pkg, gaargs.GROUP))
+ if not gaargs.gid or not gaargs.gid.isdigit():
+ handle_missing_id(gaargs.GROUP, 'group', pkg)
# Reconstruct the args...
newparam = ['', ' --force'][gaargs.force]
@@ -271,6 +285,19 @@ def update_useradd_static_config(d):
return ";".join(newparams).strip()
+ # The parsing of the current recipe depends on the content of
+ # the files listed in USERADD_UID/GID_TABLES. We need to tell bitbake
+ # about that explicitly to trigger re-parsing and thus re-execution of
+ # this code when the files change.
+ bbpath = d.getVar('BBPATH', True)
+ for varname, default in (('USERADD_UID_TABLES', 'files/passwd'),
+ ('USERADD_GID_TABLES', 'files/group')):
+ tables = d.getVar(varname, True)
+ if not tables:
+ tables = default
+ for conf_file in tables.split():
+ bb.parse.mark_dependency(d, bb.utils.which(bbpath, conf_file))
+
# Load and process the users and groups, rewriting the adduser/addgroup params
useradd_packages = d.getVar('USERADD_PACKAGES', True)
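For illustration, a distro or recipe can point the class at its own tables; each listed path is resolved against BBPATH with bb.utils.which() and, per the block above, registered as a parse dependency so edits trigger re-parsing (file names hypothetical):

    USERADD_UID_TABLES = "files/passwd files/passwd-extra"
    USERADD_GID_TABLES = "files/group"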
diff --git a/import-layers/yocto-poky/meta/classes/useradd.bbclass b/import-layers/yocto-poky/meta/classes/useradd.bbclass
index ee402acef..3cff08e00 100644
--- a/import-layers/yocto-poky/meta/classes/useradd.bbclass
+++ b/import-layers/yocto-poky/meta/classes/useradd.bbclass
@@ -3,11 +3,7 @@ inherit useradd_base
# base-passwd-cross provides the default passwd and group files in the
# target sysroot, and shadow -native and -sysroot provide the utilities
# and support files needed to add and modify user and group accounts
-DEPENDS_append = "${USERADDDEPENDS}"
-USERADDDEPENDS = " base-files shadow-native shadow-sysroot shadow"
-USERADDDEPENDS_class-cross = ""
-USERADDDEPENDS_class-native = ""
-USERADDDEPENDS_class-nativesdk = ""
+DEPENDS_append_class-target = " base-files shadow-native shadow-sysroot shadow"
# This preinstall function can be run in four different contexts:
#
@@ -54,15 +50,15 @@ if test "x`echo $GROUPADD_PARAM | tr -d '[:space:]'`" != "x"; then
echo "Running groupadd commands..."
# Invoke multiple instances of groupadd for parameter lists
# separated by ';'
- opts=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1`
- remaining=`echo "$GROUPADD_PARAM" | cut -d ';' -f 2-`
+ opts=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
+ remaining=`echo "$GROUPADD_PARAM" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
while test "x$opts" != "x"; do
perform_groupadd "$SYSROOT" "$OPT $opts"
if test "x$opts" = "x$remaining"; then
break
fi
- opts=`echo "$remaining" | cut -d ';' -f 1`
- remaining=`echo "$remaining" | cut -d ';' -f 2-`
+ opts=`echo "$remaining" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
+ remaining=`echo "$remaining" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
done
fi
@@ -70,15 +66,15 @@ if test "x`echo $USERADD_PARAM | tr -d '[:space:]'`" != "x"; then
echo "Running useradd commands..."
# Invoke multiple instances of useradd for parameter lists
# separated by ';'
- opts=`echo "$USERADD_PARAM" | cut -d ';' -f 1`
- remaining=`echo "$USERADD_PARAM" | cut -d ';' -f 2-`
+ opts=`echo "$USERADD_PARAM" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
+ remaining=`echo "$USERADD_PARAM" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
while test "x$opts" != "x"; do
perform_useradd "$SYSROOT" "$OPT $opts"
if test "x$opts" = "x$remaining"; then
break
fi
- opts=`echo "$remaining" | cut -d ';' -f 1`
- remaining=`echo "$remaining" | cut -d ';' -f 2-`
+ opts=`echo "$remaining" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
+ remaining=`echo "$remaining" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
done
fi
@@ -86,15 +82,15 @@ if test "x`echo $GROUPMEMS_PARAM | tr -d '[:space:]'`" != "x"; then
echo "Running groupmems commands..."
# Invoke multiple instances of groupmems for parameter lists
# separated by ';'
- opts=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 1`
- remaining=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 2-`
+ opts=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
+ remaining=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
while test "x$opts" != "x"; do
perform_groupmems "$SYSROOT" "$OPT $opts"
if test "x$opts" = "x$remaining"; then
break
fi
- opts=`echo "$remaining" | cut -d ';' -f 1`
- remaining=`echo "$remaining" | cut -d ';' -f 2-`
+ opts=`echo "$remaining" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
+ remaining=`echo "$remaining" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
done
fi
}
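For reference, the lists being walked above are ';'-separated per-package parameters, e.g. (hypothetical recipe snippet):

    GROUPADD_PARAM_${PN} = "--system hypogroup; --system hypogroup2"
    USERADD_PARAM_${PN} = "--system --no-create-home --groups hypogroup hypouser"

The added sed trims the trailing whitespace that cut can leave around each entry, so the awk/$NF name extraction and the loop's "x$opts" = "x$remaining" termination test behave predictably on the final entry.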
@@ -103,7 +99,7 @@ useradd_sysroot () {
# Pseudo may (do_install) or may not (do_populate_sysroot_setscene) be running
# at this point so we're explicit about the environment so pseudo can load if
# not already present.
- export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir}/pseudo"
+ export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir_native}/pseudo"
# Explicitly set $D since it isn't set to anything
# before do_install
@@ -129,57 +125,53 @@ useradd_sysroot_sstate () {
userdel_sysroot_sstate () {
if test "x${STAGING_DIR_TARGET}" != "x"; then
- if [ "${BB_CURRENTTASK}" = "configure" -o "${BB_CURRENTTASK}" = "clean" ]; then
- export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir}/pseudo"
+ if [ "${BB_CURRENTTASK}" = "clean" ]; then
+ export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir_native}/pseudo"
OPT="--root ${STAGING_DIR_TARGET}"
# Remove groups and users defined for package
GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}"
USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}"
- if test "x`echo $USERADD_PARAM | tr -d '[:space:]'`" != "x"; then
- user=`echo "$USERADD_PARAM" | cut -d ';' -f 1 | awk '{ print $NF }'`
+ user=`echo "$USERADD_PARAM" | cut -d ';' -f 1 | awk '{ print $NF }'`
+ remaining=`echo "$USERADD_PARAM" | cut -d ';' -f 2- -s | sed -e 's#[ \t]*$##'`
+ while test "x$user" != "x"; do
perform_userdel "${STAGING_DIR_TARGET}" "$OPT $user"
- fi
-
- if test "x`echo $GROUPADD_PARAM | tr -d '[:space:]'`" != "x"; then
- group=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1 | awk '{ print $NF }'`
- perform_groupdel "${STAGING_DIR_TARGET}" "$OPT $group"
- fi
+ user=`echo "$remaining" | cut -d ';' -f 1 | awk '{ print $NF }'`
+ remaining=`echo "$remaining" | cut -d ';' -f 2- -s | sed -e 's#[ \t]*$##'`
+ done
+
+ user=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1 | awk '{ print $NF }'`
+ remaining=`echo "$GROUPADD_PARAM" | cut -d ';' -f 2- -s | sed -e 's#[ \t]*$##'`
+ while test "x$user" != "x"; do
+ perform_groupdel "${STAGING_DIR_TARGET}" "$OPT $user"
+ user=`echo "$remaining" | cut -d ';' -f 1 | awk '{ print $NF }'`
+ remaining=`echo "$remaining" | cut -d ';' -f 2- -s | sed -e 's#[ \t]*$##'`
+ done
fi
fi
}
-SSTATECLEANFUNCS = "userdel_sysroot_sstate"
-SSTATECLEANFUNCS_class-cross = ""
-SSTATECLEANFUNCS_class-native = ""
-SSTATECLEANFUNCS_class-nativesdk = ""
+SSTATECLEANFUNCS_append_class-target = " userdel_sysroot_sstate"
do_install[prefuncs] += "${SYSROOTFUNC}"
-SYSROOTFUNC = "useradd_sysroot"
-SYSROOTFUNC_class-cross = ""
-SYSROOTFUNC_class-native = ""
-SYSROOTFUNC_class-nativesdk = ""
-SSTATEPREINSTFUNCS += "${SYSROOTPOSTFUNC}"
-SYSROOTPOSTFUNC = "useradd_sysroot_sstate"
-SYSROOTPOSTFUNC_class-cross = ""
-SYSROOTPOSTFUNC_class-native = ""
-SYSROOTPOSTFUNC_class-nativesdk = ""
-
-USERADDSETSCENEDEPS = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene"
-USERADDSETSCENEDEPS_class-cross = ""
-USERADDSETSCENEDEPS_class-native = ""
-USERADDSETSCENEDEPS_class-nativesdk = ""
+SYSROOTFUNC_class-target = "useradd_sysroot"
+SYSROOTFUNC = ""
+
+SSTATEPREINSTFUNCS_append_class-target = " useradd_sysroot_sstate"
+
do_package_setscene[depends] += "${USERADDSETSCENEDEPS}"
do_populate_sysroot_setscene[depends] += "${USERADDSETSCENEDEPS}"
+USERADDSETSCENEDEPS_class-target = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene"
+USERADDSETSCENEDEPS = ""
# Recipe parse-time sanity checks
def update_useradd_after_parse(d):
useradd_packages = d.getVar('USERADD_PACKAGES', True)
if not useradd_packages:
- raise bb.build.FuncFailed("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE', False))
+ bb.fatal("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE', False))
for pkg in useradd_packages.split():
if not d.getVar('USERADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPMEMS_PARAM_%s' % pkg, True):
@@ -203,7 +195,7 @@ def get_all_cmd_params(d, cmd_type):
for pkg in useradd_packages.split():
param = d.getVar(param_type % pkg, True)
if param:
- params.append(param)
+ params.append(param.rstrip(" ;"))
return "; ".join(params)
diff --git a/import-layers/yocto-poky/meta/classes/useradd_base.bbclass b/import-layers/yocto-poky/meta/classes/useradd_base.bbclass
index 0d81accd1..ba87edc57 100644
--- a/import-layers/yocto-poky/meta/classes/useradd_base.bbclass
+++ b/import-layers/yocto-poky/meta/classes/useradd_base.bbclass
@@ -17,7 +17,6 @@ perform_groupadd () {
local groupname=`echo "$opts" | awk '{ print $NF }'`
local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
if test "x$group_exists" = "x"; then
- opts=`echo $opts | sed s/\'/\"/g`
eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupadd \$opts\" || true
group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
if test "x$group_exists" = "x"; then
@@ -35,7 +34,6 @@ perform_useradd () {
local username=`echo "$opts" | awk '{ print $NF }'`
local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
if test "x$user_exists" = "x"; then
- opts=`echo $opts | sed s/\'/\"/g`
eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO useradd \$opts\" || true
user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
if test "x$user_exists" = "x"; then
@@ -53,14 +51,6 @@ perform_groupmems () {
local groupname=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-g" || $i == "--group") print $(i+1) }'`
local username=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-a" || $i == "--add") print $(i+1) }'`
bbnote "${PN}: Running groupmems command with group $groupname and user $username"
- # groupmems fails if /etc/gshadow does not exist
- local gshadow=""
- if [ -f $rootdir${sysconfdir}/gshadow ]; then
- gshadow="yes"
- else
- gshadow="no"
- touch $rootdir${sysconfdir}/gshadow
- fi
local mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`"
if test "x$mem_exists" = "x"; then
eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupmems \$opts\" || true
@@ -71,10 +61,6 @@ perform_groupmems () {
else
bbnote "${PN}: group $groupname already contains $username, not re-adding it"
fi
- if test "x$gshadow" = "xno"; then
- rm -f $rootdir${sysconfdir}/gshadow
- rm -f $rootdir${sysconfdir}/gshadow-
- fi
}
perform_groupdel () {
diff --git a/import-layers/yocto-poky/meta/classes/utility-tasks.bbclass b/import-layers/yocto-poky/meta/classes/utility-tasks.bbclass
index 5bcfd0b72..7ba56e28a 100644
--- a/import-layers/yocto-poky/meta/classes/utility-tasks.bbclass
+++ b/import-layers/yocto-poky/meta/classes/utility-tasks.bbclass
@@ -46,8 +46,8 @@ python do_checkuri() {
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.checkstatus()
- except bb.fetch2.BBFetchException, e:
- raise bb.build.FuncFailed(e)
+ except bb.fetch2.BBFetchException as e:
+ bb.fatal(str(e))
}
addtask checkuriall after do_checkuri
diff --git a/import-layers/yocto-poky/meta/classes/utils.bbclass b/import-layers/yocto-poky/meta/classes/utils.bbclass
index 81b92cb5e..dbb5e4cbb 100644
--- a/import-layers/yocto-poky/meta/classes/utils.bbclass
+++ b/import-layers/yocto-poky/meta/classes/utils.bbclass
@@ -24,6 +24,7 @@ def base_version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
return oe.utils.version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d)
def base_contains(variable, checkvalues, truevalue, falsevalue, d):
+ bb.note('base_contains is deprecated, please use bb.utils.contains instead.')
return bb.utils.contains(variable, checkvalues, truevalue, falsevalue, d)
def base_both_contain(variable1, variable2, checkvalue, d):
@@ -61,15 +62,18 @@ def is_machine_specific(d):
oe_soinstall() {
# Purpose: Install shared library file and
# create the necessary links
- # Example:
- #
- # oe_
- #
- #bbnote installing shared library $1 to $2
- #
+ # Example: oe_soinstall libfoo.so.1.2.3 ${D}${libdir}
libname=`basename $1`
+ case "$libname" in
+ *.so)
+ bbfatal "oe_soinstall: Shared library must haved versioned filename (e.g. libfoo.so.1.2.3)"
+ ;;
+ esac
install -m 755 $1 $2/$libname
sonamelink=`${HOST_PREFIX}readelf -d $1 |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
+ if [ -z "$sonamelink" ]; then
+ bbfatal "oe_soinstall: $libname is missing ELF tag 'SONAME'."
+ fi
solink=`echo $libname | sed -e 's/\.so\..*/.so/'`
ln -sf $libname $2/$sonamelink
ln -sf $libname $2/$solink
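The sed expression above pulls the bracketed name out of readelf's dynamic-section output. The same extraction in Python, against a representative line (sample text, not from a real run):

    import re

    line = " 0x000000000000000e (SONAME)  Library soname: [libfoo.so.1]"
    m = re.search(r'Library soname: \[(.*)\]', line)
    print(m.group(1) if m else "missing SONAME")  # libfoo.so.1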
@@ -248,7 +252,7 @@ oe_machinstall() {
create_cmdline_wrapper () {
# Create a wrapper script where commandline options are needed
#
- # These are useful to work around relocation issues, by passing extra options
+ # These are useful to work around relocation issues, by passing extra options
# to a program
#
# Usage: create_cmdline_wrapper FILENAME <extra-options>
@@ -302,7 +306,7 @@ hardlinkdir () {
def check_app_exists(app, d):
- app = d.expand(app)
+ app = d.expand(app).strip()
path = d.getVar('PATH', d, True)
return bool(bb.utils.which(path, app))
@@ -322,7 +326,7 @@ def base_set_filespath(path, d):
overrides.reverse()
for o in overrides:
for p in path:
- if p != "":
+ if p != "":
filespath.append(os.path.join(p, o))
return ":".join(filespath)
@@ -378,3 +382,50 @@ def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = '
else:
ret = values
return " ".join(ret)
+
+def all_multilib_tune_list(vars, d):
+ """
+ Return a list of ${VAR} for each variable VAR in vars from each
+ multilib tune configuration.
+ It is safe to call from a multilib recipe/context, as it can
+ figure out the original tune and remove the multilib overrides.
+ """
+ values = {}
+ for v in vars:
+ values[v] = []
+
+ localdata = bb.data.createCopy(d)
+ overrides = localdata.getVar("OVERRIDES", False).split(":")
+ newoverrides = []
+ for o in overrides:
+ if not o.startswith("virtclass-multilib-"):
+ newoverrides.append(o)
+ localdata.setVar("OVERRIDES", ":".join(newoverrides))
+ localdata.setVar("MLPREFIX", "")
+ origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL", True)
+ if origdefault:
+ localdata.setVar("DEFAULTTUNE", origdefault)
+ bb.data.update_data(localdata)
+ values['ml'] = ['']
+ for v in vars:
+ values[v].append(localdata.getVar(v, True))
+ variants = d.getVar("MULTILIB_VARIANTS", True) or ""
+ for item in variants.split():
+ localdata = bb.data.createCopy(d)
+ overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
+ localdata.setVar("OVERRIDES", overrides)
+ localdata.setVar("MLPREFIX", item + "-")
+ bb.data.update_data(localdata)
+ for v in vars:
+ values[v].append(localdata.getVar(v, True))
+ values['ml'].append(item)
+ return values
+
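A hedged usage sketch for the function above: called with a list of variable names, it returns one list per variable plus a parallel 'ml' list of multilib prefixes, default tune first and then one entry per MULTILIB_VARIANTS item (values illustrative only):

    # values = all_multilib_tune_list(['TARGET_ARCH', 'TUNE_PKGARCH'], d)
    # values['ml']           -> ['', 'lib32']       # default tune, then each variant
    # values['TARGET_ARCH']  -> ['x86_64', 'i686']  # one entry per tune, parallel to 'ml'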
+# If the user hasn't set up their name/email, set some defaults
+check_git_config() {
+ if ! git config user.email > /dev/null ; then
+ git config --local user.email "${PATCH_GIT_USER_EMAIL}"
+ fi
+ if ! git config user.name > /dev/null ; then
+ git config --local user.name "${PATCH_GIT_USER_NAME}"
+ fi
+}
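Callers are expected to provide the fallback identity; if memory serves, patch.bbclass sets weak defaults along these lines (values shown here are an assumption, check the class for the authoritative ones):

    PATCH_GIT_USER_NAME ?= "OpenEmbedded"
    PATCH_GIT_USER_EMAIL ?= "oe.patch@oe"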