Diffstat (limited to 'import-layers/yocto-poky/meta/classes')
-rw-r--r--  import-layers/yocto-poky/meta/classes/allarch.bbclass | 4
-rw-r--r--  import-layers/yocto-poky/meta/classes/archiver.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/autotools.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/base.bbclass | 55
-rw-r--r--  import-layers/yocto-poky/meta/classes/buildhistory.bbclass | 83
-rw-r--r--  import-layers/yocto-poky/meta/classes/ccache.bbclass | 4
-rw-r--r--  import-layers/yocto-poky/meta/classes/cmake.bbclass | 11
-rw-r--r--  import-layers/yocto-poky/meta/classes/cml1.bbclass | 3
-rw-r--r--  import-layers/yocto-poky/meta/classes/cross-canadian.bbclass | 13
-rw-r--r--  import-layers/yocto-poky/meta/classes/cross.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/cve-check.bbclass | 26
-rw-r--r--  import-layers/yocto-poky/meta/classes/devshell.bbclass | 8
-rw-r--r--  import-layers/yocto-poky/meta/classes/devtool-source.bbclass | 165
-rw-r--r--  import-layers/yocto-poky/meta/classes/distrodata.bbclass | 73
-rw-r--r--  import-layers/yocto-poky/meta/classes/distrooverrides.bbclass | 32
-rw-r--r--  import-layers/yocto-poky/meta/classes/externalsrc.bbclass | 24
-rw-r--r--  import-layers/yocto-poky/meta/classes/gettext.bbclass | 9
-rw-r--r--  import-layers/yocto-poky/meta/classes/gnomebase.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/go.bbclass | 227
-rw-r--r--  import-layers/yocto-poky/meta/classes/goarch.bbclass | 61
-rw-r--r--  import-layers/yocto-poky/meta/classes/grub-efi.bbclass | 1
-rw-r--r--  import-layers/yocto-poky/meta/classes/gtk-doc.bbclass | 1
-rw-r--r--  import-layers/yocto-poky/meta/classes/icecc.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-live.bbclass | 19
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-prelink.bbclass | 14
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-vm.bbclass | 171
-rw-r--r--  import-layers/yocto-poky/meta/classes/image.bbclass | 120
-rw-r--r--  import-layers/yocto-poky/meta/classes/image_types.bbclass | 86
-rw-r--r--  import-layers/yocto-poky/meta/classes/image_types_wic.bbclass | 17
-rw-r--r--  import-layers/yocto-poky/meta/classes/insane.bbclass | 218
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-devicetree.bbclass | 112
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass | 7
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-uboot.bbclass | 4
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass | 23
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel.bbclass | 71
-rw-r--r--  import-layers/yocto-poky/meta/classes/license.bbclass | 9
-rw-r--r--  import-layers/yocto-poky/meta/classes/linuxloader.bbclass | 42
-rw-r--r--  import-layers/yocto-poky/meta/classes/live-vm-common.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/mirrors.bbclass | 14
-rw-r--r--  import-layers/yocto-poky/meta/classes/module.bbclass | 22
-rw-r--r--  import-layers/yocto-poky/meta/classes/multilib.bbclass | 4
-rw-r--r--  import-layers/yocto-poky/meta/classes/native.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/own-mirrors.bbclass | 26
-rw-r--r--  import-layers/yocto-poky/meta/classes/package.bbclass | 39
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_deb.bbclass | 119
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_ipk.bbclass | 98
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_rpm.bbclass | 6
-rw-r--r--  import-layers/yocto-poky/meta/classes/packagefeed-stability.bbclass | 4
-rw-r--r--  import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass | 31
-rw-r--r--  import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass | 43
-rw-r--r--  import-layers/yocto-poky/meta/classes/python3native.bbclass | 3
-rw-r--r--  import-layers/yocto-poky/meta/classes/pythonnative.bbclass | 3
-rw-r--r--  import-layers/yocto-poky/meta/classes/qemuboot.bbclass | 17
-rw-r--r--  import-layers/yocto-poky/meta/classes/report-error.bbclass | 9
-rw-r--r--  import-layers/yocto-poky/meta/classes/rm_work.bbclass | 43
-rw-r--r--  import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass | 14
-rw-r--r--  import-layers/yocto-poky/meta/classes/rootfs_deb.bbclass | 3
-rw-r--r--  import-layers/yocto-poky/meta/classes/rootfsdebugfiles.bbclass | 7
-rw-r--r--  import-layers/yocto-poky/meta/classes/sanity.bbclass | 83
-rw-r--r--  import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass | 3
-rw-r--r--  import-layers/yocto-poky/meta/classes/sign_rpm.bbclass | 33
-rw-r--r--  import-layers/yocto-poky/meta/classes/siteinfo.bbclass | 25
-rw-r--r--  import-layers/yocto-poky/meta/classes/sstate.bbclass | 26
-rw-r--r--  import-layers/yocto-poky/meta/classes/staging.bbclass | 136
-rw-r--r--  import-layers/yocto-poky/meta/classes/systemd-boot.bbclass | 5
-rw-r--r--  import-layers/yocto-poky/meta/classes/systemd.bbclass | 6
-rw-r--r--  import-layers/yocto-poky/meta/classes/testexport.bbclass | 2
-rw-r--r--  import-layers/yocto-poky/meta/classes/testimage.bbclass | 24
-rw-r--r--  import-layers/yocto-poky/meta/classes/testsdk.bbclass | 13
-rw-r--r--  import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass | 16
-rw-r--r--  import-layers/yocto-poky/meta/classes/uboot-config.bbclass | 11
-rw-r--r--  import-layers/yocto-poky/meta/classes/uboot-extlinux-config.bbclass | 4
-rw-r--r--  import-layers/yocto-poky/meta/classes/uninative.bbclass | 18
-rw-r--r--  import-layers/yocto-poky/meta/classes/update-alternatives.bbclass | 33
-rw-r--r--  import-layers/yocto-poky/meta/classes/update-rc.d.bbclass | 23
-rw-r--r--  import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass | 146
-rw-r--r--  import-layers/yocto-poky/meta/classes/useradd.bbclass | 10
-rw-r--r--  import-layers/yocto-poky/meta/classes/utils.bbclass | 4
-rw-r--r--  import-layers/yocto-poky/meta/classes/waf.bbclass | 17
80 files changed, 1741 insertions(+), 1131 deletions(-)
diff --git a/import-layers/yocto-poky/meta/classes/allarch.bbclass b/import-layers/yocto-poky/meta/classes/allarch.bbclass
index a7ce02464..51ba509cd 100644
--- a/import-layers/yocto-poky/meta/classes/allarch.bbclass
+++ b/import-layers/yocto-poky/meta/classes/allarch.bbclass
@@ -43,8 +43,8 @@ python () {
d.setVar("INHIBIT_PACKAGE_STRIP", "1")
# These multilib values shouldn't change allarch packages so exclude them
- d.setVarFlag("emit_pkgdata", "vardepsexclude", "MULTILIB_VARIANTS")
- d.setVarFlag("write_specfile", "vardepsexclude", "MULTILIBS")
+ d.appendVarFlag("emit_pkgdata", "vardepsexclude", " MULTILIB_VARIANTS")
+ d.appendVarFlag("write_specfile", "vardepsexclude", " MULTILIBS")
elif bb.data.inherits_class('packagegroup', d) and not bb.data.inherits_class('nativesdk', d):
bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE"))
}
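
The hunk above switches setVarFlag to appendVarFlag so that vardepsexclude entries set elsewhere are preserved rather than clobbered. A minimal sketch of the difference, using the same datastore calls:

python () {
    # setVarFlag replaces the whole exclusion list:
    d.setVarFlag("emit_pkgdata", "vardepsexclude", "MULTILIB_VARIANTS")
    # appendVarFlag adds to whatever is already set; the leading space
    # keeps the entries whitespace-separated:
    d.appendVarFlag("emit_pkgdata", "vardepsexclude", " MULTILIB_VARIANTS")
}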
diff --git a/import-layers/yocto-poky/meta/classes/archiver.bbclass b/import-layers/yocto-poky/meta/classes/archiver.bbclass
index 18c5b9668..ec80ad47a 100644
--- a/import-layers/yocto-poky/meta/classes/archiver.bbclass
+++ b/import-layers/yocto-poky/meta/classes/archiver.bbclass
@@ -223,6 +223,8 @@ python do_ar_configured() {
import shutil
# Forcibly expand the sysroot paths as we're about to change WORKDIR
+ d.setVar('STAGING_DIR_HOST', d.getVar('STAGING_DIR_HOST'))
+ d.setVar('STAGING_DIR_TARGET', d.getVar('STAGING_DIR_TARGET'))
d.setVar('RECIPE_SYSROOT', d.getVar('RECIPE_SYSROOT'))
d.setVar('RECIPE_SYSROOT_NATIVE', d.getVar('RECIPE_SYSROOT_NATIVE'))
diff --git a/import-layers/yocto-poky/meta/classes/autotools.bbclass b/import-layers/yocto-poky/meta/classes/autotools.bbclass
index ac04a07cb..efa4098d6 100644
--- a/import-layers/yocto-poky/meta/classes/autotools.bbclass
+++ b/import-layers/yocto-poky/meta/classes/autotools.bbclass
@@ -141,7 +141,7 @@ ACLOCALEXTRAPATH_class-nativesdk = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
python autotools_aclocals () {
# Refresh variable with cache files
- d.setVar("CONFIG_SITE", siteinfo_get_files(d, aclocalcache=True))
+ d.setVar("CONFIG_SITE", siteinfo_get_files(d, sysrootcache=True))
}
CONFIGURE_FILES = "${S}/configure.in ${S}/configure.ac ${S}/config.h.in ${S}/acinclude.m4 Makefile.am"
diff --git a/import-layers/yocto-poky/meta/classes/base.bbclass b/import-layers/yocto-poky/meta/classes/base.bbclass
index d95afb7b9..bd0d6e3ca 100644
--- a/import-layers/yocto-poky/meta/classes/base.bbclass
+++ b/import-layers/yocto-poky/meta/classes/base.bbclass
@@ -61,22 +61,15 @@ oe_runmake() {
def base_dep_prepend(d):
- #
- # Ideally this will check a flag so we will operate properly in
- # the case where host == build == target, for now we don't work in
- # that case though.
- #
+ if d.getVar('INHIBIT_DEFAULT_DEPS', False):
+ return ""
+ return "${BASE_DEFAULT_DEPS}"
- deps = ""
- # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
- # we need that built is the responsibility of the patch function / class, not
- # the application.
- if not d.getVar('INHIBIT_DEFAULT_DEPS', False):
- if (d.getVar('HOST_SYS') != d.getVar('BUILD_SYS')):
- deps += " virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc "
- return deps
+BASE_DEFAULT_DEPS = "virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc"
-BASEDEPENDS = "${@base_dep_prepend(d)}"
+BASEDEPENDS = ""
+BASEDEPENDS_class-target = "${@base_dep_prepend(d)}"
+BASEDEPENDS_class-nativesdk = "${@base_dep_prepend(d)}"
DEPENDS_prepend="${BASEDEPENDS} "
@@ -185,7 +178,7 @@ def pkgarch_mapping(d):
def get_layers_branch_rev(d):
layers = (d.getVar("BBLAYERS") or "").split()
- layers_branch_rev = ["%-17s = \"%s:%s\"" % (os.path.basename(i), \
+ layers_branch_rev = ["%-20s = \"%s:%s\"" % (os.path.basename(i), \
base_get_metadata_git_branch(i, None).strip(), \
base_get_metadata_git_revision(i, None)) \
for i in layers]
@@ -213,7 +206,7 @@ def buildcfg_vars(d):
for var in statusvars:
value = d.getVar(var)
if value is not None:
- yield '%-17s = "%s"' % (var, value)
+ yield '%-20s = "%s"' % (var, value)
def buildcfg_neededvars(d):
needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d)
@@ -227,7 +220,7 @@ def buildcfg_neededvars(d):
bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))
addhandler base_eventhandler
-base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.runqueue.sceneQueueComplete bb.event.RecipeParsed"
+base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.MultiConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.runqueue.sceneQueueComplete bb.event.RecipeParsed"
python base_eventhandler() {
import bb.runqueue
@@ -242,6 +235,16 @@ python base_eventhandler() {
setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS', d)
setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS_NONFATAL', d, fatal=False)
+ if isinstance(e, bb.event.MultiConfigParsed):
+ # We need to expand SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS in each of the multiconfig data stores
+ # own contexts so the variables get expanded correctly for that arch, then inject back into
+ # the main data store.
+ deps = []
+ for config in e.mcdata:
+ deps.append(e.mcdata[config].getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS"))
+ deps = " ".join(deps)
+ e.mcdata[''].setVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", deps)
+
if isinstance(e, bb.event.BuildStarted):
localdata = bb.data.createCopy(e.data)
statuslines = []
@@ -391,7 +394,7 @@ python () {
# These take the form:
#
# PACKAGECONFIG ??= "<default options>"
- # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends"
+ # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends,foo_runtime_recommends"
pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
if pkgconfigflags:
pkgconfig = (d.getVar('PACKAGECONFIG') or "").split()
@@ -433,12 +436,13 @@ python () {
extradeps = []
extrardeps = []
+ extrarrecs = []
extraconf = []
for flag, flagval in sorted(pkgconfigflags.items()):
items = flagval.split(",")
num = len(items)
- if num > 4:
- bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend can be specified!"
+ if num > 5:
+ bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend,rrecommend can be specified!"
% (d.getVar('PN'), flag))
if flag in pkgconfig:
@@ -446,12 +450,15 @@ python () {
extradeps.append(items[2])
if num >= 4 and items[3]:
extrardeps.append(items[3])
+ if num >= 5 and items[4]:
+ extrarrecs.append(items[4])
if num >= 1 and items[0]:
extraconf.append(items[0])
elif num >= 2 and items[1]:
extraconf.append(items[1])
appendVar('DEPENDS', extradeps)
appendVar('RDEPENDS_${PN}', extrardeps)
+ appendVar('RRECOMMENDS_${PN}', extrarrecs)
appendVar('PACKAGECONFIG_CONFARGS', extraconf)
pn = d.getVar('PN')
@@ -617,16 +624,16 @@ python () {
d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')
# *.xz should DEPEND on xz-native for unpacking
- elif path.endswith('.xz'):
+ elif path.endswith('.xz') or path.endswith('.txz'):
d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
# .zip should DEPEND on unzip-native for unpacking
- elif path.endswith('.zip'):
+ elif path.endswith('.zip') or path.endswith('.jar'):
d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')
# file is needed by rpm2cpio.sh
- elif path.endswith('.src.rpm'):
- d.appendVarFlag('do_unpack', 'depends', ' file-native:do_populate_sysroot')
+ elif path.endswith('.rpm'):
+ d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
if needsrcrev:
d.setVar("SRCPV", "${@bb.fetch2.get_srcrev(d)}")
diff --git a/import-layers/yocto-poky/meta/classes/buildhistory.bbclass b/import-layers/yocto-poky/meta/classes/buildhistory.bbclass
index 3823c664a..7a5534edd 100644
--- a/import-layers/yocto-poky/meta/classes/buildhistory.bbclass
+++ b/import-layers/yocto-poky/meta/classes/buildhistory.bbclass
@@ -192,7 +192,7 @@ python buildhistory_emit_pkghistory() {
pe = d.getVar('PE') or "0"
pv = d.getVar('PV')
pr = d.getVar('PR')
- layer = bb.utils.get_file_layer(d.getVar('FILE', True), d)
+ layer = bb.utils.get_file_layer(d.getVar('FILE'), d)
pkgdata_dir = d.getVar('PKGDATA_DIR')
packages = ""
@@ -348,6 +348,7 @@ def write_recipehistory(rcpinfo, d):
f.write(u"PACKAGES = %s\n" % rcpinfo.packages)
f.write(u"LAYER = %s\n" % rcpinfo.layer)
+ write_latest_srcrev(d, pkghistdir)
def write_pkghistory(pkginfo, d):
bb.debug(2, "Writing package history for package %s" % pkginfo.name)
@@ -600,26 +601,19 @@ END
python buildhistory_get_extra_sdkinfo() {
import operator
- import math
+ from oe.sdk import get_extra_sdkinfo
+
+ sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')
+ extra_info = get_extra_sdkinfo(sstate_dir)
if d.getVar('BB_CURRENTTASK') == 'populate_sdk_ext' and \
"sdk" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
- tasksizes = {}
- filesizes = {}
- for root, _, files in os.walk(d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')):
- for fn in files:
- if fn.endswith('.tgz'):
- fsize = int(math.ceil(float(os.path.getsize(os.path.join(root, fn))) / 1024))
- task = fn.rsplit(':', 1)[1].split('_', 1)[1].split('.')[0]
- origtotal = tasksizes.get(task, 0)
- tasksizes[task] = origtotal + fsize
- filesizes[fn] = fsize
with open(d.expand('${BUILDHISTORY_DIR_SDK}/sstate-package-sizes.txt'), 'w') as f:
- filesizes_sorted = sorted(filesizes.items(), key=operator.itemgetter(1, 0), reverse=True)
+ filesizes_sorted = sorted(extra_info['filesizes'].items(), key=operator.itemgetter(1, 0), reverse=True)
for fn, size in filesizes_sorted:
f.write('%10d KiB %s\n' % (size, fn))
with open(d.expand('${BUILDHISTORY_DIR_SDK}/sstate-task-sizes.txt'), 'w') as f:
- tasksizes_sorted = sorted(tasksizes.items(), key=operator.itemgetter(1, 0), reverse=True)
+ tasksizes_sorted = sorted(extra_info['tasksizes'].items(), key=operator.itemgetter(1, 0), reverse=True)
for task, size in tasksizes_sorted:
f.write('%10d KiB %s\n' % (size, task))
}
@@ -715,20 +709,23 @@ def buildhistory_get_sdkvars(d):
def buildhistory_get_cmdline(d):
- if sys.argv[0].endswith('bin/bitbake'):
- bincmd = 'bitbake'
- else:
- bincmd = sys.argv[0]
- return '%s %s' % (bincmd, ' '.join(sys.argv[1:]))
+ argv = d.getVar('BB_CMDLINE', False)
+ if argv:
+ if argv[0].endswith('bin/bitbake'):
+ bincmd = 'bitbake'
+ else:
+ bincmd = argv[0]
+ return '%s %s' % (bincmd, ' '.join(argv[1:]))
+ return ''
buildhistory_single_commit() {
if [ "$3" = "" ] ; then
commitopts="${BUILDHISTORY_DIR}/ --allow-empty"
- item="No changes"
+ shortlogprefix="No changes: "
else
- commitopts="$3 metadata-revs"
- item="$3"
+ commitopts=""
+ shortlogprefix=""
fi
if [ "${BUILDHISTORY_BUILD_FAILURES}" = "0" ] ; then
result="succeeded"
@@ -745,7 +742,7 @@ buildhistory_single_commit() {
esac
commitmsgfile=`mktemp`
cat > $commitmsgfile << END
-$item: Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $2
+${shortlogprefix}Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $2
cmd: $1
@@ -789,9 +786,7 @@ END
git add -A .
# porcelain output looks like "?? packages/foo/bar"
# Ensure we commit metadata-revs with the first commit
- for entry in `echo "$repostatus" | awk '{print $2}' | awk -F/ '{print $1}' | sort | uniq` ; do
- buildhistory_single_commit "$CMDLINE" "$HOSTNAME" "$entry"
- done
+ buildhistory_single_commit "$CMDLINE" "$HOSTNAME" dummy
git gc --auto --quiet
else
buildhistory_single_commit "$CMDLINE" "$HOSTNAME"
@@ -829,6 +824,8 @@ python buildhistory_eventhandler() {
interrupted = getattr(e, '_interrupted', 0)
localdata.setVar('BUILDHISTORY_BUILD_INTERRUPTED', str(interrupted))
bb.build.exec_func("buildhistory_commit", localdata)
+ else:
+ bb.note("No commit since BUILDHISTORY_COMMIT != '1'")
}
addhandler buildhistory_eventhandler
@@ -874,7 +871,10 @@ def _get_srcrev_values(d):
do_fetch[postfuncs] += "write_srcrev"
do_fetch[vardepsexclude] += "write_srcrev"
python write_srcrev() {
- pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE')
+ write_latest_srcrev(d, d.getVar('BUILDHISTORY_DIR_PACKAGE'))
+}
+
+def write_latest_srcrev(d, pkghistdir):
srcrevfile = os.path.join(pkghistdir, 'latest_srcrev')
srcrevs, tag_srcrevs = _get_srcrev_values(d)
@@ -912,4 +912,33 @@ python write_srcrev() {
else:
if os.path.exists(srcrevfile):
os.remove(srcrevfile)
+
+do_testimage[postfuncs] += "write_ptest_result"
+do_testimage[vardepsexclude] += "write_ptest_result"
+
+python write_ptest_result() {
+ write_latest_ptest_result(d, d.getVar('BUILDHISTORY_DIR'))
}
+
+def write_latest_ptest_result(d, histdir):
+ import glob
+ import subprocess
+ test_log_dir = d.getVar('TEST_LOG_DIR')
+ input_ptest = os.path.join(test_log_dir, 'ptest_log')
+ output_ptest = os.path.join(histdir, 'ptest')
+ if os.path.exists(input_ptest):
+ try:
+ # Lock it to avoid race issues
+ lock = bb.utils.lockfile(output_ptest + "/ptest.lock")
+ bb.utils.mkdirhier(output_ptest)
+ oe.path.copytree(input_ptest, output_ptest)
+ # Sort test result
+ for result in glob.glob('%s/pass.fail.*' % output_ptest):
+ bb.debug(1, 'Processing %s' % result)
+ cmd = ['sort', result, '-o', result]
+ bb.debug(1, 'Running %s' % cmd)
+ ret = subprocess.call(cmd)
+ if ret != 0:
+ bb.error('Failed to run %s!' % cmd)
+ finally:
+ bb.utils.unlockfile(lock)
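
The new write_ptest_result postfunc above copies ptest logs into the build history after do_testimage. A hypothetical local.conf fragment to exercise it:

INHERIT += "buildhistory testimage"
BUILDHISTORY_COMMIT = "1"
# After do_testimage, ${TEST_LOG_DIR}/ptest_log is copied (under a lock)
# into ${BUILDHISTORY_DIR}/ptest, and the pass.fail.* result files are
# sorted in place so successive builds diff cleanly.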
diff --git a/import-layers/yocto-poky/meta/classes/ccache.bbclass b/import-layers/yocto-poky/meta/classes/ccache.bbclass
index d58c8f6e5..960902065 100644
--- a/import-layers/yocto-poky/meta/classes/ccache.bbclass
+++ b/import-layers/yocto-poky/meta/classes/ccache.bbclass
@@ -1,6 +1,5 @@
CCACHE = "${@bb.utils.which(d.getVar('PATH'), 'ccache') and 'ccache '}"
export CCACHE_DIR ?= "${TMPDIR}/ccache/${MULTIMACH_TARGET_SYS}/${PN}"
-CCACHE_DISABLE[unexport] = "1"
# We need to stop ccache considering the current directory or the
# debug-prefix-map target directory to be significant when calculating
@@ -10,6 +9,3 @@ export CCACHE_NOHASHDIR ?= "1"
DEPENDS_append_class-target = " ccache-native"
DEPENDS[vardepvalueexclude] = " ccache-native"
-
-do_configure[dirs] =+ "${CCACHE_DIR}"
-do_kernel_configme[dirs] =+ "${CCACHE_DIR}"
diff --git a/import-layers/yocto-poky/meta/classes/cmake.bbclass b/import-layers/yocto-poky/meta/classes/cmake.bbclass
index 12df617ad..ac2c1519b 100644
--- a/import-layers/yocto-poky/meta/classes/cmake.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cmake.bbclass
@@ -31,6 +31,9 @@ OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM_class-native = "BOTH"
EXTRA_OECMAKE_append = " ${PACKAGECONFIG_CONFARGS}"
+EXTRA_OECMAKE_BUILD_prepend_task-compile = "${PARALLEL_MAKE} "
+EXTRA_OECMAKE_BUILD_prepend_task-install = "${PARALLEL_MAKEINST} "
+
# CMake expects target architectures in the format of uname(2),
# which do not always match TARGET_ARCH, so all the necessary
# conversions should happen here.
@@ -135,13 +138,13 @@ cmake_do_configure() {
do_compile[progress] = "percent"
cmake_do_compile() {
- cd ${B}
- base_do_compile VERBOSE=1
+ bbnote VERBOSE=1 cmake --build '${B}' -- ${EXTRA_OECMAKE_BUILD}
+ VERBOSE=1 cmake --build '${B}' -- ${EXTRA_OECMAKE_BUILD}
}
cmake_do_install() {
- cd ${B}
- oe_runmake 'DESTDIR=${D}' install
+ bbnote DESTDIR='${D}' cmake --build '${B}' --target install -- ${EXTRA_OECMAKE_BUILD}
+ DESTDIR='${D}' cmake --build '${B}' --target install -- ${EXTRA_OECMAKE_BUILD}
}
EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
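
With the task-specific prepends above, the cmake invocations inherit the make-level parallelism settings. Assuming PARALLEL_MAKE = "-j 8" and PARALLEL_MAKEINST = "-j 2" (illustrative values), the tasks effectively run:

# do_compile:
VERBOSE=1 cmake --build '${B}' -- -j 8
# do_install:
DESTDIR='${D}' cmake --build '${B}' --target install -- -j 2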
diff --git a/import-layers/yocto-poky/meta/classes/cml1.bbclass b/import-layers/yocto-poky/meta/classes/cml1.bbclass
index 38e6613c4..926747f2b 100644
--- a/import-layers/yocto-poky/meta/classes/cml1.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cml1.bbclass
@@ -64,7 +64,8 @@ python do_diffconfig() {
if isdiff:
statement = 'diff --unchanged-line-format= --old-line-format= --new-line-format="%L" ' + configorig + ' ' + config + '>' + fragment
subprocess.call(statement, shell=True)
-
# No need to check the exit code: we know it is going to be
# non-zero (the files differ), and that is what we expect.
shutil.copy(configorig, config)
bb.plain("Config fragment has been dumped into:\n %s" % fragment)
diff --git a/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass b/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass
index 49388d4cf..1928455cf 100644
--- a/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass
@@ -15,7 +15,7 @@ STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${S
# Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS
#
PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
-BASECANADIANEXTRAOS ?= "linux-uclibc linux-musl"
+BASECANADIANEXTRAOS ?= "linux-musl"
CANADIANEXTRAOS = "${BASECANADIANEXTRAOS}"
CANADIANEXTRAVENDOR = ""
MODIFYTOS ??= "1"
@@ -36,11 +36,9 @@ python () {
tos = d.getVar("TARGET_OS")
whitelist = []
extralibcs = [""]
- if "uclibc" in d.getVar("BASECANADIANEXTRAOS"):
- extralibcs.append("uclibc")
if "musl" in d.getVar("BASECANADIANEXTRAOS"):
extralibcs.append("musl")
- for variant in ["", "spe", "x32", "eabi", "n32"]:
+ for variant in ["", "spe", "x32", "eabi", "n32", "ilp32"]:
for libc in extralibcs:
entry = "linux"
if variant and libc:
@@ -80,7 +78,7 @@ python () {
for extraos in d.getVar("BASECANADIANEXTRAOS").split():
d.appendVar("CANADIANEXTRAOS", " " + extraos + "n32")
if tarch == "arm" or tarch == "armeb":
- d.appendVar("CANADIANEXTRAOS", " linux-gnueabi linux-musleabi linux-uclibceabi")
+ d.appendVar("CANADIANEXTRAOS", " linux-gnueabi linux-musleabi")
d.setVar("TARGET_OS", "linux-gnueabi")
else:
d.setVar("TARGET_OS", "linux")
@@ -115,11 +113,6 @@ HOST_CC_ARCH = "${SDK_CC_ARCH}"
HOST_LD_ARCH = "${SDK_LD_ARCH}"
HOST_AS_ARCH = "${SDK_AS_ARCH}"
-TARGET_CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
-TARGET_CFLAGS = "${BUILDSDK_CFLAGS}"
-TARGET_CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
-TARGET_LDFLAGS = "${BUILDSDK_LDFLAGS}"
-
#assign DPKG_ARCH
DPKG_ARCH = "${@debian_arch_map(d.getVar('SDK_ARCH'), '')}"
diff --git a/import-layers/yocto-poky/meta/classes/cross.bbclass b/import-layers/yocto-poky/meta/classes/cross.bbclass
index 4feb01ecc..d217717e6 100644
--- a/import-layers/yocto-poky/meta/classes/cross.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cross.bbclass
@@ -50,7 +50,7 @@ SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
# Path mangling needed by the cross packaging
# Note that we use := here to ensure that libdir and includedir are
# target paths.
-target_base_prefix := "${base_prefix}"
+target_base_prefix := "${root_prefix}"
target_prefix := "${prefix}"
target_exec_prefix := "${exec_prefix}"
target_base_libdir = "${target_base_prefix}/${baselib}"
diff --git a/import-layers/yocto-poky/meta/classes/cve-check.bbclass b/import-layers/yocto-poky/meta/classes/cve-check.bbclass
index 13ec62ec9..bc2f03f7d 100644
--- a/import-layers/yocto-poky/meta/classes/cve-check.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cve-check.bbclass
@@ -83,6 +83,11 @@ python cve_check_write_rootfs_manifest () {
import shutil
+ if d.getVar("CVE_CHECK_COPY_FILES") == "1":
+ deploy_file = os.path.join(d.getVar("CVE_CHECK_DIR"), d.getVar("PN"))
+ if os.path.exists(deploy_file):
+ bb.utils.remove(deploy_file)
+
if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE")):
bb.note("Writing rootfs CVE manifest")
deploy_dir = d.getVar("DEPLOY_DIR_IMAGE")
@@ -102,6 +107,7 @@ python cve_check_write_rootfs_manifest () {
}
ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
+do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
def get_patches_cves(d):
"""
@@ -112,10 +118,24 @@ def get_patches_cves(d):
pn = d.getVar("PN")
cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
+
+ # Matches the last CVE ID (e.g. CVE-1234-211432) in the file name,
+ # even when written in lowercase. Multiple CVE IDs in a single file
+ # name are not supported.
+ cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)")
+
patched_cves = set()
bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
for url in src_patches(d):
patch_file = bb.fetch.decodeurl(url)[2]
+
+ # Check patch file name for CVE ID
+ fname_match = cve_file_name_match.search(patch_file)
+ if fname_match:
+ cve = fname_match.group(1).upper()
+ patched_cves.add(cve)
+ bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file))
+
with open(patch_file, "r", encoding="utf-8") as f:
try:
patch_text = f.read()
@@ -134,7 +154,7 @@ def get_patches_cves(d):
for cve in cves.split():
bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
patched_cves.add(cve)
- else:
+ elif not fname_match:
bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
return patched_cves
@@ -149,7 +169,7 @@ def check_cves(d, patched_cves):
cves_patched = []
cves_unpatched = []
bpn = d.getVar("CVE_PRODUCT")
- pv = d.getVar("PV").split("git+")[0]
+ pv = d.getVar("PV").split("+git")[0]
cves = " ".join(patched_cves)
cve_db_dir = d.getVar("CVE_CHECK_DB_DIR")
cve_whitelist = ast.literal_eval(d.getVar("CVE_CHECK_CVE_WHITELIST"))
@@ -171,7 +191,7 @@ def check_cves(d, patched_cves):
f.write("%s,%s,%s," % (bpn, pv, cves))
cmd.append(faux)
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
+ output = subprocess.check_output(cmd).decode("utf-8")
bb.debug(2, "Output of command %s:\n%s" % ("\n".join(cmd), output))
except subprocess.CalledProcessError as e:
bb.warn("Couldn't check for CVEs: %s (output %s)" % (e, e.output))
diff --git a/import-layers/yocto-poky/meta/classes/devshell.bbclass b/import-layers/yocto-poky/meta/classes/devshell.bbclass
index 4de7ea6fc..fdf7dc100 100644
--- a/import-layers/yocto-poky/meta/classes/devshell.bbclass
+++ b/import-layers/yocto-poky/meta/classes/devshell.bbclass
@@ -8,14 +8,14 @@ python do_devshell () {
fakeenv = d.getVar("FAKEROOTENV").split()
for f in fakeenv:
k = f.split("=")
- d.setVar(k[0], k[1])
+ d.setVar(k[0], k[1])
d.appendVar("OE_TERMINAL_EXPORTS", " " + k[0])
d.delVarFlag("do_devshell", "fakeroot")
oe_terminal(d.getVar('DEVSHELL'), 'OpenEmbedded Developer Shell', d)
}
-addtask devshell after do_patch
+addtask devshell after do_patch do_prepare_recipe_sysroot
# The directory that the terminal starts in
DEVSHELL_STARTDIR ?= "${S}"
@@ -49,7 +49,7 @@ def devpyshell(d):
old[3] = old[3] &~ termios.ECHO &~ termios.ICANON
# &~ termios.ISIG
termios.tcsetattr(fd, termios.TCSADRAIN, old)
-
+
# No echo or buffering over the pty
noechoicanon(s)
@@ -145,7 +145,7 @@ python do_devpyshell() {
try:
devpyshell(d)
except SystemExit:
- # Stop the SIGTERM above causing an error exit code
+ # Stop the SIGTERM above causing an error exit code
return
finally:
return
diff --git a/import-layers/yocto-poky/meta/classes/devtool-source.bbclass b/import-layers/yocto-poky/meta/classes/devtool-source.bbclass
new file mode 100644
index 000000000..8f5bc86b2
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/devtool-source.bbclass
@@ -0,0 +1,165 @@
+# Development tool - source extraction helper class
+#
+# NOTE: this class is intended for use by devtool and should not be
+# inherited manually.
+#
+# Copyright (C) 2014-2017 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+DEVTOOL_TEMPDIR ?= ""
+DEVTOOL_PATCH_SRCDIR = "${DEVTOOL_TEMPDIR}/patchworkdir"
+
+
+python() {
+ tempdir = d.getVar('DEVTOOL_TEMPDIR')
+
+ if not tempdir:
+ bb.fatal('devtool-source class is for internal use by devtool only')
+
+ # Make a subdir so we guard against WORKDIR==S
+ workdir = os.path.join(tempdir, 'workdir')
+ d.setVar('WORKDIR', workdir)
+ if not d.getVar('S').startswith(workdir):
+ # Usually a shared workdir recipe (kernel, gcc)
+ # Try to set a reasonable default
+ if bb.data.inherits_class('kernel', d):
+ d.setVar('S', '${WORKDIR}/source')
+ else:
+ d.setVar('S', '${WORKDIR}/%s' % os.path.basename(d.getVar('S')))
+ if bb.data.inherits_class('kernel', d):
+ # We don't want to move the source to STAGING_KERNEL_DIR here
+ d.setVar('STAGING_KERNEL_DIR', '${S}')
+
+ d.setVar('STAMPS_DIR', os.path.join(tempdir, 'stamps'))
+ d.setVar('T', os.path.join(tempdir, 'temp'))
+
+ # Hook in pre/postfuncs
+ is_kernel_yocto = bb.data.inherits_class('kernel-yocto', d)
+ if is_kernel_yocto:
+ unpacktask = 'do_kernel_checkout'
+ d.appendVarFlag('do_configure', 'postfuncs', ' devtool_post_configure')
+ else:
+ unpacktask = 'do_unpack'
+ d.appendVarFlag(unpacktask, 'postfuncs', ' devtool_post_unpack')
+ d.prependVarFlag('do_patch', 'prefuncs', ' devtool_pre_patch')
+ d.appendVarFlag('do_patch', 'postfuncs', ' devtool_post_patch')
+
+ # NOTE: in order for the patch stuff to be fully functional,
+ # PATCHTOOL and PATCH_COMMIT_FUNCTIONS need to be set; we can't
+ # do that here because we can't guarantee the order of the anonymous
+ # functions, so it gets done in the bbappend we create.
+}
+
+
+python devtool_post_unpack() {
+ import oe.recipeutils
+ import shutil
+ sys.path.insert(0, os.path.join(d.getVar('COREBASE'), 'scripts', 'lib'))
+ import scriptutils
+ from devtool import setup_git_repo
+
+ tempdir = d.getVar('DEVTOOL_TEMPDIR')
+ workdir = d.getVar('WORKDIR')
+ srcsubdir = d.getVar('S')
+
+ def _move_file(src, dst):
+ """Move a file. Creates all the directory components of destination path."""
+ dst_d = os.path.dirname(dst)
+ if dst_d:
+ bb.utils.mkdirhier(dst_d)
+ shutil.move(src, dst)
+
+ def _ls_tree(directory):
+ """Recursive listing of files in a directory"""
+ ret = []
+ for root, dirs, files in os.walk(directory):
+ ret.extend([os.path.relpath(os.path.join(root, fname), directory) for
+ fname in files])
+ return ret
+
+ # Move local source files into separate subdir
+ recipe_patches = [os.path.basename(patch) for patch in
+ oe.recipeutils.get_recipe_patches(d)]
+ local_files = oe.recipeutils.get_recipe_local_files(d)
+
+ # Ignore local files with subdir={BP}
+ srcabspath = os.path.abspath(srcsubdir)
+ local_files = [fname for fname in local_files if
+ os.path.exists(os.path.join(workdir, fname)) and
+ (srcabspath == workdir or not
+ os.path.join(workdir, fname).startswith(srcabspath +
+ os.sep))]
+ if local_files:
+ for fname in local_files:
+ _move_file(os.path.join(workdir, fname),
+ os.path.join(tempdir, 'oe-local-files', fname))
+ with open(os.path.join(tempdir, 'oe-local-files', '.gitignore'),
+ 'w') as f:
+ f.write('# Ignore local files, by default. Remove this file '
+ 'if you want to commit the directory to Git\n*\n')
+
+ if srcsubdir == workdir:
+ # Find non-patch non-local sources that were "unpacked" to srctree
+ # directory
+ src_files = [fname for fname in _ls_tree(workdir) if
+ os.path.basename(fname) not in recipe_patches]
+ srcsubdir = d.getVar('DEVTOOL_PATCH_SRCDIR')
+ # Move source files to S
+ for path in src_files:
+ _move_file(os.path.join(workdir, path),
+ os.path.join(srcsubdir, path))
+ elif os.path.dirname(srcsubdir) != workdir:
+ # Handle if S is set to a subdirectory of the source
+ srcsubdir = os.path.join(workdir, os.path.relpath(srcsubdir, workdir).split(os.sep)[0])
+
+ scriptutils.git_convert_standalone_clone(srcsubdir)
+
+ # Make sure that srcsubdir exists
+ bb.utils.mkdirhier(srcsubdir)
+ if not os.listdir(srcsubdir):
+ bb.warn("No source unpacked to S - either the %s recipe "
+ "doesn't use any source or the correct source "
+ "directory could not be determined" % d.getVar('PN'))
+
+ devbranch = d.getVar('DEVTOOL_DEVBRANCH')
+ setup_git_repo(srcsubdir, d.getVar('PV'), devbranch, d=d)
+
+ (stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srcsubdir)
+ initial_rev = stdout.rstrip()
+ with open(os.path.join(tempdir, 'initial_rev'), 'w') as f:
+ f.write(initial_rev)
+
+ with open(os.path.join(tempdir, 'srcsubdir'), 'w') as f:
+ f.write(srcsubdir)
+}
+
+python devtool_pre_patch() {
+ if d.getVar('S') == d.getVar('WORKDIR'):
+ d.setVar('S', '${DEVTOOL_PATCH_SRCDIR}')
+}
+
+python devtool_post_patch() {
+ tempdir = d.getVar('DEVTOOL_TEMPDIR')
+ with open(os.path.join(tempdir, 'srcsubdir'), 'r') as f:
+ srcsubdir = f.read()
+ bb.process.run('git tag -f devtool-patched', cwd=srcsubdir)
+}
+
+python devtool_post_configure() {
+ import shutil
+ tempdir = d.getVar('DEVTOOL_TEMPDIR')
+ shutil.copy2(os.path.join(d.getVar('B'), '.config'), tempdir)
+}
diff --git a/import-layers/yocto-poky/meta/classes/distrodata.bbclass b/import-layers/yocto-poky/meta/classes/distrodata.bbclass
index 5e3444161..c85f7b347 100644
--- a/import-layers/yocto-poky/meta/classes/distrodata.bbclass
+++ b/import-layers/yocto-poky/meta/classes/distrodata.bbclass
@@ -261,12 +261,44 @@ python do_checkpkg() {
from bb.utils import vercmp_string
from bb.fetch2 import FetchError, NoMethodError, decodeurl
- """first check whether a uri is provided"""
- src_uri = (d.getVar('SRC_URI') or '').split()
- if src_uri:
- uri_type, _, _, _, _, _ = decodeurl(src_uri[0])
- else:
- uri_type = "none"
+ def get_upstream_version_and_status():
+
+ # Set if the upstream check fails reliably, e.g. due to absent git tags or a weird version format on our side or upstream.
+ upstream_version_unknown = localdata.getVar('UPSTREAM_VERSION_UNKNOWN')
+ # set if the upstream check cannot be reliably performed due to transient network failures, or server behaving weirdly.
+ # This one should be used sparingly, as it completely excludes a recipe from upstream checking.
+ upstream_check_unreliable = localdata.getVar('UPSTREAM_CHECK_UNRELIABLE')
+
+ if upstream_check_unreliable == "1":
+ return "N/A", "CHECK_IS_UNRELIABLE"
+
+ try:
+ uv = oe.recipeutils.get_recipe_upstream_version(localdata)
+ pupver = uv['version'] if uv['version'] else "N/A"
+ except Exception as e:
+ pupver = "N/A"
+
+ if pupver == "N/A":
+ pstatus = "UNKNOWN" if upstream_version_unknown else "UNKNOWN_BROKEN"
+ else:
+ src_uri = (localdata.getVar('SRC_URI') or '').split()
+ if src_uri:
+ uri_type, _, _, _, _, _ = decodeurl(src_uri[0])
+ else:
+ uri_type = "none"
+ pv, _, _ = oe.recipeutils.get_recipe_pv_without_srcpv(pversion, uri_type)
+ upv, _, _ = oe.recipeutils.get_recipe_pv_without_srcpv(pupver, uri_type)
+
+ cmp = vercmp_string(pv, upv)
+ if cmp == -1:
+ pstatus = "UPDATE" if not upstream_version_unknown else "KNOWN_BROKEN"
+ elif cmp == 0:
+ pstatus = "MATCH" if not upstream_version_unknown else "KNOWN_BROKEN"
+ else:
+ pstatus = "UNKNOWN" if upstream_version_unknown else "UNKNOWN_BROKEN"
+
+ return pupver, pstatus
+
"""initialize log files."""
logpath = d.getVar('LOG_DIR')
@@ -313,34 +345,7 @@ python do_checkpkg() {
psrcuri = localdata.getVar('SRC_URI')
maintainer = localdata.getVar('RECIPE_MAINTAINER')
- """ Get upstream version version """
- pupver = ""
- pstatus = ""
-
- try:
- uv = oe.recipeutils.get_recipe_upstream_version(localdata)
-
- pupver = uv['version']
- except Exception as e:
- if e is FetchError:
- pstatus = "ErrAccess"
- elif e is NoMethodError:
- pstatus = "ErrUnsupportedProto"
- else:
- pstatus = "ErrUnknown"
-
- """Set upstream version status"""
- if not pupver:
- pupver = "N/A"
- else:
- pv, _, _ = oe.recipeutils.get_recipe_pv_without_srcpv(pversion, uri_type)
- upv, _, _ = oe.recipeutils.get_recipe_pv_without_srcpv(pupver, uri_type)
-
- cmp = vercmp_string(pv, upv)
- if cmp == -1:
- pstatus = "UPDATE"
- elif cmp == 0:
- pstatus = "MATCH"
+ pupver, pstatus = get_upstream_version_and_status()
if psrcuri:
psrcuri = psrcuri.split()[0]
diff --git a/import-layers/yocto-poky/meta/classes/distrooverrides.bbclass b/import-layers/yocto-poky/meta/classes/distrooverrides.bbclass
new file mode 100644
index 000000000..9f4db0d77
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/distrooverrides.bbclass
@@ -0,0 +1,32 @@
+# Turns certain DISTRO_FEATURES into overrides with the same
+# name plus a df- prefix. Ensures that these special
+# distro features remain set also for native and nativesdk
+# recipes, so that these overrides can also be used there.
+#
+# This makes it simpler to write .bbappends that only change the
+# task signatures of the recipe if the change is really enabled,
+# for example with:
+# do_install_append_df-my-feature () { ... }
+# where "my-feature" is a DISTRO_FEATURE.
+#
+# The class is meant to be used in a layer.conf or distro
+# .inc file with:
+# INHERIT += "distrooverrides"
+# DISTRO_FEATURES_OVERRIDES += "my-feature"
+#
+# Beware that this part of OVERRIDES changes during parsing, so usage
+# of these overrides should be limited to .bb and .bbappend files,
+# because then DISTRO_FEATURES is final.
+
+DISTRO_FEATURES_OVERRIDES ?= ""
+DISTRO_FEATURES_OVERRIDES[doc] = "A space-separated list of <feature> entries. \
+Each entry is added to OVERRIDES as df-<feature> if <feature> is in DISTRO_FEATURES."
+
+DISTRO_FEATURES_FILTER_NATIVE_append = " ${DISTRO_FEATURES_OVERRIDES}"
+DISTRO_FEATURES_FILTER_NATIVESDK_append = " ${DISTRO_FEATURES_OVERRIDES}"
+
+# If DISTRO_FEATURES_OVERRIDES or DISTRO_FEATURES show up in a task
+# signature because of this line, then the task dependency on
+# OVERRIDES itself should be fixed. Excluding these two variables
+# with DISTROOVERRIDES[vardepsexclude] would just work around the problem.
+DISTROOVERRIDES .= "${@ ''.join([':df-' + x for x in sorted(set(d.getVar('DISTRO_FEATURES_OVERRIDES').split()) & set((d.getVar('DISTRO_FEATURES') or '').split()))]) }"
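
Putting the header's usage notes together, a sketch with a hypothetical feature name:

# distro .inc or layer.conf:
INHERIT += "distrooverrides"
DISTRO_FEATURES_OVERRIDES += "my-feature"

# some recipe .bbappend; this only fires (and only changes the task
# signature) when "my-feature" is actually in DISTRO_FEATURES:
do_install_append_df-my-feature () {
    install -d ${D}${sysconfdir}/my-feature
}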
diff --git a/import-layers/yocto-poky/meta/classes/externalsrc.bbclass b/import-layers/yocto-poky/meta/classes/externalsrc.bbclass
index d64af6a9c..65dd13ddc 100644
--- a/import-layers/yocto-poky/meta/classes/externalsrc.bbclass
+++ b/import-layers/yocto-poky/meta/classes/externalsrc.bbclass
@@ -29,6 +29,12 @@ EXTERNALSRC_SYMLINKS ?= "oe-workdir:${WORKDIR} oe-logs:${T}"
python () {
externalsrc = d.getVar('EXTERNALSRC')
+ externalsrcbuild = d.getVar('EXTERNALSRC_BUILD')
+
+ if externalsrc and not externalsrc.startswith("/"):
+ bb.error("EXTERNALSRC must be an absolute path")
+ if externalsrcbuild and not externalsrcbuild.startswith("/"):
+ bb.error("EXTERNALSRC_BUILD must be an absolute path")
# If this is the base recipe and EXTERNALSRC is set for it or any of its
# derivatives, then enable BB_DONT_CACHE to force the recipe to always be
@@ -48,7 +54,6 @@ python () {
if externalsrc:
d.setVar('S', externalsrc)
- externalsrcbuild = d.getVar('EXTERNALSRC_BUILD')
if externalsrcbuild:
d.setVar('B', externalsrcbuild)
else:
@@ -167,6 +172,7 @@ do_buildclean[nostamp] = "1"
do_buildclean[doc] = "Call 'make clean' or equivalent in ${B}"
externalsrc_do_buildclean() {
if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
+ rm -f ${@' '.join([x.split(':')[0] for x in (d.getVar('EXTERNALSRC_SYMLINKS') or '').split()])}
oe_runmake clean || die "make failed"
else
bbnote "nothing to do - no makefile found"
@@ -179,14 +185,20 @@ def srctree_hash_files(d, srcdir=None):
import tempfile
s_dir = srcdir or d.getVar('EXTERNALSRC')
- git_dir = os.path.join(s_dir, '.git')
- oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1')
+ git_dir = None
+
+ try:
+ git_dir = os.path.join(s_dir,
+ subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir']).decode("utf-8").rstrip())
+ except subprocess.CalledProcessError:
+ pass
ret = " "
- if os.path.exists(git_dir):
- with tempfile.NamedTemporaryFile(dir=git_dir, prefix='oe-devtool-index') as tmp_index:
+ if git_dir is not None:
+ oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1')
+ with tempfile.NamedTemporaryFile(prefix='oe-devtool-index') as tmp_index:
# Clone index
- shutil.copy2(os.path.join(git_dir, 'index'), tmp_index.name)
+ shutil.copyfile(os.path.join(git_dir, 'index'), tmp_index.name)
# Update our custom index
env = os.environ.copy()
env['GIT_INDEX_FILE'] = tmp_index.name
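
The rev-parse lookup above replaces the hardcoded ${EXTERNALSRC}/.git path, which also covers trees where .git is a file pointing elsewhere (submodules, worktrees). A standalone sketch of the same lookup:

import os
import subprocess

def find_git_dir(s_dir):
    # Ask git where the repository metadata lives instead of assuming
    # s_dir/.git is a directory; returns None for non-git trees.
    try:
        out = subprocess.check_output(
            ['git', '-C', s_dir, 'rev-parse', '--git-dir'])
        return os.path.join(s_dir, out.decode("utf-8").rstrip())
    except subprocess.CalledProcessError:
        return None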
diff --git a/import-layers/yocto-poky/meta/classes/gettext.bbclass b/import-layers/yocto-poky/meta/classes/gettext.bbclass
index 0be14246b..da68e6324 100644
--- a/import-layers/yocto-poky/meta/classes/gettext.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gettext.bbclass
@@ -13,7 +13,12 @@ def gettext_oeconf(d):
return '--disable-nls'
return "--enable-nls"
-DEPENDS_GETTEXT ??= "virtual/gettext gettext-native"
+DEPENDS_GETTEXT ??= "gettext-native"
-BASEDEPENDS =+ "${@gettext_dependencies(d)}"
+BASEDEPENDS_append = " ${@gettext_dependencies(d)}"
EXTRA_OECONF_append = " ${@gettext_oeconf(d)}"
+
+# Without this, msgfmt from gettext-native will not find ITS files
+# provided by target recipes (for example, polkit.its).
+GETTEXTDATADIRS_append_class-target = ":${STAGING_DATADIR}/gettext"
+export GETTEXTDATADIRS
diff --git a/import-layers/yocto-poky/meta/classes/gnomebase.bbclass b/import-layers/yocto-poky/meta/classes/gnomebase.bbclass
index 54aa45f17..4ccc8e078 100644
--- a/import-layers/yocto-poky/meta/classes/gnomebase.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gnomebase.bbclass
@@ -14,6 +14,8 @@ FILES_${PN} += "${datadir}/application-registry \
${datadir}/polkit* \
${datadir}/GConf \
${datadir}/glib-2.0/schemas \
+ ${datadir}/appdata \
+ ${datadir}/icons \
"
FILES_${PN}-doc += "${datadir}/devhelp"
diff --git a/import-layers/yocto-poky/meta/classes/go.bbclass b/import-layers/yocto-poky/meta/classes/go.bbclass
index 85f71a2e9..09b01a84c 100644
--- a/import-layers/yocto-poky/meta/classes/go.bbclass
+++ b/import-layers/yocto-poky/meta/classes/go.bbclass
@@ -1,77 +1,182 @@
-inherit goarch
+inherit goarch ptest
-# x32 ABI is not supported on go compiler so far
-COMPATIBLE_HOST_linux-gnux32 = "null"
-# ppc32 is not supported in go compilers
-COMPATIBLE_HOST_powerpc = "null"
+def get_go_parallel_make(d):
+ pm = (d.getVar('PARALLEL_MAKE') or '').split()
+ # look for '-j' and throw other options (e.g. '-l') away
+ # because they might have a different meaning in golang
+ while pm:
+ opt = pm.pop(0)
+ if opt == '-j':
+ v = pm.pop(0)
+ elif opt.startswith('-j'):
+ v = opt[2:].strip()
+ else:
+ continue
+
+ return '-p %d' % int(v)
+
+ return ""
+
+GO_PARALLEL_BUILD ?= "${@get_go_parallel_make(d)}"
GOROOT_class-native = "${STAGING_LIBDIR_NATIVE}/go"
-GOROOT = "${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go"
-GOBIN_FINAL_class-native = "${GOROOT_FINAL}/bin"
-GOBIN_FINAL = "${GOROOT_FINAL}/bin/${GOOS}_${GOARCH}"
-
-export GOOS = "${TARGET_GOOS}"
-export GOARCH = "${TARGET_GOARCH}"
-export GOARM = "${TARGET_GOARM}"
-export CGO_ENABLED = "1"
+GOROOT_class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go"
+GOROOT = "${STAGING_LIBDIR}/go"
export GOROOT
-export GOROOT_FINAL = "${libdir}/${TARGET_SYS}/go"
-export GOBIN_FINAL
-export GOPKG_FINAL = "${GOROOT_FINAL}/pkg/${GOOS}_${GOARCH}"
-export GOSRC_FINAL = "${GOROOT_FINAL}/src"
-export GO_GCFLAGS = "${TARGET_CFLAGS}"
-export GO_LDFLAGS = "${TARGET_LDFLAGS}"
-export CGO_CFLAGS = "${TARGET_CC_ARCH}${TOOLCHAIN_OPTIONS} ${TARGET_CFLAGS}"
-export CGO_CPPFLAGS = "${TARGET_CPPFLAGS}"
-export CGO_CXXFLAGS = "${TARGET_CC_ARCH}${TOOLCHAIN_OPTIONS} ${TARGET_CXXFLAGS}"
-export CGO_LDFLAGS = "${TARGET_CC_ARCH}${TOOLCHAIN_OPTIONS} ${TARGET_LDFLAGS}"
-
-DEPENDS += "go-cross-${TARGET_ARCH}"
-DEPENDS_class-native += "go-native"
-
-FILES_${PN}-staticdev += "${GOSRC_FINAL}/${GO_IMPORT}"
-FILES_${PN}-staticdev += "${GOPKG_FINAL}/${GO_IMPORT}*"
+export GOROOT_FINAL = "${libdir}/go"
+
+DEPENDS_GOLANG_class-target = "virtual/${TARGET_PREFIX}go virtual/${TARGET_PREFIX}go-runtime"
+DEPENDS_GOLANG_class-native = "go-native"
+DEPENDS_GOLANG_class-nativesdk = "virtual/${TARGET_PREFIX}go-crosssdk virtual/${TARGET_PREFIX}go-runtime"
+
+DEPENDS_append = " ${DEPENDS_GOLANG}"
+
+GO_LINKSHARED ?= "${@'-linkshared' if d.getVar('GO_DYNLINK') else ''}"
+GO_RPATH_LINK = "${@'-Wl,-rpath-link=${STAGING_DIR_TARGET}${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
+GO_RPATH = "${@'-r ${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
+GO_RPATH_class-native = "${@'-r ${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
+GO_RPATH_LINK_class-native = "${@'-Wl,-rpath-link=${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
+GO_EXTLDFLAGS ?= "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS} ${GO_RPATH_LINK} ${LDFLAGS}"
+GO_LINKMODE ?= ""
+GO_LINKMODE_class-nativesdk = "--linkmode=external"
+GO_LDFLAGS ?= '-ldflags="${GO_RPATH} ${GO_LINKMODE} -extldflags '${GO_EXTLDFLAGS}'"'
+export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS}"
+export GOPTESTBUILDFLAGS ?= "${GOBUILDFLAGS} -c"
+export GOPTESTFLAGS ?= "-test.v"
+GOBUILDFLAGS_prepend_task-compile = "${GO_PARALLEL_BUILD} "
+
+export GO = "${HOST_PREFIX}go"
+GOTOOLDIR = "${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go/pkg/tool/${BUILD_GOTUPLE}"
+GOTOOLDIR_class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}"
+export GOTOOLDIR
+
+SECURITY_CFLAGS = "${SECURITY_NOPIE_CFLAGS}"
+SECURITY_LDFLAGS = ""
+
+export CGO_ENABLED ?= "1"
+export CGO_CFLAGS ?= "${CFLAGS}"
+export CGO_CPPFLAGS ?= "${CPPFLAGS}"
+export CGO_CXXFLAGS ?= "${CXXFLAGS}"
+export CGO_LDFLAGS ?= "${LDFLAGS}"
GO_INSTALL ?= "${GO_IMPORT}/..."
+GO_INSTALL_FILTEROUT ?= "${GO_IMPORT}/vendor/"
+
+B = "${WORKDIR}/build"
+export GOPATH = "${B}"
+GO_TMPDIR ?= "${WORKDIR}/go-tmp"
+GO_TMPDIR[vardepvalue] = ""
+
+python go_do_unpack() {
+ src_uri = (d.getVar('SRC_URI') or "").split()
+ if len(src_uri) == 0:
+ return
+
+ try:
+ fetcher = bb.fetch2.Fetch(src_uri, d)
+ for url in fetcher.urls:
+ if fetcher.ud[url].type == 'git':
+ if fetcher.ud[url].parm.get('destsuffix') is None:
+ s_dirname = os.path.basename(d.getVar('S'))
+ fetcher.ud[url].parm['destsuffix'] = os.path.join(s_dirname, 'src',
+ d.getVar('GO_IMPORT')) + '/'
+ fetcher.unpack(d.getVar('WORKDIR'))
+ except bb.fetch2.BBFetchException as e:
+ raise bb.build.FuncFailed(e)
+}
-do_go_compile() {
- GOPATH=${S}:${STAGING_LIBDIR}/${TARGET_SYS}/go go env
+go_list_packages() {
+ ${GO} list -f '{{.ImportPath}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
+ egrep -v '${GO_INSTALL_FILTEROUT}'
+}
+
+go_list_package_tests() {
+ ${GO} list -f '{{.ImportPath}} {{.TestGoFiles}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
+ grep -v '\[\]$' | \
+ egrep -v '${GO_INSTALL_FILTEROUT}' | \
+ awk '{ print $1 }'
+}
+
+go_do_configure() {
+ ln -snf ${S}/src ${B}/
+}
+
+go_do_compile() {
+ export TMPDIR="${GO_TMPDIR}"
+ ${GO} env
if [ -n "${GO_INSTALL}" ]; then
- GOPATH=${S}:${STAGING_LIBDIR}/${TARGET_SYS}/go go install -v ${GO_INSTALL}
+ ${GO} install ${GO_LINKSHARED} ${GOBUILDFLAGS} `go_list_packages`
fi
}
+do_compile[dirs] =+ "${GO_TMPDIR}"
+do_compile[cleandirs] = "${B}/bin ${B}/pkg"
-do_go_install() {
- rm -rf ${WORKDIR}/staging
- install -d ${WORKDIR}/staging${GOROOT_FINAL} ${D}${GOROOT_FINAL}
- tar -C ${S} -cf - . | tar -C ${WORKDIR}/staging${GOROOT_FINAL} -xpvf -
-
- find ${WORKDIR}/staging${GOROOT_FINAL} \( \
- -name \*.indirectionsymlink -o \
- -name .git\* -o \
- -name .hg -o \
- -name .svn -o \
- -name .pc\* -o \
- -name patches\* \
- \) -print0 | \
- xargs -r0 rm -rf
-
- tar -C ${WORKDIR}/staging${GOROOT_FINAL} -cf - . | \
- tar -C ${D}${GOROOT_FINAL} -xpvf -
-
- chown -R root:root "${D}${GOROOT_FINAL}"
-
- if [ -e "${D}${GOBIN_FINAL}" ]; then
- install -d -m 0755 "${D}${bindir}"
- find "${D}${GOBIN_FINAL}" ! -type d -print0 | xargs -r0 mv --target-directory="${D}${bindir}"
- rmdir -p "${D}${GOBIN_FINAL}" || true
- fi
+do_compile_ptest() {
+ export TMPDIR="${GO_TMPDIR}"
+ rm -f ${B}/.go_compiled_tests.list
+ go_list_package_tests | while read pkg; do
+ cd ${B}/src/$pkg
+ ${GO} test ${GOPTESTBUILDFLAGS} $pkg
+ find . -mindepth 1 -maxdepth 1 -type f -name '*.test' -exec echo $pkg/{} \; | \
+ sed -e's,/\./,/,'>> ${B}/.go_compiled_tests.list
+ done
}
+do_compile_ptest_base[dirs] =+ "${GO_TMPDIR}"
+
+go_do_install() {
+ install -d ${D}${libdir}/go/src/${GO_IMPORT}
+ tar -C ${S}/src/${GO_IMPORT} -cf - --exclude-vcs --exclude '*.test' . | \
+ tar -C ${D}${libdir}/go/src/${GO_IMPORT} --no-same-owner -xf -
+ tar -C ${B} -cf - pkg | tar -C ${D}${libdir}/go --no-same-owner -xf -
-do_compile() {
- do_go_compile
+ if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then
+ install -d ${D}${bindir}
+ install -m 0755 ${B}/${GO_BUILD_BINDIR}/* ${D}${bindir}/
+ fi
}
-do_install() {
- do_go_install
+do_install_ptest_base() {
+set -x
+ test -f "${B}/.go_compiled_tests.list" || exit 0
+ tests=""
+ while read test; do
+ tests="$tests${tests:+ }${test%.test}"
+ testdir=`dirname $test`
+ install -d ${D}${PTEST_PATH}/$testdir
+ install -m 0755 ${B}/src/$test ${D}${PTEST_PATH}/$test
+ if [ -d "${B}/src/$testdir/testdata" ]; then
+ cp --preserve=mode,timestamps -R "${B}/src/$testdir/testdata" ${D}${PTEST_PATH}/$testdir
+ fi
+ done < ${B}/.go_compiled_tests.list
+ if [ -n "$tests" ]; then
+ install -d ${D}${PTEST_PATH}
+ cat >${D}${PTEST_PATH}/run-ptest <<EOF
+#!/bin/sh
+ANYFAILED=0
+for t in $tests; do
+ testdir=\`dirname \$t.test\`
+ if ( cd "${PTEST_PATH}/\$testdir"; "${PTEST_PATH}/\$t.test" ${GOPTESTFLAGS} | tee /dev/fd/9 | grep -q "^FAIL" ) 9>&1; then
+ ANYFAILED=1
+ fi
+done
+if [ \$ANYFAILED -ne 0 ]; then
+ echo "FAIL: ${PN}"
+ exit 1
+fi
+echo "PASS: ${PN}"
+exit 0
+EOF
+ chmod +x ${D}${PTEST_PATH}/run-ptest
+ else
+ rm -rf ${D}${PTEST_PATH}
+ fi
+set +x
}
+
+EXPORT_FUNCTIONS do_unpack do_configure do_compile do_install
+
+FILES_${PN}-dev = "${libdir}/go/src"
+FILES_${PN}-staticdev = "${libdir}/go/pkg"
+
+INSANE_SKIP_${PN} += "ldflags"
+INSANE_SKIP_${PN}-ptest += "ldflags"
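
A hypothetical recipe using the reworked class (import path, URL and revision are illustrative); GO_IMPORT drives both the unpack layout and the default build list:

GO_IMPORT = "github.com/example/hello"
SRC_URI = "git://${GO_IMPORT};protocol=https"
SRCREV = "..."
inherit go
# go_do_unpack clones into ${S}/src/${GO_IMPORT}; do_compile then runs
# roughly: ${GO} install ${GO_LINKSHARED} ${GOBUILDFLAGS} ${GO_IMPORT}/...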
diff --git a/import-layers/yocto-poky/meta/classes/goarch.bbclass b/import-layers/yocto-poky/meta/classes/goarch.bbclass
index 12df88f8c..663c9ffc3 100644
--- a/import-layers/yocto-poky/meta/classes/goarch.bbclass
+++ b/import-layers/yocto-poky/meta/classes/goarch.bbclass
@@ -1,15 +1,37 @@
-BUILD_GOOS = "${@go_map_os(d.getVar('BUILD_OS', True), d)}"
-BUILD_GOARCH = "${@go_map_arch(d.getVar('BUILD_ARCH', True), d)}"
+BUILD_GOOS = "${@go_map_os(d.getVar('BUILD_OS'), d)}"
+BUILD_GOARCH = "${@go_map_arch(d.getVar('BUILD_ARCH'), d)}"
BUILD_GOTUPLE = "${BUILD_GOOS}_${BUILD_GOARCH}"
-HOST_GOOS = "${@go_map_os(d.getVar('HOST_OS', True), d)}"
-HOST_GOARCH = "${@go_map_arch(d.getVar('HOST_ARCH', True), d)}"
-HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH', True), d.getVar('TUNE_FEATURES', True), d)}"
+HOST_GOOS = "${@go_map_os(d.getVar('HOST_OS'), d)}"
+HOST_GOARCH = "${@go_map_arch(d.getVar('HOST_ARCH'), d)}"
+HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
+HOST_GO386 = "${@go_map_386(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
HOST_GOTUPLE = "${HOST_GOOS}_${HOST_GOARCH}"
-TARGET_GOOS = "${@go_map_os(d.getVar('TARGET_OS', True), d)}"
-TARGET_GOARCH = "${@go_map_arch(d.getVar('TARGET_ARCH', True), d)}"
-TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH', True), d.getVar('TUNE_FEATURES', True), d)}"
+TARGET_GOOS = "${@go_map_os(d.getVar('TARGET_OS'), d)}"
+TARGET_GOARCH = "${@go_map_arch(d.getVar('TARGET_ARCH'), d)}"
+TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
+TARGET_GO386 = "${@go_map_386(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
TARGET_GOTUPLE = "${TARGET_GOOS}_${TARGET_GOARCH}"
-GO_BUILD_BINDIR = "${@['bin/${HOST_GOTUPLE}','bin'][d.getVar('BUILD_GOTUPLE',True) == d.getVar('HOST_GOTUPLE',True)]}"
+GO_BUILD_BINDIR = "${@['bin/${HOST_GOTUPLE}','bin'][d.getVar('BUILD_GOTUPLE') == d.getVar('HOST_GOTUPLE')]}"
+
+# Go supports dynamic linking on a limited set of architectures.
+# See the supportsDynlink function in go/src/cmd/compile/internal/gc/main.go
+GO_DYNLINK = ""
+GO_DYNLINK_arm = "1"
+GO_DYNLINK_aarch64 = "1"
+GO_DYNLINK_x86 = "1"
+GO_DYNLINK_x86-64 = "1"
+GO_DYNLINK_powerpc64 = "1"
+GO_DYNLINK_class-native = ""
+
+# define here because everybody inherits this class
+#
+COMPATIBLE_HOST_linux-gnux32 = "null"
+COMPATIBLE_HOST_linux-muslx32 = "null"
+COMPATIBLE_HOST_powerpc = "null"
+COMPATIBLE_HOST_powerpc64 = "null"
+COMPATIBLE_HOST_mipsarchn32 = "null"
+ARM_INSTRUCTION_SET = "arm"
+TUNE_CCARGS_remove = "-march=mips32r2"
def go_map_arch(a, d):
import re
@@ -21,14 +43,14 @@ def go_map_arch(a, d):
return 'arm'
elif re.match('aarch64.*', a):
return 'arm64'
- elif re.match('mips64el*', a):
+ elif re.match('mips64el.*', a):
return 'mips64le'
- elif re.match('mips64*', a):
+ elif re.match('mips64.*', a):
return 'mips64'
- elif re.match('mipsel*', a):
- return 'mipsle'
- elif re.match('mips*', a):
+ elif a == 'mips':
return 'mips'
+ elif a == 'mipsel':
+ return 'mipsle'
elif re.match('p(pc|owerpc)(64)', a):
return 'ppc64'
elif re.match('p(pc|owerpc)(64el)', a):
@@ -43,6 +65,17 @@ def go_map_arm(a, f, d):
return '7'
elif 'armv6' in f:
return '6'
+ elif 'armv5' in f:
+ return '5'
+ return ''
+
+def go_map_386(a, f, d):
+ import re
+ if re.match('i.86', a):
+ if ('core2' in f) or ('corei7' in f):
+ return 'sse2'
+ else:
+ return '387'
return ''
def go_map_os(o, d):
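
In brief, the tightened mappings above behave as follows; bare mips/mipsel now require exact matches instead of prefix matches:

# go_map_arch:  mips64el -> mips64le    mips64 -> mips64
#               mipsel   -> mipsle      mips   -> mips    (exact only)
# go_map_386:   i?86 with "core2" or "corei7" in TUNE_FEATURES -> sse2
#               any other i?86 tune                            -> 387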
diff --git a/import-layers/yocto-poky/meta/classes/grub-efi.bbclass b/import-layers/yocto-poky/meta/classes/grub-efi.bbclass
index df7fe18a7..610479b85 100644
--- a/import-layers/yocto-poky/meta/classes/grub-efi.bbclass
+++ b/import-layers/yocto-poky/meta/classes/grub-efi.bbclass
@@ -17,7 +17,6 @@
# ${GRUB_ROOT} - grub's root device.
do_bootimg[depends] += "${MLPREFIX}grub-efi:do_deploy"
-do_bootdirectdisk[depends] += "${MLPREFIX}grub-efi:do_deploy"
GRUB_SERIAL ?= "console=ttyS0,115200"
GRUB_CFG_VM = "${S}/grub_vm.cfg"
diff --git a/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass b/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass
index 0ae2729c0..5201c7151 100644
--- a/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass
@@ -48,6 +48,7 @@ do_compile_prepend_class-target () {
# which may then get deleted (or their dependencies) and potentially segfault
export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
+GIR_EXTRA_LIBS_PATH=\`find ${B} -name *.so -printf "%h\n"|sort|uniq| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
GIR_EXTRA_LIBS_PATH=\`find ${B} -name .libs| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
if [ -d ".libs" ]; then
diff --git a/import-layers/yocto-poky/meta/classes/icecc.bbclass b/import-layers/yocto-poky/meta/classes/icecc.bbclass
index 77bf61133..1cc1c4ddb 100644
--- a/import-layers/yocto-poky/meta/classes/icecc.bbclass
+++ b/import-layers/yocto-poky/meta/classes/icecc.bbclass
@@ -116,7 +116,7 @@ def use_icecc(bb,d):
# for one reason or the other
# this is the old list (which doesn't seem to be valid anymore, because I was able to build
# all these with icecc enabled)
- # system_package_blacklist = [ "uclibc", "glibc", "gcc", "bind", "u-boot", "dhcp-forwarder", "enchant", "connman", "orbit2" ]
+ # system_package_blacklist = [ "glibc", "gcc", "bind", "u-boot", "dhcp-forwarder", "enchant", "connman", "orbit2" ]
# when adding new entry, please document why (how it failed) so that we can re-evaluate it later
# e.g. when there is new version
# building libgcc-initial with icecc fails with CPP sanity check error if host sysroot contains cross gcc built for another target tune/variant
diff --git a/import-layers/yocto-poky/meta/classes/image-live.bbclass b/import-layers/yocto-poky/meta/classes/image-live.bbclass
index a3d1b4e56..1623c1598 100644
--- a/import-layers/yocto-poky/meta/classes/image-live.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image-live.bbclass
@@ -34,20 +34,21 @@ do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
${MLPREFIX}syslinux:do_populate_sysroot \
syslinux-native:do_populate_sysroot \
${@oe.utils.ifelse(d.getVar('COMPRESSISO', False),'zisofs-tools-native:do_populate_sysroot','')} \
- ${PN}:do_image_ext4 \
+ ${PN}:do_image_${@d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')} \
"
LABELS_LIVE ?= "boot install"
ROOT_LIVE ?= "root=/dev/ram0"
-INITRD_IMAGE_LIVE ?= "core-image-minimal-initramfs"
+INITRD_IMAGE_LIVE ?= "${MLPREFIX}core-image-minimal-initramfs"
INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.cpio.gz"
-ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.ext4"
+LIVE_ROOTFS_TYPE ?= "ext4"
+ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${LIVE_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_live = "ext4"
-IMAGE_TYPEDEP_iso = "ext4"
-IMAGE_TYPEDEP_hddimg = "ext4"
+IMAGE_TYPEDEP_live = "${LIVE_ROOTFS_TYPE}"
+IMAGE_TYPEDEP_iso = "${LIVE_ROOTFS_TYPE}"
+IMAGE_TYPEDEP_hddimg = "${LIVE_ROOTFS_TYPE}"
IMAGE_TYPES_MASKED += "live hddimg iso"
python() {
@@ -91,7 +92,7 @@ build_iso() {
for fs in ${INITRD}
do
if [ ! -s "$fs" ]; then
- bbnote "ISO image will not be created. $fs is invalid."
+ bbwarn "ISO image will not be created. $fs is invalid."
return
fi
done
@@ -216,10 +217,10 @@ build_fat_img() {
fi
if [ -z "${HDDIMG_ID}" ]; then
- mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} \
+ mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} ${MKDOSFS_EXTRAOPTS} -C ${FATIMG} \
${BLOCKS}
else
- mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} \
+ mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} ${MKDOSFS_EXTRAOPTS} -C ${FATIMG} \
${BLOCKS} -i ${HDDIMG_ID}
fi
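With LIVE_ROOTFS_TYPE the live/iso/hddimg machinery no longer hardcodes ext4; the rootfs image task it depends on is derived from the configured type, with hyphens mapped to underscores as in all do_image_<type> task names. A minimal sketch of that derivation:

    # Hypothetical helper mirroring the ${@...replace('-', '_')} expansion above.
    def live_rootfs_task(live_rootfs_type):
        return 'do_image_%s' % live_rootfs_type.replace('-', '_')

    assert live_rootfs_task('ext4') == 'do_image_ext4'
    assert live_rootfs_task('squashfs-xz') == 'do_image_squashfs_xz'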
diff --git a/import-layers/yocto-poky/meta/classes/image-prelink.bbclass b/import-layers/yocto-poky/meta/classes/image-prelink.bbclass
index 4157df021..f3bb68b9e 100644
--- a/import-layers/yocto-poky/meta/classes/image-prelink.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image-prelink.bbclass
@@ -1,6 +1,6 @@
do_rootfs[depends] += "prelink-native:do_populate_sysroot"
-IMAGE_PREPROCESS_COMMAND += "prelink_setup; prelink_image; "
+IMAGE_PREPROCESS_COMMAND_append_libc-glibc = " prelink_setup; prelink_image; "
python prelink_setup () {
oe.utils.write_ld_so_conf(d)
@@ -36,7 +36,17 @@ prelink_image () {
dynamic_loader=$(linuxloader)
# prelink!
- ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
+ if [ "$BUILD_REPRODUCIBLE_BINARIES" = "1" ]; then
+ bbnote " prelink: BUILD_REPRODUCIBLE_BINARIES..."
+ if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
+ export PRELINK_TIMESTAMP=`git log -1 --pretty=%ct `
+ else
+ export PRELINK_TIMESTAMP=$REPRODUCIBLE_TIMESTAMP_ROOTFS
+ fi
+ ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -am -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
+ else
+ ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
+ fi
# Remove the prelink.conf if we had to add it.
if [ "$dummy_prelink_conf" = "true" ]; then
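For reproducible builds the prelink branch above pins PRELINK_TIMESTAMP (preferring an explicit REPRODUCIBLE_TIMESTAMP_ROOTFS, else the last git commit time) and drops prelink's -R flag, since randomized address ordering would defeat determinism. A sketch of the timestamp selection, assuming the working directory is a git checkout as the shell code does:

    import os
    import subprocess

    def prelink_timestamp():
        # Prefer the explicitly provided rootfs timestamp
        ts = os.environ.get('REPRODUCIBLE_TIMESTAMP_ROOTFS', '')
        if not ts:
            # Fall back to the committer time of the most recent commit
            ts = subprocess.check_output(
                ['git', 'log', '-1', '--pretty=%ct']).decode().strip()
        return ts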
diff --git a/import-layers/yocto-poky/meta/classes/image-vm.bbclass b/import-layers/yocto-poky/meta/classes/image-vm.bbclass
deleted file mode 100644
index 98bd92000..000000000
--- a/import-layers/yocto-poky/meta/classes/image-vm.bbclass
+++ /dev/null
@@ -1,171 +0,0 @@
-# image-vm.bbclass
-# (loosly based off image-live.bbclass Copyright (C) 2004, Advanced Micro Devices, Inc.)
-#
-# Create an image which can be placed directly onto a harddisk using dd and then
-# booted.
-#
-# This uses syslinux. extlinux would have been nice but required the ext2/3
-# partition to be mounted. grub requires to run itself as part of the install
-# process.
-#
-# The end result is a 512 boot sector populated with an MBR and partition table
-# followed by an msdos fat16 partition containing syslinux and a linux kernel
-# completed by the ext2/3 rootfs.
-#
-# We have to push the msdos parition table size > 16MB so fat 16 is used as parted
-# won't touch fat12 partitions.
-
-inherit live-vm-common
-
-do_bootdirectdisk[depends] += "dosfstools-native:do_populate_sysroot \
- virtual/kernel:do_deploy \
- syslinux:do_populate_sysroot \
- syslinux-native:do_populate_sysroot \
- parted-native:do_populate_sysroot \
- mtools-native:do_populate_sysroot \
- ${PN}:do_image_${VM_ROOTFS_TYPE} \
- "
-
-IMAGE_TYPEDEP_vmdk = "${VM_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_vdi = "${VM_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_qcow2 = "${VM_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_hdddirect = "${VM_ROOTFS_TYPE}"
-IMAGE_TYPES_MASKED += "vmdk vdi qcow2 hdddirect"
-
-VM_ROOTFS_TYPE ?= "ext4"
-ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${VM_ROOTFS_TYPE}"
-
-# Used by bootloader
-LABELS_VM ?= "boot"
-ROOT_VM ?= "root=/dev/sda2"
-# Using an initramfs is optional. Enable it by setting INITRD_IMAGE_VM.
-INITRD_IMAGE_VM ?= ""
-INITRD_VM ?= "${@'${IMGDEPLOYDIR}/${INITRD_IMAGE_VM}-${MACHINE}.cpio.gz' if '${INITRD_IMAGE_VM}' else ''}"
-do_bootdirectdisk[depends] += "${@'${INITRD_IMAGE_VM}:do_image_complete' if '${INITRD_IMAGE_VM}' else ''}"
-
-BOOTDD_VOLUME_ID ?= "boot"
-BOOTDD_EXTRA_SPACE ?= "16384"
-
-DISK_SIGNATURE ?= "${DISK_SIGNATURE_GENERATED}"
-DISK_SIGNATURE[vardepsexclude] = "DISK_SIGNATURE_GENERATED"
-
-build_boot_dd() {
- HDDDIR="${S}/hdd/boot"
- HDDIMG="${S}/hdd.image"
- IMAGE=${IMGDEPLOYDIR}/${IMAGE_NAME}.hdddirect
-
- populate_kernel $HDDDIR
-
- if [ "${PCBIOS}" = "1" ]; then
- syslinux_hddimg_populate $HDDDIR
- fi
- if [ "${EFI}" = "1" ]; then
- efi_hddimg_populate $HDDDIR
- fi
-
- BLOCKS=`du -bks $HDDDIR | cut -f 1`
- BLOCKS=`expr $BLOCKS + ${BOOTDD_EXTRA_SPACE}`
-
- # Remove it since mkdosfs would fail when it exists
- rm -f $HDDIMG
- mkdosfs -n ${BOOTDD_VOLUME_ID} -S 512 -C $HDDIMG $BLOCKS
- mcopy -i $HDDIMG -s $HDDDIR/* ::/
-
- if [ "${PCBIOS}" = "1" ]; then
- syslinux_hdddirect_install $HDDIMG
- fi
- chmod 644 $HDDIMG
-
- ROOTFSBLOCKS=`du -Lbks ${ROOTFS} | cut -f 1`
- TOTALSIZE=`expr $BLOCKS + $ROOTFSBLOCKS`
- END1=`expr $BLOCKS \* 1024`
- END2=`expr $END1 + 512`
- END3=`expr \( $ROOTFSBLOCKS \* 1024 \) + $END1`
-
- echo $ROOTFSBLOCKS $TOTALSIZE $END1 $END2 $END3
- rm -rf $IMAGE
- dd if=/dev/zero of=$IMAGE bs=1024 seek=$TOTALSIZE count=1
-
- parted $IMAGE mklabel msdos
- parted $IMAGE mkpart primary fat16 0 ${END1}B
- parted $IMAGE unit B mkpart primary ext2 ${END2}B ${END3}B
- parted $IMAGE set 1 boot on
-
- parted $IMAGE print
-
- awk "BEGIN { printf \"$(echo ${DISK_SIGNATURE} | sed 's/\(..\)\(..\)\(..\)\(..\)/\\x\4\\x\3\\x\2\\x\1/')\" }" | \
- dd of=$IMAGE bs=1 seek=440 conv=notrunc
-
- OFFSET=`expr $END2 / 512`
- if [ "${PCBIOS}" = "1" ]; then
- dd if=${STAGING_DATADIR}/syslinux/mbr.bin of=$IMAGE conv=notrunc
- fi
-
- dd if=$HDDIMG of=$IMAGE conv=notrunc seek=1 bs=512
- dd if=${ROOTFS} of=$IMAGE conv=notrunc seek=$OFFSET bs=512
-
- cd ${IMGDEPLOYDIR}
-
- ln -sf ${IMAGE_NAME}.hdddirect ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.hdddirect
-}
-
-python do_bootdirectdisk() {
- validate_disk_signature(d)
- set_live_vm_vars(d, 'VM')
- if d.getVar("PCBIOS") == "1":
- bb.build.exec_func('build_syslinux_cfg', d)
- if d.getVar("EFI") == "1":
- bb.build.exec_func('build_efi_cfg', d)
- bb.build.exec_func('build_boot_dd', d)
-}
-
-def generate_disk_signature():
- import uuid
-
- signature = str(uuid.uuid4())[:8]
-
- if signature != '00000000':
- return signature
- else:
- return 'ffffffff'
-
-def validate_disk_signature(d):
- import re
-
- disk_signature = d.getVar("DISK_SIGNATURE")
-
- if not re.match(r'^[0-9a-fA-F]{8}$', disk_signature):
- bb.fatal("DISK_SIGNATURE '%s' must be an 8 digit hex string" % disk_signature)
-
-DISK_SIGNATURE_GENERATED := "${@generate_disk_signature()}"
-
-run_qemu_img (){
- type="$1"
- qemu-img convert -O $type ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.hdddirect ${IMGDEPLOYDIR}/${IMAGE_NAME}.$type
-
- ln -sf ${IMAGE_NAME}.$type ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.$type
-}
-create_vmdk_image () {
- run_qemu_img vmdk
-}
-
-create_vdi_image () {
- run_qemu_img vdi
-}
-
-create_qcow2_image () {
- run_qemu_img qcow2
-}
-
-python do_vmimg() {
- if 'vmdk' in d.getVar('IMAGE_FSTYPES'):
- bb.build.exec_func('create_vmdk_image', d)
- if 'vdi' in d.getVar('IMAGE_FSTYPES'):
- bb.build.exec_func('create_vdi_image', d)
- if 'qcow2' in d.getVar('IMAGE_FSTYPES'):
- bb.build.exec_func('create_qcow2_image', d)
-}
-
-addtask bootdirectdisk before do_vmimg
-addtask vmimg after do_bootdirectdisk before do_image_complete
-do_vmimg[depends] += "qemu-native:do_populate_sysroot"
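For reference, the awk/sed/dd sequence in the removed build_boot_dd simply writes DISK_SIGNATURE as a little-endian 32-bit value at byte offset 440 of the MBR (the sed call reverses the four hex byte pairs). An equivalent Python sketch:

    import struct

    def write_disk_signature(image_path, signature_hex):
        # e.g. write_disk_signature('image.hdddirect', '1234abcd')
        sig = int(signature_hex, 16)
        with open(image_path, 'r+b') as f:
            f.seek(440)                      # MBR disk-signature offset
            f.write(struct.pack('<I', sig))  # 4 bytes, little-endian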
diff --git a/import-layers/yocto-poky/meta/classes/image.bbclass b/import-layers/yocto-poky/meta/classes/image.bbclass
index 4bcfb87c9..d88ce5c07 100644
--- a/import-layers/yocto-poky/meta/classes/image.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image.bbclass
@@ -9,7 +9,7 @@ TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks; "
-LICENSE = "MIT"
+LICENSE ?= "MIT"
PACKAGES = ""
DEPENDS += "${MLPREFIX}qemuwrapper-cross depmodwrapper-cross"
RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL}"
@@ -33,7 +33,7 @@ ROOTFS_BOOTSTRAP_INSTALL = "run-postinsts"
# These packages will be removed from a read-only rootfs after all other
# packages have been installed
-ROOTFS_RO_UNNEEDED = "update-rc.d base-passwd shadow ${VIRTUAL-RUNTIME_update-alternatives} ${ROOTFS_BOOTSTRAP_INSTALL}"
+ROOTFS_RO_UNNEEDED ??= "update-rc.d base-passwd shadow ${VIRTUAL-RUNTIME_update-alternatives} ${ROOTFS_BOOTSTRAP_INSTALL}"
# packages to install from features
FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
@@ -85,7 +85,6 @@ PID = "${@os.getpid()}"
PACKAGE_ARCH = "${MACHINE_ARCH}"
LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
-LDCONFIGDEPEND_libc-uclibc = ""
LDCONFIGDEPEND_libc-musl = ""
# This is needed to have depmod data in PKGDATA_DIR,
@@ -118,7 +117,7 @@ def rootfs_variables(d):
'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS',
'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
- 'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY']
+ 'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS']
variables.extend(rootfs_command_variables(d))
variables.extend(variable_depends(d))
return " ".join(variables)
@@ -139,9 +138,6 @@ def build_live(d):
IMAGE_TYPE_live = "${@build_live(d)}"
inherit ${IMAGE_TYPE_live}
-IMAGE_TYPE_vm = '${@bb.utils.contains_any("IMAGE_FSTYPES", ["vmdk", "vdi", "qcow2", "hdddirect"], "image-vm", "", d)}'
-inherit ${IMAGE_TYPE_vm}
-
IMAGE_TYPE_container = '${@bb.utils.contains("IMAGE_FSTYPES", "container", "image-container", "", d)}'
inherit ${IMAGE_TYPE_container}
@@ -149,14 +145,18 @@ IMAGE_TYPE_wic = "image_types_wic"
inherit ${IMAGE_TYPE_wic}
python () {
+ def extraimage_getdepends(task):
+ deps = ""
+ for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
+ deps += " %s:%s" % (dep, task)
+ return deps
+
+ d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_lic'))
+ d.appendVarFlag('do_image_complete', 'depends', extraimage_getdepends('do_populate_sysroot'))
+
deps = " " + imagetypes_getdepends(d)
d.appendVarFlag('do_rootfs', 'depends', deps)
- deps = ""
- for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
- deps += " %s:do_populate_sysroot" % dep
- d.appendVarFlag('do_image_complete', 'depends', deps)
-
#process IMAGE_FEATURES, we must do this before runtime_mapping_rename
#Check for replaces image features
features = set(oe.data.typed_value('IMAGE_FEATURES', d))
@@ -254,6 +254,7 @@ fakeroot python do_rootfs () {
progress_reporter.next_stage()
# generate rootfs
+ d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1')
create_rootfs(d, progress_reporter=progress_reporter, logcatcher=logcatcher)
progress_reporter.finish()
@@ -261,18 +262,19 @@ fakeroot python do_rootfs () {
do_rootfs[dirs] = "${TOPDIR}"
do_rootfs[cleandirs] += "${S} ${IMGDEPLOYDIR}"
do_rootfs[umask] = "022"
-addtask rootfs before do_build after do_prepare_recipe_sysroot
+addtask rootfs after do_prepare_recipe_sysroot
fakeroot python do_image () {
from oe.utils import execute_pre_post_process
+ d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1')
pre_process_cmds = d.getVar("IMAGE_PREPROCESS_COMMAND")
execute_pre_post_process(d, pre_process_cmds)
}
do_image[dirs] = "${TOPDIR}"
do_image[umask] = "022"
-addtask do_image after do_rootfs before do_build
+addtask do_image after do_rootfs
fakeroot python do_image_complete () {
from oe.utils import execute_pre_post_process
@@ -289,14 +291,21 @@ do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
do_image_complete[stamp-extra-info] = "${MACHINE}"
addtask do_image_complete after do_image before do_build
+python do_image_complete_setscene () {
+ sstate_setscene(d)
+}
+addtask do_image_complete_setscene
# Add image-level QA/sanity checks to IMAGE_QA_COMMANDS
#
# IMAGE_QA_COMMANDS += " \
# image_check_everything_ok \
# "
-# This task runs all functions in IMAGE_QA_COMMANDS after the image
+# This task runs all functions in IMAGE_QA_COMMANDS after the rootfs
# construction has completed in order to validate the resulting image.
+#
+# The functions should use ${IMAGE_ROOTFS} to find the unpacked rootfs
+# directory, which if QA passes will be the basis for the images.
fakeroot python do_image_qa () {
from oe.utils import ImageQAFailed
@@ -318,7 +327,16 @@ fakeroot python do_image_qa () {
imgname = d.getVar('IMAGE_NAME')
bb.fatal("QA errors found whilst validating image: %s\n%s" % (imgname, qamsg))
}
-addtask do_image_qa after do_image_complete before do_build
+addtask do_image_qa after do_rootfs before do_image
+
+SSTATETASKS += "do_image_qa"
+SSTATE_SKIP_CREATION_task-image-qa = '1'
+do_image_qa[sstate-inputdirs] = ""
+do_image_qa[sstate-outputdirs] = ""
+python do_image_qa_setscene () {
+ sstate_setscene(d)
+}
+addtask do_image_qa_setscene
def setup_debugfs_variables(d):
d.appendVar('IMAGE_ROOTFS', '-dbg')
@@ -426,7 +444,11 @@ python () {
# Expand PV else it can trigger get_srcrev which can fail due to these variables being unset
localdata.setVar('PV', d.getVar('PV'))
localdata.delVar('DATETIME')
+ localdata.delVar('DATE')
localdata.delVar('TMPDIR')
+ vardepsexclude = (d.getVarFlag('IMAGE_CMD_' + realt, 'vardepsexclude', True) or '').split()
+ for dep in vardepsexclude:
+ localdata.delVar(dep)
image_cmd = localdata.getVar("IMAGE_CMD")
vardeps.add('IMAGE_CMD_' + realt)
@@ -480,19 +502,20 @@ python () {
for dep in typedeps[t]:
after += ' do_image_%s' % dep.replace("-", "_").replace(".", "_")
- t = t.replace("-", "_").replace(".", "_")
+ task = "do_image_%s" % t.replace("-", "_").replace(".", "_")
+
+ d.setVar(task, '\n'.join(cmds))
+ d.setVarFlag(task, 'func', '1')
+ d.setVarFlag(task, 'fakeroot', '1')
- d.setVar('do_image_%s' % t, '\n'.join(cmds))
- d.setVarFlag('do_image_%s' % t, 'func', '1')
- d.setVarFlag('do_image_%s' % t, 'fakeroot', '1')
- d.setVarFlag('do_image_%s' % t, 'prefuncs', debug + 'set_image_size')
- d.setVarFlag('do_image_%s' % t, 'postfuncs', 'create_symlinks')
- d.setVarFlag('do_image_%s' % t, 'subimages', ' '.join(subimages))
- d.appendVarFlag('do_image_%s' % t, 'vardeps', ' '.join(vardeps))
- d.appendVarFlag('do_image_%s' % t, 'vardepsexclude', 'DATETIME')
+ d.appendVarFlag(task, 'prefuncs', ' ' + debug + ' set_image_size')
+ d.prependVarFlag(task, 'postfuncs', ' create_symlinks')
+ d.appendVarFlag(task, 'subimages', ' ' + ' '.join(subimages))
+ d.appendVarFlag(task, 'vardeps', ' ' + ' '.join(vardeps))
+ d.appendVarFlag(task, 'vardepsexclude', 'DATETIME DATE ' + ' '.join(vardepsexclude))
- bb.debug(2, "Adding type %s before %s, after %s" % (t, 'do_image_complete', after))
- bb.build.addtask('do_image_%s' % t, 'do_image_complete', after, d)
+ bb.debug(2, "Adding task %s before %s, after %s" % (task, 'do_image_complete', after))
+ bb.build.addtask(task, 'do_image_complete', after, d)
}
#
@@ -598,3 +621,46 @@ do_package_write_ipk[noexec] = "1"
do_package_write_deb[noexec] = "1"
do_package_write_rpm[noexec] = "1"
+# Prepare the root links to point to the /usr counterparts.
+create_merged_usr_symlinks() {
+ root="$1"
+ install -d $root${base_bindir} $root${base_sbindir} $root${base_libdir}
+ lnr $root${base_bindir} $root/bin
+ lnr $root${base_sbindir} $root/sbin
+ lnr $root${base_libdir} $root/${baselib}
+
+ if [ "${nonarch_base_libdir}" != "${base_libdir}" ]; then
+ install -d $root${nonarch_base_libdir}
+ lnr $root${nonarch_base_libdir} $root/lib
+ fi
+
+ # create base links for multilibs
+ multi_libdirs="${@d.getVar('MULTILIB_VARIANTS')}"
+ for d in $multi_libdirs; do
+ install -d $root${exec_prefix}/$d
+ lnr $root${exec_prefix}/$d $root/$d
+ done
+}
+
+create_merged_usr_symlinks_rootfs() {
+ create_merged_usr_symlinks ${IMAGE_ROOTFS}
+}
+
+create_merged_usr_symlinks_sdk() {
+ create_merged_usr_symlinks ${SDK_OUTPUT}${SDKTARGETSYSROOT}
+}
+
+ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_rootfs; ', '',d)}"
+POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk; ', '',d)}"
+
+reproducible_final_image_task () {
+ if [ "$BUILD_REPRODUCIBLE_BINARIES" = "1" ]; then
+ if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
+ REPRODUCIBLE_TIMESTAMP_ROOTFS=`git log -1 --pretty=%ct`
+ fi
+ # Set mtime of all files to a reproducible value
+ bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS"
+ find ${IMAGE_ROOTFS} -exec touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS {} \;
+ fi
+}
+IMAGE_PREPROCESS_COMMAND_append = " reproducible_final_image_task; "
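The refactored anonymous python above also generalizes EXTRA_IMAGEDEPENDS handling: each entry is expanded into a "recipe:task" pair, now attached to both do_image (do_populate_lic) and do_image_complete (do_populate_sysroot). A standalone sketch of that expansion:

    # Mirrors extraimage_getdepends() above, outside the BitBake datastore.
    def extraimage_getdepends(extra_imagedepends, task):
        return ''.join(' %s:%s' % (dep, task)
                       for dep in extra_imagedepends.split())

    assert (extraimage_getdepends('u-boot virtual/kernel', 'do_populate_sysroot')
            == ' u-boot:do_populate_sysroot virtual/kernel:do_populate_sysroot')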
diff --git a/import-layers/yocto-poky/meta/classes/image_types.bbclass b/import-layers/yocto-poky/meta/classes/image_types.bbclass
index 8db18ac5a..e881d0cc2 100644
--- a/import-layers/yocto-poky/meta/classes/image_types.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image_types.bbclass
@@ -26,20 +26,31 @@ def imagetypes_getdepends(d):
fstypes = set((d.getVar('IMAGE_FSTYPES') or "").split())
fstypes |= set((d.getVar('IMAGE_FSTYPES_DEBUGFS') or "").split())
+ deprecated = set()
deps = set()
for typestring in fstypes:
basetype, resttypes = split_types(typestring)
- adddep(d.getVar('IMAGE_DEPENDS_%s' % basetype) , deps)
+
+ var = "IMAGE_DEPENDS_%s" % basetype
+ if d.getVar(var) is not None:
+ deprecated.add(var)
for typedepends in (d.getVar("IMAGE_TYPEDEP_%s" % basetype) or "").split():
base, rest = split_types(typedepends)
- adddep(d.getVar('IMAGE_DEPENDS_%s' % base) , deps)
resttypes += rest
+ var = "IMAGE_DEPENDS_%s" % base
+ if d.getVar(var) is not None:
+ deprecated.add(var)
+
for ctype in resttypes:
adddep(d.getVar("CONVERSION_DEPENDS_%s" % ctype), deps)
adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype), deps)
+ if deprecated:
+ bb.fatal('Deprecated variable(s) found: "%s". '
+ 'Use do_image_<type>[depends] += "<recipe>:<task>" instead' % ', '.join(deprecated))
+
# Sort the set so that ordering is consistent
return " ".join(sorted(deps))
@@ -72,7 +83,11 @@ oe_mkext234fs () {
eval COUNT=\"$MIN_COUNT\"
fi
# Create a sparse image block
+ bbdebug 1 Executing "dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024"
dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
+ bbdebug 1 "Actual Rootfs size: `du -s ${IMAGE_ROOTFS}`"
+	bbdebug 1 "Actual Partition size: `ls -s ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype`"
+ bbdebug 1 Executing "mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}"
mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}
# Error codes 0-3 indicate successful operation of fsck (no errors or errors corrected)
fsck.$fstype -pvfD ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype || [ $? -le 3 ]
@@ -89,26 +104,28 @@ IMAGE_CMD_btrfs () {
size=${MIN_BTRFS_SIZE}
bbwarn "Rootfs size is too small for BTRFS. Filesystem will be extended to ${size}K"
fi
- dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs count=${size} bs=1024
+ dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs seek=${size} count=0 bs=1024
mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
}
IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
IMAGE_CMD_squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
+IMAGE_CMD_squashfs-lz4 = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lz4 ${EXTRA_IMAGECMD} -noappend -comp lz4"
# By default, tar from the host is used, which can be quite old. If
# you need special parameters (like --xattrs) which are only supported
# by GNU tar upstream >= 1.27, then override that default:
# IMAGE_CMD_TAR = "tar --xattrs --xattrs-include=*"
-# IMAGE_DEPENDS_tar_append = " tar-replacement-native"
+# do_image_tar[depends] += "tar-replacement-native:do_populate_sysroot"
# EXTRANATIVEPATH += "tar-native"
#
# The GNU documentation does not specify whether --xattrs-include is necessary.
# In practice, it turned out to be not needed when creating archives and
# required when extracting, but it seems prudent to use it in both cases.
IMAGE_CMD_TAR ?= "tar"
-IMAGE_CMD_tar = "${IMAGE_CMD_TAR} -cvf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} ."
+# Ignore return code 1 ("file changed as we read it") since other tasks (e.g. do_image_wic) may be hardlinking the rootfs
+IMAGE_CMD_tar = "${IMAGE_CMD_TAR} -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
IMAGE_CMD_cpio () {
@@ -135,7 +152,7 @@ ELF_APPEND ?= "ramdisk_size=32768 root=/dev/ram0 rw console="
IMAGE_CMD_elf () {
test -f ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf && rm -f ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf
- mkelfImage --kernel=${ELF_KERNEL} --initrd=${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.cpio.gz --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf --append='${ELF_APPEND}' ${EXTRA_IMAGECMD}
+ mkelfImage --kernel=${ELF_KERNEL} --initrd=${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.cpio.gz --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf --append='${ELF_APPEND}' ${EXTRA_IMAGECMD}
}
IMAGE_TYPEDEP_elf = "cpio.gz"
@@ -145,6 +162,12 @@ UBI_VOLNAME ?= "${MACHINE}-rootfs"
multiubi_mkfs() {
local mkubifs_args="$1"
local ubinize_args="$2"
+
+	# Fail with a clear error message if the ubi/ubifs arguments are missing.
+	if [ -z "$mkubifs_args" ] || [ -z "$ubinize_args" ]; then
+ bbfatal "MKUBIFS_ARGS and UBINIZE_ARGS have to be set, see http://www.linux-mtd.infradead.org/faq/ubifs.html for details"
+ fi
+
if [ -z "$3" ]; then
local vname=""
else
@@ -209,21 +232,20 @@ EXTRA_IMAGECMD_ext4 ?= "-i 4096"
EXTRA_IMAGECMD_btrfs ?= "-n 4096"
EXTRA_IMAGECMD_elf ?= ""
-IMAGE_DEPENDS = ""
-IMAGE_DEPENDS_jffs2 = "mtd-utils-native"
-IMAGE_DEPENDS_cramfs = "util-linux-native"
-IMAGE_DEPENDS_ext2 = "e2fsprogs-native"
-IMAGE_DEPENDS_ext3 = "e2fsprogs-native"
-IMAGE_DEPENDS_ext4 = "e2fsprogs-native"
-IMAGE_DEPENDS_btrfs = "btrfs-tools-native"
-IMAGE_DEPENDS_squashfs = "squashfs-tools-native"
-IMAGE_DEPENDS_squashfs-xz = "squashfs-tools-native"
-IMAGE_DEPENDS_squashfs-lzo = "squashfs-tools-native"
-IMAGE_DEPENDS_elf = "virtual/kernel mkelfimage-native"
-IMAGE_DEPENDS_ubi = "mtd-utils-native"
-IMAGE_DEPENDS_ubifs = "mtd-utils-native"
-IMAGE_DEPENDS_multiubi = "mtd-utils-native"
-IMAGE_DEPENDS_wic = "parted-native"
+do_image_jffs2[depends] += "mtd-utils-native:do_populate_sysroot"
+do_image_cramfs[depends] += "util-linux-native:do_populate_sysroot"
+do_image_ext2[depends] += "e2fsprogs-native:do_populate_sysroot"
+do_image_ext3[depends] += "e2fsprogs-native:do_populate_sysroot"
+do_image_ext4[depends] += "e2fsprogs-native:do_populate_sysroot"
+do_image_btrfs[depends] += "btrfs-tools-native:do_populate_sysroot"
+do_image_squashfs[depends] += "squashfs-tools-native:do_populate_sysroot"
+do_image_squashfs_xz[depends] += "squashfs-tools-native:do_populate_sysroot"
+do_image_squashfs_lzo[depends] += "squashfs-tools-native:do_populate_sysroot"
+do_image_squashfs_lz4[depends] += "squashfs-tools-native:do_populate_sysroot"
+do_image_elf[depends] += "virtual/kernel:do_populate_sysroot mkelfimage-native:do_populate_sysroot"
+do_image_ubi[depends] += "mtd-utils-native:do_populate_sysroot"
+do_image_ubifs[depends] += "mtd-utils-native:do_populate_sysroot"
+do_image_multiubi[depends] += "mtd-utils-native:do_populate_sysroot"
# This variable is available to request which values are suitable for IMAGE_FSTYPES
IMAGE_TYPES = " \
@@ -235,14 +257,10 @@ IMAGE_TYPES = " \
btrfs \
iso \
hddimg \
- squashfs squashfs-xz squashfs-lzo \
+ squashfs squashfs-xz squashfs-lzo squashfs-lz4 \
ubi ubifs multiubi \
tar tar.gz tar.bz2 tar.xz tar.lz4 \
cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 \
- vmdk \
- vdi \
- qcow2 \
- hdddirect \
elf \
wic wic.gz wic.bz2 wic.lzma \
container \
@@ -254,9 +272,9 @@ IMAGE_TYPES = " \
# CONVERSION_CMD/DEPENDS.
COMPRESSIONTYPES ?= ""
-CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot ${COMPRESSIONTYPES}"
+CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vdi qcow2 ${COMPRESSIONTYPES}"
CONVERSION_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_gz = "gzip -f -9 -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
+CONVERSION_CMD_gz = "gzip -f -9 -n -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
CONVERSION_CMD_bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
CONVERSION_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_THREADS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
CONVERSION_CMD_lz4 = "lz4 -9 -z ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
@@ -272,6 +290,9 @@ CONVERSION_CMD_sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}
CONVERSION_CMD_sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
CONVERSION_CMD_bmap = "bmaptool create ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.bmap"
CONVERSION_CMD_u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.u-boot"
+CONVERSION_CMD_vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vmdk"
+CONVERSION_CMD_vdi = "qemu-img convert -O vdi ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vdi"
+CONVERSION_CMD_qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.qcow2"
CONVERSION_DEPENDS_lzma = "xz-native"
CONVERSION_DEPENDS_gz = "pigz-native"
CONVERSION_DEPENDS_bz2 = "pbzip2-native"
@@ -282,15 +303,18 @@ CONVERSION_DEPENDS_zip = "zip-native"
CONVERSION_DEPENDS_sum = "mtd-utils-native"
CONVERSION_DEPENDS_bmap = "bmap-tools-native"
CONVERSION_DEPENDS_u-boot = "u-boot-mkimage-native"
+CONVERSION_DEPENDS_vmdk = "qemu-native"
+CONVERSION_DEPENDS_vdi = "qemu-native"
+CONVERSION_DEPENDS_qcow2 = "qemu-native"
RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4"
RUNNABLE_MACHINE_PATTERNS ?= "qemu"
DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
-# Use IMAGE_EXTENSION_xxx to map image type 'xxx' with real image file extension name(s) for Hob
-IMAGE_EXTENSION_live = "hddimg iso"
-
# The IMAGE_TYPES_MASKED variable will be used to mask out from the IMAGE_FSTYPES,
# images that will not be built at do_rootfs time: vmdk, vdi, qcow2, hdddirect, hddimg, iso, etc.
IMAGE_TYPES_MASKED ?= ""
+
+# bmap requires python3 to be in the PATH
+EXTRANATIVEPATH += "${@'python3-native' if '.bmap' in d.getVar('IMAGE_FSTYPES') else ''}"
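vmdk, vdi and qcow2 move from standalone image types to conversion types, so IMAGE_FSTYPES entries such as "ext4.qcow2" now chain a qemu-img conversion onto the base image. A minimal Python sketch of what each of the new CONVERSION_CMD lines above runs, assuming qemu-img from qemu-native is on PATH:

    import subprocess

    def convert_image(src, fmt):
        # e.g. convert_image('core-image-minimal.rootfs.ext4', 'qcow2')
        dst = '%s.%s' % (src, fmt)
        subprocess.check_call(['qemu-img', 'convert', '-O', fmt, src, dst])
        return dst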
diff --git a/import-layers/yocto-poky/meta/classes/image_types_wic.bbclass b/import-layers/yocto-poky/meta/classes/image_types_wic.bbclass
index 68f251cfd..dcf620cee 100644
--- a/import-layers/yocto-poky/meta/classes/image_types_wic.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image_types_wic.bbclass
@@ -3,7 +3,7 @@
WICVARS ?= "\
BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE FAKEROOTCMD IMAGE_BASENAME IMAGE_BOOT_FILES \
IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD INITRD_LIVE ISODIR RECIPE_SYSROOT_NATIVE \
- ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS TRANSLATED_TARGET_ARCH"
+ ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS"
WKS_FILE ??= "${IMAGE_BASENAME}.${MACHINE}.wks"
WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks"
@@ -39,8 +39,19 @@ IMAGE_CMD_wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}"
WKS_FILE_CHECKSUM = "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}"
do_image_wic[file-checksums] += "${WKS_FILE_CHECKSUM}"
-do_image_wic[depends] += "wic-tools:do_populate_sysroot"
-WKS_FILE_DEPENDS ??= ''
+do_image_wic[depends] += "${@' '.join('%s-native:do_populate_sysroot' % r for r in ('parted', 'gptfdisk', 'dosfstools', 'mtools'))}"
+
+# Ensure all artifacts are deployed (e.g. virtual/bootloader)
+do_image_wic[recrdeptask] += "do_deploy"
+
+WKS_FILE_DEPENDS_DEFAULT = "syslinux-native bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native"
+WKS_FILE_DEPENDS_BOOTLOADERS = ""
+WKS_FILE_DEPENDS_BOOTLOADERS_x86 = "syslinux grub-efi systemd-boot"
+WKS_FILE_DEPENDS_BOOTLOADERS_x86-64 = "syslinux grub-efi systemd-boot"
+WKS_FILE_DEPENDS_BOOTLOADERS_x86-x32 = "syslinux grub-efi"
+
+WKS_FILE_DEPENDS ??= "${WKS_FILE_DEPENDS_DEFAULT} ${WKS_FILE_DEPENDS_BOOTLOADERS}"
+
DEPENDS += "${@ '${WKS_FILE_DEPENDS}' if d.getVar('USING_WIC') else '' }"
python do_write_wks_template () {
diff --git a/import-layers/yocto-poky/meta/classes/insane.bbclass b/import-layers/yocto-poky/meta/classes/insane.bbclass
index 0c11c3658..0a3b528dd 100644
--- a/import-layers/yocto-poky/meta/classes/insane.bbclass
+++ b/import-layers/yocto-poky/meta/classes/insane.bbclass
@@ -16,13 +16,8 @@
# into exec_prefix
# -Check that scripts in base_[bindir|sbindir|libdir] do not reference
# files under exec_prefix
+# -Check if the package name contains uppercase characters
-
-# unsafe-references-in-binaries requires prelink-rtld from
-# prelink-native, but we don't want this DEPENDS for -native builds
-QADEPENDS = "prelink-native"
-QADEPENDS_class-native = ""
-QADEPENDS_class-nativesdk = ""
QA_SANE = "True"
# Elect whether a given type of error is a warning or error, they may
@@ -32,7 +27,7 @@ WARN_QA ?= "ldflags useless-rpaths rpaths staticdev libdir xorg-driver-abi \
installed-vs-shipped compile-host-path install-host-path \
pn-overrides infodir build-deps \
unknown-configure-option symlink-to-sysroot multilib \
- invalid-packageconfig host-user-contaminated \
+ invalid-packageconfig host-user-contaminated uppercase-pn \
"
ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
@@ -40,6 +35,9 @@ ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
version-going-backwards expanded-d invalid-chars \
license-checksum dev-elf file-rdeps \
"
+# Add usrmerge QA check based on distro feature
+ERROR_QA_append = "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', ' usrmerge', '', d)}"
+
FAKEROOT_QA = "host-user-contaminated"
FAKEROOT_QA[doc] = "QA tests which need to run under fakeroot. If any \
enabled tests are listed here, the do_package_qa task will run under fakeroot."
@@ -103,23 +101,6 @@ def package_qa_get_machine_dict(d):
"microblazeeb":(189, 0, 0, False, 32),
"microblazeel":(189, 0, 0, True, 32),
},
- "linux-uclibc" : {
- "arm" : ( 40, 97, 0, True, 32),
- "armeb": ( 40, 97, 0, False, 32),
- "powerpc": ( 20, 0, 0, False, 32),
- "i386": ( 3, 0, 0, True, 32),
- "i486": ( 3, 0, 0, True, 32),
- "i586": ( 3, 0, 0, True, 32),
- "i686": ( 3, 0, 0, True, 32),
- "x86_64": ( 62, 0, 0, True, 64),
- "mips": ( 8, 0, 0, False, 32),
- "mipsel": ( 8, 0, 0, True, 32),
- "mips64": ( 8, 0, 0, False, 64),
- "mips64el": ( 8, 0, 0, True, 64),
- "avr32": (6317, 0, 0, False, 32),
- "sh4": (42, 0, 0, True, 32),
-
- },
"linux-musl" : {
"aarch64" : (183, 0, 0, True, 64),
"aarch64_be" :(183, 0, 0, False, 64),
@@ -151,19 +132,12 @@ def package_qa_get_machine_dict(d):
"arm" : (40, 0, 0, True, 32),
"armeb" : (40, 0, 0, False, 32),
},
- "linux-uclibceabi" : {
- "arm" : (40, 0, 0, True, 32),
- "armeb" : (40, 0, 0, False, 32),
- },
"linux-gnuspe" : {
"powerpc": (20, 0, 0, False, 32),
},
"linux-muslspe" : {
"powerpc": (20, 0, 0, False, 32),
},
- "linux-uclibcspe" : {
- "powerpc": (20, 0, 0, False, 32),
- },
"linux-gnu" : {
"powerpc": (20, 0, 0, False, 32),
"sh4": (42, 0, 0, True, 32),
@@ -171,6 +145,9 @@ def package_qa_get_machine_dict(d):
"linux-gnux32" : {
"x86_64": (62, 0, 0, True, 32),
},
+ "linux-muslx32" : {
+ "x86_64": (62, 0, 0, True, 32),
+ },
"linux-gnun32" : {
"mips64": ( 8, 0, 0, False, 32),
"mips64el": ( 8, 0, 0, True, 32),
@@ -207,12 +184,13 @@ def package_qa_write_error(type, error, d):
f.write("%s: %s [%s]\n" % (p, error, type))
def package_qa_handle_error(error_class, error_msg, d):
- package_qa_write_error(error_class, error_msg, d)
if error_class in (d.getVar("ERROR_QA") or "").split():
+ package_qa_write_error(error_class, error_msg, d)
bb.error("QA Issue: %s [%s]" % (error_msg, error_class))
d.setVar("QA_SANE", False)
return False
elif error_class in (d.getVar("WARN_QA") or "").split():
+ package_qa_write_error(error_class, error_msg, d)
bb.warn("QA Issue: %s [%s]" % (error_msg, error_class))
else:
bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
@@ -408,71 +386,6 @@ def package_qa_check_perm(path,name,d, elf, messages):
"""
return
-QAPATHTEST[unsafe-references-in-scripts] = "package_qa_check_unsafe_references_in_scripts"
-def package_qa_check_unsafe_references_in_scripts(path, name, d, elf, messages):
- """
- Warn if scripts in base_[bindir|sbindir|libdir] reference files under exec_prefix
- """
- if unsafe_references_skippable(path, name, d):
- return
-
- if not elf:
- import stat
- import subprocess
- pn = d.getVar('PN')
-
- # Ensure we're checking an executable script
- statinfo = os.stat(path)
- if bool(statinfo.st_mode & stat.S_IXUSR):
- # grep shell scripts for possible references to /exec_prefix/
- exec_prefix = d.getVar('exec_prefix')
- statement = "grep -e '%s/[^ :]\{1,\}/[^ :]\{1,\}' %s > /dev/null" % (exec_prefix, path)
- if subprocess.call(statement, shell=True) == 0:
- error_msg = pn + ": Found a reference to %s/ in %s" % (exec_prefix, path)
- package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
- error_msg = "Shell scripts in base_bindir and base_sbindir should not reference anything in exec_prefix"
- package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
-
-def unsafe_references_skippable(path, name, d):
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d):
- return True
-
- if "-dbg" in name or "-dev" in name:
- return True
-
- # Other package names to skip:
- if name.startswith("kernel-module-"):
- return True
-
- # Skip symlinks
- if os.path.islink(path):
- return True
-
- # Skip unusual rootfs layouts which make these tests irrelevant
- exec_prefix = d.getVar('exec_prefix')
- if exec_prefix == "":
- return True
-
- pkgdest = d.getVar('PKGDEST')
- pkgdest = pkgdest + "/" + name
- pkgdest = os.path.abspath(pkgdest)
- base_bindir = pkgdest + d.getVar('base_bindir')
- base_sbindir = pkgdest + d.getVar('base_sbindir')
- base_libdir = pkgdest + d.getVar('base_libdir')
- bindir = pkgdest + d.getVar('bindir')
- sbindir = pkgdest + d.getVar('sbindir')
- libdir = pkgdest + d.getVar('libdir')
-
- if base_bindir == bindir and base_sbindir == sbindir and base_libdir == libdir:
- return True
-
- # Skip files not in base_[bindir|sbindir|libdir]
- path = os.path.abspath(path)
- if not (base_bindir in path or base_sbindir in path or base_libdir in path):
- return True
-
- return False
-
QAPATHTEST[arch] = "package_qa_check_arch"
def package_qa_check_arch(path,name,d, elf, messages):
"""
@@ -509,7 +422,7 @@ def package_qa_check_arch(path,name,d, elf, messages):
# Check the architecture and endianness of the binary
is_32 = (("virtual/kernel" in provides) or bb.data.inherits_class("module", d)) and \
- (target_os == "linux-gnux32" or re.match('mips64.*32', d.getVar('DEFAULTTUNE')))
+ (target_os == "linux-gnux32" or target_os == "linux-muslx32" or re.match('mips64.*32', d.getVar('DEFAULTTUNE')))
if not ((machine == elf.machine()) or is_32):
package_qa_add_message(messages, "arch", "Architecture did not match (%s, expected %s) on %s" % \
(oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path,d)))
@@ -677,7 +590,7 @@ python populate_lic_qa_checksum() {
sane = package_qa_handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
srcdir = d.getVar('S')
-
+ corebase_licensefile = d.getVar('COREBASE') + "/LICENSE"
for url in lic_files.split():
try:
(type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
@@ -689,6 +602,9 @@ python populate_lic_qa_checksum() {
package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
continue
+ if (srclicfile == corebase_licensefile):
+            bb.warn("${COREBASE}/LICENSE is not a valid license file, please use '${COMMON_LICENSE_DIR}/MIT' for an MIT license file in LIC_FILES_CHKSUM. This will become an error in the future")
+
recipemd5 = parm.get('md5', '')
beginline, endline = 0, 0
if 'beginline' in parm:
@@ -816,7 +732,7 @@ def package_qa_check_staged(path,d):
return sane
# Run all package-wide warnfuncs and errorfuncs
-def package_qa_package(warnfuncs, errorfuncs, skip, package, d):
+def package_qa_package(warnfuncs, errorfuncs, package, d):
warnings = {}
errors = {}
@@ -832,8 +748,25 @@ def package_qa_package(warnfuncs, errorfuncs, skip, package, d):
return len(errors) == 0
+# Run all recipe-wide warnfuncs and errorfuncs
+def package_qa_recipe(warnfuncs, errorfuncs, pn, d):
+ warnings = {}
+ errors = {}
+
+ for func in warnfuncs:
+ func(pn, d, warnings)
+ for func in errorfuncs:
+ func(pn, d, errors)
+
+ for w in warnings:
+ package_qa_handle_error(w, warnings[w], d)
+ for e in errors:
+ package_qa_handle_error(e, errors[e], d)
+
+ return len(errors) == 0
+
# Walk over all files in a directory and call func
-def package_qa_walk(warnfuncs, errorfuncs, skip, package, d):
+def package_qa_walk(warnfuncs, errorfuncs, package, d):
import oe.qa
#if this will throw an exception, then fix the dict above
@@ -973,8 +906,9 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
error_msg = "%s contained in package %s requires %s, but no providers found in RDEPENDS_%s?" % \
(filerdepends[key].replace("_%s" % pkg, "").replace("@underscore@", "_"), pkg, key, pkg)
package_qa_handle_error("file-rdeps", error_msg, d)
+package_qa_check_rdepends[vardepsexclude] = "OVERRIDES"
-def package_qa_check_deps(pkg, pkgdest, skip, d):
+def package_qa_check_deps(pkg, pkgdest, d):
localdata = bb.data.createCopy(d)
localdata.setVar('OVERRIDES', pkg)
@@ -997,6 +931,18 @@ def package_qa_check_deps(pkg, pkgdest, skip, d):
check_valid_deps('RREPLACES')
check_valid_deps('RCONFLICTS')
+QAPKGTEST[usrmerge] = "package_qa_check_usrmerge"
+def package_qa_check_usrmerge(pkg, d, messages):
+ pkgdest = d.getVar('PKGDEST')
+ pkg_dir = pkgdest + os.sep + pkg + os.sep
+ merged_dirs = ['bin', 'sbin', 'lib'] + d.getVar('MULTILIB_VARIANTS').split()
+ for f in merged_dirs:
+ if os.path.exists(pkg_dir + f) and not os.path.islink(pkg_dir + f):
+ msg = "%s package is not obeying usrmerge distro feature. /%s should be relocated to /usr." % (pkg, f)
+ package_qa_add_message(messages, "usrmerge", msg)
+ return False
+ return True
+
QAPKGTEST[expanded-d] = "package_qa_check_expanded_d"
def package_qa_check_expanded_d(package, d, messages):
"""
@@ -1070,6 +1016,7 @@ def package_qa_check_host_user(path, name, d, elf, messages):
return False
return True
+
# The PACKAGE FUNC to scan each package
python do_package_qa () {
import subprocess
@@ -1083,7 +1030,7 @@ python do_package_qa () {
package_qa_check_encoding(['DESCRIPTION', 'SUMMARY', 'LICENSE', 'SECTION'], 'utf-8', d)
logdir = d.getVar('T')
- pkg = d.getVar('PN')
+ pn = d.getVar('PN')
# Check the compile log for host contamination
compilelog = os.path.join(logdir,"log.do_compile")
@@ -1092,7 +1039,7 @@ python do_package_qa () {
statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % compilelog
if subprocess.call(statement, shell=True) == 0:
msg = "%s: The compile log indicates that host include and/or library paths were used.\n \
- Please check the log '%s' for more information." % (pkg, compilelog)
+ Please check the log '%s' for more information." % (pn, compilelog)
package_qa_handle_error("compile-host-path", msg, d)
# Check the install log for host contamination
@@ -1102,7 +1049,7 @@ python do_package_qa () {
statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % installlog
if subprocess.call(statement, shell=True) == 0:
msg = "%s: The install log indicates that host include and/or library paths were used.\n \
- Please check the log '%s' for more information." % (pkg, installlog)
+ Please check the log '%s' for more information." % (pn, installlog)
package_qa_handle_error("install-host-path", msg, d)
# Scan the packages...
@@ -1131,35 +1078,30 @@ python do_package_qa () {
for dep in taskdepdata:
taskdeps.add(taskdepdata[dep][0])
- for package in packages:
- def parse_test_matrix(matrix_name):
- testmatrix = d.getVarFlags(matrix_name) or {}
- g = globals()
- warnchecks = []
- for w in (d.getVar("WARN_QA") or "").split():
- if w in skip:
- continue
- if w in testmatrix and testmatrix[w] in g:
- warnchecks.append(g[testmatrix[w]])
- if w == 'unsafe-references-in-binaries':
- oe.utils.write_ld_so_conf(d)
-
- errorchecks = []
- for e in (d.getVar("ERROR_QA") or "").split():
- if e in skip:
- continue
- if e in testmatrix and testmatrix[e] in g:
- errorchecks.append(g[testmatrix[e]])
- if e == 'unsafe-references-in-binaries':
- oe.utils.write_ld_so_conf(d)
- return warnchecks, errorchecks
+ def parse_test_matrix(matrix_name):
+ testmatrix = d.getVarFlags(matrix_name) or {}
+ g = globals()
+ warnchecks = []
+ for w in (d.getVar("WARN_QA") or "").split():
+ if w in skip:
+ continue
+ if w in testmatrix and testmatrix[w] in g:
+ warnchecks.append(g[testmatrix[w]])
+
+ errorchecks = []
+ for e in (d.getVar("ERROR_QA") or "").split():
+ if e in skip:
+ continue
+ if e in testmatrix and testmatrix[e] in g:
+ errorchecks.append(g[testmatrix[e]])
+ return warnchecks, errorchecks
+ for package in packages:
skip = set((d.getVar('INSANE_SKIP') or "").split() +
(d.getVar('INSANE_SKIP_' + package) or "").split())
if skip:
bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
-
bb.note("Checking Package: %s" % package)
# Check package name
if not pkgname_pattern.match(package):
@@ -1167,13 +1109,16 @@ python do_package_qa () {
"%s doesn't match the [a-z0-9.+-]+ regex" % package, d)
warn_checks, error_checks = parse_test_matrix("QAPATHTEST")
- package_qa_walk(warn_checks, error_checks, skip, package, d)
+ package_qa_walk(warn_checks, error_checks, package, d)
warn_checks, error_checks = parse_test_matrix("QAPKGTEST")
- package_qa_package(warn_checks, error_checks, skip, package, d)
+ package_qa_package(warn_checks, error_checks, package, d)
package_qa_check_rdepends(package, pkgdest, skip, taskdeps, packages, d)
- package_qa_check_deps(package, pkgdest, skip, d)
+ package_qa_check_deps(package, pkgdest, d)
+
+ warn_checks, error_checks = parse_test_matrix("QARECIPETEST")
+ package_qa_recipe(warn_checks, error_checks, pn, d)
if 'libdir' in d.getVar("ALL_QA").split():
package_qa_check_libdir(d)
@@ -1238,12 +1183,10 @@ Rerun configure task after fixing this.""")
cnf = d.getVar('EXTRA_OECONF') or ""
if "gettext" not in d.getVar('P') and "gcc-runtime" not in d.getVar('P') and "--disable-nls" not in cnf:
ml = d.getVar("MLPREFIX") or ""
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk', d):
- gt = "gettext-native"
- elif bb.data.inherits_class('cross-canadian', d):
+ if bb.data.inherits_class('cross-canadian', d):
gt = "nativesdk-gettext"
else:
- gt = "virtual/" + ml + "gettext"
+ gt = "gettext-native"
deps = bb.utils.explode_deps(d.getVar('DEPENDS') or "")
if gt not in deps:
for config in configs:
@@ -1308,6 +1251,8 @@ do_configure[postfuncs] += "do_qa_configure "
do_unpack[postfuncs] += "do_qa_unpack"
python () {
+ import re
+
tests = d.getVar('ALL_QA').split()
if "desktop" in tests:
d.appendVar("PACKAGE_DEPENDS", " desktop-file-utils-native")
@@ -1334,6 +1279,9 @@ python () {
if pn in overrides:
msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE"), pn)
package_qa_handle_error("pn-overrides", msg, d)
+ prog = re.compile('[A-Z]')
+ if prog.search(pn):
+        package_qa_handle_error("uppercase-pn", 'PN: %s contains uppercase characters, this can result in unexpected behavior.' % pn, d)
issues = []
if (d.getVar('PACKAGES') or "").split():
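package_qa_handle_error (reworked above) now writes an issue to the QA log only when its class is actually promoted to a warning or error; plain notes are no longer recorded. A simplified sketch of that dispatch, using plain lists in place of the datastore:

    def handle_qa_issue(error_class, msg, error_qa, warn_qa, log):
        # error_qa/warn_qa stand in for the ERROR_QA / WARN_QA variables
        if error_class in error_qa:
            log.append(('error', msg))
            return False              # caller sets QA_SANE to False
        if error_class in warn_qa:
            log.append(('warn', msg))
        return True                   # notes are not written to the log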
diff --git a/import-layers/yocto-poky/meta/classes/kernel-devicetree.bbclass b/import-layers/yocto-poky/meta/classes/kernel-devicetree.bbclass
new file mode 100644
index 000000000..6e08be4b7
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/kernel-devicetree.bbclass
@@ -0,0 +1,112 @@
+# Support for device tree generation
+PACKAGES_append = " \
+ kernel-devicetree \
+ ${@['kernel-image-zimage-bundle', ''][d.getVar('KERNEL_DEVICETREE_BUNDLE') != '1']} \
+"
+FILES_kernel-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
+FILES_kernel-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
+
+# Generate kernel+devicetree bundle
+KERNEL_DEVICETREE_BUNDLE ?= "0"
+
+normalize_dtb () {
+ DTB="$1"
+ if echo ${DTB} | grep -q '/dts/'; then
+		bbwarn "${DTB} contains the full path to the dts file, but only the dtb name should be used."
+ DTB=`basename ${DTB} | sed 's,\.dts$,.dtb,g'`
+ fi
+ echo "${DTB}"
+}
+
+get_real_dtb_path_in_kernel () {
+ DTB="$1"
+ DTB_PATH="${B}/arch/${ARCH}/boot/dts/${DTB}"
+ if [ ! -e "${DTB_PATH}" ]; then
+ DTB_PATH="${B}/arch/${ARCH}/boot/${DTB}"
+ fi
+ echo "${DTB_PATH}"
+}
+
+do_configure_append() {
+ if [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
+ if echo ${KERNEL_IMAGETYPE_FOR_MAKE} | grep -q 'zImage'; then
+ case "${ARCH}" in
+ "arm")
+ config="${B}/.config"
+ if ! grep -q 'CONFIG_ARM_APPENDED_DTB=y' $config; then
+ bbwarn 'CONFIG_ARM_APPENDED_DTB is NOT enabled in the kernel. Enabling it to allow the kernel to boot with the Device Tree appended!'
+ sed -i "/CONFIG_ARM_APPENDED_DTB[ =]/d" $config
+ echo "CONFIG_ARM_APPENDED_DTB=y" >> $config
+ echo "# CONFIG_ARM_ATAG_DTB_COMPAT is not set" >> $config
+ fi
+ ;;
+ *)
+ bberror "KERNEL_DEVICETREE_BUNDLE is not supported for ${ARCH}. Currently it is only supported for 'ARM'."
+ esac
+ else
+			bberror 'KERNEL_DEVICETREE_BUNDLE requires KERNEL_IMAGETYPE to contain zImage.'
+ fi
+ fi
+}
+
+do_compile_append() {
+ for DTB in ${KERNEL_DEVICETREE}; do
+ DTB=`normalize_dtb "${DTB}"`
+ oe_runmake ${DTB}
+ done
+}
+
+do_install_append() {
+ for DTB in ${KERNEL_DEVICETREE}; do
+ DTB=`normalize_dtb "${DTB}"`
+ DTB_EXT=${DTB##*.}
+ DTB_PATH=`get_real_dtb_path_in_kernel "${DTB}"`
+ DTB_BASE_NAME=`basename ${DTB} ."${DTB_EXT}"`
+ install -m 0644 ${DTB_PATH} ${D}/${KERNEL_IMAGEDEST}/${DTB_BASE_NAME}.${DTB_EXT}
+ for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do
+ symlink_name=${type}"-"${KERNEL_IMAGE_SYMLINK_NAME}
+ DTB_SYMLINK_NAME=`echo ${symlink_name} | sed "s/${MACHINE}/${DTB_BASE_NAME}/g"`
+ ln -sf ${DTB_BASE_NAME}.${DTB_EXT} ${D}/${KERNEL_IMAGEDEST}/devicetree-${DTB_SYMLINK_NAME}.${DTB_EXT}
+
+ if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
+ cat ${D}/${KERNEL_IMAGEDEST}/$type \
+ ${D}/${KERNEL_IMAGEDEST}/${DTB_BASE_NAME}.${DTB_EXT} \
+ > ${D}/${KERNEL_IMAGEDEST}/$type-${DTB_BASE_NAME}.${DTB_EXT}.bin
+ fi
+ done
+ done
+}
+
+do_deploy_append() {
+ for DTB in ${KERNEL_DEVICETREE}; do
+ DTB=`normalize_dtb "${DTB}"`
+ DTB_EXT=${DTB##*.}
+ DTB_BASE_NAME=`basename ${DTB} ."${DTB_EXT}"`
+ for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do
+ base_name=${type}"-"${KERNEL_IMAGE_BASE_NAME}
+ symlink_name=${type}"-"${KERNEL_IMAGE_SYMLINK_NAME}
+ DTB_NAME=`echo ${base_name} | sed "s/${MACHINE}/${DTB_BASE_NAME}/g"`
+ DTB_SYMLINK_NAME=`echo ${symlink_name} | sed "s/${MACHINE}/${DTB_BASE_NAME}/g"`
+ DTB_PATH=`get_real_dtb_path_in_kernel "${DTB}"`
+ install -d ${DEPLOYDIR}
+ install -m 0644 ${DTB_PATH} ${DEPLOYDIR}/${DTB_NAME}.${DTB_EXT}
+ ln -sf ${DTB_NAME}.${DTB_EXT} ${DEPLOYDIR}/${DTB_SYMLINK_NAME}.${DTB_EXT}
+ ln -sf ${DTB_NAME}.${DTB_EXT} ${DEPLOYDIR}/${DTB_BASE_NAME}.${DTB_EXT}
+
+ if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
+ cat ${DEPLOYDIR}/$type \
+ ${DEPLOYDIR}/${DTB_NAME}.${DTB_EXT} \
+ > ${DEPLOYDIR}/${DTB_NAME}.${DTB_EXT}.bin
+ ln -sf ${DTB_NAME}.${DTB_EXT}.bin ${DEPLOYDIR}/$type-${DTB_BASE_NAME}.${DTB_EXT}.bin
+
+ if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then
+ cat ${KERNEL_OUTPUT_DIR}/${type}.initramfs \
+ ${DEPLOYDIR}/${DTB_NAME}.${DTB_EXT} \
+ > ${DEPLOYDIR}/${type}-${INITRAMFS_BASE_NAME}-${DTB_BASE_NAME}.${DTB_EXT}.bin
+ ln -sf ${type}-${INITRAMFS_BASE_NAME}-${DTB_BASE_NAME}.${DTB_EXT}.bin \
+ ${DEPLOYDIR}/${type}-initramfs-${DTB_BASE_NAME}.${DTB_EXT}-${MACHINE}.bin
+ fi
+ fi
+ done
+ done
+}
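normalize_dtb above tolerates recipes that put a full dts path into KERNEL_DEVICETREE by reducing it to the dtb basename (with a warning). A Python rendering of the same normalization:

    import os
    import re

    def normalize_dtb(dtb):
        # A full path through a dts directory collapses to the dtb name
        if '/dts/' in dtb:
            dtb = re.sub(r'\.dts$', '.dtb', os.path.basename(dtb))
        return dtb

    assert normalize_dtb('arch/arm/boot/dts/am335x-bone.dts') == 'am335x-bone.dtb'
    assert normalize_dtb('am335x-bone.dtb') == 'am335x-bone.dtb'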
diff --git a/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass b/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass
index 179185b6b..9baf399f2 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass
@@ -7,9 +7,12 @@ python __anonymous () {
depends = "%s u-boot-mkimage-native dtc-native" % depends
d.setVar("DEPENDS", depends)
- if d.getVar("UBOOT_ARCH") == "mips":
+ uarch = d.getVar("UBOOT_ARCH")
+ if uarch == "arm64":
+ replacementtype = "Image"
+ elif uarch == "mips":
replacementtype = "vmlinuz.bin"
- elif d.getVar("UBOOT_ARCH") == "x86":
+ elif uarch == "x86":
replacementtype = "bzImage"
else:
replacementtype = "zImage"
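The fitimage change above reduces the image-type selection to a small UBOOT_ARCH lookup, with arm64 now picking the uncompressed Image; mips and x86 keep vmlinuz.bin and bzImage, and everything else defaults to zImage. A dictionary sketch:

    # Sketch of the replacementtype selection above.
    def fit_replacement_type(uboot_arch):
        return {'arm64': 'Image',
                'mips': 'vmlinuz.bin',
                'x86': 'bzImage'}.get(uboot_arch, 'zImage')

    assert fit_replacement_type('arm64') == 'Image'
    assert fit_replacement_type('arm') == 'zImage'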
diff --git a/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass b/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass
index 5e10dcf73..1035525da 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass
@@ -47,7 +47,7 @@ python split_kernel_module_packages () {
tf = tempfile.mkstemp()
tmpfile = tf[1]
cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", file, tmpfile)
- subprocess.call(cmd, shell=True)
+ subprocess.check_call(cmd, shell=True)
f = open(tmpfile)
l = f.read().split("\000")
f.close()
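Switching to subprocess.check_call means a failing objcopy now aborts packaging instead of being silently ignored and leaving an empty .modinfo dump. Illustrated:

    import subprocess

    # subprocess.call() returns the exit status; check_call() raises
    # CalledProcessError on any non-zero status, surfacing the failure.
    try:
        subprocess.check_call('false', shell=True)
    except subprocess.CalledProcessError as e:
        print('objcopy-style failure would now be reported:', e)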
diff --git a/import-layers/yocto-poky/meta/classes/kernel-uboot.bbclass b/import-layers/yocto-poky/meta/classes/kernel-uboot.bbclass
index 87f02654f..2364053f3 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-uboot.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-uboot.bbclass
@@ -3,6 +3,10 @@ uboot_prep_kimage() {
vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux"
linux_suffix=""
linux_comp="none"
+ elif [ -e arch/${ARCH}/boot/Image ] ; then
+ vmlinux_path="vmlinux"
+ linux_suffix=""
+ linux_comp="none"
elif [ -e arch/${ARCH}/boot/vmlinuz.bin ]; then
rm -f linux.bin
cp -l arch/${ARCH}/boot/vmlinuz.bin linux.bin
diff --git a/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass b/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass
index 1ca0756c4..663c6557d 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass
@@ -107,20 +107,31 @@ do_kernel_metadata() {
cmp "${WORKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}"
if [ $? -ne 0 ]; then
bbwarn "defconfig detected in WORKDIR. ${KBUILD_DEFCONFIG} skipped"
+ else
+ cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
fi
else
cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
- sccs="${WORKDIR}/defconfig"
fi
+ sccs="${WORKDIR}/defconfig"
else
- bbfatal "A KBUILD_DECONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree"
+ bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree"
fi
fi
- sccs="$sccs ${@" ".join(find_sccs(d))}"
+ sccs_from_src_uri="${@" ".join(find_sccs(d))}"
patches="${@" ".join(find_patches(d))}"
feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}"
+	# A quick check to make sure we don't have duplicate defconfigs:
+	# if there's a defconfig in the SRC_URI, did we also get one from
+	# the KBUILD_DEFCONFIG processing above?
+ if [ -n "$sccs" ]; then
+		# We did have a defconfig from above; remove any duplicates that may be in the SRC_URI
+ sccs_from_src_uri=$(echo $sccs_from_src_uri | awk '{ if ($0!="defconfig") { print $0 } }' RS=' ')
+ fi
+ sccs="$sccs $sccs_from_src_uri"
+
# check for feature directories/repos/branches that were part of the
# SRC_URI. If they were supplied, we convert them into include directives
# for the update part of the process
@@ -143,6 +154,12 @@ do_kernel_metadata() {
# expand kernel features into their full path equivalents
bsp_definition=$(spp ${includes} --find -DKMACHINE=${KMACHINE} -DKTYPE=${LINUX_KERNEL_TYPE})
+ if [ -z "$bsp_definition" ]; then
+ echo "$sccs" | grep -q defconfig
+ if [ $? -ne 0 ]; then
+ bbfatal_log "Could not locate BSP definition for ${KMACHINE}/${LINUX_KERNEL_TYPE} and no defconfig was provided"
+ fi
+ fi
meta_dir=$(kgit --meta)
# run1: pull all the configuration fragments, no matter where they come from
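The defconfig deduplication above uses awk with RS=' ' to drop bare "defconfig" tokens from the SRC_URI fragment list when KBUILD_DEFCONFIG already produced one. The same filter in Python:

    def drop_duplicate_defconfig(sccs_from_src_uri):
        # Equivalent to: awk '{ if ($0!="defconfig") { print $0 } }' RS=' '
        return ' '.join(t for t in sccs_from_src_uri.split()
                        if t != 'defconfig')

    assert drop_duplicate_defconfig('feature.scc defconfig') == 'feature.scc'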
diff --git a/import-layers/yocto-poky/meta/classes/kernel.bbclass b/import-layers/yocto-poky/meta/classes/kernel.bbclass
index ce2cab65a..14f41e9b1 100644
--- a/import-layers/yocto-poky/meta/classes/kernel.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel.bbclass
@@ -2,7 +2,7 @@ inherit linux-kernel-base kernel-module-split
PROVIDES += "virtual/kernel"
DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native lzop-native"
-PACKAGE_WRITE_DEPS += "depmodwrapper-cross virtual/update-alternatives-native"
+PACKAGE_WRITE_DEPS += "depmodwrapper-cross"
do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot"
@@ -57,7 +57,7 @@ python __anonymous () {
d.appendVar('PACKAGES', ' ' + 'kernel-image-' + typelower)
- d.setVar('FILES_kernel-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}')
+ d.setVar('FILES_kernel-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
d.appendVar('RDEPENDS_kernel-image', ' ' + 'kernel-image-' + typelower)
@@ -65,13 +65,6 @@ python __anonymous () {
d.setVar('ALLOW_EMPTY_kernel-image-' + typelower, '1')
- priority = d.getVar('KERNEL_PRIORITY')
- postinst = '#!/bin/sh\n' + 'update-alternatives --install /' + imagedest + '/' + type + ' ' + type + ' ' + type + '-${KERNEL_VERSION_NAME} ' + priority + ' || true' + '\n'
- d.setVar('pkg_postinst_kernel-image-' + typelower, postinst)
-
- postrm = '#!/bin/sh\n' + 'update-alternatives --remove' + ' ' + type + ' ' + type + '-${KERNEL_VERSION_NAME} || true' + '\n'
- d.setVar('pkg_postrm_kernel-image-' + typelower, postrm)
-
image = d.getVar('INITRAMFS_IMAGE')
if image:
d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
@@ -137,10 +130,6 @@ export CROSS_COMPILE = "${TARGET_PREFIX}"
export KBUILD_BUILD_USER = "oe-user"
export KBUILD_BUILD_HOST = "oe-host"
-KERNEL_PRIORITY ?= "${@int(d.getVar('PV').split('-')[0].split('+')[0].split('.')[0]) * 10000 + \
- int(d.getVar('PV').split('-')[0].split('+')[0].split('.')[1]) * 100 + \
- int(d.getVar('PV').split('-')[0].split('+')[0].split('.')[-1])}"
-
KERNEL_RELEASE ?= "${KERNEL_VERSION}"
# The directory where built kernel lies in the kernel tree
@@ -166,7 +155,7 @@ UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
# Some Linux kernel configurations need additional parameters on the command line
KERNEL_EXTRA_ARGS ?= ""
-EXTRA_OEMAKE = " HOSTCC="${BUILD_CC}" HOSTCPP="${BUILD_CPP}""
+EXTRA_OEMAKE = " HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
KERNEL_ALT_IMAGETYPE ??= ""
copy_initramfs() {
@@ -255,8 +244,36 @@ python do_devshell_prepend () {
addtask bundle_initramfs after do_install before do_deploy
+get_cc_option () {
+ # Check if KERNEL_CC supports the option "file-prefix-map".
+ # This option allows us to build images with __FILE__ values that do not
+ # contain the host build path.
+ if ${KERNEL_CC} -Q --help=joined | grep -q "\-ffile-prefix-map=<old=new>"; then
+ echo "-ffile-prefix-map=${S}=/kernel-source/"
+ fi
+}
+
kernel_do_compile() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
+ if [ "$BUILD_REPRODUCIBLE_BINARIES" = "1" ]; then
+ # kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
+ # be set.
+ if [ "$SOURCE_DATE_EPOCH" = "0" ]; then
+ olddir=`pwd`
+ cd ${S}
+ SOURCE_DATE_EPOCH=`git log -1 --pretty=%ct`
+ # git repo not guaranteed, so fall back to REPRODUCIBLE_TIMESTAMP_ROOTFS
+ if [ $? -ne 0 ]; then
+ SOURCE_DATE_EPOCH=${REPRODUCIBLE_TIMESTAMP_ROOTFS}
+ fi
+ cd $olddir
+ fi
+
+ ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH`
+ export KBUILD_BUILD_TIMESTAMP="$ts"
+ export KCONFIG_NOTIMESTAMP=1
+ bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
+ fi
# The $use_alternate_initrd is only set from
# do_bundle_initramfs() This variable is specifically for the
# case where we are making a second pass at the kernel
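kernel_do_compile() above derives a deterministic build timestamp for reproducible builds: the last git commit time if the sources are a git checkout, otherwise REPRODUCIBLE_TIMESTAMP_ROOTFS. A Python analogue of that fallback logic, with a placeholder fallback value:

# Use the last commit time when the source tree is a git checkout,
# otherwise fall back to a fixed timestamp (placeholder value here).
import subprocess

def source_date_epoch(srcdir, fallback="1500000000"):
    try:
        out = subprocess.check_output(
            ["git", "log", "-1", "--pretty=%ct"],
            cwd=srcdir, stderr=subprocess.DEVNULL)
        return out.decode().strip()
    except (subprocess.CalledProcessError, FileNotFoundError):
        # not a git checkout (kernel sources may come from a tarball)
        return fallback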
@@ -270,20 +287,22 @@ kernel_do_compile() {
copy_initramfs
use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
fi
+ cc_extra=$(get_cc_option)
for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
- oe_runmake ${typeformake} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
+ oe_runmake ${typeformake} CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
done
# vmlinux.gz is not built by kernel
if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
mkdir -p "${KERNEL_OUTPUT_DIR}"
- gzip -9c < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz"
+ gzip -9cn < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz"
fi
}
do_compile_kernelmodules() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
- oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
+ cc_extra=$(get_cc_option)
+ oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
# Module.symvers gets updated during the
# building of the kernel modules. We need to
@@ -320,6 +339,7 @@ kernel_do_install() {
install -d ${D}/boot
for type in ${KERNEL_IMAGETYPES} ; do
install -m 0644 ${KERNEL_OUTPUT_DIR}/${type} ${D}/${KERNEL_IMAGEDEST}/${type}-${KERNEL_VERSION}
+ ln -sf ${type}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${type}
done
install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
@@ -575,19 +595,27 @@ do_strip[dirs] = "${B}"
addtask strip before do_sizecheck after do_kernel_link_images
# Support checking the kernel size since some kernels need to reside in partitions
-# with a fixed length or there is a limit in transferring the kernel to memory
+# with a fixed length or there is a limit in transferring the kernel to memory.
+# If more than one image type is enabled, warn on any that don't fit but only fail
+# if none fit.
do_sizecheck() {
if [ ! -z "${KERNEL_IMAGE_MAXSIZE}" ]; then
invalid=`echo ${KERNEL_IMAGE_MAXSIZE} | sed 's/[0-9]//g'`
if [ -n "$invalid" ]; then
- die "Invalid KERNEL_IMAGE_MAXSIZE: ${KERNEL_IMAGE_MAXSIZE}, should be an integerx (The unit is Kbytes)"
+ die "Invalid KERNEL_IMAGE_MAXSIZE: ${KERNEL_IMAGE_MAXSIZE}, should be an integer (The unit is Kbytes)"
fi
+ at_least_one_fits=
for type in ${KERNEL_IMAGETYPES} ; do
size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$type | awk '{print $1}'`
if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
- warn "This kernel $type (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device. Please reduce the size of the kernel by making more of it modular."
+ bbwarn "This kernel $type (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device."
+ else
+ at_least_one_fits=y
fi
done
+ if [ -z "$at_least_one_fits" ]; then
+ die "All kernel images are too big for your device. Please reduce the size of the kernel by making more of it modular."
+ fi
fi
}
do_sizecheck[dirs] = "${B}"
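The reworked size check warns per oversized image and only dies when nothing fits. A compact sketch of that policy (sizes in kilobytes, names hypothetical):

# Warn for each oversized image type; fail only when none fit.
def check_sizes(sizes, maxsize):
    fits = False
    for imgtype, size in sizes.items():
        if size >= maxsize:
            print("WARNING: %s (%dK > %dK) is too big" % (imgtype, size, maxsize))
        else:
            fits = True
    if not fits:
        raise RuntimeError("All kernel images are too big for your device.")

check_sizes({"zImage": 4096, "uImage": 9000}, 8192)  # warns for uImage only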
@@ -642,3 +670,6 @@ do_deploy[prefuncs] += "package_get_auto_pr"
addtask deploy after do_populate_sysroot do_packagedata
EXPORT_FUNCTIONS do_deploy
+
+# Add using Device Tree support
+inherit kernel-devicetree
diff --git a/import-layers/yocto-poky/meta/classes/license.bbclass b/import-layers/yocto-poky/meta/classes/license.bbclass
index b1fffe70f..d35311046 100644
--- a/import-layers/yocto-poky/meta/classes/license.bbclass
+++ b/import-layers/yocto-poky/meta/classes/license.bbclass
@@ -255,14 +255,9 @@ def get_boot_dependencies(d):
"""
depends = []
- boot_depends_string = ""
taskdepdata = d.getVar("BB_TASKDEPDATA", False)
- # Only bootimg and bootdirectdisk include the depends flag
- boot_tasks = ["do_bootimg", "do_bootdirectdisk",]
-
- for task in boot_tasks:
- boot_depends_string = "%s %s" % (boot_depends_string,
- d.getVarFlag(task, "depends") or "")
+ # Only bootimg includes the depends flag
+ boot_depends_string = d.getVarFlag("do_bootimg", "depends") or ""
boot_depends = [dep.split(":")[0] for dep
in boot_depends_string.split()
if not dep.split(":")[0].endswith("-native")]
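get_boot_dependencies() now reads a single varflag, and the parsing is simple enough to show standalone, assuming the "depends" flag is a whitespace list of recipe:task pairs:

# Keep the recipe names from "recipe:task" pairs, dropping -native
# tools, which never end up on the target image.
def boot_depends(depends_string):
    return [dep.split(":")[0]
            for dep in depends_string.split()
            if not dep.split(":")[0].endswith("-native")]

assert boot_depends("grub:do_deploy syslinux-native:do_populate_sysroot") == ["grub"]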
diff --git a/import-layers/yocto-poky/meta/classes/linuxloader.bbclass b/import-layers/yocto-poky/meta/classes/linuxloader.bbclass
index 117b03074..8f30eb32e 100644
--- a/import-layers/yocto-poky/meta/classes/linuxloader.bbclass
+++ b/import-layers/yocto-poky/meta/classes/linuxloader.bbclass
@@ -1,5 +1,8 @@
+LDSO_TCLIBC = "glibc"
+LDSO_TCLIBC_libc-musl = "musl"
+LDSO_TCLIBC_libc-baremetal = "musl"
-linuxloader () {
+linuxloader_glibc () {
case ${TARGET_ARCH} in
powerpc | microblaze )
dynamic_loader="${base_libdir}/ld.so.1"
@@ -28,3 +31,40 @@ linuxloader () {
esac
echo $dynamic_loader
}
+
+linuxloader_musl () {
+ case ${TARGET_ARCH} in
+ microblaze* )
+ dynamic_loader="${base_libdir}/ld-musl-microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el' ,d)}.so.1"
+ ;;
+ mips* )
+ dynamic_loader="${base_libdir}/ld-musl-mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}.so.1"
+ ;;
+ powerpc )
+ dynamic_loader="${base_libdir}/ld-musl-powerpc${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}.so.1"
+ ;;
+ powerpc64 )
+ dynamic_loader="${base_libdir}/ld-musl-powerpc64.so.1"
+ ;;
+ x86_64 )
+ dynamic_loader="${base_libdir}/ld-musl-x86_64.so.1"
+ ;;
+ i*86 )
+ dynamic_loader="${base_libdir}/ld-musl-i386.so.1"
+ ;;
+ arm* )
+ dynamic_loader="${base_libdir}/ld-musl-arm${ARMPKGSFX_ENDIAN}${ARMPKGSFX_EABI}.so.1"
+ ;;
+ aarch64* )
+ dynamic_loader="${base_libdir}/ld-musl-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
+ ;;
+ * )
+ dynamic_loader="/unknown_dynamic_linker"
+ ;;
+ esac
+ echo $dynamic_loader
+}
+
+linuxloader () {
+ linuxloader_${LDSO_TCLIBC}
+}
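LDSO_TCLIBC relies on OVERRIDES: with TCLIBC set to musl the libc-musl override applies, LDSO_TCLIBC expands to "musl", and linuxloader() resolves to linuxloader_musl. A pure-Python analogue of that dispatch (loader table abbreviated to two entries, glibc as the default):

LOADERS = {
    "glibc": lambda arch: {"x86_64": "/lib/ld-linux-x86-64.so.2"}.get(arch, "/unknown_dynamic_linker"),
    "musl":  lambda arch: "/lib/ld-musl-%s.so.1" % arch,
}

def linuxloader(tclibc, arch):
    # unknown libc values fall back to the glibc table, mirroring the
    # LDSO_TCLIBC default above
    return LOADERS.get(tclibc, LOADERS["glibc"])(arch)

assert linuxloader("musl", "x86_64") == "/lib/ld-musl-x86_64.so.1"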
diff --git a/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass b/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass
index 27b137dec..e1d8b1843 100644
--- a/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass
+++ b/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass
@@ -15,6 +15,8 @@ EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
EFI_PROVIDER ?= "grub-efi"
EFI_CLASS = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
+MKDOSFS_EXTRAOPTS ??= "-S 512"
+
# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
# contain "efi". This way legacy is supported by default if neither is
# specified, maintaining the original behavior.
diff --git a/import-layers/yocto-poky/meta/classes/mirrors.bbclass b/import-layers/yocto-poky/meta/classes/mirrors.bbclass
index 4ad814ff2..766f1cb6f 100644
--- a/import-layers/yocto-poky/meta/classes/mirrors.bbclass
+++ b/import-layers/yocto-poky/meta/classes/mirrors.bbclass
@@ -30,21 +30,15 @@ ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \n \
http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cerias.purdue.edu/pub/tools/unix/sysutils/lsof/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tau.ac.il/pub/unix/admin/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cert.dfn.de/pub/tools/admin/lsof/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.fu-berlin.de/pub/unix/tools/lsof/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.kaizo.org/pub/lsof/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tu-darmstadt.de/pub/sysadmin/lsof/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tux.org/pub/sites/vic.cc.purdue.edu/tools/unix/lsof/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://gd.tuwien.ac.at/utils/admin-tools/lsof/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://sunsite.ualberta.ca/pub/Mirror/lsof/ \n \
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://the.wiretapped.net/pub/security/host-security/lsof/ \n \
+http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \n \
${APACHE_MIRROR} http://www.us.apache.org/dist \n \
${APACHE_MIRROR} http://archive.apache.org/dist \n \
http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \n \
${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \n \
${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \n \
+ftp://sourceware.org/pub http://mirrors.kernel.org/sourceware \n \
+ftp://sourceware.org/pub http://gd.tuwien.ac.at/gnu/sourceware \n \
+ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourceware \n \
cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
diff --git a/import-layers/yocto-poky/meta/classes/module.bbclass b/import-layers/yocto-poky/meta/classes/module.bbclass
index 802476bc7..78d1b21db 100644
--- a/import-layers/yocto-poky/meta/classes/module.bbclass
+++ b/import-layers/yocto-poky/meta/classes/module.bbclass
@@ -1,6 +1,6 @@
inherit module-base kernel-module-split pkgconfig
-addtask make_scripts after do_prepare_recipe_sysroot before do_compile
+addtask make_scripts after do_prepare_recipe_sysroot before do_configure
do_make_scripts[lockfiles] = "${TMPDIR}/kernel-scripts.lock"
do_make_scripts[depends] += "virtual/kernel:do_shared_workdir"
@@ -18,6 +18,26 @@ python __anonymous () {
d.setVar('KBUILD_EXTRA_SYMBOLS', " ".join(extra_symbols))
}
+python do_devshell_prepend () {
+ os.environ['CFLAGS'] = ''
+ os.environ['CPPFLAGS'] = ''
+ os.environ['CXXFLAGS'] = ''
+ os.environ['LDFLAGS'] = ''
+
+ os.environ['KERNEL_PATH'] = d.getVar('STAGING_KERNEL_DIR')
+ os.environ['KERNEL_SRC'] = d.getVar('STAGING_KERNEL_DIR')
+ os.environ['KERNEL_VERSION'] = d.getVar('KERNEL_VERSION')
+ os.environ['CC'] = d.getVar('KERNEL_CC')
+ os.environ['LD'] = d.getVar('KERNEL_LD')
+ os.environ['AR'] = d.getVar('KERNEL_AR')
+ os.environ['O'] = d.getVar('STAGING_KERNEL_BUILDDIR')
+ kbuild_extra_symbols = d.getVar('KBUILD_EXTRA_SYMBOLS')
+ if kbuild_extra_symbols:
+ os.environ['KBUILD_EXTRA_SYMBOLS'] = kbuild_extra_symbols
+ else:
+ os.environ['KBUILD_EXTRA_SYMBOLS'] = ''
+}
+
module_do_compile() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
diff --git a/import-layers/yocto-poky/meta/classes/multilib.bbclass b/import-layers/yocto-poky/meta/classes/multilib.bbclass
index ab04597f9..816f54e7f 100644
--- a/import-layers/yocto-poky/meta/classes/multilib.bbclass
+++ b/import-layers/yocto-poky/meta/classes/multilib.bbclass
@@ -4,7 +4,9 @@ python multilib_virtclass_handler () {
if cls != "multilib" or not variant:
return
- e.data.setVar('STAGING_KERNEL_DIR', e.data.getVar('STAGING_KERNEL_DIR'))
+ localdata = bb.data.createCopy(e.data)
+ localdata.delVar('TMPDIR')
+ e.data.setVar('STAGING_KERNEL_DIR', localdata.getVar('STAGING_KERNEL_DIR'))
# There should only be one kernel in multilib configs
# We also skip multilib setup for module packages.
diff --git a/import-layers/yocto-poky/meta/classes/native.bbclass b/import-layers/yocto-poky/meta/classes/native.bbclass
index 6b7f3dd76..9c434dce8 100644
--- a/import-layers/yocto-poky/meta/classes/native.bbclass
+++ b/import-layers/yocto-poky/meta/classes/native.bbclass
@@ -108,7 +108,7 @@ PKG_CONFIG_SYSROOT_DIR = ""
PKG_CONFIG_SYSTEM_LIBRARY_PATH[unexport] = "1"
PKG_CONFIG_SYSTEM_INCLUDE_PATH[unexport] = "1"
-# we dont want libc-uclibc or libc-glibc to kick in for native recipes
+# we don't want libc-*libc to kick in for native recipes
LIBCOVERRIDE = ""
CLASSOVERRIDE = "class-native"
MACHINEOVERRIDES = ""
diff --git a/import-layers/yocto-poky/meta/classes/own-mirrors.bbclass b/import-layers/yocto-poky/meta/classes/own-mirrors.bbclass
index 0296d545b..a77783513 100644
--- a/import-layers/yocto-poky/meta/classes/own-mirrors.bbclass
+++ b/import-layers/yocto-poky/meta/classes/own-mirrors.bbclass
@@ -1,13 +1,13 @@
-PREMIRRORS() {
-cvs://.*/.* ${SOURCE_MIRROR_URL}
-svn://.*/.* ${SOURCE_MIRROR_URL}
-git://.*/.* ${SOURCE_MIRROR_URL}
-gitsm://.*/.* ${SOURCE_MIRROR_URL}
-hg://.*/.* ${SOURCE_MIRROR_URL}
-bzr://.*/.* ${SOURCE_MIRROR_URL}
-p4://.*/.* ${SOURCE_MIRROR_URL}
-osc://.*/.* ${SOURCE_MIRROR_URL}
-https?$://.*/.* ${SOURCE_MIRROR_URL}
-ftp://.*/.* ${SOURCE_MIRROR_URL}
-npm://.*/?.* ${SOURCE_MIRROR_URL}
-}
+PREMIRRORS_prepend = " \
+cvs://.*/.* ${SOURCE_MIRROR_URL} \n \
+svn://.*/.* ${SOURCE_MIRROR_URL} \n \
+git://.*/.* ${SOURCE_MIRROR_URL} \n \
+gitsm://.*/.* ${SOURCE_MIRROR_URL} \n \
+hg://.*/.* ${SOURCE_MIRROR_URL} \n \
+bzr://.*/.* ${SOURCE_MIRROR_URL} \n \
+p4://.*/.* ${SOURCE_MIRROR_URL} \n \
+osc://.*/.* ${SOURCE_MIRROR_URL} \n \
+https?$://.*/.* ${SOURCE_MIRROR_URL} \n \
+ftp://.*/.* ${SOURCE_MIRROR_URL} \n \
+npm://.*/?.* ${SOURCE_MIRROR_URL} \n \
+"
diff --git a/import-layers/yocto-poky/meta/classes/package.bbclass b/import-layers/yocto-poky/meta/classes/package.bbclass
index a03c05b9f..2053d4639 100644
--- a/import-layers/yocto-poky/meta/classes/package.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package.bbclass
@@ -737,9 +737,7 @@ python fixup_perms () {
def get_fs_perms_list(d):
str = ""
bbpath = d.getVar('BBPATH')
- fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES')
- if not fs_perms_tables:
- fs_perms_tables = 'files/fs-perms.txt'
+ fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES') or ""
for conf_file in fs_perms_tables.split():
str += " %s" % bb.utils.which(bbpath, conf_file)
return str
@@ -879,6 +877,11 @@ python split_and_strip_files () {
debugdir = "/.debug"
debuglibdir = ""
debugsrcdir = ""
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
+ debugappend = ""
+ debugdir = "/.debug"
+ debuglibdir = ""
+ debugsrcdir = "/usr/src/debug"
else:
# Original OE-core, a.k.a. ".debug", style debug info
debugappend = ""
@@ -1092,6 +1095,15 @@ python populate_packages () {
autodebug = not (d.getVar("NOAUTOPACKAGEDEBUG") or False)
+ split_source_package = (d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg')
+
+ # If debug-with-srcpkg mode is enabled then the src package is added
+ # to the package list, with the source directory as its main content
+ if split_source_package:
+ src_package_name = ('%s-src' % d.getVar('PN'))
+ packages += (' ' + src_package_name)
+ d.setVar('FILES_%s' % src_package_name, '/usr/src/debug')
+
# Sanity check PACKAGES for duplicates
# Sanity should be moved to sanity.bbclass once we have the infrastructure
package_list = []
@@ -1100,7 +1112,12 @@ python populate_packages () {
if pkg in package_list:
msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
package_qa_handle_error("packages-list", msg, d)
- elif autodebug and pkg.endswith("-dbg"):
+ # If debug-with-srcpkg mode is enabled then the src package will have
+ # priority over the dbg package when assigning the files.
+ # This allows the src package to include the source files, removing them from dbg.
+ elif split_source_package and pkg.endswith("-src"):
+ package_list.insert(0, pkg)
+ elif autodebug and pkg.endswith("-dbg") and not split_source_package:
package_list.insert(0, pkg)
else:
package_list.append(pkg)
@@ -1434,13 +1451,7 @@ if [ x"$D" = "x" ]; then
fi
}
-# In Morty and earlier releases, and on master (Rocko), the RPM file
-# dependencies are always enabled. However, since they were broken with the
-# release of Pyro and enabling them may cause build problems for some packages,
-# they are not enabled by default in Pyro. Setting ENABLE_RPM_FILEDEPS_FOR_PYRO
-# to "1" will enable them again.
-ENABLE_RPM_FILEDEPS_FOR_PYRO ??= "0"
-RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps${@' --alldeps' if d.getVar('ENABLE_RPM_FILEDEPS_FOR_PYRO') == '1' else ''}"
+RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps"
# Collect perfile run-time dependency metadata
# Output:
@@ -1465,7 +1476,7 @@ python package_do_filedeps() {
for pkg in packages.split():
if d.getVar('SKIP_FILEDEPS_' + pkg) == '1':
continue
- if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-'):
+ if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-') or pkg.endswith('-src'):
continue
for files in chunks(pkgfiles[pkg], 100):
pkglist.append((pkg, files, rpmdeps, pkgdest))
@@ -1583,7 +1594,7 @@ python package_do_shlibs() {
combos.append("-".join(options[0:i]))
return combos
- if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg'):
+ if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg') and not pkg.endswith('-src'):
# Drop suffix
name = os.path.basename(file).rsplit(".",1)[0]
# Find all combinations
@@ -2060,7 +2071,7 @@ python do_package () {
# cache. This is useful if an item this class depends on changes in a
# way that the output of this class changes. rpmdeps is a good example
# as any change to rpmdeps requires this to be rerun.
- # PACKAGE_BBCLASS_VERSION = "1"
+ # PACKAGE_BBCLASS_VERSION = "2"
# Init cachedpath
global cpath
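The debug-with-srcpkg changes above hinge on processing order: a package whose FILES should win file assignment must be handled first. A sketch of that ordering rule (function name hypothetical):

# Packages whose FILES must claim files first (-src when
# debug-with-srcpkg is enabled, otherwise -dbg) move to the front.
def order_packages(packages, split_source_package, autodebug=True):
    ordered = []
    for pkg in packages:
        if split_source_package and pkg.endswith("-src"):
            ordered.insert(0, pkg)
        elif autodebug and pkg.endswith("-dbg") and not split_source_package:
            ordered.insert(0, pkg)
        else:
            ordered.append(pkg)
    return ordered

assert order_packages(["foo", "foo-dbg", "foo-src"], True)[0] == "foo-src"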
diff --git a/import-layers/yocto-poky/meta/classes/package_deb.bbclass b/import-layers/yocto-poky/meta/classes/package_deb.bbclass
index eacabcdb6..5d297939b 100644
--- a/import-layers/yocto-poky/meta/classes/package_deb.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package_deb.bbclass
@@ -39,35 +39,33 @@ def debian_arch_map(arch, tune):
if arch == "arm":
return arch + ["el", "hf"]["callconvention-hard" in tune_features]
return arch
-#
-# install a bunch of packages using apt
-# the following shell variables needs to be set before calling this func:
-# INSTALL_ROOTFS_DEB - install root dir
-# INSTALL_BASEARCH_DEB - install base architecutre
-# INSTALL_ARCHS_DEB - list of available archs
-# INSTALL_PACKAGES_NORMAL_DEB - packages to be installed
-# INSTALL_PACKAGES_ATTEMPTONLY_DEB - packages attempted to be installed only
-# INSTALL_PACKAGES_LINGUAS_DEB - additional packages for uclibc
-# INSTALL_TASK_DEB - task name
python do_package_deb () {
- import re, copy
- import textwrap
- import subprocess
- import collections
- import codecs
-
- oldcwd = os.getcwd()
- workdir = d.getVar('WORKDIR')
- if not workdir:
- bb.error("WORKDIR not defined, unable to package")
- return
+ import multiprocessing
+ import traceback
+
+ class DebianWritePkgProcess(multiprocessing.Process):
+ def __init__(self, *args, **kwargs):
+ multiprocessing.Process.__init__(self, *args, **kwargs)
+ self._pconn, self._cconn = multiprocessing.Pipe()
+ self._exception = None
+
+ def run(self):
+ try:
+ multiprocessing.Process.run(self)
+ self._cconn.send(None)
+ except Exception as e:
+ tb = traceback.format_exc()
+ self._cconn.send((e, tb))
+
+ @property
+ def exception(self):
+ if self._pconn.poll():
+ self._exception = self._pconn.recv()
+ return self._exception
- outdir = d.getVar('PKGWRITEDIRDEB')
- if not outdir:
- bb.error("PKGWRITEDIRDEB not defined, unable to package")
- return
+ oldcwd = os.getcwd()
packages = d.getVar('PACKAGES')
if not packages:
@@ -75,14 +73,45 @@ python do_package_deb () {
return
tmpdir = d.getVar('TMPDIR')
-
if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
- if packages == []:
- bb.debug(1, "No packages; nothing to do")
- return
+ max_process = int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)
+ launched = []
+ error = None
+ pkgs = packages.split()
+ while not error and pkgs:
+ if len(launched) < max_process:
+ p = DebianWritePkgProcess(target=deb_write_pkg, args=(pkgs.pop(), d))
+ p.start()
+ launched.append(p)
+ for q in launched:
+ # The finished processes are joined when calling is_alive()
+ if not q.is_alive():
+ launched.remove(q)
+ if q.exception:
+ error, traceback = q.exception
+ break
+
+ for p in launched:
+ p.join()
+
+ os.chdir(oldcwd)
+ if error:
+ raise error
+}
+do_package_deb[vardeps] += "deb_write_pkg"
+do_package_deb[vardepsexclude] = "BB_NUMBER_THREADS"
+
+def deb_write_pkg(pkg, d):
+ import re, copy
+ import textwrap
+ import subprocess
+ import collections
+ import codecs
+
+ outdir = d.getVar('PKGWRITEDIRDEB')
pkgdest = d.getVar('PKGDEST')
def cleanupcontrol(root):
@@ -91,11 +120,11 @@ python do_package_deb () {
if os.path.exists(p):
bb.utils.prunedir(p)
- for pkg in packages.split():
- localdata = bb.data.createCopy(d)
- root = "%s/%s" % (pkgdest, pkg)
+ localdata = bb.data.createCopy(d)
+ root = "%s/%s" % (pkgdest, pkg)
- lf = bb.utils.lockfile(root + ".lock")
+ lf = bb.utils.lockfile(root + ".lock")
+ try:
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
@@ -117,8 +146,7 @@ python do_package_deb () {
g = glob('*')
if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
- bb.utils.unlockfile(lf)
- continue
+ return
controldir = os.path.join(root, 'DEBIAN')
bb.utils.mkdirhier(controldir)
@@ -194,8 +222,8 @@ python do_package_deb () {
mapping_rename_hook(localdata)
def debian_cmp_remap(var):
- # dpkg does not allow for '(' or ')' in a dependency name
- # replace these instances with '__' and '__'
+ # dpkg does not allow for '(', ')' or ':' in a dependency name
+ # Replace any instances of them with '__'
#
# In debian '>' and '<' do not mean what they appear to mean
# '<' = less or equal
@@ -204,8 +232,7 @@ python do_package_deb () {
#
for dep in var:
if '(' in dep:
- newdep = dep.replace('(', '__')
- newdep = newdep.replace(')', '__')
+ newdep = re.sub(r'[(:)]', '__', dep)
if newdep != dep:
var[newdep] = var[dep]
del var[dep]
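The remap above collapses three illegal characters into a single substitution; inside a character class, '(', ':' and ')' are plain literals. For instance (dependency name invented):

import re
# '(' , ')' and ':' are all illegal in a dpkg dependency name,
# so each occurrence becomes '__'.
assert re.sub(r'[(:)]', '__', 'gconv(abi:2)') == 'gconv__abi__2__'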
@@ -289,17 +316,19 @@ python do_package_deb () {
conffiles.close()
os.chdir(basedir)
- subprocess.check_output("PATH=\"%s\" dpkg-deb -b %s %s" % (localdata.getVar("PATH"), root, pkgoutdir), shell=True)
+ subprocess.check_output("PATH=\"%s\" dpkg-deb -b %s %s" % (localdata.getVar("PATH"), root, pkgoutdir),
+ stderr=subprocess.STDOUT,
+ shell=True)
+ finally:
cleanupcontrol(root)
bb.utils.unlockfile(lf)
- os.chdir(oldcwd)
-}
-# Indirect references to these vars
-do_package_write_deb[vardeps] += "PKGV PKGR PKGV DESCRIPTION SECTION PRIORITY MAINTAINER DPKG_ARCH PN HOMEPAGE"
+
# Otherwise allarch packages may change depending on override configuration
-do_package_deb[vardepsexclude] = "OVERRIDES"
+deb_write_pkg[vardepsexclude] = "OVERRIDES"
+# Indirect references to these vars
+do_package_write_deb[vardeps] += "PKGV PKGR PKGV DESCRIPTION SECTION PRIORITY MAINTAINER DPKG_ARCH PN HOMEPAGE"
SSTATETASKS += "do_package_write_deb"
do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
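do_package_deb (and do_package_ipk below) now fan the per-package work out to worker processes. Since multiprocessing does not propagate child exceptions by itself, a Pipe carries the exception and its traceback text back to the parent. A reduced, runnable sketch of the pattern, with a stand-in work function and a hard-coded package list (the real task pulls names from PACKAGES and throttles on BB_NUMBER_THREADS):

import multiprocessing
import traceback

class WritePkgProcess(multiprocessing.Process):
    def __init__(self, *args, **kwargs):
        multiprocessing.Process.__init__(self, *args, **kwargs)
        self._pconn, self._cconn = multiprocessing.Pipe()
        self._exception = None

    def run(self):
        try:
            multiprocessing.Process.run(self)
            self._cconn.send(None)
        except Exception as e:
            # ship the exception plus formatted traceback to the parent
            self._cconn.send((e, traceback.format_exc()))

    @property
    def exception(self):
        if self._pconn.poll():
            self._exception = self._pconn.recv()
        return self._exception

def write_pkg(pkg):
    if pkg == "bad":
        raise ValueError("cannot package %s" % pkg)

if __name__ == "__main__":
    max_process, launched, error, tb = 2, [], None, None
    pkgs = ["a", "b", "bad", "c"]
    while not error and pkgs:
        if len(launched) < max_process:
            p = WritePkgProcess(target=write_pkg, args=(pkgs.pop(),))
            p.start()
            launched.append(p)
        for q in launched[:]:          # iterate a copy while removing
            if not q.is_alive():       # finished children are joined here
                launched.remove(q)
                if q.exception:
                    error, tb = q.exception
                    break
    for p in launched:
        p.join()
        if not error and p.exception:
            error, tb = p.exception
    if error:
        print(tb)
        raise error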
diff --git a/import-layers/yocto-poky/meta/classes/package_ipk.bbclass b/import-layers/yocto-poky/meta/classes/package_ipk.bbclass
index a1e51ee69..6c1fdaa39 100644
--- a/import-layers/yocto-poky/meta/classes/package_ipk.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package_ipk.bbclass
@@ -12,15 +12,34 @@ OPKGBUILDCMD ??= "opkg-build"
OPKG_ARGS += "--force_postinstall --prefer-arch-to-version"
OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
-OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE') or "").split())][(d.getVar("PACKAGE_EXCLUDE") or "") != ""]}"
+OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE') or "").split())][(d.getVar("PACKAGE_EXCLUDE") or "").strip() != ""]}"
OPKGLIBDIR = "${localstatedir}/lib"
python do_package_ipk () {
- import re, copy
- import textwrap
- import subprocess
- import collections
+ import multiprocessing
+ import traceback
+
+ class IPKWritePkgProcess(multiprocessing.Process):
+ def __init__(self, *args, **kwargs):
+ multiprocessing.Process.__init__(self, *args, **kwargs)
+ self._pconn, self._cconn = multiprocessing.Pipe()
+ self._exception = None
+
+ def run(self):
+ try:
+ multiprocessing.Process.run(self)
+ self._cconn.send(None)
+ except Exception as e:
+ tb = traceback.format_exc()
+ self._cconn.send((e, tb))
+
+ @property
+ def exception(self):
+ if self._pconn.poll():
+ self._exception = self._pconn.recv()
+ return self._exception
+
oldcwd = os.getcwd()
@@ -42,20 +61,55 @@ python do_package_ipk () {
if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
+ max_process = int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)
+ launched = []
+ error = None
+ pkgs = packages.split()
+ while not error and pkgs:
+ if len(launched) < max_process:
+ p = IPKWritePkgProcess(target=ipk_write_pkg, args=(pkgs.pop(), d))
+ p.start()
+ launched.append(p)
+ for q in launched:
+ # The finished processes are joined when calling is_alive()
+ if not q.is_alive():
+ launched.remove(q)
+ if q.exception:
+ error, traceback = q.exception
+ break
+
+ for p in launched:
+ p.join()
+
+ os.chdir(oldcwd)
+
+ if error:
+ raise error
+}
+do_package_ipk[vardeps] += "ipk_write_pkg"
+do_package_ipk[vardepsexclude] = "BB_NUMBER_THREADS"
+
+def ipk_write_pkg(pkg, d):
+ import re, copy
+ import subprocess
+ import textwrap
+ import collections
+
def cleanupcontrol(root):
for p in ['CONTROL', 'DEBIAN']:
p = os.path.join(root, p)
if os.path.exists(p):
bb.utils.prunedir(p)
+ outdir = d.getVar('PKGWRITEDIRIPK')
+ pkgdest = d.getVar('PKGDEST')
recipesource = os.path.basename(d.getVar('FILE'))
- for pkg in packages.split():
- localdata = bb.data.createCopy(d)
- root = "%s/%s" % (pkgdest, pkg)
-
- lf = bb.utils.lockfile(root + ".lock")
+ localdata = bb.data.createCopy(d)
+ root = "%s/%s" % (pkgdest, pkg)
+ lf = bb.utils.lockfile(root + ".lock")
+ try:
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
pkgname = localdata.getVar('PKG_%s' % pkg)
@@ -100,8 +154,7 @@ python do_package_ipk () {
g = glob('*')
if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
- bb.utils.unlockfile(lf)
- continue
+ return
controldir = os.path.join(root, 'CONTROL')
bb.utils.mkdirhier(controldir)
@@ -142,16 +195,9 @@ python do_package_ipk () {
description = localdata.getVar('DESCRIPTION') or "."
description = textwrap.dedent(description).strip()
if '\\n' in description:
- # Manually indent
+ # Manually indent: multiline description includes a leading space
for t in description.split('\\n'):
- # We don't limit the width when manually indent, but we do
- # need the textwrap.fill() to set the initial_indent and
- # subsequent_indent, so set a large width
- line = textwrap.fill(t.strip(),
- width=100000,
- initial_indent=' ',
- subsequent_indent=' ') or '.'
- ctrlfile.write('%s\n' % line)
+ ctrlfile.write(' %s\n' % (t.strip() or ' .'))
else:
# Auto indent
ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
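The manual-indent branch above replaces a textwrap.fill() call that used width=100000 purely to get the indents: the control-file format only needs each continuation line of a multi-line Description to start with a space, with blank lines rendered as a dot. A standalone sketch, assuming the recipe DESCRIPTION carries literal "\n" escapes (which is why the code splits on '\\n'):

# Each continuation line gets a leading space; empty lines become a dot.
def format_description(description):
    lines = []
    for t in description.split('\\n'):
        lines.append(' %s' % (t.strip() or ' .'))
    return '\n'.join(lines)

print(format_description('line one\\n\\nline two'))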
@@ -228,20 +274,22 @@ python do_package_ipk () {
os.chdir(basedir)
subprocess.check_output("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH"),
- d.getVar("OPKGBUILDCMD"), pkg, pkgoutdir), shell=True)
+ d.getVar("OPKGBUILDCMD"), pkg, pkgoutdir),
+ stderr=subprocess.STDOUT,
+ shell=True)
if d.getVar('IPK_SIGN_PACKAGES') == '1':
ipkver = "%s-%s" % (d.getVar('PKGV'), d.getVar('PKGR'))
ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, d.getVar('PACKAGE_ARCH'))
sign_ipk(d, ipk_to_sign)
+ finally:
cleanupcontrol(root)
bb.utils.unlockfile(lf)
- os.chdir(oldcwd)
-}
# Otherwise allarch packages may change depending on override configuration
-do_package_ipk[vardepsexclude] = "OVERRIDES"
+ipk_write_pkg[vardepsexclude] = "OVERRIDES"
+
SSTATETASKS += "do_package_write_ipk"
do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}"
diff --git a/import-layers/yocto-poky/meta/classes/package_rpm.bbclass b/import-layers/yocto-poky/meta/classes/package_rpm.bbclass
index 1deaf832d..a428d3064 100644
--- a/import-layers/yocto-poky/meta/classes/package_rpm.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package_rpm.bbclass
@@ -646,9 +646,13 @@ python do_package_rpm () {
rpmbuild = d.getVar('RPMBUILD')
targetsys = d.getVar('TARGET_SYS')
targetvendor = d.getVar('HOST_VENDOR')
+
# Too many places in dnf stack assume that arch-independent packages are "noarch".
# Let's not fight against this.
- package_arch = (d.getVar('PACKAGE_ARCH') or "").replace("-", "_").replace("all", "noarch")
+ package_arch = (d.getVar('PACKAGE_ARCH') or "").replace("-", "_")
+ if package_arch == "all":
+ package_arch = "noarch"
+
sdkpkgsuffix = (d.getVar('SDKPKGSUFFIX') or "nativesdk").replace("-", "_")
d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}')
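Splitting the noarch mapping out of the chained replace() is not just cosmetic: replace() rewrites substrings, so any arch merely containing "all" would be mangled, while the explicit comparison only rewrites the exact value. For example (arch names invented):

def map_arch_old(arch):
    # old behaviour: substring replacement corrupts embedded "all"
    return arch.replace("-", "_").replace("all", "noarch")

def map_arch_new(arch):
    arch = arch.replace("-", "_")
    return "noarch" if arch == "all" else arch

assert map_arch_old("qemuall-extra") == "qemunoarch_extra"   # corrupted
assert map_arch_new("qemuall-extra") == "qemuall_extra"      # preserved
assert map_arch_new("all") == "noarch"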
diff --git a/import-layers/yocto-poky/meta/classes/packagefeed-stability.bbclass b/import-layers/yocto-poky/meta/classes/packagefeed-stability.bbclass
index c0e9be549..564860256 100644
--- a/import-layers/yocto-poky/meta/classes/packagefeed-stability.bbclass
+++ b/import-layers/yocto-poky/meta/classes/packagefeed-stability.bbclass
@@ -189,7 +189,7 @@ def package_compare_impl(pkgtype, d):
# Remove all the old files and copy again if docopy
if docopy:
- bb.plain('Copying packages for recipe %s' % pn)
+ bb.note('Copying packages for recipe %s' % pn)
pcmanifest = os.path.join(prepath, d.expand('pkg-compare-manifest-${MULTIMACH_TARGET_SYS}-${PN}'))
try:
with open(pcmanifest, 'r') as f:
@@ -224,7 +224,7 @@ def package_compare_impl(pkgtype, d):
shutil.copyfile(srcpath, destpath)
f.write('%s\n' % destpath)
else:
- bb.plain('Not copying packages for recipe %s' % pn)
+ bb.note('Not copying packages for recipe %s' % pn)
do_cleansstate[postfuncs] += "pfs_cleanpkgs"
python pfs_cleanpkgs () {
diff --git a/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass b/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass
index 563582e0a..424c63cbf 100644
--- a/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass
+++ b/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass
@@ -59,6 +59,9 @@ SDK_TITLE ?= "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} SDK"
SDK_TARGET_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.target.manifest"
SDK_HOST_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"
+SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
+SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
+
python write_target_sdk_manifest () {
from oe.sdk import sdk_list_installed_packages
from oe.utils import format_pkg_list
@@ -88,8 +91,9 @@ python write_host_sdk_manifest () {
output.write(format_pkg_list(pkgs, 'ver'))
}
-POPULATE_SDK_POST_TARGET_COMMAND_append = " write_target_sdk_manifest ; write_sdk_test_data ; "
-POPULATE_SDK_POST_HOST_COMMAND_append = " write_host_sdk_manifest; "
+POPULATE_SDK_POST_TARGET_COMMAND_append = " write_sdk_test_data ; "
+POPULATE_SDK_POST_TARGET_COMMAND_append_task-populate-sdk = " write_target_sdk_manifest ; "
+POPULATE_SDK_POST_HOST_COMMAND_append_task-populate-sdk = " write_host_sdk_manifest; "
SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC};' if '${SDK_PACKAGING_FUNC}' else ''}"
SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; tar_sdk; ${SDK_PACKAGING_COMMAND} "
@@ -97,6 +101,26 @@ def populate_sdk_common(d):
from oe.sdk import populate_sdk
from oe.manifest import create_manifest, Manifest
+ # Handle package exclusions
+ excl_pkgs = (d.getVar("PACKAGE_EXCLUDE") or "").split()
+ inst_pkgs = (d.getVar("PACKAGE_INSTALL") or "").split()
+ inst_attempt_pkgs = (d.getVar("PACKAGE_INSTALL_ATTEMPTONLY") or "").split()
+
+ d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
+ d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))
+
+ for pkg in excl_pkgs:
+ if pkg in inst_pkgs:
+ bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
+ inst_pkgs.remove(pkg)
+
+ if pkg in inst_attempt_pkgs:
+ bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
+ inst_attempt_pkgs.remove(pkg)
+
+ d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
+ d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))
+
pn = d.getVar('PN')
runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", pn, d)
runtime_mapping_rename("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY", pn, d)
@@ -256,8 +280,7 @@ populate_sdk_log_check() {
}
def sdk_command_variables(d):
- return ['OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','POPULATE_SDK_POST_HOST_COMMAND','POPULATE_SDK_POST_TARGET_COMMAND','SDK_POSTPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS',
- 'RPM_POSTPROCESS_COMMANDS']
+ return ['OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','POPULATE_SDK_POST_HOST_COMMAND','POPULATE_SDK_PRE_TARGET_COMMAND','POPULATE_SDK_POST_TARGET_COMMAND','SDK_POSTPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS']
def sdk_variables(d):
variables = ['BUILD_IMAGES_FROM_FEEDS','SDK_OS','SDK_OUTPUT','SDKPATHNATIVE','SDKTARGETSYSROOT','SDK_DIR','SDK_VENDOR','SDKIMAGE_INSTALL_COMPLEMENTARY','SDK_PACKAGE_ARCHS','SDK_OUTPUT',
diff --git a/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass b/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass
index 8b8a341e3..c79ddbbb8 100644
--- a/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass
+++ b/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass
@@ -33,6 +33,7 @@ SDK_LOCAL_CONF_BLACKLIST ?= "CONF_VERSION \
DL_DIR \
SSTATE_DIR \
TMPDIR \
+ BB_SERVER_TIMEOUT \
"
SDK_INHERIT_BLACKLIST ?= "buildhistory icecc"
SDK_UPDATE_URL ?= ""
@@ -69,7 +70,6 @@ OE_INIT_ENV_SCRIPT ?= "oe-init-build-env"
# COREBASE be preserved as well as untracked files.
COREBASE_FILES ?= " \
oe-init-build-env \
- oe-init-build-env-memres \
scripts \
LICENSE \
.templateconf \
@@ -83,6 +83,39 @@ TOOLCHAIN_OUTPUTNAME_task-populate-sdk-ext = "${TOOLCHAINEXT_OUTPUTNAME}"
SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
+python write_target_sdk_ext_manifest () {
+ from oe.sdk import get_extra_sdkinfo
+ sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')
+ extra_info = get_extra_sdkinfo(sstate_dir)
+
+ target = d.getVar('TARGET_SYS')
+ target_multimach = d.getVar('MULTIMACH_TARGET_SYS')
+ real_target_multimach = d.getVar('REAL_MULTIMACH_TARGET_SYS')
+
+ pkgs = {}
+ with open(d.getVar('SDK_EXT_TARGET_MANIFEST'), 'w') as f:
+ for fn in extra_info['filesizes']:
+ info = fn.split(':')
+ if info[2] in (target, target_multimach, real_target_multimach) \
+ or info[5] == 'allarch':
+ if not info[1] in pkgs:
+ f.write("%s %s %s\n" % (info[1], info[2], info[3]))
+ pkgs[info[1]] = {}
+}
+python write_host_sdk_ext_manifest () {
+ from oe.sdk import get_extra_sdkinfo
+ sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')
+ extra_info = get_extra_sdkinfo(sstate_dir)
+ host = d.getVar('BUILD_SYS')
+ with open(d.getVar('SDK_EXT_HOST_MANIFEST'), 'w') as f:
+ for fn in extra_info['filesizes']:
+ info = fn.split(':')
+ if info[2] == host:
+ f.write("%s %s %s\n" % (info[1], info[2], info[3]))
+}
+
+SDK_POSTPROCESS_COMMAND_append_task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
+
SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
def clean_esdk_builddir(d, sdkbasepath):
@@ -111,7 +144,7 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
with open(sdkbasepath + '/conf/local.conf', 'a') as f:
# Force the use of sstate from the build system
f.write('\nSSTATE_DIR_forcevariable = "%s"\n' % d.getVar('SSTATE_DIR'))
- f.write('SSTATE_MIRRORS_forcevariable = ""\n')
+ f.write('SSTATE_MIRRORS_forcevariable = "file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n')
# Ensure TMPDIR is the default so that clean_esdk_builddir() can delete it
f.write('TMPDIR_forcevariable = "${TOPDIR}/tmp"\n')
f.write('TCLIBCAPPEND_forcevariable = ""\n')
@@ -314,12 +347,18 @@ python copy_buildsystem () {
# the sig computed from the metadata.
f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n\n')
+ # We want to be able to set this without a full reparse
+ f.write('BB_HASHCONFIG_WHITELIST_append = " SIGGEN_UNLOCKED_RECIPES"\n\n')
+
# Set up whitelist for run on install
f.write('BB_SETSCENE_ENFORCE_WHITELIST = "%:* *:do_shared_workdir *:do_rm_work wic-tools:* *:do_addto_recipe_sysroot"\n\n')
# Hide the config information from bitbake output (since it's fixed within the SDK)
f.write('BUILDCFG_HEADER = ""\n\n')
+ f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n')
+ f.write('WITHIN_EXT_SDK = "1"\n\n')
+
# Map gcc-dependent uninative sstate cache for installer usage
f.write('SSTATE_MIRRORS += " file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n\n')
diff --git a/import-layers/yocto-poky/meta/classes/python3native.bbclass b/import-layers/yocto-poky/meta/classes/python3native.bbclass
index ef468b3fd..89665efee 100644
--- a/import-layers/yocto-poky/meta/classes/python3native.bbclass
+++ b/import-layers/yocto-poky/meta/classes/python3native.bbclass
@@ -9,5 +9,8 @@ DEPENDS_append = " ${PYTHON_PN}-native "
export STAGING_INCDIR
export STAGING_LIBDIR
+# suppress host user's site-packages dirs.
+export PYTHONNOUSERSITE = "1"
+
# autoconf macros will use their internal default preference otherwise
export PYTHON
diff --git a/import-layers/yocto-poky/meta/classes/pythonnative.bbclass b/import-layers/yocto-poky/meta/classes/pythonnative.bbclass
index 4e0381b56..4cc8b2769 100644
--- a/import-layers/yocto-poky/meta/classes/pythonnative.bbclass
+++ b/import-layers/yocto-poky/meta/classes/pythonnative.bbclass
@@ -12,5 +12,8 @@ DEPENDS_append = " ${PYTHON_PN}-native "
export STAGING_INCDIR
export STAGING_LIBDIR
+# suppress host user's site-packages dirs.
+export PYTHONNOUSERSITE = "1"
+
# autoconf macros will use their internal default preference otherwise
export PYTHON
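Both native Python classes now export PYTHONNOUSERSITE so the host user's ~/.local site-packages cannot leak into native builds. A quick way to observe the effect (the first value depends on the host environment, e.g. it is already False inside a venv):

import os, subprocess, sys

probe = "import site; print(site.ENABLE_USER_SITE)"
env = dict(os.environ, PYTHONNOUSERSITE="1")
# without the variable: user site-packages usually enabled
print(subprocess.check_output([sys.executable, "-c", probe]).decode().strip())
# with PYTHONNOUSERSITE=1: always disabled
print(subprocess.check_output([sys.executable, "-c", probe], env=env).decode().strip())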
diff --git a/import-layers/yocto-poky/meta/classes/qemuboot.bbclass b/import-layers/yocto-poky/meta/classes/qemuboot.bbclass
index 3468d1c67..15a9e63f2 100644
--- a/import-layers/yocto-poky/meta/classes/qemuboot.bbclass
+++ b/import-layers/yocto-poky/meta/classes/qemuboot.bbclass
@@ -85,10 +85,11 @@ python do_write_qemuboot_conf() {
qemuboot = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_NAME'))
qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_LINK_NAME'))
- topdir="%s/"%(d.getVar('TOPDIR')).replace("//","/")
+ finalpath = d.getVar("DEPLOY_DIR_IMAGE")
+ topdir = d.getVar('TOPDIR')
cf = configparser.ConfigParser()
cf.add_section('config_bsp')
- for k in qemuboot_vars(d):
+ for k in sorted(qemuboot_vars(d)):
# qemu-helper-native sysroot is not removed by rm_work and
# contains all tools required by runqemu
if k == 'STAGING_BINDIR_NATIVE':
@@ -98,7 +99,8 @@ python do_write_qemuboot_conf() {
val = d.getVar(k)
# we only want to write out relative paths so that we can relocate images
# and still run them
- val=val.replace(topdir,"")
+ if val.startswith(topdir):
+ val = os.path.relpath(val, finalpath)
cf.set('config_bsp', k, '%s' % val)
# QB_DEFAULT_KERNEL's value of KERNEL_IMAGETYPE is the name of a symlink
@@ -108,14 +110,15 @@ python do_write_qemuboot_conf() {
kernel = os.path.realpath(kernel_link)
# we only want to write out relative paths so that we can relocate images
# and still run them
- kernel=kernel.replace(topdir,"")
+ kernel = os.path.relpath(kernel, finalpath)
cf.set('config_bsp', 'QB_DEFAULT_KERNEL', kernel)
bb.utils.mkdirhier(os.path.dirname(qemuboot))
with open(qemuboot, 'w') as f:
cf.write(f)
- if os.path.lexists(qemuboot_link):
- os.remove(qemuboot_link)
- os.symlink(os.path.basename(qemuboot), qemuboot_link)
+ if qemuboot_link != qemuboot:
+ if os.path.lexists(qemuboot_link):
+ os.remove(qemuboot_link)
+ os.symlink(os.path.basename(qemuboot), qemuboot_link)
}
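os.path.relpath() replaces the old string substitution of TOPDIR, which left an absolute-looking residue and assumed the deploy directory sat directly under TOPDIR; paths relative to the conf file's final location keep images relocatable. For example (paths invented):

import os

topdir = "/home/user/build"
finalpath = "/home/user/build/tmp/deploy/images/qemux86"
val = "/home/user/build/tmp/deploy/images/qemux86/bzImage"

old = val.replace(topdir, "")           # "/tmp/deploy/images/qemux86/bzImage"
new = os.path.relpath(val, finalpath)   # "bzImage"
print(old, new)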
diff --git a/import-layers/yocto-poky/meta/classes/report-error.bbclass b/import-layers/yocto-poky/meta/classes/report-error.bbclass
index d6fdd364a..1c55abfbf 100644
--- a/import-layers/yocto-poky/meta/classes/report-error.bbclass
+++ b/import-layers/yocto-poky/meta/classes/report-error.bbclass
@@ -29,6 +29,13 @@ python errorreport_handler () {
import json
import codecs
+ def nativelsb():
+ nativelsbstr = e.data.getVar("NATIVELSBSTRING")
+ # provide a bit more host info in case of uninative build
+ if e.data.getVar('UNINATIVE_URL') != 'unset':
+ return '/'.join([nativelsbstr, lsb_distro_identifier(e.data)])
+ return nativelsbstr
+
logpath = e.data.getVar('ERR_REPORT_DIR')
datafile = os.path.join(logpath, "error-report.txt")
@@ -38,7 +45,7 @@ python errorreport_handler () {
machine = e.data.getVar("MACHINE")
data['machine'] = machine
data['build_sys'] = e.data.getVar("BUILD_SYS")
- data['nativelsb'] = e.data.getVar("NATIVELSBSTRING")
+ data['nativelsb'] = nativelsb()
data['distro'] = e.data.getVar("DISTRO")
data['target_sys'] = e.data.getVar("TARGET_SYS")
data['failures'] = []
diff --git a/import-layers/yocto-poky/meta/classes/rm_work.bbclass b/import-layers/yocto-poky/meta/classes/rm_work.bbclass
index badeaeba0..31d99e455 100644
--- a/import-layers/yocto-poky/meta/classes/rm_work.bbclass
+++ b/import-layers/yocto-poky/meta/classes/rm_work.bbclass
@@ -35,22 +35,12 @@ do_rm_work () {
fi
done
- cd ${WORKDIR}
- for dir in *
- do
- # Retain only logs and other files in temp, safely ignore
- # failures of removing pseudo folers on NFS2/3 server.
- if [ $dir = 'pseudo' ]; then
- rm -rf $dir 2> /dev/null || true
- elif ! echo '${RM_WORK_EXCLUDE_ITEMS}' | grep -q -w "$dir"; then
- rm -rf $dir
- fi
- done
-
# Need to add pseudo back or subsequent work in this workdir
# might fail since setscene may not rerun to recreate it
mkdir -p ${WORKDIR}/pseudo/
+ excludes='${RM_WORK_EXCLUDE_ITEMS}'
+
# Change normal stamps into setscene stamps as they better reflect the
# fact WORKDIR is now empty
# Also leave noexec stamps since setscene stamps don't cover them
@@ -71,7 +61,12 @@ do_rm_work () {
i=dummy
break
;;
- *do_rootfs*|*do_image*|*do_bootimg*|*do_bootdirectdisk*|*do_vmimg*|*do_write_qemuboot_conf*)
+ *do_image_complete*)
+ mv $i `echo $i | sed -e "s#do_image_complete#do_image_complete_setscene#"`
+ i=dummy
+ break
+ ;;
+ *do_rootfs*|*do_image*|*do_bootimg*|*do_write_qemuboot_conf*)
i=dummy
break
;;
@@ -79,6 +74,12 @@ do_rm_work () {
i=dummy
break
;;
+ *do_addto_recipe_sysroot*)
+ # Preserve recipe-sysroot-native if do_addto_recipe_sysroot has been used
+ excludes="$excludes recipe-sysroot-native"
+ i=dummy
+ break
+ ;;
# We remove do_package entirely, including any
# sstate version since otherwise we'd need to leave 'plaindirs' around
# such as 'packages' and 'packages-split' and these can be large. No end
@@ -101,6 +102,18 @@ do_rm_work () {
done
rm -f $i
done
+
+ cd ${WORKDIR}
+ for dir in *
+ do
+ # Retain only logs and other files in temp, safely ignore
+ # failures of removing pseudo folders on NFS2/3 server.
+ if [ $dir = 'pseudo' ]; then
+ rm -rf $dir 2> /dev/null || true
+ elif ! echo "$excludes" | grep -q -w "$dir"; then
+ rm -rf $dir
+ fi
+ done
}
do_rm_work_all () {
:
@@ -153,6 +166,10 @@ python inject_rm_work() {
deps = set(bb.build.preceedtask('do_build', True, d))
deps.difference_update(('do_build', 'do_rm_work_all'))
+ # deps can be empty if do_build doesn't exist, e.g. *-initial recipes
+ if not deps:
+ deps = ["do_populate_sysroot", "do_populate_lic"]
+
if pn in excludes:
d.delVarFlag('rm_work_rootfs', 'cleandirs')
d.delVarFlag('rm_work_populatesdk', 'cleandirs')
diff --git a/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass b/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass
index c19ff8738..a4e627fef 100644
--- a/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass
+++ b/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass
@@ -14,6 +14,14 @@ ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
# We also need to do the same for the kernel boot parameters,
# otherwise the kernel or initramfs ends up mounting the rootfs read/write
# (the default) if supported by the underlying storage.
#
# We do this with _append because the default value might get set later with ?=
# and we don't want to disable such a default by setting a value here.
+APPEND_append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
+
# Generates test data file with data store variables expanded in json format
ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data ; "
@@ -84,7 +92,9 @@ systemd_create_users () {
#
read_only_rootfs_hook () {
# Tweak the mount option and fs_passno for rootfs in fstab
- sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
+ if [ -f ${IMAGE_ROOTFS}/etc/fstab ]; then
+ sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
+ fi
# If we're using openssh and the /etc/ssh directory has no pre-generated keys,
# we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
@@ -316,5 +326,5 @@ python rootfs_log_check_recommends() {
continue
if 'unsatisfied recommendation for' in line:
- bb.warn('[log_check] %s: %s' % (d.getVar('PN', True), line))
+ bb.warn('[log_check] %s: %s' % (d.getVar('PN'), line))
}
diff --git a/import-layers/yocto-poky/meta/classes/rootfs_deb.bbclass b/import-layers/yocto-poky/meta/classes/rootfs_deb.bbclass
index 262e3d555..9ee1dfc86 100644
--- a/import-layers/yocto-poky/meta/classes/rootfs_deb.bbclass
+++ b/import-layers/yocto-poky/meta/classes/rootfs_deb.bbclass
@@ -33,6 +33,3 @@ python () {
elif darch == "arm":
d.setVar('DEB_SDK_ARCH', 'armel')
}
-
-# This will of course only work after rootfs_deb_do_rootfs or populate_sdk_deb has been called
-DPKG_QUERY_COMMAND = "${STAGING_BINDIR_NATIVE}/dpkg-query --admindir=$INSTALL_ROOTFS_DEB/var/lib/dpkg"
diff --git a/import-layers/yocto-poky/meta/classes/rootfsdebugfiles.bbclass b/import-layers/yocto-poky/meta/classes/rootfsdebugfiles.bbclass
index a558871e9..e2ba4e364 100644
--- a/import-layers/yocto-poky/meta/classes/rootfsdebugfiles.bbclass
+++ b/import-layers/yocto-poky/meta/classes/rootfsdebugfiles.bbclass
@@ -15,6 +15,10 @@
# ROOTFS_DEBUG_FILES += "${TOPDIR}/conf/dropbear_rsa_host_key ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ;"
# 2. Boot the image once, copy the dropbear_rsa_host_key from
# the device into your build conf directory.
# 3. An optional parameter can be used to set the file mode
# of the copied target, for instance:
# ROOTFS_DEBUG_FILES += "${TOPDIR}/conf/dropbear_rsa_host_key ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key 0600;"
# in case the files are required to have a specific mode. (Shouldn't be too open, for example.)
#
# Do not use for production images! It bypasses several
# core build mechanisms (updating the image when one
@@ -27,10 +31,11 @@ ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed
ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files ;"
rootfs_debug_files () {
#!/bin/sh -e
- echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target; do
+ echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do
if [ -e "$source" ]; then
mkdir -p $(dirname $target)
cp -a $source $target
+ [ -n "$mode" ] && chmod $mode $target
fi
done
}
diff --git a/import-layers/yocto-poky/meta/classes/sanity.bbclass b/import-layers/yocto-poky/meta/classes/sanity.bbclass
index e8064ac48..1feb7949d 100644
--- a/import-layers/yocto-poky/meta/classes/sanity.bbclass
+++ b/import-layers/yocto-poky/meta/classes/sanity.bbclass
@@ -350,6 +350,14 @@ def check_not_nfs(path, name):
return "The %s: %s can't be located on nfs.\n" % (name, path)
return ""
+# Check that the path is on a case-sensitive file system
+def check_case_sensitive(path, name):
+ import tempfile
+ with tempfile.NamedTemporaryFile(prefix='TmP', dir=path) as tmp_file:
+ if os.path.exists(tmp_file.name.lower()):
+ return "The %s (%s) can't be on a case-insensitive file system.\n" % (name, path)
+ return ""
+
# Check that path isn't a broken symlink
def check_symlink(lnk, data):
if os.path.islink(lnk) and not os.path.exists(lnk):
@@ -448,45 +456,6 @@ def check_sanity_validmachine(sanity_data):
return messages
-# Checks if necessary to add option march to host gcc
-def check_gcc_march(sanity_data):
- result = True
- message = ""
-
- # Check if -march not in BUILD_CFLAGS
- if sanity_data.getVar("BUILD_CFLAGS").find("-march") < 0:
- result = False
-
- # Construct a test file
- f = open("gcc_test.c", "w")
- f.write("int main (){ volatile int atomic = 2; __sync_bool_compare_and_swap (&atomic, 2, 3); return 0; }\n")
- f.close()
-
- # Check if GCC could work without march
- if not result:
- status,res = oe.utils.getstatusoutput(sanity_data.expand("${BUILD_CC} gcc_test.c -o gcc_test"))
- if status == 0:
- result = True;
-
- if not result:
- status,res = oe.utils.getstatusoutput(sanity_data.expand("${BUILD_CC} -march=native gcc_test.c -o gcc_test"))
- if status == 0:
- message = "BUILD_CFLAGS_append = \" -march=native\""
- result = True;
-
- if not result:
- build_arch = sanity_data.getVar('BUILD_ARCH')
- status,res = oe.utils.getstatusoutput(sanity_data.expand("${BUILD_CC} -march=%s gcc_test.c -o gcc_test" % build_arch))
- if status == 0:
- message = "BUILD_CFLAGS_append = \" -march=%s\"" % build_arch
- result = True;
-
- os.remove("gcc_test.c")
- if os.path.exists("gcc_test"):
- os.remove("gcc_test")
-
- return (result, message)
-
# Unpatched versions of make 3.82 are known to be broken. See GNU Savannah Bug 30612.
# Use a modified reproducer from http://savannah.gnu.org/bugs/?30612 to validate.
def check_make_version(sanity_data):
@@ -612,7 +581,7 @@ def check_sanity_sstate_dir_change(sstate_dir, data):
except IndexError:
pass
return testmsg
-
+
def check_sanity_version_change(status, d):
# Sanity checks to be done when SANITY_VERSION or NATIVELSBSTRING changes
# In other words, these tests run once in a given build directory and then
@@ -657,23 +626,6 @@ def check_sanity_version_change(status, d):
if "diffstat-native" not in assume_provided:
status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n')
- if "qemu-native" in assume_provided:
- if not check_app_exists("qemu-arm", d):
- status.addresult("qemu-native was in ASSUME_PROVIDED but the QEMU binaries (qemu-arm) can't be found in PATH")
-
- if "libsdl-native" in assume_provided:
- if not check_app_exists("sdl-config", d):
- status.addresult("libsdl-native is set to be ASSUME_PROVIDED but sdl-config can't be found in PATH. Please either install it, or configure qemu not to require sdl.")
-
- (result, message) = check_gcc_march(d)
- if result and message:
- status.addresult("Your gcc version is older than 4.5, please add the following param to local.conf\n \
- %s\n" % message)
- if not result:
- status.addresult("Your gcc version is older than 4.5 or is not working properly. Please verify you can build")
- status.addresult(" and link something that uses atomic operations, such as: \n")
- status.addresult(" __sync_bool_compare_and_swap (&atomic, 2, 3);\n")
-
# Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
tmpdir = d.getVar('TMPDIR')
status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
@@ -728,6 +680,10 @@ def check_sanity_version_change(status, d):
# Check that TMPDIR isn't located on nfs
status.addresult(check_not_nfs(tmpdir, "TMPDIR"))
+ # Check for case-insensitive file systems (such as Linux in Docker on
+ # macOS with default HFS+ file system)
+ status.addresult(check_case_sensitive(tmpdir, "TMPDIR"))
+
def sanity_check_locale(d):
"""
Currently bitbake switches locale to en_US.UTF-8 so check that this locale actually exists.
@@ -746,10 +702,10 @@ def check_sanity_everybuild(status, d):
if 0 == os.getuid():
raise_sanity_error("Do not use Bitbake as root.", d)
- # Check the Python version, we now have a minimum of Python 2.7.3
+ # Check the Python version, we now have a minimum of Python 3.4
import sys
- if sys.hexversion < 0x020703F0:
- status.addresult('The system requires at least Python 2.7.3 to run. Please update your Python interpreter.\n')
+ if sys.hexversion < 0x03040000:
+ status.addresult('The system requires at least Python 3.4 to run. Please update your Python interpreter.\n')
# Check the bitbake version meets minimum requirements
from distutils.version import LooseVersion
@@ -770,6 +726,11 @@ def check_sanity_everybuild(status, d):
if not ( check_conf_exists("conf/distro/${DISTRO}.conf", d) or check_conf_exists("conf/distro/include/${DISTRO}.inc", d) ):
status.addresult("DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % d.getVar("DISTRO"))
+ # Check that these variables don't use tilde-expansion as we don't do that
+ for v in ("TMPDIR", "DL_DIR", "SSTATE_DIR"):
+ if d.getVar(v).startswith("~"):
+ status.addresult("%s uses ~ but Bitbake will not expand this, use an absolute path or variables." % v)
+
# Check that DL_DIR is set, exists and is writable. In theory, we should never even hit the check if DL_DIR isn't
# set, since so much relies on it being set.
dldir = d.getVar('DL_DIR')
@@ -839,7 +800,7 @@ def check_sanity_everybuild(status, d):
# Split into pairs
if len(mirrors) % 2 != 0:
- bb.warn('Invalid mirror variable value for %s: %s, should contain paired members.' % (mirror_var, mirrors.strip()))
+ bb.warn('Invalid mirror variable value for %s: %s, should contain paired members.' % (mirror_var, str(mirrors)))
continue
mirrors = list(zip(*[iter(mirrors)]*2))
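The check_case_sensitive() helper called from the new TMPDIR hunk is not shown in this diff. A minimal sketch of such a probe, assuming a mixed-case temporary file name (the actual helper may differ):

    import os
    import tempfile

    def check_case_sensitive(path, name):
        # On a case-insensitive file system (e.g. HFS+ under Docker on macOS)
        # the lower-cased name resolves to the probe file we just created.
        with tempfile.NamedTemporaryFile(prefix='TmP', dir=path) as tmp_file:
            if os.path.exists(tmp_file.name.lower()):
                return "The path used by %s (%s) must be on a case-sensitive file system.\n" % (name, path)
        return ""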
diff --git a/import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass b/import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass
index 71df03bab..f03c4802d 100644
--- a/import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass
+++ b/import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass
@@ -28,6 +28,9 @@ PACKAGE_FEED_SIGN = '1'
PACKAGE_FEED_GPG_BACKEND ?= 'local'
PACKAGE_FEED_GPG_SIGNATURE_TYPE ?= 'ASC'
+# Ensure the feed signing key is installed in the rootfs
+FEATURE_PACKAGES_package-management_append = " signing-keys-packagefeed"
+
python () {
# Check sanity of configuration
for var in ('PACKAGE_FEED_GPG_NAME', 'PACKAGE_FEED_GPG_PASSPHRASE_FILE'):
diff --git a/import-layers/yocto-poky/meta/classes/sign_rpm.bbclass b/import-layers/yocto-poky/meta/classes/sign_rpm.bbclass
index bc2e94710..4961b0361 100644
--- a/import-layers/yocto-poky/meta/classes/sign_rpm.bbclass
+++ b/import-layers/yocto-poky/meta/classes/sign_rpm.bbclass
@@ -9,16 +9,30 @@
# Optional variable for specifying the backend to use for signing.
# Currently the only available option is 'local', i.e. local signing
# on the build host.
+# RPM_FILE_CHECKSUM_DIGEST
+# Optional variable for specifying the algorithm for generating file
+# checksum digest.
+# RPM_FSK_PATH
+# Optional variable for the file signing key.
+# RPM_FSK_PASSWORD
+# Optional variable for the file signing key password.
# GPG_BIN
# Optional variable for specifying the gpg binary/wrapper to use for
# signing.
+# RPM_GPG_SIGN_CHUNK
+# Optional variable indicating the number of packages signed per gpg
+# invocation.
# GPG_PATH
# Optional variable for specifying the gnupg "home" directory:
-#
+
inherit sanity
RPM_SIGN_PACKAGES='1'
+RPM_SIGN_FILES ?= '0'
RPM_GPG_BACKEND ?= 'local'
+# Algorithm ID '8' selects SHA-256 (the default)
+RPM_FILE_CHECKSUM_DIGEST ?= '8'
+RPM_GPG_SIGN_CHUNK ?= "${BB_NUMBER_THREADS}"
python () {
@@ -28,6 +42,11 @@ python () {
for var in ('RPM_GPG_NAME', 'RPM_GPG_PASSPHRASE'):
if not d.getVar(var):
raise_sanity_error("You need to define %s in the config" % var, d)
+
+ if d.getVar('RPM_SIGN_FILES') == '1':
+ for var in ('RPM_FSK_PATH', 'RPM_FSK_PASSWORD'):
+ if not d.getVar(var):
+ raise_sanity_error("You need to define %s in the config" % var, d)
}
python sign_rpm () {
@@ -39,8 +58,18 @@ python sign_rpm () {
signer.sign_rpms(rpms,
d.getVar('RPM_GPG_NAME'),
- d.getVar('RPM_GPG_PASSPHRASE'))
+ d.getVar('RPM_GPG_PASSPHRASE'),
+ d.getVar('RPM_FILE_CHECKSUM_DIGEST'),
+ int(d.getVar('RPM_GPG_SIGN_CHUNK')),
+ d.getVar('RPM_FSK_PATH'),
+ d.getVar('RPM_FSK_PASSWORD'))
}
do_package_index[depends] += "signing-keys:do_deploy"
do_rootfs[depends] += "signing-keys:do_populate_sysroot"
+
+# Newer versions of gpg (at least 2.1.5 and 2.2.1) have issues when signing occurs in parallel
+# so unfortunately the signing must be done serially. Once the upstream problem is fixed,
+# the following line must be removed, otherwise we lose all the intrinsic parallelism from
+# bitbake. For more information, check https://bugzilla.yoctoproject.org/show_bug.cgi?id=12022.
+do_package_write_rpm[lockfiles] += "${TMPDIR}/gpg.lock"
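RPM_GPG_SIGN_CHUNK bounds how many packages each gpg invocation receives. A hedged sketch of that batching, not the actual oe.gpg_sign backend (command layout illustrative):

    import subprocess

    def sign_rpms_in_chunks(rpms, keyid, chunk):
        # One rpmsign call per batch of at most 'chunk' packages;
        # check_call aborts the signing step on the first failed batch.
        for i in range(0, len(rpms), chunk):
            batch = list(rpms[i:i + chunk])
            cmd = ['rpmsign', '--define', '_gpg_name %s' % keyid, '--addsign'] + batch
            subprocess.check_call(cmd)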
diff --git a/import-layers/yocto-poky/meta/classes/siteinfo.bbclass b/import-layers/yocto-poky/meta/classes/siteinfo.bbclass
index 2c33732be..1aada4069 100644
--- a/import-layers/yocto-poky/meta/classes/siteinfo.bbclass
+++ b/import-layers/yocto-poky/meta/classes/siteinfo.bbclass
@@ -62,10 +62,8 @@ def siteinfo_data(d):
"linux-gnun32": "common-linux common-glibc",
"linux-gnueabi": "common-linux common-glibc",
"linux-gnuspe": "common-linux common-glibc",
- "linux-uclibc": "common-linux common-uclibc",
- "linux-uclibceabi": "common-linux common-uclibc",
- "linux-uclibcspe": "common-linux common-uclibc",
"linux-musl": "common-linux common-musl",
+ "linux-muslx32": "common-linux common-musl",
"linux-musleabi": "common-linux common-musl",
"linux-muslspe": "common-linux common-musl",
"uclinux-uclibc": "common-uclibc",
@@ -79,9 +77,7 @@ def siteinfo_data(d):
"aarch64_be-linux-musl": "aarch64_be-linux",
"arm-linux-gnueabi": "arm-linux",
"arm-linux-musleabi": "arm-linux",
- "arm-linux-uclibceabi": "arm-linux-uclibc",
"armeb-linux-gnueabi": "armeb-linux",
- "armeb-linux-uclibceabi": "armeb-linux-uclibc",
"armeb-linux-musleabi": "armeb-linux",
"mips-linux-musl": "mips-linux",
"mipsel-linux-musl": "mipsel-linux",
@@ -93,10 +89,8 @@ def siteinfo_data(d):
"mipsisa64r6el-linux-gnun32": "mipsisa32r6el-linux bit-32",
"powerpc-linux": "powerpc32-linux",
"powerpc-linux-musl": "powerpc-linux powerpc32-linux",
- "powerpc-linux-uclibc": "powerpc-linux powerpc32-linux",
"powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux",
"powerpc-linux-muslspe": "powerpc-linux powerpc32-linux",
- "powerpc-linux-uclibcspe": "powerpc-linux powerpc32-linux powerpc-linux-uclibc",
"powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux",
"powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux",
"powerpc64-linux": "powerpc-linux",
@@ -106,7 +100,7 @@ def siteinfo_data(d):
"x86_64-darwin9": "bit-64",
"x86_64-linux": "bit-64",
"x86_64-linux-musl": "x86_64-linux bit-64",
- "x86_64-linux-uclibc": "bit-64",
+ "x86_64-linux-muslx32": "bit-32 ix86-common x32-linux",
"x86_64-elf": "bit-64",
"x86_64-linux-gnu": "bit-64 x86_64-linux",
"x86_64-linux-gnux32": "bit-32 ix86-common x32-linux",
@@ -159,7 +153,7 @@ python () {
bb.fatal("Please add your architecture to siteinfo.bbclass")
}
-def siteinfo_get_files(d, aclocalcache = False):
+def siteinfo_get_files(d, sysrootcache = False):
sitedata = siteinfo_data(d)
sitefiles = ""
for path in d.getVar("BBPATH").split(":"):
@@ -168,18 +162,11 @@ def siteinfo_get_files(d, aclocalcache = False):
if os.path.exists(filename):
sitefiles += filename + " "
- if not aclocalcache:
+ if not sysrootcache:
return sitefiles
- # Now check for siteconfig cache files in the directory setup by autotools.bbclass to
- # avoid races.
- #
- # ACLOCALDIR may or may not exist so cache should only be set to True from autotools.bbclass
- # after files have been copied into this location. To do otherwise risks parsing/signature
- # issues and the directory being created/removed whilst this code executes. This can happen
- # when a multilib recipe is parsed along with its base variant which may be running at the time
- # causing rare but nasty failures
- path_siteconfig = d.getVar('ACLOCALDIR')
+ # Now check for siteconfig cache files in sysroots
+ path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE')
if path_siteconfig and os.path.isdir(path_siteconfig):
for i in os.listdir(path_siteconfig):
if not i.endswith("_config"):
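The BBPATH scan that siteinfo_get_files() performs before the new sysroot-cache lookup, reduced to its core (a standalone sketch of the loop visible above):

    import os

    def collect_site_files(bbpath, sitedata):
        # Pick up site/<class> files (e.g. site/common-linux, site/bit-64)
        # from every layer directory listed in BBPATH.
        sitefiles = []
        for path in bbpath.split(":"):
            for element in sitedata:
                filename = os.path.join(path, "site", element)
                if os.path.exists(filename):
                    sitefiles.append(filename)
        return sitefiles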
diff --git a/import-layers/yocto-poky/meta/classes/sstate.bbclass b/import-layers/yocto-poky/meta/classes/sstate.bbclass
index 0a12935be..e30fbe128 100644
--- a/import-layers/yocto-poky/meta/classes/sstate.bbclass
+++ b/import-layers/yocto-poky/meta/classes/sstate.bbclass
@@ -346,8 +346,6 @@ def sstate_installpkgdir(ss, d):
oe.path.remove(dir)
for state in ss['dirs']:
- if d.getVar('SSTATE_SKIP_CREATION') == '1':
- continue
prepdir(state[1])
os.rename(sstateinst + state[0], state[1])
sstate_install(ss, d)
@@ -404,7 +402,7 @@ python sstate_hardcode_path_unpack () {
return
bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
- subprocess.call(sstate_hardcode_cmd, shell=True)
+ subprocess.check_call(sstate_hardcode_cmd, shell=True)
# Need to remove this or we'd copy it into the target directory and may
# conflict with another writer
@@ -453,7 +451,7 @@ def sstate_clean_manifest(manifest, d, prefix=None):
if os.path.exists(manifest + ".postrm"):
import subprocess
os.chmod(postrm, 0o755)
- subprocess.call(postrm, shell=True)
+ subprocess.check_call(postrm, shell=True)
oe.path.remove(postrm)
oe.path.remove(manifest)
@@ -596,8 +594,6 @@ def sstate_package(ss, d):
for state in ss['dirs']:
if not os.path.exists(state[1]):
continue
- if d.getVar('SSTATE_SKIP_CREATION') == '1':
- continue
srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
# Find and error for absolute symlinks. We could attempt to relocate but its not
# clear where the symlink is relative to in this context. We could add that markup
@@ -625,6 +621,10 @@ def sstate_package(ss, d):
d.setVar('SSTATE_BUILDDIR', sstatebuild)
d.setVar('SSTATE_PKG', sstatepkg)
+ d.setVar('SSTATE_INSTDIR', sstatebuild)
+
+ if d.getVar('SSTATE_SKIP_CREATION') == '1':
+ return
for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
['sstate_create_package', 'sstate_sign_package'] + \
@@ -634,8 +634,6 @@ def sstate_package(ss, d):
bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)
- d.setVar('SSTATE_INSTDIR', sstatebuild)
-
return
def pstaging_fetch(sstatefetch, sstatepkg, d):
@@ -969,7 +967,8 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
if isNativeCross(taskdependees[dep][0]):
return False
# Native/cross tools depended upon by target sysroot are not needed
- if isNativeCross(taskdependees[task][0]):
+ # Add an exception for shadow-native as required by useradd.bbclass
+ if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
continue
# Target populate_sysroot need their dependencies
return False
@@ -1017,6 +1016,11 @@ python sstate_eventhandler2() {
d = e.data
stamps = e.stamps.values()
removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
+ preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
+ preservestamps = []
+ if os.path.exists(preservestampfile):
+ with open(preservestampfile, 'r') as f:
+ preservestamps = f.readlines()
seen = []
for a in d.getVar("SSTATE_ARCHS").split():
toremove = []
@@ -1027,7 +1031,7 @@ python sstate_eventhandler2() {
lines = f.readlines()
for l in lines:
(stamp, manifest, workdir) = l.split()
- if stamp not in stamps:
+ if stamp not in stamps and stamp not in preservestamps:
toremove.append(l)
if stamp not in seen:
bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
@@ -1049,4 +1053,6 @@ python sstate_eventhandler2() {
with open(i, "w") as f:
for l in lines:
f.write(l)
+ if preservestamps:
+ os.remove(preservestampfile)
}
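The preserve-stamps hook reads stamp names from ${SSTATE_MANIFESTS}/preserve-stamps and exempts them from cleanup. A standalone sketch of that read; splitting on whitespace (rather than readlines()) sidesteps comparisons against entries that still carry a trailing newline:

    import os

    def read_preserve_stamps(preservestampfile):
        # Stamps listed in this file survive obsolete-manifest pruning.
        preservestamps = []
        if os.path.exists(preservestampfile):
            with open(preservestampfile) as f:
                preservestamps = f.read().split()
        return preservestamps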
diff --git a/import-layers/yocto-poky/meta/classes/staging.bbclass b/import-layers/yocto-poky/meta/classes/staging.bbclass
index 984051d6a..c479bd93e 100644
--- a/import-layers/yocto-poky/meta/classes/staging.bbclass
+++ b/import-layers/yocto-poky/meta/classes/staging.bbclass
@@ -68,101 +68,19 @@ sysroot_stage_all() {
}
python sysroot_strip () {
- import stat, errno
+ inhibit_sysroot = d.getVar('INHIBIT_SYSROOT_STRIP')
+ if inhibit_sysroot and oe.types.boolean(inhibit_sysroot):
+ return 0
- dvar = d.getVar('SYSROOT_DESTDIR')
+ dstdir = d.getVar('SYSROOT_DESTDIR')
pn = d.getVar('PN')
+ libdir = os.path.abspath(dstdir + os.sep + d.getVar("libdir"))
+ base_libdir = os.path.abspath(dstdir + os.sep + d.getVar("base_libdir"))
+ qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split()
+ strip_cmd = d.getVar("STRIP")
- os.chdir(dvar)
-
- # Return type (bits):
- # 0 - not elf
- # 1 - ELF
- # 2 - stripped
- # 4 - executable
- # 8 - shared library
- # 16 - kernel module
- def isELF(path):
- type = 0
- ret, result = oe.utils.getstatusoutput("file \"%s\"" % path.replace("\"", "\\\""))
-
- if ret:
- bb.error("split_and_strip_files: 'file %s' failed" % path)
- return type
-
- # Not stripped
- if "ELF" in result:
- type |= 1
- if "not stripped" not in result:
- type |= 2
- if "executable" in result:
- type |= 4
- if "shared" in result:
- type |= 8
- return type
-
-
- elffiles = {}
- inodes = {}
- libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir"))
- baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir"))
- if (d.getVar('INHIBIT_SYSROOT_STRIP') != '1'):
- #
- # First lets figure out all of the files we may have to process
- #
- for root, dirs, files in os.walk(dvar):
- for f in files:
- file = os.path.join(root, f)
-
- try:
- ltarget = oe.path.realpath(file, dvar, False)
- s = os.lstat(ltarget)
- except OSError as e:
- (err, strerror) = e.args
- if err != errno.ENOENT:
- raise
- # Skip broken symlinks
- continue
- if not s:
- continue
- # Check its an excutable
- if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \
- or ((file.startswith(libdir) or file.startswith(baselibdir)) and ".so" in f):
- # If it's a symlink, and points to an ELF file, we capture the readlink target
- if os.path.islink(file):
- continue
-
- # It's a file (or hardlink), not a link
- # ...but is it ELF, and is it already stripped?
- elf_file = isELF(file)
- if elf_file & 1:
- if elf_file & 2:
- if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split():
- bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
- else:
- bb.warn("File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn))
- continue
-
- if s.st_ino in inodes:
- os.unlink(file)
- os.link(inodes[s.st_ino], file)
- else:
- inodes[s.st_ino] = file
- # break hardlink
- bb.utils.copyfile(file, file)
- elffiles[file] = elf_file
-
- #
- # Now strip them (in parallel)
- #
- strip = d.getVar("STRIP")
- sfiles = []
- for file in elffiles:
- elf_file = int(elffiles[file])
- #bb.note("Strip %s" % file)
- sfiles.append((file, elf_file, strip))
-
- oe.utils.multiprocess_exec(sfiles, oe.package.runstrip)
+ oe.package.strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir,
+ qa_already_stripped=qa_already_stripped)
}
do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"
@@ -259,6 +177,7 @@ def staging_processfixme(fixme, target, recipesysroot, recipesysrootnative, d):
def staging_populate_sysroot_dir(targetsysroot, nativesysroot, native, d):
import glob
import subprocess
+ import errno
fixme = []
postinsts = []
@@ -454,7 +373,8 @@ python extend_recipe_sysroot() {
msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
next = new
- bb.note("\n".join(msgbuf))
+    # This logging is too verbose for day-to-day use, sadly
+ #bb.debug(2, "\n".join(msgbuf))
depdir = recipesysrootnative + "/installeddeps"
bb.utils.mkdirhier(depdir)
@@ -469,6 +389,8 @@ python extend_recipe_sysroot() {
postinsts = []
multilibs = {}
manifests = {}
+ # All files that we're going to be installing, to find conflicts.
+ fileset = {}
for f in os.listdir(depdir):
if not f.endswith(".complete"):
@@ -521,6 +443,8 @@ python extend_recipe_sysroot() {
os.unlink(fl)
os.unlink(fl + ".complete")
+ msg_exists = []
+ msg_adding = []
for dep in configuredeps:
c = setscenedeps[dep][0]
if c not in installed:
@@ -531,7 +455,7 @@ python extend_recipe_sysroot() {
if os.path.exists(depdir + "/" + c):
lnk = os.readlink(depdir + "/" + c)
if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
- bb.note("%s exists in sysroot, skipping" % c)
+ msg_exists.append(c)
continue
else:
bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
@@ -542,6 +466,8 @@ python extend_recipe_sysroot() {
elif os.path.lexists(depdir + "/" + c):
os.unlink(depdir + "/" + c)
+ msg_adding.append(c)
+
os.symlink(c + "." + taskhash, depdir + "/" + c)
d2 = d
@@ -595,8 +521,19 @@ python extend_recipe_sysroot() {
if l.endswith("/fixmepath.cmd"):
continue
dest = l.replace(stagingdir, "")
- dest = targetdir + "/" + "/".join(dest.split("/")[3:])
- newmanifest[l] = dest
+ dest = "/" + "/".join(dest.split("/")[3:])
+ newmanifest[l] = targetdir + dest
+
+ # Check if files have already been installed by another
+            # recipe and abort if they have, naming the conflicting
+            # recipes.
+ hashname = targetdir + dest
+ if not hashname.endswith("/"):
+ if hashname in fileset:
+ bb.fatal("The file %s is installed by both %s and %s, aborting" % (dest, c, fileset[hashname]))
+ else:
+ fileset[hashname] = c
+
# Having multiple identical manifests in each sysroot eats diskspace so
# create a shared pool of them and hardlink if we can.
# We create the manifest in advance so that if something fails during installation,
@@ -627,6 +564,9 @@ python extend_recipe_sysroot() {
continue
staging_copyfile(l, targetdir, dest, postinsts, seendirs)
+ bb.note("Installed into sysroot: %s" % str(msg_adding))
+ bb.note("Skipping as already exists in sysroot: %s" % str(msg_exists))
+
for f in fixme:
if f == '':
staging_processfixme(fixme[f], recipesysroot, recipesysroot, recipesysrootnative, d)
@@ -658,6 +598,9 @@ addtask do_prepare_recipe_sysroot before do_configure after do_fetch
# Clean out the recipe specific sysroots before do_fetch
# (use a prefunc so we can order before extend_recipe_sysroot if it gets added)
python clean_recipe_sysroot() {
+ # We remove these stamps since we're removing any content they'd have added with
+    # cleandirs. This removes the sigdata too, which is likely not a big deal.
+ oe.path.remove(d.getVar("STAMP") + "*addto_recipe_sysroot*")
return
}
clean_recipe_sysroot[cleandirs] += "${RECIPE_SYSROOT} ${RECIPE_SYSROOT_NATIVE}"
@@ -672,4 +615,3 @@ python staging_taskhandler() {
}
staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
addhandler staging_taskhandler
-
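The sysroot conflict detection added to extend_recipe_sysroot() is a first-writer-wins map from destination path to recipe. A minimal model of the same logic (input shape assumed):

    def check_conflicts(manifests):
        # manifests: mapping of recipe name -> iterable of destination paths.
        fileset = {}
        for recipe, paths in manifests.items():
            for dest in paths:
                if dest.endswith("/"):
                    continue  # shared directories are not conflicts
                if dest in fileset:
                    raise RuntimeError("The file %s is installed by both %s and %s"
                                       % (dest, recipe, fileset[dest]))
                fileset[dest] = recipe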
diff --git a/import-layers/yocto-poky/meta/classes/systemd-boot.bbclass b/import-layers/yocto-poky/meta/classes/systemd-boot.bbclass
index 959775992..937307076 100644
--- a/import-layers/yocto-poky/meta/classes/systemd-boot.bbclass
+++ b/import-layers/yocto-poky/meta/classes/systemd-boot.bbclass
@@ -7,10 +7,9 @@
# maintenance.
#
# Set EFI_PROVIDER = "systemd-boot" to use systemd-boot on your live images instead of grub-efi
-# (images built by image-live.bbclass or image-vm.bbclass)
+# (images built by image-live.bbclass)
do_bootimg[depends] += "${MLPREFIX}systemd-boot:do_deploy"
-do_bootdirectdisk[depends] += "${MLPREFIX}systemd-boot:do_deploy"
EFIDIR = "/EFI/BOOT"
@@ -100,6 +99,8 @@ python build_efi_cfg() {
bb.fatal('OVERRIDES not defined')
entryfile = "%s/%s.conf" % (s, label)
+ if not os.path.exists(s):
+ os.makedirs(s)
d.appendVar("SYSTEMD_BOOT_ENTRIES", " " + entryfile)
try:
entrycfg = open(entryfile, "w")
diff --git a/import-layers/yocto-poky/meta/classes/systemd.bbclass b/import-layers/yocto-poky/meta/classes/systemd.bbclass
index c4b4bb9b7..1b134322f 100644
--- a/import-layers/yocto-poky/meta/classes/systemd.bbclass
+++ b/import-layers/yocto-poky/meta/classes/systemd.bbclass
@@ -154,8 +154,10 @@ python systemd_populate_packages() {
# Deal with adding, for example, 'ifplugd@eth0.service' from
# 'ifplugd@.service'
base = None
- if service.find('@') != -1:
- base = re.sub('@[^.]+.', '@.', service)
+ at = service.find('@')
+ if at != -1:
+ ext = service.rfind('.')
+ base = service[:at] + '@' + service[ext:]
for path in searchpaths:
if os.path.exists(oe.path.join(d.getVar("D"), path, service)):
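A worked example of the template-unit fix: the base name is now derived by slicing at the first '@' and the last '.', which also copes with instance names that contain dots, where the old unescaped regex could misfire:

    def template_base(service):
        # 'ifplugd@eth0.service' -> 'ifplugd@.service'
        at = service.find('@')
        if at == -1:
            return None
        ext = service.rfind('.')
        return service[:at] + '@' + service[ext:]

    assert template_base('ifplugd@eth0.service') == 'ifplugd@.service'
    assert template_base('backup@mnt.data.service') == 'backup@.service'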
diff --git a/import-layers/yocto-poky/meta/classes/testexport.bbclass b/import-layers/yocto-poky/meta/classes/testexport.bbclass
index 56edda994..d070f07af 100644
--- a/import-layers/yocto-poky/meta/classes/testexport.bbclass
+++ b/import-layers/yocto-poky/meta/classes/testexport.bbclass
@@ -113,7 +113,7 @@ def copy_needed_files(d, tc):
oe.path.remove(cases_path)
bb.utils.mkdirhier(cases_path)
test_paths = get_runtime_paths(d)
- test_modules = d.getVar('TEST_SUITES')
+ test_modules = d.getVar('TEST_SUITES').split()
tc.loadTests(test_paths, modules=test_modules)
for f in getSuiteCasesFiles(tc.suites):
shutil.copy2(f, cases_path)
diff --git a/import-layers/yocto-poky/meta/classes/testimage.bbclass b/import-layers/yocto-poky/meta/classes/testimage.bbclass
index fb214604a..45bb2bda3 100644
--- a/import-layers/yocto-poky/meta/classes/testimage.bbclass
+++ b/import-layers/yocto-poky/meta/classes/testimage.bbclass
@@ -7,13 +7,13 @@
# Most of the tests are commands run on target image over ssh.
# To use it add testimage to global inherit and call your target image with -c testimage
# You can try it out like this:
-# - first build a qemu core-image-sato
-# - add IMAGE_CLASSES += "testimage" in local.conf
+# - first add IMAGE_CLASSES += "testimage" in local.conf
+# - build a qemu core-image-sato
# - then bitbake core-image-sato -c testimage. That will run a standard suite of tests.
# You can set (or append to) TEST_SUITES in local.conf to select the tests
# which you want to run for your target.
-# The test names are the module names in meta/lib/oeqa/runtime.
+# The test names are the module names in meta/lib/oeqa/runtime/cases.
# Each name in TEST_SUITES represents a required test for the image (no skipping allowed).
# Appending "auto" means that it will try to run all tests that are suitable for the image (each test decides that on its own).
# Note that order in TEST_SUITES is relevant: tests are run in an order such that
@@ -49,10 +49,10 @@ DEFAULT_TEST_SUITES_pn-core-image-x11 = "${MINTESTSUITE}"
DEFAULT_TEST_SUITES_pn-core-image-lsb = "${NETTESTSUITE} pam parselogs ${RPMTESTSUITE}"
DEFAULT_TEST_SUITES_pn-core-image-sato = "${NETTESTSUITE} connman xorg parselogs ${RPMTESTSUITE} \
${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'python', '', d)}"
-DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "${NETTESTSUITE} buildcpio buildiptables buildgalculator \
+DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "${NETTESTSUITE} buildcpio buildlzip buildgalculator \
connman ${DEVTESTSUITE} logrotate perl parselogs python ${RPMTESTSUITE} xorg"
DEFAULT_TEST_SUITES_pn-core-image-lsb-dev = "${NETTESTSUITE} pam perl python parselogs ${RPMTESTSUITE}"
-DEFAULT_TEST_SUITES_pn-core-image-lsb-sdk = "${NETTESTSUITE} buildcpio buildiptables buildgalculator \
+DEFAULT_TEST_SUITES_pn-core-image-lsb-sdk = "${NETTESTSUITE} buildcpio buildlzip buildgalculator \
connman ${DEVTESTSUITE} logrotate pam parselogs perl python ${RPMTESTSUITE}"
DEFAULT_TEST_SUITES_pn-meta-toolchain = "auto"
@@ -61,7 +61,7 @@ DEFAULT_TEST_SUITES_remove_aarch64 = "xorg"
# qemumips is quite slow and has reached the timeout limit several times on the YP build cluster,
# mitigate this by removing build tests for qemumips machines.
-MIPSREMOVE ??= "buildcpio buildiptables buildgalculator"
+MIPSREMOVE ??= "buildcpio buildlzip buildgalculator"
DEFAULT_TEST_SUITES_remove_qemumips = "${MIPSREMOVE}"
DEFAULT_TEST_SUITES_remove_qemumips64 = "${MIPSREMOVE}"
@@ -248,7 +248,7 @@ def testimage_main(d):
# the robot dance
target = OERuntimeTestContextExecutor.getTarget(
- d.getVar("TEST_TARGET"), None, d.getVar("TEST_TARGET_IP"),
+ d.getVar("TEST_TARGET"), logger, d.getVar("TEST_TARGET_IP"),
d.getVar("TEST_SERVER_IP"), **target_kwargs)
# test context
@@ -257,7 +257,7 @@ def testimage_main(d):
# Load tests before starting the target
test_paths = get_runtime_paths(d)
- test_modules = d.getVar('TEST_SUITES')
+ test_modules = d.getVar('TEST_SUITES').split()
tc.loadTests(test_paths, modules=test_modules)
if not getSuiteCases(tc.suites):
@@ -291,11 +291,11 @@ def testimage_main(d):
# Show results (if we have them)
if not results:
- bb.fatal('%s - FAILED - tests were interrupted during execution' % pn)
- tc.logSummary(results, pn)
- tc.logDetails()
+ bb.fatal('%s - FAILED - tests were interrupted during execution' % pn, forcelog=True)
+ results.logDetails()
+ results.logSummary(pn)
if not results.wasSuccessful():
- bb.fatal('%s - FAILED - check the task log and the ssh log' % pn)
+ bb.fatal('%s - FAILED - check the task log and the ssh log' % pn, forcelog=True)
def get_runtime_paths(d):
"""
diff --git a/import-layers/yocto-poky/meta/classes/testsdk.bbclass b/import-layers/yocto-poky/meta/classes/testsdk.bbclass
index 6a201aa41..6b51a33db 100644
--- a/import-layers/yocto-poky/meta/classes/testsdk.bbclass
+++ b/import-layers/yocto-poky/meta/classes/testsdk.bbclass
@@ -21,10 +21,11 @@ def testsdk_main(d):
import logging
from bb.utils import export_proxies
- from oeqa.core.runner import OEStreamLogger
from oeqa.sdk.context import OESDKTestContext, OESDKTestContextExecutor
from oeqa.utils import make_logger_bitbake_compatible
+ bb.event.enable_threadlock()
+
pn = d.getVar("PN")
logger = make_logger_bitbake_compatible(logging.getLogger("BitBake"))
@@ -71,8 +72,8 @@ def testsdk_main(d):
component = "%s %s" % (pn, OESDKTestContextExecutor.name)
context_msg = "%s:%s" % (os.path.basename(tcname), os.path.basename(sdk_env))
- tc.logSummary(result, component, context_msg)
- tc.logDetails()
+ result.logDetails()
+ result.logSummary(component, context_msg)
if not result.wasSuccessful():
fail = True
@@ -98,6 +99,8 @@ def testsdkext_main(d):
from oeqa.utils import avoid_paths_in_environ, make_logger_bitbake_compatible, subprocesstweak
from oeqa.sdkext.context import OESDKExtTestContext, OESDKExtTestContextExecutor
+ bb.event.enable_threadlock()
+
pn = d.getVar("PN")
logger = make_logger_bitbake_compatible(logging.getLogger("BitBake"))
@@ -173,8 +176,8 @@ def testsdkext_main(d):
component = "%s %s" % (pn, OESDKExtTestContextExecutor.name)
context_msg = "%s:%s" % (os.path.basename(tcname), os.path.basename(sdk_env))
- tc.logSummary(result, component, context_msg)
- tc.logDetails()
+ result.logDetails()
+ result.logSummary(component, context_msg)
if not result.wasSuccessful():
fail = True
diff --git a/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass b/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass
index 260ece967..9bcfe708c 100644
--- a/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass
+++ b/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass
@@ -3,7 +3,6 @@ inherit toolchain-scripts-base siteinfo kernel-arch
# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it
# doesn't always match our expectations... but we default to the stock value
REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
-TARGET_CC_ARCH_append_libc-uclibc = " -muclibc"
TARGET_CC_ARCH_append_libc-musl = " -mmusl"
# default debug prefix map isn't valid in the SDK
@@ -25,6 +24,21 @@ toolchain_create_sdk_env_script () {
script=${1:-${SDK_OUTPUT}/${SDKPATH}/environment-setup-$multimach_target_sys}
rm -f $script
touch $script
+
+	echo '# Check for LD_LIBRARY_PATH being set, which can break the SDK and generally is bad practice' >> $script
+ echo '# http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80' >> $script
+ echo '# http://xahlee.info/UnixResource_dir/_/ldpath.html' >> $script
+	echo '# Only disable this check if you absolutely know what you are doing!' >> $script
+ echo 'if [ ! -z "$LD_LIBRARY_PATH" ]; then' >> $script
+ echo " echo \"Your environment is misconfigured, you probably need to 'unset LD_LIBRARY_PATH'\"" >> $script
+ echo " echo \"but please check why this was set in the first place and that it's safe to unset.\"" >> $script
+ echo ' echo "The SDK will not operate correctly in most cases when LD_LIBRARY_PATH is set."' >> $script
+ echo ' echo "For more references see:"' >> $script
+ echo ' echo " http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80"' >> $script
+ echo ' echo " http://xahlee.info/UnixResource_dir/_/ldpath.html"' >> $script
+ echo ' return 1' >> $script
+ echo 'fi' >> $script
+
echo 'export SDKTARGETSYSROOT='"$sysroot" >> $script
EXTRAPATH=""
for i in ${CANADIANEXTRAOS}; do
diff --git a/import-layers/yocto-poky/meta/classes/uboot-config.bbclass b/import-layers/yocto-poky/meta/classes/uboot-config.bbclass
index 10013b7d4..533e175a3 100644
--- a/import-layers/yocto-poky/meta/classes/uboot-config.bbclass
+++ b/import-layers/yocto-poky/meta/classes/uboot-config.bbclass
@@ -20,24 +20,21 @@ python () {
ubootbinaries = d.getVar('UBOOT_BINARIES')
# The "doc" varflag is special, we don't want to see it here
ubootconfigflags.pop('doc', None)
+ ubootconfig = (d.getVar('UBOOT_CONFIG') or "").split()
- if not ubootmachine and not ubootconfigflags:
+ if not ubootmachine and not ubootconfig:
PN = d.getVar("PN")
FILE = os.path.basename(d.getVar("FILE"))
bb.debug(1, "To build %s, see %s for instructions on \
setting up your machine config" % (PN, FILE))
raise bb.parse.SkipPackage("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE"))
- if ubootmachine and ubootconfigflags:
+ if ubootmachine and ubootconfig:
raise bb.parse.SkipPackage("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.")
if ubootconfigflags and ubootbinaries:
raise bb.parse.SkipPackage("You cannot use UBOOT_BINARIES as it is internal to uboot_config.bbclass.")
- if not ubootconfigflags:
- return
-
- ubootconfig = (d.getVar('UBOOT_CONFIG') or "").split()
if len(ubootconfig) > 0:
for config in ubootconfig:
for f, v in ubootconfigflags.items():
@@ -57,6 +54,4 @@ python () {
bb.debug(1, "Appending '%s' to UBOOT_BINARIES." % ubootbinary)
d.appendVar('UBOOT_BINARIES', ' ' + ubootbinary)
break
- elif len(ubootconfig) == 0:
- raise bb.parse.SkipPackage('You must set a default in UBOOT_CONFIG.')
}
diff --git a/import-layers/yocto-poky/meta/classes/uboot-extlinux-config.bbclass b/import-layers/yocto-poky/meta/classes/uboot-extlinux-config.bbclass
index 8447a047e..61dff14b7 100644
--- a/import-layers/yocto-poky/meta/classes/uboot-extlinux-config.bbclass
+++ b/import-layers/yocto-poky/meta/classes/uboot-extlinux-config.bbclass
@@ -68,7 +68,7 @@ UBOOT_EXTLINUX_MENU_DESCRIPTION_linux ??= "${DISTRO_NAME}"
UBOOT_EXTLINUX_CONFIG = "${B}/extlinux.conf"
-python create_extlinux_config() {
+python do_create_extlinux_config() {
if d.getVar("UBOOT_EXTLINUX") != "1":
return
@@ -149,4 +149,4 @@ python create_extlinux_config() {
bb.fatal('Unable to open %s' % (cfile))
}
-do_install[prefuncs] += "create_extlinux_config"
+addtask create_extlinux_config before do_install do_deploy after do_compile
diff --git a/import-layers/yocto-poky/meta/classes/uninative.bbclass b/import-layers/yocto-poky/meta/classes/uninative.bbclass
index 8f3448336..670efa9f0 100644
--- a/import-layers/yocto-poky/meta/classes/uninative.bbclass
+++ b/import-layers/yocto-poky/meta/classes/uninative.bbclass
@@ -49,6 +49,12 @@ python uninative_event_fetchloader() {
localdata = bb.data.createCopy(d)
localdata.setVar('FILESPATH', "")
localdata.setVar('DL_DIR', tarballdir)
+ # Our games with path manipulation of DL_DIR mean standard PREMIRRORS don't work
+ # and we can't easily put 'chksum' into the url path from a url parameter with
+ # the current fetcher url handling
+ ownmirror = d.getVar('SOURCE_MIRROR_URL')
+ if ownmirror:
+ localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} ${SOURCE_MIRROR_URL}/uninative/%s/${UNINATIVE_TARBALL}" % chksum)
srcuri = d.expand("${UNINATIVE_URL}${UNINATIVE_TARBALL};sha256sum=%s" % chksum)
bb.note("Fetching uninative binary shim from %s" % srcuri)
@@ -57,7 +63,19 @@ python uninative_event_fetchloader() {
fetcher.download()
localpath = fetcher.localpath(srcuri)
if localpath != tarballpath and os.path.exists(localpath) and not os.path.exists(tarballpath):
+ # Follow the symlink behavior from the bitbake fetch2.
+ # This will cover the case where an existing symlink is broken
+ # as well as if there are two processes trying to create it
+ # at the same time.
+ if os.path.islink(tarballpath):
+ # Broken symbolic link
+ os.unlink(tarballpath)
+
+ # Deal with two processes trying to make symlink at once
+ try:
os.symlink(localpath, tarballpath)
+ except FileExistsError:
+ pass
cmd = d.expand("\
mkdir -p ${UNINATIVE_STAGING_DIR}-uninative; \
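The symlink handling above in isolation: remove a dangling link, then create the new one EAFP-style so two concurrent fetchers cannot race between the existence check and the creation. A sketch:

    import os

    def ensure_symlink(localpath, tarballpath):
        # os.path.exists() follows links, so islink-and-not-exists means broken.
        if os.path.islink(tarballpath) and not os.path.exists(tarballpath):
            os.unlink(tarballpath)
        try:
            os.symlink(localpath, tarballpath)
        except FileExistsError:
            pass  # another process created it first; nothing left to do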
diff --git a/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass b/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass
index 4bba76c3b..aa01058cf 100644
--- a/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass
+++ b/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass
@@ -143,6 +143,10 @@ python perform_packagecopy_append () {
if not alt_link:
alt_link = "%s/%s" % (d.getVar('bindir'), alt_name)
d.setVarFlag('ALTERNATIVE_LINK_NAME', alt_name, alt_link)
+ if alt_link.startswith(os.path.join(d.getVar('sysconfdir', True), 'init.d')):
+ # Managing init scripts does not work (bug #10433), foremost
+ # because of a race with update-rc.d
+ bb.fatal("Using update-alternatives for managing SysV init scripts is not supported")
alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
@@ -201,8 +205,8 @@ python populate_packages_updatealternatives () {
pkgdest = d.getVar('PKGD')
for pkg in (d.getVar('PACKAGES') or "").split():
# Create post install/removal scripts
- alt_setup_links = "# Begin section update-alternatives\n"
- alt_remove_links = "# Begin section update-alternatives\n"
+ alt_setup_links = ""
+ alt_remove_links = ""
for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
@@ -225,13 +229,10 @@ python populate_packages_updatealternatives () {
# Default to generate shell script.. eventually we may want to change this...
alt_target = os.path.normpath(alt_target)
- alt_setup_links += 'update-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority)
- alt_remove_links += 'update-alternatives --remove %s %s\n' % (alt_name, alt_target)
+ alt_setup_links += '\tupdate-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority)
+ alt_remove_links += '\tupdate-alternatives --remove %s %s\n' % (alt_name, alt_target)
- alt_setup_links += "# End section update-alternatives\n"
- alt_remove_links += "# End section update-alternatives\n"
-
- if len(alt_setup_links.splitlines()) > 2:
+ if alt_setup_links:
# RDEPENDS setup
provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives')
if provider:
@@ -241,24 +242,12 @@ python populate_packages_updatealternatives () {
bb.note('adding update-alternatives calls to postinst/prerm for %s' % pkg)
bb.note('%s' % alt_setup_links)
postinst = d.getVar('pkg_postinst_%s' % pkg) or '#!/bin/sh\n'
- postinst = postinst.splitlines(True)
- try:
- index = postinst.index('# Begin section update-rc.d\n')
- postinst.insert(index, alt_setup_links)
- except ValueError:
- postinst.append(alt_setup_links)
- postinst = ''.join(postinst)
+ postinst += alt_setup_links
d.setVar('pkg_postinst_%s' % pkg, postinst)
bb.note('%s' % alt_remove_links)
prerm = d.getVar('pkg_prerm_%s' % pkg) or '#!/bin/sh\n'
- prerm = prerm.splitlines(True)
- try:
- index = prerm.index('# End section update-rc.d\n')
- prerm.insert(index + 1, alt_remove_links)
- except ValueError:
- prerm.append(alt_remove_links)
- prerm = ''.join(prerm)
+ prerm += alt_remove_links
d.setVar('pkg_prerm_%s' % pkg, prerm)
}
diff --git a/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass b/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass
index 9ba3dacca..e1e0e0487 100644
--- a/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass
+++ b/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass
@@ -37,7 +37,6 @@ fi
PACKAGE_WRITE_DEPS += "update-rc.d-native"
updatercd_postinst() {
-# Begin section update-rc.d
if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
if [ -n "$D" ]; then
OPT="-r $D"
@@ -46,15 +45,12 @@ if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
fi
update-rc.d $OPT ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS}
fi
-# End section update-rc.d
}
updatercd_prerm() {
-# Begin section update-rc.d
if ${@use_updatercd(d)} && [ -z "$D" -a -x "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
${INIT_D_DIR}/${INITSCRIPT_NAME} stop || :
fi
-# End section update-rc.d
}
updatercd_postrm() {
@@ -95,8 +91,7 @@ python populate_packages_updatercd () {
return
statement = "grep -q -w '/etc/init.d/functions' %s" % path
if subprocess.call(statement, shell=True) == 0:
- mlprefix = d.getVar('MLPREFIX') or ""
- d.appendVar('RDEPENDS_' + pkg, ' %sinitscripts-functions' % (mlprefix))
+ d.appendVar('RDEPENDS_' + pkg, ' initd-functions')
def update_rcd_package(pkg):
bb.debug(1, 'adding update-rc.d calls to preinst/postinst/prerm/postrm for %s' % pkg)
@@ -116,25 +111,13 @@ python populate_packages_updatercd () {
postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst = postinst.splitlines(True)
- try:
- index = postinst.index('# End section update-alternatives\n')
- postinst.insert(index + 1, localdata.getVar('updatercd_postinst'))
- except ValueError:
- postinst.append(localdata.getVar('updatercd_postinst'))
- postinst = ''.join(postinst)
+ postinst += localdata.getVar('updatercd_postinst')
d.setVar('pkg_postinst_%s' % pkg, postinst)
prerm = d.getVar('pkg_prerm_%s' % pkg)
if not prerm:
prerm = '#!/bin/sh\n'
- prerm = prerm.splitlines(True)
- try:
- index = prerm.index('# Begin section update-alternatives\n')
- prerm.insert(index, localdata.getVar('updatercd_prerm'))
- except ValueError:
- prerm.append(localdata.getVar('updatercd_prerm'))
- prerm = ''.join(prerm)
+ prerm += localdata.getVar('updatercd_prerm')
d.setVar('pkg_prerm_%s' % pkg, prerm)
postrm = d.getVar('pkg_postrm_%s' % pkg)
diff --git a/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass b/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass
index 6ebf7600f..589a99ff4 100644
--- a/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass
+++ b/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass
@@ -1,22 +1,10 @@
# In order to support a deterministic set of 'dynamic' users/groups,
# we need a function to reformat the params based on a static file
def update_useradd_static_config(d):
- import argparse
import itertools
import re
import errno
-
- class myArgumentParser( argparse.ArgumentParser ):
- def _print_message(self, message, file=None):
- bb.warn("%s - %s: %s" % (d.getVar('PN'), pkg, message))
-
- # This should never be called...
- def exit(self, status=0, message=None):
- message = message or ("%s - %s: useradd.bbclass: Argument parsing exited" % (d.getVar('PN'), pkg))
- error(message)
-
- def error(self, message):
- bb.fatal(message)
+ import oe.useradd
def list_extend(iterable, length, obj = None):
"""Ensure that iterable is the specified length by extending with obj
@@ -50,62 +38,44 @@ def update_useradd_static_config(d):
return id_table
- def handle_missing_id(id, type, pkg):
+ def handle_missing_id(id, type, pkg, files, var, value):
# For backwards compatibility we accept "1" in addition to "error"
- if d.getVar('USERADD_ERROR_DYNAMIC') == 'error' or d.getVar('USERADD_ERROR_DYNAMIC') == '1':
- raise NotImplementedError("%s - %s: %sname %s does not have a static ID defined. Skipping it." % (d.getVar('PN'), pkg, type, id))
- elif d.getVar('USERADD_ERROR_DYNAMIC') == 'warn':
- bb.warn("%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN'), pkg, type, id))
+ error_dynamic = d.getVar('USERADD_ERROR_DYNAMIC')
+ msg = "%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN'), pkg, type, id)
+ if files:
+ msg += " Add %s to one of these files: %s" % (id, files)
+ else:
+ msg += " %s file(s) not found in BBPATH: %s" % (var, value)
+ if error_dynamic == 'error' or error_dynamic == '1':
+ raise NotImplementedError(msg)
+ elif error_dynamic == 'warn':
+ bb.warn(msg)
+ elif error_dynamic == 'skip':
+ raise bb.parse.SkipRecipe(msg)
+
+    # Return a list of configuration files: the contents of
+    # USERADD_GID_TABLES (default files/group) or of
+    # USERADD_UID_TABLES (default files/passwd).
+ # Paths are resolved via BBPATH.
+ def get_table_list(d, var, default):
+ files = []
+ bbpath = d.getVar('BBPATH', True)
+ tables = d.getVar(var, True)
+ if not tables:
+ tables = default
+ for conf_file in tables.split():
+ files.append(bb.utils.which(bbpath, conf_file))
+ return (' '.join(files), var, default)
# We parse and rewrite the useradd components
def rewrite_useradd(params, is_pkg):
- # The following comes from --help on useradd from shadow
- parser = myArgumentParser(prog='useradd')
- parser.add_argument("-b", "--base-dir", metavar="BASE_DIR", help="base directory for the home directory of the new account")
- parser.add_argument("-c", "--comment", metavar="COMMENT", help="GECOS field of the new account")
- parser.add_argument("-d", "--home-dir", metavar="HOME_DIR", help="home directory of the new account")
- parser.add_argument("-D", "--defaults", help="print or change default useradd configuration", action="store_true")
- parser.add_argument("-e", "--expiredate", metavar="EXPIRE_DATE", help="expiration date of the new account")
- parser.add_argument("-f", "--inactive", metavar="INACTIVE", help="password inactivity period of the new account")
- parser.add_argument("-g", "--gid", metavar="GROUP", help="name or ID of the primary group of the new account")
- parser.add_argument("-G", "--groups", metavar="GROUPS", help="list of supplementary groups of the new account")
- parser.add_argument("-k", "--skel", metavar="SKEL_DIR", help="use this alternative skeleton directory")
- parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
- parser.add_argument("-l", "--no-log-init", help="do not add the user to the lastlog and faillog databases", action="store_true")
- parser.add_argument("-m", "--create-home", help="create the user's home directory", action="store_const", const=True)
- parser.add_argument("-M", "--no-create-home", dest="create_home", help="do not create the user's home directory", action="store_const", const=False)
- parser.add_argument("-N", "--no-user-group", dest="user_group", help="do not create a group with the same name as the user", action="store_const", const=False)
- parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true")
- parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
- parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new account")
- parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
- parser.add_argument("-r", "--system", help="create a system account", action="store_true")
- parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
- parser.add_argument("-u", "--uid", metavar="UID", help="user ID of the new account")
- parser.add_argument("-U", "--user-group", help="create a group with the same name as the user", action="store_const", const=True)
- parser.add_argument("LOGIN", help="Login name of the new user")
-
- # Return a list of configuration files based on either the default
- # files/passwd or the contents of USERADD_UID_TABLES
- # paths are resolved via BBPATH
- def get_passwd_list(d):
- str = ""
- bbpath = d.getVar('BBPATH')
- passwd_tables = d.getVar('USERADD_UID_TABLES')
- if not passwd_tables:
- passwd_tables = 'files/passwd'
- for conf_file in passwd_tables.split():
- str += " %s" % bb.utils.which(bbpath, conf_file)
- return str
+ parser = oe.useradd.build_useradd_parser()
newparams = []
users = None
- for param in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params):
- param = param.strip()
- if not param:
- continue
+ for param in oe.useradd.split_commands(params):
try:
- uaargs = parser.parse_args(re.split('''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
+ uaargs = parser.parse_args(oe.useradd.split_args(param))
except:
bb.fatal("%s: Unable to parse arguments for USERADD_PARAM_%s: '%s'" % (d.getVar('PN'), pkg, param))
@@ -121,10 +91,12 @@ def update_useradd_static_config(d):
# all new users get the default ('*' which prevents login) until the user is
# specifically configured by the system admin.
if not users:
- users = merge_files(get_passwd_list(d), 7)
+ files, table_var, table_value = get_table_list(d, 'USERADD_UID_TABLES', 'files/passwd')
+ users = merge_files(files, 7)
+ type = 'system user' if uaargs.system else 'normal user'
if uaargs.LOGIN not in users:
- handle_missing_id(uaargs.LOGIN, 'user', pkg)
+ handle_missing_id(uaargs.LOGIN, type, pkg, files, table_var, table_value)
newparams.append(param)
continue
@@ -182,7 +154,7 @@ def update_useradd_static_config(d):
# Should be an error if a specific option is set...
if not uaargs.uid or not uaargs.uid.isdigit() or not uaargs.gid:
- handle_missing_id(uaargs.LOGIN, 'user', pkg)
+ handle_missing_id(uaargs.LOGIN, type, pkg, files, table_var, table_value)
# Reconstruct the args...
newparam = ['', ' --defaults'][uaargs.defaults]
@@ -217,40 +189,14 @@ def update_useradd_static_config(d):
# We parse and rewrite the groupadd components
def rewrite_groupadd(params, is_pkg):
- # The following comes from --help on groupadd from shadow
- parser = myArgumentParser(prog='groupadd')
- parser.add_argument("-f", "--force", help="exit successfully if the group already exists, and cancel -g if the GID is already used", action="store_true")
- parser.add_argument("-g", "--gid", metavar="GID", help="use GID for the new group")
- parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
- parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true")
- parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group")
- parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new group")
- parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
- parser.add_argument("-r", "--system", help="create a system account", action="store_true")
- parser.add_argument("GROUP", help="Group name of the new group")
-
- # Return a list of configuration files based on either the default
- # files/group or the contents of USERADD_GID_TABLES
- # paths are resolved via BBPATH
- def get_group_list(d):
- str = ""
- bbpath = d.getVar('BBPATH')
- group_tables = d.getVar('USERADD_GID_TABLES')
- if not group_tables:
- group_tables = 'files/group'
- for conf_file in group_tables.split():
- str += " %s" % bb.utils.which(bbpath, conf_file)
- return str
+ parser = oe.useradd.build_groupadd_parser()
newparams = []
groups = None
- for param in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params):
- param = param.strip()
- if not param:
- continue
+ for param in oe.useradd.split_commands(params):
try:
# If we're processing multiple lines, we could have left over values here...
- gaargs = parser.parse_args(re.split('''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
+ gaargs = parser.parse_args(oe.useradd.split_args(param))
except:
bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM_%s: '%s'" % (d.getVar('PN'), pkg, param))
@@ -264,10 +210,12 @@ def update_useradd_static_config(d):
# Note: similar to the passwd file, the 'password' field is ignored
# Note: group_members is ignored, group members must be configured with the GROUPMEMS_PARAM
if not groups:
- groups = merge_files(get_group_list(d), 4)
+ files, table_var, table_value = get_table_list(d, 'USERADD_GID_TABLES', 'files/group')
+ groups = merge_files(files, 4)
+ type = 'system group' if gaargs.system else 'normal group'
if gaargs.GROUP not in groups:
- handle_missing_id(gaargs.GROUP, 'group', pkg)
+ handle_missing_id(gaargs.GROUP, type, pkg, files, table_var, table_value)
newparams.append(param)
continue
@@ -279,7 +227,7 @@ def update_useradd_static_config(d):
gaargs.gid = field[2]
if not gaargs.gid or not gaargs.gid.isdigit():
- handle_missing_id(gaargs.GROUP, 'group', pkg)
+ handle_missing_id(gaargs.GROUP, type, pkg, files, table_var, table_value)
# Reconstruct the args...
newparam = ['', ' --force'][gaargs.force]
@@ -335,11 +283,7 @@ def update_useradd_static_config(d):
#bb.warn("Before: 'EXTRA_USERS_PARAMS' - '%s'" % (d.getVar('EXTRA_USERS_PARAMS')))
new_extrausers = []
- for cmd in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', extrausers):
- cmd = cmd.strip()
- if not cmd:
- continue
-
+ for cmd in oe.useradd.split_commands(extrausers):
if re.match('''useradd (.*)''', cmd):
useradd_param = re.match('''useradd (.*)''', cmd).group(1)
useradd_param = rewrite_useradd(useradd_param, False)
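The oe.useradd helpers replace the inline regexes deleted above. A sketch of what they plausibly wrap; the quote-aware patterns are taken verbatim from the removed lines, but the real module contents may differ:

    import re

    def split_commands(params):
        # Split ';'-separated useradd/groupadd commands, ignoring ';' inside quotes.
        return [cmd.strip()
                for cmd in re.split(r'''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params)
                if cmd.strip()]

    def split_args(param):
        # Split one command into arguments, keeping quoted values intact.
        return re.split(r'''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param)

    print(split_commands("useradd -u 1200 -d '/home/b b' builder; groupadd -g 1200 builder"))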
diff --git a/import-layers/yocto-poky/meta/classes/useradd.bbclass b/import-layers/yocto-poky/meta/classes/useradd.bbclass
index 0f551b50f..124becd08 100644
--- a/import-layers/yocto-poky/meta/classes/useradd.bbclass
+++ b/import-layers/yocto-poky/meta/classes/useradd.bbclass
@@ -118,6 +118,7 @@ useradd_sysroot () {
# useradd/groupadd tools are unavailable. If there is no dependency, we assume we don't want to
# create users in the sysroot
if ! command -v useradd; then
+ bbwarn "command useradd not found!"
exit 0
fi
@@ -139,22 +140,19 @@ useradd_sysroot () {
EXTRA_STAGING_FIXMES += "COMPONENTS_DIR PSEUDO_LOCALSTATEDIR LOGFIFO"
python useradd_sysroot_sstate () {
+ scriptfile = None
task = d.getVar("BB_CURRENTTASK")
if task == "package_setscene":
bb.build.exec_func("useradd_sysroot", d)
elif task == "prepare_recipe_sysroot":
# Used to update this recipe's own sysroot so the user/groups are available to do_install
scriptfile = d.expand("${RECIPE_SYSROOT}${bindir}/postinst-useradd-${PN}")
- bb.utils.mkdirhier(os.path.dirname(scriptfile))
- with open(scriptfile, 'w') as script:
- script.write("#!/bin/sh\n")
- bb.data.emit_func("useradd_sysroot", script, d)
- script.write("useradd_sysroot\n")
- os.chmod(scriptfile, 0o755)
bb.build.exec_func("useradd_sysroot", d)
elif task == "populate_sysroot":
# Used when installed in dependent task sysroots
scriptfile = d.expand("${SYSROOT_DESTDIR}${bindir}/postinst-useradd-${PN}")
+
+ if scriptfile:
bb.utils.mkdirhier(os.path.dirname(scriptfile))
with open(scriptfile, 'w') as script:
script.write("#!/bin/sh\n")
diff --git a/import-layers/yocto-poky/meta/classes/utils.bbclass b/import-layers/yocto-poky/meta/classes/utils.bbclass
index 96463ab32..8e07eac07 100644
--- a/import-layers/yocto-poky/meta/classes/utils.bbclass
+++ b/import-layers/yocto-poky/meta/classes/utils.bbclass
@@ -320,7 +320,7 @@ hardlinkdir () {
def check_app_exists(app, d):
- app = d.expand(app).strip()
+ app = d.expand(app).split()[0].strip()
path = d.getVar('PATH')
return bool(bb.utils.which(path, app))
@@ -369,6 +369,7 @@ def get_multilib_datastore(variant, d):
localdata.setVar("OVERRIDES", overrides)
localdata.setVar("MLPREFIX", variant + "-")
return localdata
+get_multilib_datastore[vardepsexclude] = "OVERRIDES"
def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
"""Return a string of all ${var} in all multilib tune configuration"""
@@ -431,6 +432,7 @@ def all_multilib_tune_list(vars, d):
values[v].append(localdata.getVar(v))
values['ml'].append(item)
return values
+all_multilib_tune_list[vardepsexclude] = "OVERRIDES"
# If the user hasn't set up their name/email, set some defaults
check_git_config() {
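Why check_app_exists() now splits its argument: callers may pass a command together with its arguments, and only the executable name should be PATH-searched. A sketch using the standard library's shutil.which in place of bb.utils.which:

    import shutil

    def check_app_exists(app):
        # 'qemu-system-x86_64 -machine q35' -> PATH lookup of 'qemu-system-x86_64'
        return bool(shutil.which(app.split()[0].strip()))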
diff --git a/import-layers/yocto-poky/meta/classes/waf.bbclass b/import-layers/yocto-poky/meta/classes/waf.bbclass
index c4698e910..acbda278a 100644
--- a/import-layers/yocto-poky/meta/classes/waf.bbclass
+++ b/import-layers/yocto-poky/meta/classes/waf.bbclass
@@ -25,8 +25,23 @@ def get_waf_parallel_make(d):
return ""
+python waf_preconfigure() {
+ from distutils.version import StrictVersion
+ srcsubdir = d.getVar('S')
+ wafbin = os.path.join(srcsubdir, 'waf')
+ status, result = oe.utils.getstatusoutput(wafbin + " --version")
+ if status != 0:
+ bb.warn("Unable to execute waf --version, exit code %d. Assuming waf version without bindir/libdir support." % status)
+ return
+ version = result.split()[1]
+ if StrictVersion(version) >= StrictVersion("1.8.7"):
+ d.setVar("WAF_EXTRA_CONF", "--bindir=${bindir} --libdir=${libdir}")
+}
+
+do_configure[prefuncs] += "waf_preconfigure"
+
waf_do_configure() {
- ${S}/waf configure --prefix=${prefix} ${EXTRA_OECONF}
+ ${S}/waf configure --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF}
}
waf_do_compile() {