Diffstat (limited to 'import-layers/yocto-poky/meta/classes')
-rw-r--r--  import-layers/yocto-poky/meta/classes/archiver.bbclass  30
-rw-r--r--  import-layers/yocto-poky/meta/classes/autotools.bbclass  2
-rw-r--r--  import-layers/yocto-poky/meta/classes/base.bbclass  67
-rw-r--r--  import-layers/yocto-poky/meta/classes/bin_package.bbclass  5
-rw-r--r--  import-layers/yocto-poky/meta/classes/blacklist.bbclass  2
-rw-r--r--  import-layers/yocto-poky/meta/classes/buildhistory.bbclass  2
-rw-r--r--  import-layers/yocto-poky/meta/classes/cmake.bbclass  35
-rw-r--r--  import-layers/yocto-poky/meta/classes/cross-canadian.bbclass  4
-rw-r--r--  import-layers/yocto-poky/meta/classes/cross.bbclass  7
-rw-r--r--  import-layers/yocto-poky/meta/classes/cve-check.bbclass  16
-rw-r--r--  import-layers/yocto-poky/meta/classes/debian.bbclass  43
-rw-r--r--  import-layers/yocto-poky/meta/classes/deploy.bbclass  2
-rw-r--r--  import-layers/yocto-poky/meta/classes/devtool-source.bbclass  56
-rw-r--r--  import-layers/yocto-poky/meta/classes/distro_features_check.bbclass  6
-rw-r--r--  import-layers/yocto-poky/meta/classes/distrodata.bbclass  57
-rw-r--r--  import-layers/yocto-poky/meta/classes/externalsrc.bbclass  13
-rw-r--r--  import-layers/yocto-poky/meta/classes/gettext.bbclass  4
-rw-r--r--  import-layers/yocto-poky/meta/classes/gio-module-cache.bbclass  1
-rw-r--r--  import-layers/yocto-poky/meta/classes/glide.bbclass  9
-rw-r--r--  import-layers/yocto-poky/meta/classes/gnomebase.bbclass  3
-rw-r--r--  import-layers/yocto-poky/meta/classes/go.bbclass  133
-rw-r--r--  import-layers/yocto-poky/meta/classes/goarch.bbclass  16
-rw-r--r--  import-layers/yocto-poky/meta/classes/godep.bbclass  8
-rw-r--r--  import-layers/yocto-poky/meta/classes/grub-efi-cfg.bbclass  114
-rw-r--r--  import-layers/yocto-poky/meta/classes/grub-efi.bbclass  124
-rw-r--r--  import-layers/yocto-poky/meta/classes/gtk-doc.bbclass  24
-rw-r--r--  import-layers/yocto-poky/meta/classes/icecc.bbclass  136
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-combined-dbg.bbclass  9
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-live.bbclass  17
-rw-r--r--  import-layers/yocto-poky/meta/classes/image-prelink.bbclass  2
-rw-r--r--  import-layers/yocto-poky/meta/classes/image.bbclass  26
-rw-r--r--  import-layers/yocto-poky/meta/classes/image_types.bbclass  44
-rw-r--r--  import-layers/yocto-poky/meta/classes/image_types_wic.bbclass  1
-rw-r--r--  import-layers/yocto-poky/meta/classes/insane.bbclass  14
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-arch.bbclass  5
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-devicetree.bbclass  8
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass  4
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass  9
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass  1
-rw-r--r--  import-layers/yocto-poky/meta/classes/kernel.bbclass  132
-rw-r--r--  import-layers/yocto-poky/meta/classes/libc-package.bbclass  20
-rw-r--r--  import-layers/yocto-poky/meta/classes/license.bbclass  6
-rw-r--r--  import-layers/yocto-poky/meta/classes/live-vm-common.bbclass  11
-rw-r--r--  import-layers/yocto-poky/meta/classes/logging.bbclass  2
-rw-r--r--  import-layers/yocto-poky/meta/classes/meson.bbclass  130
-rw-r--r--  import-layers/yocto-poky/meta/classes/mirrors.bbclass  2
-rw-r--r--  import-layers/yocto-poky/meta/classes/module-base.bbclass  14
-rw-r--r--  import-layers/yocto-poky/meta/classes/module.bbclass  4
-rw-r--r--  import-layers/yocto-poky/meta/classes/multilib.bbclass  16
-rw-r--r--  import-layers/yocto-poky/meta/classes/multilib_global.bbclass  28
-rw-r--r--  import-layers/yocto-poky/meta/classes/native.bbclass  2
-rw-r--r--  import-layers/yocto-poky/meta/classes/npm.bbclass  21
-rw-r--r--  import-layers/yocto-poky/meta/classes/package.bbclass  121
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_deb.bbclass  8
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_ipk.bbclass  4
-rw-r--r--  import-layers/yocto-poky/meta/classes/package_rpm.bbclass  13
-rw-r--r--  import-layers/yocto-poky/meta/classes/packagegroup.bbclass  1
-rw-r--r--  import-layers/yocto-poky/meta/classes/patch.bbclass  16
-rw-r--r--  import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass  12
-rw-r--r--  import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass  24
-rw-r--r--  import-layers/yocto-poky/meta/classes/ptest-perl.bbclass  30
-rw-r--r--  import-layers/yocto-poky/meta/classes/reproducible_build.bbclass  150
-rw-r--r--  import-layers/yocto-poky/meta/classes/reproducible_build_simple.bbclass  10
-rw-r--r--  import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass  25
-rw-r--r--  import-layers/yocto-poky/meta/classes/sanity.bbclass  27
-rw-r--r--  import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass  2
-rw-r--r--  import-layers/yocto-poky/meta/classes/sign_rpm.bbclass  6
-rw-r--r--  import-layers/yocto-poky/meta/classes/siteinfo.bbclass  13
-rw-r--r--  import-layers/yocto-poky/meta/classes/sstate.bbclass  52
-rw-r--r--  import-layers/yocto-poky/meta/classes/staging.bbclass  42
-rw-r--r--  import-layers/yocto-poky/meta/classes/syslinux.bbclass  3
-rw-r--r--  import-layers/yocto-poky/meta/classes/systemd-boot-cfg.bbclass  68
-rw-r--r--  import-layers/yocto-poky/meta/classes/systemd-boot.bbclass  72
-rw-r--r--  import-layers/yocto-poky/meta/classes/testimage.bbclass  16
-rw-r--r--  import-layers/yocto-poky/meta/classes/testsdk.bbclass  5
-rw-r--r--  import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass  18
-rw-r--r--  import-layers/yocto-poky/meta/classes/uboot-config.bbclass  8
-rw-r--r--  import-layers/yocto-poky/meta/classes/uninative.bbclass  6
-rw-r--r--  import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass  2
-rw-r--r--  import-layers/yocto-poky/meta/classes/utility-tasks.bbclass  13
-rw-r--r--  import-layers/yocto-poky/meta/classes/utils.bbclass  48
-rw-r--r--  import-layers/yocto-poky/meta/classes/waf.bbclass  46
82 files changed, 1477 insertions, 831 deletions
diff --git a/import-layers/yocto-poky/meta/classes/archiver.bbclass b/import-layers/yocto-poky/meta/classes/archiver.bbclass
index ec80ad47a..31c9b7eb4 100644
--- a/import-layers/yocto-poky/meta/classes/archiver.bbclass
+++ b/import-layers/yocto-poky/meta/classes/archiver.bbclass
@@ -46,7 +46,6 @@ do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_original[dirs] = "${ARCHIVER_OUTDIR} ${ARCHIVER_WORKDIR}"
do_deploy_archives[dirs] = "${WORKDIR}"
-do_deploy_all_archives[dirs] = "${WORKDIR}"
# This is a convenience for the shell script to use it
@@ -114,10 +113,9 @@ python () {
if ar_recipe == "1":
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_recipe' % pn)
- # Output the srpm package
- ar_srpm = d.getVarFlag('ARCHIVER_MODE', 'srpm')
- if ar_srpm == "1":
- if d.getVar('PACKAGES') != '' and d.getVar('IMAGE_PKGTYPE') == 'rpm':
+ # Output the SRPM package
+ if d.getVarFlag('ARCHIVER_MODE', 'srpm') == "1" and d.getVar('PACKAGES'):
+ if "package_rpm" in d.getVar('PACKAGE_CLASSES'):
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_package_write_rpm' % pn)
if ar_dumpdata == "1":
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_dumpdata' % pn)
@@ -129,6 +127,8 @@ python () {
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_patched' % pn)
elif ar_src == "configured":
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_configured' % pn)
+ else:
+ bb.fatal("ARCHIVER_MODE[srpm] needs package_rpm in PACKAGE_CLASSES")
}
# Take all the sources for a recipe and put them in WORKDIR/archiver-work/.
@@ -277,6 +277,11 @@ def create_tarball(d, srcdir, suffix, ar_outdir):
if (d.getVar('SRC_URI') == ""):
return
+ # For the kernel archive, srcdir may just be a link to the
+ # work-shared location. Use os.path.realpath to make sure
+ # that we archive the actual directory and not just the link.
+ srcdir = os.path.realpath(srcdir)
+
bb.utils.mkdirhier(ar_outdir)
if suffix:
filename = '%s-%s.tar.gz' % (d.getVar('PF'), suffix)
@@ -319,6 +324,10 @@ def create_diff_gz(d, src_orig, src, ar_outdir):
finally:
os.chdir(cwd)
+def is_work_shared(d):
+ pn = d.getVar('PN')
+ return bb.data.inherits_class('kernel', d) or pn.startswith('gcc-source')
+
# Run do_unpack and do_patch
python do_unpack_and_patch() {
if d.getVarFlag('ARCHIVER_MODE', 'src') not in \
@@ -331,7 +340,7 @@ python do_unpack_and_patch() {
pn = d.getVar('PN')
# The kernel class functions require it to be on work-shared, so we don't change WORKDIR
- if not (bb.data.inherits_class('kernel-yocto', d) or pn.startswith('gcc-source')):
+ if not is_work_shared(d):
# Change the WORKDIR to make do_unpack do_patch run in another dir.
d.setVar('WORKDIR', ar_workdir)
# Restore the original path to recipe's native sysroot (it's relative to WORKDIR).
@@ -351,7 +360,7 @@ python do_unpack_and_patch() {
oe.path.copytree(src, src_orig)
# Make sure gcc and kernel sources are patched only once
- if not (d.getVar('SRC_URI') == "" or (bb.data.inherits_class('kernel-yocto', d) or pn.startswith('gcc-source'))):
+ if not (d.getVar('SRC_URI') == "" or is_work_shared(d)):
bb.build.exec_func('do_patch', d)
# Create the patches
@@ -455,13 +464,6 @@ addtask do_dumpdata
addtask do_ar_recipe
addtask do_deploy_archives before do_build
-addtask do_deploy_all_archives after do_deploy_archives
-do_deploy_all_archives[recrdeptask] = "do_deploy_archives"
-do_deploy_all_archives[recideptask] = "do_${BB_DEFAULT_TASK}"
-do_deploy_all_archives() {
- :
-}
-
python () {
# Add tasks in the correct order, specifically for linux-yocto to avoid a race condition.
# sstatesig.py:sstate_rundepfilter has special support that excludes this dependency
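
For context, the SRPM mode that the new bb.fatal above validates is driven from local.conf; a minimal sketch (values illustrative):

    INHERIT += "archiver"
    ARCHIVER_MODE[srpm] = "1"
    ARCHIVER_MODE[src] = "patched"
    # Required now; without package_rpm in PACKAGE_CLASSES parsing aborts:
    PACKAGE_CLASSES = "package_rpm"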
diff --git a/import-layers/yocto-poky/meta/classes/autotools.bbclass b/import-layers/yocto-poky/meta/classes/autotools.bbclass
index efa4098d6..cc857acc3 100644
--- a/import-layers/yocto-poky/meta/classes/autotools.bbclass
+++ b/import-layers/yocto-poky/meta/classes/autotools.bbclass
@@ -200,7 +200,7 @@ autotools_do_configure() {
bbnote Executing glib-gettextize --force --copy
echo "no" | glib-gettextize --force --copy
fi
- elif grep -q "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC; then
+ elif [ "${BPN}" != "gettext" ] && grep -q "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC; then
# We'd call gettextize here if it wasn't so broken...
cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
if [ -d ${S}/po/ ]; then
diff --git a/import-layers/yocto-poky/meta/classes/base.bbclass b/import-layers/yocto-poky/meta/classes/base.bbclass
index bd0d6e3ca..bb1f4b753 100644
--- a/import-layers/yocto-poky/meta/classes/base.bbclass
+++ b/import-layers/yocto-poky/meta/classes/base.bbclass
@@ -152,12 +152,8 @@ python base_do_fetch() {
addtask unpack after do_fetch
do_unpack[dirs] = "${WORKDIR}"
-python () {
- if d.getVar('S') != d.getVar('WORKDIR'):
- d.setVarFlag('do_unpack', 'cleandirs', '${S}')
- else:
- d.setVarFlag('do_unpack', 'cleandirs', os.path.join('${S}', 'patches'))
-}
+do_unpack[cleandirs] = "${@d.getVar('S') if d.getVar('S') != d.getVar('WORKDIR') else os.path.join('${S}', 'patches')}"
+
python base_do_unpack() {
src_uri = (d.getVar('SRC_URI') or "").split()
if len(src_uri) == 0:
@@ -170,12 +166,6 @@ python base_do_unpack() {
bb.fatal(str(e))
}
-def pkgarch_mapping(d):
- # Compatibility mappings of TUNE_PKGARCH (opt in)
- if d.getVar("PKGARCHCOMPAT_ARMV7A"):
- if d.getVar("TUNE_PKGARCH") == "armv7a-vfp-neon":
- d.setVar("TUNE_PKGARCH", "armv7a")
-
def get_layers_branch_rev(d):
layers = (d.getVar("BBLAYERS") or "").split()
layers_branch_rev = ["%-20s = \"%s:%s\"" % (os.path.basename(i), \
@@ -225,12 +215,9 @@ python base_eventhandler() {
import bb.runqueue
if isinstance(e, bb.event.ConfigParsed):
- if not e.data.getVar("NATIVELSBSTRING", False):
- e.data.setVar("NATIVELSBSTRING", lsb_distro_identifier(e.data))
- e.data.setVar('BB_VERSION', bb.__version__)
- pkgarch_mapping(e.data)
- oe.utils.features_backfill("DISTRO_FEATURES", e.data)
- oe.utils.features_backfill("MACHINE_FEATURES", e.data)
+ if not d.getVar("NATIVELSBSTRING", False):
+ d.setVar("NATIVELSBSTRING", lsb_distro_identifier(d))
+ d.setVar('BB_VERSION', bb.__version__)
# Works with the line in layer.conf which changes PATH to point here
setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS', d)
setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS_NONFATAL', d, fatal=False)
@@ -246,7 +233,7 @@ python base_eventhandler() {
e.mcdata[''].setVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", deps)
if isinstance(e, bb.event.BuildStarted):
- localdata = bb.data.createCopy(e.data)
+ localdata = bb.data.createCopy(d)
statuslines = []
for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
g = globals()
@@ -257,7 +244,7 @@ python base_eventhandler() {
if flines:
statuslines.extend(flines)
- statusheader = e.data.getVar('BUILDCFG_HEADER')
+ statusheader = d.getVar('BUILDCFG_HEADER')
if statusheader:
bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
@@ -265,23 +252,23 @@ python base_eventhandler() {
# target ones and we'd see duplicate key names overwriting each other
# for various PREFERRED_PROVIDERS
if isinstance(e, bb.event.RecipePreFinalise):
- if e.data.getVar("TARGET_PREFIX") == e.data.getVar("SDK_PREFIX"):
- e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils")
- e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc-initial")
- e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc")
- e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++")
- e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs")
+ if d.getVar("TARGET_PREFIX") == d.getVar("SDK_PREFIX"):
+ d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils")
+ d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc-initial")
+ d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc")
+ d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++")
+ d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs")
if isinstance(e, bb.runqueue.sceneQueueComplete):
- completions = e.data.expand("${STAGING_DIR}/sstatecompletions")
+ completions = d.expand("${STAGING_DIR}/sstatecompletions")
if os.path.exists(completions):
cmds = set()
with open(completions, "r") as f:
cmds = set(f)
- e.data.setVar("completion_function", "\n".join(cmds))
- e.data.setVarFlag("completion_function", "func", "1")
+ d.setVar("completion_function", "\n".join(cmds))
+ d.setVarFlag("completion_function", "func", "1")
bb.debug(1, "Executing SceneQueue Completion commands: %s" % "\n".join(cmds))
- bb.build.exec_func("completion_function", e.data)
+ bb.build.exec_func("completion_function", d)
os.remove(completions)
if isinstance(e, bb.event.RecipeParsed):
@@ -300,7 +287,7 @@ python base_eventhandler() {
if p.startswith("virtual/") and p not in multiwhitelist:
profprov = d.getVar("PREFERRED_PROVIDER_" + p)
if profprov and pn != profprov:
- raise bb.parse.SkipPackage("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
+ raise bb.parse.SkipRecipe("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
}
CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
@@ -389,6 +376,10 @@ def set_packagetriplet(d):
python () {
import string, re
+ # Handle backfilling
+ oe.utils.features_backfill("DISTRO_FEATURES", d)
+ oe.utils.features_backfill("MACHINE_FEATURES", d)
+
# Handle PACKAGECONFIG
#
# These take the form:
@@ -463,7 +454,7 @@ python () {
pn = d.getVar('PN')
license = d.getVar('LICENSE')
- if license == "INVALID":
+ if license == "INVALID" and pn != "defaultpkgname":
bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)
if bb.data.inherits_class('license', d):
@@ -472,7 +463,7 @@ python () {
if unmatched_license_flag:
bb.debug(1, "Skipping %s because it has a restricted license not"
" whitelisted in LICENSE_FLAGS_WHITELIST" % pn)
- raise bb.parse.SkipPackage("because it has a restricted license not"
+ raise bb.parse.SkipRecipe("because it has a restricted license not"
" whitelisted in LICENSE_FLAGS_WHITELIST")
# If we're building a target package we need to use fakeroot (pseudo)
@@ -500,7 +491,7 @@ python () {
if re.match(need_machine, m):
break
else:
- raise bb.parse.SkipPackage("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE'))
+ raise bb.parse.SkipRecipe("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE'))
source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
if not source_mirror_fetch:
@@ -509,7 +500,7 @@ python () {
import re
this_host = d.getVar('HOST_SYS')
if not re.match(need_host, this_host):
- raise bb.parse.SkipPackage("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)
+ raise bb.parse.SkipRecipe("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)
bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()
@@ -562,7 +553,7 @@ python () {
bb.debug(1, "INCLUDING the package " + pkg)
elif all_skipped or incompatible_license(d, bad_licenses):
bb.debug(1, "SKIPPING recipe %s because it's %s" % (pn, license))
- raise bb.parse.SkipPackage("it has an incompatible license: %s" % license)
+ raise bb.parse.SkipRecipe("it has an incompatible license: %s" % license)
elif pn in whitelist:
if pn in incompatwl:
bb.note("INCLUDING " + pn + " as buildable despite INCOMPATIBLE_LICENSE because it has been whitelisted")
@@ -635,6 +626,10 @@ python () {
elif path.endswith('.rpm'):
d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
+ # *.deb should DEPEND on xz-native for unpacking
+ elif path.endswith('.deb'):
+ d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
+
if needsrcrev:
d.setVar("SRCPV", "${@bb.fetch2.get_srcrev(d)}")
diff --git a/import-layers/yocto-poky/meta/classes/bin_package.bbclass b/import-layers/yocto-poky/meta/classes/bin_package.bbclass
index a52b75be5..cbc9b1fa1 100644
--- a/import-layers/yocto-poky/meta/classes/bin_package.bbclass
+++ b/import-layers/yocto-poky/meta/classes/bin_package.bbclass
@@ -26,7 +26,10 @@ do_compile[noexec] = "1"
bin_package_do_install () {
# Do it carefully
[ -d "${S}" ] || exit 1
- cd ${S} || exit 1
+ if [ -z "$(ls -A ${S})" ]; then
+ bbfatal bin_package has nothing to install. Be sure the SRC_URI unpacks into S.
+ fi
+ cd ${S}
tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
| tar --no-same-owner -xpf - -C ${D}
}
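
The new emptiness check guards recipes like the following hypothetical one: if the tarball did not unpack into ${S}, do_install previously produced an empty package without complaint:

    SUMMARY = "Prebuilt vendor binaries"
    LICENSE = "CLOSED"
    SRC_URI = "file://vendor-bin.tar.gz"
    S = "${WORKDIR}/vendor-bin"
    inherit bin_package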
diff --git a/import-layers/yocto-poky/meta/classes/blacklist.bbclass b/import-layers/yocto-poky/meta/classes/blacklist.bbclass
index e58564c34..dc794228f 100644
--- a/import-layers/yocto-poky/meta/classes/blacklist.bbclass
+++ b/import-layers/yocto-poky/meta/classes/blacklist.bbclass
@@ -16,5 +16,5 @@ python () {
blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN'))
if blacklist:
- raise bb.parse.SkipPackage("Recipe is blacklisted: %s" % (blacklist))
+ raise bb.parse.SkipRecipe("Recipe is blacklisted: %s" % (blacklist))
}
diff --git a/import-layers/yocto-poky/meta/classes/buildhistory.bbclass b/import-layers/yocto-poky/meta/classes/buildhistory.bbclass
index 7a5534edd..63980f72a 100644
--- a/import-layers/yocto-poky/meta/classes/buildhistory.bbclass
+++ b/import-layers/yocto-poky/meta/classes/buildhistory.bbclass
@@ -37,7 +37,7 @@ BUILDHISTORY_OLD_DIR_PACKAGE = "${BUILDHISTORY_OLD_DIR}/packages/${MULTIMACH_TAR
BUILDHISTORY_DIR_SDK = "${BUILDHISTORY_DIR}/sdk/${SDK_NAME}${SDK_EXT}/${IMAGE_BASENAME}"
BUILDHISTORY_IMAGE_FILES ?= "/etc/passwd /etc/group"
BUILDHISTORY_SDK_FILES ?= "conf/local.conf conf/bblayers.conf conf/auto.conf conf/locked-sigs.inc conf/devtool.conf"
-BUILDHISTORY_COMMIT ?= "0"
+BUILDHISTORY_COMMIT ?= "1"
BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
BUILDHISTORY_PUSH_REPO ?= ""
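
With BUILDHISTORY_COMMIT now defaulting to "1", merely inheriting the class commits each build's history to the git repository; users who only want the on-disk files must opt out explicitly in local.conf:

    INHERIT += "buildhistory"
    BUILDHISTORY_COMMIT = "0"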
diff --git a/import-layers/yocto-poky/meta/classes/cmake.bbclass b/import-layers/yocto-poky/meta/classes/cmake.bbclass
index ac2c1519b..fcfd5dda4 100644
--- a/import-layers/yocto-poky/meta/classes/cmake.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cmake.bbclass
@@ -7,6 +7,23 @@ B = "${WORKDIR}/build"
# We need to unset CCACHE otherwise cmake gets too confused
CCACHE = ""
+# What CMake generator to use.
+# The supported options are "Unix Makefiles" or "Ninja".
+OECMAKE_GENERATOR ?= "Ninja"
+
+python() {
+ generator = d.getVar("OECMAKE_GENERATOR")
+ if generator == "Unix Makefiles":
+ args = "-G 'Unix Makefiles' -DCMAKE_MAKE_PROGRAM=" + d.getVar("MAKE")
+ d.setVar("OECMAKE_GENERATOR_ARGS", args)
+ d.setVarFlag("do_compile", "progress", "percent")
+ elif generator == "Ninja":
+ d.appendVar("DEPENDS", " ninja-native")
+ d.setVar("OECMAKE_GENERATOR_ARGS", "-G Ninja -DCMAKE_MAKE_PROGRAM=ninja")
+ d.setVarFlag("do_compile", "progress", "outof:^\[(\d+)/(\d+)\]\s+")
+ else:
+ bb.fatal("Unknown CMake Generator %s" % generator)
+}
# C/C++ Compiler (without cpu arch/tune arguments)
OECMAKE_C_COMPILER ?= "`echo ${CC} | sed 's/^\([^ ]*\).*/\1/'`"
OECMAKE_CXX_COMPILER ?= "`echo ${CXX} | sed 's/^\([^ ]*\).*/\1/'`"
@@ -34,6 +51,11 @@ EXTRA_OECMAKE_append = " ${PACKAGECONFIG_CONFARGS}"
EXTRA_OECMAKE_BUILD_prepend_task-compile = "${PARALLEL_MAKE} "
EXTRA_OECMAKE_BUILD_prepend_task-install = "${PARALLEL_MAKEINST} "
+OECMAKE_TARGET_COMPILE ?= "all"
+OECMAKE_TARGET_INSTALL ?= "install"
+
+FILES_${PN}-dev += "${libdir}/cmake ${datadir}/cmake"
+
# CMake expects target architectures in the format of uname(2),
# which do not always match TARGET_ARCH, so all the necessary
# conversions should happen here.
@@ -116,6 +138,7 @@ cmake_do_configure() {
fi
cmake \
+ ${OECMAKE_GENERATOR_ARGS} \
$oecmake_sitefile \
${OECMAKE_SOURCEPATH} \
-DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
@@ -136,15 +159,17 @@ cmake_do_configure() {
-Wno-dev
}
-do_compile[progress] = "percent"
+cmake_runcmake_build() {
+ bbnote ${DESTDIR:+DESTDIR=${DESTDIR} }VERBOSE=1 cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
+ eval ${DESTDIR:+DESTDIR=${DESTDIR} }VERBOSE=1 cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
+}
+
cmake_do_compile() {
- bbnote VERBOSE=1 cmake --build '${B}' -- ${EXTRA_OECMAKE_BUILD}
- VERBOSE=1 cmake --build '${B}' -- ${EXTRA_OECMAKE_BUILD}
+ cmake_runcmake_build --target ${OECMAKE_TARGET_COMPILE}
}
cmake_do_install() {
- bbnote DESTDIR='${D}' cmake --build '${B}' --target install -- ${EXTRA_OECMAKE_BUILD}
- DESTDIR='${D}' cmake --build '${B}' --target install -- ${EXTRA_OECMAKE_BUILD}
+ DESTDIR='${D}' cmake_runcmake_build --target ${OECMAKE_TARGET_INSTALL}
}
EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
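
Ninja becomes the default generator; a recipe whose upstream CMake files only work with make can opt back out per recipe, for example:

    inherit cmake
    OECMAKE_GENERATOR = "Unix Makefiles"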
diff --git a/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass b/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass
index 1928455cf..ee8aa6142 100644
--- a/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cross-canadian.bbclass
@@ -38,7 +38,7 @@ python () {
extralibcs = [""]
if "musl" in d.getVar("BASECANADIANEXTRAOS"):
extralibcs.append("musl")
- for variant in ["", "spe", "x32", "eabi", "n32", "ilp32"]:
+ for variant in ["", "spe", "x32", "eabi", "n32", "_ilp32"]:
for libc in extralibcs:
entry = "linux"
if variant and libc:
@@ -123,8 +123,6 @@ LDFLAGS = "${BUILDSDK_LDFLAGS} \
-Wl,-rpath-link,${STAGING_LIBDIR}/.. \
-Wl,-rpath,${libdir}/.. "
-DEPENDS_GETTEXT = "gettext-native nativesdk-gettext"
-
#
# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
# binaries
diff --git a/import-layers/yocto-poky/meta/classes/cross.bbclass b/import-layers/yocto-poky/meta/classes/cross.bbclass
index d217717e6..4e85cab2d 100644
--- a/import-layers/yocto-poky/meta/classes/cross.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cross.bbclass
@@ -41,8 +41,6 @@ LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE}"
TOOLCHAIN_OPTIONS = ""
-DEPENDS_GETTEXT = "gettext-native"
-
# This class encodes staging paths into its scripts data so can only be
# reused if we manipulate the paths.
SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
@@ -92,3 +90,8 @@ export STRIP = "${BUILD_STRIP}"
export NM = "${BUILD_NM}"
inherit nopackages
+
+python do_addto_recipe_sysroot () {
+ bb.build.exec_func("extend_recipe_sysroot", d)
+}
+addtask addto_recipe_sysroot after do_populate_sysroot
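
The new do_addto_recipe_sysroot task simply runs extend_recipe_sysroot, so a cross recipe's sysroot can be populated on demand without a full build, e.g. (recipe name illustrative):

    $ bitbake gdb-cross-arm -c addto_recipe_sysroot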
diff --git a/import-layers/yocto-poky/meta/classes/cve-check.bbclass b/import-layers/yocto-poky/meta/classes/cve-check.bbclass
index bc2f03f7d..537659df1 100644
--- a/import-layers/yocto-poky/meta/classes/cve-check.bbclass
+++ b/import-layers/yocto-poky/meta/classes/cve-check.bbclass
@@ -23,12 +23,12 @@
# The product name that the CVE database uses. Defaults to BPN, but may need to
# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
CVE_PRODUCT ??= "${BPN}"
+CVE_VERSION ??= "${PV}"
CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK"
CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvd.db"
-CVE_CHECK_LOCAL_DIR ?= "${WORKDIR}/cve"
-CVE_CHECK_LOCAL_FILE ?= "${CVE_CHECK_LOCAL_DIR}/cve.log"
+CVE_CHECK_LOG ?= "${T}/cve.log"
CVE_CHECK_TMP_FILE ?= "${TMPDIR}/cve_check"
CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve"
@@ -169,7 +169,10 @@ def check_cves(d, patched_cves):
cves_patched = []
cves_unpatched = []
bpn = d.getVar("CVE_PRODUCT")
- pv = d.getVar("PV").split("+git")[0]
+ # If this has been unset then we're not scanning for CVEs here (for example, image recipes)
+ if not bpn:
+ return ([], [])
+ pv = d.getVar("CVE_VERSION").split("+git")[0]
cves = " ".join(patched_cves)
cve_db_dir = d.getVar("CVE_CHECK_DB_DIR")
cve_whitelist = ast.literal_eval(d.getVar("CVE_CHECK_CVE_WHITELIST"))
@@ -181,9 +184,6 @@ def check_cves(d, patched_cves):
bb.note("Recipe has been whitelisted, skipping check")
return ([], [])
- # It is needed to export the proxies to download the database using HTTP
- bb.utils.export_proxies(d)
-
try:
# Write the faux CSV file to be used with cve-check-tool
fd, faux = tempfile.mkstemp(prefix="cve-faux-")
@@ -251,11 +251,11 @@ def cve_write_data(d, patched, unpatched, cve_data):
CVE manifest if enabled.
"""
- cve_file = d.getVar("CVE_CHECK_LOCAL_FILE")
+ cve_file = d.getVar("CVE_CHECK_LOG")
nvd_link = "https://web.nvd.nist.gov/view/vuln/detail?vulnId="
write_string = ""
unpatched_cves = []
- bb.utils.mkdirhier(d.getVar("CVE_CHECK_LOCAL_DIR"))
+ bb.utils.mkdirhier(os.path.dirname(cve_file))
for cve in sorted(cve_data):
write_string += "PACKAGE NAME: %s\n" % d.getVar("PN")
diff --git a/import-layers/yocto-poky/meta/classes/debian.bbclass b/import-layers/yocto-poky/meta/classes/debian.bbclass
index 8124558b8..989ea8f8d 100644
--- a/import-layers/yocto-poky/meta/classes/debian.bbclass
+++ b/import-layers/yocto-poky/meta/classes/debian.bbclass
@@ -25,12 +25,10 @@ python () {
}
python debian_package_name_hook () {
- import glob, copy, stat, errno, re
+ import glob, copy, stat, errno, re, pathlib, subprocess
- pkgdest = d.getVar('PKGDEST')
+ pkgdest = d.getVar("PKGDEST")
packages = d.getVar('PACKAGES')
- bin_re = re.compile(".*/s?" + os.path.basename(d.getVar("bindir")) + "$")
- lib_re = re.compile(".*/" + os.path.basename(d.getVar("libdir")) + "$")
so_re = re.compile("lib.*\.so")
def socrunch(s):
@@ -60,25 +58,32 @@ python debian_package_name_hook () {
d.appendVar('RPROVIDES_' + pkg, " " + pkg + " (=" + d.getVar("PKGV") + ")")
def auto_libname(packages, orig_pkg):
+ p = lambda var: pathlib.PurePath(d.getVar(var))
+ libdirs = (p("base_libdir"), p("libdir"))
+ bindirs = (p("base_bindir"), p("base_sbindir"), p("bindir"), p("sbindir"))
+
sonames = []
has_bins = 0
has_libs = 0
- for file in pkgfiles[orig_pkg]:
- root = os.path.dirname(file)
- if bin_re.match(root):
+ for f in pkgfiles[orig_pkg]:
+ # This is .../packages-split/orig_pkg/
+ pkgpath = pathlib.PurePath(pkgdest, orig_pkg)
+ # Strip pkgpath off the full path to a file in the package, re-root
+ # so it is absolute, and then get the parent directory of the file.
+ path = pathlib.PurePath("/") / (pathlib.PurePath(f).relative_to(pkgpath).parent)
+ if path in bindirs:
has_bins = 1
- if lib_re.match(root):
+ if path in libdirs:
has_libs = 1
- if so_re.match(os.path.basename(file)):
- cmd = (d.getVar('TARGET_PREFIX') or "") + "objdump -p " + file + " 2>/dev/null"
- fd = os.popen(cmd)
- lines = fd.readlines()
- fd.close()
- for l in lines:
- m = re.match("\s+SONAME\s+([^\s]*)", l)
- if m and not m.group(1) in sonames:
- sonames.append(m.group(1))
-
+ if so_re.match(os.path.basename(f)):
+ try:
+ cmd = [d.expand("${TARGET_PREFIX}objdump"), "-p", f]
+ output = subprocess.check_output(cmd).decode("utf-8")
+ for m in re.finditer("\s+SONAME\s+([^\s]+)", output):
+ if m.group(1) not in sonames:
+ sonames.append(m.group(1))
+ except subprocess.CalledProcessError:
+ pass
bb.debug(1, 'LIBNAMES: pkg %s libs %d bins %d sonames %s' % (orig_pkg, has_libs, has_bins, sonames))
soname = None
if len(sonames) == 1:
@@ -120,6 +125,7 @@ python debian_package_name_hook () {
if not newpkg.find(mlpre) == 0:
newpkg = mlpre + newpkg
if newpkg != pkg:
+ bb.note("debian: renaming %s to %s" % (pkg, newpkg))
d.setVar('PKG_' + pkg, newpkg)
add_rprovides(pkg, d)
else:
@@ -138,4 +144,3 @@ python debian_package_name_hook () {
EXPORT_FUNCTIONS package_name_hook
DEBIAN_NAMES = "1"
-
diff --git a/import-layers/yocto-poky/meta/classes/deploy.bbclass b/import-layers/yocto-poky/meta/classes/deploy.bbclass
index 8ad07da01..6d5290878 100644
--- a/import-layers/yocto-poky/meta/classes/deploy.bbclass
+++ b/import-layers/yocto-poky/meta/classes/deploy.bbclass
@@ -8,4 +8,4 @@ python do_deploy_setscene () {
}
addtask do_deploy_setscene
do_deploy[dirs] = "${DEPLOYDIR} ${B}"
-do_deploy[stamp-extra-info] = "${MACHINE}"
+do_deploy[stamp-extra-info] = "${MACHINE_ARCH}"
diff --git a/import-layers/yocto-poky/meta/classes/devtool-source.bbclass b/import-layers/yocto-poky/meta/classes/devtool-source.bbclass
index 8f5bc86b2..56882a41d 100644
--- a/import-layers/yocto-poky/meta/classes/devtool-source.bbclass
+++ b/import-layers/yocto-poky/meta/classes/devtool-source.bbclass
@@ -152,9 +152,65 @@ python devtool_pre_patch() {
}
python devtool_post_patch() {
+ import shutil
tempdir = d.getVar('DEVTOOL_TEMPDIR')
with open(os.path.join(tempdir, 'srcsubdir'), 'r') as f:
srcsubdir = f.read()
+ with open(os.path.join(tempdir, 'initial_rev'), 'r') as f:
+ initial_rev = f.read()
+
+ def rm_patches():
+ patches_dir = os.path.join(srcsubdir, 'patches')
+ if os.path.exists(patches_dir):
+ shutil.rmtree(patches_dir)
+ # Restore any "patches" directory that was actually part of the source tree
+ try:
+ bb.process.run('git checkout -- patches', cwd=srcsubdir)
+ except bb.process.ExecutionError:
+ pass
+
+ extra_overrides = d.getVar('DEVTOOL_EXTRA_OVERRIDES')
+ if extra_overrides:
+ extra_override_list = extra_overrides.split(':')
+ devbranch = d.getVar('DEVTOOL_DEVBRANCH')
+ default_overrides = d.getVar('OVERRIDES').split(':')
+ no_overrides = []
+ # First, we may have some overrides that are referred to in the recipe set in
+ # our configuration, so we need to make a branch that excludes those
+ for override in default_overrides:
+ if override not in extra_override_list:
+ no_overrides.append(override)
+ if default_overrides != no_overrides:
+ # Some overrides are active in the current configuration, so
+ # we need to create a branch where none of the overrides are active
+ bb.process.run('git checkout %s -b devtool-no-overrides' % initial_rev, cwd=srcsubdir)
+ # Run do_patch function with the override applied
+ localdata = bb.data.createCopy(d)
+ localdata.setVar('OVERRIDES', ':'.join(no_overrides))
+ bb.build.exec_func('do_patch', localdata)
+ rm_patches()
+ # Now we need to reconcile the dev branch with the no-overrides one
+ # (otherwise we'd likely be left with identical commits that have different hashes)
+ bb.process.run('git checkout %s' % devbranch, cwd=srcsubdir)
+ bb.process.run('git rebase devtool-no-overrides', cwd=srcsubdir)
+ else:
+ bb.process.run('git checkout %s -b devtool-no-overrides' % devbranch, cwd=srcsubdir)
+
+ for override in extra_override_list:
+ localdata = bb.data.createCopy(d)
+ if override in default_overrides:
+ bb.process.run('git branch devtool-override-%s %s' % (override, devbranch), cwd=srcsubdir)
+ else:
+ # Reset back to the initial commit on a new branch
+ bb.process.run('git checkout %s -b devtool-override-%s' % (initial_rev, override), cwd=srcsubdir)
+ # Run do_patch function with the override applied
+ localdata.appendVar('OVERRIDES', ':%s' % override)
+ bb.build.exec_func('do_patch', localdata)
+ rm_patches()
+ # Now we need to reconcile the new branch with the no-overrides one
+ # (otherwise we'd likely be left with identical commits that have different hashes)
+ bb.process.run('git rebase devtool-no-overrides', cwd=srcsubdir)
+ bb.process.run('git checkout %s' % devbranch, cwd=srcsubdir)
bb.process.run('git tag -f devtool-patched', cwd=srcsubdir)
}
diff --git a/import-layers/yocto-poky/meta/classes/distro_features_check.bbclass b/import-layers/yocto-poky/meta/classes/distro_features_check.bbclass
index e74d3c04b..9b78b03ef 100644
--- a/import-layers/yocto-poky/meta/classes/distro_features_check.bbclass
+++ b/import-layers/yocto-poky/meta/classes/distro_features_check.bbclass
@@ -17,7 +17,7 @@ python () {
if any_of_distro_features:
any_of_distro_features = any_of_distro_features.split()
if set.isdisjoint(set(any_of_distro_features),set(distro_features)):
- raise bb.parse.SkipPackage("one of '%s' needs to be in DISTRO_FEATURES" % any_of_distro_features)
+ raise bb.parse.SkipRecipe("one of '%s' needs to be in DISTRO_FEATURES" % any_of_distro_features)
required_distro_features = d.getVar('REQUIRED_DISTRO_FEATURES')
if required_distro_features:
@@ -26,12 +26,12 @@ python () {
if f in distro_features:
continue
else:
- raise bb.parse.SkipPackage("missing required distro feature '%s' (not in DISTRO_FEATURES)" % f)
+ raise bb.parse.SkipRecipe("missing required distro feature '%s' (not in DISTRO_FEATURES)" % f)
conflict_distro_features = d.getVar('CONFLICT_DISTRO_FEATURES')
if conflict_distro_features:
conflict_distro_features = conflict_distro_features.split()
for f in conflict_distro_features:
if f in distro_features:
- raise bb.parse.SkipPackage("conflicting distro feature '%s' (in DISTRO_FEATURES)" % f)
+ raise bb.parse.SkipRecipe("conflicting distro feature '%s' (in DISTRO_FEATURES)" % f)
}
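
The class remains driven by the same three variables; typical recipe usage, for reference:

    inherit distro_features_check
    REQUIRED_DISTRO_FEATURES = "x11"
    CONFLICT_DISTRO_FEATURES = "wayland"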
diff --git a/import-layers/yocto-poky/meta/classes/distrodata.bbclass b/import-layers/yocto-poky/meta/classes/distrodata.bbclass
index c85f7b347..59ee8cea6 100644
--- a/import-layers/yocto-poky/meta/classes/distrodata.bbclass
+++ b/import-layers/yocto-poky/meta/classes/distrodata.bbclass
@@ -188,14 +188,6 @@ python do_distrodata() {
}
do_distrodata[vardepsexclude] = "DATETIME"
-addtask distrodataall after do_distrodata
-do_distrodataall[recrdeptask] = "do_distrodataall do_distrodata"
-do_distrodataall[recideptask] = "do_${BB_DEFAULT_TASK}"
-do_distrodataall[nostamp] = "1"
-do_distrodataall() {
- :
-}
-
addhandler checkpkg_eventhandler
checkpkg_eventhandler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted"
python checkpkg_eventhandler() {
@@ -272,24 +264,15 @@ python do_checkpkg() {
if upstream_check_unreliable == "1":
return "N/A", "CHECK_IS_UNRELIABLE"
- try:
- uv = oe.recipeutils.get_recipe_upstream_version(localdata)
- pupver = uv['version'] if uv['version'] else "N/A"
- except Exception as e:
- pupver = "N/A"
+ uv = oe.recipeutils.get_recipe_upstream_version(localdata)
+ pupver = uv['version'] if uv['version'] else "N/A"
+ pversion = uv['current_version']
+ revision = uv['revision'] if uv['revision'] else "N/A"
if pupver == "N/A":
pstatus = "UNKNOWN" if upstream_version_unknown else "UNKNOWN_BROKEN"
else:
- src_uri = (localdata.getVar('SRC_URI') or '').split()
- if src_uri:
- uri_type, _, _, _, _, _ = decodeurl(src_uri[0])
- else:
- uri_type = "none"
- pv, _, _ = oe.recipeutils.get_recipe_pv_without_srcpv(pversion, uri_type)
- upv, _, _ = oe.recipeutils.get_recipe_pv_without_srcpv(pupver, uri_type)
-
- cmp = vercmp_string(pv, upv)
+ cmp = vercmp_string(pversion, pupver)
if cmp == -1:
pstatus = "UPDATE" if not upstream_version_unknown else "KNOWN_BROKEN"
elif cmp == 0:
@@ -297,7 +280,7 @@ python do_checkpkg() {
else:
pstatus = "UNKNOWN" if upstream_version_unknown else "UNKNOWN_BROKEN"
- return pupver, pstatus
+ return pversion, pupver, pstatus, revision
"""initialize log files."""
@@ -334,7 +317,6 @@ python do_checkpkg() {
pdesc = localdata.getVar('DESCRIPTION')
pgrp = localdata.getVar('SECTION')
- pversion = localdata.getVar('PV')
plicense = localdata.getVar('LICENSE')
psection = localdata.getVar('SECTION')
phome = localdata.getVar('HOMEPAGE')
@@ -345,7 +327,7 @@ python do_checkpkg() {
psrcuri = localdata.getVar('SRC_URI')
maintainer = localdata.getVar('RECIPE_MAINTAINER')
- pupver, pstatus = get_upstream_version_and_status()
+ pversion, pupver, pstatus, prevision = get_upstream_version_and_status()
if psrcuri:
psrcuri = psrcuri.split()[0]
@@ -358,20 +340,12 @@ python do_checkpkg() {
with open(logfile, "a") as f:
writer = csv.writer(f, delimiter='\t')
writer.writerow([pname, pversion, pupver, plicense, psection, phome,
- prelease, pdepends, pbugtracker, ppe, pdesc, pstatus, pupver,
+ prelease, pdepends, pbugtracker, ppe, pdesc, pstatus, prevision,
psrcuri, maintainer, no_upgr_reason])
f.close()
bb.utils.unlockfile(lf)
}
-addtask checkpkgall after do_checkpkg
-do_checkpkgall[recrdeptask] = "do_checkpkgall do_checkpkg"
-do_checkpkgall[recideptask] = "do_${BB_DEFAULT_TASK}"
-do_checkpkgall[nostamp] = "1"
-do_checkpkgall() {
- :
-}
-
addhandler distro_check_eventhandler
distro_check_eventhandler[eventmask] = "bb.event.BuildStarted"
python distro_check_eventhandler() {
@@ -407,13 +381,6 @@ python do_distro_check() {
dc.save_distro_check_result(result, datetime, result_file, d)
}
-addtask distro_checkall after do_distro_check
-do_distro_checkall[recrdeptask] = "do_distro_checkall do_distro_check"
-do_distro_checkall[recideptask] = "do_${BB_DEFAULT_TASK}"
-do_distro_checkall[nostamp] = "1"
-do_distro_checkall() {
- :
-}
#
#Check Missing License Text.
#Use this task to generate the missing license text data for pkg-report system,
@@ -458,11 +425,3 @@ python do_checklicense() {
bb.utils.unlockfile(lf)
return
}
-
-addtask checklicenseall after do_checklicense
-do_checklicenseall[recrdeptask] = "do_checklicenseall do_checklicense"
-do_checklicenseall[recideptask] = "do_${BB_DEFAULT_TASK}"
-do_checklicenseall[nostamp] = "1"
-do_checklicenseall() {
- :
-}
diff --git a/import-layers/yocto-poky/meta/classes/externalsrc.bbclass b/import-layers/yocto-poky/meta/classes/externalsrc.bbclass
index 65dd13ddc..c9f5cf767 100644
--- a/import-layers/yocto-poky/meta/classes/externalsrc.bbclass
+++ b/import-layers/yocto-poky/meta/classes/externalsrc.bbclass
@@ -53,6 +53,9 @@ python () {
d.setVar('BB_DONT_CACHE', '1')
if externalsrc:
+ import oe.recipeutils
+ import oe.path
+
d.setVar('S', externalsrc)
if externalsrcbuild:
d.setVar('B', externalsrcbuild)
@@ -85,10 +88,10 @@ python () {
d.appendVarFlag(task, "lockfiles", " ${S}/singletask.lock")
# We do not want our source to be wiped out, ever (kernel.bbclass does this for do_clean)
- cleandirs = (d.getVarFlag(task, 'cleandirs', False) or '').split()
+ cleandirs = oe.recipeutils.split_var_value(d.getVarFlag(task, 'cleandirs', False) or '')
setvalue = False
for cleandir in cleandirs[:]:
- if d.expand(cleandir) == externalsrc:
+ if oe.path.is_path_parent(externalsrc, d.expand(cleandir)):
cleandirs.remove(cleandir)
setvalue = True
if setvalue:
@@ -173,7 +176,9 @@ do_buildclean[doc] = "Call 'make clean' or equivalent in ${B}"
externalsrc_do_buildclean() {
if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
rm -f ${@' '.join([x.split(':')[0] for x in (d.getVar('EXTERNALSRC_SYMLINKS') or '').split()])}
- oe_runmake clean || die "make failed"
+ if [ "${CLEANBROKEN}" != "1" ]; then
+ oe_runmake clean || die "make failed"
+ fi
else
bbnote "nothing to do - no makefile found"
fi
@@ -189,7 +194,7 @@ def srctree_hash_files(d, srcdir=None):
try:
git_dir = os.path.join(s_dir,
- subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir']).decode("utf-8").rstrip())
+ subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
except subprocess.CalledProcessError:
pass
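
Typical configuration for the class being hardened here (recipe name and paths hypothetical); the CLEANBROKEN guard above additionally lets such a recipe set CLEANBROKEN = "1" to skip 'make clean' on its external tree:

    INHERIT += "externalsrc"
    EXTERNALSRC_pn-myapp = "/home/user/src/myapp"
    EXTERNALSRC_BUILD_pn-myapp = "/home/user/build/myapp"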
diff --git a/import-layers/yocto-poky/meta/classes/gettext.bbclass b/import-layers/yocto-poky/meta/classes/gettext.bbclass
index da68e6324..be2ef3b31 100644
--- a/import-layers/yocto-poky/meta/classes/gettext.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gettext.bbclass
@@ -3,7 +3,7 @@ def gettext_dependencies(d):
return ""
if d.getVar('USE_NLS') == 'no':
return "gettext-minimal-native"
- return d.getVar('DEPENDS_GETTEXT', False)
+ return "gettext-native"
def gettext_oeconf(d):
if d.getVar('USE_NLS') == 'no':
@@ -13,8 +13,6 @@ def gettext_oeconf(d):
return '--disable-nls'
return "--enable-nls"
-DEPENDS_GETTEXT ??= "gettext-native"
-
BASEDEPENDS_append = " ${@gettext_dependencies(d)}"
EXTRA_OECONF_append = " ${@gettext_oeconf(d)}"
diff --git a/import-layers/yocto-poky/meta/classes/gio-module-cache.bbclass b/import-layers/yocto-poky/meta/classes/gio-module-cache.bbclass
index a8190b7b8..e429bd319 100644
--- a/import-layers/yocto-poky/meta/classes/gio-module-cache.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gio-module-cache.bbclass
@@ -9,6 +9,7 @@ if [ "x$D" != "x" ]; then
mlprefix=${MLPREFIX} \
binprefix=${MLPREFIX} \
libdir=${libdir} \
+ libexecdir=${libexecdir} \
base_libdir=${base_libdir} \
bindir=${bindir}
else
diff --git a/import-layers/yocto-poky/meta/classes/glide.bbclass b/import-layers/yocto-poky/meta/classes/glide.bbclass
new file mode 100644
index 000000000..db421745b
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/glide.bbclass
@@ -0,0 +1,9 @@
+# Handle Glide Vendor Package Management use
+#
+# Copyright 2018 (C) O.S. Systems Software LTDA.
+
+DEPENDS_append = " glide-native"
+
+do_compile_prepend() {
+ ( cd ${B}/src/${GO_IMPORT} && glide install )
+}
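
Usage sketch for the new class (import path hypothetical): a Go recipe layers glide on top of go.bbclass, and the prepend above runs 'glide install' inside the import path before compilation:

    GO_IMPORT = "github.com/example/project"
    inherit go glide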
diff --git a/import-layers/yocto-poky/meta/classes/gnomebase.bbclass b/import-layers/yocto-poky/meta/classes/gnomebase.bbclass
index 4ccc8e078..efcb6caae 100644
--- a/import-layers/yocto-poky/meta/classes/gnomebase.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gnomebase.bbclass
@@ -20,7 +20,8 @@ FILES_${PN} += "${datadir}/application-registry \
FILES_${PN}-doc += "${datadir}/devhelp"
-inherit autotools pkgconfig
+GNOMEBASEBUILDCLASS ??= "autotools"
+inherit ${GNOMEBASEBUILDCLASS} pkgconfig
do_install_append() {
rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
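
Making the build class selectable lets GNOME recipes migrate to meson while autotools stays the default; because inherit is evaluated at parse time, the override must precede it:

    GNOMEBASEBUILDCLASS = "meson"
    inherit gnomebase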
diff --git a/import-layers/yocto-poky/meta/classes/go.bbclass b/import-layers/yocto-poky/meta/classes/go.bbclass
index 09b01a84c..d33d83ea7 100644
--- a/import-layers/yocto-poky/meta/classes/go.bbclass
+++ b/import-layers/yocto-poky/meta/classes/go.bbclass
@@ -1,23 +1,6 @@
inherit goarch ptest
-def get_go_parallel_make(d):
- pm = (d.getVar('PARALLEL_MAKE') or '').split()
- # look for '-j' and throw other options (e.g. '-l') away
- # because they might have a different meaning in golang
- while pm:
- opt = pm.pop(0)
- if opt == '-j':
- v = pm.pop(0)
- elif opt.startswith('-j'):
- v = opt[2:].strip()
- else:
- continue
-
- return '-p %d' % int(v)
-
- return ""
-
-GO_PARALLEL_BUILD ?= "${@get_go_parallel_make(d)}"
+GO_PARALLEL_BUILD ?= "${@oe.utils.parallel_make_argument(d, '-p %d')}"
GOROOT_class-native = "${STAGING_LIBDIR_NATIVE}/go"
GOROOT_class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go"
@@ -41,8 +24,9 @@ GO_LINKMODE ?= ""
GO_LINKMODE_class-nativesdk = "--linkmode=external"
GO_LDFLAGS ?= '-ldflags="${GO_RPATH} ${GO_LINKMODE} -extldflags '${GO_EXTLDFLAGS}'"'
export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS}"
+export GOPATH_OMIT_IN_ACTIONID ?= "1"
export GOPTESTBUILDFLAGS ?= "${GOBUILDFLAGS} -c"
-export GOPTESTFLAGS ?= "-test.v"
+export GOPTESTFLAGS ?= ""
GOBUILDFLAGS_prepend_task-compile = "${GO_PARALLEL_BUILD} "
export GO = "${HOST_PREFIX}go"
@@ -50,9 +34,6 @@ GOTOOLDIR = "${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go/pkg/tool/${BUILD_GOTUPLE}
GOTOOLDIR_class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}"
export GOTOOLDIR
-SECURITY_CFLAGS = "${SECURITY_NOPIE_CFLAGS}"
-SECURITY_LDFLAGS = ""
-
export CGO_ENABLED ?= "1"
export CGO_CFLAGS ?= "${CFLAGS}"
export CGO_CPPFLAGS ?= "${CPPFLAGS}"
@@ -64,8 +45,9 @@ GO_INSTALL_FILTEROUT ?= "${GO_IMPORT}/vendor/"
B = "${WORKDIR}/build"
export GOPATH = "${B}"
-GO_TMPDIR ?= "${WORKDIR}/go-tmp"
-GO_TMPDIR[vardepvalue] = ""
+export GOCACHE = "off"
+export GOTMPDIR ?= "${WORKDIR}/go-tmp"
+GOTMPDIR[vardepvalue] = ""
python go_do_unpack() {
src_uri = (d.getVar('SRC_URI') or "").split()
@@ -91,7 +73,7 @@ go_list_packages() {
}
go_list_package_tests() {
- ${GO} list -f '{{.ImportPath}} {{.TestGoFiles}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
+ ${GO} list -f '{{.ImportPath}} {{.TestGoFiles}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
grep -v '\[\]$' | \
egrep -v '${GO_INSTALL_FILTEROUT}' | \
awk '{ print $1 }'
@@ -100,32 +82,37 @@ go_list_package_tests() {
go_do_configure() {
ln -snf ${S}/src ${B}/
}
+do_configure[dirs] =+ "${GOTMPDIR}"
go_do_compile() {
- export TMPDIR="${GO_TMPDIR}"
- ${GO} env
+ export TMPDIR="${GOTMPDIR}"
if [ -n "${GO_INSTALL}" ]; then
+ if [ -n "${GO_LINKSHARED}" ]; then
+ ${GO} install ${GOBUILDFLAGS} `go_list_packages`
+ rm -rf ${B}/bin
+ fi
${GO} install ${GO_LINKSHARED} ${GOBUILDFLAGS} `go_list_packages`
fi
}
-do_compile[dirs] =+ "${GO_TMPDIR}"
+do_compile[dirs] =+ "${GOTMPDIR}"
do_compile[cleandirs] = "${B}/bin ${B}/pkg"
-do_compile_ptest() {
- export TMPDIR="${GO_TMPDIR}"
- rm -f ${B}/.go_compiled_tests.list
+do_compile_ptest_base() {
+ export TMPDIR="${GOTMPDIR}"
+ rm -f ${B}/.go_compiled_tests.list
go_list_package_tests | while read pkg; do
cd ${B}/src/$pkg
${GO} test ${GOPTESTBUILDFLAGS} $pkg
find . -mindepth 1 -maxdepth 1 -type f -name '*.test' -exec echo $pkg/{} \; | \
sed -e's,/\./,/,'>> ${B}/.go_compiled_tests.list
done
+ do_compile_ptest
}
-do_compile_ptest_base[dirs] =+ "${GO_TMPDIR}"
+do_compile_ptest_base[dirs] =+ "${GOTMPDIR}"
go_do_install() {
install -d ${D}${libdir}/go/src/${GO_IMPORT}
- tar -C ${S}/src/${GO_IMPORT} -cf - --exclude-vcs --exclude '*.test' . | \
+ tar -C ${S}/src/${GO_IMPORT} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' . | \
tar -C ${D}${libdir}/go/src/${GO_IMPORT} --no-same-owner -xf -
tar -C ${B} -cf - pkg | tar -C ${D}${libdir}/go --no-same-owner -xf -
@@ -135,42 +122,54 @@ go_do_install() {
fi
}
-do_install_ptest_base() {
-set -x
- test -f "${B}/.go_compiled_tests.list" || exit 0
- tests=""
- while read test; do
- tests="$tests${tests:+ }${test%.test}"
- testdir=`dirname $test`
- install -d ${D}${PTEST_PATH}/$testdir
- install -m 0755 ${B}/src/$test ${D}${PTEST_PATH}/$test
- if [ -d "${B}/src/$testdir/testdata" ]; then
- cp --preserve=mode,timestamps -R "${B}/src/$testdir/testdata" ${D}${PTEST_PATH}/$testdir
- fi
- done < ${B}/.go_compiled_tests.list
- if [ -n "$tests" ]; then
- install -d ${D}${PTEST_PATH}
- cat >${D}${PTEST_PATH}/run-ptest <<EOF
+go_make_ptest_wrapper() {
+ cat >${D}${PTEST_PATH}/run-ptest <<EOF
#!/bin/sh
-ANYFAILED=0
-for t in $tests; do
- testdir=\`dirname \$t.test\`
- if ( cd "${PTEST_PATH}/\$testdir"; "${PTEST_PATH}/\$t.test" ${GOPTESTFLAGS} | tee /dev/fd/9 | grep -q "^FAIL" ) 9>&1; then
- ANYFAILED=1
- fi
-done
-if [ \$ANYFAILED -ne 0 ]; then
- echo "FAIL: ${PN}"
- exit 1
-fi
-echo "PASS: ${PN}"
-exit 0
+RC=0
+run_test() (
+ cd "\$1"
+ ((((./\$2 ${GOPTESTFLAGS}; echo \$? >&3) | sed -r -e"s,^(PASS|SKIP|FAIL)\$,\\1: \$1/\$2," >&4) 3>&1) | (read rc; exit \$rc)) 4>&1
+ exit \$?)
EOF
- chmod +x ${D}${PTEST_PATH}/run-ptest
- else
- rm -rf ${D}${PTEST_PATH}
- fi
-set +x
+
+}
+
+go_stage_testdata() {
+ oldwd="$PWD"
+ cd ${S}/src
+ find ${GO_IMPORT} -depth -type d -name testdata | while read d; do
+ if echo "$d" | grep -q '/vendor/'; then
+ continue
+ fi
+ parent=`dirname $d`
+ install -d ${D}${PTEST_PATH}/$parent
+ cp --preserve=mode,timestamps -R $d ${D}${PTEST_PATH}/$parent/
+ done
+ cd "$oldwd"
+}
+
+do_install_ptest_base() {
+ test -f "${B}/.go_compiled_tests.list" || exit 0
+ install -d ${D}${PTEST_PATH}
+ go_stage_testdata
+ go_make_ptest_wrapper
+ havetests=""
+ while read test; do
+ testdir=`dirname $test`
+ testprog=`basename $test`
+ install -d ${D}${PTEST_PATH}/$testdir
+ install -m 0755 ${B}/src/$test ${D}${PTEST_PATH}/$test
+ echo "run_test $testdir $testprog || RC=1" >> ${D}${PTEST_PATH}/run-ptest
+ havetests="yes"
+ done < ${B}/.go_compiled_tests.list
+ if [ -n "$havetests" ]; then
+ echo "exit \$RC" >> ${D}${PTEST_PATH}/run-ptest
+ chmod +x ${D}${PTEST_PATH}/run-ptest
+ else
+ rm -rf ${D}${PTEST_PATH}
+ fi
+ do_install_ptest
+ chown -R root:root ${D}${PTEST_PATH}
}
EXPORT_FUNCTIONS do_unpack do_configure do_compile do_install
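
A gloss on the nested redirections in run_test above: the test binary's stdout flows through sed, which tags bare PASS/SKIP/FAIL lines with the package path, onto fd 4, while the exit status is echoed to fd 3 and read back so the subshell exits with it. For a build with a single compiled test, the generated ${PTEST_PATH}/run-ptest would look roughly like this (package path hypothetical, GOPTESTFLAGS left at its new empty default):

    #!/bin/sh
    RC=0
    run_test() (
        cd "$1"
        ((((./$2 ; echo $? >&3) | sed -r -e"s,^(PASS|SKIP|FAIL)$,\1: $1/$2," >&4) 3>&1) | (read rc; exit $rc)) 4>&1
        exit $?)
    run_test github.com/example/project example.test || RC=1
    exit $RC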
diff --git a/import-layers/yocto-poky/meta/classes/goarch.bbclass b/import-layers/yocto-poky/meta/classes/goarch.bbclass
index 663c9ffc3..f54c5169e 100644
--- a/import-layers/yocto-poky/meta/classes/goarch.bbclass
+++ b/import-layers/yocto-poky/meta/classes/goarch.bbclass
@@ -5,11 +5,13 @@ HOST_GOOS = "${@go_map_os(d.getVar('HOST_OS'), d)}"
HOST_GOARCH = "${@go_map_arch(d.getVar('HOST_ARCH'), d)}"
HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
HOST_GO386 = "${@go_map_386(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
+HOST_GOMIPS = "${@go_map_mips(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
HOST_GOTUPLE = "${HOST_GOOS}_${HOST_GOARCH}"
TARGET_GOOS = "${@go_map_os(d.getVar('TARGET_OS'), d)}"
TARGET_GOARCH = "${@go_map_arch(d.getVar('TARGET_ARCH'), d)}"
TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
TARGET_GO386 = "${@go_map_386(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
+TARGET_GOMIPS = "${@go_map_mips(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
TARGET_GOTUPLE = "${TARGET_GOOS}_${TARGET_GOARCH}"
GO_BUILD_BINDIR = "${@['bin/${HOST_GOTUPLE}','bin'][d.getVar('BUILD_GOTUPLE') == d.getVar('HOST_GOTUPLE')]}"
@@ -22,6 +24,7 @@ GO_DYNLINK_x86 = "1"
GO_DYNLINK_x86-64 = "1"
GO_DYNLINK_powerpc64 = "1"
GO_DYNLINK_class-native = ""
+GO_DYNLINK_class-nativesdk = ""
# define here because everybody inherits this class
#
@@ -32,6 +35,8 @@ COMPATIBLE_HOST_powerpc64 = "null"
COMPATIBLE_HOST_mipsarchn32 = "null"
ARM_INSTRUCTION_SET = "arm"
TUNE_CCARGS_remove = "-march=mips32r2"
+SECURITY_CFLAGS_mips = "${SECURITY_NOPIE_CFLAGS}"
+SECURITY_NOPIE_CFLAGS ??= ""
def go_map_arch(a, d):
import re
@@ -56,7 +61,7 @@ def go_map_arch(a, d):
elif re.match('p(pc|owerpc)(64el)', a):
return 'ppc64le'
else:
- raise bb.parse.SkipPackage("Unsupported CPU architecture: %s" % a)
+ raise bb.parse.SkipRecipe("Unsupported CPU architecture: %s" % a)
def go_map_arm(a, f, d):
import re
@@ -78,6 +83,15 @@ def go_map_386(a, f, d):
return '387'
return ''
+def go_map_mips(a, f, d):
+ import re
+ if a == 'mips' or a == 'mipsel':
+ if 'fpu-hard' in f:
+ return 'hardfloat'
+ else:
+ return 'softfloat'
+ return ''
+
def go_map_os(o, d):
if o.startswith('linux'):
return 'linux'
diff --git a/import-layers/yocto-poky/meta/classes/godep.bbclass b/import-layers/yocto-poky/meta/classes/godep.bbclass
new file mode 100644
index 000000000..c82401c31
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/godep.bbclass
@@ -0,0 +1,8 @@
+DEPENDS_append = " go-dep-native"
+
+do_compile_prepend() {
+ rm -f ${WORKDIR}/build/src/${GO_IMPORT}/Gopkg.toml
+ rm -f ${WORKDIR}/build/src/${GO_IMPORT}/Gopkg.lock
+ ( cd ${WORKDIR}/build/src/${GO_IMPORT} && dep init && dep ensure )
+}
+
diff --git a/import-layers/yocto-poky/meta/classes/grub-efi-cfg.bbclass b/import-layers/yocto-poky/meta/classes/grub-efi-cfg.bbclass
new file mode 100644
index 000000000..5eeee6c2e
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/grub-efi-cfg.bbclass
@@ -0,0 +1,114 @@
+# grub-efi.bbclass
+# Copyright (c) 2011, Intel Corporation.
+# All rights reserved.
+#
+# Released under the MIT license (see packages/COPYING)
+
+# Provide grub-efi specific functions for building bootable images.
+
+# External variables
+# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
+# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
+# ${GRUB_GFXSERIAL} - set this to 1 to have graphics and serial in the boot menu
+# ${LABELS} - a list of targets for the automatic config
+# ${APPEND} - an override list of append strings for each label
+# ${GRUB_OPTS} - additional options to add to the config, ';' delimited (optional)
+# ${GRUB_TIMEOUT} - timeout before executing the default label (optional)
+# ${GRUB_ROOT} - grub's root device.
+
+GRUB_SERIAL ?= "console=ttyS0,115200"
+GRUB_CFG_VM = "${S}/grub_vm.cfg"
+GRUB_CFG_LIVE = "${S}/grub_live.cfg"
+GRUB_TIMEOUT ?= "10"
+#FIXME: build this from the machine config
+GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
+
+EFIDIR = "/EFI/BOOT"
+GRUB_ROOT ?= "${ROOT}"
+APPEND ?= ""
+
+# Need UUID utility code.
+inherit fs-uuid
+
+python build_efi_cfg() {
+ import sys
+
+ workdir = d.getVar('WORKDIR')
+ if not workdir:
+ bb.error("WORKDIR not defined, unable to package")
+ return
+
+ gfxserial = d.getVar('GRUB_GFXSERIAL') or ""
+
+ labels = d.getVar('LABELS')
+ if not labels:
+ bb.debug(1, "LABELS not defined, nothing to do")
+ return
+
+ if labels == []:
+ bb.debug(1, "No labels, nothing to do")
+ return
+
+ cfile = d.getVar('GRUB_CFG')
+ if not cfile:
+ bb.fatal('Unable to read GRUB_CFG')
+
+ try:
+ cfgfile = open(cfile, 'w')
+ except OSError:
+ bb.fatal('Unable to open %s' % cfile)
+
+ cfgfile.write('# Automatically created by OE\n')
+
+ opts = d.getVar('GRUB_OPTS')
+ if opts:
+ for opt in opts.split(';'):
+ cfgfile.write('%s\n' % opt)
+
+ cfgfile.write('default=%s\n' % (labels.split()[0]))
+
+ timeout = d.getVar('GRUB_TIMEOUT')
+ if timeout:
+ cfgfile.write('timeout=%s\n' % timeout)
+ else:
+ cfgfile.write('timeout=50\n')
+
+ root = d.getVar('GRUB_ROOT')
+ if not root:
+ bb.fatal('GRUB_ROOT not defined')
+
+ if gfxserial == "1":
+ btypes = [ [ " graphics console", "" ],
+ [ " serial console", d.getVar('GRUB_SERIAL') or "" ] ]
+ else:
+ btypes = [ [ "", "" ] ]
+
+ for label in labels.split():
+ localdata = d.createCopy()
+
+ for btype in btypes:
+ cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0]))
+ lb = label
+ if label == "install":
+ lb = "install-efi"
+ kernel = localdata.getVar('KERNEL_IMAGETYPE')
+ cfgfile.write('linux /%s LABEL=%s' % (kernel, lb))
+
+ cfgfile.write(' %s' % replace_rootfs_uuid(d, root))
+
+ append = localdata.getVar('APPEND')
+ initrd = localdata.getVar('INITRD')
+
+ if append:
+ append = replace_rootfs_uuid(d, append)
+ cfgfile.write(' %s' % (append))
+
+ cfgfile.write(' %s' % btype[1])
+ cfgfile.write('\n')
+
+ if initrd:
+ cfgfile.write('initrd /initrd')
+ cfgfile.write('\n}\n')
+
+ cfgfile.close()
+}
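
The refactored class keeps the external interface documented in its header, so image code that only needs config generation can inherit grub-efi-cfg directly and feed it, for example (values illustrative):

    LABELS = "boot install"
    GRUB_CFG = "${S}/grub.cfg"
    GRUB_ROOT = "root=/dev/sda2"
    GRUB_TIMEOUT = "5"
    APPEND = "rootwait console=ttyS0,115200"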
diff --git a/import-layers/yocto-poky/meta/classes/grub-efi.bbclass b/import-layers/yocto-poky/meta/classes/grub-efi.bbclass
index 610479b85..90badc03a 100644
--- a/import-layers/yocto-poky/meta/classes/grub-efi.bbclass
+++ b/import-layers/yocto-poky/meta/classes/grub-efi.bbclass
@@ -1,36 +1,4 @@
-# grub-efi.bbclass
-# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
-#
-# Released under the MIT license (see packages/COPYING)
-
-# Provide grub-efi specific functions for building bootable images.
-
-# External variables
-# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
-# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
-# ${GRUB_GFXSERIAL} - set this to 1 to have graphics and serial in the boot menu
-# ${LABELS} - a list of targets for the automatic config
-# ${APPEND} - an override list of append strings for each label
-# ${GRUB_OPTS} - additional options to add to the config, ';' delimited # (optional)
-# ${GRUB_TIMEOUT} - timeout before executing the deault label (optional)
-# ${GRUB_ROOT} - grub's root device.
-
-do_bootimg[depends] += "${MLPREFIX}grub-efi:do_deploy"
-
-GRUB_SERIAL ?= "console=ttyS0,115200"
-GRUB_CFG_VM = "${S}/grub_vm.cfg"
-GRUB_CFG_LIVE = "${S}/grub_live.cfg"
-GRUB_TIMEOUT ?= "10"
-#FIXME: build this from the machine config
-GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
-
-EFIDIR = "/EFI/BOOT"
-GRUB_ROOT ?= "${ROOT}"
-APPEND ?= ""
-
-# Need UUID utility code.
-inherit fs-uuid
+inherit grub-efi-cfg
efi_populate() {
# DEST must be the root of the image so that EFIDIR is not
@@ -58,7 +26,7 @@ efi_iso_populate() {
# Build a EFI directory to create efi.img
mkdir -p ${EFIIMGDIR}/${EFIDIR}
cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
- cp $iso_dir/vmlinuz ${EFIIMGDIR}
+ cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}
EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
printf 'fs0:%s\%s\n' "$EFIPATH" "$GRUB_IMAGE" > ${EFIIMGDIR}/startup.nsh
if [ -f "$iso_dir/initrd" ] ; then
@@ -69,91 +37,3 @@ efi_iso_populate() {
efi_hddimg_populate() {
efi_populate $1
}
-
-python build_efi_cfg() {
- import sys
-
- workdir = d.getVar('WORKDIR')
- if not workdir:
- bb.error("WORKDIR not defined, unable to package")
- return
-
- gfxserial = d.getVar('GRUB_GFXSERIAL') or ""
-
- labels = d.getVar('LABELS')
- if not labels:
- bb.debug(1, "LABELS not defined, nothing to do")
- return
-
- if labels == []:
- bb.debug(1, "No labels, nothing to do")
- return
-
- cfile = d.getVar('GRUB_CFG')
- if not cfile:
- bb.fatal('Unable to read GRUB_CFG')
-
- try:
- cfgfile = open(cfile, 'w')
- except OSError:
- bb.fatal('Unable to open %s' % cfile)
-
- cfgfile.write('# Automatically created by OE\n')
-
- opts = d.getVar('GRUB_OPTS')
- if opts:
- for opt in opts.split(';'):
- cfgfile.write('%s\n' % opt)
-
- cfgfile.write('default=%s\n' % (labels.split()[0]))
-
- timeout = d.getVar('GRUB_TIMEOUT')
- if timeout:
- cfgfile.write('timeout=%s\n' % timeout)
- else:
- cfgfile.write('timeout=50\n')
-
- root = d.getVar('GRUB_ROOT')
- if not root:
- bb.fatal('GRUB_ROOT not defined')
-
- if gfxserial == "1":
- btypes = [ [ " graphics console", "" ],
- [ " serial console", d.getVar('GRUB_SERIAL') or "" ] ]
- else:
- btypes = [ [ "", "" ] ]
-
- for label in labels.split():
- localdata = d.createCopy()
-
- overrides = localdata.getVar('OVERRIDES')
- if not overrides:
- bb.fatal('OVERRIDES not defined')
-
- for btype in btypes:
- localdata.setVar('OVERRIDES', label + ':' + overrides)
-
- cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0]))
- lb = label
- if label == "install":
- lb = "install-efi"
- cfgfile.write('linux /vmlinuz LABEL=%s' % (lb))
-
- cfgfile.write(' %s' % replace_rootfs_uuid(d, root))
-
- append = localdata.getVar('APPEND')
- initrd = localdata.getVar('INITRD')
-
- if append:
- append = replace_rootfs_uuid(d, append)
- cfgfile.write(' %s' % (append))
-
- cfgfile.write(' %s' % btype[1])
- cfgfile.write('\n')
-
- if initrd:
- cfgfile.write('initrd /initrd')
- cfgfile.write('\n}\n')
-
- cfgfile.close()
-}
diff --git a/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass b/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass
index 5201c7151..b4f675490 100644
--- a/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass
+++ b/import-layers/yocto-poky/meta/classes/gtk-doc.bbclass
@@ -18,27 +18,27 @@ EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GTKDOC_ENABLED', 'Tru
EXTRA_OECONF_prepend_class-native = "--disable-gtk-doc "
EXTRA_OECONF_prepend_class-nativesdk = "--disable-gtk-doc "
-DEPENDS_append_class-target = " gtk-doc-native qemu-native"
-
# Even though gtkdoc is disabled on -native, gtk-doc package is still
# needed for m4 macros.
-DEPENDS_append_class-native = " gtk-doc-native"
-DEPENDS_append_class-nativesdk = " gtk-doc-native"
+DEPENDS_append = " gtk-doc-native"
# The documentation directory, where the infrastructure will be copied.
# gtkdocize has a default of "." so to handle out-of-tree builds set this to $S.
GTKDOC_DOCDIR ?= "${S}"
-do_configure_prepend () {
- ( cd ${S}; gtkdocize --docdir ${GTKDOC_DOCDIR} || true )
-}
+export STAGING_DIR_HOST
-inherit qemu
+inherit python3native pkgconfig qemu
+DEPENDS_append = "${@' qemu-native' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}"
-export STAGING_DIR_HOST
+do_configure_prepend () {
+ # Need to use ||true as this is only needed if configure.ac both exists
+ # and uses GTK_DOC_CHECK.
+ gtkdocize --srcdir ${S} --docdir ${GTKDOC_DOCDIR} || true
+}
do_compile_prepend_class-target () {
-
+	if [ "${GTKDOC_ENABLED}" = "True" ]; then
# Write out a qemu wrapper that will be given to gtkdoc-scangobj so that it
# can run target helper binaries through that.
qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['\$GIR_EXTRA_LIBS_PATH','$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
@@ -64,7 +64,5 @@ if [ \$? -ne 0 ]; then
fi
EOF
chmod +x ${B}/gtkdoc-qemuwrapper
+ fi
}
-
-
-inherit pkgconfig
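
The qemu-native dependency is now conditional on GTKDOC_ENABLED, which is normally derived from the api-documentation distro feature earlier in the class (outside this hunk). A minimal local.conf sketch to turn documentation builds on, assuming that wiring:

    DISTRO_FEATURES_append = " api-documentation"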
diff --git a/import-layers/yocto-poky/meta/classes/icecc.bbclass b/import-layers/yocto-poky/meta/classes/icecc.bbclass
index 1cc1c4ddb..0ca8de86c 100644
--- a/import-layers/yocto-poky/meta/classes/icecc.bbclass
+++ b/import-layers/yocto-poky/meta/classes/icecc.bbclass
@@ -28,15 +28,44 @@
# Error checking is kept to a minimum, so double-check any parameters you pass to the class
###########################################################################################
-BB_HASHBASE_WHITELIST += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_USER_PACKAGE_BL ICECC_USER_CLASS_BL ICECC_USER_PACKAGE_WL ICECC_PATH ICECC_ENV_EXEC"
+BB_HASHBASE_WHITELIST += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_USER_PACKAGE_BL \
+ ICECC_USER_CLASS_BL ICECC_USER_PACKAGE_WL ICECC_PATH ICECC_ENV_EXEC \
+ ICECC_CARET_WORKAROUND ICECC_CFLAGS ICECC_ENV_VERSION \
+ ICECC_DEBUG ICECC_LOGFILE ICECC_REPEAT_RATE ICECC_PREFERRED_HOST \
+ ICECC_CLANG_REMOTE_CPP ICECC_IGNORE_UNVERIFIED ICECC_TEST_SOCKET \
+ ICECC_ENV_DEBUG \
+ "
ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env"
+# This version can be incremented when changes are made to the environment that
+# invalidate the version on the compile nodes. Changing it will cause a new
+# environment to be created.
+#
+# A useful thing to do for testing Icecream changes locally is to add a
+# subversion in local.conf:
+# ICECC_ENV_VERSION_append = "-my-ver-1"
+ICECC_ENV_VERSION = "2"
+
+# Default to disabling the caret workaround. If set to "1" in local.conf, icecc
+# will locally recompile any files that have warnings, which can adversely
+# affect performance.
+#
+# See: https://github.com/icecc/icecream/issues/190
+export ICECC_CARET_WORKAROUND ??= "0"
+
+ICECC_CFLAGS = ""
+CFLAGS += "${ICECC_CFLAGS}"
+CXXFLAGS += "${ICECC_CFLAGS}"
+
+# Debug flags when generating environments
+ICECC_ENV_DEBUG ??= ""
+
def icecc_dep_prepend(d):
# INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
# we need that built is the responsibility of the patch function / class, not
# the application.
- if not d.getVar('INHIBIT_DEFAULT_DEPS', False):
+ if not d.getVar('INHIBIT_DEFAULT_DEPS'):
return "icecc-create-env-native"
return ""
@@ -44,21 +73,20 @@ DEPENDS_prepend += "${@icecc_dep_prepend(d)} "
get_cross_kernel_cc[vardepsexclude] += "KERNEL_CC"
def get_cross_kernel_cc(bb,d):
- kernel_cc = d.getVar('KERNEL_CC', False)
+ kernel_cc = d.getVar('KERNEL_CC')
# evaluate the expression by the shell if necessary
if '`' in kernel_cc or '$(' in kernel_cc:
import subprocess
kernel_cc = subprocess.check_output("echo %s" % kernel_cc, shell=True).decode("utf-8")[:-1]
- kernel_cc = d.expand(kernel_cc)
kernel_cc = kernel_cc.replace('ccache', '').strip()
kernel_cc = kernel_cc.split(' ')[0]
kernel_cc = kernel_cc.strip()
return kernel_cc
def get_icecc(d):
- return d.getVar('ICECC_PATH', False) or bb.utils.which(os.getenv("PATH"), "icecc")
+ return d.getVar('ICECC_PATH') or bb.utils.which(os.getenv("PATH"), "icecc")
def create_path(compilers, bb, d):
"""
@@ -93,7 +121,7 @@ def create_path(compilers, bb, d):
return staging
def use_icecc(bb,d):
- if d.getVar('ICECC_DISABLED', False) == "1":
+ if d.getVar('ICECC_DISABLED') == "1":
# don't even try it, when explicitly disabled
return "no"
@@ -101,10 +129,13 @@ def use_icecc(bb,d):
if icecc_is_allarch(bb, d):
return "no"
+ if icecc_is_cross_canadian(bb, d):
+ return "no"
+
pn = d.getVar('PN')
system_class_blacklist = []
- user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL', False) or "none").split()
+ user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL') or "none").split()
package_class_blacklist = system_class_blacklist + user_class_blacklist
for black in package_class_blacklist:
@@ -121,8 +152,8 @@ def use_icecc(bb,d):
# e.g. when there is a new version
# building libgcc-initial with icecc fails with CPP sanity check error if host sysroot contains cross gcc built for another target tune/variant
system_package_blacklist = ["libgcc-initial"]
- user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL', False) or "").split()
- user_package_whitelist = (d.getVar('ICECC_USER_PACKAGE_WL', False) or "").split()
+ user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL') or "").split()
+ user_package_whitelist = (d.getVar('ICECC_USER_PACKAGE_WL') or "").split()
package_blacklist = system_package_blacklist + user_package_blacklist
if pn in package_blacklist:
@@ -133,7 +164,7 @@ def use_icecc(bb,d):
bb.debug(1, "%s: found in whitelist, enable icecc" % pn)
return "yes"
- if d.getVar('PARALLEL_MAKE', False) == "":
+ if d.getVar('PARALLEL_MAKE') == "":
bb.debug(1, "%s: has empty PARALLEL_MAKE, disable icecc" % pn)
return "no"
@@ -151,16 +182,27 @@ def icecc_is_native(bb, d):
bb.data.inherits_class("cross", d) or \
bb.data.inherits_class("native", d);
+def icecc_is_cross_canadian(bb, d):
+ return bb.data.inherits_class("cross-canadian", d)
+
+def icecc_dir(bb, d):
+ return d.expand('${TMPDIR}/work-shared/ice')
+
# Don't pollute allarch signatures with TARGET_FPU
icecc_version[vardepsexclude] += "TARGET_FPU"
def icecc_version(bb, d):
if use_icecc(bb, d) == "no":
return ""
- parallel = d.getVar('ICECC_PARALLEL_MAKE', False) or ""
- if not d.getVar('PARALLEL_MAKE', False) == "" and parallel:
+ parallel = d.getVar('ICECC_PARALLEL_MAKE') or ""
+ if not d.getVar('PARALLEL_MAKE') == "" and parallel:
d.setVar("PARALLEL_MAKE", parallel)
+ # Disable showing the caret in the GCC compiler output if the workaround is
+ # disabled
+ if d.getVar('ICECC_CARET_WORKAROUND') == '0':
+ d.setVar('ICECC_CFLAGS', '-fno-diagnostics-show-caret')
+
if icecc_is_native(bb, d):
archive_name = "local-host-env"
elif d.expand('${HOST_PREFIX}') == "":
@@ -169,14 +211,18 @@ def icecc_version(bb, d):
prefix = d.expand('${HOST_PREFIX}' )
distro = d.expand('${DISTRO}')
target_sys = d.expand('${TARGET_SYS}')
- float = d.getVar('TARGET_FPU', False) or "hard"
+ float = d.getVar('TARGET_FPU') or "hard"
archive_name = prefix + distro + "-" + target_sys + "-" + float
if icecc_is_kernel(bb, d):
archive_name += "-kernel"
import socket
- ice_dir = d.expand('${STAGING_DIR_NATIVE}${prefix_native}')
- tar_file = os.path.join(ice_dir, 'ice', archive_name + "-@VERSION@-" + socket.gethostname() + '.tar.gz')
+ ice_dir = icecc_dir(bb, d)
+ tar_file = os.path.join(ice_dir, "{archive}-{version}-@VERSION@-{hostname}.tar.gz".format(
+ archive=archive_name,
+ version=d.getVar('ICECC_ENV_VERSION'),
+ hostname=socket.gethostname()
+ ))
return tar_file
@@ -197,25 +243,42 @@ def icecc_get_external_tool(bb, d, tool):
target_prefix = d.expand('${TARGET_PREFIX}')
return os.path.join(external_toolchain_bindir, '%s%s' % (target_prefix, tool))
+def icecc_get_tool_link(tool, d):
+ import subprocess
+ return subprocess.check_output("readlink -f %s" % tool, shell=True).decode("utf-8")[:-1]
+
+def icecc_get_path_tool(tool, d):
+ # This is a little ugly, but we want to make sure we add an actual
+ # compiler to the toolchain, not ccache. Some distros (e.g. Fedora)
+    # have ccache enabled by default using symlinks in PATH, meaning ccache
+ # would be found first when looking for the compiler.
+ paths = os.getenv("PATH").split(':')
+ while True:
+ p, hist = bb.utils.which(':'.join(paths), tool, history=True)
+ if not p or os.path.basename(icecc_get_tool_link(p, d)) != 'ccache':
+ return p
+ paths = paths[len(hist):]
+
+ return ""
+
# Don't pollute native signatures with target TUNE_PKGARCH through STAGING_BINDIR_TOOLCHAIN
icecc_get_tool[vardepsexclude] += "STAGING_BINDIR_TOOLCHAIN"
def icecc_get_tool(bb, d, tool):
if icecc_is_native(bb, d):
- return bb.utils.which(os.getenv("PATH"), tool)
+ return icecc_get_path_tool(tool, d)
elif icecc_is_kernel(bb, d):
- return bb.utils.which(os.getenv("PATH"), get_cross_kernel_cc(bb, d))
+ return icecc_get_path_tool(get_cross_kernel_cc(bb, d), d)
else:
ice_dir = d.expand('${STAGING_BINDIR_TOOLCHAIN}')
target_sys = d.expand('${TARGET_SYS}')
- tool_bin = os.path.join(ice_dir, "%s-%s" % (target_sys, tool))
- if os.path.isfile(tool_bin):
- return tool_bin
- else:
- external_tool_bin = icecc_get_external_tool(bb, d, tool)
- if os.path.isfile(external_tool_bin):
- return external_tool_bin
- else:
- return ""
+ for p in ice_dir.split(':'):
+ tool_bin = os.path.join(p, "%s-%s" % (target_sys, tool))
+ if os.path.isfile(tool_bin):
+ return tool_bin
+ external_tool_bin = icecc_get_external_tool(bb, d, tool)
+ if os.path.isfile(external_tool_bin):
+ return external_tool_bin
+ return ""
def icecc_get_and_check_tool(bb, d, tool):
# Check that g++ or gcc is not a symbolic link to icecc binary in
@@ -223,8 +286,7 @@ def icecc_get_and_check_tool(bb, d, tool):
# compiler environment package.
t = icecc_get_tool(bb, d, tool)
if t:
- import subprocess
- link_path = subprocess.check_output("readlink -f %s" % t, shell=True).decode("utf-8")[:-1]
+ link_path = icecc_get_tool_link(tool, d)
if link_path == get_icecc(d):
bb.error("%s is a symlink to %s in PATH and this prevents icecc from working" % (t, get_icecc(d)))
return ""
@@ -305,7 +367,7 @@ set_icecc_env() {
# the ICECC_VERSION generation step must be locked by a mutex
# in order to prevent race conditions
if flock -n "${ICECC_VERSION}.lock" \
- ${ICECC_ENV_EXEC} "${ICECC_CC}" "${ICECC_CXX}" "${ICECC_AS}" "${ICECC_VERSION}"
+ ${ICECC_ENV_EXEC} ${ICECC_ENV_DEBUG} "${ICECC_CC}" "${ICECC_CXX}" "${ICECC_AS}" "${ICECC_VERSION}"
then
touch "${ICECC_VERSION}.done"
elif [ ! wait_for_file "${ICECC_VERSION}.done" 30 ]
@@ -316,9 +378,13 @@ set_icecc_env() {
fi
fi
+ # Don't let ccache find the icecream compiler links that have been created, otherwise
+ # it can end up invoking icecream recursively.
+ export CCACHE_PATH="$PATH"
+    export CCACHE_DISABLE="1"
+
export ICECC_VERSION ICECC_CC ICECC_CXX
export PATH="$ICE_PATH:$PATH"
- export CCACHE_PATH="$PATH"
bbnote "Using icecc"
}
@@ -338,3 +404,13 @@ do_compile_kernelmodules_prepend() {
do_install_prepend() {
set_icecc_env
}
+
+# IceCream is not (currently) supported in the extensible SDK
+ICECC_SDK_HOST_TASK = "nativesdk-icecc-toolchain"
+ICECC_SDK_HOST_TASK_task-populate-sdk-ext = ""
+
+# Don't include IceCream in uninative tarball
+ICECC_SDK_HOST_TASK_pn-uninative-tarball = ""
+
+# Add the toolchain scripts to the SDK
+TOOLCHAIN_HOST_TASK_append = " ${ICECC_SDK_HOST_TASK}"
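
The new knobs are intended to be set from local.conf; a minimal sketch using only the settings documented in the comments above (values are examples, not defaults):

    # re-enable local recompilation for full caret diagnostics (can hurt performance)
    ICECC_CARET_WORKAROUND = "1"
    # force a fresh compile environment on the icecream nodes
    ICECC_ENV_VERSION_append = "-my-ver-1"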
diff --git a/import-layers/yocto-poky/meta/classes/image-combined-dbg.bbclass b/import-layers/yocto-poky/meta/classes/image-combined-dbg.bbclass
new file mode 100644
index 000000000..f4772f7ea
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/image-combined-dbg.bbclass
@@ -0,0 +1,9 @@
+IMAGE_PREPROCESS_COMMAND_append = " combine_dbg_image; "
+
+combine_dbg_image () {
+ if [ "${IMAGE_GEN_DEBUGFS}" = "1" -a -e ${IMAGE_ROOTFS}-dbg ]; then
+ # copy target files into -dbg rootfs, so it can be used for
+ # debug purposes directly
+ tar -C ${IMAGE_ROOTFS} -cf - . | tar -C ${IMAGE_ROOTFS}-dbg -xf -
+ fi
+}
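
A sketch of how this class might be enabled from local.conf; the INHERIT line and the debugfs image type are assumptions about a typical setup, not requirements of the class:

    INHERIT += "image-combined-dbg"
    IMAGE_GEN_DEBUGFS = "1"
    IMAGE_FSTYPES_DEBUGFS = "tar.bz2"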
diff --git a/import-layers/yocto-poky/meta/classes/image-live.bbclass b/import-layers/yocto-poky/meta/classes/image-live.bbclass
index 1623c1598..966277c67 100644
--- a/import-layers/yocto-poky/meta/classes/image-live.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image-live.bbclass
@@ -19,7 +19,6 @@
# External variables (also used by syslinux.bbclass)
# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
-# ${COMPRESSISO} - Transparent compress ISO, reduce size ~40% if set to 1
# ${NOISO} - skip building the ISO image if set to 1
# ${NOHDD} - skip building the HDD image if set to 1
# ${HDDIMG_ID} - FAT image volume-id
@@ -33,7 +32,6 @@ do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
virtual/kernel:do_deploy \
${MLPREFIX}syslinux:do_populate_sysroot \
syslinux-native:do_populate_sysroot \
- ${@oe.utils.ifelse(d.getVar('COMPRESSISO', False),'zisofs-tools-native:do_populate_sysroot','')} \
${PN}:do_image_${@d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')} \
"
@@ -65,7 +63,6 @@ HDDDIR = "${S}/hddimg"
ISODIR = "${S}/iso"
EFIIMGDIR = "${S}/efi_img"
COMPACT_ISODIR = "${S}/iso.z"
-COMPRESSISO ?= "0"
ISOLINUXDIR ?= "/isolinux"
ISO_BOOTIMG = "isolinux/isolinux.bin"
@@ -115,18 +112,8 @@ build_iso() {
install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR}
fi
- if [ "${COMPRESSISO}" = "1" ] ; then
- # create compact directory, compress iso
- mkdir -p ${COMPACT_ISODIR}
- mkzftree -z 9 -p 4 -F ${ISODIR}/rootfs.img ${COMPACT_ISODIR}/rootfs.img
-
- # move compact iso to iso, then remove compact directory
- mv ${COMPACT_ISODIR}/rootfs.img ${ISODIR}/rootfs.img
- rm -Rf ${COMPACT_ISODIR}
- mkisofs_compress_opts="-R -z -D -l"
- else
- mkisofs_compress_opts="-r"
- fi
+ # We used to have support for zisofs; this is a relic of that
+ mkisofs_compress_opts="-r"
# Check the size of ${ISODIR}/rootfs.img, use mkisofs -iso-level 3
# when it exceeds 3.8GB, the specification is 4G - 1 bytes, we need
diff --git a/import-layers/yocto-poky/meta/classes/image-prelink.bbclass b/import-layers/yocto-poky/meta/classes/image-prelink.bbclass
index f3bb68b9e..6a8afa8fa 100644
--- a/import-layers/yocto-poky/meta/classes/image-prelink.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image-prelink.bbclass
@@ -36,7 +36,7 @@ prelink_image () {
dynamic_loader=$(linuxloader)
# prelink!
- if [ "$BUILD_REPRODUCIBLE_BINARIES" = "1" ]; then
+ if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
bbnote " prelink: BUILD_REPRODUCIBLE_BINARIES..."
if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
export PRELINK_TIMESTAMP=`git log -1 --pretty=%ct `
diff --git a/import-layers/yocto-poky/meta/classes/image.bbclass b/import-layers/yocto-poky/meta/classes/image.bbclass
index d88ce5c07..2247b305d 100644
--- a/import-layers/yocto-poky/meta/classes/image.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image.bbclass
@@ -17,7 +17,7 @@ RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
INHIBIT_DEFAULT_DEPS = "1"
-TESTIMAGECLASS = "${@base_conditional('TEST_IMAGE', '1', 'testimage-auto', '', d)}"
+TESTIMAGECLASS = "${@oe.utils.conditional('TEST_IMAGE', '1', 'testimage-auto', '', d)}"
inherit ${TESTIMAGECLASS}
# IMAGE_FEATURES may contain any available package group
@@ -289,7 +289,7 @@ SSTATETASKS += "do_image_complete"
SSTATE_SKIP_CREATION_task-image-complete = '1'
do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
-do_image_complete[stamp-extra-info] = "${MACHINE}"
+do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
addtask do_image_complete after do_image before do_build
python do_image_complete_setscene () {
sstate_setscene(d)
@@ -536,21 +536,29 @@ def get_rootfs_size(d):
output = subprocess.check_output(['du', '-ks',
d.getVar('IMAGE_ROOTFS')])
size_kb = int(output.split()[0])
+
base_size = size_kb * overhead_factor
- base_size = max(base_size, rootfs_req_size) + rootfs_extra_space
+ bb.debug(1, '%f = %d * %f' % (base_size, size_kb, overhead_factor))
+ base_size2 = max(base_size, rootfs_req_size) + rootfs_extra_space
+    bb.debug(1, '%f = max(%f, %d)[%f] + %d' % (base_size2, base_size, rootfs_req_size, max(base_size, rootfs_req_size), rootfs_extra_space))
+ base_size = base_size2
if base_size != int(base_size):
base_size = int(base_size + 1)
else:
base_size = int(base_size)
+ bb.debug(1, '%f = int(%f)' % (base_size, base_size2))
+ base_size_saved = base_size
base_size += rootfs_alignment - 1
base_size -= base_size % rootfs_alignment
+ bb.debug(1, '%d = aligned(%d)' % (base_size, base_size_saved))
# Do not check image size of the debugfs image. This is not supposed
# to be deployed, etc. so it doesn't make sense to limit the size
# of the debug.
if (d.getVar('IMAGE_BUILDING_DEBUGFS') or "") == "true":
+ bb.debug(1, 'returning debugfs size %d' % (base_size))
return base_size
# Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set)
@@ -568,6 +576,8 @@ def get_rootfs_size(d):
(base_size, initramfs_maxsize_int))
bb.error("You can set INITRAMFS_MAXSIZE a larger value. Usually, it should")
bb.fatal("be less than 1/2 of ram size, or you may fail to boot it.\n")
+
+ bb.debug(1, 'returning %d' % (base_size))
return base_size
python set_image_size () {
@@ -617,9 +627,9 @@ deltask do_populate_sysroot
do_package[noexec] = "1"
deltask do_package_qa
do_packagedata[noexec] = "1"
-do_package_write_ipk[noexec] = "1"
-do_package_write_deb[noexec] = "1"
-do_package_write_rpm[noexec] = "1"
+deltask do_package_write_ipk
+deltask do_package_write_deb
+deltask do_package_write_rpm
# Prepare the root links to point to the /usr counterparts.
create_merged_usr_symlinks() {
@@ -654,7 +664,7 @@ ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge'
POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk; ', '',d)}"
reproducible_final_image_task () {
- if [ "$BUILD_REPRODUCIBLE_BINARIES" = "1" ]; then
+ if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
REPRODUCIBLE_TIMESTAMP_ROOTFS=`git log -1 --pretty=%ct`
fi
@@ -664,3 +674,5 @@ reproducible_final_image_task () {
fi
}
IMAGE_PREPROCESS_COMMAND_append = " reproducible_final_image_task; "
+
+CVE_PRODUCT = ""
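
The added bb.debug calls in get_rootfs_size() trace each step of the size computation. A worked example with assumed inputs (du reports 100000K, IMAGE_OVERHEAD_FACTOR = 1.3, required and extra space 0, 4096K alignment):

    base_size  = 100000 * 1.3          # 130000.0 = 100000 * 1.3
    base_size2 = max(130000.0, 0) + 0  # 130000.0 = max(130000.0, 0)[130000.0] + 0
    base_size  = int(130000.0)         # 130000, value already integral
    base_size += 4096 - 1              # 134095
    base_size -= 134095 % 4096         # 131072 = aligned(134095)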
diff --git a/import-layers/yocto-poky/meta/classes/image_types.bbclass b/import-layers/yocto-poky/meta/classes/image_types.bbclass
index e881d0cc2..00a00d318 100644
--- a/import-layers/yocto-poky/meta/classes/image_types.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image_types.bbclass
@@ -86,7 +86,7 @@ oe_mkext234fs () {
bbdebug 1 Executing "dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024"
dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
bbdebug 1 "Actual Rootfs size: `du -s ${IMAGE_ROOTFS}`"
- bbdebug 1 "Actual Partion size: `ls -s ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype`"
+	bbdebug 1 "Actual Partition size: `stat -c '%s' ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype`"
bbdebug 1 Executing "mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}"
mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}
# Error codes 0-3 indicate successful operation of fsck (no errors or errors corrected)
@@ -125,7 +125,7 @@ IMAGE_CMD_squashfs-lz4 = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAM
# required when extracting, but it seems prudent to use it in both cases.
IMAGE_CMD_TAR ?= "tar"
# ignore return code 1 "file changed as we read it" as other tasks (e.g. do_image_wic) may be hardlinking rootfs
-IMAGE_CMD_tar = "${IMAGE_CMD_TAR} -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
+IMAGE_CMD_tar = "${IMAGE_CMD_TAR} --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
IMAGE_CMD_cpio () {
@@ -147,16 +147,6 @@ IMAGE_CMD_cpio () {
fi
}
-ELF_KERNEL ?= "${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE}"
-ELF_APPEND ?= "ramdisk_size=32768 root=/dev/ram0 rw console="
-
-IMAGE_CMD_elf () {
- test -f ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf && rm -f ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf
- mkelfImage --kernel=${ELF_KERNEL} --initrd=${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.cpio.gz --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf --append='${ELF_APPEND}' ${EXTRA_IMAGECMD}
-}
-
-IMAGE_TYPEDEP_elf = "cpio.gz"
-
UBI_VOLNAME ?= "${MACHINE}-rootfs"
multiubi_mkfs() {
@@ -218,10 +208,27 @@ IMAGE_CMD_ubi () {
IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
+MIN_F2FS_SIZE ?= "524288"
+IMAGE_CMD_f2fs () {
+	# We need additional smarts here for devices smaller than 1.5G.
+	# The "overprovision ratio" goes down as the device gets bigger
+	# (70% at 40M -> 4.5% at 1.5G); below about 500M the standard
+	# IMAGE_OVERHEAD_FACTOR does not leave enough room, so add extra
+	# space here when the image is under 500M.
+ size=${ROOTFS_SIZE}
+ if [ ${size} -lt ${MIN_F2FS_SIZE} ] ; then
+ size=${MIN_F2FS_SIZE}
+ bbwarn "Rootfs size is too small for F2FS. Filesystem will be extended to ${size}K"
+ fi
+ dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs seek=${size} count=0 bs=1024
+ mkfs.f2fs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs
+ sload.f2fs -f ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs
+}
+
EXTRA_IMAGECMD = ""
inherit siteinfo kernel-arch
-JFFS2_ENDIANNESS ?= "${@base_conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
+JFFS2_ENDIANNESS ?= "${@oe.utils.conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
JFFS2_ERASEBLOCK ?= "0x40000"
EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
@@ -230,7 +237,7 @@ EXTRA_IMAGECMD_ext2 ?= "-i 4096"
EXTRA_IMAGECMD_ext3 ?= "-i 4096"
EXTRA_IMAGECMD_ext4 ?= "-i 4096"
EXTRA_IMAGECMD_btrfs ?= "-n 4096"
-EXTRA_IMAGECMD_elf ?= ""
+EXTRA_IMAGECMD_f2fs ?= ""
do_image_jffs2[depends] += "mtd-utils-native:do_populate_sysroot"
do_image_cramfs[depends] += "util-linux-native:do_populate_sysroot"
@@ -242,10 +249,10 @@ do_image_squashfs[depends] += "squashfs-tools-native:do_populate_sysroot"
do_image_squashfs_xz[depends] += "squashfs-tools-native:do_populate_sysroot"
do_image_squashfs_lzo[depends] += "squashfs-tools-native:do_populate_sysroot"
do_image_squashfs_lz4[depends] += "squashfs-tools-native:do_populate_sysroot"
-do_image_elf[depends] += "virtual/kernel:do_populate_sysroot mkelfimage-native:do_populate_sysroot"
do_image_ubi[depends] += "mtd-utils-native:do_populate_sysroot"
do_image_ubifs[depends] += "mtd-utils-native:do_populate_sysroot"
do_image_multiubi[depends] += "mtd-utils-native:do_populate_sysroot"
+do_image_f2fs[depends] += "f2fs-tools-native:do_populate_sysroot"
# This variable is available to request which values are suitable for IMAGE_FSTYPES
IMAGE_TYPES = " \
@@ -261,9 +268,9 @@ IMAGE_TYPES = " \
ubi ubifs multiubi \
tar tar.gz tar.bz2 tar.xz tar.lz4 \
cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 \
- elf \
wic wic.gz wic.bz2 wic.lzma \
container \
+ f2fs \
"
# Compression is a special case of conversion. The old variable
@@ -274,11 +281,10 @@ COMPRESSIONTYPES ?= ""
CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vdi qcow2 ${COMPRESSIONTYPES}"
CONVERSION_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_gz = "gzip -f -9 -n -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
+CONVERSION_CMD_gz = "pigz -f -9 -n -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
CONVERSION_CMD_bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
CONVERSION_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_THREADS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
-CONVERSION_CMD_lz4 = "lz4 -9 -z ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
-CONVERSION_CMD_lz4_legacy = "lz4 -9 -z -l ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
+CONVERSION_CMD_lz4 = "lz4 -9 -z -l ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
CONVERSION_CMD_lzo = "lzop -9 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
CONVERSION_CMD_zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
CONVERSION_CMD_sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
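
A sketch of requesting the new f2fs image type from an image recipe or local.conf; the EXTRA_IMAGECMD_f2fs value is only an example of passing extra mkfs.f2fs options (here a volume label):

    IMAGE_FSTYPES += "f2fs"
    EXTRA_IMAGECMD_f2fs = "-l rootfs"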
diff --git a/import-layers/yocto-poky/meta/classes/image_types_wic.bbclass b/import-layers/yocto-poky/meta/classes/image_types_wic.bbclass
index dcf620cee..5b40a9e91 100644
--- a/import-layers/yocto-poky/meta/classes/image_types_wic.bbclass
+++ b/import-layers/yocto-poky/meta/classes/image_types_wic.bbclass
@@ -102,6 +102,7 @@ python () {
# a variable and let the metadata deal with the deps.
d.setVar('_WKS_TEMPLATE', body)
bb.build.addtask('do_write_wks_template', 'do_image_wic', None, d)
+ bb.build.addtask('do_image_wic', 'do_image_complete', None, d)
}
#
diff --git a/import-layers/yocto-poky/meta/classes/insane.bbclass b/import-layers/yocto-poky/meta/classes/insane.bbclass
index 0a3b528dd..fa1546084 100644
--- a/import-layers/yocto-poky/meta/classes/insane.bbclass
+++ b/import-layers/yocto-poky/meta/classes/insane.bbclass
@@ -68,6 +68,11 @@ def package_qa_get_machine_dict(d):
"epiphany": (4643, 0, 0, True, 32),
"mips": ( 8, 0, 0, False, 32),
"mipsel": ( 8, 0, 0, True, 32),
+ "microblaze": (189, 0, 0, False, 32),
+ "microblazeeb":(189, 0, 0, False, 32),
+ "microblazeel":(189, 0, 0, True, 32),
+ "riscv32": (243, 0, 0, True, 32),
+ "riscv64": (243, 0, 0, True, 64),
},
"linux" : {
"aarch64" : (183, 0, 0, True, 64),
@@ -94,6 +99,8 @@ def package_qa_get_machine_dict(d):
"mipsisa64r6": ( 8, 0, 0, False, 64),
"mipsisa64r6el": ( 8, 0, 0, True, 64),
"nios2": (113, 0, 0, True, 32),
+ "riscv32": (243, 0, 0, True, 32),
+ "riscv64": (243, 0, 0, True, 64),
"s390": (22, 0, 0, False, 32),
"sh4": (42, 0, 0, True, 32),
"sparc": ( 2, 0, 0, False, 32),
@@ -119,6 +126,8 @@ def package_qa_get_machine_dict(d):
"microblaze": (189, 0, 0, False, 32),
"microblazeeb":(189, 0, 0, False, 32),
"microblazeel":(189, 0, 0, True, 32),
+ "riscv32": (243, 0, 0, True, 32),
+ "riscv64": (243, 0, 0, True, 64),
"sh4": ( 42, 0, 0, True, 32),
},
"uclinux-uclibc" : {
@@ -142,6 +151,9 @@ def package_qa_get_machine_dict(d):
"powerpc": (20, 0, 0, False, 32),
"sh4": (42, 0, 0, True, 32),
},
+ "linux-gnu_ilp32" : {
+ "aarch64" : (183, 0, 0, True, 32),
+ },
"linux-gnux32" : {
"x86_64": (62, 0, 0, True, 32),
},
@@ -422,7 +434,7 @@ def package_qa_check_arch(path,name,d, elf, messages):
# Check the architecture and endianness of the binary
is_32 = (("virtual/kernel" in provides) or bb.data.inherits_class("module", d)) and \
- (target_os == "linux-gnux32" or target_os == "linux-muslx32" or re.match('mips64.*32', d.getVar('DEFAULTTUNE')))
+ (target_os == "linux-gnux32" or target_os == "linux-muslx32" or target_os == "linux-gnu_ilp32" or re.match('mips64.*32', d.getVar('DEFAULTTUNE')))
if not ((machine == elf.machine()) or is_32):
package_qa_add_message(messages, "arch", "Architecture did not match (%s, expected %s) on %s" % \
(oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path,d)))
diff --git a/import-layers/yocto-poky/meta/classes/kernel-arch.bbclass b/import-layers/yocto-poky/meta/classes/kernel-arch.bbclass
index c7b33d99f..09793fc9c 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-arch.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-arch.bbclass
@@ -14,7 +14,7 @@ valid_archs = "alpha cris ia64 \
parisc s390 v850 \
avr32 blackfin \
microblaze \
- nios2 arc xtensa"
+ nios2 arc riscv xtensa"
def map_kernel_arch(a, d):
import re
@@ -28,6 +28,7 @@ def map_kernel_arch(a, d):
elif re.match('aarch64_ilp32$', a): return 'arm64'
elif re.match('aarch64_be_ilp32$', a): return 'arm64'
elif re.match('mips(isa|)(32|64|)(r6|)(el|)$', a): return 'mips'
+ elif re.match('riscv(32|64|)(eb|)$', a): return 'riscv'
elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
elif re.match('sh(3|4)$', a): return 'sh'
elif re.match('bfin', a): return 'blackfin'
@@ -58,7 +59,7 @@ HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
TARGET_AR_KERNEL_ARCH ?= ""
HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
-KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd"
+KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH}"
KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
TOOLCHAIN = "gcc"
diff --git a/import-layers/yocto-poky/meta/classes/kernel-devicetree.bbclass b/import-layers/yocto-poky/meta/classes/kernel-devicetree.bbclass
index 6e08be4b7..4f80cc62e 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-devicetree.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-devicetree.bbclass
@@ -1,10 +1,10 @@
# Support for device tree generation
PACKAGES_append = " \
- kernel-devicetree \
- ${@['kernel-image-zimage-bundle', ''][d.getVar('KERNEL_DEVICETREE_BUNDLE') != '1']} \
+ ${KERNEL_PACKAGE_NAME}-devicetree \
+ ${@[d.getVar('KERNEL_PACKAGE_NAME') + '-image-zimage-bundle', ''][d.getVar('KERNEL_DEVICETREE_BUNDLE') != '1']} \
"
-FILES_kernel-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
-FILES_kernel-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
+FILES_${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
+FILES_${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
# Generate kernel+devicetree bundle
KERNEL_DEVICETREE_BUNDLE ?= "0"
diff --git a/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass b/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass
index 9baf399f2..50a91e199 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-fitimage.bbclass
@@ -14,6 +14,8 @@ python __anonymous () {
replacementtype = "vmlinuz.bin"
elif uarch == "x86":
replacementtype = "bzImage"
+ elif uarch == "microblaze":
+ replacementtype = "linux.bin"
else:
replacementtype = "zImage"
@@ -100,7 +102,7 @@ fitimage_emit_section_kernel() {
kernel_csum="sha1"
- ENTRYPOINT=${UBOOT_ENTRYPOINT}
+ ENTRYPOINT="${UBOOT_ENTRYPOINT}"
if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
ENTRYPOINT=`${HOST_PREFIX}nm vmlinux | \
awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
diff --git a/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass b/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass
index 1035525da..67ab4161d 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-module-split.bbclass
@@ -30,7 +30,7 @@ do_install_append() {
PACKAGESPLITFUNCS_prepend = "split_kernel_module_packages "
-KERNEL_MODULES_META_PACKAGE ?= "kernel-modules"
+KERNEL_MODULES_META_PACKAGE ?= "${@ d.getVar("KERNEL_PACKAGE_NAME") or "kernel" }-modules"
KERNEL_MODULE_PACKAGE_PREFIX ?= ""
KERNEL_MODULE_PACKAGE_SUFFIX ?= "-${KERNEL_VERSION}"
@@ -129,16 +129,19 @@ python split_kernel_module_packages () {
postfix = format.split('%s')[1]
d.setVar('RPROVIDES_' + pkg, pkg.replace(postfix, ''))
+ kernel_package_name = d.getVar("KERNEL_PACKAGE_NAME") or "kernel"
+ kernel_version = d.getVar("KERNEL_VERSION")
+
module_regex = '^(.*)\.k?o$'
module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX')
module_pattern_suffix = d.getVar('KERNEL_MODULE_PACKAGE_SUFFIX')
- module_pattern = module_pattern_prefix + 'kernel-module-%s' + module_pattern_suffix
+ module_pattern = module_pattern_prefix + kernel_package_name + '-module-%s' + module_pattern_suffix
postinst = d.getVar('pkg_postinst_modules')
postrm = d.getVar('pkg_postrm_modules')
- modules = do_split_packages(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='kernel-%s' % (d.getVar("KERNEL_VERSION")))
+ modules = do_split_packages(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='%s-%s' % (kernel_package_name, kernel_version))
if modules:
metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE')
d.appendVar('RDEPENDS_' + metapkg, ' '+' '.join(modules))
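
With these changes the module package names track KERNEL_PACKAGE_NAME; a sketch of the resulting names (package name and version assumed):

    # assuming KERNEL_PACKAGE_NAME = "linux-alt", KERNEL_VERSION = "4.14.0":
    #   foo.ko        -> package linux-alt-module-foo-4.14.0
    #   extra_depends -> linux-alt-4.14.0
    #   meta package  -> linux-alt-modules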
diff --git a/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass b/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass
index 663c6557d..4ac3a39e4 100644
--- a/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass
@@ -247,6 +247,7 @@ do_kernel_checkout() {
fi
rm -f .gitignore
git init
+ check_git_config
git add .
git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
git clean -d -f
diff --git a/import-layers/yocto-poky/meta/classes/kernel.bbclass b/import-layers/yocto-poky/meta/classes/kernel.bbclass
index 14f41e9b1..78d6c30b0 100644
--- a/import-layers/yocto-poky/meta/classes/kernel.bbclass
+++ b/import-layers/yocto-poky/meta/classes/kernel.bbclass
@@ -1,7 +1,10 @@
inherit linux-kernel-base kernel-module-split
-PROVIDES += "virtual/kernel"
-DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native lzop-native"
+KERNEL_PACKAGE_NAME ??= "kernel"
+KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else d.getVar("KERNEL_PACKAGE_NAME") }"
+
+PROVIDES += "${@ "virtual/kernel" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else "" }"
+DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native lzop-native bison-native"
PACKAGE_WRITE_DEPS += "depmodwrapper-cross"
do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot"
@@ -34,11 +37,32 @@ KERNEL_VERSION_PKG_NAME = "${@legitimize_package_name(d.getVar('KERNEL_VERSION')
KERNEL_VERSION_PKG_NAME[vardepvalue] = "${LINUX_VERSION}"
python __anonymous () {
+ pn = d.getVar("PN")
+ kpn = d.getVar("KERNEL_PACKAGE_NAME")
+
+ # XXX Remove this after bug 11905 is resolved
+ # FILES_${KERNEL_PACKAGE_NAME}-dev doesn't expand correctly
+ if kpn == pn:
+        bb.warn("Some packages (e.g. *-dev) might be missing due to "
+ "bug 11905 (variable KERNEL_PACKAGE_NAME == PN)")
+
+ # The default kernel recipe builds in a shared location defined by
+ # bitbake/distro confs: STAGING_KERNEL_DIR and STAGING_KERNEL_BUILDDIR.
+ # Set these variables to directories under ${WORKDIR} in alternate
+    # kernel recipes (i.e. where KERNEL_PACKAGE_NAME != kernel) so that they
+ # may build in parallel with the default kernel without clobbering.
+ if kpn != "kernel":
+ workdir = d.getVar("WORKDIR")
+ sourceDir = os.path.join(workdir, 'kernel-source')
+ artifactsDir = os.path.join(workdir, 'kernel-build-artifacts')
+ d.setVar("STAGING_KERNEL_DIR", sourceDir)
+ d.setVar("STAGING_KERNEL_BUILDDIR", artifactsDir)
# Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES
type = d.getVar('KERNEL_IMAGETYPE') or ""
alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or ""
types = d.getVar('KERNEL_IMAGETYPES') or ""
+ kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel"
if type not in types.split():
types = (type + ' ' + types).strip()
if alttype not in types.split():
@@ -55,15 +79,15 @@ python __anonymous () {
typelower = type.lower()
imagedest = d.getVar('KERNEL_IMAGEDEST')
- d.appendVar('PACKAGES', ' ' + 'kernel-image-' + typelower)
+ d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
- d.setVar('FILES_kernel-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
+ d.setVar('FILES_' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
- d.appendVar('RDEPENDS_kernel-image', ' ' + 'kernel-image-' + typelower)
+ d.appendVar('RDEPENDS_%s-image' % kname, ' %s-image-%s' % (kname, typelower))
- d.setVar('PKG_kernel-image-' + typelower, 'kernel-image-' + typelower + '-${KERNEL_VERSION_PKG_NAME}')
+ d.setVar('PKG_%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
- d.setVar('ALLOW_EMPTY_kernel-image-' + typelower, '1')
+ d.setVar('ALLOW_EMPTY_%s-image-%s' % (kname, typelower), '1')
image = d.getVar('INITRAMFS_IMAGE')
if image:
@@ -121,12 +145,13 @@ base_do_unpack_append () {
inherit kernel-arch deploy
-PACKAGES_DYNAMIC += "^kernel-module-.*"
-PACKAGES_DYNAMIC += "^kernel-image-.*"
-PACKAGES_DYNAMIC += "^kernel-firmware-.*"
+PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-module-.*"
+PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-image-.*"
+PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-firmware-.*"
export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"
+export KBUILD_BUILD_VERSION = "1"
export KBUILD_BUILD_USER = "oe-user"
export KBUILD_BUILD_HOST = "oe-host"
@@ -207,7 +232,7 @@ do_bundle_initramfs () {
copy_initramfs
# Backing up the kernel image relies on its type (regular file or symbolic link)
tmp_path=""
- for type in ${KERNEL_IMAGETYPES} ; do
+ for type in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
if [ -h ${KERNEL_OUTPUT_DIR}/$type ] ; then
linkpath=`readlink -n ${KERNEL_OUTPUT_DIR}/$type`
realpath=`readlink -fn ${KERNEL_OUTPUT_DIR}/$type`
@@ -255,7 +280,7 @@ get_cc_option () {
kernel_do_compile() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
- if [ "$BUILD_REPRODUCIBLE_BINARIES" = "1" ]; then
+ if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
# kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
# be set....
if [ "$SOURCE_DATE_EPOCH" = "0" ]; then
@@ -339,7 +364,9 @@ kernel_do_install() {
install -d ${D}/boot
for type in ${KERNEL_IMAGETYPES} ; do
install -m 0644 ${KERNEL_OUTPUT_DIR}/${type} ${D}/${KERNEL_IMAGEDEST}/${type}-${KERNEL_VERSION}
- ln -sf ${type}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${type}
+ if [ "${KERNEL_PACKAGE_NAME}" = "kernel" ]; then
+ ln -sf ${type}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${type}
+ fi
done
install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
@@ -393,13 +420,14 @@ do_shared_workdir_setscene () {
emit_depmod_pkgdata() {
# Stash data for depmod
- install -d ${PKGDESTWORK}/kernel-depmod/
- echo "${KERNEL_VERSION}" > ${PKGDESTWORK}/kernel-depmod/kernel-abiversion
- cp ${B}/System.map ${PKGDESTWORK}/kernel-depmod/System.map-${KERNEL_VERSION}
+ install -d ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/
+ echo "${KERNEL_VERSION}" > ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/${KERNEL_PACKAGE_NAME}-abiversion
+ cp ${B}/System.map ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/System.map-${KERNEL_VERSION}
}
PACKAGEFUNCS += "emit_depmod_pkgdata"
+do_shared_workdir[cleandirs] += " ${STAGING_KERNEL_BUILDDIR}"
do_shared_workdir () {
cd ${B}
@@ -410,7 +438,7 @@ do_shared_workdir () {
# Store the kernel version in sysroots for module-base.bbclass
#
- echo "${KERNEL_VERSION}" > $kerneldir/kernel-abiversion
+ echo "${KERNEL_VERSION}" > $kerneldir/${KERNEL_PACKAGE_NAME}-abiversion
# Copy files required for module builds
cp System.map $kerneldir/System.map-${KERNEL_VERSION}
@@ -439,8 +467,10 @@ do_shared_workdir () {
# arch/powerpc/lib/crtsavres.o which is present in
# KBUILD_LDFLAGS_MODULE, making it required to build external modules.
if [ ${ARCH} = "powerpc" ]; then
- mkdir -p $kerneldir/arch/powerpc/lib/
- cp arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
+ if [ -e arch/powerpc/lib/crtsavres.o ]; then
+ mkdir -p $kerneldir/arch/powerpc/lib/
+ cp arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
+ fi
fi
if [ -d include/generated ]; then
@@ -459,7 +489,7 @@ sysroot_stage_all () {
:
}
-KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} O=${B} oldnoconfig || yes '' | oe_runmake -C ${S} O=${B} oldconfig"
+KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} O=${B} oldnoconfig"
python check_oldest_kernel() {
oldest_kernel = d.getVar('OLDEST_KERNEL')
@@ -504,32 +534,34 @@ addtask savedefconfig after do_configure
inherit cml1
+KCONFIG_CONFIG_COMMAND_append = " HOSTLDFLAGS='${BUILD_LDFLAGS}'"
+
EXPORT_FUNCTIONS do_compile do_install do_configure
# kernel-base becomes kernel-${KERNEL_VERSION}
# kernel-image becomes kernel-image-${KERNEL_VERSION}
-PACKAGES = "kernel kernel-base kernel-vmlinux kernel-image kernel-dev kernel-modules"
+PACKAGES = "${KERNEL_PACKAGE_NAME} ${KERNEL_PACKAGE_NAME}-base ${KERNEL_PACKAGE_NAME}-vmlinux ${KERNEL_PACKAGE_NAME}-image ${KERNEL_PACKAGE_NAME}-dev ${KERNEL_PACKAGE_NAME}-modules"
FILES_${PN} = ""
-FILES_kernel-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin"
-FILES_kernel-image = ""
-FILES_kernel-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
-FILES_kernel-vmlinux = "/boot/vmlinux-${KERNEL_VERSION_NAME}"
-FILES_kernel-modules = ""
-RDEPENDS_kernel = "kernel-base"
+FILES_${KERNEL_PACKAGE_NAME}-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin"
+FILES_${KERNEL_PACKAGE_NAME}-image = ""
+FILES_${KERNEL_PACKAGE_NAME}-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
+FILES_${KERNEL_PACKAGE_NAME}-vmlinux = "/boot/vmlinux-${KERNEL_VERSION_NAME}"
+FILES_${KERNEL_PACKAGE_NAME}-modules = ""
+RDEPENDS_${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base"
# Allow machines to override this dependency if kernel image files are
# not wanted in images as standard
-RDEPENDS_kernel-base ?= "kernel-image"
-PKG_kernel-image = "kernel-image-${@legitimize_package_name('${KERNEL_VERSION}')}"
-RDEPENDS_kernel-image += "${@base_conditional('KERNEL_IMAGETYPE', 'vmlinux', 'kernel-vmlinux', '', d)}"
-PKG_kernel-base = "kernel-${@legitimize_package_name('${KERNEL_VERSION}')}"
-RPROVIDES_kernel-base += "kernel-${KERNEL_VERSION}"
-ALLOW_EMPTY_kernel = "1"
-ALLOW_EMPTY_kernel-base = "1"
-ALLOW_EMPTY_kernel-image = "1"
-ALLOW_EMPTY_kernel-modules = "1"
-DESCRIPTION_kernel-modules = "Kernel modules meta package"
-
-pkg_postinst_kernel-base () {
+RDEPENDS_${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image"
+PKG_${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name('${KERNEL_VERSION}')}"
+RDEPENDS_${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux', '', d)}"
+PKG_${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name('${KERNEL_VERSION}')}"
+RPROVIDES_${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION}"
+ALLOW_EMPTY_${KERNEL_PACKAGE_NAME} = "1"
+ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-base = "1"
+ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-image = "1"
+ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-modules = "1"
+DESCRIPTION_${KERNEL_PACKAGE_NAME}-modules = "Kernel modules meta package"
+
+pkg_postinst_${KERNEL_PACKAGE_NAME}-base () {
if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then
mkdir -p $D/lib/modules/${KERNEL_VERSION}
fi
@@ -543,7 +575,7 @@ pkg_postinst_kernel-base () {
PACKAGESPLITFUNCS_prepend = "split_kernel_packages "
python split_kernel_packages () {
- do_split_packages(d, root='${nonarch_base_libdir}/firmware', file_regex='^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
+ do_split_packages(d, root='${nonarch_base_libdir}/firmware', file_regex='^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='${KERNEL_PACKAGE_NAME}-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
}
# Many scripts want to look in arch/$arch/boot for the bootable
@@ -634,21 +666,27 @@ MODULE_TARBALL_SYMLINK_NAME ?= "modules-${MACHINE}.tgz"
MODULE_TARBALL_DEPLOY ?= "1"
kernel_do_deploy() {
+ deployDir="${DEPLOYDIR}"
+ if [ -n "${KERNEL_DEPLOYSUBDIR}" ]; then
+ deployDir="${DEPLOYDIR}/${KERNEL_DEPLOYSUBDIR}"
+ mkdir "$deployDir"
+ fi
+
for type in ${KERNEL_IMAGETYPES} ; do
base_name=${type}-${KERNEL_IMAGE_BASE_NAME}
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${type} ${DEPLOYDIR}/${base_name}.bin
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/${type} $deployDir/${base_name}.bin
done
if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
mkdir -p ${D}/lib
- tar -cvzf ${DEPLOYDIR}/${MODULE_TARBALL_BASE_NAME} -C ${D} lib
- ln -sf ${MODULE_TARBALL_BASE_NAME} ${DEPLOYDIR}/${MODULE_TARBALL_SYMLINK_NAME}
+ tar -cvzf $deployDir/${MODULE_TARBALL_BASE_NAME} -C ${D} lib
+ ln -sf ${MODULE_TARBALL_BASE_NAME} $deployDir/${MODULE_TARBALL_SYMLINK_NAME}
fi
for type in ${KERNEL_IMAGETYPES} ; do
base_name=${type}-${KERNEL_IMAGE_BASE_NAME}
symlink_name=${type}-${KERNEL_IMAGE_SYMLINK_NAME}
- ln -sf ${base_name}.bin ${DEPLOYDIR}/${symlink_name}.bin
- ln -sf ${base_name}.bin ${DEPLOYDIR}/${type}
+ ln -sf ${base_name}.bin $deployDir/${symlink_name}.bin
+ ln -sf ${base_name}.bin $deployDir/${type}
done
cd ${B}
@@ -658,8 +696,8 @@ kernel_do_deploy() {
echo "Copying deploy ${type} kernel-initramfs image and setting up links..."
initramfs_base_name=${type}-${INITRAMFS_BASE_NAME}
initramfs_symlink_name=${type}-initramfs-${MACHINE}
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${type}.initramfs ${DEPLOYDIR}/${initramfs_base_name}.bin
- ln -sf ${initramfs_base_name}.bin ${DEPLOYDIR}/${initramfs_symlink_name}.bin
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/${type}.initramfs $deployDir/${initramfs_base_name}.bin
+ ln -sf ${initramfs_base_name}.bin $deployDir/${initramfs_symlink_name}.bin
fi
done
}
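
Taken together, the KERNEL_PACKAGE_NAME plumbing lets a second kernel recipe build alongside the default one. A minimal sketch (recipe and package names are assumptions; note the class warns when KERNEL_PACKAGE_NAME equals PN, per the bug 11905 comment above):

    # recipes-kernel/linux/linux-secondary_git.bb (sketch)
    KERNEL_PACKAGE_NAME = "linux-alt"
    inherit kernel
    # result: no virtual/kernel PROVIDES, sources staged in
    # ${WORKDIR}/kernel-source, deploy artifacts placed under a
    # linux-alt/ subdirectory of DEPLOYDIR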
diff --git a/import-layers/yocto-poky/meta/classes/libc-package.bbclass b/import-layers/yocto-poky/meta/classes/libc-package.bbclass
index 739adce69..2e7cd2529 100644
--- a/import-layers/yocto-poky/meta/classes/libc-package.bbclass
+++ b/import-layers/yocto-poky/meta/classes/libc-package.bbclass
@@ -113,9 +113,9 @@ python package_do_split_gconvs () {
bb.error("datadir not defined")
return
- gconv_libdir = base_path_join(libdir, "gconv")
- charmap_dir = base_path_join(datadir, "i18n", "charmaps")
- locales_dir = base_path_join(datadir, "i18n", "locales")
+ gconv_libdir = oe.path.join(libdir, "gconv")
+ charmap_dir = oe.path.join(datadir, "i18n", "charmaps")
+ locales_dir = oe.path.join(datadir, "i18n", "locales")
binary_locales_dir = d.getVar('localedir')
def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group):
@@ -189,7 +189,7 @@ python package_do_split_gconvs () {
# Read in supported locales and associated encodings
supported = {}
- with open(base_path_join(d.getVar('WORKDIR'), "SUPPORTED")) as f:
+ with open(oe.path.join(d.getVar('WORKDIR'), "SUPPORTED")) as f:
for line in f.readlines():
try:
locale, charset = line.rstrip().split()
@@ -231,12 +231,12 @@ python package_do_split_gconvs () {
commands = {}
def output_locale_binary(name, pkgname, locale, encoding):
- treedir = base_path_join(d.getVar("WORKDIR"), "locale-tree")
- ldlibdir = base_path_join(treedir, d.getVar("base_libdir"))
+ treedir = oe.path.join(d.getVar("WORKDIR"), "locale-tree")
+ ldlibdir = oe.path.join(treedir, d.getVar("base_libdir"))
path = d.getVar("PATH")
- i18npath = base_path_join(treedir, datadir, "i18n")
- gconvpath = base_path_join(treedir, "iconvdata")
- outputpath = base_path_join(treedir, binary_locales_dir)
+ i18npath = oe.path.join(treedir, datadir, "i18n")
+ gconvpath = oe.path.join(treedir, "iconvdata")
+ outputpath = oe.path.join(treedir, binary_locales_dir)
use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or "0"
if use_cross_localedef == "1":
@@ -344,7 +344,7 @@ python package_do_split_gconvs () {
d.appendVar('RDEPENDS_%s' % metapkg, ' ' + pkg)
if use_bin == "compile":
- makefile = base_path_join(d.getVar("WORKDIR"), "locale-tree", "Makefile")
+ makefile = oe.path.join(d.getVar("WORKDIR"), "locale-tree", "Makefile")
m = open(makefile, "w")
m.write("all: %s\n\n" % " ".join(commands.keys()))
for cmd in commands:
diff --git a/import-layers/yocto-poky/meta/classes/license.bbclass b/import-layers/yocto-poky/meta/classes/license.bbclass
index d35311046..06dd4a8c1 100644
--- a/import-layers/yocto-poky/meta/classes/license.bbclass
+++ b/import-layers/yocto-poky/meta/classes/license.bbclass
@@ -226,9 +226,7 @@ def get_deployed_dependencies(d):
# The manifest file name contains the arch. Because we are not running
# in the recipe context it is necessary to check every arch used.
sstate_manifest_dir = d.getVar("SSTATE_MANIFESTS")
- sstate_archs = d.getVar("SSTATE_ARCHS")
- extra_archs = d.getVar("PACKAGE_EXTRA_ARCHS")
- archs = list(set(("%s %s" % (sstate_archs, extra_archs)).split()))
+ archs = list(set(d.getVar("SSTATE_ARCHS").split()))
for dep in depends:
# Some recipes have an arch on their own, so we try that first.
special_arch = d.getVar("PACKAGE_ARCH_pn-%s" % dep)
@@ -336,7 +334,7 @@ def add_package_and_files(d):
files = d.getVar('LICENSE_FILES_DIRECTORY')
pn = d.getVar('PN')
pn_lic = "%s%s" % (pn, d.getVar('LICENSE_PACKAGE_SUFFIX', False))
- if pn_lic in packages:
+ if pn_lic in packages.split():
bb.warn("%s package already existed in %s." % (pn_lic, pn))
else:
# first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
diff --git a/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass b/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass
index e1d8b1843..68105d9b8 100644
--- a/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass
+++ b/import-layers/yocto-poky/meta/classes/live-vm-common.bbclass
@@ -32,19 +32,16 @@ PCBIOS_CLASS = "${@['','syslinux'][d.getVar('PCBIOS') == '1']}"
inherit ${EFI_CLASS}
inherit ${PCBIOS_CLASS}
-KERNEL_IMAGETYPE ??= "bzImage"
-VM_DEFAULT_KERNEL ??= "${KERNEL_IMAGETYPE}"
-
populate_kernel() {
dest=$1
install -d $dest
# Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
- bbnote "Trying to install ${DEPLOY_DIR_IMAGE}/${VM_DEFAULT_KERNEL} as $dest/vmlinuz"
- if [ -e ${DEPLOY_DIR_IMAGE}/${VM_DEFAULT_KERNEL} ]; then
- install -m 0644 ${DEPLOY_DIR_IMAGE}/${VM_DEFAULT_KERNEL} $dest/vmlinuz
+ bbnote "Trying to install ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} as $dest/${KERNEL_IMAGETYPE}"
+ if [ -e ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} ]; then
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} $dest/${KERNEL_IMAGETYPE}
else
- bbwarn "${DEPLOY_DIR_IMAGE}/${VM_DEFAULT_KERNEL} doesn't exist"
+ bbwarn "${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} doesn't exist"
fi
# initrd is made of concatenation of multiple filesystem images
diff --git a/import-layers/yocto-poky/meta/classes/logging.bbclass b/import-layers/yocto-poky/meta/classes/logging.bbclass
index 06c7c31c3..a0c94e98c 100644
--- a/import-layers/yocto-poky/meta/classes/logging.bbclass
+++ b/import-layers/yocto-poky/meta/classes/logging.bbclass
@@ -86,7 +86,7 @@ bbdebug() {
# Strip off the debug level and ensure it is an integer
DBGLVL=$1; shift
- NONDIGITS=$(echo "$DBGLVL" | tr -d [:digit:])
+ NONDIGITS=$(echo "$DBGLVL" | tr -d "[:digit:]")
if [ "$NONDIGITS" ]; then
bbfatal "$USAGE"
fi
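
The quoting matters because an unquoted [:digit:] is a glob pattern that can match single-character filenames (d, i, g, t, :) in the current directory; a sketch of the failure mode:

    $ touch d
    $ echo "3x" | tr -d [:digit:]     # shell expands [:digit:] to "d"; digits survive
    3x
    $ echo "3x" | tr -d "[:digit:]"   # quoted pattern reaches tr intact
    x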
diff --git a/import-layers/yocto-poky/meta/classes/meson.bbclass b/import-layers/yocto-poky/meta/classes/meson.bbclass
new file mode 100644
index 000000000..2d7ee4fff
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/meson.bbclass
@@ -0,0 +1,130 @@
+inherit python3native
+
+DEPENDS_append = " meson-native ninja-native"
+
+# As Meson enforces out-of-tree builds we can just use cleandirs
+B = "${WORKDIR}/build"
+do_configure[cleandirs] = "${B}"
+
+# Where the meson.build build configuration is
+MESON_SOURCEPATH = "${S}"
+
+def noprefix(var, d):
+ return d.getVar(var).replace(d.getVar('prefix') + '/', '', 1)
+
+MESONOPTS = " --prefix ${prefix} \
+ --buildtype plain \
+ --bindir ${@noprefix('bindir', d)} \
+ --sbindir ${@noprefix('sbindir', d)} \
+ --datadir ${@noprefix('datadir', d)} \
+ --libdir ${@noprefix('libdir', d)} \
+ --libexecdir ${@noprefix('libexecdir', d)} \
+ --includedir ${@noprefix('includedir', d)} \
+ --mandir ${@noprefix('mandir', d)} \
+ --infodir ${@noprefix('infodir', d)} \
+ --sysconfdir ${sysconfdir} \
+ --localstatedir ${localstatedir} \
+ --sharedstatedir ${sharedstatedir}"
+
+MESON_TOOLCHAIN_ARGS = "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS}"
+MESON_C_ARGS = "${MESON_TOOLCHAIN_ARGS} ${CFLAGS}"
+MESON_CPP_ARGS = "${MESON_TOOLCHAIN_ARGS} ${CXXFLAGS}"
+MESON_LINK_ARGS = "${MESON_TOOLCHAIN_ARGS} ${LDFLAGS}"
+
+# Both are required in the cross file but not actually used by meson
+MESON_HOST_ENDIAN = "bogus-endian"
+MESON_TARGET_ENDIAN = "bogus-endian"
+
+EXTRA_OEMESON += "${PACKAGECONFIG_CONFARGS}"
+
+MESON_CROSS_FILE = ""
+MESON_CROSS_FILE_class-target = "--cross-file ${WORKDIR}/meson.cross"
+MESON_CROSS_FILE_class-nativesdk = "--cross-file ${WORKDIR}/meson.cross"
+
+def meson_array(var, d):
+ return "', '".join(d.getVar(var).split()).join(("'", "'"))
+
+addtask write_config before do_configure
+do_write_config[vardeps] += "MESON_C_ARGS MESON_CPP_ARGS MESON_LINK_ARGS CC CXX LD AR NM STRIP READELF"
+do_write_config() {
+    # This needs to be Python so the space-separated args are split into individual list elements
+ cat >${WORKDIR}/meson.cross <<EOF
+[binaries]
+c = [${@meson_array('CC', d)}]
+cpp = [${@meson_array('CXX', d)}]
+ar = [${@meson_array('AR', d)}]
+nm = [${@meson_array('NM', d)}]
+ld = [${@meson_array('LD', d)}]
+strip = [${@meson_array('STRIP', d)}]
+readelf = [${@meson_array('READELF', d)}]
+pkgconfig = 'pkg-config'
+
+[properties]
+needs_exe_wrapper = true
+c_args = [${@meson_array('MESON_C_ARGS', d)}]
+c_link_args = [${@meson_array('MESON_LINK_ARGS', d)}]
+cpp_args = [${@meson_array('MESON_CPP_ARGS', d)}]
+cpp_link_args = [${@meson_array('MESON_LINK_ARGS', d)}]
+gtkdoc_exe_wrapper = '${B}/gtkdoc-qemuwrapper'
+
+[host_machine]
+system = '${HOST_OS}'
+cpu_family = '${HOST_ARCH}'
+cpu = '${HOST_ARCH}'
+endian = '${MESON_HOST_ENDIAN}'
+
+[target_machine]
+system = '${TARGET_OS}'
+cpu_family = '${TARGET_ARCH}'
+cpu = '${TARGET_ARCH}'
+endian = '${MESON_TARGET_ENDIAN}'
+EOF
+}
+
+CONFIGURE_FILES = "meson.build"
+
+meson_do_configure() {
+ if ! meson ${MESONOPTS} "${MESON_SOURCEPATH}" "${B}" ${MESON_CROSS_FILE} ${EXTRA_OEMESON}; then
+ cat ${B}/meson-logs/meson-log.txt
+ bbfatal_log meson failed
+ fi
+}
+
+meson_do_configure_prepend_class-target() {
+ # Set these so that meson uses the native tools for its build sanity tests,
+ # which require executables to be runnable. The cross file will still
+ # override these for the target build. Note that we do *not* set CFLAGS,
+ # LDFLAGS, etc. as they will be slurped in by meson and applied to the
+ # target build, causing errors.
+ export CC="${BUILD_CC}"
+ export CXX="${BUILD_CXX}"
+ export LD="${BUILD_LD}"
+ export AR="${BUILD_AR}"
+}
+
+meson_do_configure_prepend_class-nativesdk() {
+ # Set these so that meson uses the native tools for its build sanity tests,
+ # which require executables to be runnable. The cross file will still
+ # override these for the nativesdk build. Note that we do *not* set CFLAGS,
+ # LDFLAGS, etc. as they will be slurped in by meson and applied to the
+ # nativesdk build, causing errors.
+ export CC="${BUILD_CC}"
+ export CXX="${BUILD_CXX}"
+ export LD="${BUILD_LD}"
+ export AR="${BUILD_AR}"
+}
+
+meson_do_configure_prepend_class-native() {
+ export PKG_CONFIG="pkg-config-native"
+}
+
+do_compile[progress] = "outof:^\[(\d+)/(\d+)\]\s+"
+meson_do_compile() {
+ ninja ${PARALLEL_MAKE}
+}
+
+meson_do_install() {
+ DESTDIR='${D}' ninja ${PARALLEL_MAKEINST} install
+}
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
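
To see what meson_array() above produces, here is a standalone sketch; the toolchain string is an illustrative assumption, not taken from a real build:

```python
# Replicates meson_array(): a whitespace-separated variable becomes
# the quoted, comma-separated list Meson expects in its cross file.
def meson_array(value):
    return "', '".join(value.split()).join(("'", "'"))

cc = "arm-poky-linux-gnueabi-gcc -march=armv7-a --sysroot=/path/to/sysroot"
print("c = [%s]" % meson_array(cc))
# c = ['arm-poky-linux-gnueabi-gcc', '-march=armv7-a', '--sysroot=/path/to/sysroot']
```

This is why the heredoc "needs to be Python": the splitting must happen per argument so that each flag becomes its own list element rather than one long quoted string.
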
diff --git a/import-layers/yocto-poky/meta/classes/mirrors.bbclass b/import-layers/yocto-poky/meta/classes/mirrors.bbclass
index 766f1cb6f..b331afc5d 100644
--- a/import-layers/yocto-poky/meta/classes/mirrors.bbclass
+++ b/import-layers/yocto-poky/meta/classes/mirrors.bbclass
@@ -67,7 +67,7 @@ ${CPAN_MIRROR} http://search.cpan.org/CPAN/ \n \
# where git native protocol fetches may fail due to local firewall rules, etc.
MIRRORS += "\
-git://anonscm.debian.org/.* git://anonscm.debian.org/git/PATH;protocol=https \n \
+git://salsa.debian.org/.* git://salsa.debian.org/PATH;protocol=https \n \
git://git.gnome.org/.* git://git.gnome.org/browse/PATH;protocol=https \n \
git://git.savannah.gnu.org/.* git://git.savannah.gnu.org/git/PATH;protocol=https \n \
git://git.yoctoproject.org/.* git://git.yoctoproject.org/git/PATH;protocol=https \n \
diff --git a/import-layers/yocto-poky/meta/classes/module-base.bbclass b/import-layers/yocto-poky/meta/classes/module-base.bbclass
index 6fe77c01b..27bd69ff3 100644
--- a/import-layers/yocto-poky/meta/classes/module-base.bbclass
+++ b/import-layers/yocto-poky/meta/classes/module-base.bbclass
@@ -1,7 +1,8 @@
inherit kernel-arch
-# This is instead of DEPENDS = "virtual/kernel"
-do_configure[depends] += "virtual/kernel:do_compile_kernelmodules"
+# We do the dependency this way because the output is not preserved
+# in sstate, so we must force do_compile to run (once).
+do_configure[depends] += "make-mod-scripts:do_compile"
export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"
@@ -12,16 +13,9 @@ export CROSS_COMPILE = "${TARGET_PREFIX}"
# we didn't pick the name.
export KBUILD_OUTPUT = "${STAGING_KERNEL_BUILDDIR}"
-export KERNEL_VERSION = "${@base_read_file('${STAGING_KERNEL_BUILDDIR}/kernel-abiversion')}"
+export KERNEL_VERSION = "${@oe.utils.read_file('${STAGING_KERNEL_BUILDDIR}/kernel-abiversion')}"
KERNEL_OBJECT_SUFFIX = ".ko"
# kernel modules are generally machine specific
PACKAGE_ARCH = "${MACHINE_ARCH}"
-# Function to ensure the kernel scripts are created. Expected to
-# be called before do_compile. See module.bbclass for an example.
-do_make_scripts() {
- unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
- make CC="${KERNEL_CC}" LD="${KERNEL_LD}" AR="${KERNEL_AR}" \
- -C ${STAGING_KERNEL_DIR} O=${STAGING_KERNEL_BUILDDIR} scripts
-}
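
The KERNEL_VERSION change above swaps base_read_file for oe.utils.read_file. A minimal sketch of what such a helper plausibly does (an assumption; the real oe.utils.read_file may handle errors differently):

```python
# Hypothetical stand-in for oe.utils.read_file(): return the stripped
# contents of a file such as kernel-abiversion, or None if missing.
def read_file(filename):
    try:
        with open(filename) as f:
            return f.read().strip()
    except FileNotFoundError:
        return None

# e.g. read_file('.../kernel-abiversion') might return '4.14.76'
```
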
diff --git a/import-layers/yocto-poky/meta/classes/module.bbclass b/import-layers/yocto-poky/meta/classes/module.bbclass
index 78d1b21db..e34496024 100644
--- a/import-layers/yocto-poky/meta/classes/module.bbclass
+++ b/import-layers/yocto-poky/meta/classes/module.bbclass
@@ -1,9 +1,5 @@
inherit module-base kernel-module-split pkgconfig
-addtask make_scripts after do_prepare_recipe_sysroot before do_configure
-do_make_scripts[lockfiles] = "${TMPDIR}/kernel-scripts.lock"
-do_make_scripts[depends] += "virtual/kernel:do_shared_workdir"
-
EXTRA_OEMAKE += "KERNEL_SRC=${STAGING_KERNEL_DIR}"
MODULES_INSTALL_TARGET ?= "modules_install"
diff --git a/import-layers/yocto-poky/meta/classes/multilib.bbclass b/import-layers/yocto-poky/meta/classes/multilib.bbclass
index 816f54e7f..519c1a55b 100644
--- a/import-layers/yocto-poky/meta/classes/multilib.bbclass
+++ b/import-layers/yocto-poky/meta/classes/multilib.bbclass
@@ -11,8 +11,8 @@ python multilib_virtclass_handler () {
# There should only be one kernel in multilib configs
# We also skip multilib setup for module packages.
provides = (e.data.getVar("PROVIDES") or "").split()
- if "virtual/kernel" in provides or bb.data.inherits_class('module-base', e.data):
- raise bb.parse.SkipPackage("We shouldn't have multilib variants for the kernel")
+ if "virtual/kernel" in provides or bb.data.inherits_class('module-base', e.data) or "make-mod-scripts" in e.data.getVar("PN"):
+ raise bb.parse.SkipRecipe("We shouldn't have multilib variants for the kernel")
save_var_name=e.data.getVar("MULTILIB_SAVE_VARNAME") or ""
for name in save_var_name.split():
@@ -41,13 +41,13 @@ python multilib_virtclass_handler () {
return
if bb.data.inherits_class('native', e.data):
- raise bb.parse.SkipPackage("We can't extend native recipes")
+ raise bb.parse.SkipRecipe("We can't extend native recipes")
if bb.data.inherits_class('nativesdk', e.data) or bb.data.inherits_class('crosssdk', e.data):
- raise bb.parse.SkipPackage("We can't extend nativesdk recipes")
+ raise bb.parse.SkipRecipe("We can't extend nativesdk recipes")
if bb.data.inherits_class('allarch', e.data) and not bb.data.inherits_class('packagegroup', e.data):
- raise bb.parse.SkipPackage("Don't extend allarch recipes which are not packagegroups")
+ raise bb.parse.SkipRecipe("Don't extend allarch recipes which are not packagegroups")
# Expand this since this won't work correctly once we set a multilib into place
@@ -76,7 +76,6 @@ python multilib_virtclass_handler () {
newtune = e.data.getVar("DEFAULTTUNE_" + "virtclass-multilib-" + variant, False)
if newtune:
e.data.setVar("DEFAULTTUNE", newtune)
- e.data.setVar('DEFAULTTUNE_ML_%s' % variant, newtune)
}
addhandler multilib_virtclass_handler
@@ -100,8 +99,8 @@ python __anonymous () {
d.setVar("LINGUAS_INSTALL", "")
# FIXME, we need to map this to something, not delete it!
d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
-
- if bb.data.inherits_class('image', d):
+ bb.build.deltask('do_populate_sdk', d)
+ bb.build.deltask('do_populate_sdk_ext', d)
return
clsextend.map_depends_variable("DEPENDS")
@@ -115,7 +114,6 @@ python __anonymous () {
clsextend.map_packagevars()
clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
- clsextend.map_variable("PACKAGE_INSTALL")
clsextend.map_variable("INITSCRIPT_PACKAGES")
clsextend.map_variable("USERADD_PACKAGES")
clsextend.map_variable("SYSTEMD_PACKAGES")
diff --git a/import-layers/yocto-poky/meta/classes/multilib_global.bbclass b/import-layers/yocto-poky/meta/classes/multilib_global.bbclass
index fd0bfe127..d2ec1adfe 100644
--- a/import-layers/yocto-poky/meta/classes/multilib_global.bbclass
+++ b/import-layers/yocto-poky/meta/classes/multilib_global.bbclass
@@ -13,11 +13,14 @@ def preferred_ml_updates(d):
versions = []
providers = []
+ rproviders = []
for v in d.keys():
if v.startswith("PREFERRED_VERSION_"):
versions.append(v)
if v.startswith("PREFERRED_PROVIDER_"):
providers.append(v)
+ if v.startswith("PREFERRED_RPROVIDER_"):
+ rproviders.append(v)
for v in versions:
val = d.getVar(v, False)
@@ -91,6 +94,29 @@ def preferred_ml_updates(d):
if prov != provexp and d.getVar(prov, False):
d.renameVar(prov, provexp)
+ for prov in rproviders:
+ val = d.getVar(prov, False)
+ pkg = prov.replace("PREFERRED_RPROVIDER_", "")
+ for p in prefixes:
+ newval = p + "-" + val
+
+ # implement variable keys
+ localdata = bb.data.createCopy(d)
+ override = ":virtclass-multilib-" + p
+ localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
+ newname = localdata.expand(prov)
+ if newname != prov and not d.getVar(newname, False):
+ d.setVar(newname, localdata.expand(newval))
+
+ # implement alternative multilib name
+ newname = localdata.expand("PREFERRED_RPROVIDER_" + p + "-" + pkg)
+ if not d.getVar(newname, False) and newval != None:
+ d.setVar(newname, localdata.expand(newval))
+ # Avoid future variable key expansion
+ provexp = d.expand(prov)
+ if prov != provexp and d.getVar(prov, False):
+ d.renameVar(prov, provexp)
+
def translate_provide(prefix, prov):
if not prov.startswith("virtual/"):
return prefix + "-" + prov
@@ -162,7 +188,7 @@ python multilib_virtclass_handler_global () {
if rprovs.strip():
e.data.setVar("RPROVIDES", rprovs)
- # Process RPROVIDES_${PN}...
+ # Process RPROVIDES_${PN}...
for pkg in (e.data.getVar("PACKAGES") or "").split():
origrprovs = rprovs = e.data.getVar("RPROVIDES_%s" % pkg) or ""
for clsextend in clsextends:
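
The new PREFERRED_RPROVIDER handling mirrors the existing PREFERRED_PROVIDER loop. Stripped of the datastore and OVERRIDES machinery, the core rename looks roughly like this (a simplified sketch; the variable-key expansion from the real code is omitted, and the udev/eudev pairing is only an illustration):

```python
# For each multilib prefix, derive a prefixed PREFERRED_RPROVIDER key
# whose package name and preferred value both gain the prefix.
def extend_rproviders(variables, prefixes):
    new = {}
    for key, val in variables.items():
        pkg = key.replace("PREFERRED_RPROVIDER_", "")
        for p in prefixes:
            new["PREFERRED_RPROVIDER_%s-%s" % (p, pkg)] = "%s-%s" % (p, val)
    return new

print(extend_rproviders({"PREFERRED_RPROVIDER_udev": "eudev"}, ["lib32"]))
# {'PREFERRED_RPROVIDER_lib32-udev': 'lib32-eudev'}
```
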
diff --git a/import-layers/yocto-poky/meta/classes/native.bbclass b/import-layers/yocto-poky/meta/classes/native.bbclass
index 9c434dce8..a911f2aeb 100644
--- a/import-layers/yocto-poky/meta/classes/native.bbclass
+++ b/import-layers/yocto-poky/meta/classes/native.bbclass
@@ -52,8 +52,6 @@ STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}"
# native pkg doesn't need the TOOLCHAIN_OPTIONS.
TOOLCHAIN_OPTIONS = ""
-DEPENDS_GETTEXT = "gettext-native"
-
# Don't build ptest natively
PTEST_ENABLED = "0"
diff --git a/import-layers/yocto-poky/meta/classes/npm.bbclass b/import-layers/yocto-poky/meta/classes/npm.bbclass
index a69bedbb2..c351ff086 100644
--- a/import-layers/yocto-poky/meta/classes/npm.bbclass
+++ b/import-layers/yocto-poky/meta/classes/npm.bbclass
@@ -2,7 +2,15 @@ DEPENDS_prepend = "nodejs-native "
RDEPENDS_${PN}_prepend = "nodejs "
S = "${WORKDIR}/npmpkg"
-NPM_INSTALLDIR = "${D}${libdir}/node_modules/${PN}"
+def node_pkgname(d):
+ bpn = d.getVar('BPN')
+ if bpn.startswith("node-"):
+ return bpn[5:]
+ return bpn
+
+NPMPN ?= "${@node_pkgname(d)}"
+
+NPM_INSTALLDIR = "${D}${libdir}/node_modules/${NPMPN}"
# function maps arch names to npm arch names
def npm_oe_arch_map(target_arch, d):
@@ -31,7 +39,7 @@ npm_do_compile() {
fi
npm set cache ${WORKDIR}/npm_cache
# clear cache before every build
- npm cache clear
+ npm cache clear --force
# Install pkg into ${S} without going to the registry
if [ "${NPM_INSTALL_DEV}" = "1" ]; then
npm --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --no-registry install
@@ -45,7 +53,8 @@ npm_do_install() {
# be created in this directory
export HOME=${WORKDIR}
mkdir -p ${NPM_INSTALLDIR}/
- npm install --prefix ${D}${prefix} -g --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --production --no-registry
+ npm pack .
+ npm install --prefix ${D}${prefix} -g --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --production --no-registry ${NPMPN}-${PV}.tgz
if [ -d ${D}${prefix}/etc ] ; then
# This will be empty
rmdir ${D}${prefix}/etc
@@ -53,13 +62,13 @@ npm_do_install() {
}
python populate_packages_prepend () {
- instdir = d.expand('${D}${libdir}/node_modules/${PN}')
+ instdir = d.expand('${D}${libdir}/node_modules/${NPMPN}')
extrapackages = oe.package.npm_split_package_dirs(instdir)
pkgnames = extrapackages.keys()
d.prependVar('PACKAGES', '%s ' % ' '.join(pkgnames))
for pkgname in pkgnames:
pkgrelpath, pdata = extrapackages[pkgname]
- pkgpath = '${libdir}/node_modules/${PN}/' + pkgrelpath
+ pkgpath = '${libdir}/node_modules/${NPMPN}/' + pkgrelpath
# package names can't have underscores but npm packages sometimes use them
oe_pkg_name = pkgname.replace('_', '-')
expanded_pkgname = d.expand(oe_pkg_name)
@@ -75,7 +84,7 @@ python populate_packages_prepend () {
}
FILES_${PN} += " \
- ${libdir}/node_modules/${PN} \
+ ${libdir}/node_modules/${NPMPN} \
"
EXPORT_FUNCTIONS do_compile do_install
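
The NPMPN indirection above exists because recipes are conventionally named node-<package> while the npm module itself keeps the bare name. The mapping, runnable standalone:

```python
# node_pkgname(): strip a conventional "node-" recipe-name prefix so
# the install path matches the actual npm package name.
def node_pkgname(bpn):
    return bpn[5:] if bpn.startswith("node-") else bpn

assert node_pkgname("node-lodash") == "lodash"   # illustrative name
assert node_pkgname("express") == "express"      # unprefixed names pass through
```
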
diff --git a/import-layers/yocto-poky/meta/classes/package.bbclass b/import-layers/yocto-poky/meta/classes/package.bbclass
index 2053d4639..edeffa978 100644
--- a/import-layers/yocto-poky/meta/classes/package.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package.bbclass
@@ -26,7 +26,7 @@
# a list of affected files in FILER{PROVIDES,DEPENDS}FLIST_pkg
#
# h) package_do_shlibs - Look at the shared libraries generated and automatically add any
-# depenedencies found. Also stores the package name so anyone else using this library
+# dependencies found. Also stores the package name so anyone else using this library
# knows which package to depend on.
#
# i) package_do_pkgconfig - Keep track of which packages need and provide which .pc files
@@ -52,7 +52,8 @@ LOCALE_SECTION ?= ''
ALL_MULTILIB_PACKAGE_ARCHS = "${@all_multilib_tune_values(d, 'PACKAGE_ARCHS')}"
# rpm is used for the per-file dependency identification
-PACKAGE_DEPENDS += "rpm-native"
+# dwarfsrcfiles is used to determine the list of debug source files
+PACKAGE_DEPENDS += "rpm-native dwarfsrcfiles-native"
# If your postinstall can execute at rootfs creation time rather than on
@@ -334,6 +335,33 @@ def checkbuildpath(file, d):
return False
+def parse_debugsources_from_dwarfsrcfiles_output(dwarfsrcfiles_output):
+ debugfiles = {}
+
+ for line in dwarfsrcfiles_output.splitlines():
+ if line.startswith("\t"):
+ debugfiles[os.path.normpath(line.split()[0])] = ""
+
+ return debugfiles.keys()
+
+def append_source_info(file, sourcefile, d, fatal=True):
+ cmd = "'dwarfsrcfiles' '%s'" % (file)
+ (retval, output) = oe.utils.getstatusoutput(cmd)
+ # 255 means a specific file wasn't fully parsed to get the debug file list, which is not a fatal failure
+ if retval != 0 and retval != 255:
+ msg = "dwarfsrcfiles failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")
+ if fatal:
+ bb.fatal(msg)
+ bb.note(msg)
+
+ debugsources = parse_debugsources_from_dwarfsrcfiles_output(output)
+ # filenames are null-separated - this is an artefact of the previous use
+ # of rpm's debugedit, which was writing them out that way, and the code elsewhere
+ # is still assuming that.
+ debuglistoutput = '\0'.join(debugsources) + '\0'
+ open(sourcefile, 'a').write(debuglistoutput)
+
+
def splitdebuginfo(file, debugfile, debugsrcdir, sourcefile, d):
# Function to split a single file into two components, one is the stripped
# target system binary, the other contains any debugging information. The
@@ -345,7 +373,6 @@ def splitdebuginfo(file, debugfile, debugsrcdir, sourcefile, d):
dvar = d.getVar('PKGD')
objcopy = d.getVar("OBJCOPY")
- debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/debugedit")
# We ignore kernel modules, we don't generate debug info files.
if file.find("/lib/modules/") != -1 and file.endswith(".ko"):
@@ -359,10 +386,7 @@ def splitdebuginfo(file, debugfile, debugsrcdir, sourcefile, d):
# We need to extract the debug src information here...
if debugsrcdir:
- cmd = "'%s' -i -l '%s' '%s'" % (debugedit, sourcefile, file)
- (retval, output) = oe.utils.getstatusoutput(cmd)
- if retval:
- bb.fatal("debugedit failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+ append_source_info(file, sourcefile, d)
bb.utils.mkdirhier(os.path.dirname(debugfile))
@@ -383,7 +407,7 @@ def splitdebuginfo(file, debugfile, debugsrcdir, sourcefile, d):
return 0
def copydebugsources(debugsrcdir, d):
- # The debug src information written out to sourcefile is further procecessed
+ # The debug src information written out to sourcefile is further processed
# and copied to the destination here.
import stat
@@ -393,7 +417,6 @@ def copydebugsources(debugsrcdir, d):
dvar = d.getVar('PKGD')
strip = d.getVar("STRIP")
objcopy = d.getVar("OBJCOPY")
- debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit")
workdir = d.getVar("WORKDIR")
workparentdir = os.path.dirname(os.path.dirname(workdir))
workbasedir = os.path.basename(os.path.dirname(workdir)) + "/" + os.path.basename(workdir)
@@ -633,7 +656,7 @@ python fixup_perms () {
# __str__ can be used to print out an entry in the input format
#
# if fs_perms_entry.path is None:
- # an error occured
+ # an error occurred
# if fs_perms_entry.link, you can retrieve:
# fs_perms_entry.path = path
# fs_perms_entry.link = target of link
@@ -860,6 +883,7 @@ python split_and_strip_files () {
dvar = d.getVar('PKGD')
pn = d.getVar('PN')
+ targetos = d.getVar('TARGET_OS')
oldcwd = os.getcwd()
os.chdir(dvar)
@@ -901,7 +925,7 @@ python split_and_strip_files () {
# 16 - kernel module
def isELF(path):
type = 0
- ret, result = oe.utils.getstatusoutput("file \"%s\"" % path.replace("\"", "\\\""))
+ ret, result = oe.utils.getstatusoutput("file -b '%s'" % path)
if ret:
msg = "split_and_strip_files: 'file %s' failed" % path
@@ -919,6 +943,15 @@ python split_and_strip_files () {
type |= 8
return type
+ def isStaticLib(path):
+ if path.endswith('.a') and not os.path.islink(path):
+ with open(path, 'rb') as fh:
+ # The magic must include the first slash to avoid
+ # matching golang static libraries
+ magic = b'!<arch>\x0a/'
+ start = fh.read(len(magic))
+ return start == magic
+ return False
#
# First let's figure out all of the files we may have to process ... do this only once!
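
The isStaticLib() check introduced above relies on the archive file magic. A standalone sketch of the same test:

```python
# A GNU ar archive begins with "!<arch>\n"; checking one byte further
# for "/" (the symbol-table member name) avoids matching Go's .a
# files, which start with a different first member -- the rationale
# the in-tree comment gives for extending the magic.
import os

def is_static_lib(path):
    if path.endswith(".a") and not os.path.islink(path):
        magic = b"!<arch>\x0a/"
        with open(path, "rb") as fh:
            return fh.read(len(magic)) == magic
    return False
```
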
@@ -926,9 +959,11 @@ python split_and_strip_files () {
elffiles = {}
symlinks = {}
kernmods = []
+ staticlibs = []
inodes = {}
libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir"))
baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir"))
+ skipfiles = (d.getVar("INHIBIT_PACKAGE_STRIP_FILES") or "").split()
if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1' or \
d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
for root, dirs, files in cpath.walk(dvar):
@@ -937,6 +972,9 @@ python split_and_strip_files () {
if file.endswith(".ko") and file.find("/lib/modules/") != -1:
kernmods.append(file)
continue
+ if isStaticLib(file):
+ staticlibs.append(file)
+ continue
# Skip debug files
if debugappend and file.endswith(debugappend):
@@ -944,6 +982,9 @@ python split_and_strip_files () {
if debugdir and debugdir in os.path.dirname(file[len(dvar):]):
continue
+ if file in skipfiles:
+ continue
+
try:
ltarget = cpath.realpath(file, dvar, False)
s = cpath.lstat(ltarget)
@@ -955,7 +996,7 @@ python split_and_strip_files () {
continue
if not s:
continue
- # Check its an excutable
+            # Check it's an executable
if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \
or ((file.startswith(libdir) or file.startswith(baselibdir)) and (".so" in f or ".node" in f)):
# If it's a symlink, and points to an ELF file, we capture the readlink target
@@ -983,7 +1024,7 @@ python split_and_strip_files () {
# b) Only strip any hardlinked file once (no races)
# c) Track any hardlinks between files so that we can reconstruct matching debug file hardlinks
- # Use a reference of device ID and inode number to indentify files
+ # Use a reference of device ID and inode number to identify files
file_reference = "%d_%d" % (s.st_dev, s.st_ino)
if file_reference in inodes:
os.unlink(file)
@@ -1012,6 +1053,10 @@ python split_and_strip_files () {
# Only store off the hard link reference if we successfully split!
splitdebuginfo(file, fpath, debugsrcdir, sourcefile, d)
+ if debugsrcdir and not targetos.startswith("mingw"):
+ for file in staticlibs:
+ append_source_info(file, sourcefile, d, fatal=False)
+
# Hardlink our debug symbols to the other hardlink copies
for ref in inodes:
if len(inodes[ref]) == 1:
@@ -1105,7 +1150,7 @@ python populate_packages () {
d.setVar('FILES_%s' % src_package_name, '/usr/src/debug')
# Sanity check PACKAGES for duplicates
- # Sanity should be moved to sanity.bbclass once we have the infrastucture
+ # Sanity should be moved to sanity.bbclass once we have the infrastructure
package_list = []
for pkg in packages.split():
@@ -1303,6 +1348,36 @@ python emit_pkgdata() {
from glob import glob
import json
+ def process_postinst_on_target(pkg, mlprefix):
+ defer_fragment = """
+if [ -n "$D" ]; then
+ $INTERCEPT_DIR/postinst_intercept delay_to_first_boot %s mlprefix=%s
+ exit 0
+fi
+""" % (pkg, mlprefix)
+
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst_ontarget = d.getVar('pkg_postinst_ontarget_%s' % pkg)
+
+ if postinst_ontarget:
+ bb.debug(1, 'adding deferred pkg_postinst_ontarget() to pkg_postinst() for %s' % pkg)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += defer_fragment
+ postinst += postinst_ontarget
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+
+ def add_set_e_to_scriptlets(pkg):
+ for scriptlet_name in ('pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'):
+ scriptlet = d.getVar('%s_%s' % (scriptlet_name, pkg))
+ if scriptlet:
+ scriptlet_split = scriptlet.split('\n')
+ if scriptlet_split[0].startswith("#!"):
+ scriptlet = scriptlet_split[0] + "\nset -e\n" + "\n".join(scriptlet_split[1:])
+ else:
+ scriptlet = "set -e\n" + "\n".join(scriptlet_split[0:])
+ d.setVar('%s_%s' % (scriptlet_name, pkg), scriptlet)
+
def write_if_exists(f, pkg, var):
def encode(str):
import codecs
@@ -1398,6 +1473,8 @@ python emit_pkgdata() {
write_if_exists(sf, pkg, 'ALLOW_EMPTY')
write_if_exists(sf, pkg, 'FILES')
write_if_exists(sf, pkg, 'CONFFILES')
+ process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
+ add_set_e_to_scriptlets(pkg)
write_if_exists(sf, pkg, 'pkg_postinst')
write_if_exists(sf, pkg, 'pkg_postrm')
write_if_exists(sf, pkg, 'pkg_preinst')
@@ -1541,7 +1618,7 @@ python package_do_shlibs() {
shlibswork_dir = d.getVar('SHLIBSWORKDIR')
# Take shared lock since we're only reading, not writing
- lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"))
+ lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
def linux_so(file, needed, sonames, renames, pkgver):
needs_ldconfig = False
@@ -1732,6 +1809,9 @@ python package_do_shlibs() {
for pkg in packages.split():
bb.debug(2, "calculating shlib requirements for %s" % pkg)
+ private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
+ private_libs = private_libs.split()
+
deps = list()
for n in needed[pkg]:
# if n is in private libraries, don't try to search provider for it
@@ -1823,7 +1903,7 @@ python package_do_pkgconfig () {
pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
# Take shared lock since we're only reading, not writing
- lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"))
+ lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
for pkg in packages.split():
pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
@@ -2156,11 +2236,9 @@ do_package[dirs] = "${SHLIBSWORKDIR} ${PKGDESTWORK} ${D}"
do_package[vardeps] += "${PACKAGEBUILDPKGD} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}"
addtask package after do_install
-PACKAGELOCK = "${STAGING_DIR}/package-output.lock"
SSTATETASKS += "do_package"
do_package[cleandirs] = "${PKGDEST} ${PKGDESTWORK}"
do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST} ${PKGDESTWORK}"
-do_package[sstate-lockfile-shared] = "${PACKAGELOCK}"
do_package_setscene[dirs] = "${STAGING_DIR}"
python do_package_setscene () {
@@ -2175,10 +2253,13 @@ do_packagedata () {
addtask packagedata before do_build after do_package
SSTATETASKS += "do_packagedata"
+# PACKAGELOCK protects readers of PKGDATA_DIR against writes
+# made while do_package is reading from it
+PACKAGELOCK = "${STAGING_DIR}/package-output.lock"
do_packagedata[sstate-inputdirs] = "${PKGDESTWORK}"
do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}"
-do_packagedata[sstate-lockfile-shared] = "${PACKAGELOCK}"
-do_packagedata[stamp-extra-info] = "${MACHINE}"
+do_packagedata[sstate-lockfile] = "${PACKAGELOCK}"
+do_packagedata[stamp-extra-info] = "${MACHINE_ARCH}"
python do_packagedata_setscene () {
sstate_setscene(d)
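
Among the emit_pkgdata() additions above, add_set_e_to_scriptlets() is easy to reason about in isolation; a condensed sketch of the same string surgery:

```python
# Inject "set -e" so package scriptlets fail loudly on the first
# error, keeping an existing shebang as the first line if present.
def add_set_e(scriptlet):
    lines = scriptlet.split("\n")
    if lines[0].startswith("#!"):
        return lines[0] + "\nset -e\n" + "\n".join(lines[1:])
    return "set -e\n" + "\n".join(lines)

print(add_set_e("#!/bin/sh\nupdate-rc.d foo defaults"))  # illustrative scriptlet
# #!/bin/sh
# set -e
# update-rc.d foo defaults
```
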
diff --git a/import-layers/yocto-poky/meta/classes/package_deb.bbclass b/import-layers/yocto-poky/meta/classes/package_deb.bbclass
index 5d297939b..2e8d17d3c 100644
--- a/import-layers/yocto-poky/meta/classes/package_deb.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package_deb.bbclass
@@ -230,9 +230,11 @@ def deb_write_pkg(pkg, d):
# '>' = greater or equal
# adjust these to the '<<' and '>>' equivalents
#
- for dep in var:
- if '(' in dep:
- newdep = re.sub(r'[(:)]', '__', dep)
+ for dep in list(var.keys()):
+ if '(' in dep or '/' in dep:
+ newdep = re.sub(r'[(:)/]', '__', dep)
+ if newdep.startswith("__"):
+ newdep = "A" + newdep
if newdep != dep:
var[newdep] = var[dep]
del var[dep]
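
The extended mangling above handles dependencies that are file paths (e.g. /bin/sh) as well as versioned ones. The same rewrite as plain Python, using the regex from the hunk:

```python
# Debian package names cannot contain '(', ':', ')' or '/', and must
# start alphanumerically, hence the '__' rewrite plus 'A' prefix.
import re

def mangle(dep):
    newdep = re.sub(r"[(:)/]", "__", dep)
    if newdep.startswith("__"):
        newdep = "A" + newdep
    return newdep

print(mangle("/bin/sh"))   # A__bin__sh
print(mangle("libfoo"))    # libfoo (unchanged)
```
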
diff --git a/import-layers/yocto-poky/meta/classes/package_ipk.bbclass b/import-layers/yocto-poky/meta/classes/package_ipk.bbclass
index 6c1fdaa39..a0b34fa7a 100644
--- a/import-layers/yocto-poky/meta/classes/package_ipk.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package_ipk.bbclass
@@ -8,7 +8,7 @@ IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
# Program to be used to build opkg packages
-OPKGBUILDCMD ??= "opkg-build"
+OPKGBUILDCMD ??= "opkg-build -Z xz"
OPKG_ARGS += "--force_postinstall --prefer-arch-to-version"
OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
@@ -307,7 +307,7 @@ addtask do_package_write_ipk_setscene
python () {
if d.getVar('PACKAGES') != '':
- deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
+ deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot xz-native:do_populate_sysroot'
d.appendVarFlag('do_package_write_ipk', 'depends', deps)
d.setVarFlag('do_package_write_ipk', 'fakeroot', "1")
}
diff --git a/import-layers/yocto-poky/meta/classes/package_rpm.bbclass b/import-layers/yocto-poky/meta/classes/package_rpm.bbclass
index a428d3064..21ada348a 100644
--- a/import-layers/yocto-poky/meta/classes/package_rpm.bbclass
+++ b/import-layers/yocto-poky/meta/classes/package_rpm.bbclass
@@ -382,6 +382,12 @@ python write_specfile () {
# Gather special src/first package data
if srcname == splitname:
+ archiving = d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and \
+ bb.data.inherits_class('archiver', d)
+ if archiving and srclicense != splitlicense:
+ bb.warn("The SRPM produced may not have the correct overall source license in the License tag. This is due to the LICENSE for the primary package and SRPM conflicting.")
+
+ srclicense = splitlicense
srcrdepends = splitrdepends
srcrrecommends = splitrrecommends
srcrsuggests = splitrsuggests
@@ -421,8 +427,7 @@ python write_specfile () {
spec_preamble_bottom.append('Release: %s' % splitrelease)
if srcepoch != splitepoch:
spec_preamble_bottom.append('Epoch: %s' % splitepoch)
- if srclicense != splitlicense:
- spec_preamble_bottom.append('License: %s' % splitlicense)
+ spec_preamble_bottom.append('License: %s' % splitlicense)
spec_preamble_bottom.append('Group: %s' % splitsection)
if srccustomtagschunk != splitcustomtagschunk:
@@ -665,13 +670,15 @@ python do_package_rpm () {
cmd = rpmbuild
cmd = cmd + " --noclean --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'"
- cmd = cmd + " --define '_builddir " + d.getVar('S') + "'"
+ cmd = cmd + " --define '_builddir " + d.getVar('B') + "'"
cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'"
cmd = cmd + " --define '_use_internal_dependency_generator 0'"
cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'"
cmd = cmd + " --define '_build_id_links none'"
cmd = cmd + " --define '_binary_payload w6T.xzdio'"
cmd = cmd + " --define '_source_payload w6T.xzdio'"
+ cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'"
+ cmd = cmd + " --define '_buildhost reproducible'"
if perfiledeps:
cmd = cmd + " --define '__find_requires " + outdepends + "'"
cmd = cmd + " --define '__find_provides " + outprovides + "'"
diff --git a/import-layers/yocto-poky/meta/classes/packagegroup.bbclass b/import-layers/yocto-poky/meta/classes/packagegroup.bbclass
index eea2e5b9f..d540d4214 100644
--- a/import-layers/yocto-poky/meta/classes/packagegroup.bbclass
+++ b/import-layers/yocto-poky/meta/classes/packagegroup.bbclass
@@ -56,3 +56,4 @@ python () {
bb.fatal("Please ensure that your setting of VIRTUAL-RUNTIME_init_manager (%s) matches the entries enabled in DISTRO_FEATURES" % initman)
}
+CVE_PRODUCT = ""
diff --git a/import-layers/yocto-poky/meta/classes/patch.bbclass b/import-layers/yocto-poky/meta/classes/patch.bbclass
index 8f35cb4f9..2fc6925e4 100644
--- a/import-layers/yocto-poky/meta/classes/patch.bbclass
+++ b/import-layers/yocto-poky/meta/classes/patch.bbclass
@@ -26,9 +26,23 @@ python () {
python patch_task_patch_prefunc() {
# Prefunc for do_patch
- func = d.getVar('BB_RUNTASK')
srcsubdir = d.getVar('S')
+ workdir = os.path.abspath(d.getVar('WORKDIR'))
+ testsrcdir = os.path.abspath(srcsubdir)
+ if (testsrcdir + os.sep).startswith(workdir + os.sep):
+ # Double-check that either workdir or S or some directory in-between is a git repository
+ found = False
+ while testsrcdir != '/':
+ if os.path.exists(os.path.join(testsrcdir, '.git')):
+ found = True
+ break
+ if testsrcdir == workdir:
+ break
+ testsrcdir = os.path.dirname(testsrcdir)
+ if not found:
+ bb.fatal('PATCHTOOL = "git" set for source tree that is not a git repository. Refusing to continue as that may result in commits being made in your metadata repository.')
+
patchdir = os.path.join(srcsubdir, 'patches')
if os.path.exists(patchdir):
if os.listdir(patchdir):
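
The prefunc above refuses PATCHTOOL = "git" when S sits inside WORKDIR without any enclosing git repository. The walk, extracted as a sketch of the same loop:

```python
# Walk upwards from the source dir towards the work dir, accepting
# the tree only if some directory on the way contains ".git".
import os

def find_git_between(srcdir, workdir):
    testdir = os.path.abspath(srcdir)
    workdir = os.path.abspath(workdir)
    while testdir != "/":
        if os.path.exists(os.path.join(testdir, ".git")):
            return True
        if testdir == workdir:
            break
        testdir = os.path.dirname(testdir)
    return False
```
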
diff --git a/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass b/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass
index 30fcefca3..3da350747 100644
--- a/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass
+++ b/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass
@@ -20,6 +20,9 @@ def complementary_globs(featurevar, d):
SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'doc-pkgs', '', d)}"
SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}'
+PACKAGE_ARCHS_append_task-populate-sdk = " sdk-provides-dummy-target"
+SDK_PACKAGE_ARCHS += "sdk-provides-dummy-${SDKPKGSUFFIX}"
+
# List of locales to install, or "all" for all of them, or unset for none.
SDKIMAGE_LINGUAS ?= "all"
@@ -37,12 +40,13 @@ SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
TOOLCHAIN_HOST_TASK ?= "nativesdk-packagegroup-sdk-host packagegroup-cross-canadian-${MACHINE}"
TOOLCHAIN_HOST_TASK_ATTEMPTONLY ?= ""
-TOOLCHAIN_TARGET_TASK ?= "${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')}"
+TOOLCHAIN_TARGET_TASK ?= "${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')} \
+ ${@multilib_pkg_extend(d, 'target-sdk-provides-dummy')}"
TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= ""
TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
-SDK_DEPENDS = "virtual/fakeroot-native pixz-native cross-localedef-native"
+SDK_DEPENDS = "virtual/fakeroot-native xz-native cross-localedef-native ${MLPREFIX}qemuwrapper-cross"
SDK_DEPENDS_append_libc-glibc = " nativesdk-glibc-locale"
# We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it
@@ -152,7 +156,7 @@ SSTATE_SKIP_CREATION_task-populate-sdk = '1'
do_populate_sdk[cleandirs] = "${SDKDEPLOYDIR}"
do_populate_sdk[sstate-inputdirs] = "${SDKDEPLOYDIR}"
do_populate_sdk[sstate-outputdirs] = "${SDK_DEPLOY}"
-do_populate_sdk[stamp-extra-info] = "${MACHINE}${SDKMACHINE}"
+do_populate_sdk[stamp-extra-info] = "${MACHINE_ARCH}${SDKMACHINE}"
fakeroot create_sdk_files() {
cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
@@ -221,7 +225,7 @@ fakeroot tar_sdk() {
# Package it up
mkdir -p ${SDKDEPLOYDIR}
cd ${SDK_OUTPUT}/${SDKPATH}
- tar ${SDKTAROPTS} -cf - . | pixz > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
+ tar ${SDKTAROPTS} -cf - . | xz -T 0 > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
}
fakeroot create_shar() {
diff --git a/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass b/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass
index 2dd21237e..e1bba49ea 100644
--- a/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass
+++ b/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass
@@ -162,18 +162,16 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
except FileNotFoundError:
pass
os.rename(sdkbasepath, temp_sdkbasepath)
+ cmdprefix = '. %s .; ' % conf_initpath
+ logfile = d.getVar('WORKDIR') + '/tasklist_bb_log.txt'
try:
- cmdprefix = '. %s .; ' % conf_initpath
- logfile = d.getVar('WORKDIR') + '/tasklist_bb_log.txt'
- try:
- oe.copy_buildsystem.check_sstate_task_list(d, get_sdk_install_targets(d), tasklistfile, cmdprefix=cmdprefix, cwd=temp_sdkbasepath, logfile=logfile)
- except bb.process.ExecutionError as e:
- msg = 'Failed to generate filtered task list for extensible SDK:\n%s' % e.stdout.rstrip()
- if 'attempted to execute unexpectedly and should have been setscened' in e.stdout:
- msg += '\n----------\n\nNOTE: "attempted to execute unexpectedly and should have been setscened" errors indicate this may be caused by missing sstate artifacts that were likely produced in earlier builds, but have been subsequently deleted for some reason.\n'
- bb.fatal(msg)
- finally:
- os.rename(temp_sdkbasepath, sdkbasepath)
+ oe.copy_buildsystem.check_sstate_task_list(d, get_sdk_install_targets(d), tasklistfile, cmdprefix=cmdprefix, cwd=temp_sdkbasepath, logfile=logfile)
+ except bb.process.ExecutionError as e:
+ msg = 'Failed to generate filtered task list for extensible SDK:\n%s' % e.stdout.rstrip()
+ if 'attempted to execute unexpectedly and should have been setscened' in e.stdout:
+ msg += '\n----------\n\nNOTE: "attempted to execute unexpectedly and should have been setscened" errors indicate this may be caused by missing sstate artifacts that were likely produced in earlier builds, but have been subsequently deleted for some reason.\n'
+ bb.fatal(msg)
+ os.rename(temp_sdkbasepath, sdkbasepath)
# Clean out residue of running bitbake, which check_sstate_task_list()
# will effectively do
clean_esdk_builddir(d, sdkbasepath)
@@ -535,7 +533,7 @@ def get_sdk_required_utilities(buildtools_fn, d):
install_tools() {
install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}
- scripts="devtool recipetool oe-find-native-sysroot runqemu*"
+ scripts="devtool recipetool oe-find-native-sysroot runqemu* wic"
for script in $scripts; do
for scriptfn in `find ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath} -maxdepth 1 -executable -name "$script"`; do
lnr ${scriptfn} ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/`basename $scriptfn`
@@ -724,6 +722,6 @@ SSTATE_SKIP_CREATION_task-populate-sdk-ext = '1'
do_populate_sdk_ext[cleandirs] = "${SDKEXTDEPLOYDIR}"
do_populate_sdk_ext[sstate-inputdirs] = "${SDKEXTDEPLOYDIR}"
do_populate_sdk_ext[sstate-outputdirs] = "${SDK_DEPLOY}"
-do_populate_sdk_ext[stamp-extra-info] = "${MACHINE}"
+do_populate_sdk_ext[stamp-extra-info] = "${MACHINE_ARCH}"
addtask populate_sdk_ext after do_sdk_depends
diff --git a/import-layers/yocto-poky/meta/classes/ptest-perl.bbclass b/import-layers/yocto-poky/meta/classes/ptest-perl.bbclass
new file mode 100644
index 000000000..a4bc40b51
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/ptest-perl.bbclass
@@ -0,0 +1,30 @@
+inherit ptest
+
+FILESEXTRAPATHS_prepend := "${COREBASE}/meta/files:"
+
+SRC_URI += "file://ptest-perl/run-ptest"
+
+do_install_ptest_perl() {
+ install -d ${D}${PTEST_PATH}
+ if [ ! -f ${D}${PTEST_PATH}/run-ptest ]; then
+ install -m 0755 ${WORKDIR}/ptest-perl/run-ptest ${D}${PTEST_PATH}
+ fi
+ cp -r ${B}/t ${D}${PTEST_PATH}
+ chown -R root:root ${D}${PTEST_PATH}
+}
+
+FILES_${PN}-ptest_prepend = "${PTEST_PATH}/t/* ${PTEST_PATH}/run-ptest "
+
+RDEPENDS_${PN}-ptest_prepend = "perl "
+
+addtask install_ptest_perl after do_install_ptest_base before do_package
+
+python () {
+ if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
+ d.setVarFlag('do_install_ptest_perl', 'fakeroot', '1')
+
+ # Remove all '*ptest_perl' tasks when ptest is not enabled
+ if not(d.getVar('PTEST_ENABLED') == "1"):
+ for i in ['do_install_ptest_perl']:
+ bb.build.deltask(i, d)
+}
diff --git a/import-layers/yocto-poky/meta/classes/reproducible_build.bbclass b/import-layers/yocto-poky/meta/classes/reproducible_build.bbclass
new file mode 100644
index 000000000..2df805330
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/reproducible_build.bbclass
@@ -0,0 +1,150 @@
+#
+# reproducible_build.bbclass
+#
+# This bbclass is mainly responsible for determining SOURCE_DATE_EPOCH on a per-recipe basis.
+# We need to set a recipe specific SOURCE_DATE_EPOCH in each recipe environment for various tasks.
+# One way would be to modify all recipes one-by-one to specify SOURCE_DATE_EPOCH explicitly,
+# but that is not realistic as there are hundreds (probably thousands) of recipes in various meta-layers.
+# Therefore we do it in this class.
+# After sources are unpacked but before they are patched, we try to determine the value for SOURCE_DATE_EPOCH.
+#
+# There are 4 ways to determine SOURCE_DATE_EPOCH:
+#
+# 1. Use the value from the __source_date_epoch.txt file if it exists.
+# This file was most likely created in a previous build by one of the following methods 2, 3 or 4.
+# In principle, it could also be provided by a recipe via SRC_URI.
+#
+# If the file does not exist, first try to determine the value for SOURCE_DATE_EPOCH:
+#
+# 2. If we detect a .git folder, use the timestamp of the last git commit, as git does not
+# preserve file timestamps when checking out files.
+#
+# 3. Use the mtime of "known" files such as NEWS, ChangeLog, ...
+# This will work fine for any well kept repository distributed via tarballs.
+#
+# 4. If the above steps fail, we need to check all package source files and use the youngest file of the source tree.
+#
+# Once the value of SOURCE_DATE_EPOCH is determined, it is stored in the recipe's ${WORKDIR}/source-date-epoch folder
+# in a text file "__source_date_epoch.txt". If this file is found by another recipe task, the value is exported in
+# the SOURCE_DATE_EPOCH variable in the task environment. This is done in an anonymous Python function,
+# so SOURCE_DATE_EPOCH is guaranteed to exist for all tasks that may use it (do_configure, do_compile, do_package, ...)
+
+BUILD_REPRODUCIBLE_BINARIES ??= '1'
+inherit ${@oe.utils.ifelse(d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1', 'reproducible_build_simple', '')}
+
+SDE_DIR = "${WORKDIR}/source-date-epoch"
+SDE_FILE = "${SDE_DIR}/__source_date_epoch.txt"
+
+SSTATETASKS += "do_deploy_source_date_epoch"
+
+do_deploy_source_date_epoch () {
+ echo "Deploying SDE to ${SDE_DIR}."
+}
+
+python do_deploy_source_date_epoch_setscene () {
+ sstate_setscene(d)
+}
+
+do_deploy_source_date_epoch[dirs] = "${SDE_DIR}"
+do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DIR}"
+addtask do_deploy_source_date_epoch_setscene
+addtask do_deploy_source_date_epoch before do_configure after do_patch
+
+def get_source_date_epoch_known_files(d, path):
+ source_date_epoch = 0
+ known_files = set(["NEWS", "ChangeLog", "Changelog", "CHANGES"])
+ for file in known_files:
+ filepath = os.path.join(path,file)
+ if os.path.isfile(filepath):
+ mtime = int(os.path.getmtime(filepath))
+            # There may be more than one "known file" present; if so, use the youngest one
+ if mtime > source_date_epoch:
+ source_date_epoch = mtime
+ return source_date_epoch
+
+def find_git_folder(path):
+ exclude = set(["temp", "license-destdir", "patches", "recipe-sysroot-native", "recipe-sysroot", "pseudo", "build", "image", "sysroot-destdir"])
+ for root, dirs, files in os.walk(path, topdown=True):
+ dirs[:] = [d for d in dirs if d not in exclude]
+ if '.git' in dirs:
+ #bb.warn("found root:%s" % (str(root)))
+ return root
+
+def get_source_date_epoch_git(d, path):
+ source_date_epoch = 0
+ if "git://" in d.getVar('SRC_URI'):
+ gitpath = find_git_folder(d.getVar('WORKDIR'))
+        if gitpath is not None:
+ import subprocess
+ if os.path.isdir(os.path.join(gitpath,".git")):
+ try:
+ source_date_epoch = int(subprocess.check_output(['git','log','-1','--pretty=%ct'], cwd=path))
+ #bb.warn("JB *** gitpath:%s sde: %d" % (gitpath,source_date_epoch))
+ bb.debug(1, "git repo path:%s sde: %d" % (gitpath,source_date_epoch))
+ except subprocess.CalledProcessError as grepexc:
+ #bb.warn( "Expected git repository not found, (path: %s) error:%d" % (gitpath, grepexc.returncode))
+ bb.debug(1, "Expected git repository not found, (path: %s) error:%d" % (gitpath, grepexc.returncode))
+ else:
+ bb.warn("Failed to find a git repository for path:%s" % (path))
+ return source_date_epoch
+
+python do_create_source_date_epoch_stamp() {
+ path = d.getVar('S')
+ if not os.path.isdir(path):
+ bb.warn("Unable to determine source_date_epoch! path:%s" % path)
+ return
+
+ epochfile = d.getVar('SDE_FILE')
+ if os.path.isfile(epochfile):
+ bb.debug(1, " path: %s reusing __source_date_epoch.txt" % epochfile)
+ return
+
+ # Try to detect/find a git repository
+ source_date_epoch = get_source_date_epoch_git(d, path)
+ if source_date_epoch == 0:
+ source_date_epoch = get_source_date_epoch_known_files(d, path)
+ if source_date_epoch == 0:
+ # Do it the hard way: check all files and find the youngest one...
+ filename_dbg = None
+ exclude = set(["temp", "license-destdir", "patches", "recipe-sysroot-native", "recipe-sysroot", "pseudo", "build", "image", "sysroot-destdir"])
+ for root, dirs, files in os.walk(path, topdown=True):
+ files = [f for f in files if not f[0] == '.']
+ dirs[:] = [d for d in dirs if d not in exclude]
+
+ for fname in files:
+ filename = os.path.join(root, fname)
+ try:
+ mtime = int(os.path.getmtime(filename))
+                except OSError:
+ mtime = 0
+ if mtime > source_date_epoch:
+ source_date_epoch = mtime
+ filename_dbg = filename
+
+        if filename_dbg is not None:
+ bb.debug(1," SOURCE_DATE_EPOCH %d derived from: %s" % (source_date_epoch, filename_dbg))
+
+ if source_date_epoch == 0:
+ # empty folder, not a single file ...
+ # kernel source do_unpack is special cased
+ if not bb.data.inherits_class('kernel', d):
+ bb.debug(1, "Unable to determine source_date_epoch! path:%s" % path)
+
+ bb.utils.mkdirhier(d.getVar('SDE_DIR'))
+ with open(epochfile, 'w') as f:
+ f.write(str(source_date_epoch))
+}
+
+BB_HASHBASE_WHITELIST += "SOURCE_DATE_EPOCH"
+
+python () {
+ if d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1':
+ d.appendVarFlag("do_unpack", "postfuncs", " do_create_source_date_epoch_stamp")
+ epochfile = d.getVar('SDE_FILE')
+ source_date_epoch = "0"
+ if os.path.isfile(epochfile):
+ with open(epochfile, 'r') as f:
+ source_date_epoch = f.read()
+ bb.debug(1, "source_date_epoch stamp found ---> stamp %s" % source_date_epoch)
+ d.setVar('SOURCE_DATE_EPOCH', source_date_epoch)
+}
diff --git a/import-layers/yocto-poky/meta/classes/reproducible_build_simple.bbclass b/import-layers/yocto-poky/meta/classes/reproducible_build_simple.bbclass
new file mode 100644
index 000000000..8a60deef3
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/reproducible_build_simple.bbclass
@@ -0,0 +1,10 @@
+# Setup default environment for reproducible builds.
+
+BUILD_REPRODUCIBLE_BINARIES = "1"
+
+export PYTHONHASHSEED = "0"
+export PERL_HASH_SEED = "0"
+export SOURCE_DATE_EPOCH ??= "1520598896"
+
+REPRODUCIBLE_TIMESTAMP_ROOTFS ??= "1520598896"
+
diff --git a/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass b/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass
index a4e627fef..552220953 100644
--- a/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass
+++ b/import-layers/yocto-poky/meta/classes/rootfs-postcommands.bbclass
@@ -56,6 +56,7 @@ ROOTFS_POSTPROCESS_COMMAND_append_qemuall = "${SSH_DISABLE_DNS_LOOKUP}"
SORT_PASSWD_POSTPROCESS_COMMAND ??= " sort_passwd; "
python () {
d.appendVar('ROOTFS_POSTPROCESS_COMMAND', '${SORT_PASSWD_POSTPROCESS_COMMAND}')
+ d.appendVar('ROOTFS_POSTPROCESS_COMMAND', 'rootfs_reproducible;')
}
systemd_create_users () {
@@ -256,10 +257,17 @@ python write_image_manifest () {
os.symlink(os.path.basename(manifest_name), manifest_link)
}
-# Can be use to create /etc/timestamp during image construction to give a reasonably
+# Can be used to create /etc/timestamp during image construction to give a reasonably
# sane default time setting
rootfs_update_timestamp () {
- date -u +%4Y%2m%2d%2H%2M%2S >${IMAGE_ROOTFS}/etc/timestamp
+ if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" != "" ]; then
+ # Convert UTC into %4Y%2m%2d%2H%2M%2S
+ sformatted=`date -u -d @${REPRODUCIBLE_TIMESTAMP_ROOTFS} +%4Y%2m%2d%2H%2M%2S`
+ else
+ sformatted=`date -u +%4Y%2m%2d%2H%2M%2S`
+ fi
+ echo $sformatted > ${IMAGE_ROOTFS}/etc/timestamp
+ bbnote "rootfs_update_timestamp: set /etc/timestamp to $sformatted"
}
# Prevent X from being started
@@ -328,3 +336,16 @@ python rootfs_log_check_recommends() {
if 'unsatisfied recommendation for' in line:
bb.warn('[log_check] %s: %s' % (d.getVar('PN'), line))
}
+
+# Perform any additional adjustments needed to make the rootfs binary reproducible
+rootfs_reproducible () {
+ if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" != "" ]; then
+ # Convert UTC into %4Y%2m%2d%2H%2M%2S
+ sformatted=`date -u -d @${REPRODUCIBLE_TIMESTAMP_ROOTFS} +%4Y%2m%2d%2H%2M%2S`
+ echo $sformatted > ${IMAGE_ROOTFS}/etc/version
+ bbnote "rootfs_reproducible: set /etc/version to $sformatted"
+
+ find ${IMAGE_ROOTFS}/etc/gconf -name '%gconf.xml' -print0 | xargs -0r \
+ sed -i -e 's@\bmtime="[0-9][0-9]*"@mtime="'${REPRODUCIBLE_TIMESTAMP_ROOTFS}'"@g'
+ fi
+}
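
For reference, the timestamp formatting both helpers use (shell `date -u -d @EPOCH +%4Y%2m%2d%2H%2M%2S`) is equivalent to the following, using the example epoch that reproducible_build_simple.bbclass defaults to:

```python
# Render a fixed epoch as the UTC %Y%m%d%H%M%S string written to
# /etc/timestamp and /etc/version (GNU date's %4Y/%2m padding matches
# Python's defaults for these fields).
from datetime import datetime, timezone

epoch = 1520598896  # default from reproducible_build_simple.bbclass
print(datetime.fromtimestamp(epoch, tz=timezone.utc).strftime("%Y%m%d%H%M%S"))
# 20180309123456
```
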
diff --git a/import-layers/yocto-poky/meta/classes/sanity.bbclass b/import-layers/yocto-poky/meta/classes/sanity.bbclass
index 1feb7949d..e0e57ceec 100644
--- a/import-layers/yocto-poky/meta/classes/sanity.bbclass
+++ b/import-layers/yocto-poky/meta/classes/sanity.bbclass
@@ -336,7 +336,7 @@ def check_path_length(filepath, pathname, limit):
return ""
def get_filesystem_id(path):
- status, result = oe.utils.getstatusoutput("stat -f -c '%s' %s" % ("%t", path))
+ status, result = oe.utils.getstatusoutput("stat -f -c '%s' '%s'" % ("%t", path))
if status == 0:
return result
else:
@@ -456,13 +456,29 @@ def check_sanity_validmachine(sanity_data):
return messages
+# Versions of patch older than 2.7 can't handle all the features in git-style diffs. Some
+# patches may apply incorrectly, and others won't apply at all.
+def check_patch_version(sanity_data):
+ from distutils.version import LooseVersion
+ import re, subprocess
+
+ try:
+ result = subprocess.check_output(["patch", "--version"], stderr=subprocess.STDOUT, universal_newlines=True)
+ version = re.search(r"[0-9.]+", result.splitlines()[0]).group()
+ if LooseVersion(version) < LooseVersion("2.7"):
+ return "Your version of patch is older than 2.7 and has bugs which will break builds. Please install a newer version of patch.\n"
+ else:
+ return None
+ except subprocess.CalledProcessError as e:
+ return "Unable to execute patch --version, exit code %d:\n%s\n" % (e.returncode, e.output)
+
# Unpatched versions of make 3.82 are known to be broken. See GNU Savannah Bug 30612.
# Use a modified reproducer from http://savannah.gnu.org/bugs/?30612 to validate.
def check_make_version(sanity_data):
from distutils.version import LooseVersion
status, result = oe.utils.getstatusoutput("make --version")
if status != 0:
- return "Unable to execute make --version, exit code %s\n" % status
+ return "Unable to execute make --version, exit code %d\n" % status
version = result.split()[2]
if LooseVersion(version) == LooseVersion("3.82"):
# Construct a test file
@@ -498,7 +514,7 @@ def check_tar_version(sanity_data):
from distutils.version import LooseVersion
status, result = oe.utils.getstatusoutput("tar --version")
if status != 0:
- return "Unable to execute tar --version, exit code %s\n" % status
+ return "Unable to execute tar --version, exit code %d\n" % status
version = result.split()[3]
if LooseVersion(version) < LooseVersion("1.24"):
return "Your version of tar is older than 1.24 and has bugs which will break builds. Please install a newer version of tar.\n"
@@ -511,7 +527,7 @@ def check_git_version(sanity_data):
from distutils.version import LooseVersion
status, result = oe.utils.getstatusoutput("git --version 2> /dev/null")
if status != 0:
- return "Unable to execute git --version, exit code %s\n" % status
+ return "Unable to execute git --version, exit code %d\n" % status
version = result.split()[2]
if LooseVersion(version) < LooseVersion("1.8.3.1"):
return "Your version of git is older than 1.8.3.1 and has bugs which will break builds. Please install a newer version of git.\n"
@@ -596,6 +612,7 @@ def check_sanity_version_change(status, d):
import stat
status.addresult(check_make_version(d))
+ status.addresult(check_patch_version(d))
status.addresult(check_tar_version(d))
status.addresult(check_git_version(d))
status.addresult(check_perl_modules(d))
@@ -692,7 +709,7 @@ def sanity_check_locale(d):
try:
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
except locale.Error:
- raise_sanity_error("You system needs to support the en_US.UTF-8 locale.", d)
+ raise_sanity_error("Your system needs to support the en_US.UTF-8 locale.", d)
def check_sanity_everybuild(status, d):
import os, stat
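
check_patch_version() above follows the same pattern as the existing make/tar/git checks. Generalized as a sketch (the real checks differ in which token of the output they parse):

```python
# Run a tool, pull the first dotted version number from the first
# line of its output, and compare against a required minimum.
import re, subprocess
from distutils.version import LooseVersion

def check_min_version(cmd, minimum):
    out = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
                                  universal_newlines=True)
    version = re.search(r"[0-9.]+", out.splitlines()[0]).group()
    return LooseVersion(version) >= LooseVersion(minimum)

# e.g. check_min_version(["patch", "--version"], "2.7")
```
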
diff --git a/import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass b/import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass
index f03c4802d..7ff3a35a2 100644
--- a/import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass
+++ b/import-layers/yocto-poky/meta/classes/sign_package_feed.bbclass
@@ -43,4 +43,4 @@ python () {
}
do_package_index[depends] += "signing-keys:do_deploy"
-do_rootfs[depends] += "signing-keys:do_populate_sysroot"
+do_rootfs[depends] += "signing-keys:do_populate_sysroot gnupg-native:do_populate_sysroot"
diff --git a/import-layers/yocto-poky/meta/classes/sign_rpm.bbclass b/import-layers/yocto-poky/meta/classes/sign_rpm.bbclass
index 4961b0361..64ae7ce30 100644
--- a/import-layers/yocto-poky/meta/classes/sign_rpm.bbclass
+++ b/import-layers/yocto-poky/meta/classes/sign_rpm.bbclass
@@ -68,8 +68,4 @@ python sign_rpm () {
do_package_index[depends] += "signing-keys:do_deploy"
do_rootfs[depends] += "signing-keys:do_populate_sysroot"
-# Newer versions of gpg (at least 2.1.5 and 2.2.1) have issues when signing occurs in parallel
-# so unfortunately the signing must be done serially. Once the upstream problem is fixed,
-# the following line must be removed otherwise we loose all the intrinsic parallelism from
-# bitbake. For more information, check https://bugzilla.yoctoproject.org/show_bug.cgi?id=12022.
-do_package_write_rpm[lockfiles] += "${TMPDIR}/gpg.lock"
+PACKAGE_WRITE_DEPS += "gnupg-native"
diff --git a/import-layers/yocto-poky/meta/classes/siteinfo.bbclass b/import-layers/yocto-poky/meta/classes/siteinfo.bbclass
index 1aada4069..86bb853be 100644
--- a/import-layers/yocto-poky/meta/classes/siteinfo.bbclass
+++ b/import-layers/yocto-poky/meta/classes/siteinfo.bbclass
@@ -47,6 +47,8 @@ def siteinfo_data(d):
"ppc": "endian-big bit-32 powerpc-common",
"ppc64": "endian-big bit-64 powerpc-common",
"ppc64le" : "endian-little bit-64 powerpc-common",
+ "riscv32": "endian-little bit-32 riscv-common",
+ "riscv64": "endian-little bit-64 riscv-common",
"sh3": "endian-little bit-32 sh-common",
"sh4": "endian-little bit-32 sh-common",
"sparc": "endian-big bit-32",
@@ -58,6 +60,7 @@ def siteinfo_data(d):
"darwin9": "common-darwin",
"linux": "common-linux common-glibc",
"linux-gnu": "common-linux common-glibc",
+ "linux-gnu_ilp32": "common-linux common-glibc",
"linux-gnux32": "common-linux common-glibc",
"linux-gnun32": "common-linux common-glibc",
"linux-gnueabi": "common-linux common-glibc",
@@ -73,12 +76,18 @@ def siteinfo_data(d):
targetinfo = {
"aarch64-linux-gnu": "aarch64-linux",
"aarch64_be-linux-gnu": "aarch64_be-linux",
+ "aarch64-linux-gnu_ilp32": "bit-32 aarch64_be-linux arm-32",
+ "aarch64_be-linux-gnu_ilp32": "bit-32 aarch64_be-linux arm-32",
"aarch64-linux-musl": "aarch64-linux",
"aarch64_be-linux-musl": "aarch64_be-linux",
"arm-linux-gnueabi": "arm-linux",
"arm-linux-musleabi": "arm-linux",
"armeb-linux-gnueabi": "armeb-linux",
"armeb-linux-musleabi": "armeb-linux",
+ "microblazeeb-linux" : "microblaze-linux",
+ "microblazeeb-linux-musl" : "microblaze-linux",
+ "microblazeel-linux" : "microblaze-linux",
+ "microblazeel-linux-musl" : "microblaze-linux",
"mips-linux-musl": "mips-linux",
"mipsel-linux-musl": "mipsel-linux",
"mips64-linux-musl": "mips64-linux",
@@ -95,6 +104,10 @@ def siteinfo_data(d):
"powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux",
"powerpc64-linux": "powerpc-linux",
"powerpc64-linux-musl": "powerpc-linux",
+ "riscv32-linux": "riscv32-linux",
+ "riscv32-linux-musl": "riscv32-linux",
+ "riscv64-linux": "riscv64-linux",
+ "riscv64-linux-musl": "riscv64-linux",
"x86_64-cygwin": "bit-64",
"x86_64-darwin": "bit-64",
"x86_64-darwin9": "bit-64",
diff --git a/import-layers/yocto-poky/meta/classes/sstate.bbclass b/import-layers/yocto-poky/meta/classes/sstate.bbclass
index e30fbe128..0b2885014 100644
--- a/import-layers/yocto-poky/meta/classes/sstate.bbclass
+++ b/import-layers/yocto-poky/meta/classes/sstate.bbclass
@@ -28,6 +28,9 @@ SSTATE_EXTRAPATH[vardepvalue] = ""
SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/ ${DEPLOY_DIR_RPM}/noarch/"
# Avoid docbook/sgml catalog warnings for now
SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
+# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
# Archive the sources for many architectures in one deploy folder
SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"
@@ -45,7 +48,8 @@ SSTATE_ARCHS = " \
${SDK_ARCH}_${PACKAGE_ARCH} \
allarch \
${PACKAGE_ARCH} \
- ${MACHINE}"
+ ${PACKAGE_EXTRA_ARCHS} \
+ ${MACHINE_ARCH}"
SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
@@ -538,15 +542,15 @@ python sstate_hardcode_path () {
staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
sstate_builddir = d.getVar('SSTATE_BUILDDIR')
+ sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
- sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % (staging_host)
elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
- sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRTARGET:g; s:%s:FIXMESTAGINGDIRHOST:g'" % (staging_target, staging_host)
+ sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
else:
- sstate_grep_cmd = "grep -l -e '%s'" % (staging_target)
- sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % (staging_target)
+ sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
+ sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
for fixmevar in extra_staging_fixmes.split():
@@ -845,7 +849,7 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
missed.append(task)
bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
pass
- bb.event.fire(bb.event.ProcessProgress("Checking sstate mirror object availability", len(tasklist) - thread_worker.tasks.qsize()), d)
+ bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
tasklist = []
for task in range(len(sq_fn)):
@@ -856,7 +860,8 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
tasklist.append((task, sstatefile))
if tasklist:
- bb.event.fire(bb.event.ProcessStarted("Checking sstate mirror object availability", len(tasklist)), d)
+ msg = "Checking sstate mirror object availability"
+ bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
import multiprocessing
nproc = min(multiprocessing.cpu_count(), len(tasklist))
@@ -870,7 +875,7 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
pool.wait_completion()
bb.event.disable_threadlock()
- bb.event.fire(bb.event.ProcessFinished("Checking sstate mirror object availability"), d)
+ bb.event.fire(bb.event.ProcessFinished(msg), d)
inheritlist = d.getVar("INHERIT")
if "toaster" in inheritlist:
@@ -1022,7 +1027,7 @@ python sstate_eventhandler2() {
with open(preservestampfile, 'r') as f:
preservestamps = f.readlines()
seen = []
- for a in d.getVar("SSTATE_ARCHS").split():
+ for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
toremove = []
i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
if not os.path.exists(i):
@@ -1038,18 +1043,25 @@ python sstate_eventhandler2() {
seen.append(stamp)
if toremove:
- bb.note("There are %d recipes to be removed from sysroot %s, removing..." % (len(toremove), a))
+ msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
+ bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
+
+ removed = 0
+ for r in toremove:
+ (stamp, manifest, workdir) = r.split()
+ for m in glob.glob(manifest + ".*"):
+ if m.endswith(".postrm"):
+ continue
+ sstate_clean_manifest(m, d)
+ bb.utils.remove(stamp + "*")
+ if removeworkdir:
+ bb.utils.remove(workdir, recurse = True)
+ lines.remove(r)
+ removed = removed + 1
+ bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
+
+ bb.event.fire(bb.event.ProcessFinished(msg), d)
- for r in toremove:
- (stamp, manifest, workdir) = r.split()
- for m in glob.glob(manifest + ".*"):
- if m.endswith(".postrm"):
- continue
- sstate_clean_manifest(m, d)
- bb.utils.remove(stamp + "*")
- if removeworkdir:
- bb.utils.remove(workdir, recurse = True)
- lines.remove(r)
with open(i, "w") as f:
for l in lines:
f.write(l)
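
The sstate hunks above share one theme: long-running loops now report through BitBake's progress event API instead of a single bb.note(), so mirror checks and sysroot cleanup show up as progress bars in the UI. A minimal sketch of the pattern, assuming class Python where bb and the datastore d are in scope (names such as toremove and arch are illustrative):

    msg = "Removing %d recipes from the %s sysroot" % (len(toremove), arch)
    bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
    removed = 0
    for item in toremove:
        # ... clean the manifest, stamps and workdir for this item ...
        removed += 1
        bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
    bb.event.fire(bb.event.ProcessFinished(msg), d)

Reusing the same msg string across all three events is what lets the UI associate the progress updates with the started process.
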
diff --git a/import-layers/yocto-poky/meta/classes/staging.bbclass b/import-layers/yocto-poky/meta/classes/staging.bbclass
index c479bd93e..3fcbc9f15 100644
--- a/import-layers/yocto-poky/meta/classes/staging.bbclass
+++ b/import-layers/yocto-poky/meta/classes/staging.bbclass
@@ -171,7 +171,7 @@ def staging_processfixme(fixme, target, recipesysroot, recipesysrootnative, d):
fixme_path = d.getVar(fixmevar)
cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
bb.debug(2, cmd)
- subprocess.check_output(cmd, shell=True)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
def staging_populate_sysroot_dir(targetsysroot, nativesysroot, native, d):
@@ -228,7 +228,7 @@ def staging_populate_sysroot_dir(targetsysroot, nativesysroot, native, d):
staging_processfixme(fixme, targetdir, targetsysroot, nativesysroot, d)
for p in postinsts:
- subprocess.check_output(p, shell=True)
+ subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)
#
# Manifests here are complicated. The main sysroot area has the unpacked sstate
@@ -470,40 +470,14 @@ python extend_recipe_sysroot() {
os.symlink(c + "." + taskhash, depdir + "/" + c)
- d2 = d
- destsysroot = recipesysroot
- variant = ''
- if setscenedeps[dep][2].startswith("virtual:multilib"):
- variant = setscenedeps[dep][2].split(":")[2]
- if variant != current_variant:
- if variant not in multilibs:
- multilibs[variant] = get_multilib_datastore(variant, d)
- d2 = multilibs[variant]
- destsysroot = d2.getVar("RECIPE_SYSROOT")
+ manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "populate_sysroot", d, multilibs)
+ destsysroot = d2.getVar("RECIPE_SYSROOT")
native = False
- if c.endswith("-native"):
- manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-${BUILD_ARCH}-%s.populate_sysroot" % c)
+ if c.endswith("-native") or "-cross-" in c or "-crosssdk" in c:
native = True
- elif c.startswith("nativesdk-"):
- manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-${SDK_ARCH}_${SDK_OS}-%s.populate_sysroot" % c)
- elif "-cross-" in c:
- manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-${BUILD_ARCH}_${TARGET_ARCH}-%s.populate_sysroot" % c)
- native = True
- elif "-crosssdk" in c:
- manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}-%s.populate_sysroot" % c)
- native = True
- else:
- pkgarchs = ['${MACHINE_ARCH}']
- pkgarchs = pkgarchs + list(reversed(d2.getVar("PACKAGE_EXTRA_ARCHS").split()))
- pkgarchs.append('allarch')
- for pkgarch in pkgarchs:
- manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.populate_sysroot" % (pkgarch, c))
- if os.path.exists(manifest):
- break
- if not os.path.exists(manifest):
- bb.warn("Manifest %s not found?" % manifest)
- else:
+
+ if manifest:
newmanifest = collections.OrderedDict()
if native:
fm = fixme['native']
@@ -576,7 +550,7 @@ python extend_recipe_sysroot() {
staging_processfixme(fixme[f], multilibs[f].getVar("RECIPE_SYSROOT"), recipesysroot, recipesysrootnative, d)
for p in postinsts:
- subprocess.check_output(p, shell=True)
+ subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)
for dep in manifests:
c = setscenedeps[dep][0]
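
The recurring change in this file is adding stderr=subprocess.STDOUT to the subprocess.check_output() calls, so that when a fixme rewrite or a postinst fails, its diagnostics land in the CalledProcessError instead of interleaving with the build console. A small illustration of the difference, using a deliberately failing command:

    import subprocess
    try:
        subprocess.check_output("echo broken >&2; exit 1", shell=True,
                                stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print(e.output)  # b'broken\n' -- stderr captured for the error handler
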
diff --git a/import-layers/yocto-poky/meta/classes/syslinux.bbclass b/import-layers/yocto-poky/meta/classes/syslinux.bbclass
index d6f882420..031dacbf7 100644
--- a/import-layers/yocto-poky/meta/classes/syslinux.bbclass
+++ b/import-layers/yocto-poky/meta/classes/syslinux.bbclass
@@ -173,8 +173,9 @@ python build_syslinux_cfg () {
if not root:
bb.fatal('SYSLINUX_ROOT not defined')
+ kernel = localdata.getVar('KERNEL_IMAGETYPE')
for btype in btypes:
- cfgfile.write('LABEL %s%s\nKERNEL /vmlinuz\n' % (btype[0], label))
+ cfgfile.write('LABEL %s%s\nKERNEL /%s\n' % (btype[0], label, kernel))
exargs = d.getVar('SYSLINUX_KERNEL_ARGS')
if exargs:
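
With this change the label stanza follows the configured KERNEL_IMAGETYPE rather than a hardcoded /vmlinuz. For a machine with KERNEL_IMAGETYPE = "bzImage" and a label "boot", the generated entry would read (illustrative):

    LABEL boot
    KERNEL /bzImage
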
diff --git a/import-layers/yocto-poky/meta/classes/systemd-boot-cfg.bbclass b/import-layers/yocto-poky/meta/classes/systemd-boot-cfg.bbclass
new file mode 100644
index 000000000..021c9f933
--- /dev/null
+++ b/import-layers/yocto-poky/meta/classes/systemd-boot-cfg.bbclass
@@ -0,0 +1,68 @@
+SYSTEMD_BOOT_CFG ?= "${S}/loader.conf"
+SYSTEMD_BOOT_ENTRIES ?= ""
+SYSTEMD_BOOT_TIMEOUT ?= "10"
+
+# Need UUID utility code.
+inherit fs-uuid
+
+python build_efi_cfg() {
+ s = d.getVar("S")
+ labels = d.getVar('LABELS')
+ if not labels:
+ bb.debug(1, "LABELS not defined, nothing to do")
+ return
+
+ if labels == []:
+ bb.debug(1, "No labels, nothing to do")
+ return
+
+ cfile = d.getVar('SYSTEMD_BOOT_CFG')
+ cdir = os.path.dirname(cfile)
+ if not os.path.exists(cdir):
+ os.makedirs(cdir)
+ try:
+ cfgfile = open(cfile, 'w')
+ except OSError:
+ bb.fatal('Unable to open %s' % cfile)
+
+ cfgfile.write('# Automatically created by OE\n')
+ cfgfile.write('default %s\n' % (labels.split()[0]))
+ timeout = d.getVar('SYSTEMD_BOOT_TIMEOUT')
+ if timeout:
+ cfgfile.write('timeout %s\n' % timeout)
+ else:
+ cfgfile.write('timeout 10\n')
+ cfgfile.close()
+
+ for label in labels.split():
+ localdata = d.createCopy()
+
+ entryfile = "%s/%s.conf" % (s, label)
+ if not os.path.exists(s):
+ os.makedirs(s)
+ d.appendVar("SYSTEMD_BOOT_ENTRIES", " " + entryfile)
+ try:
+ entrycfg = open(entryfile, "w")
+ except OSError:
+ bb.fatal('Unable to open %s' % entryfile)
+
+ entrycfg.write('title %s\n' % label)
+
+ kernel = localdata.getVar("KERNEL_IMAGETYPE")
+ entrycfg.write('linux /%s\n' % kernel)
+
+ append = localdata.getVar('APPEND')
+ initrd = localdata.getVar('INITRD')
+
+ if initrd:
+ entrycfg.write('initrd /initrd\n')
+ lb = label
+ if label == "install":
+ lb = "install-efi"
+ entrycfg.write('options LABEL=%s ' % lb)
+ if append:
+ append = replace_rootfs_uuid(d, append)
+ entrycfg.write('%s' % append)
+ entrycfg.write('\n')
+ entrycfg.close()
+}
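
For illustration, with LABELS = "boot install", KERNEL_IMAGETYPE = "bzImage", the default timeout and INITRD set, build_efi_cfg would emit a loader.conf plus one entry file per label along these lines (the tail of the options line comes from APPEND after replace_rootfs_uuid(), so it is illustrative):

    # loader.conf
    # Automatically created by OE
    default boot
    timeout 10

    # install.conf
    title install
    linux /bzImage
    initrd /initrd
    options LABEL=install-efi root=/dev/sda2 rootwait

Note the special case: the "install" label boots with LABEL=install-efi.
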
diff --git a/import-layers/yocto-poky/meta/classes/systemd-boot.bbclass b/import-layers/yocto-poky/meta/classes/systemd-boot.bbclass
index 937307076..3cd6811a6 100644
--- a/import-layers/yocto-poky/meta/classes/systemd-boot.bbclass
+++ b/import-layers/yocto-poky/meta/classes/systemd-boot.bbclass
@@ -12,11 +12,6 @@
do_bootimg[depends] += "${MLPREFIX}systemd-boot:do_deploy"
EFIDIR = "/EFI/BOOT"
-
-SYSTEMD_BOOT_CFG ?= "${S}/loader.conf"
-SYSTEMD_BOOT_ENTRIES ?= ""
-SYSTEMD_BOOT_TIMEOUT ?= "10"
-
# Need UUID utility code.
inherit fs-uuid
@@ -50,7 +45,7 @@ efi_iso_populate() {
mkdir -p ${EFIIMGDIR}/${EFIDIR}
cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
cp -r $iso_dir/loader ${EFIIMGDIR}
- cp $iso_dir/vmlinuz ${EFIIMGDIR}
+ cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}
EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
echo "fs0:${EFIPATH}\\${DEST_EFI_IMAGE}" > ${EFIIMGDIR}/startup.nsh
if [ -f "$iso_dir/initrd" ] ; then
@@ -62,67 +57,4 @@ efi_hddimg_populate() {
efi_populate $1
}
-python build_efi_cfg() {
- s = d.getVar("S")
- labels = d.getVar('LABELS')
- if not labels:
- bb.debug(1, "LABELS not defined, nothing to do")
- return
-
- if labels == []:
- bb.debug(1, "No labels, nothing to do")
- return
-
- cfile = d.getVar('SYSTEMD_BOOT_CFG')
- cdir = os.path.dirname(cfile)
- if not os.path.exists(cdir):
- os.makedirs(cdir)
- try:
- cfgfile = open(cfile, 'w')
- except OSError:
- bb.fatal('Unable to open %s' % cfile)
-
- cfgfile.write('# Automatically created by OE\n')
- cfgfile.write('default %s\n' % (labels.split()[0]))
- timeout = d.getVar('SYSTEMD_BOOT_TIMEOUT')
- if timeout:
- cfgfile.write('timeout %s\n' % timeout)
- else:
- cfgfile.write('timeout 10\n')
- cfgfile.close()
-
- for label in labels.split():
- localdata = d.createCopy()
-
- overrides = localdata.getVar('OVERRIDES')
- if not overrides:
- bb.fatal('OVERRIDES not defined')
-
- entryfile = "%s/%s.conf" % (s, label)
- if not os.path.exists(s):
- os.makedirs(s)
- d.appendVar("SYSTEMD_BOOT_ENTRIES", " " + entryfile)
- try:
- entrycfg = open(entryfile, "w")
- except OSError:
- bb.fatal('Unable to open %s' % entryfile)
- localdata.setVar('OVERRIDES', label + ':' + overrides)
-
- entrycfg.write('title %s\n' % label)
- entrycfg.write('linux /vmlinuz\n')
-
- append = localdata.getVar('APPEND')
- initrd = localdata.getVar('INITRD')
-
- if initrd:
- entrycfg.write('initrd /initrd\n')
- lb = label
- if label == "install":
- lb = "install-efi"
- entrycfg.write('options LABEL=%s ' % lb)
- if append:
- append = replace_rootfs_uuid(d, append)
- entrycfg.write('%s' % append)
- entrycfg.write('\n')
- entrycfg.close()
-}
+inherit systemd-boot-cfg
diff --git a/import-layers/yocto-poky/meta/classes/testimage.bbclass b/import-layers/yocto-poky/meta/classes/testimage.bbclass
index 45bb2bda3..77291c22c 100644
--- a/import-layers/yocto-poky/meta/classes/testimage.bbclass
+++ b/import-layers/yocto-poky/meta/classes/testimage.bbclass
@@ -44,20 +44,22 @@ DEVTESTSUITE = "gcc kernelmodule ldd"
DEFAULT_TEST_SUITES = "${MINTESTSUITE} auto"
DEFAULT_TEST_SUITES_pn-core-image-minimal = "${MINTESTSUITE}"
DEFAULT_TEST_SUITES_pn-core-image-minimal-dev = "${MINTESTSUITE}"
-DEFAULT_TEST_SUITES_pn-core-image-full-cmdline = "${NETTESTSUITE} perl python logrotate"
+DEFAULT_TEST_SUITES_pn-core-image-full-cmdline = "${NETTESTSUITE} perl python logrotate ptest"
DEFAULT_TEST_SUITES_pn-core-image-x11 = "${MINTESTSUITE}"
-DEFAULT_TEST_SUITES_pn-core-image-lsb = "${NETTESTSUITE} pam parselogs ${RPMTESTSUITE}"
+DEFAULT_TEST_SUITES_pn-core-image-lsb = "${NETTESTSUITE} pam parselogs ${RPMTESTSUITE} ptest"
DEFAULT_TEST_SUITES_pn-core-image-sato = "${NETTESTSUITE} connman xorg parselogs ${RPMTESTSUITE} \
- ${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'python', '', d)}"
+ ${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'python', '', d)} ptest gi"
DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "${NETTESTSUITE} buildcpio buildlzip buildgalculator \
- connman ${DEVTESTSUITE} logrotate perl parselogs python ${RPMTESTSUITE} xorg"
-DEFAULT_TEST_SUITES_pn-core-image-lsb-dev = "${NETTESTSUITE} pam perl python parselogs ${RPMTESTSUITE}"
+ connman ${DEVTESTSUITE} logrotate perl parselogs python ${RPMTESTSUITE} xorg ptest gi stap"
+DEFAULT_TEST_SUITES_pn-core-image-lsb-dev = "${NETTESTSUITE} pam perl python parselogs ${RPMTESTSUITE} ptest gi"
DEFAULT_TEST_SUITES_pn-core-image-lsb-sdk = "${NETTESTSUITE} buildcpio buildlzip buildgalculator \
- connman ${DEVTESTSUITE} logrotate pam parselogs perl python ${RPMTESTSUITE}"
+ connman ${DEVTESTSUITE} logrotate pam parselogs perl python ${RPMTESTSUITE} ptest gi stap"
DEFAULT_TEST_SUITES_pn-meta-toolchain = "auto"
# aarch64 has no graphics
DEFAULT_TEST_SUITES_remove_aarch64 = "xorg"
+# musl doesn't support systemtap
+DEFAULT_TEST_SUITES_remove_libc-musl = "stap"
# qemumips is quite slow and has reached the timeout limit several times on the YP build cluster,
# mitigate this by removing build tests for qemumips machines.
@@ -215,7 +217,7 @@ def testimage_main(d):
# Get use_kvm
qemu_use_kvm = d.getVar("QEMU_USE_KVM")
if qemu_use_kvm and \
- (qemu_use_kvm == 'True' and 'x86' in machine or \
+ (oe.types.boolean(qemu_use_kvm) and 'x86' in machine or \
d.getVar('MACHINE') in qemu_use_kvm.split()):
kvm = True
else:
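
The old test compared the raw variable against the literal string 'True', so QEMU_USE_KVM = "1" (or "yes") silently disabled KVM. oe.types.boolean() coerces the usual boolean spellings instead; a sketch of the behaviour, assuming the helper from meta/lib/oe/types.py:

    oe.types.boolean("True")  # True
    oe.types.boolean("1")     # True
    oe.types.boolean("no")    # False
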
diff --git a/import-layers/yocto-poky/meta/classes/testsdk.bbclass b/import-layers/yocto-poky/meta/classes/testsdk.bbclass
index 6b51a33db..2e4334364 100644
--- a/import-layers/yocto-poky/meta/classes/testsdk.bbclass
+++ b/import-layers/yocto-poky/meta/classes/testsdk.bbclass
@@ -156,10 +156,11 @@ def testsdkext_main(d):
with open(os.path.join(sdk_dir, 'conf', 'auto.conf'), 'a+') as f:
f.write('SSTATE_MIRRORS += " \\n file://.* file://%s/PATH"\n' % test_data.get('SSTATE_DIR'))
f.write('SOURCE_MIRROR_URL = "file://%s"\n' % test_data.get('DL_DIR'))
- f.write('INHERIT += "own-mirrors"')
+ f.write('INHERIT += "own-mirrors"\n')
# We need to do this in case we have a minimal SDK
- subprocess.check_output(". %s > /dev/null; devtool sdk-install meta-extsdk-toolchain" % sdk_env, cwd=sdk_dir, shell=True)
+ subprocess.check_output(". %s > /dev/null; devtool sdk-install meta-extsdk-toolchain" % \
+ sdk_env, cwd=sdk_dir, shell=True, stderr=subprocess.STDOUT)
tc = OESDKExtTestContext(td=test_data, logger=logger, sdk_dir=sdk_dir,
sdk_env=sdk_env, target_pkg_manifest=target_pkg_manifest,
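
The added newline matters because auto.conf is opened for append: without it, the next write would fuse onto the same line, e.g. (SOMEVAR is hypothetical):

    INHERIT += "own-mirrors"SOMEVAR = "value"

which no longer parses as two assignments.
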
diff --git a/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass b/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass
index 9bcfe708c..71da5e540 100644
--- a/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass
+++ b/import-layers/yocto-poky/meta/classes/toolchain-scripts.bbclass
@@ -62,7 +62,8 @@ toolchain_create_tree_env_script () {
script=${TMPDIR}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}
rm -f $script
touch $script
- echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${PATH}' >> $script
+ echo 'orig=`pwd`; cd ${COREBASE}; . ./oe-init-build-env ${TOPDIR}; cd $orig' >> $script
+ echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${STAGING_BINDIR_TOOLCHAIN}:$PATH' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=${PKG_CONFIG_SYSROOT_DIR}' >> $script
echo 'export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}' >> $script
echo 'export CONFIG_SITE="${@siteinfo_get_files(d)}"' >> $script
@@ -116,6 +117,21 @@ fi
EOF
}
+toolchain_create_post_relocate_script() {
+ script=$1
+ rm -f $script
+ touch $script
+
+ cat >> $script <<EOF
+if [ -d "${SDKPATHNATIVE}/post-relocate-setup.d/" ]; then
+ for s in ${SDKPATHNATIVE}/post-relocate-setup.d/*.sh; do
+ \$s "\$1"
+ done
+ rm -rf "${SDKPATHNATIVE}/post-relocate-setup.d"
+fi
+EOF
+}
+
#we get the cached site config in the runtime
TOOLCHAIN_CONFIGSITE_NOCACHE = "${@siteinfo_get_files(d)}"
TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
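
In the heredoc above, ${SDKPATHNATIVE} is expanded when the script is generated, while the escaped \$s and \$1 survive into the script and are evaluated when the hook runs. With an illustrative SDKPATHNATIVE of /opt/sdk/sysroots/x86_64-pokysdk-linux, the emitted script would read:

    if [ -d "/opt/sdk/sysroots/x86_64-pokysdk-linux/post-relocate-setup.d/" ]; then
        for s in /opt/sdk/sysroots/x86_64-pokysdk-linux/post-relocate-setup.d/*.sh; do
            $s "$1"
        done
        rm -rf "/opt/sdk/sysroots/x86_64-pokysdk-linux/post-relocate-setup.d"
    fi

Each hook receives the new SDK location as $1, and the directory is removed afterwards so the hooks run only once.
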
diff --git a/import-layers/yocto-poky/meta/classes/uboot-config.bbclass b/import-layers/yocto-poky/meta/classes/uboot-config.bbclass
index 533e175a3..89ff970fc 100644
--- a/import-layers/yocto-poky/meta/classes/uboot-config.bbclass
+++ b/import-layers/yocto-poky/meta/classes/uboot-config.bbclass
@@ -27,13 +27,13 @@ python () {
FILE = os.path.basename(d.getVar("FILE"))
bb.debug(1, "To build %s, see %s for instructions on \
setting up your machine config" % (PN, FILE))
- raise bb.parse.SkipPackage("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE"))
+ raise bb.parse.SkipRecipe("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE"))
if ubootmachine and ubootconfig:
- raise bb.parse.SkipPackage("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.")
+ raise bb.parse.SkipRecipe("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.")
if ubootconfigflags and ubootbinaries:
- raise bb.parse.SkipPackage("You cannot use UBOOT_BINARIES as it is internal to uboot_config.bbclass.")
+ raise bb.parse.SkipRecipe("You cannot use UBOOT_BINARIES as it is internal to uboot_config.bbclass.")
if len(ubootconfig) > 0:
for config in ubootconfig:
@@ -41,7 +41,7 @@ python () {
if config == f:
items = v.split(',')
if items[0] and len(items) > 3:
- raise bb.parse.SkipPackage('Only config,images,binary can be specified!')
+ raise bb.parse.SkipRecipe('Only config,images,binary can be specified!')
d.appendVar('UBOOT_MACHINE', ' ' + items[0])
# IMAGE_FSTYPES appending
if len(items) > 1 and items[1]:
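
A hedged example of the flag format these checks enforce, as it would appear in a machine configuration (values illustrative):

    UBOOT_CONFIG ??= "sd"
    # <u-boot config target>,<IMAGE_FSTYPES to append>,<binary name>
    UBOOT_CONFIG[sd] = "mx6sabresd_config,sdcard,u-boot.imx"

items[0] feeds UBOOT_MACHINE, items[1] extends IMAGE_FSTYPES, items[2] names the deployed binary; a fourth field triggers the SkipRecipe above.
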
diff --git a/import-layers/yocto-poky/meta/classes/uninative.bbclass b/import-layers/yocto-poky/meta/classes/uninative.bbclass
index 172336428..de2221a36 100644
--- a/import-layers/yocto-poky/meta/classes/uninative.bbclass
+++ b/import-layers/yocto-poky/meta/classes/uninative.bbclass
@@ -8,6 +8,9 @@ UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc.tar.bz2"
#UNINATIVE_CHECKSUM[x86_64] = "dead"
UNINATIVE_DLDIR ?= "${DL_DIR}/uninative/"
+# Enabling uninative will change the following variables, so they need to go on the parsing white list to prevent multiple recipe parses
+BB_HASHCONFIG_WHITELIST += "NATIVELSBSTRING SSTATEPOSTUNPACKFUNCS BUILD_LDFLAGS"
+
addhandler uninative_event_fetchloader
uninative_event_fetchloader[eventmask] = "bb.event.BuildStarted"
@@ -126,6 +129,9 @@ def enable_uninative(d):
d.setVar("NATIVELSBSTRING", "universal%s" % oe.utils.host_gcc_version(d))
d.appendVar("SSTATEPOSTUNPACKFUNCS", " uninative_changeinterp")
d.appendVarFlag("SSTATEPOSTUNPACKFUNCS", "vardepvalueexclude", "| uninative_changeinterp")
+ d.appendVar("BUILD_LDFLAGS", " -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER}")
+ d.appendVarFlag("BUILD_LDFLAGS", "vardepvalueexclude", "| -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER}")
+ d.appendVarFlag("BUILD_LDFLAGS", "vardepsexclude", "UNINATIVE_LOADER")
d.prependVar("PATH", "${STAGING_DIR}-uninative/${BUILD_ARCH}-linux${bindir_native}:")
python uninative_changeinterp () {
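
The BUILD_LDFLAGS addition points the ELF interpreter of newly linked native binaries at the uninative loader, so sstate objects built on one host distribution run on another; the vardepvalueexclude/vardepsexclude flags keep the loader path out of task signatures, since it varies per build directory. One way to observe the effect on a built native tool (paths illustrative):

    $ readelf -l recipe-sysroot-native/usr/bin/some-tool | grep interpreter
        [Requesting program interpreter: /build/tmp/sysroots-uninative/x86_64-linux/lib/ld-linux-x86-64.so.2]
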
diff --git a/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass b/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass
index 589a99ff4..64bf6dc82 100644
--- a/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass
+++ b/import-layers/yocto-poky/meta/classes/useradd-staticids.bbclass
@@ -307,5 +307,5 @@ python __anonymous() {
update_useradd_static_config(d)
except NotImplementedError as f:
bb.debug(1, "Skipping recipe %s: %s" % (d.getVar('PN'), f))
- raise bb.parse.SkipPackage(f)
+ raise bb.parse.SkipRecipe(f)
}
diff --git a/import-layers/yocto-poky/meta/classes/utility-tasks.bbclass b/import-layers/yocto-poky/meta/classes/utility-tasks.bbclass
index 587bfd4ab..b1f27d365 100644
--- a/import-layers/yocto-poky/meta/classes/utility-tasks.bbclass
+++ b/import-layers/yocto-poky/meta/classes/utility-tasks.bbclass
@@ -50,17 +50,4 @@ python do_checkuri() {
bb.fatal(str(e))
}
-addtask checkuriall after do_checkuri
-do_checkuriall[recrdeptask] = "do_checkuriall do_checkuri"
-do_checkuriall[recideptask] = "do_${BB_DEFAULT_TASK}"
-do_checkuriall[nostamp] = "1"
-do_checkuriall() {
- :
-}
-addtask fetchall after do_fetch
-do_fetchall[recrdeptask] = "do_fetchall do_fetch"
-do_fetchall[recideptask] = "do_${BB_DEFAULT_TASK}"
-do_fetchall() {
- :
-}
diff --git a/import-layers/yocto-poky/meta/classes/utils.bbclass b/import-layers/yocto-poky/meta/classes/utils.bbclass
index 8e07eac07..4f016e3d0 100644
--- a/import-layers/yocto-poky/meta/classes/utils.bbclass
+++ b/import-layers/yocto-poky/meta/classes/utils.bbclass
@@ -1,44 +1,3 @@
-# For compatibility
-def base_path_join(a, *p):
- return oe.path.join(a, *p)
-
-def base_path_relative(src, dest):
- return oe.path.relative(src, dest)
-
-def base_path_out(path, d):
- return oe.path.format_display(path, d)
-
-def base_read_file(filename):
- return oe.utils.read_file(filename)
-
-def base_ifelse(condition, iftrue = True, iffalse = False):
- return oe.utils.ifelse(condition, iftrue, iffalse)
-
-def base_conditional(variable, checkvalue, truevalue, falsevalue, d):
- return oe.utils.conditional(variable, checkvalue, truevalue, falsevalue, d)
-
-def base_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
- return oe.utils.less_or_equal(variable, checkvalue, truevalue, falsevalue, d)
-
-def base_version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
- return oe.utils.version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d)
-
-def base_contains(variable, checkvalues, truevalue, falsevalue, d):
- bb.note('base_contains is deprecated, please use bb.utils.contains instead.')
- return bb.utils.contains(variable, checkvalues, truevalue, falsevalue, d)
-
-def base_both_contain(variable1, variable2, checkvalue, d):
- return oe.utils.both_contain(variable1, variable2, checkvalue, d)
-
-def base_prune_suffix(var, suffixes, d):
- return oe.utils.prune_suffix(var, suffixes, d)
-
-def oe_filter(f, str, d):
- return oe.utils.str_filter(f, str, d)
-
-def oe_filter_out(f, str, d):
- return oe.utils.str_filter_out(f, str, d)
-
def machine_paths(d):
"""List any existing machine specific filespath directories"""
machine = d.getVar("MACHINE")
@@ -364,12 +323,7 @@ def multilib_pkg_extend(d, pkg):
return pkgs
def get_multilib_datastore(variant, d):
- localdata = bb.data.createCopy(d)
- overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
- localdata.setVar("OVERRIDES", overrides)
- localdata.setVar("MLPREFIX", variant + "-")
- return localdata
-get_multilib_datastore[vardepsexclude] = "OVERRIDES"
+ return oe.utils.get_multilib_datastore(variant, d)
def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
"""Return a string of all ${var} in all multilib tune configuration"""
diff --git a/import-layers/yocto-poky/meta/classes/waf.bbclass b/import-layers/yocto-poky/meta/classes/waf.bbclass
index acbda278a..19e93761b 100644
--- a/import-layers/yocto-poky/meta/classes/waf.bbclass
+++ b/import-layers/yocto-poky/meta/classes/waf.bbclass
@@ -3,39 +3,20 @@ DISABLE_STATIC = ""
EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
-def get_waf_parallel_make(d):
- pm = d.getVar('PARALLEL_MAKE')
- if pm:
- # look for '-j' and throw other options (e.g. '-l') away
- # because they might have different meaning in bjam
- pm = pm.split()
- while pm:
- v = None
- opt = pm.pop(0)
- if opt == '-j':
- v = pm.pop(0)
- elif opt.startswith('-j'):
- v = opt[2:].strip()
- else:
- v = None
-
- if v:
- v = min(64, int(v))
- return '-j' + str(v)
-
- return ""
-
python waf_preconfigure() {
+ import subprocess
from distutils.version import StrictVersion
- srcsubdir = d.getVar('S')
- wafbin = os.path.join(srcsubdir, 'waf')
- status, result = oe.utils.getstatusoutput(wafbin + " --version")
- if status != 0:
- bb.warn("Unable to execute waf --version, exit code %d. Assuming waf version without bindir/libdir support." % status)
- return
- version = result.split()[1]
- if StrictVersion(version) >= StrictVersion("1.8.7"):
- d.setVar("WAF_EXTRA_CONF", "--bindir=${bindir} --libdir=${libdir}")
+ subsrcdir = d.getVar('S')
+ wafbin = os.path.join(subsrcdir, 'waf')
+ try:
+ result = subprocess.check_output([wafbin, '--version'], cwd=subsrcdir, stderr=subprocess.STDOUT)
+ version = result.decode('utf-8').split()[1]
+ if StrictVersion(version) >= StrictVersion("1.8.7"):
+ d.setVar("WAF_EXTRA_CONF", "--bindir=${bindir} --libdir=${libdir}")
+ except subprocess.CalledProcessError as e:
+ bb.warn("Unable to execute waf --version, exit code %d. Assuming waf version without bindir/libdir support." % e.returncode)
+ except FileNotFoundError:
+ bb.fatal("waf does not exist in %s" % subsrcdir)
}
do_configure[prefuncs] += "waf_preconfigure"
@@ -44,8 +25,9 @@ waf_do_configure() {
${S}/waf configure --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF}
}
+do_compile[progress] = "outof:^\[\s*(\d+)/\s*(\d+)\]\s+"
waf_do_compile() {
- ${S}/waf build ${@get_waf_parallel_make(d)}
+ ${S}/waf build ${@oe.utils.parallel_make_argument(d, '-j%d', limit=64)}
}
waf_do_install() {
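
Two notes on the waf changes: the do_compile[progress] regex turns waf's "[ 12/345]" counter output into task progress, and the open-coded -j parser is replaced by oe.utils.parallel_make_argument(), which formats PARALLEL_MAKE into a single option and clamps it. A sketch of the helper's behaviour (values illustrative):

    # PARALLEL_MAKE = "-j 48"  -> parallel_make_argument(d, '-j%d', limit=64) == "-j48"
    # PARALLEL_MAKE = "-j 128" -> clamped by limit to "-j64"
    # PARALLEL_MAKE unset      -> returns "" (waf then picks its own default)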